Diffstat (limited to 'jstests/sharding')
-rw-r--r-- jstests/sharding/accurate_count_with_predicate.js | 52
-rw-r--r-- jstests/sharding/add_and_remove_shard_from_zone.js | 52
-rw-r--r-- jstests/sharding/addshard1.js | 115
-rw-r--r-- jstests/sharding/addshard2.js | 371
-rw-r--r-- jstests/sharding/addshard4.js | 83
-rw-r--r-- jstests/sharding/addshard5.js | 60
-rw-r--r-- jstests/sharding/addshard6.js | 86
-rw-r--r-- jstests/sharding/addshard_idempotent.js | 105
-rw-r--r-- jstests/sharding/advance_cluster_time_action_type.js | 117
-rw-r--r-- jstests/sharding/advance_logical_time_with_valid_signature.js | 58
-rw-r--r-- jstests/sharding/after_cluster_time.js | 180
-rw-r--r-- jstests/sharding/agg_error_reports_shard_host_and_port.js | 42
-rw-r--r-- jstests/sharding/agg_explain_fmt.js | 66
-rw-r--r-- jstests/sharding/agg_project_limit_pipe_split.js | 140
-rw-r--r-- jstests/sharding/agg_sort.js | 420
-rw-r--r-- jstests/sharding/agg_write_stages_cannot_run_on_mongos.js | 71
-rw-r--r-- jstests/sharding/aggregates_during_balancing.js | 479
-rw-r--r-- jstests/sharding/aggregation_currentop.js | 1573
-rw-r--r-- jstests/sharding/aggregation_internal_parameters.js | 224
-rw-r--r-- jstests/sharding/aggregations_in_session.js | 60
-rw-r--r-- jstests/sharding/all_config_servers_blackholed_from_mongos.js | 55
-rw-r--r-- jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js | 110
-rw-r--r-- jstests/sharding/allow_partial_results.js | 107
-rw-r--r-- jstests/sharding/arbiters_do_not_use_cluster_time.js | 42
-rw-r--r-- jstests/sharding/array_shard_key.js | 157
-rw-r--r-- jstests/sharding/auth.js | 594
-rw-r--r-- jstests/sharding/auth2.js | 40
-rw-r--r-- jstests/sharding/authCommands.js | 561
-rw-r--r-- jstests/sharding/authConnectionHook.js | 72
-rw-r--r-- jstests/sharding/auth_add_shard.js | 137
-rw-r--r-- jstests/sharding/auth_no_config_primary.js | 62
-rw-r--r-- jstests/sharding/auth_sharding_cmd_metadata.js | 55
-rw-r--r-- jstests/sharding/auth_slaveok_routing.js | 214
-rw-r--r-- jstests/sharding/authmr.js | 217
-rw-r--r-- jstests/sharding/authwhere.js | 141
-rw-r--r-- jstests/sharding/auto_rebalance_parallel.js | 96
-rw-r--r-- jstests/sharding/auto_rebalance_parallel_replica_sets.js | 102
-rw-r--r-- jstests/sharding/autodiscover_config_rs_from_secondary.js | 102
-rw-r--r-- jstests/sharding/autosplit.js | 126
-rw-r--r-- jstests/sharding/autosplit_heuristics.js | 133
-rw-r--r-- jstests/sharding/autosplit_with_balancer.js | 270
-rw-r--r-- jstests/sharding/balance_repl.js | 106
-rw-r--r-- jstests/sharding/balancer_shell_commands.js | 24
-rw-r--r-- jstests/sharding/balancer_window.js | 128
-rw-r--r-- jstests/sharding/basic_drop_coll.js | 83
-rw-r--r-- jstests/sharding/basic_merge.js | 88
-rw-r--r-- jstests/sharding/basic_sharding_params.js | 110
-rw-r--r-- jstests/sharding/basic_split.js | 151
-rw-r--r-- jstests/sharding/batch_write_command_sharded.js | 490
-rw-r--r-- jstests/sharding/bouncing_count.js | 100
-rw-r--r-- jstests/sharding/bulk_insert.js | 396
-rw-r--r-- jstests/sharding/bulk_shard_insert.js | 128
-rw-r--r-- jstests/sharding/causal_consistency_shell_support.js | 354
-rw-r--r-- jstests/sharding/change_stream_chunk_migration.js | 322
-rw-r--r-- jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js | 351
-rw-r--r-- jstests/sharding/change_stream_lookup_single_shard_cluster.js | 92
-rw-r--r-- jstests/sharding/change_stream_metadata_notifications.js | 292
-rw-r--r-- jstests/sharding/change_stream_no_shards.js | 54
-rw-r--r-- jstests/sharding/change_stream_read_preference.js | 263
-rw-r--r-- jstests/sharding/change_stream_resume_from_different_mongos.js | 182
-rw-r--r-- jstests/sharding/change_stream_shard_failover.js | 192
-rw-r--r-- jstests/sharding/change_stream_show_migration_events.js | 518
-rw-r--r-- jstests/sharding/change_stream_transaction_sharded.js | 481
-rw-r--r-- jstests/sharding/change_stream_update_lookup_collation.js | 317
-rw-r--r-- jstests/sharding/change_stream_update_lookup_read_concern.js | 373
-rw-r--r-- jstests/sharding/change_streams.js | 485
-rw-r--r-- jstests/sharding/change_streams_establishment_finds_new_shards.js | 104
-rw-r--r-- jstests/sharding/change_streams_primary_shard_unaware.js | 355
-rw-r--r-- jstests/sharding/change_streams_shards_start_in_sync.js | 191
-rw-r--r-- jstests/sharding/change_streams_unsharded_becomes_sharded.js | 334
-rw-r--r-- jstests/sharding/change_streams_whole_db.js | 366
-rw-r--r-- jstests/sharding/cleanup_orphaned_auth.js | 82
-rw-r--r-- jstests/sharding/cleanup_orphaned_basic.js | 228
-rw-r--r-- jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js | 291
-rw-r--r-- jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js | 208
-rw-r--r-- jstests/sharding/cleanup_orphaned_cmd_hashed.js | 131
-rw-r--r-- jstests/sharding/clone_catalog_data.js | 5
-rw-r--r-- jstests/sharding/coll_epoch_test0.js | 2
-rw-r--r-- jstests/sharding/coll_epoch_test1.js | 120
-rw-r--r-- jstests/sharding/collation_lookup.js | 496
-rw-r--r-- jstests/sharding/collation_targeting.js | 843
-rw-r--r-- jstests/sharding/collation_targeting_inherited.js | 894
-rw-r--r-- jstests/sharding/commands_that_write_accept_wc_configRS.js | 387
-rw-r--r-- jstests/sharding/commands_that_write_accept_wc_shards.js | 686
-rw-r--r-- jstests/sharding/config_rs_no_primary.js | 100
-rw-r--r-- jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js | 374
-rw-r--r-- jstests/sharding/conn_pool_stats.js | 54
-rw-r--r-- jstests/sharding/convert_to_and_from_sharded.js | 174
-rw-r--r-- jstests/sharding/count1.js | 355
-rw-r--r-- jstests/sharding/count2.js | 77
-rw-r--r-- jstests/sharding/count_config_servers.js | 115
-rw-r--r-- jstests/sharding/count_slaveok.js | 92
-rw-r--r-- jstests/sharding/covered_shard_key_indexes.js | 282
-rw-r--r-- jstests/sharding/create_database.js | 95
-rw-r--r-- jstests/sharding/create_idx_empty_primary.js | 39
-rw-r--r-- jstests/sharding/current_op_no_shards.js | 15
-rw-r--r-- jstests/sharding/current_op_with_drop_shard.js | 30
-rw-r--r-- jstests/sharding/cursor1.js | 109
-rw-r--r-- jstests/sharding/cursor_timeout.js | 213
-rw-r--r-- jstests/sharding/cursor_valid_after_shard_stepdown.js | 57
-rw-r--r-- jstests/sharding/database_and_shard_versioning_all_commands.js | 1133
-rw-r--r-- jstests/sharding/database_versioning_cache_entry_without_version_updated_with_version.js | 106
-rw-r--r-- jstests/sharding/database_versioning_safe_secondary_reads.js | 426
-rw-r--r-- jstests/sharding/delete_during_migrate.js | 46
-rw-r--r-- jstests/sharding/diffservers1.js | 34
-rw-r--r-- jstests/sharding/disable_autosplit.js | 45
-rw-r--r-- jstests/sharding/drop_configdb.js | 46
-rw-r--r-- jstests/sharding/drop_sharded_db.js | 100
-rw-r--r-- jstests/sharding/drop_sharded_db_tags_cleanup.js | 40
-rw-r--r-- jstests/sharding/dump_coll_metadata.js | 109
-rw-r--r-- jstests/sharding/empty_doc_results.js | 106
-rw-r--r-- jstests/sharding/enable_sharding_basic.js | 77
-rw-r--r-- jstests/sharding/enforce_zone_policy.js | 150
-rw-r--r-- jstests/sharding/error_during_agg_getmore.js | 95
-rw-r--r-- jstests/sharding/error_propagation.js | 34
-rw-r--r-- jstests/sharding/explainFind_stale_mongos.js | 38
-rw-r--r-- jstests/sharding/explain_agg_read_pref.js | 289
-rw-r--r-- jstests/sharding/explain_cmd.js | 309
-rw-r--r-- jstests/sharding/explain_find_and_modify_sharded.js | 138
-rw-r--r-- jstests/sharding/explain_read_pref.js | 1
-rw-r--r-- jstests/sharding/failcommand_failpoint_not_parallel.js | 32
-rw-r--r-- jstests/sharding/failcommand_ignores_internal.js | 91
-rw-r--r-- jstests/sharding/features1.js | 259
-rw-r--r-- jstests/sharding/features2.js | 286
-rw-r--r-- jstests/sharding/features3.js | 271
-rw-r--r-- jstests/sharding/find_and_modify_after_multi_write.js | 105
-rw-r--r-- jstests/sharding/find_collname_uuid_test.js | 20
-rw-r--r-- jstests/sharding/find_getmore_cmd.js | 318
-rw-r--r-- jstests/sharding/findandmodify1.js | 154
-rw-r--r-- jstests/sharding/findandmodify2.js | 198
-rw-r--r-- jstests/sharding/geo_near_random1.js | 76
-rw-r--r-- jstests/sharding/geo_near_random2.js | 96
-rw-r--r-- jstests/sharding/geo_near_sharded.js | 104
-rw-r--r-- jstests/sharding/geo_near_sort.js | 155
-rw-r--r-- jstests/sharding/graph_lookup.js | 42
-rw-r--r-- jstests/sharding/hash_basic.js | 81
-rw-r--r-- jstests/sharding/hash_shard_num_chunks.js | 45
-rw-r--r-- jstests/sharding/hash_shard_unique_compound.js | 54
-rw-r--r-- jstests/sharding/implicit_db_creation.js | 53
-rw-r--r-- jstests/sharding/in_memory_sort_limit.js | 89
-rw-r--r-- jstests/sharding/index1.js | 651
-rw-r--r-- jstests/sharding/index_and_collection_option_propagation.js | 398
-rw-r--r-- jstests/sharding/initial_split_validate_shard_collections.js | 128
-rw-r--r-- jstests/sharding/inserts_consistent.js | 92
-rw-r--r-- jstests/sharding/invalid_system_views_sharded_collection.js | 214
-rw-r--r-- jstests/sharding/json_schema.js | 117
-rw-r--r-- jstests/sharding/jumbo1.js | 91
-rw-r--r-- jstests/sharding/key_many.js | 448
-rw-r--r-- jstests/sharding/key_rotation.js | 150
-rw-r--r-- jstests/sharding/key_string.js | 109
-rw-r--r-- jstests/sharding/keys_rotation_interval_sec.js | 44
-rw-r--r-- jstests/sharding/kill_op_overflow.js | 10
-rw-r--r-- jstests/sharding/kill_pinned_cursor.js | 422
-rw-r--r-- jstests/sharding/kill_sessions.js | 96
-rw-r--r-- jstests/sharding/killop.js | 93
-rw-r--r-- jstests/sharding/lagged_config_secondary.js | 92
-rw-r--r-- jstests/sharding/large_chunk.js | 94
-rw-r--r-- jstests/sharding/libs/sharded_transactions_helpers.js | 48
-rw-r--r-- jstests/sharding/limit_push.js | 111
-rw-r--r-- jstests/sharding/linearizable_read_concern.js | 200
-rw-r--r-- jstests/sharding/listDatabases.js | 174
-rw-r--r-- jstests/sharding/listshards.js | 112
-rw-r--r-- jstests/sharding/localhostAuthBypass.js | 447
-rw-r--r-- jstests/sharding/logical_time_api.js | 173
-rw-r--r-- jstests/sharding/logical_time_metadata.js | 81
-rw-r--r-- jstests/sharding/lookup.js | 1026
-rw-r--r-- jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js | 208
-rw-r--r-- jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js | 117
-rw-r--r-- jstests/sharding/lookup_change_stream_post_image_id_shard_key.js | 176
-rw-r--r-- jstests/sharding/lookup_mongod_unaware.js | 341
-rw-r--r-- jstests/sharding/lookup_on_shard.js | 190
-rw-r--r-- jstests/sharding/lookup_stale_mongos.js | 257
-rw-r--r-- jstests/sharding/major_version_check.js | 67
-rw-r--r-- jstests/sharding/mapReduce_inSharded.js | 165
-rw-r--r-- jstests/sharding/mapReduce_inSharded_outSharded.js | 121
-rw-r--r-- jstests/sharding/mapReduce_nonSharded.js | 3
-rw-r--r-- jstests/sharding/mapReduce_outSharded.js | 3
-rw-r--r-- jstests/sharding/mapReduce_outSharded_checkUUID.js | 294
-rw-r--r-- jstests/sharding/max_time_ms_sharded.js | 463
-rw-r--r-- jstests/sharding/max_time_ms_sharded_new_commands.js | 71
-rw-r--r-- jstests/sharding/merge_chunks_compound_shard_key.js | 173
-rw-r--r-- jstests/sharding/merge_chunks_test.js | 270
-rw-r--r-- jstests/sharding/merge_chunks_test_with_md_ops.js | 69
-rw-r--r-- jstests/sharding/merge_command_options.js | 335
-rw-r--r-- jstests/sharding/merge_does_not_force_pipeline_split.js | 172
-rw-r--r-- jstests/sharding/merge_from_stale_mongos.js | 446
-rw-r--r-- jstests/sharding/merge_hashed_shard_key.js | 139
-rw-r--r-- jstests/sharding/merge_on_fields.js | 138
-rw-r--r-- jstests/sharding/merge_requires_unique_index.js | 493
-rw-r--r-- jstests/sharding/merge_stale_on_fields.js | 328
-rw-r--r-- jstests/sharding/merge_to_existing.js | 248
-rw-r--r-- jstests/sharding/merge_to_non_existing.js | 197
-rw-r--r-- jstests/sharding/merge_with_chunk_migrations.js | 222
-rw-r--r-- jstests/sharding/merge_with_drop_shard.js | 204
-rw-r--r-- jstests/sharding/merge_with_move_primary.js | 288
-rw-r--r-- jstests/sharding/merge_write_concern.js | 178
-rw-r--r-- jstests/sharding/migrateBig.js | 95
-rw-r--r-- jstests/sharding/migrateBig_balancer.js | 105
-rw-r--r-- jstests/sharding/migration_critical_section_concurrency.js | 88
-rw-r--r-- jstests/sharding/migration_failure.js | 116
-rw-r--r-- jstests/sharding/migration_id_index.js | 70
-rw-r--r-- jstests/sharding/migration_ignore_interrupts_1.js | 136
-rw-r--r-- jstests/sharding/migration_ignore_interrupts_2.js | 76
-rw-r--r-- jstests/sharding/migration_ignore_interrupts_3.js | 190
-rw-r--r-- jstests/sharding/migration_ignore_interrupts_4.js | 193
-rw-r--r-- jstests/sharding/migration_move_chunk_after_receive.js | 106
-rw-r--r-- jstests/sharding/migration_server_status.js | 135
-rw-r--r-- jstests/sharding/migration_sets_fromMigrate_flag.js | 345
-rw-r--r-- jstests/sharding/migration_with_source_ops.js | 246
-rw-r--r-- jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js | 52
-rw-r--r-- jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js | 67
-rw-r--r-- jstests/sharding/missing_key.js | 58
-rw-r--r-- jstests/sharding/mongod_returns_no_cluster_time_without_keys.js | 162
-rw-r--r-- jstests/sharding/mongos_dataSize_test.js | 20
-rw-r--r-- jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js | 124
-rw-r--r-- jstests/sharding/mongos_local_explain.js | 38
-rw-r--r-- jstests/sharding/mongos_no_detect_sharding.js | 49
-rw-r--r-- jstests/sharding/mongos_no_replica_set_refresh.js | 222
-rw-r--r-- jstests/sharding/mongos_query_comment.js | 155
-rw-r--r-- jstests/sharding/mongos_rs_shard_failure_tolerance.js | 746
-rw-r--r-- jstests/sharding/mongos_shard_failure_tolerance.js | 168
-rw-r--r-- jstests/sharding/mongos_validate_writes.js | 120
-rw-r--r-- jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js | 65
-rw-r--r-- jstests/sharding/movePrimary1.js | 83
-rw-r--r-- jstests/sharding/move_chunk_basic.js | 106
-rw-r--r-- jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js | 221
-rw-r--r-- jstests/sharding/move_chunk_insert_with_write_retryability.js | 76
-rw-r--r-- jstests/sharding/move_chunk_open_cursors.js | 80
-rw-r--r-- jstests/sharding/move_chunk_remove_with_write_retryability.js | 90
-rw-r--r-- jstests/sharding/move_chunk_update_shard_key_in_retryable_write.js | 710
-rw-r--r-- jstests/sharding/move_chunk_update_with_write_retryability.js | 96
-rw-r--r-- jstests/sharding/move_chunk_wc.js | 171
-rw-r--r-- jstests/sharding/move_primary_basic.js | 87
-rw-r--r-- jstests/sharding/move_primary_clone_test.js | 367
-rw-r--r-- jstests/sharding/move_primary_fails_without_database_version.js | 20
-rw-r--r-- jstests/sharding/movechunk_commit_changelog_stats.js | 48
-rw-r--r-- jstests/sharding/movechunk_interrupt_at_primary_stepdown.js | 98
-rw-r--r-- jstests/sharding/movechunk_parallel.js | 148
-rw-r--r-- jstests/sharding/mrShardedOutputAuth.js | 171
-rw-r--r-- jstests/sharding/mr_and_agg_versioning.js | 95
-rw-r--r-- jstests/sharding/mr_output_sharded_validation.js | 76
-rw-r--r-- jstests/sharding/mr_shard_version.js | 119
-rw-r--r-- jstests/sharding/multi_coll_drop.js | 53
-rw-r--r-- jstests/sharding/multi_mongos2.js | 76
-rw-r--r-- jstests/sharding/multi_mongos2a.js | 36
-rw-r--r-- jstests/sharding/multi_shard_transaction_without_majority_reads.js | 42
-rw-r--r-- jstests/sharding/multi_write_target.js | 99
-rw-r--r-- jstests/sharding/names.js | 109
-rw-r--r-- jstests/sharding/nonreplicated_uuids_on_shardservers.js | 30
-rw-r--r-- jstests/sharding/not_allowed_on_sharded_collection_cmd.js | 31
-rw-r--r-- jstests/sharding/now_variable_replset.js | 247
-rw-r--r-- jstests/sharding/now_variable_sharding.js | 285
-rw-r--r-- jstests/sharding/operation_time_api.js | 96
-rw-r--r-- jstests/sharding/oplog_document_key.js | 196
-rw-r--r-- jstests/sharding/out_fails_to_replace_sharded_collection.js | 84
-rw-r--r-- jstests/sharding/parallel.js | 103
-rw-r--r-- jstests/sharding/pending_chunk.js | 115
-rw-r--r-- jstests/sharding/prefix_shard_key.js | 335
-rw-r--r-- jstests/sharding/prepare_transaction_then_migrate.js | 96
-rw-r--r-- jstests/sharding/presplit.js | 73
-rw-r--r-- jstests/sharding/primary_config_server_blackholed_from_mongos.js | 112
-rw-r--r-- jstests/sharding/printShardingStatus.js | 433
-rw-r--r-- jstests/sharding/query_after_multi_write.js | 91
-rw-r--r-- jstests/sharding/query_config.js | 685
-rw-r--r-- jstests/sharding/range_deleter_does_not_block_stepdown_with_prepare_conflict.js | 93
-rw-r--r-- jstests/sharding/read_after_optime.js | 76
-rw-r--r-- jstests/sharding/read_committed_lookup.js | 91
-rw-r--r-- jstests/sharding/read_does_not_create_namespaces.js | 15
-rw-r--r-- jstests/sharding/read_pref.js | 336
-rw-r--r-- jstests/sharding/read_pref_cmd.js | 6
-rw-r--r-- jstests/sharding/read_pref_multi_mongos_stale_config.js | 57
-rw-r--r-- jstests/sharding/recovering_slaveok.js | 175
-rw-r--r-- jstests/sharding/refresh_sessions.js | 168
-rw-r--r-- jstests/sharding/regex_targeting.js | 565
-rw-r--r-- jstests/sharding/remove1.js | 66
-rw-r--r-- jstests/sharding/remove2.js | 362
-rw-r--r-- jstests/sharding/remove3.js | 61
-rw-r--r-- jstests/sharding/rename.js | 118
-rw-r--r-- jstests/sharding/rename_across_mongos.js | 39
-rw-r--r-- jstests/sharding/repl_monitor_refresh.js | 135
-rw-r--r-- jstests/sharding/replication_with_undefined_shard_key.js | 40
-rw-r--r-- jstests/sharding/replmonitor_bad_seed.js | 38
-rw-r--r-- jstests/sharding/restart_transactions.js | 320
-rw-r--r-- jstests/sharding/resume_change_stream.js | 356
-rw-r--r-- jstests/sharding/resume_change_stream_from_stale_mongos.js | 140
-rw-r--r-- jstests/sharding/resume_change_stream_on_subset_of_shards.js | 111
-rw-r--r-- jstests/sharding/retryable_writes.js | 1041
-rw-r--r-- jstests/sharding/rs_stepdown_and_pooling.js | 136
-rw-r--r-- jstests/sharding/safe_secondary_reads_drop_recreate.js | 1140
-rw-r--r-- jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js | 1007
-rw-r--r-- jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js | 818
-rw-r--r-- jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js | 186
-rw-r--r-- jstests/sharding/secondary_shard_versioning.js | 160
-rw-r--r-- jstests/sharding/server_status.js | 52
-rw-r--r-- jstests/sharding/server_status_crud_metrics.js | 104
-rw-r--r-- jstests/sharding/session_info_in_oplog.js | 655
-rw-r--r-- jstests/sharding/sessions_collection_auto_healing.js | 237
-rw-r--r-- jstests/sharding/shard1.js | 61
-rw-r--r-- jstests/sharding/shard2.js | 413
-rw-r--r-- jstests/sharding/shard3.js | 353
-rw-r--r-- jstests/sharding/shard6.js | 188
-rw-r--r-- jstests/sharding/shard_aware_init.js | 306
-rw-r--r-- jstests/sharding/shard_aware_init_secondaries.js | 127
-rw-r--r-- jstests/sharding/shard_aware_on_add_shard.js | 91
-rw-r--r-- jstests/sharding/shard_aware_primary_failover.js | 106
-rw-r--r-- jstests/sharding/shard_collection_basic.js | 579
-rw-r--r-- jstests/sharding/shard_collection_existing_zones.js | 346
-rw-r--r-- jstests/sharding/shard_collection_verify_initial_chunks.js | 94
-rw-r--r-- jstests/sharding/shard_config_db_collections.js | 72
-rw-r--r-- jstests/sharding/shard_existing.js | 54
-rw-r--r-- jstests/sharding/shard_existing_coll_chunk_count.js | 323
-rw-r--r-- jstests/sharding/shard_identity_config_update.js | 134
-rw-r--r-- jstests/sharding/shard_identity_rollback.js | 248
-rw-r--r-- jstests/sharding/shard_insert_getlasterror_w2.js | 157
-rw-r--r-- jstests/sharding/shard_keycount.js | 52
-rw-r--r-- jstests/sharding/shard_kill_and_pooling.js | 99
-rw-r--r-- jstests/sharding/shard_targeting.js | 89
-rw-r--r-- jstests/sharding/shard_with_special_db_names.js | 38
-rw-r--r-- jstests/sharding/sharded_limit_batchsize.js | 293
-rw-r--r-- jstests/sharding/sharded_profile.js | 37
-rw-r--r-- jstests/sharding/sharding_balance1.js | 98
-rw-r--r-- jstests/sharding/sharding_balance2.js | 104
-rw-r--r-- jstests/sharding/sharding_balance3.js | 129
-rw-r--r-- jstests/sharding/sharding_balance4.js | 246
-rw-r--r-- jstests/sharding/sharding_migrate_cursor1.js | 119
-rw-r--r-- jstests/sharding/sharding_multiple_ns_rs.js | 72
-rw-r--r-- jstests/sharding/sharding_options.js | 3
-rw-r--r-- jstests/sharding/sharding_rs1.js | 81
-rw-r--r-- jstests/sharding/sharding_rs2.js | 368
-rw-r--r-- jstests/sharding/sharding_statistics_server_status.js | 343
-rw-r--r-- jstests/sharding/shards_and_config_return_last_committed_optime.js | 342
-rw-r--r-- jstests/sharding/single_shard_transaction_with_arbiter.js | 78
-rw-r--r-- jstests/sharding/single_shard_transaction_without_majority_reads_lagged.js | 144
-rw-r--r-- jstests/sharding/snapshot_cursor_commands_mongos.js | 505
-rw-r--r-- jstests/sharding/sort1.js | 198
-rw-r--r-- jstests/sharding/split_against_shard_with_invalid_split_points.js | 74
-rw-r--r-- jstests/sharding/split_large_key.js | 105
-rw-r--r-- jstests/sharding/split_with_force_small.js | 92
-rw-r--r-- jstests/sharding/ssv_config_check.js | 64
-rw-r--r-- jstests/sharding/stale_mongos_updates_and_removes.js | 425
-rw-r--r-- jstests/sharding/stale_version_write.js | 42
-rw-r--r-- jstests/sharding/startup_with_all_configs_down.js | 167
-rw-r--r-- jstests/sharding/stats.js | 419
-rw-r--r-- jstests/sharding/tag_auto_split.js | 44
-rw-r--r-- jstests/sharding/tag_auto_split_partial_key.js | 62
-rw-r--r-- jstests/sharding/tag_range.js | 126
-rw-r--r-- jstests/sharding/test_stacked_migration_cleanup.js | 91
-rw-r--r-- jstests/sharding/time_zone_info_mongos.js | 191
-rw-r--r-- jstests/sharding/top_chunk_autosplit.js | 262
-rw-r--r-- jstests/sharding/top_chunk_split.js | 273
-rw-r--r-- jstests/sharding/trace_missing_docs_test.js | 56
-rw-r--r-- jstests/sharding/transactions_causal_consistency.js | 131
-rw-r--r-- jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js | 60
-rw-r--r-- jstests/sharding/transactions_error_labels.js | 375
-rw-r--r-- jstests/sharding/transactions_expiration.js | 135
-rw-r--r-- jstests/sharding/transactions_implicit_abort.js | 83
-rw-r--r-- jstests/sharding/transactions_multi_writes.js | 260
-rw-r--r-- jstests/sharding/transactions_read_concerns.js | 121
-rw-r--r-- jstests/sharding/transactions_reject_writes_for_moved_chunks.js | 262
-rw-r--r-- jstests/sharding/transactions_snapshot_errors_first_statement.js | 272
-rw-r--r-- jstests/sharding/transactions_snapshot_errors_subsequent_statements.js | 188
-rw-r--r-- jstests/sharding/transactions_stale_database_version_errors.js | 171
-rw-r--r-- jstests/sharding/transactions_stale_shard_version_errors.js | 365
-rw-r--r-- jstests/sharding/transactions_target_at_point_in_time.js | 160
-rw-r--r-- jstests/sharding/transactions_targeting_errors.js | 48
-rw-r--r-- jstests/sharding/transactions_view_resolution.js | 565
-rw-r--r-- jstests/sharding/transactions_writes_not_retryable.js | 210
-rw-r--r-- jstests/sharding/txn_agg.js | 163
-rw-r--r-- jstests/sharding/txn_being_applied_to_secondary_cannot_be_killed.js | 197
-rw-r--r-- jstests/sharding/txn_commit_optimizations_for_read_only_shards.js | 645
-rw-r--r-- jstests/sharding/txn_recover_decision_using_recovery_router.js | 1025
-rw-r--r-- jstests/sharding/txn_two_phase_commit_basic.js | 469
-rw-r--r-- jstests/sharding/txn_two_phase_commit_commands_basic_requirements.js | 99
-rw-r--r-- jstests/sharding/txn_two_phase_commit_coordinator_shutdown_and_restart.js | 269
-rw-r--r-- jstests/sharding/txn_two_phase_commit_failover.js | 367
-rw-r--r-- jstests/sharding/txn_two_phase_commit_killop.js | 333
-rw-r--r-- jstests/sharding/txn_two_phase_commit_server_status.js | 30
-rw-r--r-- jstests/sharding/txn_two_phase_commit_wait_for_majority_commit_after_stepup.js | 226
-rw-r--r-- jstests/sharding/txn_with_several_routers.js | 386
-rw-r--r-- jstests/sharding/txn_writes_during_movechunk.js | 72
-rw-r--r-- jstests/sharding/unique_index_on_shardservers.js | 44
-rw-r--r-- jstests/sharding/unowned_doc_filtering.js | 68
-rw-r--r-- jstests/sharding/unsharded_collection_targetting.js | 42
-rw-r--r-- jstests/sharding/unsharded_lookup_in_txn.js | 130
-rw-r--r-- jstests/sharding/update_compound_shard_key.js | 802
-rw-r--r-- jstests/sharding/update_immutable_fields.js | 116
-rw-r--r-- jstests/sharding/update_replace_id.js | 355
-rw-r--r-- jstests/sharding/update_shard_key_conflicting_writes.js | 664
-rw-r--r-- jstests/sharding/update_shard_key_doc_moves_shards.js | 825
-rw-r--r-- jstests/sharding/update_shard_key_doc_on_same_shard.js | 1543
-rw-r--r-- jstests/sharding/update_shard_key_pipeline_update.js | 437
-rw-r--r-- jstests/sharding/update_sharded.js | 210
-rw-r--r-- jstests/sharding/update_zone_key_range.js | 57
-rw-r--r-- jstests/sharding/update_zone_key_range_not_sharded.js | 55
-rw-r--r-- jstests/sharding/upsert_sharded.js | 210
-rw-r--r-- jstests/sharding/use_rsm_data_for_cs.js | 54
-rw-r--r-- jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js | 50
-rw-r--r-- jstests/sharding/uuid_propagated_to_recipient_shard_on_recvChunkStart.js | 55
-rw-r--r-- jstests/sharding/validate_collection.js | 119
-rw-r--r-- jstests/sharding/verify_sessions_expiration_sharded.js | 246
-rw-r--r-- jstests/sharding/version1.js | 176
-rw-r--r-- jstests/sharding/version2.js | 146
-rw-r--r-- jstests/sharding/view_rewrite.js | 440
-rw-r--r-- jstests/sharding/views.js | 293
-rw-r--r-- jstests/sharding/wildcard_index_banned_for_shard_key.js | 52
-rw-r--r-- jstests/sharding/write_cmd_auto_split.js | 222
-rw-r--r-- jstests/sharding/write_commands_sharding_state.js | 118
-rw-r--r-- jstests/sharding/write_transactions_during_migration.js | 299
-rw-r--r-- jstests/sharding/zbigMapReduce.js | 374
-rw-r--r-- jstests/sharding/zero_shard_version.js | 275
410 files changed, 41946 insertions, 42520 deletions
diff --git a/jstests/sharding/accurate_count_with_predicate.js b/jstests/sharding/accurate_count_with_predicate.js
index 1cb2cdf0d8a..bb440e88873 100644
--- a/jstests/sharding/accurate_count_with_predicate.js
+++ b/jstests/sharding/accurate_count_with_predicate.js
@@ -7,37 +7,37 @@
* twice.
*/
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 2});
- const shard0Coll = st.shard0.getCollection("test.slowcount");
- const num = 10;
- const middle = num / 2;
+const st = new ShardingTest({shards: 2});
+const shard0Coll = st.shard0.getCollection("test.slowcount");
+const num = 10;
+const middle = num / 2;
- function getNthDocument(n) {
- return {_id: n, one: 1, x: n};
- }
+function getNthDocument(n) {
+ return {_id: n, one: 1, x: n};
+}
- // Shard the collection. Shard 0 will get keys from [0, middle) and shard 1 will get everything
- // from [middle, num).
- assert.commandWorked(st.s.getDB("admin").runCommand({enableSharding: "test"}));
- st.ensurePrimaryShard("test", st.shard0.name);
- st.shardColl(shard0Coll.getName(), {x: 1}, {x: middle}, {x: middle + 1}, "test", true);
+// Shard the collection. Shard 0 will get keys from [0, middle) and shard 1 will get everything
+// from [middle, num).
+assert.commandWorked(st.s.getDB("admin").runCommand({enableSharding: "test"}));
+st.ensurePrimaryShard("test", st.shard0.name);
+st.shardColl(shard0Coll.getName(), {x: 1}, {x: middle}, {x: middle + 1}, "test", true);
- // Insert some docs.
- for (let i = 0; i < num; i++) {
- assert.writeOK(st.getDB("test").slowcount.insert(getNthDocument(i)));
- }
+// Insert some docs.
+for (let i = 0; i < num; i++) {
+ assert.writeOK(st.getDB("test").slowcount.insert(getNthDocument(i)));
+}
- // Insert some orphan documents to shard 0. These are just documents outside the range
- // which shard 0 owns.
- for (let i = middle + 1; i < middle + 3; i++) {
- assert.writeOK(shard0Coll.insert(getNthDocument(i)));
- }
+// Insert some orphan documents to shard 0. These are just documents outside the range
+// which shard 0 owns.
+for (let i = middle + 1; i < middle + 3; i++) {
+ assert.writeOK(shard0Coll.insert(getNthDocument(i)));
+}
- // Run a count on the whole collection. The orphaned documents on shard 0 shouldn't be double
- // counted.
- assert.eq(st.getDB("test").slowcount.count({one: 1}), num);
+// Run a count on the whole collection. The orphaned documents on shard 0 shouldn't be double
+// counted.
+assert.eq(st.getDB("test").slowcount.count({one: 1}), num);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/add_and_remove_shard_from_zone.js b/jstests/sharding/add_and_remove_shard_from_zone.js
index d4773597259..d6d78b3c2cb 100644
--- a/jstests/sharding/add_and_remove_shard_from_zone.js
+++ b/jstests/sharding/add_and_remove_shard_from_zone.js
@@ -3,38 +3,38 @@
* in sharding_catalog_add_shard_to_zone_test.cpp.
*/
(function() {
- 'use strict';
+'use strict';
- let st = new ShardingTest({shards: 1});
- let mongos = st.s0;
+let st = new ShardingTest({shards: 1});
+let mongos = st.s0;
- let config = mongos.getDB('config');
- var shardName = st.shard0.shardName;
+let config = mongos.getDB('config');
+var shardName = st.shard0.shardName;
- // Test adding shard with no zone to a new zone.
- assert.commandWorked(mongos.adminCommand({addShardToZone: shardName, zone: 'x'}));
- var shardDoc = config.shards.findOne();
- assert.eq(['x'], shardDoc.tags);
+// Test adding shard with no zone to a new zone.
+assert.commandWorked(mongos.adminCommand({addShardToZone: shardName, zone: 'x'}));
+var shardDoc = config.shards.findOne();
+assert.eq(['x'], shardDoc.tags);
- // Test adding zone to a shard with existing zones.
- assert.commandWorked(mongos.adminCommand({addShardToZone: shardName, zone: 'y'}));
- shardDoc = config.shards.findOne();
- assert.eq(['x', 'y'], shardDoc.tags);
+// Test adding zone to a shard with existing zones.
+assert.commandWorked(mongos.adminCommand({addShardToZone: shardName, zone: 'y'}));
+shardDoc = config.shards.findOne();
+assert.eq(['x', 'y'], shardDoc.tags);
- // Test removing shard from existing zone.
- assert.commandWorked(mongos.adminCommand({removeShardFromZone: shardName, zone: 'x'}));
- shardDoc = config.shards.findOne();
- assert.eq(['y'], shardDoc.tags);
+// Test removing shard from existing zone.
+assert.commandWorked(mongos.adminCommand({removeShardFromZone: shardName, zone: 'x'}));
+shardDoc = config.shards.findOne();
+assert.eq(['y'], shardDoc.tags);
- // Test removing shard from zone that no longer exists.
- assert.commandWorked(mongos.adminCommand({removeShardFromZone: shardName, zone: 'x'}));
- shardDoc = config.shards.findOne();
- assert.eq(['y'], shardDoc.tags);
+// Test removing shard from zone that no longer exists.
+assert.commandWorked(mongos.adminCommand({removeShardFromZone: shardName, zone: 'x'}));
+shardDoc = config.shards.findOne();
+assert.eq(['y'], shardDoc.tags);
- // Test removing the last zone from a shard
- assert.commandWorked(mongos.adminCommand({removeShardFromZone: shardName, zone: 'y'}));
- shardDoc = config.shards.findOne();
- assert.eq([], shardDoc.tags);
+// Test removing the last zone from a shard
+assert.commandWorked(mongos.adminCommand({removeShardFromZone: shardName, zone: 'y'}));
+shardDoc = config.shards.findOne();
+assert.eq([], shardDoc.tags);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index 4fcfde18f83..b676cb474e7 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -1,80 +1,77 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({name: "add_shard1", shards: 1, useHostname: false});
+var s = new ShardingTest({name: "add_shard1", shards: 1, useHostname: false});
- // Create a shard and add a database; if the database is not duplicated the mongod should accept
- // it as shard
- var conn1 = MongoRunner.runMongod({'shardsvr': ""});
- var db1 = conn1.getDB("testDB");
+// Create a shard and add a database; if the database is not duplicated the mongod should accept
+// it as shard
+var conn1 = MongoRunner.runMongod({'shardsvr': ""});
+var db1 = conn1.getDB("testDB");
- var numObjs = 3;
- for (var i = 0; i < numObjs; i++) {
- assert.writeOK(db1.foo.save({a: i}));
- }
+var numObjs = 3;
+for (var i = 0; i < numObjs; i++) {
+ assert.writeOK(db1.foo.save({a: i}));
+}
- var configDB = s.s.getDB('config');
- assert.eq(null, configDB.databases.findOne({_id: 'testDB'}));
+var configDB = s.s.getDB('config');
+assert.eq(null, configDB.databases.findOne({_id: 'testDB'}));
- var newShard = "myShard";
- assert.commandWorked(
- s.admin.runCommand({addshard: "localhost:" + conn1.port, name: newShard, maxSize: 1024}));
+var newShard = "myShard";
+assert.commandWorked(
+ s.admin.runCommand({addshard: "localhost:" + conn1.port, name: newShard, maxSize: 1024}));
- assert.neq(null, configDB.databases.findOne({_id: 'testDB'}));
+assert.neq(null, configDB.databases.findOne({_id: 'testDB'}));
- var newShardDoc = configDB.shards.findOne({_id: newShard});
- assert.eq(1024, newShardDoc.maxSize);
+var newShardDoc = configDB.shards.findOne({_id: newShard});
+assert.eq(1024, newShardDoc.maxSize);
- // a mongod with an existing database name should not be allowed to become a shard
- var conn2 = MongoRunner.runMongod({'shardsvr': ""});
+// a mongod with an existing database name should not be allowed to become a shard
+var conn2 = MongoRunner.runMongod({'shardsvr': ""});
- var db2 = conn2.getDB("otherDB");
- assert.writeOK(db2.foo.save({a: 1}));
+var db2 = conn2.getDB("otherDB");
+assert.writeOK(db2.foo.save({a: 1}));
- var db3 = conn2.getDB("testDB");
- assert.writeOK(db3.foo.save({a: 1}));
+var db3 = conn2.getDB("testDB");
+assert.writeOK(db3.foo.save({a: 1}));
- s.config.databases.find().forEach(printjson);
+s.config.databases.find().forEach(printjson);
- var rejectedShard = "rejectedShard";
- assert(!s.admin.runCommand({addshard: "localhost:" + conn2.port, name: rejectedShard}).ok,
- "accepted mongod with duplicate db");
+var rejectedShard = "rejectedShard";
+assert(!s.admin.runCommand({addshard: "localhost:" + conn2.port, name: rejectedShard}).ok,
+ "accepted mongod with duplicate db");
- // Check that all collection that were local to the mongod's are accessible through the mongos
- var sdb1 = s.getDB("testDB");
- assert.eq(numObjs, sdb1.foo.count(), "wrong count for database that existed before addshard");
+// Check that all collection that were local to the mongod's are accessible through the mongos
+var sdb1 = s.getDB("testDB");
+assert.eq(numObjs, sdb1.foo.count(), "wrong count for database that existed before addshard");
- var sdb2 = s.getDB("otherDB");
- assert.eq(0, sdb2.foo.count(), "database of rejected shard appears through mongos");
+var sdb2 = s.getDB("otherDB");
+assert.eq(0, sdb2.foo.count(), "database of rejected shard appears through mongos");
- // make sure we can move a DB from the original mongod to a previoulsy existing shard
- assert.eq(s.normalize(s.config.databases.findOne({_id: "testDB"}).primary),
- newShard,
- "DB primary is wrong");
+// make sure we can move a DB from the original mongod to a previoulsy existing shard
+assert.eq(s.normalize(s.config.databases.findOne({_id: "testDB"}).primary),
+ newShard,
+ "DB primary is wrong");
- var origShard = s.getNonPrimaries("testDB")[0];
- s.ensurePrimaryShard("testDB", origShard);
- assert.eq(s.normalize(s.config.databases.findOne({_id: "testDB"}).primary),
- origShard,
- "DB primary didn't move");
- assert.eq(
- numObjs, sdb1.foo.count(), "wrong count after moving datbase that existed before addshard");
+var origShard = s.getNonPrimaries("testDB")[0];
+s.ensurePrimaryShard("testDB", origShard);
+assert.eq(s.normalize(s.config.databases.findOne({_id: "testDB"}).primary),
+ origShard,
+ "DB primary didn't move");
+assert.eq(
+ numObjs, sdb1.foo.count(), "wrong count after moving datbase that existed before addshard");
- // make sure we can shard the original collections
- sdb1.foo.ensureIndex({a: 1},
- {unique: true}); // can't shard populated collection without an index
- s.adminCommand({enablesharding: "testDB"});
- s.adminCommand({shardcollection: "testDB.foo", key: {a: 1}});
- s.adminCommand({split: "testDB.foo", middle: {a: Math.floor(numObjs / 2)}});
- assert.eq(2,
- s.config.chunks.count({"ns": "testDB.foo"}),
- "wrong chunk number after splitting collection that existed before");
- assert.eq(
- numObjs, sdb1.foo.count(), "wrong count after splitting collection that existed before");
+// make sure we can shard the original collections
+sdb1.foo.ensureIndex({a: 1}, {unique: true}); // can't shard populated collection without an index
+s.adminCommand({enablesharding: "testDB"});
+s.adminCommand({shardcollection: "testDB.foo", key: {a: 1}});
+s.adminCommand({split: "testDB.foo", middle: {a: Math.floor(numObjs / 2)}});
+assert.eq(2,
+ s.config.chunks.count({"ns": "testDB.foo"}),
+ "wrong chunk number after splitting collection that existed before");
+assert.eq(numObjs, sdb1.foo.count(), "wrong count after splitting collection that existed before");
- MongoRunner.stopMongod(conn1);
- MongoRunner.stopMongod(conn2);
-
- s.stop();
+MongoRunner.stopMongod(conn1);
+MongoRunner.stopMongod(conn2);
+s.stop();
})();
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
index cb61d4b4245..7fb1ab2efe1 100644
--- a/jstests/sharding/addshard2.js
+++ b/jstests/sharding/addshard2.js
@@ -4,196 +4,195 @@
*/
(function() {
- var addShardRes;
+var addShardRes;
- var assertAddShardSucceeded = function(res, shardName) {
- assert.commandWorked(res);
+var assertAddShardSucceeded = function(res, shardName) {
+ assert.commandWorked(res);
- // If a shard name was specified, make sure that the name the addShard command reports the
- // shard was added with matches the specified name.
- if (shardName) {
- assert.eq(shardName,
- res.shardAdded,
- "name returned by addShard does not match name specified in addShard");
- }
-
- // Make sure the shard shows up in config.shards with the shardName reported by the
- // addShard command.
- assert.neq(null,
- st.s.getDB('config').shards.findOne({_id: res.shardAdded}),
- "newly added shard " + res.shardAdded + " not found in config.shards");
- };
-
- // Note: this method expects that the failure is *not* that the specified shardName is already
- // the shardName of an existing shard.
- var assertAddShardFailed = function(res, shardName) {
- assert.commandFailed(res);
-
- // If a shard name was specified in the addShard, make sure no shard with its name shows up
- // in config.shards.
- if (shardName) {
- assert.eq(null,
- st.s.getDB('config').shards.findOne({_id: shardName}),
- "addShard for " + shardName +
- " reported failure, but shard shows up in config.shards");
- }
- };
-
- var removeShardWithName = function(shardName) {
- var res = st.s.adminCommand({removeShard: shardName});
- assert.commandWorked(res);
- assert.eq('started', res.state);
- assert.soon(function() {
- res = st.s.adminCommand({removeShard: shardName});
- assert.commandWorked(res);
- return ('completed' === res.state);
- }, "removeShard never completed for shard " + shardName);
- };
-
- var st = new ShardingTest({
- shards: 0,
- mongos: 1,
- });
-
- // Add one shard since the last shard cannot be removed.
- var normalShard = MongoRunner.runMongod({shardsvr: ''});
- st.s.adminCommand({addShard: normalShard.name, name: 'normalShard'});
-
- // Allocate a port that can be used to test adding invalid hosts.
- var portWithoutHostRunning = allocatePort();
-
- // 1. Test adding a *standalone*
-
- // 1.a. with or without specifying the shardName.
-
- jsTest.log("Adding a standalone *without* a specified shardName should succeed.");
- let standalone1 = MongoRunner.runMongod({shardsvr: ''});
- addShardRes = st.s.adminCommand({addshard: standalone1.name});
- assertAddShardSucceeded(addShardRes);
- removeShardWithName(addShardRes.shardAdded);
- MongoRunner.stopMongod(standalone1);
-
- jsTest.log("Adding a standalone *with* a specified shardName should succeed.");
- let standalone2 = MongoRunner.runMongod({shardsvr: ''});
- addShardRes = st.s.adminCommand({addshard: standalone2.name, name: "shardName"});
- assertAddShardSucceeded(addShardRes, "shardName");
- removeShardWithName(addShardRes.shardAdded);
- MongoRunner.stopMongod(standalone2);
-
- // 1.b. with an invalid hostname.
-
- jsTest.log("Adding a standalone with a non-existing host should fail.");
- addShardRes = st.s.adminCommand({addShard: getHostName() + ":" + portWithoutHostRunning});
- assertAddShardFailed(addShardRes);
-
- // 2. Test adding a *replica set* with an ordinary set name
-
- // 2.a. with or without specifying the shardName.
-
- jsTest.log("Adding a replica set without a specified shardName should succeed.");
- let rst1 = new ReplSetTest({nodes: 1});
- rst1.startSet({shardsvr: ''});
- rst1.initiate();
- addShardRes = st.s.adminCommand({addShard: rst1.getURL()});
- assertAddShardSucceeded(addShardRes);
- assert.eq(rst1.name, addShardRes.shardAdded);
- removeShardWithName(addShardRes.shardAdded);
- rst1.stopSet();
-
- jsTest.log(
- "Adding a replica set with a specified shardName that matches the set's name should succeed.");
- let rst2 = new ReplSetTest({nodes: 1});
- rst2.startSet({shardsvr: ''});
- rst2.initiate();
- addShardRes = st.s.adminCommand({addShard: rst2.getURL(), name: rst2.name});
- assertAddShardSucceeded(addShardRes, rst2.name);
- removeShardWithName(addShardRes.shardAdded);
- rst2.stopSet();
-
- let rst3 = new ReplSetTest({nodes: 1});
- rst3.startSet({shardsvr: ''});
- rst3.initiate();
-
- jsTest.log(
- "Adding a replica set with a specified shardName that differs from the set's name should succeed.");
- addShardRes = st.s.adminCommand({addShard: rst3.getURL(), name: "differentShardName"});
- assertAddShardSucceeded(addShardRes, "differentShardName");
- removeShardWithName(addShardRes.shardAdded);
-
- jsTest.log("Adding a replica with a specified shardName of 'config' should fail.");
- addShardRes = st.s.adminCommand({addShard: rst3.getURL(), name: "config"});
- assertAddShardFailed(addShardRes, "config");
-
- // 2.b. with invalid hostnames.
-
- jsTest.log("Adding a replica set with only non-existing hosts should fail.");
- addShardRes =
- st.s.adminCommand({addShard: rst3.name + "/NonExistingHost:" + portWithoutHostRunning});
- assertAddShardFailed(addShardRes);
-
- jsTest.log("Adding a replica set with mixed existing/non-existing hosts should fail.");
- addShardRes = st.s.adminCommand({
- addShard:
- rst3.name + "/" + rst3.getPrimary().name + ",NonExistingHost:" + portWithoutHostRunning
- });
- assertAddShardFailed(addShardRes);
-
- rst3.stopSet();
-
- // 3. Test adding a replica set whose *set name* is "config" with or without specifying the
- // shardName.
-
- let rst4 = new ReplSetTest({name: "config", nodes: 1});
- rst4.startSet({shardsvr: ''});
- rst4.initiate();
-
- jsTest.log(
- "Adding a replica set whose setName is config without specifying shardName should fail.");
- addShardRes = st.s.adminCommand({addShard: rst4.getURL()});
- assertAddShardFailed(addShardRes);
-
- jsTest.log(
- "Adding a replica set whose setName is config with specified shardName 'config' should fail.");
- addShardRes = st.s.adminCommand({addShard: rst4.getURL(), name: rst4.name});
- assertAddShardFailed(addShardRes, rst4.name);
-
- jsTest.log(
- "Adding a replica set whose setName is config with a non-'config' shardName should succeed");
- addShardRes = st.s.adminCommand({addShard: rst4.getURL(), name: "nonConfig"});
- assertAddShardSucceeded(addShardRes, "nonConfig");
- removeShardWithName(addShardRes.shardAdded);
-
- rst4.stopSet();
-
- // 4. Test that a replica set whose *set name* is "admin" can be written to (SERVER-17232).
-
- let rst5 = new ReplSetTest({name: "admin", nodes: 1});
- rst5.startSet({shardsvr: ''});
- rst5.initiate();
-
- jsTest.log("A replica set whose set name is 'admin' should be able to be written to.");
-
- addShardRes = st.s.adminCommand({addShard: rst5.getURL()});
- assertAddShardSucceeded(addShardRes);
-
- // Ensure the write goes to the newly added shard.
- assert.commandWorked(st.s.getDB('test').runCommand({create: "foo"}));
- var res = st.s.getDB('config').getCollection('databases').findOne({_id: 'test'});
- assert.neq(null, res);
- if (res.primary != addShardRes.shardAdded) {
- assert.commandWorked(st.s.adminCommand({movePrimary: 'test', to: addShardRes.shardAdded}));
+ // If a shard name was specified, make sure that the name the addShard command reports the
+ // shard was added with matches the specified name.
+ if (shardName) {
+ assert.eq(shardName,
+ res.shardAdded,
+ "name returned by addShard does not match name specified in addShard");
}
- assert.writeOK(st.s.getDB('test').foo.insert({x: 1}));
- assert.neq(null, rst5.getPrimary().getDB('test').foo.findOne());
-
- assert.commandWorked(st.s.getDB('test').runCommand({dropDatabase: 1}));
-
- removeShardWithName(addShardRes.shardAdded);
-
- rst5.stopSet();
+ // Make sure the shard shows up in config.shards with the shardName reported by the
+ // addShard command.
+ assert.neq(null,
+ st.s.getDB('config').shards.findOne({_id: res.shardAdded}),
+ "newly added shard " + res.shardAdded + " not found in config.shards");
+};
+
+// Note: this method expects that the failure is *not* that the specified shardName is already
+// the shardName of an existing shard.
+var assertAddShardFailed = function(res, shardName) {
+ assert.commandFailed(res);
+
+ // If a shard name was specified in the addShard, make sure no shard with its name shows up
+ // in config.shards.
+ if (shardName) {
+ assert.eq(
+ null,
+ st.s.getDB('config').shards.findOne({_id: shardName}),
+ "addShard for " + shardName + " reported failure, but shard shows up in config.shards");
+ }
+};
+
+var removeShardWithName = function(shardName) {
+ var res = st.s.adminCommand({removeShard: shardName});
+ assert.commandWorked(res);
+ assert.eq('started', res.state);
+ assert.soon(function() {
+ res = st.s.adminCommand({removeShard: shardName});
+ assert.commandWorked(res);
+ return ('completed' === res.state);
+ }, "removeShard never completed for shard " + shardName);
+};
+
+var st = new ShardingTest({
+ shards: 0,
+ mongos: 1,
+});
+
+// Add one shard since the last shard cannot be removed.
+var normalShard = MongoRunner.runMongod({shardsvr: ''});
+st.s.adminCommand({addShard: normalShard.name, name: 'normalShard'});
+
+// Allocate a port that can be used to test adding invalid hosts.
+var portWithoutHostRunning = allocatePort();
+
+// 1. Test adding a *standalone*
+
+// 1.a. with or without specifying the shardName.
+
+jsTest.log("Adding a standalone *without* a specified shardName should succeed.");
+let standalone1 = MongoRunner.runMongod({shardsvr: ''});
+addShardRes = st.s.adminCommand({addshard: standalone1.name});
+assertAddShardSucceeded(addShardRes);
+removeShardWithName(addShardRes.shardAdded);
+MongoRunner.stopMongod(standalone1);
+
+jsTest.log("Adding a standalone *with* a specified shardName should succeed.");
+let standalone2 = MongoRunner.runMongod({shardsvr: ''});
+addShardRes = st.s.adminCommand({addshard: standalone2.name, name: "shardName"});
+assertAddShardSucceeded(addShardRes, "shardName");
+removeShardWithName(addShardRes.shardAdded);
+MongoRunner.stopMongod(standalone2);
+
+// 1.b. with an invalid hostname.
+
+jsTest.log("Adding a standalone with a non-existing host should fail.");
+addShardRes = st.s.adminCommand({addShard: getHostName() + ":" + portWithoutHostRunning});
+assertAddShardFailed(addShardRes);
+
+// 2. Test adding a *replica set* with an ordinary set name
+
+// 2.a. with or without specifying the shardName.
+
+jsTest.log("Adding a replica set without a specified shardName should succeed.");
+let rst1 = new ReplSetTest({nodes: 1});
+rst1.startSet({shardsvr: ''});
+rst1.initiate();
+addShardRes = st.s.adminCommand({addShard: rst1.getURL()});
+assertAddShardSucceeded(addShardRes);
+assert.eq(rst1.name, addShardRes.shardAdded);
+removeShardWithName(addShardRes.shardAdded);
+rst1.stopSet();
+
+jsTest.log(
+ "Adding a replica set with a specified shardName that matches the set's name should succeed.");
+let rst2 = new ReplSetTest({nodes: 1});
+rst2.startSet({shardsvr: ''});
+rst2.initiate();
+addShardRes = st.s.adminCommand({addShard: rst2.getURL(), name: rst2.name});
+assertAddShardSucceeded(addShardRes, rst2.name);
+removeShardWithName(addShardRes.shardAdded);
+rst2.stopSet();
+
+let rst3 = new ReplSetTest({nodes: 1});
+rst3.startSet({shardsvr: ''});
+rst3.initiate();
+
+jsTest.log(
+ "Adding a replica set with a specified shardName that differs from the set's name should succeed.");
+addShardRes = st.s.adminCommand({addShard: rst3.getURL(), name: "differentShardName"});
+assertAddShardSucceeded(addShardRes, "differentShardName");
+removeShardWithName(addShardRes.shardAdded);
+
+jsTest.log("Adding a replica with a specified shardName of 'config' should fail.");
+addShardRes = st.s.adminCommand({addShard: rst3.getURL(), name: "config"});
+assertAddShardFailed(addShardRes, "config");
+
+// 2.b. with invalid hostnames.
+
+jsTest.log("Adding a replica set with only non-existing hosts should fail.");
+addShardRes =
+ st.s.adminCommand({addShard: rst3.name + "/NonExistingHost:" + portWithoutHostRunning});
+assertAddShardFailed(addShardRes);
+
+jsTest.log("Adding a replica set with mixed existing/non-existing hosts should fail.");
+addShardRes = st.s.adminCommand({
+ addShard:
+ rst3.name + "/" + rst3.getPrimary().name + ",NonExistingHost:" + portWithoutHostRunning
+});
+assertAddShardFailed(addShardRes);
+
+rst3.stopSet();
+
+// 3. Test adding a replica set whose *set name* is "config" with or without specifying the
+// shardName.
+
+let rst4 = new ReplSetTest({name: "config", nodes: 1});
+rst4.startSet({shardsvr: ''});
+rst4.initiate();
+
+jsTest.log(
+ "Adding a replica set whose setName is config without specifying shardName should fail.");
+addShardRes = st.s.adminCommand({addShard: rst4.getURL()});
+assertAddShardFailed(addShardRes);
+
+jsTest.log(
+ "Adding a replica set whose setName is config with specified shardName 'config' should fail.");
+addShardRes = st.s.adminCommand({addShard: rst4.getURL(), name: rst4.name});
+assertAddShardFailed(addShardRes, rst4.name);
- st.stop();
- MongoRunner.stopMongod(normalShard);
+jsTest.log(
+ "Adding a replica set whose setName is config with a non-'config' shardName should succeed");
+addShardRes = st.s.adminCommand({addShard: rst4.getURL(), name: "nonConfig"});
+assertAddShardSucceeded(addShardRes, "nonConfig");
+removeShardWithName(addShardRes.shardAdded);
+
+rst4.stopSet();
+
+// 4. Test that a replica set whose *set name* is "admin" can be written to (SERVER-17232).
+
+let rst5 = new ReplSetTest({name: "admin", nodes: 1});
+rst5.startSet({shardsvr: ''});
+rst5.initiate();
+
+jsTest.log("A replica set whose set name is 'admin' should be able to be written to.");
+addShardRes = st.s.adminCommand({addShard: rst5.getURL()});
+assertAddShardSucceeded(addShardRes);
+
+// Ensure the write goes to the newly added shard.
+assert.commandWorked(st.s.getDB('test').runCommand({create: "foo"}));
+var res = st.s.getDB('config').getCollection('databases').findOne({_id: 'test'});
+assert.neq(null, res);
+if (res.primary != addShardRes.shardAdded) {
+ assert.commandWorked(st.s.adminCommand({movePrimary: 'test', to: addShardRes.shardAdded}));
+}
+
+assert.writeOK(st.s.getDB('test').foo.insert({x: 1}));
+assert.neq(null, rst5.getPrimary().getDB('test').foo.findOne());
+
+assert.commandWorked(st.s.getDB('test').runCommand({dropDatabase: 1}));
+
+removeShardWithName(addShardRes.shardAdded);
+
+rst5.stopSet();
+
+st.stop();
+MongoRunner.stopMongod(normalShard);
})();
diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js
index 67d4641f1e2..a23d0a13797 100644
--- a/jstests/sharding/addshard4.js
+++ b/jstests/sharding/addshard4.js
@@ -1,64 +1,63 @@
// A replica set's passive nodes should be okay to add as part of a shard config
(function() {
- var s = new ShardingTest({name: "addshard4", shards: 2, mongos: 1, other: {useHostname: true}});
+var s = new ShardingTest({name: "addshard4", shards: 2, mongos: 1, other: {useHostname: true}});
- var r = new ReplSetTest({name: "addshard4", nodes: 3, nodeOptions: {shardsvr: ""}});
+var r = new ReplSetTest({name: "addshard4", nodes: 3, nodeOptions: {shardsvr: ""}});
- r.startSet();
+r.startSet();
- var config = r.getReplSetConfig();
- config.members[2].priority = 0;
+var config = r.getReplSetConfig();
+config.members[2].priority = 0;
- r.initiate(config);
- // Wait for replica set to be fully initialized - could take some time
- // to pre-allocate files on slow systems
- r.awaitReplication();
+r.initiate(config);
+// Wait for replica set to be fully initialized - could take some time
+// to pre-allocate files on slow systems
+r.awaitReplication();
- var master = r.getPrimary();
+var master = r.getPrimary();
- var members = config.members.map(function(elem) {
- return elem.host;
- });
- var shardName = "addshard4/" + members.join(",");
- var invalidShardName = "addshard4/foobar";
+var members = config.members.map(function(elem) {
+ return elem.host;
+});
+var shardName = "addshard4/" + members.join(",");
+var invalidShardName = "addshard4/foobar";
- print("adding shard " + shardName);
+print("adding shard " + shardName);
- // First try adding shard with the correct replica set name but incorrect hostname
- // This will make sure that the metadata for this replica set name is cleaned up
- // so that the set can be added correctly when it has the proper hostnames.
- assert.throws(function() {
- s.adminCommand({"addshard": invalidShardName});
- });
+// First try adding shard with the correct replica set name but incorrect hostname
+// This will make sure that the metadata for this replica set name is cleaned up
+// so that the set can be added correctly when it has the proper hostnames.
+assert.throws(function() {
+ s.adminCommand({"addshard": invalidShardName});
+});
- var result = s.adminCommand({"addshard": shardName});
+var result = s.adminCommand({"addshard": shardName});
- printjson(result);
- assert.eq(result, true);
+printjson(result);
+assert.eq(result, true);
- r.stopSet();
- r = new ReplSetTest({name: "addshard42", nodes: 3, nodeOptions: {shardsvr: ""}});
- r.startSet();
+r.stopSet();
+r = new ReplSetTest({name: "addshard42", nodes: 3, nodeOptions: {shardsvr: ""}});
+r.startSet();
- config = r.getReplSetConfig();
- config.members[2].arbiterOnly = true;
+config = r.getReplSetConfig();
+config.members[2].arbiterOnly = true;
- r.initiate(config);
- // Wait for replica set to be fully initialized - could take some time
- // to pre-allocate files on slow systems
- r.awaitReplication();
+r.initiate(config);
+// Wait for replica set to be fully initialized - could take some time
+// to pre-allocate files on slow systems
+r.awaitReplication();
- master = r.getPrimary();
+master = r.getPrimary();
- print("adding shard addshard42");
+print("adding shard addshard42");
- result = s.adminCommand({"addshard": "addshard42/" + config.members[2].host});
+result = s.adminCommand({"addshard": "addshard42/" + config.members[2].host});
- printjson(result);
- assert.eq(result, true);
-
- s.stop();
- r.stopSet();
+printjson(result);
+assert.eq(result, true);
+s.stop();
+r.stopSet();
})();
diff --git a/jstests/sharding/addshard5.js b/jstests/sharding/addshard5.js
index f2e6068d023..31d2c10f505 100644
--- a/jstests/sharding/addshard5.js
+++ b/jstests/sharding/addshard5.js
@@ -1,46 +1,46 @@
// Tests that dropping and re-adding a shard with the same name to a cluster doesn't mess up
// migrations
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 1});
+var st = new ShardingTest({shards: 2, mongos: 1});
- var mongos = st.s;
- var admin = mongos.getDB('admin');
- var coll = mongos.getCollection('foo.bar');
+var mongos = st.s;
+var admin = mongos.getDB('admin');
+var coll = mongos.getCollection('foo.bar');
- // Shard collection
- assert.commandWorked(mongos.adminCommand({enableSharding: coll.getDB() + ''}));
+// Shard collection
+assert.commandWorked(mongos.adminCommand({enableSharding: coll.getDB() + ''}));
- // Just to be sure what primary we start from
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
- assert.commandWorked(mongos.adminCommand({shardCollection: coll + '', key: {_id: 1}}));
+// Just to be sure what primary we start from
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
+assert.commandWorked(mongos.adminCommand({shardCollection: coll + '', key: {_id: 1}}));
- // Insert one document
- assert.writeOK(coll.insert({hello: 'world'}));
+// Insert one document
+assert.writeOK(coll.insert({hello: 'world'}));
- // Migrate the collection to and from shard1 so shard0 loads the shard1 host
- assert.commandWorked(mongos.adminCommand(
- {moveChunk: coll + '', find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- assert.commandWorked(mongos.adminCommand(
- {moveChunk: coll + '', find: {_id: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+// Migrate the collection to and from shard1 so shard0 loads the shard1 host
+assert.commandWorked(mongos.adminCommand(
+ {moveChunk: coll + '', find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(mongos.adminCommand(
+ {moveChunk: coll + '', find: {_id: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- // Drop and re-add shard with the same name but a new host.
- assert.commandWorked(mongos.adminCommand({removeShard: st.shard1.shardName}));
- assert.commandWorked(mongos.adminCommand({removeShard: st.shard1.shardName}));
+// Drop and re-add shard with the same name but a new host.
+assert.commandWorked(mongos.adminCommand({removeShard: st.shard1.shardName}));
+assert.commandWorked(mongos.adminCommand({removeShard: st.shard1.shardName}));
- var shard2 = MongoRunner.runMongod({'shardsvr': ''});
- assert.commandWorked(mongos.adminCommand({addShard: shard2.host, name: st.shard1.shardName}));
+var shard2 = MongoRunner.runMongod({'shardsvr': ''});
+assert.commandWorked(mongos.adminCommand({addShard: shard2.host, name: st.shard1.shardName}));
- jsTest.log('Shard was dropped and re-added with same name...');
- st.printShardingStatus();
+jsTest.log('Shard was dropped and re-added with same name...');
+st.printShardingStatus();
- // Try a migration
- assert.commandWorked(
- mongos.adminCommand({moveChunk: coll + '', find: {_id: 0}, to: st.shard1.shardName}));
+// Try a migration
+assert.commandWorked(
+ mongos.adminCommand({moveChunk: coll + '', find: {_id: 0}, to: st.shard1.shardName}));
- assert.eq('world', shard2.getCollection(coll + '').findOne().hello);
+assert.eq('world', shard2.getCollection(coll + '').findOne().hello);
- st.stop();
- MongoRunner.stopMongod(shard2);
+st.stop();
+MongoRunner.stopMongod(shard2);
})();
diff --git a/jstests/sharding/addshard6.js b/jstests/sharding/addshard6.js
index df23fbc4939..b69350e76b6 100644
--- a/jstests/sharding/addshard6.js
+++ b/jstests/sharding/addshard6.js
@@ -3,48 +3,46 @@
*/
(function() {
- var addShardRes;
-
- // Note: this method expects that the failure is *not* that the specified shardName is already
- // the shardName of an existing shard.
- var assertAddShardFailed = function(res, shardName) {
- assert.commandFailed(res);
-
- // If a shard name was specified in the addShard, make sure no shard with its name shows up
- // in config.shards.
- if (shardName) {
- assert.eq(null,
- st.s.getDB('config').shards.findOne({_id: shardName}),
- "addShard for " + shardName +
- " reported failure, but shard shows up in config.shards");
- }
- };
-
- var st = new ShardingTest({
- shards: 0,
- mongos: 1,
- });
-
- var configRS = new ReplSetTest({name: "configsvrReplicaSet", nodes: 1});
- configRS.startSet({configsvr: '', storageEngine: 'wiredTiger'});
- configRS.initiate();
-
- jsTest.log("Adding a config server replica set without a specified shardName should fail.");
- addShardRes = st.s.adminCommand({addShard: configRS.getURL()});
- assertAddShardFailed(addShardRes);
-
- jsTest.log(
- "Adding a config server replica set with a shardName that matches the set's name should fail.");
- addShardRes = st.s.adminCommand({addShard: configRS.getURL(), name: configRS.name});
- assertAddShardFailed(addShardRes, configRS.name);
-
- jsTest.log(
- "Adding a config server replica set even with a non-'config' shardName should fail.");
- addShardRes = st.s.adminCommand({addShard: configRS.getURL(), name: "nonConfig"});
- assertAddShardFailed(addShardRes, "nonConfig");
-
- configRS.stopSet();
-
- st.stop();
-
+var addShardRes;
+
+// Note: this method expects that the failure is *not* that the specified shardName is already
+// the shardName of an existing shard.
+var assertAddShardFailed = function(res, shardName) {
+ assert.commandFailed(res);
+
+ // If a shard name was specified in the addShard, make sure no shard with its name shows up
+ // in config.shards.
+ if (shardName) {
+ assert.eq(
+ null,
+ st.s.getDB('config').shards.findOne({_id: shardName}),
+ "addShard for " + shardName + " reported failure, but shard shows up in config.shards");
+ }
+};
+
+var st = new ShardingTest({
+ shards: 0,
+ mongos: 1,
+});
+
+var configRS = new ReplSetTest({name: "configsvrReplicaSet", nodes: 1});
+configRS.startSet({configsvr: '', storageEngine: 'wiredTiger'});
+configRS.initiate();
+
+jsTest.log("Adding a config server replica set without a specified shardName should fail.");
+addShardRes = st.s.adminCommand({addShard: configRS.getURL()});
+assertAddShardFailed(addShardRes);
+
+jsTest.log(
+ "Adding a config server replica set with a shardName that matches the set's name should fail.");
+addShardRes = st.s.adminCommand({addShard: configRS.getURL(), name: configRS.name});
+assertAddShardFailed(addShardRes, configRS.name);
+
+jsTest.log("Adding a config server replica set even with a non-'config' shardName should fail.");
+addShardRes = st.s.adminCommand({addShard: configRS.getURL(), name: "nonConfig"});
+assertAddShardFailed(addShardRes, "nonConfig");
+
+configRS.stopSet();
+
+st.stop();
})();
diff --git a/jstests/sharding/addshard_idempotent.js b/jstests/sharding/addshard_idempotent.js
index 38000b62e49..800cf5fd88f 100644
--- a/jstests/sharding/addshard_idempotent.js
+++ b/jstests/sharding/addshard_idempotent.js
@@ -1,57 +1,56 @@
// Tests that adding an equivalent shard multiple times returns success.
(function() {
- 'use strict';
-
- var st = new ShardingTest({name: "add_shard_idempotent", shards: 0});
-
- jsTestLog("Testing adding a standalone shard multiple times");
- var shard1 = MongoRunner.runMongod({'shardsvr': ""});
- assert.commandWorked(
- st.admin.runCommand({addshard: shard1.host, name: "newShard1", maxSize: 1024}));
-
- // Running the identical addShard command should succeed.
- assert.commandWorked(
- st.admin.runCommand({addshard: shard1.host, name: "newShard1", maxSize: 1024}));
-
- // Trying to add the same shard with different options should fail
- assert.commandFailed(
- st.admin.runCommand({addshard: shard1.host, name: "newShard1"})); // No maxSize
-
- assert.commandFailed(st.admin.runCommand(
- {addshard: shard1.host, name: "a different shard name", maxSize: 1024}));
-
- jsTestLog("Testing adding a replica set shard multiple times");
- var shard2 = new ReplSetTest({name: 'rsShard', nodes: 3, nodeOptions: {shardsvr: ""}});
- shard2.startSet();
- shard2.initiate();
- shard2.getPrimary(); // Wait for there to be a primary
- var shard2SeedList1 = shard2.name + "/" + shard2.nodes[0].host;
- var shard2SeedList2 = shard2.name + "/" + shard2.nodes[2].host;
-
- assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList1, name: "newShard2"}));
-
- // Running the identical addShard command should succeed.
- assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList1, name: "newShard2"}));
-
- // We can only compare replica sets by their set name, so calling addShard with a different
- // seed list should still be considered a successful no-op.
- assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList2, name: "newShard2"}));
-
- // Verify that the config.shards collection looks right.
- var shards = st.s.getDB('config').shards.find().toArray();
- assert.eq(2, shards.length);
- for (var i = 0; i < shards.length; i++) {
- var shard = shards[i];
- if (shard._id == 'newShard1') {
- assert.eq(shard1.host, shard.host);
- assert.eq(1024, shard.maxSize);
- } else {
- assert.eq('newShard2', shard._id);
- assert.eq(shard2.getURL(), shard.host);
- }
+'use strict';
+
+var st = new ShardingTest({name: "add_shard_idempotent", shards: 0});
+
+jsTestLog("Testing adding a standalone shard multiple times");
+var shard1 = MongoRunner.runMongod({'shardsvr': ""});
+assert.commandWorked(
+ st.admin.runCommand({addshard: shard1.host, name: "newShard1", maxSize: 1024}));
+
+// Running the identical addShard command should succeed.
+assert.commandWorked(
+ st.admin.runCommand({addshard: shard1.host, name: "newShard1", maxSize: 1024}));
+
+// Trying to add the same shard with different options should fail
+assert.commandFailed(
+ st.admin.runCommand({addshard: shard1.host, name: "newShard1"})); // No maxSize
+
+assert.commandFailed(
+ st.admin.runCommand({addshard: shard1.host, name: "a different shard name", maxSize: 1024}));
+
+jsTestLog("Testing adding a replica set shard multiple times");
+var shard2 = new ReplSetTest({name: 'rsShard', nodes: 3, nodeOptions: {shardsvr: ""}});
+shard2.startSet();
+shard2.initiate();
+shard2.getPrimary(); // Wait for there to be a primary
+var shard2SeedList1 = shard2.name + "/" + shard2.nodes[0].host;
+var shard2SeedList2 = shard2.name + "/" + shard2.nodes[2].host;
+
+assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList1, name: "newShard2"}));
+
+// Running the identical addShard command should succeed.
+assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList1, name: "newShard2"}));
+
+// We can only compare replica sets by their set name, so calling addShard with a different
+// seed list should still be considered a successful no-op.
+assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList2, name: "newShard2"}));
+
+// Verify that the config.shards collection looks right.
+var shards = st.s.getDB('config').shards.find().toArray();
+assert.eq(2, shards.length);
+for (var i = 0; i < shards.length; i++) {
+ var shard = shards[i];
+ if (shard._id == 'newShard1') {
+ assert.eq(shard1.host, shard.host);
+ assert.eq(1024, shard.maxSize);
+ } else {
+ assert.eq('newShard2', shard._id);
+ assert.eq(shard2.getURL(), shard.host);
}
- MongoRunner.stopMongod(shard1);
- shard2.stopSet();
- st.stop();
-
+}
+MongoRunner.stopMongod(shard1);
+shard2.stopSet();
+st.stop();
})();
diff --git a/jstests/sharding/advance_cluster_time_action_type.js b/jstests/sharding/advance_cluster_time_action_type.js
index 676dde8b62e..1fff92c5e1d 100644
--- a/jstests/sharding/advance_cluster_time_action_type.js
+++ b/jstests/sharding/advance_cluster_time_action_type.js
@@ -3,60 +3,65 @@
*/
(function() {
- "use strict";
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- let st = new ShardingTest({
- mongos: 1,
- config: 1,
- shards: 1,
- keyFile: 'jstests/libs/key1',
- other: {shardAsReplicaSet: false}
- });
-
- let adminDB = st.s.getDB('admin');
-
- assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
- assert.eq(1, adminDB.auth("admin", "admin"));
-
- assert.commandWorked(adminDB.runCommand({
- createRole: "advanceClusterTimeRole",
- privileges: [{resource: {cluster: true}, actions: ["advanceClusterTime"]}],
- roles: []
- }));
-
- let testDB = adminDB.getSiblingDB("testDB");
-
- assert.commandWorked(
- testDB.runCommand({createUser: 'NotTrusted', pwd: 'pwd', roles: ['readWrite']}));
- assert.commandWorked(testDB.runCommand({
- createUser: 'Trusted',
- pwd: 'pwd',
- roles: [{role: 'advanceClusterTimeRole', db: 'admin'}, 'readWrite']
- }));
- assert.eq(1, testDB.auth("NotTrusted", "pwd"));
-
- let res = testDB.runCommand({insert: "foo", documents: [{_id: 0}]});
- assert.commandWorked(res);
-
- let clusterTime = Object.assign({}, res.$clusterTime);
- let clusterTimeTS = new Timestamp(clusterTime.clusterTime.getTime() + 1000, 0);
- clusterTime.clusterTime = clusterTimeTS;
-
- const cmdObj = {find: "foo", limit: 1, singleBatch: true, $clusterTime: clusterTime};
- jsTestLog("running NonTrusted. command: " + tojson(cmdObj));
- res = testDB.runCommand(cmdObj);
- assert.commandFailed(res, "Command request was: " + tojsononeline(cmdObj));
-
- assert.eq(1, testDB.auth("Trusted", "pwd"));
- jsTestLog("running Trusted. command: " + tojson(cmdObj));
- res = testDB.runCommand(cmdObj);
- assert.commandWorked(res, "Command request was: " + tojsononeline(cmdObj));
-
- testDB.logout();
-
- st.stop();
+"use strict";
+
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
+
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+let st = new ShardingTest({
+ mongos: 1,
+ config: 1,
+ shards: 1,
+ keyFile: 'jstests/libs/key1',
+ other: {shardAsReplicaSet: false}
+});
+
+let adminDB = st.s.getDB('admin');
+
+assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
+assert.eq(1, adminDB.auth("admin", "admin"));
+
+assert.commandWorked(adminDB.runCommand({
+ createRole: "advanceClusterTimeRole",
+ privileges: [{resource: {cluster: true}, actions: ["advanceClusterTime"]}],
+ roles: []
+}));
+
+let testDB = adminDB.getSiblingDB("testDB");
+
+assert.commandWorked(
+ testDB.runCommand({createUser: 'NotTrusted', pwd: 'pwd', roles: ['readWrite']}));
+assert.commandWorked(testDB.runCommand({
+ createUser: 'Trusted',
+ pwd: 'pwd',
+ roles: [{role: 'advanceClusterTimeRole', db: 'admin'}, 'readWrite']
+}));
+assert.eq(1, testDB.auth("NotTrusted", "pwd"));
+
+let res = testDB.runCommand({insert: "foo", documents: [{_id: 0}]});
+assert.commandWorked(res);
+
+let clusterTime = Object.assign({}, res.$clusterTime);
+let clusterTimeTS = new Timestamp(clusterTime.clusterTime.getTime() + 1000, 0);
+clusterTime.clusterTime = clusterTimeTS;
+
+const cmdObj = {
+ find: "foo",
+ limit: 1,
+ singleBatch: true,
+ $clusterTime: clusterTime
+};
+jsTestLog("running NonTrusted. command: " + tojson(cmdObj));
+res = testDB.runCommand(cmdObj);
+assert.commandFailed(res, "Command request was: " + tojsononeline(cmdObj));
+
+assert.eq(1, testDB.auth("Trusted", "pwd"));
+jsTestLog("running Trusted. command: " + tojson(cmdObj));
+res = testDB.runCommand(cmdObj);
+assert.commandWorked(res, "Command request was: " + tojsononeline(cmdObj));
+
+testDB.logout();
+
+st.stop();
})();
diff --git a/jstests/sharding/advance_logical_time_with_valid_signature.js b/jstests/sharding/advance_logical_time_with_valid_signature.js
index bc9f8d86353..fccd047f6fd 100644
--- a/jstests/sharding/advance_logical_time_with_valid_signature.js
+++ b/jstests/sharding/advance_logical_time_with_valid_signature.js
@@ -3,42 +3,42 @@
* cluster time.
*/
(function() {
- "use strict";
+"use strict";
- // Setup 2 mongos processes with mongobridge.
- let st = new ShardingTest({shards: 1, mongos: 2, useBridge: true});
+// Setup 2 mongos processes with mongobridge.
+let st = new ShardingTest({shards: 1, mongos: 2, useBridge: true});
- // Sever outgoing communications from the second mongos.
- st.s0.disconnect(st.s1);
- st._configServers.forEach(function(configSvr) {
- configSvr.disconnect(st.s1);
- });
+// Sever outgoing communications from the second mongos.
+st.s0.disconnect(st.s1);
+st._configServers.forEach(function(configSvr) {
+ configSvr.disconnect(st.s1);
+});
- st._rsObjects.forEach(function(rsNodes) {
- rsNodes.nodes.forEach(function(conn) {
- conn.disconnect(st.s1);
- });
+st._rsObjects.forEach(function(rsNodes) {
+ rsNodes.nodes.forEach(function(conn) {
+ conn.disconnect(st.s1);
});
+});
- let connectedDB = st.s0.getDB("test");
- let disconnectedDB = st.s1.getDB("test");
+let connectedDB = st.s0.getDB("test");
+let disconnectedDB = st.s1.getDB("test");
- // Send an insert to the connected mongos to advance its cluster time.
- let res = assert.commandWorked(connectedDB.runCommand({insert: "foo", documents: [{x: 1}]}));
+// Send an insert to the connected mongos to advance its cluster time.
+let res = assert.commandWorked(connectedDB.runCommand({insert: "foo", documents: [{x: 1}]}));
- // Get logicalTime metadata from the connected mongos's response and send it in an isMaster
- // command to the disconnected mongos. isMaster does not require mongos to contact any other
- // servers, so the command should succeed.
- let lt = res.$clusterTime;
- res = assert.commandWorked(
- disconnectedDB.runCommand({isMaster: 1, $clusterTime: lt}),
- "expected the disconnected mongos to accept cluster time: " + tojson(lt));
+// Get logicalTime metadata from the connected mongos's response and send it in an isMaster
+// command to the disconnected mongos. isMaster does not require mongos to contact any other
+// servers, so the command should succeed.
+let lt = res.$clusterTime;
+res =
+ assert.commandWorked(disconnectedDB.runCommand({isMaster: 1, $clusterTime: lt}),
+ "expected the disconnected mongos to accept cluster time: " + tojson(lt));
- // Verify cluster time response from the disconnected mongos matches what was passed.
- assert.eq(lt,
- res.$clusterTime,
- "expected the disconnected mongos to send cluster time: " + tojson(lt) +
- ", received: " + tojson(res.$clusterTime));
+// Verify cluster time response from the disconnected mongos matches what was passed.
+assert.eq(lt,
+ res.$clusterTime,
+ "expected the disconnected mongos to send cluster time: " + tojson(lt) +
+ ", received: " + tojson(res.$clusterTime));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/after_cluster_time.js b/jstests/sharding/after_cluster_time.js
index eeb8ec05a7e..f79bf03bf23 100644
--- a/jstests/sharding/after_cluster_time.js
+++ b/jstests/sharding/after_cluster_time.js
@@ -3,122 +3,118 @@
* @tags: [requires_majority_read_concern]
*/
(function() {
- "use strict";
-
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
-
- function assertAfterClusterTimeReadFailsWithCode(db, readConcernObj, errorCode) {
- return assert.commandFailedWithCode(
- db.runCommand({find: "foo", readConcern: readConcernObj}),
- errorCode,
- "expected command with read concern options: " + tojson(readConcernObj) + " to fail");
- }
-
- function assertAfterClusterTimeReadSucceeds(db, readConcernObj) {
- return assert.commandWorked(db.runCommand({find: "foo", readConcern: readConcernObj}),
- "expected command with read concern options: " +
- tojson(readConcernObj) + " to succeed");
+"use strict";
+
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+
+function assertAfterClusterTimeReadFailsWithCode(db, readConcernObj, errorCode) {
+ return assert.commandFailedWithCode(
+ db.runCommand({find: "foo", readConcern: readConcernObj}),
+ errorCode,
+ "expected command with read concern options: " + tojson(readConcernObj) + " to fail");
+}
+
+function assertAfterClusterTimeReadSucceeds(db, readConcernObj) {
+ return assert.commandWorked(
+ db.runCommand({find: "foo", readConcern: readConcernObj}),
+ "expected command with read concern options: " + tojson(readConcernObj) + " to succeed");
+}
+
+const rst = new ReplSetTest({
+ nodes: 1,
+ nodeOptions: {
+ enableMajorityReadConcern: "",
+ shardsvr: "",
}
+});
- const rst = new ReplSetTest({
- nodes: 1,
- nodeOptions: {
- enableMajorityReadConcern: "",
- shardsvr: "",
- }
- });
-
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
- rst.initiate();
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+rst.initiate();
- // Start the sharding test and add the majority read concern enabled replica set.
- const st = new ShardingTest({manualAddShard: true});
- assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
+// Start the sharding test and add the majority read concern enabled replica set.
+const st = new ShardingTest({manualAddShard: true});
+assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
- const testDB = st.s.getDB("test");
+const testDB = st.s.getDB("test");
- // Insert some data to find later.
- assert.commandWorked(testDB.runCommand(
- {insert: "foo", documents: [{_id: 1, x: 1}], writeConcern: {w: "majority"}}));
+// Insert some data to find later.
+assert.commandWorked(
+ testDB.runCommand({insert: "foo", documents: [{_id: 1, x: 1}], writeConcern: {w: "majority"}}));
- // Test the afterClusterTime API without causal consistency enabled on the mongo connection.
+// Test the afterClusterTime API without causal consistency enabled on the mongo connection.
- assertAfterClusterTimeReadFailsWithCode(
- testDB,
- {level: "linearizable", afterClusterTime: Timestamp(1, 1)},
- ErrorCodes.InvalidOptions);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "linearizable", afterClusterTime: Timestamp(1, 1)}, ErrorCodes.InvalidOptions);
- // Reads with afterClusterTime require a non-zero timestamp.
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "local", afterClusterTime: {}}, ErrorCodes.TypeMismatch);
+// Reads with afterClusterTime require a non-zero timestamp.
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "local", afterClusterTime: {}}, ErrorCodes.TypeMismatch);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "local", afterClusterTime: 10}, ErrorCodes.TypeMismatch);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "local", afterClusterTime: 10}, ErrorCodes.TypeMismatch);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "local", afterClusterTime: Timestamp()}, ErrorCodes.InvalidOptions);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "local", afterClusterTime: Timestamp()}, ErrorCodes.InvalidOptions);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "local", afterClusterTime: Timestamp(0, 0)}, ErrorCodes.InvalidOptions);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "local", afterClusterTime: Timestamp(0, 0)}, ErrorCodes.InvalidOptions);
- // Reads with proper afterClusterTime arguments return committed data after the given time.
- // Reads with afterClusterTime require a non-zero timestamp.
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: {}}, ErrorCodes.TypeMismatch);
+// Reads with proper afterClusterTime arguments return committed data after the given time.
+// Reads with afterClusterTime require a non-zero timestamp.
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: {}}, ErrorCodes.TypeMismatch);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: 10}, ErrorCodes.TypeMismatch);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: 10}, ErrorCodes.TypeMismatch);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: Timestamp()}, ErrorCodes.InvalidOptions);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: Timestamp()}, ErrorCodes.InvalidOptions);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: Timestamp(0, 0)}, ErrorCodes.InvalidOptions);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: Timestamp(0, 0)}, ErrorCodes.InvalidOptions);
- // Reads with proper afterClusterTime arguments return committed data after the given time.
- let testReadOwnWrite = function(readConcern) {
- let res = assert.commandWorked(testDB.runCommand(
- {find: "foo", readConcern: {level: readConcern, afterClusterTime: Timestamp(1, 1)}}));
+// Reads with proper afterClusterTime arguments return committed data after the given time.
+let testReadOwnWrite = function(readConcern) {
+ let res = assert.commandWorked(testDB.runCommand(
+ {find: "foo", readConcern: {level: readConcern, afterClusterTime: Timestamp(1, 1)}}));
- assert.eq(res.cursor.firstBatch,
- [{_id: 1, x: 1}],
- "expected afterClusterTime read to return the committed document");
+ assert.eq(res.cursor.firstBatch,
+ [{_id: 1, x: 1}],
+ "expected afterClusterTime read to return the committed document");
- // Test the afterClusterTime API with causal consistency enabled on the mongo connection.
- testDB.getMongo().setCausalConsistency(true);
+ // Test the afterClusterTime API with causal consistency enabled on the mongo connection.
+ testDB.getMongo().setCausalConsistency(true);
- // With causal consistency enabled, the shell sets read concern to level "majority" if it is
- // not specified.
- assertAfterClusterTimeReadSucceeds(testDB, {afterClusterTime: Timestamp(1, 1)});
- testDB.getMongo().setCausalConsistency(false);
- };
+ // With causal consistency enabled, the shell sets read concern to level "majority" if it is
+ // not specified.
+ assertAfterClusterTimeReadSucceeds(testDB, {afterClusterTime: Timestamp(1, 1)});
+ testDB.getMongo().setCausalConsistency(false);
+};
- testReadOwnWrite("local");
- testReadOwnWrite("majority");
+testReadOwnWrite("local");
+testReadOwnWrite("majority");
- // Read concern levels other than majority are still not accepted.
- assertAfterClusterTimeReadFailsWithCode(
- testDB,
- {level: "linearizable", afterClusterTime: Timestamp(1, 1)},
- ErrorCodes.InvalidOptions);
+// Read concern levels other than majority are still not accepted.
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "linearizable", afterClusterTime: Timestamp(1, 1)}, ErrorCodes.InvalidOptions);
- // Reads with afterClusterTime still require a non-zero timestamp.
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: {}}, ErrorCodes.TypeMismatch);
+// Reads with afterClusterTime still require a non-zero timestamp.
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: {}}, ErrorCodes.TypeMismatch);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: 10}, ErrorCodes.TypeMismatch);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: 10}, ErrorCodes.TypeMismatch);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: Timestamp()}, ErrorCodes.InvalidOptions);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: Timestamp()}, ErrorCodes.InvalidOptions);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: Timestamp(0, 0)}, ErrorCodes.InvalidOptions);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: Timestamp(0, 0)}, ErrorCodes.InvalidOptions);
- rst.stopSet();
- st.stop();
+rst.stopSet();
+st.stop();
})();
diff --git a/jstests/sharding/agg_error_reports_shard_host_and_port.js b/jstests/sharding/agg_error_reports_shard_host_and_port.js
index 3a73c1d2493..346351d35e3 100644
--- a/jstests/sharding/agg_error_reports_shard_host_and_port.js
+++ b/jstests/sharding/agg_error_reports_shard_host_and_port.js
@@ -1,34 +1,34 @@
// Tests that an aggregation error which occurs on a sharded collection will send an error message
// containing the host and port of the shard where the error occurred.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrMsgContains.
+load("jstests/aggregation/extras/utils.js"); // For assertErrMsgContains.
- const st = new ShardingTest({shards: 2, config: 1});
+const st = new ShardingTest({shards: 2, config: 1});
- const mongosDb = st.s.getDB(jsTestName());
- const coll = mongosDb.getCollection("foo");
+const mongosDb = st.s.getDB(jsTestName());
+const coll = mongosDb.getCollection("foo");
- // Enable sharding on the test DB and ensure its primary is shard 0.
- assert.commandWorked(mongosDb.adminCommand({enableSharding: mongosDb.getName()}));
- st.ensurePrimaryShard(mongosDb.getName(), st.rs0.getURL());
+// Enable sharding on the test DB and ensure its primary is shard 0.
+assert.commandWorked(mongosDb.adminCommand({enableSharding: mongosDb.getName()}));
+st.ensurePrimaryShard(mongosDb.getName(), st.rs0.getURL());
- // Shard the collection.
- coll.drop();
- st.shardColl(coll, {_id: 1}, {_id: 0}, {_id: 1});
+// Shard the collection.
+coll.drop();
+st.shardColl(coll, {_id: 1}, {_id: 0}, {_id: 1});
- assert.commandWorked(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
- // Run an aggregation which will fail on shard 1, and verify that the error message contains
- // the host and port of the shard that failed.
- // We need to be careful here to involve some data in the computation that is actually
- // sent to the shard before failing (i.e. "$_id") so that mongos doesn't short-curcuit and
- // fail during optimization.
- const pipe = [{$project: {a: {$divide: ["$_id", 0]}}}];
- const divideByZeroErrorCode = 16608;
+// Run an aggregation which will fail on shard 1, and verify that the error message contains
+// the host and port of the shard that failed.
+// We need to be careful here to involve some data in the computation that is actually
+// sent to the shard before failing (i.e. "$_id") so that mongos doesn't short-circuit and
+// fail during optimization.
+const pipe = [{$project: {a: {$divide: ["$_id", 0]}}}];
+const divideByZeroErrorCode = 16608;
- assertErrMsgContains(coll, pipe, divideByZeroErrorCode, st.rs1.getPrimary().host);
+assertErrMsgContains(coll, pipe, divideByZeroErrorCode, st.rs1.getPrimary().host);
- st.stop();
+st.stop();
}());
diff --git a/jstests/sharding/agg_explain_fmt.js b/jstests/sharding/agg_explain_fmt.js
index c331b2686b1..3d88d8a2383 100644
--- a/jstests/sharding/agg_explain_fmt.js
+++ b/jstests/sharding/agg_explain_fmt.js
@@ -1,42 +1,42 @@
// This test ensures that an explain of an aggregate through mongos has the intended format.
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/analyze_plan.js'); // For planHasStage.
+load('jstests/libs/analyze_plan.js'); // For planHasStage.
- const st = new ShardingTest({shards: 2});
- const mongosDB = st.s.getDB("test");
- const coll = mongosDB.agg_explain_fmt;
- // Insert documents with {_id: -5} to {_id: 4}.
- assert.commandWorked(coll.insert(Array.from({length: 10}, (_, i) => ({_id: i - 5}))));
+const st = new ShardingTest({shards: 2});
+const mongosDB = st.s.getDB("test");
+const coll = mongosDB.agg_explain_fmt;
+// Insert documents with {_id: -5} to {_id: 4}.
+assert.commandWorked(coll.insert(Array.from({length: 10}, (_, i) => ({_id: i - 5}))));
- // Test that with an unsharded collection we don't get any kind of 'splitPipeline', just the
- // normal explain with 'stages'.
- let explain = coll.explain().aggregate([{$project: {a: 1}}]);
- assert(!explain.hasOwnProperty("splitPipeline"), explain);
- assert(explain.hasOwnProperty("stages"), explain);
+// Test that with an unsharded collection we don't get any kind of 'splitPipeline', just the
+// normal explain with 'stages'.
+let explain = coll.explain().aggregate([{$project: {a: 1}}]);
+assert(!explain.hasOwnProperty("splitPipeline"), explain);
+assert(explain.hasOwnProperty("stages"), explain);
- // Now shard the collection by _id and move a chunk to each shard.
- st.shardColl(coll, {_id: 1}, {_id: 0}, {_id: 0});
+// Now shard the collection by _id and move a chunk to each shard.
+st.shardColl(coll, {_id: 1}, {_id: 0}, {_id: 0});
- // Test that we now have a split pipeline with information about what pipeline ran on each
- // shard.
- explain = coll.explain().aggregate([{$project: {a: 1}}]);
- assert(explain.hasOwnProperty("splitPipeline"), explain);
- assert(explain.splitPipeline.hasOwnProperty("shardsPart"), explain.splitPipeline);
- assert(explain.splitPipeline.hasOwnProperty("mergerPart"), explain.splitPipeline);
- assert(explain.hasOwnProperty("shards"), explain);
- for (let shardId in explain.shards) {
- const shardExplain = explain.shards[shardId];
- assert(shardExplain.hasOwnProperty("host"), shardExplain);
- assert(shardExplain.hasOwnProperty("stages") || shardExplain.hasOwnProperty("queryPlanner"),
- shardExplain);
- }
+// Test that we now have a split pipeline with information about what pipeline ran on each
+// shard.
+explain = coll.explain().aggregate([{$project: {a: 1}}]);
+assert(explain.hasOwnProperty("splitPipeline"), explain);
+assert(explain.splitPipeline.hasOwnProperty("shardsPart"), explain.splitPipeline);
+assert(explain.splitPipeline.hasOwnProperty("mergerPart"), explain.splitPipeline);
+assert(explain.hasOwnProperty("shards"), explain);
+for (let shardId in explain.shards) {
+ const shardExplain = explain.shards[shardId];
+ assert(shardExplain.hasOwnProperty("host"), shardExplain);
+ assert(shardExplain.hasOwnProperty("stages") || shardExplain.hasOwnProperty("queryPlanner"),
+ shardExplain);
+}
- // Do a sharded explain from a mongod, not mongos, to ensure that it does not have a
- // SHARDING_FILTER stage.");
- const shardDB = st.shard0.getDB(mongosDB.getName());
- explain = shardDB[coll.getName()].explain().aggregate([{$match: {}}]);
- assert(!planHasStage(shardDB, explain.queryPlanner.winningPlan, "SHARDING_FILTER"), explain);
- st.stop();
+// Do a sharded explain from a mongod, not mongos, to ensure that it does not have a
+// SHARDING_FILTER stage.
+const shardDB = st.shard0.getDB(mongosDB.getName());
+explain = shardDB[coll.getName()].explain().aggregate([{$match: {}}]);
+assert(!planHasStage(shardDB, explain.queryPlanner.winningPlan, "SHARDING_FILTER"), explain);
+st.stop();
}());
diff --git a/jstests/sharding/agg_project_limit_pipe_split.js b/jstests/sharding/agg_project_limit_pipe_split.js
index 010cd46c46e..f17148a0877 100644
--- a/jstests/sharding/agg_project_limit_pipe_split.js
+++ b/jstests/sharding/agg_project_limit_pipe_split.js
@@ -1,82 +1,78 @@
// Tests that the correct number of results are returned when $limit is coalesced with $sort.
(function() {
- "use strict";
- load("jstests/libs/analyze_plan.js");
+"use strict";
+load("jstests/libs/analyze_plan.js");
- const shardingTest = new ShardingTest({shards: 2});
- const db = shardingTest.getDB("project_limit");
- const coll = db.project_limit_pipe_split;
- coll.drop();
- assert.commandWorked(shardingTest.s0.adminCommand({enableSharding: db.getName()}));
- assert.commandWorked(
- shardingTest.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: "hashed"}}));
- const bulkOp = coll.initializeOrderedBulkOp();
- for (let i = 0; i < 400; ++i) {
- bulkOp.insert({x: i, y: ["a", "b", "c"], z: Math.floor(i / 12)});
- }
- assert.writeOK(bulkOp.execute());
+const shardingTest = new ShardingTest({shards: 2});
+const db = shardingTest.getDB("project_limit");
+const coll = db.project_limit_pipe_split;
+coll.drop();
+assert.commandWorked(shardingTest.s0.adminCommand({enableSharding: db.getName()}));
+assert.commandWorked(
+ shardingTest.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: "hashed"}}));
+const bulkOp = coll.initializeOrderedBulkOp();
+for (let i = 0; i < 400; ++i) {
+ bulkOp.insert({x: i, y: ["a", "b", "c"], z: Math.floor(i / 12)});
+}
+assert.writeOK(bulkOp.execute());
- let agg = coll.aggregate([
- {$match: {$or: [{z: 9}, {z: 10}]}},
- {$sort: {x: -1}},
- {$project: {x: 1, y: 1, z: 1, _id: 0}},
- {$limit: 6},
- ]);
- assert.eq(
- [
- {"x": 131, "y": ["a", "b", "c"], "z": 10},
- {"x": 130, "y": ["a", "b", "c"], "z": 10},
- {"x": 129, "y": ["a", "b", "c"], "z": 10},
- {"x": 128, "y": ["a", "b", "c"], "z": 10},
- {"x": 127, "y": ["a", "b", "c"], "z": 10},
- {"x": 126, "y": ["a", "b", "c"], "z": 10}
- ],
- agg.toArray());
+let agg = coll.aggregate([
+ {$match: {$or: [{z: 9}, {z: 10}]}},
+ {$sort: {x: -1}},
+ {$project: {x: 1, y: 1, z: 1, _id: 0}},
+ {$limit: 6},
+]);
+assert.eq(
+ [
+ {"x": 131, "y": ["a", "b", "c"], "z": 10},
+ {"x": 130, "y": ["a", "b", "c"], "z": 10},
+ {"x": 129, "y": ["a", "b", "c"], "z": 10},
+ {"x": 128, "y": ["a", "b", "c"], "z": 10},
+ {"x": 127, "y": ["a", "b", "c"], "z": 10},
+ {"x": 126, "y": ["a", "b", "c"], "z": 10}
+ ],
+ agg.toArray());
- agg = coll.aggregate([
- {$sort: {x: 1}},
- {$redact: "$$KEEP"},
- {$project: {x: 1, y: 1, z: 1, _id: 0}},
- {$limit: 6}
- ]);
- assert.eq(
- [
- {"x": 0, "y": ["a", "b", "c"], "z": 0},
- {"x": 1, "y": ["a", "b", "c"], "z": 0},
- {"x": 2, "y": ["a", "b", "c"], "z": 0},
- {"x": 3, "y": ["a", "b", "c"], "z": 0},
- {"x": 4, "y": ["a", "b", "c"], "z": 0},
- {"x": 5, "y": ["a", "b", "c"], "z": 0}
- ],
- agg.toArray());
+agg = coll.aggregate(
+ [{$sort: {x: 1}}, {$redact: "$$KEEP"}, {$project: {x: 1, y: 1, z: 1, _id: 0}}, {$limit: 6}]);
+assert.eq(
+ [
+ {"x": 0, "y": ["a", "b", "c"], "z": 0},
+ {"x": 1, "y": ["a", "b", "c"], "z": 0},
+ {"x": 2, "y": ["a", "b", "c"], "z": 0},
+ {"x": 3, "y": ["a", "b", "c"], "z": 0},
+ {"x": 4, "y": ["a", "b", "c"], "z": 0},
+ {"x": 5, "y": ["a", "b", "c"], "z": 0}
+ ],
+ agg.toArray());
- agg = coll.aggregate(
- [{$sort: {x: -1}}, {$skip: 399}, {$project: {x: 1, y: 1, z: 1, _id: 0}}, {$limit: 6}]);
- assert.eq([{"x": 0, "y": ["a", "b", "c"], "z": 0}], agg.toArray());
+agg = coll.aggregate(
+ [{$sort: {x: -1}}, {$skip: 399}, {$project: {x: 1, y: 1, z: 1, _id: 0}}, {$limit: 6}]);
+assert.eq([{"x": 0, "y": ["a", "b", "c"], "z": 0}], agg.toArray());
- agg = coll.aggregate(
- [{$sort: {x: -1}}, {$project: {x: 1, y: 1, z: 1, _id: 0}}, {$skip: 401}, {$limit: 6}]);
- assert.eq(0, agg.itcount());
+agg = coll.aggregate(
+ [{$sort: {x: -1}}, {$project: {x: 1, y: 1, z: 1, _id: 0}}, {$skip: 401}, {$limit: 6}]);
+assert.eq(0, agg.itcount());
- agg = coll.aggregate([
- {$sort: {x: -1}},
- {$skip: 4},
- {$project: {x: 1, y: 1, z: 1, _id: 0}},
- {$skip: 3},
- {$limit: 30},
- {$skip: 3},
- {$limit: 6},
- ]);
- assert.eq(
- [
- {"x": 389, "y": ["a", "b", "c"], "z": 32},
- {"x": 388, "y": ["a", "b", "c"], "z": 32},
- {"x": 387, "y": ["a", "b", "c"], "z": 32},
- {"x": 386, "y": ["a", "b", "c"], "z": 32},
- {"x": 385, "y": ["a", "b", "c"], "z": 32},
- {"x": 384, "y": ["a", "b", "c"], "z": 32}
- ],
- agg.toArray());
+agg = coll.aggregate([
+ {$sort: {x: -1}},
+ {$skip: 4},
+ {$project: {x: 1, y: 1, z: 1, _id: 0}},
+ {$skip: 3},
+ {$limit: 30},
+ {$skip: 3},
+ {$limit: 6},
+]);
+assert.eq(
+ [
+ {"x": 389, "y": ["a", "b", "c"], "z": 32},
+ {"x": 388, "y": ["a", "b", "c"], "z": 32},
+ {"x": 387, "y": ["a", "b", "c"], "z": 32},
+ {"x": 386, "y": ["a", "b", "c"], "z": 32},
+ {"x": 385, "y": ["a", "b", "c"], "z": 32},
+ {"x": 384, "y": ["a", "b", "c"], "z": 32}
+ ],
+ agg.toArray());
- shardingTest.stop();
+shardingTest.stop();
})();
\ No newline at end of file
diff --git a/jstests/sharding/agg_sort.js b/jstests/sharding/agg_sort.js
index 2aebb8e0ded..0ee78631ec0 100644
--- a/jstests/sharding/agg_sort.js
+++ b/jstests/sharding/agg_sort.js
@@ -1,225 +1,219 @@
// Tests that the sort order is obeyed when an aggregation requests sorted results that are
// scattered across multiple shards.
(function() {
- 'use strict';
-
- const shardingTest = new ShardingTest({shards: 2});
-
- const db = shardingTest.getDB("test");
- const coll = db.sharded_agg_sort;
- coll.drop();
-
- assert.commandWorked(shardingTest.s0.adminCommand({enableSharding: db.getName()}));
- shardingTest.ensurePrimaryShard(db.getName(), shardingTest.shard1.shardName);
- assert.commandWorked(
- shardingTest.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
-
- const nDocs = 10;
- const yValues = [
- "abc",
- "ABC",
- null,
- 1,
- NumberLong(2),
- NumberDecimal(-20),
- MinKey,
- MaxKey,
- BinData(0, ""),
- [3, 4],
- ];
- const bulkOp = coll.initializeOrderedBulkOp();
- for (var i = 0; i < nDocs; ++i) {
- bulkOp.insert({_id: i, x: Math.floor(i / 2), y: yValues[i]});
- }
- assert.writeOK(bulkOp.execute());
-
- // Split the data into 3 chunks
- assert.commandWorked(
- shardingTest.s0.adminCommand({split: coll.getFullName(), middle: {_id: 3}}));
- assert.commandWorked(
- shardingTest.s0.adminCommand({split: coll.getFullName(), middle: {_id: 6}}));
-
- // Migrate the middle chunk to another shard
- assert.commandWorked(shardingTest.s0.adminCommand({
- movechunk: coll.getFullName(),
- find: {_id: 5},
- to: shardingTest.getOther(shardingTest.getPrimaryShard(db.getName())).name
- }));
-
- function assertResultsEqual({actual, expected}) {
- const resultsAsString = " actual: " + tojson(actual) + "\n expected: " + tojson(expected);
- assert.eq(
- actual.length, expected.length, `different number of results:\n${resultsAsString}`);
- for (let i = 0; i < actual.length; i++) {
- assert.eq(
- actual[i], expected[i], `different results at index ${i}:\n${resultsAsString}`);
- }
+'use strict';
+
+const shardingTest = new ShardingTest({shards: 2});
+
+const db = shardingTest.getDB("test");
+const coll = db.sharded_agg_sort;
+coll.drop();
+
+assert.commandWorked(shardingTest.s0.adminCommand({enableSharding: db.getName()}));
+shardingTest.ensurePrimaryShard(db.getName(), shardingTest.shard1.shardName);
+assert.commandWorked(
+ shardingTest.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+
+const nDocs = 10;
+const yValues = [
+ "abc",
+ "ABC",
+ null,
+ 1,
+ NumberLong(2),
+ NumberDecimal(-20),
+ MinKey,
+ MaxKey,
+ BinData(0, ""),
+ [3, 4],
+];
+const bulkOp = coll.initializeOrderedBulkOp();
+for (var i = 0; i < nDocs; ++i) {
+ bulkOp.insert({_id: i, x: Math.floor(i / 2), y: yValues[i]});
+}
+assert.writeOK(bulkOp.execute());
+
+// Split the data into 3 chunks
+assert.commandWorked(shardingTest.s0.adminCommand({split: coll.getFullName(), middle: {_id: 3}}));
+assert.commandWorked(shardingTest.s0.adminCommand({split: coll.getFullName(), middle: {_id: 6}}));
+
+// Migrate the middle chunk to another shard
+assert.commandWorked(shardingTest.s0.adminCommand({
+ movechunk: coll.getFullName(),
+ find: {_id: 5},
+ to: shardingTest.getOther(shardingTest.getPrimaryShard(db.getName())).name
+}));
+
+function assertResultsEqual({actual, expected}) {
+ const resultsAsString = " actual: " + tojson(actual) + "\n expected: " + tojson(expected);
+ assert.eq(actual.length, expected.length, `different number of results:\n${resultsAsString}`);
+ for (let i = 0; i < actual.length; i++) {
+ assert.eq(actual[i], expected[i], `different results at index ${i}:\n${resultsAsString}`);
}
+}
- function testSorts() {
- // Test a basic sort by _id.
- assertResultsEqual({
- actual: coll.aggregate([{$sort: {_id: 1}}]).toArray(),
- expected: [
- {_id: 0, x: 0, y: "abc"},
- {_id: 1, x: 0, y: "ABC"},
- {_id: 2, x: 1, y: null},
- {_id: 3, x: 1, y: 1},
- {_id: 4, x: 2, y: NumberLong(2)},
- {_id: 5, x: 2, y: NumberDecimal(-20)},
- {_id: 6, x: 3, y: MinKey},
- {_id: 7, x: 3, y: MaxKey},
- {_id: 8, x: 4, y: BinData(0, "")},
- {_id: 9, x: 4, y: [3, 4]},
- ],
- });
- assertResultsEqual({
- actual: coll.aggregate([{$sort: {_id: 1}}, {$project: {_id: 1}}]).toArray(),
- expected: new Array(nDocs).fill().map(function(_, index) {
- return {_id: index};
- }),
- });
-
- // Test a compound sort.
- assertResultsEqual({
- actual: coll.aggregate([{$sort: {x: 1, y: 1}}]).toArray(),
- expected: [
- {_id: 1, x: 0, y: "ABC"},
- {_id: 0, x: 0, y: "abc"},
- {_id: 2, x: 1, y: null},
- {_id: 3, x: 1, y: 1},
- {_id: 5, x: 2, y: NumberDecimal(-20)},
- {_id: 4, x: 2, y: NumberLong(2)},
- {_id: 6, x: 3, y: MinKey},
- {_id: 7, x: 3, y: MaxKey},
- {_id: 9, x: 4, y: [3, 4]},
- {_id: 8, x: 4, y: BinData(0, "")},
- ],
- });
- assertResultsEqual({
- actual:
- coll.aggregate([{$sort: {x: 1, y: 1}}, {$project: {_id: 0, x: 1, y: 1}}]).toArray(),
- expected: [
- {x: 0, y: "ABC"},
- {x: 0, y: "abc"},
- {x: 1, y: null},
- {x: 1, y: 1},
- {x: 2, y: NumberDecimal(-20)},
- {x: 2, y: NumberLong(2)},
- {x: 3, y: MinKey},
- {x: 3, y: MaxKey},
- {x: 4, y: [3, 4]},
- {x: 4, y: BinData(0, "")},
- ],
- });
-
- // Test a compound sort with a missing field.
- assertResultsEqual({
- actual: coll.aggregate({$sort: {missing: -1, x: 1, _id: -1}}).toArray(),
- expected: [
- {_id: 1, x: 0, y: "ABC"},
- {_id: 0, x: 0, y: "abc"},
- {_id: 3, x: 1, y: 1},
- {_id: 2, x: 1, y: null},
- {_id: 5, x: 2, y: NumberDecimal(-20)},
- {_id: 4, x: 2, y: NumberLong(2)},
- {_id: 7, x: 3, y: MaxKey},
- {_id: 6, x: 3, y: MinKey},
- {_id: 9, x: 4, y: [3, 4]},
- {_id: 8, x: 4, y: BinData(0, "")},
- ]
- });
- }
- testSorts();
- assert.commandWorked(coll.createIndex({x: 1}));
- testSorts();
- assert.commandWorked(coll.createIndex({x: 1, y: 1}));
- testSorts();
- assert.commandWorked(coll.createIndex({missing: 1, x: -1}));
- testSorts();
- assert.commandWorked(coll.createIndex({missing: -1, x: 1, _id: -1}));
- testSorts();
-
- // Test that a sort including the text score is merged properly in a sharded cluster.
- const textColl = db.sharded_agg_sort_text;
-
- assert.commandWorked(
- shardingTest.s0.adminCommand({shardCollection: textColl.getFullName(), key: {_id: 1}}));
-
- assert.writeOK(textColl.insert([
- {_id: 0, text: "apple"},
- {_id: 1, text: "apple orange banana apple"},
- {_id: 2, text: "apple orange"},
- {_id: 3, text: "apple orange banana apple apple banana"},
- {_id: 4, text: "apple orange banana"},
- {_id: 5, text: "apple orange banana apple apple"},
- ]));
-
- // Split the data into 3 chunks
- assert.commandWorked(
- shardingTest.s0.adminCommand({split: textColl.getFullName(), middle: {_id: 2}}));
- assert.commandWorked(
- shardingTest.s0.adminCommand({split: textColl.getFullName(), middle: {_id: 4}}));
-
- // Migrate the middle chunk to another shard
- assert.commandWorked(shardingTest.s0.adminCommand({
- movechunk: textColl.getFullName(),
- find: {_id: 3},
- to: shardingTest.getOther(shardingTest.getPrimaryShard(db.getName())).name
- }));
-
- assert.commandWorked(textColl.createIndex({text: "text"}));
+function testSorts() {
+ // Test a basic sort by _id.
assertResultsEqual({
- actual: textColl
- .aggregate([
- {$match: {$text: {$search: "apple banana orange"}}},
- {$sort: {x: {$meta: "textScore"}}}
- ])
- .toArray(),
+ actual: coll.aggregate([{$sort: {_id: 1}}]).toArray(),
expected: [
- {_id: 3, text: "apple orange banana apple apple banana"},
- {_id: 5, text: "apple orange banana apple apple"},
- {_id: 1, text: "apple orange banana apple"},
- {_id: 4, text: "apple orange banana"},
- {_id: 2, text: "apple orange"},
- {_id: 0, text: "apple"},
+ {_id: 0, x: 0, y: "abc"},
+ {_id: 1, x: 0, y: "ABC"},
+ {_id: 2, x: 1, y: null},
+ {_id: 3, x: 1, y: 1},
+ {_id: 4, x: 2, y: NumberLong(2)},
+ {_id: 5, x: 2, y: NumberDecimal(-20)},
+ {_id: 6, x: 3, y: MinKey},
+ {_id: 7, x: 3, y: MaxKey},
+ {_id: 8, x: 4, y: BinData(0, "")},
+ {_id: 9, x: 4, y: [3, 4]},
],
});
+ assertResultsEqual({
+ actual: coll.aggregate([{$sort: {_id: 1}}, {$project: {_id: 1}}]).toArray(),
+ expected: new Array(nDocs).fill().map(function(_, index) {
+ return {_id: index};
+ }),
+ });
- function assertSortedByMetaField(results) {
- for (let i = 0; i < results.length - 1; ++i) {
- assert(results[i].hasOwnProperty("meta"),
- `Expected all results to have "meta" field, found one without it at index ${i}`);
- assert.gte(
- results[i].meta,
- results[i + 1].meta,
- `Expected results to be sorted by "meta" field, descending. Detected unsorted` +
- ` results at index ${i}, entire result set: ${tojson(results)}`);
- }
- }
+ // Test a compound sort.
+ assertResultsEqual({
+ actual: coll.aggregate([{$sort: {x: 1, y: 1}}]).toArray(),
+ expected: [
+ {_id: 1, x: 0, y: "ABC"},
+ {_id: 0, x: 0, y: "abc"},
+ {_id: 2, x: 1, y: null},
+ {_id: 3, x: 1, y: 1},
+ {_id: 5, x: 2, y: NumberDecimal(-20)},
+ {_id: 4, x: 2, y: NumberLong(2)},
+ {_id: 6, x: 3, y: MinKey},
+ {_id: 7, x: 3, y: MaxKey},
+ {_id: 9, x: 4, y: [3, 4]},
+ {_id: 8, x: 4, y: BinData(0, "")},
+ ],
+ });
+ assertResultsEqual({
+ actual: coll.aggregate([{$sort: {x: 1, y: 1}}, {$project: {_id: 0, x: 1, y: 1}}]).toArray(),
+ expected: [
+ {x: 0, y: "ABC"},
+ {x: 0, y: "abc"},
+ {x: 1, y: null},
+ {x: 1, y: 1},
+ {x: 2, y: NumberDecimal(-20)},
+ {x: 2, y: NumberLong(2)},
+ {x: 3, y: MinKey},
+ {x: 3, y: MaxKey},
+ {x: 4, y: [3, 4]},
+ {x: 4, y: BinData(0, "")},
+ ],
+ });
- assertSortedByMetaField(textColl
- .aggregate([
- {$match: {$text: {$search: "apple banana orange"}}},
- {$sort: {x: {$meta: "textScore"}}},
- {$project: {_id: 0, meta: {$meta: "textScore"}}},
- ])
- .toArray());
-
- assertSortedByMetaField(textColl
- .aggregate([
- {$match: {$text: {$search: "apple banana orange"}}},
- {$project: {_id: 0, meta: {$meta: "textScore"}}},
- {$sort: {meta: -1}},
- ])
- .toArray());
-
- assertSortedByMetaField(textColl
- .aggregate([
- {$sample: {size: 10}},
- {$project: {_id: 0, meta: {$meta: "randVal"}}},
- ])
- .toArray());
-
- shardingTest.stop();
+ // Test a compound sort with a missing field.
+ assertResultsEqual({
+ actual: coll.aggregate({$sort: {missing: -1, x: 1, _id: -1}}).toArray(),
+ expected: [
+ {_id: 1, x: 0, y: "ABC"},
+ {_id: 0, x: 0, y: "abc"},
+ {_id: 3, x: 1, y: 1},
+ {_id: 2, x: 1, y: null},
+ {_id: 5, x: 2, y: NumberDecimal(-20)},
+ {_id: 4, x: 2, y: NumberLong(2)},
+ {_id: 7, x: 3, y: MaxKey},
+ {_id: 6, x: 3, y: MinKey},
+ {_id: 9, x: 4, y: [3, 4]},
+ {_id: 8, x: 4, y: BinData(0, "")},
+ ]
+ });
+}
+testSorts();
+assert.commandWorked(coll.createIndex({x: 1}));
+testSorts();
+assert.commandWorked(coll.createIndex({x: 1, y: 1}));
+testSorts();
+assert.commandWorked(coll.createIndex({missing: 1, x: -1}));
+testSorts();
+assert.commandWorked(coll.createIndex({missing: -1, x: 1, _id: -1}));
+testSorts();
+
+// Test that a sort including the text score is merged properly in a sharded cluster.
+const textColl = db.sharded_agg_sort_text;
+
+assert.commandWorked(
+ shardingTest.s0.adminCommand({shardCollection: textColl.getFullName(), key: {_id: 1}}));
+
+assert.writeOK(textColl.insert([
+ {_id: 0, text: "apple"},
+ {_id: 1, text: "apple orange banana apple"},
+ {_id: 2, text: "apple orange"},
+ {_id: 3, text: "apple orange banana apple apple banana"},
+ {_id: 4, text: "apple orange banana"},
+ {_id: 5, text: "apple orange banana apple apple"},
+]));
+
+// Split the data into 3 chunks
+assert.commandWorked(
+ shardingTest.s0.adminCommand({split: textColl.getFullName(), middle: {_id: 2}}));
+assert.commandWorked(
+ shardingTest.s0.adminCommand({split: textColl.getFullName(), middle: {_id: 4}}));
+
+// Migrate the middle chunk to another shard
+assert.commandWorked(shardingTest.s0.adminCommand({
+ movechunk: textColl.getFullName(),
+ find: {_id: 3},
+ to: shardingTest.getOther(shardingTest.getPrimaryShard(db.getName())).name
+}));
+
+assert.commandWorked(textColl.createIndex({text: "text"}));
+assertResultsEqual({
+ actual: textColl
+ .aggregate([
+ {$match: {$text: {$search: "apple banana orange"}}},
+ {$sort: {x: {$meta: "textScore"}}}
+ ])
+ .toArray(),
+ expected: [
+ {_id: 3, text: "apple orange banana apple apple banana"},
+ {_id: 5, text: "apple orange banana apple apple"},
+ {_id: 1, text: "apple orange banana apple"},
+ {_id: 4, text: "apple orange banana"},
+ {_id: 2, text: "apple orange"},
+ {_id: 0, text: "apple"},
+ ],
+});
+
+function assertSortedByMetaField(results) {
+ for (let i = 0; i < results.length - 1; ++i) {
+ assert(results[i].hasOwnProperty("meta"),
+ `Expected all results to have "meta" field, found one without it at index ${i}`);
+ assert.gte(results[i].meta,
+ results[i + 1].meta,
+ `Expected results to be sorted by "meta" field, descending. Detected unsorted` +
+ ` results at index ${i}, entire result set: ${tojson(results)}`);
+ }
+}
+
+assertSortedByMetaField(textColl
+ .aggregate([
+ {$match: {$text: {$search: "apple banana orange"}}},
+ {$sort: {x: {$meta: "textScore"}}},
+ {$project: {_id: 0, meta: {$meta: "textScore"}}},
+ ])
+ .toArray());
+
+assertSortedByMetaField(textColl
+ .aggregate([
+ {$match: {$text: {$search: "apple banana orange"}}},
+ {$project: {_id: 0, meta: {$meta: "textScore"}}},
+ {$sort: {meta: -1}},
+ ])
+ .toArray());
+
+assertSortedByMetaField(textColl
+ .aggregate([
+ {$sample: {size: 10}},
+ {$project: {_id: 0, meta: {$meta: "randVal"}}},
+ ])
+ .toArray());
+
+shardingTest.stop();
})();
diff --git a/jstests/sharding/agg_write_stages_cannot_run_on_mongos.js b/jstests/sharding/agg_write_stages_cannot_run_on_mongos.js
index 740eade7b12..05a48adf3eb 100644
--- a/jstests/sharding/agg_write_stages_cannot_run_on_mongos.js
+++ b/jstests/sharding/agg_write_stages_cannot_run_on_mongos.js
@@ -1,46 +1,43 @@
// Tests that special stages which must run on mongos cannot be run in combination with an $out or
// $merge stage.
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
- const db = st.s0.getDB("db");
- const admin = st.s0.getDB("admin");
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
+const db = st.s0.getDB("db");
+const admin = st.s0.getDB("admin");
- // Create a collection in the db to get around optimizations that will do nothing in lieu of
- // failing when the db is empty.
- assert.commandWorked(db.runCommand({create: "coll"}));
+// Create a collection in the db to get around optimizations that will do nothing in lieu of
+// failing when the db is empty.
+assert.commandWorked(db.runCommand({create: "coll"}));
- // These should fail because the initial stages require mongos execution and $out/$merge
- // requires shard execution.
- assert.commandFailedWithCode(
- db.runCommand(
- {aggregate: 1, pipeline: [{$listLocalSessions: {}}, {$out: "test"}], cursor: {}}),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(
- admin.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: {localOps: true}}, {$out: "test"}], cursor: {}}),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(
- db.runCommand({aggregate: 1, pipeline: [{$changeStream: {}}, {$out: "test"}], cursor: {}}),
- ErrorCodes.IllegalOperation);
+// These should fail because the initial stages require mongos execution and $out/$merge
+// requires shard execution.
+assert.commandFailedWithCode(
+ db.runCommand({aggregate: 1, pipeline: [{$listLocalSessions: {}}, {$out: "test"}], cursor: {}}),
+ ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(
+ admin.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: {localOps: true}}, {$out: "test"}], cursor: {}}),
+ ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(
+ db.runCommand({aggregate: 1, pipeline: [{$changeStream: {}}, {$out: "test"}], cursor: {}}),
+ ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(db.runCommand({
- aggregate: 1,
- pipeline: [{$listLocalSessions: {}}, {$merge: {into: "test"}}],
- cursor: {}
- }),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(admin.runCommand({
- aggregate: 1,
- pipeline: [{$currentOp: {localOps: true}}, {$merge: {into: "test"}}],
- cursor: {}
- }),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(
- db.runCommand(
- {aggregate: 1, pipeline: [{$changeStream: {}}, {$merge: {into: "test"}}], cursor: {}}),
- ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(
+ db.runCommand(
+ {aggregate: 1, pipeline: [{$listLocalSessions: {}}, {$merge: {into: "test"}}], cursor: {}}),
+ ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(admin.runCommand({
+ aggregate: 1,
+ pipeline: [{$currentOp: {localOps: true}}, {$merge: {into: "test"}}],
+ cursor: {}
+}),
+ ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(
+ db.runCommand(
+ {aggregate: 1, pipeline: [{$changeStream: {}}, {$merge: {into: "test"}}], cursor: {}}),
+ ErrorCodes.IllegalOperation);
- st.stop();
+st.stop();
}());
diff --git a/jstests/sharding/aggregates_during_balancing.js b/jstests/sharding/aggregates_during_balancing.js
index 149f6031583..8adad032ce3 100644
--- a/jstests/sharding/aggregates_during_balancing.js
+++ b/jstests/sharding/aggregates_during_balancing.js
@@ -1,251 +1,246 @@
// Inserts some interesting data into a sharded collection, enables the balancer, and tests that
// various kinds of aggregations return the expected results.
(function() {
- load('jstests/aggregation/extras/utils.js');
-
- var shardedAggTest =
- new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-
- shardedAggTest.adminCommand({enablesharding: "aggShard"});
- db = shardedAggTest.getDB("aggShard");
- shardedAggTest.ensurePrimaryShard('aggShard', shardedAggTest.shard0.shardName);
-
- db.ts1.drop();
- db.literal.drop();
-
- shardedAggTest.adminCommand({shardcollection: "aggShard.ts1", key: {"_id": 1}});
- shardedAggTest.adminCommand({shardcollection: "aggShard.literal", key: {"_id": 1}});
-
- /*
- Test combining results in mongos for operations that sub-aggregate on shards.
-
- The unusual operators here are $avg, $pushToSet, $push. In the case of $avg,
- the shard pipeline produces an object with the current subtotal and item count
- so that these can be combined in mongos by totalling the subtotals counts
- before performing the final division. For $pushToSet and $push, the shard
- pipelines produce arrays, but in mongos these are combined rather than simply
- being added as arrays within arrays.
- */
-
- var count = 0;
- var strings = [
- "one", "two", "three", "four", "five", "six", "seven",
- "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen",
- "fifteen", "sixteen", "seventeen", "eighteen", "nineteen", "twenty"
- ];
-
- jsTestLog("Bulk inserting data");
- var nItems = 200000;
- var bulk = db.ts1.initializeUnorderedBulkOp();
- for (i = 0; i < nItems; ++i) {
- bulk.insert({
- _id: i,
- counter: ++count,
- number: strings[i % 20],
- random: Math.random(),
- filler: "0123456789012345678901234567890123456789"
- });
- }
- assert.writeOK(bulk.execute());
-
- jsTestLog('a project and group in shards, result combined in mongos');
- var a1 = db.ts1
- .aggregate([
- {$project: {cMod10: {$mod: ["$counter", 10]}, number: 1, counter: 1}},
- {
- $group: {
- _id: "$cMod10",
- numberSet: {$addToSet: "$number"},
- avgCounter: {$avg: "$cMod10"}
- }
- },
- {$sort: {_id: 1}}
- ])
- .toArray();
-
- for (i = 0; i < 10; ++i) {
- assert.eq(a1[i].avgCounter, a1[i]._id, 'agg sharded test avgCounter failed');
- assert.eq(a1[i].numberSet.length, 2, 'agg sharded test numberSet length failed');
- }
-
- jsTestLog('an initial group starts the group in the shards, and combines them in mongos');
- var a2 = db.ts1.aggregate([{$group: {_id: "all", total: {$sum: "$counter"}}}]).toArray();
-
- jsTestLog('sum of an arithmetic progression S(n) = (n/2)(a(1) + a(n));');
- assert.eq(a2[0].total, (nItems / 2) * (1 + nItems), 'agg sharded test counter sum failed');
-
- jsTestLog('A group combining all documents into one, averaging a null field.');
- assert.eq(db.ts1.aggregate([{$group: {_id: null, avg: {$avg: "$missing"}}}]).toArray(),
- [{_id: null, avg: null}]);
-
- jsTestLog('an initial group starts the group in the shards, and combines them in mongos');
- var a3 = db.ts1.aggregate([{$group: {_id: "$number", total: {$sum: 1}}}, {$sort: {_id: 1}}])
- .toArray();
-
- for (i = 0; i < strings.length; ++i) {
- assert.eq(a3[i].total, nItems / strings.length, 'agg sharded test sum numbers failed');
- }
-
- jsTestLog('a match takes place in the shards; just returning the results from mongos');
- var a4 = db.ts1
- .aggregate([{
- $match: {
- $or: [
- {counter: 55},
- {counter: 1111},
- {counter: 2222},
- {counter: 33333},
- {counter: 99999},
- {counter: 55555}
- ]
+load('jstests/aggregation/extras/utils.js');
+
+var shardedAggTest =
+ new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
+
+shardedAggTest.adminCommand({enablesharding: "aggShard"});
+db = shardedAggTest.getDB("aggShard");
+shardedAggTest.ensurePrimaryShard('aggShard', shardedAggTest.shard0.shardName);
+
+db.ts1.drop();
+db.literal.drop();
+
+shardedAggTest.adminCommand({shardcollection: "aggShard.ts1", key: {"_id": 1}});
+shardedAggTest.adminCommand({shardcollection: "aggShard.literal", key: {"_id": 1}});
+
+/*
+Test combining results in mongos for operations that sub-aggregate on shards.
+
+The unusual operators here are $avg, $addToSet, $push. In the case of $avg,
+the shard pipeline produces an object with the current subtotal and item count
+so that these can be combined in mongos by totalling the subtotals and counts
+before performing the final division. For $addToSet and $push, the shard
+pipelines produce arrays, but in mongos these are combined rather than simply
+being added as arrays within arrays.
+*/
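+
+/*
+An illustrative merge with assumed per-shard partial results: if shard0 emits
+{subtotal: 10, count: 4} and shard1 emits {subtotal: 20, count: 6}, mongos computes
+(10 + 20) / (4 + 6) = 3 for $avg rather than averaging the two shard averages
+(2.5 and 3.33...). Similarly, the per-shard arrays for $addToSet are unioned and
+those for $push are concatenated, instead of being nested as arrays of arrays.
+*/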
+
+var count = 0;
+var strings = [
+ "one", "two", "three", "four", "five", "six", "seven",
+ "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen",
+ "fifteen", "sixteen", "seventeen", "eighteen", "nineteen", "twenty"
+];
+
+jsTestLog("Bulk inserting data");
+var nItems = 200000;
+var bulk = db.ts1.initializeUnorderedBulkOp();
+for (i = 0; i < nItems; ++i) {
+ bulk.insert({
+ _id: i,
+ counter: ++count,
+ number: strings[i % 20],
+ random: Math.random(),
+ filler: "0123456789012345678901234567890123456789"
+ });
+}
+assert.writeOK(bulk.execute());
+
+jsTestLog('a project and group in shards, result combined in mongos');
+var a1 = db.ts1
+ .aggregate([
+ {$project: {cMod10: {$mod: ["$counter", 10]}, number: 1, counter: 1}},
+ {
+ $group: {
+ _id: "$cMod10",
+ numberSet: {$addToSet: "$number"},
+ avgCounter: {$avg: "$cMod10"}
}
- }])
- .toArray();
- assert.eq(a4.length, 6, tojson(a4));
- for (i = 0; i < 6; ++i) {
- c = a4[i].counter;
- printjson({c: c});
- assert(
- (c == 55) || (c == 1111) || (c == 2222) || (c == 33333) || (c == 99999) || (c == 55555),
- 'agg sharded test simple match failed');
- }
-
- function testSkipLimit(ops, expectedCount) {
- jsTestLog('testSkipLimit(' + tojson(ops) + ', ' + expectedCount + ')');
- if (expectedCount > 10) {
- // make shard -> mongos intermediate results less than 16MB
- ops.unshift({$project: {_id: 1}});
- }
-
- ops.push({$group: {_id: 1, count: {$sum: 1}}});
-
- var out = db.ts1.aggregate(ops).toArray();
- assert.eq(out[0].count, expectedCount);
- }
-
- testSkipLimit([], nItems); // control
- testSkipLimit([{$skip: 10}], nItems - 10);
- testSkipLimit([{$limit: 10}], 10);
- testSkipLimit([{$skip: 5}, {$limit: 10}], 10);
- testSkipLimit([{$limit: 10}, {$skip: 5}], 10 - 5);
- testSkipLimit([{$skip: 5}, {$skip: 3}, {$limit: 10}], 10);
- testSkipLimit([{$skip: 5}, {$limit: 10}, {$skip: 3}], 10 - 3);
- testSkipLimit([{$limit: 10}, {$skip: 5}, {$skip: 3}], 10 - 3 - 5);
-
- // test sort + limit (using random to pull from both shards)
- function testSortLimit(limit, direction) {
- jsTestLog('testSortLimit(' + limit + ', ' + direction + ')');
- var from_cursor =
- db.ts1.find({}, {random: 1, _id: 0}).sort({random: direction}).limit(limit).toArray();
- var from_agg = db.ts1
- .aggregate([
- {$project: {random: 1, _id: 0}},
- {$sort: {random: direction}},
- {$limit: limit}
- ])
- .toArray();
- assert.eq(from_cursor, from_agg);
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray();
+
+for (i = 0; i < 10; ++i) {
+ assert.eq(a1[i].avgCounter, a1[i]._id, 'agg sharded test avgCounter failed');
+ assert.eq(a1[i].numberSet.length, 2, 'agg sharded test numberSet length failed');
+}
+
+jsTestLog('an initial group starts the group in the shards, and combines them in mongos');
+var a2 = db.ts1.aggregate([{$group: {_id: "all", total: {$sum: "$counter"}}}]).toArray();
+
+jsTestLog('sum of an arithmetic progression S(n) = (n/2)(a(1) + a(n));');
+assert.eq(a2[0].total, (nItems / 2) * (1 + nItems), 'agg sharded test counter sum failed');
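+// Worked value with nItems = 200000: (200000 / 2) * (1 + 200000) = 100000 * 200001 = 20000100000.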
+
+jsTestLog('A group combining all documents into one, averaging a null field.');
+assert.eq(db.ts1.aggregate([{$group: {_id: null, avg: {$avg: "$missing"}}}]).toArray(),
+ [{_id: null, avg: null}]);
+
+jsTestLog('an initial group starts the group in the shards, and combines them in mongos');
+var a3 =
+ db.ts1.aggregate([{$group: {_id: "$number", total: {$sum: 1}}}, {$sort: {_id: 1}}]).toArray();
+
+for (i = 0; i < strings.length; ++i) {
+ assert.eq(a3[i].total, nItems / strings.length, 'agg sharded test sum numbers failed');
+}
+
+jsTestLog('a match takes place in the shards; just returning the results from mongos');
+var a4 = db.ts1
+ .aggregate([{
+ $match: {
+ $or: [
+ {counter: 55},
+ {counter: 1111},
+ {counter: 2222},
+ {counter: 33333},
+ {counter: 99999},
+ {counter: 55555}
+ ]
+ }
+ }])
+ .toArray();
+assert.eq(a4.length, 6, tojson(a4));
+for (i = 0; i < 6; ++i) {
+ c = a4[i].counter;
+ printjson({c: c});
+ assert((c == 55) || (c == 1111) || (c == 2222) || (c == 33333) || (c == 99999) || (c == 55555),
+ 'agg sharded test simple match failed');
+}
+
+function testSkipLimit(ops, expectedCount) {
+ jsTestLog('testSkipLimit(' + tojson(ops) + ', ' + expectedCount + ')');
+ if (expectedCount > 10) {
+ // make shard -> mongos intermediate results less than 16MB
+ ops.unshift({$project: {_id: 1}});
}
- testSortLimit(1, 1);
- testSortLimit(1, -1);
- testSortLimit(10, 1);
- testSortLimit(10, -1);
- testSortLimit(100, 1);
- testSortLimit(100, -1);
-
- function testAvgStdDev() {
- jsTestLog('testing $avg and $stdDevPop in sharded $group');
- // $stdDevPop can vary slightly between runs if a migration occurs. This is why we use
- // assert.close below.
- var res = db.ts1
- .aggregate([{
- $group: {
- _id: null,
- avg: {$avg: '$counter'},
- stdDevPop: {$stdDevPop: '$counter'},
- }
- }])
- .toArray();
- // http://en.wikipedia.org/wiki/Arithmetic_progression#Sum
- var avg = (1 + nItems) / 2;
- assert.close(res[0].avg, avg, '', 10 /*decimal places*/);
-
- // http://en.wikipedia.org/wiki/Arithmetic_progression#Standard_deviation
- var stdDev = Math.sqrt(((nItems - 1) * (nItems + 1)) / 12);
- assert.close(res[0].stdDevPop, stdDev, '', 10 /*decimal places*/);
- }
- testAvgStdDev();
-
- function testSample() {
- jsTestLog('testing $sample');
- [0, 1, 10, nItems, nItems + 1].forEach(function(size) {
- var res = db.ts1.aggregate([{$sample: {size: size}}]).toArray();
- assert.eq(res.length, Math.min(nItems, size));
- });
- }
-
- testSample();
- jsTestLog('test $out by copying source collection verbatim to output');
- var outCollection = db.ts1_out;
- var res = db.ts1.aggregate([{$out: outCollection.getName()}]).toArray();
- assert.eq(db.ts1.find().itcount(), outCollection.find().itcount());
- assert.eq(db.ts1.find().sort({_id: 1}).toArray(),
- outCollection.find().sort({_id: 1}).toArray());
-
- // Make sure we error out if $out collection is sharded
- assert.commandFailed(
- db.runCommand({aggregate: outCollection.getName(), pipeline: [{$out: db.ts1.getName()}]}));
-
- assert.writeOK(db.literal.save({dollar: false}));
-
- result =
- db.literal
- .aggregate([{
- $project:
- {_id: 0, cost: {$cond: ['$dollar', {$literal: '$1.00'}, {$literal: '$.99'}]}}
- }])
+ ops.push({$group: {_id: 1, count: {$sum: 1}}});
+
+ var out = db.ts1.aggregate(ops).toArray();
+ assert.eq(out[0].count, expectedCount);
+}
+
+testSkipLimit([], nItems); // control
+testSkipLimit([{$skip: 10}], nItems - 10);
+testSkipLimit([{$limit: 10}], 10);
+testSkipLimit([{$skip: 5}, {$limit: 10}], 10);
+testSkipLimit([{$limit: 10}, {$skip: 5}], 10 - 5);
+testSkipLimit([{$skip: 5}, {$skip: 3}, {$limit: 10}], 10);
+testSkipLimit([{$skip: 5}, {$limit: 10}, {$skip: 3}], 10 - 3);
+testSkipLimit([{$limit: 10}, {$skip: 5}, {$skip: 3}], 10 - 3 - 5);
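+// The expected counts above follow from applying the stages in pipeline order: consecutive
+// $skip stages accumulate, and a $limit caps whatever remains at that point; for example,
+// [{$limit: 10}, {$skip: 5}, {$skip: 3}] leaves 10 - 5 - 3 = 2 documents.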
+
+// test sort + limit (using random to pull from both shards)
+function testSortLimit(limit, direction) {
+ jsTestLog('testSortLimit(' + limit + ', ' + direction + ')');
+ var from_cursor =
+ db.ts1.find({}, {random: 1, _id: 0}).sort({random: direction}).limit(limit).toArray();
+ var from_agg =
+ db.ts1
+ .aggregate(
+ [{$project: {random: 1, _id: 0}}, {$sort: {random: direction}}, {$limit: limit}])
.toArray();
+ assert.eq(from_cursor, from_agg);
+}
+testSortLimit(1, 1);
+testSortLimit(1, -1);
+testSortLimit(10, 1);
+testSortLimit(10, -1);
+testSortLimit(100, 1);
+testSortLimit(100, -1);
+
+function testAvgStdDev() {
+ jsTestLog('testing $avg and $stdDevPop in sharded $group');
+ // $stdDevPop can vary slightly between runs if a migration occurs. This is why we use
+ // assert.close below.
+ var res = db.ts1
+ .aggregate([{
+ $group: {
+ _id: null,
+ avg: {$avg: '$counter'},
+ stdDevPop: {$stdDevPop: '$counter'},
+ }
+ }])
+ .toArray();
+ // http://en.wikipedia.org/wiki/Arithmetic_progression#Sum
+ var avg = (1 + nItems) / 2;
+ assert.close(res[0].avg, avg, '', 10 /*decimal places*/);
+
+ // http://en.wikipedia.org/wiki/Arithmetic_progression#Standard_deviation
+ var stdDev = Math.sqrt(((nItems - 1) * (nItems + 1)) / 12);
+ assert.close(res[0].stdDevPop, stdDev, '', 10 /*decimal places*/);
+}
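+// With counter values 1..nItems these formulas give avg = (1 + 200000) / 2 = 100000.5 and
+// stdDevPop = sqrt((199999 * 200001) / 12) = sqrt(3333333333.25), approximately 57735.03;
+// the population standard deviation of the integers 1..n is sqrt((n^2 - 1) / 12).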
+testAvgStdDev();
+
+function testSample() {
+ jsTestLog('testing $sample');
+ [0, 1, 10, nItems, nItems + 1].forEach(function(size) {
+ var res = db.ts1.aggregate([{$sample: {size: size}}]).toArray();
+ assert.eq(res.length, Math.min(nItems, size));
+ });
+}
+
+testSample();
+
+jsTestLog('test $out by copying source collection verbatim to output');
+var outCollection = db.ts1_out;
+var res = db.ts1.aggregate([{$out: outCollection.getName()}]).toArray();
+assert.eq(db.ts1.find().itcount(), outCollection.find().itcount());
+assert.eq(db.ts1.find().sort({_id: 1}).toArray(), outCollection.find().sort({_id: 1}).toArray());
+
+// Make sure we error out if the $out target collection is sharded.
+assert.commandFailed(
+ db.runCommand({aggregate: outCollection.getName(), pipeline: [{$out: db.ts1.getName()}]}));
+
+assert.writeOK(db.literal.save({dollar: false}));
+
+result =
+ db.literal
+ .aggregate([{
+ $project: {_id: 0, cost: {$cond: ['$dollar', {$literal: '$1.00'}, {$literal: '$.99'}]}}
+ }])
+ .toArray();
+
+assert.eq([{cost: '$.99'}], result);
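+
+// $literal is what keeps '$1.00' and '$.99' above from being parsed as field paths; in
+// aggregation expressions a plain string beginning with '$' is otherwise treated as a path.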
+
+(function() {
+jsTestLog('Testing a $match stage on the shard key.');
+
+var outCollection = 'testShardKeyMatchOut';
+
+// Point query.
+var targetId = Math.floor(nItems * Math.random());
+var pipeline = [{$match: {_id: targetId}}, {$project: {_id: 1}}, {$sort: {_id: 1}}];
+var expectedDocs = [{_id: targetId}];
+// Normal pipeline.
+assert.eq(db.ts1.aggregate(pipeline).toArray(), expectedDocs);
+// With $out.
+db[outCollection].drop();
+pipeline.push({$out: outCollection});
+db.ts1.aggregate(pipeline);
+assert.eq(db[outCollection].find().toArray(), expectedDocs);
+
+// Range query.
+var range = 500;
+var targetStart = Math.floor((nItems - range) * Math.random());
+pipeline = [
+ {$match: {_id: {$gte: targetStart, $lt: targetStart + range}}},
+ {$project: {_id: 1}},
+ {$sort: {_id: 1}}
+];
+expectedDocs = [];
+for (var i = targetStart; i < targetStart + range; i++) {
+ expectedDocs.push({_id: i});
+}
+// Normal pipeline.
+assert.eq(db.ts1.aggregate(pipeline).toArray(), expectedDocs);
+// With $out.
+db[outCollection].drop();
+pipeline.push({$out: outCollection});
+db.ts1.aggregate(pipeline);
+assert.eq(db[outCollection].find().toArray(), expectedDocs);
+}());
- assert.eq([{cost: '$.99'}], result);
-
- (function() {
- jsTestLog('Testing a $match stage on the shard key.');
-
- var outCollection = 'testShardKeyMatchOut';
-
- // Point query.
- var targetId = Math.floor(nItems * Math.random());
- var pipeline = [{$match: {_id: targetId}}, {$project: {_id: 1}}, {$sort: {_id: 1}}];
- var expectedDocs = [{_id: targetId}];
- // Normal pipeline.
- assert.eq(db.ts1.aggregate(pipeline).toArray(), expectedDocs);
- // With $out.
- db[outCollection].drop();
- pipeline.push({$out: outCollection});
- db.ts1.aggregate(pipeline);
- assert.eq(db[outCollection].find().toArray(), expectedDocs);
-
- // Range query.
- var range = 500;
- var targetStart = Math.floor((nItems - range) * Math.random());
- pipeline = [
- {$match: {_id: {$gte: targetStart, $lt: targetStart + range}}},
- {$project: {_id: 1}},
- {$sort: {_id: 1}}
- ];
- expectedDocs = [];
- for (var i = targetStart; i < targetStart + range; i++) {
- expectedDocs.push({_id: i});
- }
- // Normal pipeline.
- assert.eq(db.ts1.aggregate(pipeline).toArray(), expectedDocs);
- // With $out.
- db[outCollection].drop();
- pipeline.push({$out: outCollection});
- db.ts1.aggregate(pipeline);
- assert.eq(db[outCollection].find().toArray(), expectedDocs);
- }());
-
- shardedAggTest.stop();
+shardedAggTest.stop();
}());
diff --git a/jstests/sharding/aggregation_currentop.js b/jstests/sharding/aggregation_currentop.js
index 5e7ed32f09a..4973b4f2d3f 100644
--- a/jstests/sharding/aggregation_currentop.js
+++ b/jstests/sharding/aggregation_currentop.js
@@ -20,906 +20,883 @@
TestData.skipAwaitingReplicationOnShardsBeforeCheckingUUIDs = true;
(function() {
- "use strict";
-
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- load("jstests/libs/namespace_utils.js"); // For getCollectionNameFromFullNamespace.
-
- // Replica set nodes started with --shardsvr do not enable key generation until they are added
- // to a sharded cluster and reject commands with gossiped clusterTime from users without the
- // advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
- // briefly authenticates as __system and receives clusterTime metadata, then fails trying to
- // gossip that time later in setup.
- //
- // TODO SERVER-32672: remove this flag.
- TestData.skipGossipingClusterTime = true;
+"use strict";
+
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/namespace_utils.js"); // For getCollectionNameFromFullNamespace.
+
+// Replica set nodes started with --shardsvr do not enable key generation until they are added
+// to a sharded cluster and reject commands with gossiped clusterTime from users without the
+// advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
+// briefly authenticates as __system and receives clusterTime metadata, then fails trying to
+// gossip that time later in setup.
+//
+// TODO SERVER-32672: remove this flag.
+TestData.skipGossipingClusterTime = true;
+
+const key = "jstests/libs/key1";
+
+// Parameters used to establish the sharded cluster.
+const stParams = {
+ name: jsTestName(),
+ keyFile: key,
+ shards: 3,
+ rs: {nodes: 1, setParameter: {internalQueryExecYieldIterations: 1}}
+};
+
+// Create a new sharded cluster for testing. We set the internalQueryExecYieldIterations
+// parameter so that plan execution yields on every iteration. For some tests, we will
+// temporarily set yields to hang the mongod so we can capture particular operations in the
+// currentOp output.
+const st = new ShardingTest(stParams);
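+
+// Sketch of the capture flow implemented by the helpers below: enable the
+// "setYieldAllLocksHang" failpoint on each shard primary, start the target operation in a
+// parallel shell so that it hangs at its first yield, locate it with $currentOp, then turn
+// the failpoint off and join the parallel shell.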
+
+// Assign various elements of the cluster. We will use shard rs0 to test replica-set level
+// $currentOp behaviour.
+let shardConn = st.rs0.getPrimary();
+let mongosConn = st.s;
+let shardRS = st.rs0;
+
+let clusterTestDB = mongosConn.getDB(jsTestName());
+let clusterAdminDB = mongosConn.getDB("admin");
+shardConn.waitForClusterTime(60);
+let shardTestDB = shardConn.getDB(jsTestName());
+let shardAdminDB = shardConn.getDB("admin");
+
+function createUsers(conn) {
+ let adminDB = conn.getDB("admin");
+
+ // Create an admin user, one user with the inprog privilege, and one without.
+ assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "pwd", roles: ["root"]}));
+ assert(adminDB.auth("admin", "pwd"));
+
+ assert.commandWorked(adminDB.runCommand({
+ createRole: "role_inprog",
+ roles: [],
+ privileges: [{resource: {cluster: true}, actions: ["inprog"]}]
+ }));
- const key = "jstests/libs/key1";
+ assert.commandWorked(adminDB.runCommand(
+ {createUser: "user_inprog", pwd: "pwd", roles: ["readWriteAnyDatabase", "role_inprog"]}));
- // Parameters used to establish the sharded cluster.
- const stParams = {
- name: jsTestName(),
- keyFile: key,
- shards: 3,
- rs: {nodes: 1, setParameter: {internalQueryExecYieldIterations: 1}}
- };
+ assert.commandWorked(adminDB.runCommand(
+ {createUser: "user_no_inprog", pwd: "pwd", roles: ["readWriteAnyDatabase"]}));
+}
- // Create a new sharded cluster for testing. We set the internalQueryExecYieldIterations
- // parameter so that plan execution yields on every iteration. For some tests, we will
- // temporarily set yields to hang the mongod so we can capture particular operations in the
- // currentOp output.
- const st = new ShardingTest(stParams);
+// Create necessary users at both cluster and shard-local level.
+createUsers(shardConn);
+createUsers(mongosConn);
- // Assign various elements of the cluster. We will use shard rs0 to test replica-set level
- // $currentOp behaviour.
- let shardConn = st.rs0.getPrimary();
- let mongosConn = st.s;
- let shardRS = st.rs0;
+// Create a test database and some dummy data on rs0.
+assert(clusterAdminDB.auth("admin", "pwd"));
- let clusterTestDB = mongosConn.getDB(jsTestName());
- let clusterAdminDB = mongosConn.getDB("admin");
- shardConn.waitForClusterTime(60);
- let shardTestDB = shardConn.getDB(jsTestName());
- let shardAdminDB = shardConn.getDB("admin");
+for (let i = 0; i < 5; i++) {
+ assert.writeOK(clusterTestDB.test.insert({_id: i, a: i}));
+}
- function createUsers(conn) {
- let adminDB = conn.getDB("admin");
+st.ensurePrimaryShard(clusterTestDB.getName(), shardRS.name);
- // Create an admin user, one user with the inprog privilege, and one without.
- assert.commandWorked(
- adminDB.runCommand({createUser: "admin", pwd: "pwd", roles: ["root"]}));
- assert(adminDB.auth("admin", "pwd"));
-
- assert.commandWorked(adminDB.runCommand({
- createRole: "role_inprog",
- roles: [],
- privileges: [{resource: {cluster: true}, actions: ["inprog"]}]
- }));
-
- assert.commandWorked(adminDB.runCommand({
- createUser: "user_inprog",
- pwd: "pwd",
- roles: ["readWriteAnyDatabase", "role_inprog"]
- }));
-
- assert.commandWorked(adminDB.runCommand(
- {createUser: "user_no_inprog", pwd: "pwd", roles: ["readWriteAnyDatabase"]}));
+// Restarts a replset with a different set of parameters. Explicitly set the keyFile to null,
+// since if ReplSetTest#stopSet sees a keyFile property, it attempts to auth before dbhash
+// checks.
+function restartReplSet(replSet, newOpts) {
+ const numNodes = replSet.nodeList().length;
+ for (let n = 0; n < numNodes; n++) {
+ replSet.restart(n, newOpts);
}
+ replSet.keyFile = newOpts.keyFile;
+ return replSet.getPrimary();
+}
+// Restarts a cluster with a different set of parameters.
+function restartCluster(st, newOpts) {
+ restartReplSet(st.configRS, newOpts);
+ for (let i = 0; i < stParams.shards; i++) {
+ restartReplSet(st[`rs${i}`], newOpts);
+ }
+ st.restartMongos(0, Object.assign(newOpts, {restart: true}));
+ st.keyFile = newOpts.keyFile;
+ // Re-link the cluster components.
+ shardConn = st.rs0.getPrimary();
+ mongosConn = st.s;
+ shardRS = st.rs0;
+ clusterTestDB = mongosConn.getDB(jsTestName());
+ clusterAdminDB = mongosConn.getDB("admin");
+ shardTestDB = shardConn.getDB(jsTestName());
+ shardAdminDB = shardConn.getDB("admin");
+}
- // Create necessary users at both cluster and shard-local level.
- createUsers(shardConn);
- createUsers(mongosConn);
+function runCommandOnAllPrimaries({dbName, cmdObj, username, password}) {
+ for (let i = 0; i < stParams.shards; i++) {
+ const rsAdminDB = st[`rs${i}`].getPrimary().getDB("admin");
+ rsAdminDB.auth(username, password);
+ assert.commandWorked(rsAdminDB.getSiblingDB(dbName).runCommand(cmdObj));
+ }
+}
+
+// Functions to support running an operation in a parallel shell for testing allUsers behaviour.
+function runInParallelShell({conn, testfunc, username, password}) {
+ TestData.aggCurOpTest = testfunc;
+ TestData.aggCurOpUser = username;
+ TestData.aggCurOpPwd = password;
+
+ runCommandOnAllPrimaries({
+ dbName: "admin",
+ username: username,
+ password: password,
+ cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "alwaysOn"}
+ });
+
+ testfunc = function() {
+ db.getSiblingDB("admin").auth(TestData.aggCurOpUser, TestData.aggCurOpPwd);
+ TestData.aggCurOpTest();
+ db.getSiblingDB("admin").logout();
+ };
- // Create a test database and some dummy data on rs0.
- assert(clusterAdminDB.auth("admin", "pwd"));
+ return startParallelShell(testfunc, conn.port);
+}
- for (let i = 0; i < 5; i++) {
- assert.writeOK(clusterTestDB.test.insert({_id: i, a: i}));
- }
+function assertCurrentOpHasSingleMatchingEntry({conn, currentOpAggFilter, curOpSpec}) {
+ curOpSpec = (curOpSpec || {allUsers: true});
- st.ensurePrimaryShard(clusterTestDB.getName(), shardRS.name);
+ const connAdminDB = conn.getDB("admin");
- // Restarts a replset with a different set of parameters. Explicitly set the keyFile to null,
- // since if ReplSetTest#stopSet sees a keyFile property, it attempts to auth before dbhash
- // checks.
- function restartReplSet(replSet, newOpts) {
- const numNodes = replSet.nodeList().length;
- for (let n = 0; n < numNodes; n++) {
- replSet.restart(n, newOpts);
- }
- replSet.keyFile = newOpts.keyFile;
- return replSet.getPrimary();
- }
- // Restarts a cluster with a different set of parameters.
- function restartCluster(st, newOpts) {
- restartReplSet(st.configRS, newOpts);
- for (let i = 0; i < stParams.shards; i++) {
- restartReplSet(st[`rs${i}`], newOpts);
- }
- st.restartMongos(0, Object.assign(newOpts, {restart: true}));
- st.keyFile = newOpts.keyFile;
- // Re-link the cluster components.
- shardConn = st.rs0.getPrimary();
- mongosConn = st.s;
- shardRS = st.rs0;
- clusterTestDB = mongosConn.getDB(jsTestName());
- clusterAdminDB = mongosConn.getDB("admin");
- shardTestDB = shardConn.getDB(jsTestName());
- shardAdminDB = shardConn.getDB("admin");
- }
+ let curOpResult;
- function runCommandOnAllPrimaries({dbName, cmdObj, username, password}) {
- for (let i = 0; i < stParams.shards; i++) {
- const rsAdminDB = st[`rs${i}`].getPrimary().getDB("admin");
- rsAdminDB.auth(username, password);
- assert.commandWorked(rsAdminDB.getSiblingDB(dbName).runCommand(cmdObj));
- }
- }
+ assert.soon(
+ function() {
+ curOpResult =
+ connAdminDB.aggregate([{$currentOp: curOpSpec}, {$match: currentOpAggFilter}])
+ .toArray();
- // Functions to support running an operation in a parallel shell for testing allUsers behaviour.
- function runInParallelShell({conn, testfunc, username, password}) {
- TestData.aggCurOpTest = testfunc;
- TestData.aggCurOpUser = username;
- TestData.aggCurOpPwd = password;
-
- runCommandOnAllPrimaries({
- dbName: "admin",
- username: username,
- password: password,
- cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "alwaysOn"}
+ return curOpResult.length === 1;
+ },
+ function() {
+ return "Failed to find operation " + tojson(currentOpAggFilter) +
+ " in $currentOp output: " + tojson(curOpResult);
});
- testfunc = function() {
- db.getSiblingDB("admin").auth(TestData.aggCurOpUser, TestData.aggCurOpPwd);
- TestData.aggCurOpTest();
- db.getSiblingDB("admin").logout();
- };
-
- return startParallelShell(testfunc, conn.port);
- }
+ return curOpResult[0];
+}
- function assertCurrentOpHasSingleMatchingEntry({conn, currentOpAggFilter, curOpSpec}) {
- curOpSpec = (curOpSpec || {allUsers: true});
+function waitForParallelShell({conn, username, password, awaitShell}) {
+ runCommandOnAllPrimaries({
+ dbName: "admin",
+ username: username,
+ password: password,
+ cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
+ });
- const connAdminDB = conn.getDB("admin");
+ awaitShell();
+}
+
+// Generic function for running getMore on a $currentOp aggregation cursor and returning the
+// command response.
+function getMoreTest({conn, curOpSpec, getMoreBatchSize}) {
+ // Ensure that there are some other connections present so that the result set is larger
+ // than 1 $currentOp entry.
+ const otherConns = [new Mongo(conn.host), new Mongo(conn.host)];
+ curOpSpec = Object.assign({idleConnections: true}, (curOpSpec || {}));
+
+ // Log the other connections in as user_no_inprog so that they will show up for user_inprog
+ // with {allUsers: true} and user_no_inprog with {allUsers: false}.
+ for (let otherConn of otherConns) {
+ assert(otherConn.getDB("admin").auth("user_no_inprog", "pwd"));
+ }
- let curOpResult;
+ const connAdminDB = conn.getDB("admin");
- assert.soon(
- function() {
- curOpResult =
- connAdminDB.aggregate([{$currentOp: curOpSpec}, {$match: currentOpAggFilter}])
- .toArray();
+ const aggCmdRes = assert.commandWorked(connAdminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: curOpSpec}], cursor: {batchSize: 0}}));
+ assert.neq(aggCmdRes.cursor.id, 0);
- return curOpResult.length === 1;
- },
- function() {
- return "Failed to find operation " + tojson(currentOpAggFilter) +
- " in $currentOp output: " + tojson(curOpResult);
- });
+ return connAdminDB.runCommand({
+ getMore: aggCmdRes.cursor.id,
+ collection: getCollectionNameFromFullNamespace(aggCmdRes.cursor.ns),
+ batchSize: (getMoreBatchSize || 100)
+ });
+}
- return curOpResult[0];
- }
+//
+// Common tests.
+//
- function waitForParallelShell({conn, username, password, awaitShell}) {
- runCommandOnAllPrimaries({
- dbName: "admin",
- username: username,
- password: password,
- cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
- });
+// Runs a suite of tests for behaviour common to both the replica set and cluster levels.
+function runCommonTests(conn, curOpSpec) {
+ const testDB = conn.getDB(jsTestName());
+ const adminDB = conn.getDB("admin");
+ curOpSpec = (curOpSpec || {});
- awaitShell();
+ function addToSpec(spec) {
+ return Object.assign({}, curOpSpec, spec);
}
- // Generic function for running getMore on a $currentOp aggregation cursor and returning the
- // command response.
- function getMoreTest({conn, curOpSpec, getMoreBatchSize}) {
- // Ensure that there are some other connections present so that the result set is larger
- // than 1 $currentOp entry.
- const otherConns = [new Mongo(conn.host), new Mongo(conn.host)];
- curOpSpec = Object.assign({idleConnections: true}, (curOpSpec || {}));
-
- // Log the other connections in as user_no_inprog so that they will show up for user_inprog
- // with {allUsers: true} and user_no_inprog with {allUsers: false}.
- for (let otherConn of otherConns) {
- assert(otherConn.getDB("admin").auth("user_no_inprog", "pwd"));
- }
+ const isLocalMongosCurOp = (conn == mongosConn && curOpSpec.localOps);
+ const isRemoteShardCurOp = (conn == mongosConn && !curOpSpec.localOps);
- const connAdminDB = conn.getDB("admin");
+ // Test that an unauthenticated connection cannot run $currentOp even with {allUsers:
+ // false}.
+ assert(adminDB.logout());
- const aggCmdRes = assert.commandWorked(connAdminDB.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: curOpSpec}], cursor: {batchSize: 0}}));
- assert.neq(aggCmdRes.cursor.id, 0);
+ assert.commandFailedWithCode(
+ adminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: addToSpec({allUsers: false})}], cursor: {}}),
+ ErrorCodes.Unauthorized);
- return connAdminDB.runCommand({
- getMore: aggCmdRes.cursor.id,
- collection: getCollectionNameFromFullNamespace(aggCmdRes.cursor.ns),
- batchSize: (getMoreBatchSize || 100)
- });
- }
+ // Test that an unauthenticated connection cannot run the currentOp command even with
+ // {$ownOps: true}.
+ assert.commandFailedWithCode(adminDB.currentOp({$ownOps: true}), ErrorCodes.Unauthorized);
//
- // Common tests.
+ // Authenticate as user_no_inprog.
//
+ assert(adminDB.logout());
+ assert(adminDB.auth("user_no_inprog", "pwd"));
- // Runs a suite of tests for behaviour common to both the replica set and cluster levels.
- function runCommonTests(conn, curOpSpec) {
- const testDB = conn.getDB(jsTestName());
- const adminDB = conn.getDB("admin");
- curOpSpec = (curOpSpec || {});
+ // Test that $currentOp fails with {allUsers: true} for a user without the "inprog"
+ // privilege.
+ assert.commandFailedWithCode(
+ adminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: addToSpec({allUsers: true})}], cursor: {}}),
+ ErrorCodes.Unauthorized);
- function addToSpec(spec) {
- return Object.assign({}, curOpSpec, spec);
- }
+ // Test that the currentOp command fails with {ownOps: false} for a user without the
+ // "inprog" privilege.
+ assert.commandFailedWithCode(adminDB.currentOp({$ownOps: false}), ErrorCodes.Unauthorized);
- const isLocalMongosCurOp = (conn == mongosConn && curOpSpec.localOps);
- const isRemoteShardCurOp = (conn == mongosConn && !curOpSpec.localOps);
-
- // Test that an unauthenticated connection cannot run $currentOp even with {allUsers:
- // false}.
- assert(adminDB.logout());
-
- assert.commandFailedWithCode(
- adminDB.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: addToSpec({allUsers: false})}], cursor: {}}),
- ErrorCodes.Unauthorized);
-
- // Test that an unauthenticated connection cannot run the currentOp command even with
- // {$ownOps: true}.
- assert.commandFailedWithCode(adminDB.currentOp({$ownOps: true}), ErrorCodes.Unauthorized);
-
- //
- // Authenticate as user_no_inprog.
- //
- assert(adminDB.logout());
- assert(adminDB.auth("user_no_inprog", "pwd"));
-
- // Test that $currentOp fails with {allUsers: true} for a user without the "inprog"
- // privilege.
- assert.commandFailedWithCode(
- adminDB.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: addToSpec({allUsers: true})}], cursor: {}}),
- ErrorCodes.Unauthorized);
-
- // Test that the currentOp command fails with {ownOps: false} for a user without the
- // "inprog" privilege.
- assert.commandFailedWithCode(adminDB.currentOp({$ownOps: false}), ErrorCodes.Unauthorized);
-
- // Test that {aggregate: 1} fails when the first stage in the pipeline is not $currentOp.
- assert.commandFailedWithCode(
- adminDB.runCommand({aggregate: 1, pipeline: [{$match: {}}], cursor: {}}),
- ErrorCodes.InvalidNamespace);
-
- //
- // Authenticate as user_inprog.
- //
- assert(adminDB.logout());
- assert(adminDB.auth("user_inprog", "pwd"));
-
- // Test that $currentOp fails when it is not the first stage in the pipeline. We use two
- // $currentOp stages since any other stage in the initial position will trip the {aggregate:
- // 1} namespace check.
- assert.commandFailedWithCode(
- adminDB.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: {}}, {$currentOp: curOpSpec}], cursor: {}}),
- 40602);
-
- // Test that $currentOp fails when run on admin without {aggregate: 1}.
- assert.commandFailedWithCode(
- adminDB.runCommand(
- {aggregate: "collname", pipeline: [{$currentOp: curOpSpec}], cursor: {}}),
- ErrorCodes.InvalidNamespace);
-
- // Test that $currentOp fails when run as {aggregate: 1} on a database other than admin.
- assert.commandFailedWithCode(
- testDB.runCommand({aggregate: 1, pipeline: [{$currentOp: curOpSpec}], cursor: {}}),
- ErrorCodes.InvalidNamespace);
-
- // Test that the currentOp command fails when run directly on a database other than admin.
- assert.commandFailedWithCode(testDB.runCommand({currentOp: 1}), ErrorCodes.Unauthorized);
-
- // Test that the currentOp command helper succeeds when run on a database other than admin.
- // This is because the currentOp shell helper redirects the command to the admin database.
- assert.commandWorked(testDB.currentOp());
-
- // Test that $currentOp and the currentOp command accept all numeric types.
- const ones = [1, 1.0, NumberInt(1), NumberLong(1), NumberDecimal(1)];
-
- for (let one of ones) {
- assert.commandWorked(adminDB.runCommand(
- {aggregate: one, pipeline: [{$currentOp: curOpSpec}], cursor: {}}));
-
- assert.commandWorked(adminDB.runCommand({currentOp: one, $ownOps: true}));
- }
+ // Test that {aggregate: 1} fails when the first stage in the pipeline is not $currentOp.
+ assert.commandFailedWithCode(
+ adminDB.runCommand({aggregate: 1, pipeline: [{$match: {}}], cursor: {}}),
+ ErrorCodes.InvalidNamespace);
- // Test that $currentOp with {allUsers: true} succeeds for a user with the "inprog"
- // privilege.
- assert.commandWorked(adminDB.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: addToSpec({allUsers: true})}], cursor: {}}));
-
- // Test that the currentOp command with {$ownOps: false} succeeds for a user with the
- // "inprog" privilege.
- assert.commandWorked(adminDB.currentOp({$ownOps: false}));
-
- // Test that $currentOp succeeds if local readConcern is specified.
- assert.commandWorked(adminDB.runCommand({
- aggregate: 1,
- pipeline: [{$currentOp: curOpSpec}],
- readConcern: {level: "local"},
- cursor: {}
- }));
-
- // Test that $currentOp fails if a non-local readConcern is specified for any data-bearing
- // target.
- const linearizableAggCmd = {
- aggregate: 1,
- pipeline: [{$currentOp: curOpSpec}],
- readConcern: {level: "linearizable"},
- cursor: {}
- };
- assert.commandFailedWithCode(adminDB.runCommand(linearizableAggCmd),
- ErrorCodes.InvalidOptions);
-
- // Test that {idleConnections: false} returns only active connections.
- const idleConn = new Mongo(conn.host);
-
- assert.eq(adminDB
- .aggregate([
- {$currentOp: addToSpec({allUsers: true, idleConnections: false})},
- {$match: {active: false}}
- ])
- .itcount(),
- 0);
+ //
+ // Authenticate as user_inprog.
+ //
+ assert(adminDB.logout());
+ assert(adminDB.auth("user_inprog", "pwd"));
- // Test that the currentOp command with {$all: false} returns only active connections.
- assert.eq(adminDB.currentOp({$ownOps: false, $all: false, active: false}).inprog.length, 0);
+ // Test that $currentOp fails when it is not the first stage in the pipeline. We use two
+ // $currentOp stages since any other stage in the initial position will trip the {aggregate:
+ // 1} namespace check.
+ assert.commandFailedWithCode(
+ adminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: {}}, {$currentOp: curOpSpec}], cursor: {}}),
+ 40602);
- // Test that {idleConnections: true} returns inactive connections.
- assert.gte(adminDB
- .aggregate([
- {$currentOp: addToSpec({allUsers: true, idleConnections: true})},
- {$match: {active: false}}
- ])
- .itcount(),
- 1);
+ // Test that $currentOp fails when run on admin without {aggregate: 1}.
+ assert.commandFailedWithCode(
+ adminDB.runCommand(
+ {aggregate: "collname", pipeline: [{$currentOp: curOpSpec}], cursor: {}}),
+ ErrorCodes.InvalidNamespace);
- // Test that the currentOp command with {$all: true} returns inactive connections.
- assert.gte(adminDB.currentOp({$ownOps: false, $all: true, active: false}).inprog.length, 1);
+ // Test that $currentOp fails when run as {aggregate: 1} on a database other than admin.
+ assert.commandFailedWithCode(
+ testDB.runCommand({aggregate: 1, pipeline: [{$currentOp: curOpSpec}], cursor: {}}),
+ ErrorCodes.InvalidNamespace);
- // Test that collation rules apply to matches on $currentOp output.
- const matchField =
- (isRemoteShardCurOp ? "cursor.originatingCommand.comment" : "command.comment");
- const numExpectedMatches = (isRemoteShardCurOp ? stParams.shards : 1);
+ // Test that the currentOp command fails when run directly on a database other than admin.
+ assert.commandFailedWithCode(testDB.runCommand({currentOp: 1}), ErrorCodes.Unauthorized);
- assert.eq(
- adminDB
- .aggregate(
- [{$currentOp: curOpSpec}, {$match: {[matchField]: "AGG_currént_op_COLLATION"}}],
- {
- collation: {locale: "en_US", strength: 1}, // Case and diacritic insensitive.
- comment: "agg_current_op_collation"
- })
- .itcount(),
- numExpectedMatches);
-
- // Test that $currentOp output can be processed by $facet subpipelines.
- assert.eq(adminDB
- .aggregate(
- [
- {$currentOp: curOpSpec},
- {
- $facet: {
- testFacet: [
- {$match: {[matchField]: "agg_current_op_facets"}},
- {$count: "count"}
- ]
- }
- },
- {$unwind: "$testFacet"},
- {$replaceRoot: {newRoot: "$testFacet"}}
- ],
- {comment: "agg_current_op_facets"})
- .next()
- .count,
- numExpectedMatches);
-
- // Test that $currentOp is explainable.
- const explainPlan = assert.commandWorked(adminDB.runCommand({
- aggregate: 1,
- pipeline: [
- {$currentOp: addToSpec({idleConnections: true, allUsers: false})},
- {$match: {desc: "test"}}
- ],
- explain: true
- }));
-
- let expectedStages =
- [{$currentOp: {idleConnections: true}}, {$match: {desc: {$eq: "test"}}}];
-
- if (isRemoteShardCurOp) {
- assert.docEq(explainPlan.splitPipeline.shardsPart, expectedStages);
- for (let i = 0; i < stParams.shards; i++) {
- let shardName = st["rs" + i].name;
- assert.docEq(explainPlan.shards[shardName].stages, expectedStages);
- }
- } else if (isLocalMongosCurOp) {
- expectedStages[0].$currentOp.localOps = true;
- assert.docEq(explainPlan.mongos.stages, expectedStages);
- } else {
- assert.docEq(explainPlan.stages, expectedStages);
- }
+ // Test that the currentOp command helper succeeds when run on a database other than admin.
+ // This is because the currentOp shell helper redirects the command to the admin database.
+ assert.commandWorked(testDB.currentOp());
- // Test that a user with the inprog privilege can run getMore on a $currentOp aggregation
- // cursor which they created with {allUsers: true}.
- let getMoreCmdRes = assert.commandWorked(
- getMoreTest({conn: conn, curOpSpec: {allUsers: true}, getMoreBatchSize: 1}));
-
- // Test that a user without the inprog privilege cannot run getMore on a $currentOp
- // aggregation cursor created by a user with {allUsers: true}.
- assert(adminDB.logout());
- assert(adminDB.auth("user_no_inprog", "pwd"));
-
- assert.neq(getMoreCmdRes.cursor.id, 0);
- assert.commandFailedWithCode(adminDB.runCommand({
- getMore: getMoreCmdRes.cursor.id,
- collection: getCollectionNameFromFullNamespace(getMoreCmdRes.cursor.ns),
- batchSize: 100
- }),
- ErrorCodes.Unauthorized);
- }
+ // Test that $currentOp and the currentOp command accept all numeric types.
+ const ones = [1, 1.0, NumberInt(1), NumberLong(1), NumberDecimal(1)];
- // Run the common tests on a shard, through mongoS, and on mongoS with 'localOps' enabled.
- runCommonTests(shardConn);
- runCommonTests(mongosConn);
- runCommonTests(mongosConn, {localOps: true});
+ for (let one of ones) {
+ assert.commandWorked(
+ adminDB.runCommand({aggregate: one, pipeline: [{$currentOp: curOpSpec}], cursor: {}}));
- //
- // mongoS specific tests.
- //
+ assert.commandWorked(adminDB.runCommand({currentOp: one, $ownOps: true}));
+ }
- // Test that a user without the inprog privilege cannot run non-local $currentOp via mongoS even
- // if allUsers is false.
- assert(clusterAdminDB.logout());
- assert(clusterAdminDB.auth("user_no_inprog", "pwd"));
+ // Test that $currentOp with {allUsers: true} succeeds for a user with the "inprog"
+ // privilege.
+ assert.commandWorked(adminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: addToSpec({allUsers: true})}], cursor: {}}));
- assert.commandFailedWithCode(
- clusterAdminDB.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: {allUsers: false}}], cursor: {}}),
- ErrorCodes.Unauthorized);
+ // Test that the currentOp command with {$ownOps: false} succeeds for a user with the
+ // "inprog" privilege.
+ assert.commandWorked(adminDB.currentOp({$ownOps: false}));
- // Test that a user without the inprog privilege cannot run non-local currentOp command via
- // mongoS even if $ownOps is true.
- assert.commandFailedWithCode(clusterAdminDB.currentOp({$ownOps: true}),
- ErrorCodes.Unauthorized);
+ // Test that $currentOp succeeds if local readConcern is specified.
+ assert.commandWorked(adminDB.runCommand({
+ aggregate: 1,
+ pipeline: [{$currentOp: curOpSpec}],
+ readConcern: {level: "local"},
+ cursor: {}
+ }));
- // Test that a non-local $currentOp pipeline via mongoS returns results from all shards, and
- // includes both the shard and host names.
- assert(clusterAdminDB.logout());
- assert(clusterAdminDB.auth("user_inprog", "pwd"));
+ // Test that $currentOp fails if a non-local readConcern is specified for any data-bearing
+ // target.
+ const linearizableAggCmd = {
+ aggregate: 1,
+ pipeline: [{$currentOp: curOpSpec}],
+ readConcern: {level: "linearizable"},
+ cursor: {}
+ };
+ assert.commandFailedWithCode(adminDB.runCommand(linearizableAggCmd), ErrorCodes.InvalidOptions);
- assert.eq(clusterAdminDB
+ // Test that {idleConnections: false} returns only active connections.
+ const idleConn = new Mongo(conn.host);
+
+ assert.eq(adminDB
.aggregate([
- {$currentOp: {allUsers: true, idleConnections: true}},
- {$group: {_id: {shard: "$shard", host: "$host"}}},
- {$sort: {_id: 1}}
+ {$currentOp: addToSpec({allUsers: true, idleConnections: false})},
+ {$match: {active: false}}
])
- .toArray(),
- [
- {_id: {shard: "aggregation_currentop-rs0", host: st.rs0.getPrimary().host}},
- {_id: {shard: "aggregation_currentop-rs1", host: st.rs1.getPrimary().host}},
- {_id: {shard: "aggregation_currentop-rs2", host: st.rs2.getPrimary().host}}
- ]);
-
- // Test that a $currentOp pipeline with {localOps:true} returns operations from the mongoS
- // itself rather than the shards.
- assert.eq(clusterAdminDB
- .aggregate(
- [
- {$currentOp: {localOps: true}},
- {
- $match: {
- $expr: {$eq: ["$host", "$clientMetadata.mongos.host"]},
- "command.comment": "mongos_currentop_localOps"
- }
- }
- ],
- {comment: "mongos_currentop_localOps"})
.itcount(),
- 1);
+ 0);
- //
- // localOps tests.
- //
+ // Test that the currentOp command with {$all: false} returns only active connections.
+ assert.eq(adminDB.currentOp({$ownOps: false, $all: false, active: false}).inprog.length, 0);
- // Runs a suite of tests for behaviour common to both replica sets and mongoS with
- // {localOps:true}.
- function runLocalOpsTests(conn) {
- // The 'localOps' parameter is not supported by the currentOp command, so we limit its
- // testing to the replica set in certain cases.
- const connAdminDB = conn.getDB("admin");
- const isMongos = FixtureHelpers.isMongos(connAdminDB);
-
- // Test that a user with the inprog privilege can see another user's ops with
- // {allUsers:true}.
- assert(connAdminDB.logout());
- assert(connAdminDB.auth("user_inprog", "pwd"));
-
- let awaitShell = runInParallelShell({
- testfunc: function() {
- assert.eq(db.getSiblingDB(jsTestName())
- .test.find({})
- .comment("agg_current_op_allusers_test")
- .itcount(),
- 5);
- },
- conn: conn,
- username: "admin",
- password: "pwd"
- });
+ // Test that {idleConnections: true} returns inactive connections.
+ assert.gte(adminDB
+ .aggregate([
+ {$currentOp: addToSpec({allUsers: true, idleConnections: true})},
+ {$match: {active: false}}
+ ])
+ .itcount(),
+ 1);
- assertCurrentOpHasSingleMatchingEntry({
- conn: conn,
- currentOpAggFilter: {"command.comment": "agg_current_op_allusers_test"},
- curOpSpec: {allUsers: true, localOps: true}
- });
+ // Test that the currentOp command with {$all: true} returns inactive connections.
+ assert.gte(adminDB.currentOp({$ownOps: false, $all: true, active: false}).inprog.length, 1);
- // Test that the currentOp command can see another user's operations with {$ownOps: false}.
- // Only test on a replica set since 'localOps' isn't supported by the currentOp command.
- if (!isMongos) {
- assert.eq(
- connAdminDB
- .currentOp({$ownOps: false, "command.comment": "agg_current_op_allusers_test"})
- .inprog.length,
- 1);
- }
+ // Test that collation rules apply to matches on $currentOp output.
+ const matchField =
+ (isRemoteShardCurOp ? "cursor.originatingCommand.comment" : "command.comment");
+ const numExpectedMatches = (isRemoteShardCurOp ? stParams.shards : 1);
- // Test that $currentOp succeeds with {allUsers: false} for a user without the "inprog"
- // privilege.
- assert(connAdminDB.logout());
- assert(connAdminDB.auth("user_no_inprog", "pwd"));
-
- assert.commandWorked(connAdminDB.runCommand({
- aggregate: 1,
- pipeline: [{$currentOp: {allUsers: false, localOps: true}}],
- cursor: {}
- }));
-
- // Test that the currentOp command succeeds with {$ownOps: true} for a user without the
- // "inprog" privilege. Because currentOp does not support the 'localOps' parameter, we only
- // perform this test in the replica set case.
- if (!isMongos) {
- assert.commandWorked(connAdminDB.currentOp({$ownOps: true}));
- }
-
- // Test that a user without the inprog privilege cannot see another user's operations.
- assert.eq(connAdminDB
- .aggregate([
- {$currentOp: {allUsers: false, localOps: true}},
- {$match: {"command.comment": "agg_current_op_allusers_test"}}
- ])
- .itcount(),
- 0);
-
- // Test that a user without the inprog privilege cannot see another user's operations via
- // the currentOp command. Limit this test to the replica set case due to the absence of a
- // 'localOps' parameter for the currentOp command.
- if (!isMongos) {
- assert.eq(
- connAdminDB
- .currentOp({$ownOps: true, "command.comment": "agg_current_op_allusers_test"})
- .inprog.length,
- 0);
- }
-
- // Release the failpoint and wait for the parallel shell to complete.
- waitForParallelShell(
- {conn: conn, username: "admin", password: "pwd", awaitShell: awaitShell});
-
- // Test that a user without the inprog privilege can run getMore on a $currentOp cursor
- // which they created with {allUsers: false}.
- assert.commandWorked(
- getMoreTest({conn: conn, curOpSpec: {allUsers: false, localOps: true}}));
- }
+ assert.eq(
+ adminDB
+ .aggregate(
+ [{$currentOp: curOpSpec}, {$match: {[matchField]: "AGG_currént_op_COLLATION"}}], {
+ collation: {locale: "en_US", strength: 1}, // Case and diacritic insensitive.
+ comment: "agg_current_op_collation"
+ })
+ .itcount(),
+ numExpectedMatches);
- // Run the localOps tests for both replset and mongoS.
- runLocalOpsTests(mongosConn);
- runLocalOpsTests(shardConn);
+ // Test that $currentOp output can be processed by $facet subpipelines.
+ assert.eq(adminDB
+ .aggregate(
+ [
+ {$currentOp: curOpSpec},
+ {
+ $facet: {
+ testFacet: [
+ {$match: {[matchField]: "agg_current_op_facets"}},
+ {$count: "count"}
+ ]
+ }
+ },
+ {$unwind: "$testFacet"},
+ {$replaceRoot: {newRoot: "$testFacet"}}
+ ],
+ {comment: "agg_current_op_facets"})
+ .next()
+ .count,
+ numExpectedMatches);
- //
- // Stashed transactions tests.
- //
+ // Test that $currentOp is explainable.
+ const explainPlan = assert.commandWorked(adminDB.runCommand({
+ aggregate: 1,
+ pipeline: [
+ {$currentOp: addToSpec({idleConnections: true, allUsers: false})},
+ {$match: {desc: "test"}}
+ ],
+ explain: true
+ }));
- // Test that $currentOp will display stashed transaction locks if 'idleSessions' is true, and
- // will only permit a user to view other users' sessions if the caller possesses the 'inprog'
- // privilege and 'allUsers' is true.
- const userNames = ["user_inprog", "admin", "user_no_inprog"];
- let sessionDBs = [];
- let sessions = [];
-
- // Returns a set of predicates that filter $currentOp for all stashed transactions.
- function sessionFilter() {
- return {
- type: "idleSession",
- active: false,
- opid: {$exists: false},
- desc: "inactive transaction",
- "lsid.id": {$in: sessions.map((session) => session.getSessionId().id)},
- "transaction.parameters.txnNumber": {$gte: 0, $lt: sessions.length},
- };
- }
+ let expectedStages = [{$currentOp: {idleConnections: true}}, {$match: {desc: {$eq: "test"}}}];
- for (let i in userNames) {
- shardAdminDB.logout();
- assert(shardAdminDB.auth(userNames[i], "pwd"));
-
- // Create a session for this user.
- const session = shardAdminDB.getMongo().startSession();
-
- // For each session, start but do not complete a transaction.
- const sessionDB = session.getDatabase(shardTestDB.getName());
- assert.commandWorked(sessionDB.runCommand({
- insert: "test",
- documents: [{_id: `txn-insert-${userNames[i]}-${i}`}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(i),
- startTransaction: true,
- autocommit: false
- }));
- sessionDBs.push(sessionDB);
- sessions.push(session);
-
- // Use $currentOp to confirm that the incomplete transactions have stashed their locks while
- // inactive, and that each user can only view their own sessions with 'allUsers:false'.
- assert.eq(shardAdminDB
- .aggregate([
- {$currentOp: {allUsers: false, idleSessions: true}},
- {$match: sessionFilter()}
- ])
- .itcount(),
- 1);
+ if (isRemoteShardCurOp) {
+ assert.docEq(explainPlan.splitPipeline.shardsPart, expectedStages);
+ for (let i = 0; i < stParams.shards; i++) {
+ let shardName = st["rs" + i].name;
+ assert.docEq(explainPlan.shards[shardName].stages, expectedStages);
+ }
+ } else if (isLocalMongosCurOp) {
+ expectedStages[0].$currentOp.localOps = true;
+ assert.docEq(explainPlan.mongos.stages, expectedStages);
+ } else {
+ assert.docEq(explainPlan.stages, expectedStages);
}
- // Log in as 'user_no_inprog' to verify that the user cannot view other users' sessions via
- // 'allUsers:true'.
- shardAdminDB.logout();
- assert(shardAdminDB.auth("user_no_inprog", "pwd"));
-
- assert.commandFailedWithCode(shardAdminDB.runCommand({
- aggregate: 1,
- cursor: {},
- pipeline: [{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter()}]
+ // Test that a user with the inprog privilege can run getMore on a $currentOp aggregation
+ // cursor which they created with {allUsers: true}.
+ let getMoreCmdRes = assert.commandWorked(
+ getMoreTest({conn: conn, curOpSpec: {allUsers: true}, getMoreBatchSize: 1}));
+
+ // Test that a user without the inprog privilege cannot run getMore on a $currentOp
+ // aggregation cursor created by a user with {allUsers: true}.
+ assert(adminDB.logout());
+ assert(adminDB.auth("user_no_inprog", "pwd"));
+
+ assert.neq(getMoreCmdRes.cursor.id, 0);
+ assert.commandFailedWithCode(adminDB.runCommand({
+ getMore: getMoreCmdRes.cursor.id,
+ collection: getCollectionNameFromFullNamespace(getMoreCmdRes.cursor.ns),
+ batchSize: 100
}),
ErrorCodes.Unauthorized);
+}
+
+// Run the common tests on a shard, through mongoS, and on mongoS with 'localOps' enabled.
+runCommonTests(shardConn);
+runCommonTests(mongosConn);
+runCommonTests(mongosConn, {localOps: true});
+
+//
+// mongoS specific tests.
+//
+
+// Test that a user without the inprog privilege cannot run non-local $currentOp via mongoS even
+// if allUsers is false.
+assert(clusterAdminDB.logout());
+assert(clusterAdminDB.auth("user_no_inprog", "pwd"));
+
+assert.commandFailedWithCode(
+ clusterAdminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: {allUsers: false}}], cursor: {}}),
+ ErrorCodes.Unauthorized);
+
+// Test that a user without the inprog privilege cannot run non-local currentOp command via
+// mongoS even if $ownOps is true.
+assert.commandFailedWithCode(clusterAdminDB.currentOp({$ownOps: true}), ErrorCodes.Unauthorized);
+
+// Test that a non-local $currentOp pipeline via mongoS returns results from all shards, and
+// includes both the shard and host names.
+assert(clusterAdminDB.logout());
+assert(clusterAdminDB.auth("user_inprog", "pwd"));
+
+assert.eq(clusterAdminDB
+ .aggregate([
+ {$currentOp: {allUsers: true, idleConnections: true}},
+ {$group: {_id: {shard: "$shard", host: "$host"}}},
+ {$sort: {_id: 1}}
+ ])
+ .toArray(),
+ [
+ {_id: {shard: "aggregation_currentop-rs0", host: st.rs0.getPrimary().host}},
+ {_id: {shard: "aggregation_currentop-rs1", host: st.rs1.getPrimary().host}},
+ {_id: {shard: "aggregation_currentop-rs2", host: st.rs2.getPrimary().host}}
+ ]);
+
+// Test that a $currentOp pipeline with {localOps:true} returns operations from the mongoS
+// itself rather than the shards.
+assert.eq(clusterAdminDB
+ .aggregate(
+ [
+ {$currentOp: {localOps: true}},
+ {
+ $match: {
+ $expr: {$eq: ["$host", "$clientMetadata.mongos.host"]},
+ "command.comment": "mongos_currentop_localOps"
+ }
+ }
+ ],
+ {comment: "mongos_currentop_localOps"})
+ .itcount(),
+ 1);
+
+//
+// localOps tests.
+//
+
+// Runs a suite of tests for behaviour common to both replica sets and mongoS with
+// {localOps:true}.
+function runLocalOpsTests(conn) {
+ // The 'localOps' parameter is not supported by the currentOp command, so we limit its
+ // testing to the replica set in certain cases.
+ const connAdminDB = conn.getDB("admin");
+ const isMongos = FixtureHelpers.isMongos(connAdminDB);
+
+ // Test that a user with the inprog privilege can see another user's ops with
+ // {allUsers:true}.
+ assert(connAdminDB.logout());
+ assert(connAdminDB.auth("user_inprog", "pwd"));
+
+ let awaitShell = runInParallelShell({
+ testfunc: function() {
+ assert.eq(db.getSiblingDB(jsTestName())
+ .test.find({})
+ .comment("agg_current_op_allusers_test")
+ .itcount(),
+ 5);
+ },
+ conn: conn,
+ username: "admin",
+ password: "pwd"
+ });
+
+ assertCurrentOpHasSingleMatchingEntry({
+ conn: conn,
+ currentOpAggFilter: {"command.comment": "agg_current_op_allusers_test"},
+ curOpSpec: {allUsers: true, localOps: true}
+ });
+
+ // Test that the currentOp command can see another user's operations with {$ownOps: false}.
+ // Only test on a replica set since 'localOps' isn't supported by the currentOp command.
+ if (!isMongos) {
+ assert.eq(
+ connAdminDB
+ .currentOp({$ownOps: false, "command.comment": "agg_current_op_allusers_test"})
+ .inprog.length,
+ 1);
+ }
- // Log in as 'user_inprog' to confirm that a user with the 'inprog' privilege can see all three
- // stashed transactions with 'allUsers:true'.
- shardAdminDB.logout();
- assert(shardAdminDB.auth("user_inprog", "pwd"));
+ // Test that $currentOp succeeds with {allUsers: false} for a user without the "inprog"
+ // privilege.
+ assert(connAdminDB.logout());
+ assert(connAdminDB.auth("user_no_inprog", "pwd"));
- assert.eq(
- shardAdminDB
- .aggregate(
- [{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter()}])
- .itcount(),
- 3);
+ assert.commandWorked(connAdminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: {allUsers: false, localOps: true}}], cursor: {}}));
+
+ // Test that the currentOp command succeeds with {$ownOps: true} for a user without the
+ // "inprog" privilege. Because currentOp does not support the 'localOps' parameter, we only
+ // perform this test in the replica set case.
+ if (!isMongos) {
+ assert.commandWorked(connAdminDB.currentOp({$ownOps: true}));
+ }
- // Confirm that the 'idleSessions' parameter defaults to true.
- assert.eq(shardAdminDB.aggregate([{$currentOp: {allUsers: true}}, {$match: sessionFilter()}])
+ // Test that a user without the inprog privilege cannot see another user's operations.
+ assert.eq(connAdminDB
+ .aggregate([
+ {$currentOp: {allUsers: false, localOps: true}},
+ {$match: {"command.comment": "agg_current_op_allusers_test"}}
+ ])
.itcount(),
- 3);
+ 0);
- // Confirm that idleSessions:false omits the stashed locks from the report.
- assert.eq(
- shardAdminDB
- .aggregate(
- [{$currentOp: {allUsers: true, idleSessions: false}}, {$match: sessionFilter()}])
- .itcount(),
- 0);
-
- // Allow all transactions to complete and close the associated sessions.
- for (let i in userNames) {
- assert(shardAdminDB.auth(userNames[i], "pwd"));
- assert.commandWorked(sessionDBs[i].adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(i),
- autocommit: false,
- writeConcern: {w: 'majority'}
- }));
- sessions[i].endSession();
+ // Test that a user without the inprog privilege cannot see another user's operations via
+ // the currentOp command. Limit this test to the replica set case due to the absence of a
+ // 'localOps' parameter for the currentOp command.
+ if (!isMongos) {
+ assert.eq(connAdminDB
+ .currentOp({$ownOps: true, "command.comment": "agg_current_op_allusers_test"})
+ .inprog.length,
+ 0);
}
- //
- // No-auth tests.
- //
+ // Release the failpoint and wait for the parallel shell to complete.
+ waitForParallelShell({conn: conn, username: "admin", password: "pwd", awaitShell: awaitShell});
+
+ // Test that a user without the inprog privilege can run getMore on a $currentOp cursor
+ // which they created with {allUsers: false}.
+ assert.commandWorked(getMoreTest({conn: conn, curOpSpec: {allUsers: false, localOps: true}}));
+}
+
+// Run the localOps tests for both replset and mongoS.
+runLocalOpsTests(mongosConn);
+runLocalOpsTests(shardConn);
+
+//
+// Stashed transactions tests.
+//
+
+// Test that $currentOp will display stashed transaction locks if 'idleSessions' is true, and
+// will only permit a user to view other users' sessions if the caller possesses the 'inprog'
+// privilege and 'allUsers' is true.
+const userNames = ["user_inprog", "admin", "user_no_inprog"];
+let sessionDBs = [];
+let sessions = [];
+
+// Returns a set of predicates that filter $currentOp for all stashed transactions.
+function sessionFilter() {
+ return {
+ type: "idleSession",
+ active: false,
+ opid: {$exists: false},
+ desc: "inactive transaction",
+ "lsid.id": {$in: sessions.map((session) => session.getSessionId().id)},
+ "transaction.parameters.txnNumber": {$gte: 0, $lt: sessions.length},
+ };
+}
- // Restart the cluster with auth disabled.
- restartCluster(st, {keyFile: null});
+for (let i in userNames) {
+ shardAdminDB.logout();
+ assert(shardAdminDB.auth(userNames[i], "pwd"));
- // Test that $currentOp will display all stashed transaction locks by default if auth is
- // disabled, even with 'allUsers:false'.
+ // Create a session for this user.
const session = shardAdminDB.getMongo().startSession();
- // Run an operation prior to starting the transaction and save its operation time.
+ // For each session, start but do not complete a transaction.
const sessionDB = session.getDatabase(shardTestDB.getName());
- const res = assert.commandWorked(sessionDB.runCommand({insert: "test", documents: [{x: 1}]}));
- const operationTime = res.operationTime;
-
- // Set and save the transaction's lifetime. We will use this later to assert that our
- // transaction's expiry time is equal to its start time + lifetime.
- const transactionLifeTime = 10;
- assert.commandWorked(sessionDB.adminCommand(
- {setParameter: 1, transactionLifetimeLimitSeconds: transactionLifeTime}));
-
- // Start but do not complete a transaction.
assert.commandWorked(sessionDB.runCommand({
insert: "test",
- documents: [{_id: `txn-insert-no-auth`}],
+ documents: [{_id: `txn-insert-${userNames[i]}-${i}`}],
readConcern: {level: "snapshot"},
- txnNumber: NumberLong(0),
+ txnNumber: NumberLong(i),
startTransaction: true,
autocommit: false
}));
- sessionDBs = [sessionDB];
- sessions = [session];
+ sessionDBs.push(sessionDB);
+ sessions.push(session);
- const timeAfterTransactionStarts = new ISODate();
-
- // Use $currentOp to confirm that the incomplete transaction has stashed its locks.
- assert.eq(shardAdminDB.aggregate([{$currentOp: {allUsers: false}}, {$match: sessionFilter()}])
- .itcount(),
- 1);
-
- // Confirm that idleSessions:false omits the stashed locks from the report.
+ // Use $currentOp to confirm that the incomplete transactions have stashed their locks while
+ // inactive, and that each user can only view their own sessions with 'allUsers:false'.
assert.eq(
shardAdminDB
.aggregate(
- [{$currentOp: {allUsers: false, idleSessions: false}}, {$match: sessionFilter()}])
+ [{$currentOp: {allUsers: false, idleSessions: true}}, {$match: sessionFilter()}])
.itcount(),
- 0);
-
- // Prepare the transaction and ensure the prepareTimestamp is valid.
- const prepareRes = assert.commandWorked(sessionDB.adminCommand({
- prepareTransaction: 1,
- txnNumber: NumberLong(0),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
- assert(prepareRes.prepareTimestamp,
- "prepareTransaction did not return a 'prepareTimestamp': " + tojson(prepareRes));
- assert(prepareRes.prepareTimestamp instanceof Timestamp,
- 'prepareTimestamp was not a Timestamp: ' + tojson(prepareRes));
- assert.neq(prepareRes.prepareTimestamp,
- Timestamp(0, 0),
- "prepareTimestamp cannot be null: " + tojson(prepareRes));
-
- const timeBeforeCurrentOp = new ISODate();
-
- // Check that the currentOp's transaction subdocument's fields align with our expectations.
- let currentOp =
- shardAdminDB.aggregate([{$currentOp: {allUsers: false}}, {$match: sessionFilter()}])
- .toArray();
- let transactionDocument = currentOp[0].transaction;
- assert.eq(transactionDocument.parameters.autocommit, false);
- assert.eq(transactionDocument.parameters.readConcern, {level: "snapshot"});
- assert.gte(transactionDocument.readTimestamp, operationTime);
- // We round timeOpenMicros up to the nearest multiple of 1000 to avoid occasional assertion
- // failures caused by timeOpenMicros having microsecond precision while
- // timeBeforeCurrentOp/timeAfterTransactionStarts only have millisecond precision.
- assert.gte(Math.ceil(transactionDocument.timeOpenMicros / 1000) * 1000,
- (timeBeforeCurrentOp - timeAfterTransactionStarts) * 1000);
- assert.gte(transactionDocument.timeActiveMicros, 0);
- assert.gte(transactionDocument.timeInactiveMicros, 0);
- assert.gte(transactionDocument.timePreparedMicros, 0);
- // Not worried about its specific value, validate that in general we return some non-zero &
- // valid time greater than epoch time.
- assert.gt(ISODate(transactionDocument.startWallClockTime), ISODate("1970-01-01T00:00:00.000Z"));
- assert.eq(
- ISODate(transactionDocument.expiryTime).getTime(),
- ISODate(transactionDocument.startWallClockTime).getTime() + transactionLifeTime * 1000);
-
- // Allow the transactions to complete and close the session. We must commit prepared
- // transactions at a timestamp greater than the prepare timestamp.
- const commitTimestamp =
- Timestamp(prepareRes.prepareTimestamp.getTime(), prepareRes.prepareTimestamp.getInc() + 1);
- assert.commandWorked(sessionDB.adminCommand({
+ 1);
+}
+
+// Log in as 'user_no_inprog' to verify that the user cannot view other users' sessions via
+// 'allUsers:true'.
+shardAdminDB.logout();
+assert(shardAdminDB.auth("user_no_inprog", "pwd"));
+
+assert.commandFailedWithCode(shardAdminDB.runCommand({
+ aggregate: 1,
+ cursor: {},
+ pipeline: [{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter()}]
+}),
+ ErrorCodes.Unauthorized);
+
+// Log in as 'user_inprog' to confirm that a user with the 'inprog' privilege can see all three
+// stashed transactions with 'allUsers:true'.
+shardAdminDB.logout();
+assert(shardAdminDB.auth("user_inprog", "pwd"));
+
+assert.eq(
+ shardAdminDB
+ .aggregate([{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter()}])
+ .itcount(),
+ 3);
+
+// Confirm that the 'idleSessions' parameter defaults to true.
+assert.eq(
+ shardAdminDB.aggregate([{$currentOp: {allUsers: true}}, {$match: sessionFilter()}]).itcount(),
+ 3);
+
+// Confirm that idleSessions:false omits the stashed locks from the report.
+assert.eq(
+ shardAdminDB
+ .aggregate([{$currentOp: {allUsers: true, idleSessions: false}}, {$match: sessionFilter()}])
+ .itcount(),
+ 0);
+
+// Allow all transactions to complete and close the associated sessions.
+for (let i in userNames) {
+ assert(shardAdminDB.auth(userNames[i], "pwd"));
+ assert.commandWorked(sessionDBs[i].adminCommand({
commitTransaction: 1,
- txnNumber: NumberLong(0),
+ txnNumber: NumberLong(i),
autocommit: false,
- writeConcern: {w: 'majority'},
- commitTimestamp: commitTimestamp
+ writeConcern: {w: 'majority'}
}));
- session.endSession();
-
- // Run a set of tests of behaviour common to replset and mongoS when auth is disabled.
- function runNoAuthTests(conn, curOpSpec) {
- // Test that the allUsers parameter is ignored when authentication is disabled.
- // Ensure that there is at least one other connection present.
- const connAdminDB = conn.getDB("admin");
- const otherConn = new Mongo(conn.host);
- curOpSpec = Object.assign({localOps: false}, (curOpSpec || {}));
-
- // Verify that $currentOp displays all operations when auth is disabled regardless of the
- // allUsers parameter, by confirming that we can see non-client system operations when
- // {allUsers: false} is specified.
- assert.gte(
- connAdminDB
- .aggregate([
- {
- $currentOp:
- {allUsers: false, idleConnections: true, localOps: curOpSpec.localOps}
- },
- {$match: {connectionId: {$exists: false}}}
- ])
- .itcount(),
- 1);
-
- // Verify that the currentOp command displays all operations when auth is disabled
- // regardless of
- // the $ownOps parameter, by confirming that we can see non-client system operations when
- // {$ownOps: true} is specified.
- assert.gte(
- connAdminDB.currentOp({$ownOps: true, $all: true, connectionId: {$exists: false}})
- .inprog.length,
- 1);
-
- // Test that a user can run getMore on a $currentOp cursor when authentication is disabled.
- assert.commandWorked(
- getMoreTest({conn: conn, curOpSpec: {allUsers: true, localOps: curOpSpec.localOps}}));
- }
-
- runNoAuthTests(shardConn);
- runNoAuthTests(mongosConn);
- runNoAuthTests(mongosConn, {localOps: true});
-
- //
- // Replset specific tests.
- //
-
- // Take the replica set out of the cluster.
- shardConn = restartReplSet(st.rs0, {shardsvr: null});
- shardTestDB = shardConn.getDB(jsTestName());
- shardAdminDB = shardConn.getDB("admin");
-
- // Test that the host field is present and the shard field is absent when run on mongoD.
- assert.eq(shardAdminDB
- .aggregate([
- {$currentOp: {allUsers: true, idleConnections: true}},
- {$group: {_id: {shard: "$shard", host: "$host"}}}
- ])
- .toArray(),
- [
- {_id: {host: shardConn.host}},
- ]);
+ sessions[i].endSession();
+}
+
+//
+// No-auth tests.
+//
+
+// Restart the cluster with auth disabled.
+restartCluster(st, {keyFile: null});
+
+// Test that $currentOp will display all stashed transaction locks by default if auth is
+// disabled, even with 'allUsers:false'.
+const session = shardAdminDB.getMongo().startSession();
+
+// Run an operation prior to starting the transaction and save its operation time.
+const sessionDB = session.getDatabase(shardTestDB.getName());
+const res = assert.commandWorked(sessionDB.runCommand({insert: "test", documents: [{x: 1}]}));
+const operationTime = res.operationTime;
+
+// Set and save the transaction's lifetime. We will use this later to assert that our
+// transaction's expiry time is equal to its start time + lifetime.
+const transactionLifeTime = 10;
+assert.commandWorked(sessionDB.adminCommand(
+ {setParameter: 1, transactionLifetimeLimitSeconds: transactionLifeTime}));
+
+// Start but do not complete a transaction.
+assert.commandWorked(sessionDB.runCommand({
+ insert: "test",
+ documents: [{_id: `txn-insert-no-auth`}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(0),
+ startTransaction: true,
+ autocommit: false
+}));
+sessionDBs = [sessionDB];
+sessions = [session];
+
+const timeAfterTransactionStarts = new ISODate();
+
+// Use $currentOp to confirm that the incomplete transaction has stashed its locks.
+assert.eq(
+ shardAdminDB.aggregate([{$currentOp: {allUsers: false}}, {$match: sessionFilter()}]).itcount(),
+ 1);
+
+// Confirm that idleSessions:false omits the stashed locks from the report.
+assert.eq(shardAdminDB
+ .aggregate(
+ [{$currentOp: {allUsers: false, idleSessions: false}}, {$match: sessionFilter()}])
+ .itcount(),
+ 0);
+
+// Prepare the transaction and ensure the prepareTimestamp is valid.
+const prepareRes = assert.commandWorked(sessionDB.adminCommand({
+ prepareTransaction: 1,
+ txnNumber: NumberLong(0),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
+assert(prepareRes.prepareTimestamp,
+ "prepareTransaction did not return a 'prepareTimestamp': " + tojson(prepareRes));
+assert(prepareRes.prepareTimestamp instanceof Timestamp,
+ 'prepareTimestamp was not a Timestamp: ' + tojson(prepareRes));
+assert.neq(prepareRes.prepareTimestamp,
+ Timestamp(0, 0),
+ "prepareTimestamp cannot be null: " + tojson(prepareRes));
+
+const timeBeforeCurrentOp = new ISODate();
+
+// Check that the currentOp's transaction subdocument's fields align with our expectations.
+let currentOp =
+ shardAdminDB.aggregate([{$currentOp: {allUsers: false}}, {$match: sessionFilter()}]).toArray();
+let transactionDocument = currentOp[0].transaction;
+assert.eq(transactionDocument.parameters.autocommit, false);
+assert.eq(transactionDocument.parameters.readConcern, {level: "snapshot"});
+assert.gte(transactionDocument.readTimestamp, operationTime);
+// We round timeOpenMicros up to the nearest multiple of 1000 to avoid occasional assertion
+// failures caused by timeOpenMicros having microsecond precision while
+// timeBeforeCurrentOp/timeAfterTransactionStarts only have millisecond precision.
+assert.gte(Math.ceil(transactionDocument.timeOpenMicros / 1000) * 1000,
+ (timeBeforeCurrentOp - timeAfterTransactionStarts) * 1000);
+assert.gte(transactionDocument.timeActiveMicros, 0);
+assert.gte(transactionDocument.timeInactiveMicros, 0);
+assert.gte(transactionDocument.timePreparedMicros, 0);
+// We are not concerned with the specific value; just validate that we return a non-zero, valid
+// time greater than the epoch.
+assert.gt(ISODate(transactionDocument.startWallClockTime), ISODate("1970-01-01T00:00:00.000Z"));
+assert.eq(ISODate(transactionDocument.expiryTime).getTime(),
+ ISODate(transactionDocument.startWallClockTime).getTime() + transactionLifeTime * 1000);
+
+// Allow the transactions to complete and close the session. We must commit prepared
+// transactions at a timestamp greater than the prepare timestamp.
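+// Incrementing the 'inc' component of the prepareTimestamp produces a timestamp strictly greater
+// than the prepareTimestamp, which satisfies that requirement.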
+const commitTimestamp =
+ Timestamp(prepareRes.prepareTimestamp.getTime(), prepareRes.prepareTimestamp.getInc() + 1);
+assert.commandWorked(sessionDB.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(0),
+ autocommit: false,
+ writeConcern: {w: 'majority'},
+ commitTimestamp: commitTimestamp
+}));
+session.endSession();
+
+// Run a set of tests of behaviour common to replset and mongoS when auth is disabled.
+function runNoAuthTests(conn, curOpSpec) {
+ // Test that the allUsers parameter is ignored when authentication is disabled.
+ // Ensure that there is at least one other connection present.
+ const connAdminDB = conn.getDB("admin");
+ const otherConn = new Mongo(conn.host);
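+    // 'otherConn' is not referenced again; opening it just ensures another client connection
+    // exists.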
+ curOpSpec = Object.assign({localOps: false}, (curOpSpec || {}));
+
+ // Verify that $currentOp displays all operations when auth is disabled regardless of the
+ // allUsers parameter, by confirming that we can see non-client system operations when
+ // {allUsers: false} is specified.
+ assert.gte(
+ connAdminDB
+ .aggregate([
+ {
+ $currentOp:
+ {allUsers: false, idleConnections: true, localOps: curOpSpec.localOps}
+ },
+ {$match: {connectionId: {$exists: false}}}
+ ])
+ .itcount(),
+ 1);
- // Test that attempting to 'spoof' a sharded request on non-shardsvr mongoD fails.
- assert.commandFailedWithCode(
- shardAdminDB.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: {}}], fromMongos: true, cursor: {}}),
- 40465);
-
- // Test that an operation which is at the BSON user size limit does not throw an error when the
- // currentOp metadata is added to the output document.
- const bsonUserSizeLimit = assert.commandWorked(shardAdminDB.isMaster()).maxBsonObjectSize;
-
- let aggPipeline = [
- {$currentOp: {}},
- {
- $match: {
- $or: [
- {
+    // Verify that the currentOp command displays all operations when auth is disabled regardless
+    // of the $ownOps parameter, by confirming that we can see non-client system operations when
+    // {$ownOps: true} is specified.
+ assert.gte(connAdminDB.currentOp({$ownOps: true, $all: true, connectionId: {$exists: false}})
+ .inprog.length,
+ 1);
+
+ // Test that a user can run getMore on a $currentOp cursor when authentication is disabled.
+ assert.commandWorked(
+ getMoreTest({conn: conn, curOpSpec: {allUsers: true, localOps: curOpSpec.localOps}}));
+}
+
+runNoAuthTests(shardConn);
+runNoAuthTests(mongosConn);
+runNoAuthTests(mongosConn, {localOps: true});
+
+//
+// Replset specific tests.
+//
+
+// Take the replica set out of the cluster.
+shardConn = restartReplSet(st.rs0, {shardsvr: null});
+shardTestDB = shardConn.getDB(jsTestName());
+shardAdminDB = shardConn.getDB("admin");
+
+// Test that the host field is present and the shard field is absent when run on mongoD.
+assert.eq(shardAdminDB
+ .aggregate([
+ {$currentOp: {allUsers: true, idleConnections: true}},
+ {$group: {_id: {shard: "$shard", host: "$host"}}}
+ ])
+ .toArray(),
+ [
+ {_id: {host: shardConn.host}},
+ ]);
+
+// Test that attempting to 'spoof' a sharded request on non-shardsvr mongoD fails.
+assert.commandFailedWithCode(
+ shardAdminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: {}}], fromMongos: true, cursor: {}}),
+ 40465);
+
+// Test that an operation which is at the BSON user size limit does not throw an error when the
+// currentOp metadata is added to the output document.
+const bsonUserSizeLimit = assert.commandWorked(shardAdminDB.isMaster()).maxBsonObjectSize;
+
+let aggPipeline = [
+ {$currentOp: {}},
+ {
+ $match: {
+ $or: [
+ {
"command.comment": "agg_current_op_bson_limit_test",
"command.$truncated": {$exists: false}
- },
- {padding: ""}
- ]
- }
+ },
+ {padding: ""}
+ ]
}
- ];
+ }
+];
- aggPipeline[1].$match.$or[1].padding =
- "a".repeat(bsonUserSizeLimit - Object.bsonsize(aggPipeline));
+aggPipeline[1].$match.$or[1].padding = "a".repeat(bsonUserSizeLimit - Object.bsonsize(aggPipeline));
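+// The 'padding' predicate matches no documents; it exists only to grow the command to exactly the
+// BSON user size limit.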
- assert.eq(Object.bsonsize(aggPipeline), bsonUserSizeLimit);
+assert.eq(Object.bsonsize(aggPipeline), bsonUserSizeLimit);
- assert.eq(
- shardAdminDB.aggregate(aggPipeline, {comment: "agg_current_op_bson_limit_test"}).itcount(),
- 1);
+assert.eq(
+ shardAdminDB.aggregate(aggPipeline, {comment: "agg_current_op_bson_limit_test"}).itcount(), 1);
- // Test that $currentOp can run while the mongoD is write-locked.
- let awaitShell = startParallelShell(function() {
- assert.commandFailedWithCode(db.adminCommand({sleep: 1, lock: "w", secs: 300}),
- ErrorCodes.Interrupted);
- }, shardConn.port);
+// Test that $currentOp can run while the mongoD is write-locked.
+let awaitShell = startParallelShell(function() {
+ assert.commandFailedWithCode(db.adminCommand({sleep: 1, lock: "w", secs: 300}),
+ ErrorCodes.Interrupted);
+}, shardConn.port);
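+// The sleep command above holds the global write lock until the operation is killed below.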
- const op = assertCurrentOpHasSingleMatchingEntry(
- {conn: shardConn, currentOpAggFilter: {"command.sleep": 1, active: true}});
+const op = assertCurrentOpHasSingleMatchingEntry(
+ {conn: shardConn, currentOpAggFilter: {"command.sleep": 1, active: true}});
- assert.commandWorked(shardAdminDB.killOp(op.opid));
+assert.commandWorked(shardAdminDB.killOp(op.opid));
- awaitShell();
+awaitShell();
- // Add the shard back into the replset so that it can be validated by st.stop().
- shardConn = restartReplSet(st.rs0, {shardsvr: ""});
- st.stop();
+// Add the shard back into the replset so that it can be validated by st.stop().
+shardConn = restartReplSet(st.rs0, {shardsvr: ""});
+st.stop();
})();
diff --git a/jstests/sharding/aggregation_internal_parameters.js b/jstests/sharding/aggregation_internal_parameters.js
index 2076aa465d8..529a9e00b1f 100644
--- a/jstests/sharding/aggregation_internal_parameters.js
+++ b/jstests/sharding/aggregation_internal_parameters.js
@@ -3,116 +3,116 @@
* parameters that mongoS uses internally when communicating with the shards.
*/
(function() {
- "use strict";
-
- const st = new ShardingTest({shards: 2, rs: {nodes: 1, enableMajorityReadConcern: ''}});
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Test that command succeeds when no internal options have been specified.
- assert.commandWorked(
- mongosDB.runCommand({aggregate: mongosColl.getName(), pipeline: [], cursor: {}}));
-
- // Test that the command fails if we have 'needsMerge: false' without 'fromMongos'.
- assert.commandFailedWithCode(
- mongosDB.runCommand(
- {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, needsMerge: false}),
- ErrorCodes.FailedToParse);
-
- // Test that the command fails if we have 'needsMerge: true' without 'fromMongos'.
- assert.commandFailedWithCode(
- mongosDB.runCommand(
- {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, needsMerge: true}),
- ErrorCodes.FailedToParse);
-
- // Test that 'fromMongos: true' cannot be specified in a command sent to mongoS.
- assert.commandFailedWithCode(
- mongosDB.runCommand(
- {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, fromMongos: true}),
- 51089);
-
- // Test that 'fromMongos: false' can be specified in a command sent to mongoS.
- assert.commandWorked(mongosDB.runCommand(
- {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, fromMongos: false}));
-
- // Test that the command fails if we have 'needsMerge: true' with 'fromMongos: false'.
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [],
- cursor: {},
- needsMerge: true,
- fromMongos: false
- }),
- 51089);
-
- // Test that the command fails if we have 'needsMerge: true' with 'fromMongos: true'.
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [],
- cursor: {},
- needsMerge: true,
- fromMongos: true
- }),
- 51089);
-
- // Test that 'needsMerge: false' can be specified in a command sent to mongoS along with
- // 'fromMongos: false'.
- assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [],
- cursor: {},
- needsMerge: false,
- fromMongos: false
- }));
-
- // Test that 'mergeByPBRT: true' cannot be specified in a command sent to mongoS.
- assert.commandFailedWithCode(
- mongosDB.runCommand(
- {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, mergeByPBRT: true}),
- 51089);
-
- // Test that 'mergeByPBRT: false' can be specified in a command sent to mongoS.
- assert.commandWorked(mongosDB.runCommand(
- {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, mergeByPBRT: false}));
-
- // Test that the 'exchange' parameter cannot be specified in a command sent to mongoS.
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [],
- cursor: {},
- exchange: {policy: 'roundrobin', consumers: NumberInt(2)}
- }),
- 51028);
-
- // Test that the command fails when all internal parameters have been specified.
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [],
- cursor: {},
- needsMerge: true,
- fromMongos: true,
- mergeByPBRT: true,
- exchange: {policy: 'roundrobin', consumers: NumberInt(2)}
- }),
- 51028);
-
- // Test that the command fails when all internal parameters but exchange have been specified.
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [],
- cursor: {},
- needsMerge: true,
- fromMongos: true,
- mergeByPBRT: true
- }),
- 51089);
-
- st.stop();
+"use strict";
+
+const st = new ShardingTest({shards: 2, rs: {nodes: 1, enableMajorityReadConcern: ''}});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Test that command succeeds when no internal options have been specified.
+assert.commandWorked(
+ mongosDB.runCommand({aggregate: mongosColl.getName(), pipeline: [], cursor: {}}));
+
+// Test that the command fails if we have 'needsMerge: false' without 'fromMongos'.
+assert.commandFailedWithCode(
+ mongosDB.runCommand(
+ {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, needsMerge: false}),
+ ErrorCodes.FailedToParse);
+
+// Test that the command fails if we have 'needsMerge: true' without 'fromMongos'.
+assert.commandFailedWithCode(
+ mongosDB.runCommand(
+ {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, needsMerge: true}),
+ ErrorCodes.FailedToParse);
+
+// Test that 'fromMongos: true' cannot be specified in a command sent to mongoS.
+assert.commandFailedWithCode(
+ mongosDB.runCommand(
+ {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, fromMongos: true}),
+ 51089);
+
+// Test that 'fromMongos: false' can be specified in a command sent to mongoS.
+assert.commandWorked(mongosDB.runCommand(
+ {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, fromMongos: false}));
+
+// Test that the command fails if we have 'needsMerge: true' with 'fromMongos: false'.
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [],
+ cursor: {},
+ needsMerge: true,
+ fromMongos: false
+}),
+ 51089);
+
+// Test that the command fails if we have 'needsMerge: true' with 'fromMongos: true'.
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [],
+ cursor: {},
+ needsMerge: true,
+ fromMongos: true
+}),
+ 51089);
+
+// Test that 'needsMerge: false' can be specified in a command sent to mongoS along with
+// 'fromMongos: false'.
+assert.commandWorked(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [],
+ cursor: {},
+ needsMerge: false,
+ fromMongos: false
+}));
+
+// Test that 'mergeByPBRT: true' cannot be specified in a command sent to mongoS.
+assert.commandFailedWithCode(
+ mongosDB.runCommand(
+ {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, mergeByPBRT: true}),
+ 51089);
+
+// Test that 'mergeByPBRT: false' can be specified in a command sent to mongoS.
+assert.commandWorked(mongosDB.runCommand(
+ {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, mergeByPBRT: false}));
+
+// Test that the 'exchange' parameter cannot be specified in a command sent to mongoS.
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [],
+ cursor: {},
+ exchange: {policy: 'roundrobin', consumers: NumberInt(2)}
+}),
+ 51028);
+
+// Test that the command fails when all internal parameters have been specified.
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [],
+ cursor: {},
+ needsMerge: true,
+ fromMongos: true,
+ mergeByPBRT: true,
+ exchange: {policy: 'roundrobin', consumers: NumberInt(2)}
+}),
+ 51028);
+
+// Test that the command fails when all internal parameters but exchange have been specified.
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [],
+ cursor: {},
+ needsMerge: true,
+ fromMongos: true,
+ mergeByPBRT: true
+}),
+ 51089);
+
+st.stop();
})();
diff --git a/jstests/sharding/aggregations_in_session.js b/jstests/sharding/aggregations_in_session.js
index b2eb82bed3c..456decee662 100644
--- a/jstests/sharding/aggregations_in_session.js
+++ b/jstests/sharding/aggregations_in_session.js
@@ -1,41 +1,41 @@
// Tests running aggregations within a client session. This test was designed to reproduce
// SERVER-33660.
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 2});
+const st = new ShardingTest({shards: 2});
- // Gate this test to transaction supporting engines only as it uses txnNumber.
- let shardDB = st.rs0.getPrimary().getDB("test");
- if (!shardDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
- jsTestLog("Do not run on storage engine that does not support transactions");
- st.stop();
- return;
- }
+// Gate this test to transaction supporting engines only as it uses txnNumber.
+let shardDB = st.rs0.getPrimary().getDB("test");
+if (!shardDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
+ jsTestLog("Do not run on storage engine that does not support transactions");
+ st.stop();
+ return;
+}
- const session = st.s0.getDB("test").getMongo().startSession();
- const mongosColl = session.getDatabase("test")[jsTestName()];
+const session = st.s0.getDB("test").getMongo().startSession();
+const mongosColl = session.getDatabase("test")[jsTestName()];
- // Shard the collection, split it into two chunks, and move the [1, MaxKey] chunk to the other
- // shard. We need chunks distributed across multiple shards in order to force a split pipeline
- // merging on a mongod - otherwise the entire pipeline will be forwarded without a split and
- // without a $mergeCursors stage.
- st.shardColl(mongosColl, {_id: 1}, {_id: 1}, {_id: 1});
- assert.writeOK(mongosColl.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
+// Shard the collection, split it into two chunks, and move the [1, MaxKey] chunk to the other
+// shard. We need chunks distributed across multiple shards in order to force a split pipeline
+// merging on a mongod - otherwise the entire pipeline will be forwarded without a split and
+// without a $mergeCursors stage.
+st.shardColl(mongosColl, {_id: 1}, {_id: 1}, {_id: 1});
+assert.writeOK(mongosColl.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
- // This assertion will reproduce the hang described in SERVER-33660.
- assert.eq(
- [{_id: 0}, {_id: 1}, {_id: 2}],
- mongosColl
- .aggregate([{$_internalSplitPipeline: {mergeType: "primaryShard"}}, {$sort: {_id: 1}}])
- .toArray());
+// This assertion will reproduce the hang described in SERVER-33660.
+assert.eq(
+ [{_id: 0}, {_id: 1}, {_id: 2}],
+ mongosColl
+ .aggregate([{$_internalSplitPipeline: {mergeType: "primaryShard"}}, {$sort: {_id: 1}}])
+ .toArray());
- // Test a couple more aggregations to be sure.
- assert.eq(
- [{_id: 0}, {_id: 1}, {_id: 2}],
- mongosColl.aggregate([{$_internalSplitPipeline: {mergeType: "mongos"}}, {$sort: {_id: 1}}])
- .toArray());
- assert.eq(mongosColl.aggregate([{$sort: {_id: 1}}, {$out: "testing"}]).itcount(), 0);
+// Test a couple more aggregations to be sure.
+assert.eq(
+ [{_id: 0}, {_id: 1}, {_id: 2}],
+ mongosColl.aggregate([{$_internalSplitPipeline: {mergeType: "mongos"}}, {$sort: {_id: 1}}])
+ .toArray());
+assert.eq(mongosColl.aggregate([{$sort: {_id: 1}}, {$out: "testing"}]).itcount(), 0);
- st.stop();
+st.stop();
}());
diff --git a/jstests/sharding/all_config_servers_blackholed_from_mongos.js b/jstests/sharding/all_config_servers_blackholed_from_mongos.js
index d13499cc551..53d6e435dd1 100644
--- a/jstests/sharding/all_config_servers_blackholed_from_mongos.js
+++ b/jstests/sharding/all_config_servers_blackholed_from_mongos.js
@@ -9,41 +9,40 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({
- shards: 2,
- mongos: 1,
- useBridge: true,
- });
+var st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ useBridge: true,
+});
- var testDB = st.s.getDB('BlackHoleDB');
+var testDB = st.s.getDB('BlackHoleDB');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'BlackHoleDB'}));
- assert.commandWorked(
- testDB.adminCommand({shardCollection: testDB.ShardedColl.getFullName(), key: {_id: 1}}));
+assert.commandWorked(testDB.adminCommand({enableSharding: 'BlackHoleDB'}));
+assert.commandWorked(
+ testDB.adminCommand({shardCollection: testDB.ShardedColl.getFullName(), key: {_id: 1}}));
- assert.writeOK(testDB.ShardedColl.insert({a: 1}));
+assert.writeOK(testDB.ShardedColl.insert({a: 1}));
- jsTest.log('Making all the config servers appear as a blackhole to mongos');
- st._configServers.forEach(function(configSvr) {
- configSvr.discardMessagesFrom(st.s, 1.0);
- });
+jsTest.log('Making all the config servers appear as a blackhole to mongos');
+st._configServers.forEach(function(configSvr) {
+ configSvr.discardMessagesFrom(st.s, 1.0);
+});
- assert.commandWorked(testDB.adminCommand({flushRouterConfig: 1}));
+assert.commandWorked(testDB.adminCommand({flushRouterConfig: 1}));
- // This shouldn't stall
- jsTest.log('Doing read operation on the sharded collection');
- assert.throws(function() {
- testDB.ShardedColl.find({}).maxTimeMS(15000).itcount();
- });
+// This should fail within maxTimeMS rather than stall indefinitely
+jsTest.log('Doing read operation on the sharded collection');
+assert.throws(function() {
+ testDB.ShardedColl.find({}).maxTimeMS(15000).itcount();
+});
- // This should fail, because the primary is not available
- jsTest.log('Doing write operation on a new database and collection');
- assert.writeError(st.s.getDB('NonExistentDB')
- .TestColl.insert({_id: 0, value: 'This value will never be inserted'},
- {maxTimeMS: 15000}));
-
- st.stop();
+// This should fail, because the primary is not available
+jsTest.log('Doing write operation on a new database and collection');
+assert.writeError(
+ st.s.getDB('NonExistentDB')
+ .TestColl.insert({_id: 0, value: 'This value will never be inserted'}, {maxTimeMS: 15000}));
+st.stop();
}());
diff --git a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
index 6a89bf1508c..68745172568 100644
--- a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
+++ b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
@@ -7,72 +7,72 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
+var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
- jsTest.log('Config nodes up: 3 of 3, shard nodes up: 2 of 2: ' +
- 'Insert test data to work with');
- assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
- {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
- assert.eq([{_id: 0, count: 1}], st.s0.getDB('TestDB').TestColl.find().toArray());
+jsTest.log('Config nodes up: 3 of 3, shard nodes up: 2 of 2: ' +
+ 'Insert test data to work with');
+assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+ {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
+assert.eq([{_id: 0, count: 1}], st.s0.getDB('TestDB').TestColl.find().toArray());
- jsTest.log('Config nodes up: 2 of 3, shard nodes up: 2 of 2: ' +
- 'Inserts and queries must work');
- st.configRS.stop(0);
- st.restartMongos(0);
- assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
- {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
- assert.eq([{_id: 0, count: 2}], st.s0.getDB('TestDB').TestColl.find().toArray());
+jsTest.log('Config nodes up: 2 of 3, shard nodes up: 2 of 2: ' +
+ 'Inserts and queries must work');
+st.configRS.stop(0);
+st.restartMongos(0);
+assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+ {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
+assert.eq([{_id: 0, count: 2}], st.s0.getDB('TestDB').TestColl.find().toArray());
- jsTest.log('Config nodes up: 1 of 3, shard nodes up: 2 of 2: ' +
- 'Inserts and queries must work');
- st.configRS.stop(1);
- st.restartMongos(0);
- assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
- {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
- assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
+jsTest.log('Config nodes up: 1 of 3, shard nodes up: 2 of 2: ' +
+ 'Inserts and queries must work');
+st.configRS.stop(1);
+st.restartMongos(0);
+assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+ {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
+assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
- jsTest.log('Config nodes up: 1 of 3, shard nodes up: 1 of 2: ' +
- 'Only queries will work (no shard primary)');
- st.rs0.stop(0);
- st.restartMongos(0);
- st.s0.setSlaveOk(true);
- assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
+jsTest.log('Config nodes up: 1 of 3, shard nodes up: 1 of 2: ' +
+ 'Only queries will work (no shard primary)');
+st.rs0.stop(0);
+st.restartMongos(0);
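+// With the shard primary down, reads must be allowed to target the remaining secondary.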
+st.s0.setSlaveOk(true);
+assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
- jsTest.log('Config nodes up: 1 of 3, shard nodes up: 0 of 2: ' +
- 'MongoS must start, but no operations will work (no shard nodes available)');
- st.rs0.stop(1);
- st.restartMongos(0);
- assert.throws(function() {
- st.s0.getDB('TestDB').TestColl.find().toArray();
- });
+jsTest.log('Config nodes up: 1 of 3, shard nodes up: 0 of 2: ' +
+ 'MongoS must start, but no operations will work (no shard nodes available)');
+st.rs0.stop(1);
+st.restartMongos(0);
+assert.throws(function() {
+ st.s0.getDB('TestDB').TestColl.find().toArray();
+});
- jsTest.log('Config nodes up: 0 of 3, shard nodes up: 0 of 2: ' +
- 'Metadata cannot be loaded at all, no operations will work');
- st.configRS.stop(1);
+jsTest.log('Config nodes up: 0 of 3, shard nodes up: 0 of 2: ' +
+ 'Metadata cannot be loaded at all, no operations will work');
+st.configRS.stop(1);
- // Instead of restarting mongos, ensure it has no metadata
- assert.commandWorked(st.s0.adminCommand({flushRouterConfig: 1}));
+// Instead of restarting mongos, ensure it has no metadata
+assert.commandWorked(st.s0.adminCommand({flushRouterConfig: 1}));
- // Throws transport error first and subsequent times when loading config data, not no primary
- for (var i = 0; i < 2; i++) {
- try {
- st.s0.getDB('TestDB').TestColl.findOne();
+// Throws a transport error (not a no-primary error) on the first and all subsequent attempts to
+// load config data.
+for (var i = 0; i < 2; i++) {
+ try {
+ st.s0.getDB('TestDB').TestColl.findOne();
- // Must always throw
- assert(false);
- } catch (e) {
- printjson(e);
+ // Must always throw
+ assert(false);
+ } catch (e) {
+ printjson(e);
- // Make sure we get a transport error, and not a no-primary error
- assert(e.code == 10276 || // Transport error
- e.code == 13328 || // Connect error
- e.code == ErrorCodes.HostUnreachable ||
- e.code == ErrorCodes.FailedToSatisfyReadPreference ||
- e.code == ErrorCodes.ReplicaSetNotFound);
- }
+ // Make sure we get a transport error, and not a no-primary error
+ assert(e.code == 10276 || // Transport error
+ e.code == 13328 || // Connect error
+ e.code == ErrorCodes.HostUnreachable ||
+ e.code == ErrorCodes.FailedToSatisfyReadPreference ||
+ e.code == ErrorCodes.ReplicaSetNotFound);
}
+}
- st.stop();
+st.stop();
}());
diff --git a/jstests/sharding/allow_partial_results.js b/jstests/sharding/allow_partial_results.js
index 6490720cb80..7ecbbb1dc7b 100644
--- a/jstests/sharding/allow_partial_results.js
+++ b/jstests/sharding/allow_partial_results.js
@@ -7,71 +7,70 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
+"use strict";
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
- // TODO: SERVER-33597 remove shardAsReplicaSet: false
- const st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
+// TODO: SERVER-33597 remove shardAsReplicaSet: false
+const st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
- jsTest.log("Insert some data.");
- const nDocs = 100;
- const coll = st.s0.getDB(dbName)[collName];
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = -50; i < 50; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+jsTest.log("Insert some data.");
+const nDocs = 100;
+const coll = st.s0.getDB(dbName)[collName];
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = -50; i < 50; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
- jsTest.log("Create a sharded collection with one chunk on each of the two shards.");
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
+jsTest.log("Create a sharded collection with one chunk on each of the two shards.");
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
- let findRes;
+let findRes;
- jsTest.log("Without 'allowPartialResults', if all shards are up, find returns all docs.");
- findRes = coll.runCommand({find: collName});
- assert.commandWorked(findRes);
- assert.eq(nDocs, findRes.cursor.firstBatch.length);
+jsTest.log("Without 'allowPartialResults', if all shards are up, find returns all docs.");
+findRes = coll.runCommand({find: collName});
+assert.commandWorked(findRes);
+assert.eq(nDocs, findRes.cursor.firstBatch.length);
- jsTest.log("With 'allowPartialResults: false', if all shards are up, find returns all docs.");
- findRes = coll.runCommand({find: collName, allowPartialResults: false});
- assert.commandWorked(findRes);
- assert.eq(nDocs, findRes.cursor.firstBatch.length);
+jsTest.log("With 'allowPartialResults: false', if all shards are up, find returns all docs.");
+findRes = coll.runCommand({find: collName, allowPartialResults: false});
+assert.commandWorked(findRes);
+assert.eq(nDocs, findRes.cursor.firstBatch.length);
- jsTest.log("With 'allowPartialResults: true', if all shards are up, find returns all docs.");
- findRes = coll.runCommand({find: collName, allowPartialResults: true});
- assert.commandWorked(findRes);
- assert.eq(nDocs, findRes.cursor.firstBatch.length);
+jsTest.log("With 'allowPartialResults: true', if all shards are up, find returns all docs.");
+findRes = coll.runCommand({find: collName, allowPartialResults: true});
+assert.commandWorked(findRes);
+assert.eq(nDocs, findRes.cursor.firstBatch.length);
- jsTest.log("Stopping " + st.shard0.shardName);
- MongoRunner.stopMongod(st.shard0);
+jsTest.log("Stopping " + st.shard0.shardName);
+MongoRunner.stopMongod(st.shard0);
- jsTest.log("Without 'allowPartialResults', if some shard down, find fails.");
- assert.commandFailed(coll.runCommand({find: collName}));
+jsTest.log("Without 'allowPartialResults', if some shard down, find fails.");
+assert.commandFailed(coll.runCommand({find: collName}));
- jsTest.log("With 'allowPartialResults: false', if some shard down, find fails.");
- assert.commandFailed(coll.runCommand({find: collName, allowPartialResults: false}));
+jsTest.log("With 'allowPartialResults: false', if some shard down, find fails.");
+assert.commandFailed(coll.runCommand({find: collName, allowPartialResults: false}));
- jsTest.log(
- "With 'allowPartialResults: true', if some shard down, find succeeds with partial results");
- findRes = assert.commandWorked(coll.runCommand({find: collName, allowPartialResults: true}));
- assert.commandWorked(findRes);
- assert.eq(nDocs / 2, findRes.cursor.firstBatch.length);
+jsTest.log(
+ "With 'allowPartialResults: true', if some shard down, find succeeds with partial results");
+findRes = assert.commandWorked(coll.runCommand({find: collName, allowPartialResults: true}));
+assert.commandWorked(findRes);
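+// Each shard owned one 50-document chunk, so only the surviving shard's half is returned.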
+assert.eq(nDocs / 2, findRes.cursor.firstBatch.length);
- jsTest.log("The allowPartialResults option does not currently apply to aggregation.");
- assert.commandFailedWithCode(coll.runCommand({
- aggregate: collName,
- pipeline: [{$project: {_id: 1}}],
- cursor: {},
- allowPartialResults: true
- }),
- ErrorCodes.FailedToParse);
+jsTest.log("The allowPartialResults option does not currently apply to aggregation.");
+assert.commandFailedWithCode(coll.runCommand({
+ aggregate: collName,
+ pipeline: [{$project: {_id: 1}}],
+ cursor: {},
+ allowPartialResults: true
+}),
+ ErrorCodes.FailedToParse);
- st.stop();
+st.stop();
}());
diff --git a/jstests/sharding/arbiters_do_not_use_cluster_time.js b/jstests/sharding/arbiters_do_not_use_cluster_time.js
index f1a5b77dc83..03bf0f32f5a 100644
--- a/jstests/sharding/arbiters_do_not_use_cluster_time.js
+++ b/jstests/sharding/arbiters_do_not_use_cluster_time.js
@@ -3,29 +3,29 @@
*/
(function() {
- "use strict";
- let st = new ShardingTest(
- {shards: {rs0: {nodes: [{arbiter: false}, {arbiter: false}, {arbiter: true}]}}});
+"use strict";
+let st = new ShardingTest(
+ {shards: {rs0: {nodes: [{arbiter: false}, {arbiter: false}, {arbiter: true}]}}});
- jsTestLog("Started ShardingTest");
+jsTestLog("Started ShardingTest");
- let secondaries = st.rs0.getSecondaries();
+let secondaries = st.rs0.getSecondaries();
- let foundArbiter = false;
- for (let i = 0; i < secondaries.length; i++) {
- let conn = secondaries[i].getDB("admin");
- const res = conn.runCommand({isMaster: 1});
- if (res["arbiterOnly"]) {
- assert(!foundArbiter);
- foundArbiter = true;
- // nodes with disabled clocks do not gossip clusterTime and operationTime.
- assert.eq(res.hasOwnProperty("$clusterTime"), false);
- assert.eq(res.hasOwnProperty("operationTime"), false);
- } else {
- assert.eq(res.hasOwnProperty("$clusterTime"), true);
- assert.eq(res.hasOwnProperty("operationTime"), true);
- }
+let foundArbiter = false;
+for (let i = 0; i < secondaries.length; i++) {
+ let conn = secondaries[i].getDB("admin");
+ const res = conn.runCommand({isMaster: 1});
+ if (res["arbiterOnly"]) {
+ assert(!foundArbiter);
+ foundArbiter = true;
+ // nodes with disabled clocks do not gossip clusterTime and operationTime.
+ assert.eq(res.hasOwnProperty("$clusterTime"), false);
+ assert.eq(res.hasOwnProperty("operationTime"), false);
+ } else {
+ assert.eq(res.hasOwnProperty("$clusterTime"), true);
+ assert.eq(res.hasOwnProperty("operationTime"), true);
}
- assert.eq(foundArbiter, true);
- st.stop();
+}
+assert.eq(foundArbiter, true);
+st.stop();
})();
diff --git a/jstests/sharding/array_shard_key.js b/jstests/sharding/array_shard_key.js
index cdbe4bda885..2eb7dd102b6 100644
--- a/jstests/sharding/array_shard_key.js
+++ b/jstests/sharding/array_shard_key.js
@@ -1,113 +1,112 @@
// Ensure you can't shard on an array key
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 3});
+var st = new ShardingTest({shards: 3});
- var mongos = st.s0;
+var mongos = st.s0;
- var coll = mongos.getCollection("TestDB.foo");
+var coll = mongos.getCollection("TestDB.foo");
- st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
+st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
- printjson(mongos.getDB("config").chunks.find().toArray());
+printjson(mongos.getDB("config").chunks.find().toArray());
- print("1: insert some invalid data");
+print("1: insert some invalid data");
- var value = null;
+var value = null;
- // Insert an object with invalid array key
- assert.writeError(coll.insert({i: [1, 2]}));
+// Insert an object with invalid array key
+assert.writeError(coll.insert({i: [1, 2]}));
- // Insert an object with all the right fields, but an invalid array val for _id
- assert.writeError(coll.insert({_id: [1, 2], i: 3}));
+// Insert an object with all the right fields, but an invalid array val for _id
+assert.writeError(coll.insert({_id: [1, 2], i: 3}));
- // Insert an object with valid array key
- assert.writeOK(coll.insert({i: 1}));
+// Insert an object with valid array key
+assert.writeOK(coll.insert({i: 1}));
- // Update the value with valid other field
- value = coll.findOne({i: 1});
- assert.writeOK(coll.update(value, {$set: {j: 2}}));
+// Update the value with valid other field
+value = coll.findOne({i: 1});
+assert.writeOK(coll.update(value, {$set: {j: 2}}));
- // Update the value with invalid other fields
- value = coll.findOne({i: 1});
- assert.writeError(coll.update(value, Object.merge(value, {i: [3]})));
+// Update the value with invalid other fields
+value = coll.findOne({i: 1});
+assert.writeError(coll.update(value, Object.merge(value, {i: [3]})));
- // Multi-update the value with invalid other fields
- value = coll.findOne({i: 1});
- assert.writeError(coll.update(value, Object.merge(value, {i: [3, 4]}), false, true));
+// Multi-update the value with invalid other fields
+value = coll.findOne({i: 1});
+assert.writeError(coll.update(value, Object.merge(value, {i: [3, 4]}), false, true));
- // Multi-update the value with other fields (won't work, but no error)
- value = coll.findOne({i: 1});
- assert.writeOK(coll.update(Object.merge(value, {i: [1, 1]}), {$set: {k: 4}}, false, true));
+// Multi-update the value with other fields (won't work, but no error)
+value = coll.findOne({i: 1});
+assert.writeOK(coll.update(Object.merge(value, {i: [1, 1]}), {$set: {k: 4}}, false, true));
- // Query the value with other fields (won't work, but no error)
- value = coll.findOne({i: 1});
- coll.find(Object.merge(value, {i: [1, 1]})).toArray();
+// Query the value with other fields (won't work, but no error)
+value = coll.findOne({i: 1});
+coll.find(Object.merge(value, {i: [1, 1]})).toArray();
- // Can't remove using multikey, but shouldn't error
- value = coll.findOne({i: 1});
- coll.remove(Object.extend(value, {i: [1, 2, 3, 4]}));
+// Can't remove using multikey, but shouldn't error
+value = coll.findOne({i: 1});
+coll.remove(Object.extend(value, {i: [1, 2, 3, 4]}));
- // Can't remove using multikey, but shouldn't error
- value = coll.findOne({i: 1});
- assert.writeOK(coll.remove(Object.extend(value, {i: [1, 2, 3, 4, 5]})));
- assert.eq(coll.find().itcount(), 1);
+// Can't remove using multikey, but shouldn't error
+value = coll.findOne({i: 1});
+assert.writeOK(coll.remove(Object.extend(value, {i: [1, 2, 3, 4, 5]})));
+assert.eq(coll.find().itcount(), 1);
- value = coll.findOne({i: 1});
- assert.writeOK(coll.remove(Object.extend(value, {i: 1})));
- assert.eq(coll.find().itcount(), 0);
+value = coll.findOne({i: 1});
+assert.writeOK(coll.remove(Object.extend(value, {i: 1})));
+assert.eq(coll.find().itcount(), 0);
- coll.ensureIndex({_id: 1, i: 1, j: 1});
- // Can insert document that will make index into a multi-key as long as it's not part of shard
- // key.
- coll.remove({});
- assert.writeOK(coll.insert({i: 1, j: [1, 2]}));
- assert.eq(coll.find().itcount(), 1);
+coll.ensureIndex({_id: 1, i: 1, j: 1});
+// Can insert document that will make index into a multi-key as long as it's not part of shard
+// key.
+coll.remove({});
+assert.writeOK(coll.insert({i: 1, j: [1, 2]}));
+assert.eq(coll.find().itcount(), 1);
- // Same is true for updates.
- coll.remove({});
- coll.insert({_id: 1, i: 1});
- assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}));
- assert.eq(coll.find().itcount(), 1);
+// Same is true for updates.
+coll.remove({});
+coll.insert({_id: 1, i: 1});
+assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}));
+assert.eq(coll.find().itcount(), 1);
- // Same for upserts.
- coll.remove({});
- assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}, true));
- assert.eq(coll.find().itcount(), 1);
+// Same for upserts.
+coll.remove({});
+assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}, true));
+assert.eq(coll.find().itcount(), 1);
- printjson(
- "Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey");
+printjson("Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey");
- // Insert a bunch of data then shard over key which is an array
- var coll = mongos.getCollection("" + coll + "2");
- for (var i = 0; i < 10; i++) {
- // TODO : does not check weird cases like [ i, i ]
- assert.writeOK(coll.insert({i: [i, i + 1]}));
- }
+// Insert a bunch of data then shard over key which is an array
+var coll = mongos.getCollection("" + coll + "2");
+for (var i = 0; i < 10; i++) {
+ // TODO : does not check weird cases like [ i, i ]
+ assert.writeOK(coll.insert({i: [i, i + 1]}));
+}
- coll.ensureIndex({_id: 1, i: 1});
+coll.ensureIndex({_id: 1, i: 1});
- try {
- st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
- } catch (e) {
- print("Correctly threw error on sharding with multikey index.");
- }
+try {
+ st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
+} catch (e) {
+ print("Correctly threw error on sharding with multikey index.");
+}
- st.printShardingStatus();
+st.printShardingStatus();
- // Insert a bunch of data then shard over key which is not an array
- var coll = mongos.getCollection("" + coll + "3");
- for (var i = 0; i < 10; i++) {
- // TODO : does not check weird cases like [ i, i ]
- assert.writeOK(coll.insert({i: i}));
- }
+// Insert a bunch of data then shard over key which is not an array
+var coll = mongos.getCollection("" + coll + "3");
+for (var i = 0; i < 10; i++) {
+ // TODO : does not check weird cases like [ i, i ]
+ assert.writeOK(coll.insert({i: i}));
+}
- coll.ensureIndex({_id: 1, i: 1});
+coll.ensureIndex({_id: 1, i: 1});
- st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
+st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
- st.printShardingStatus();
+st.printShardingStatus();
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index d2e46ff1ba4..61b5c273315 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -7,352 +7,342 @@
* @tags: [resource_intensive]
*/
(function() {
- 'use strict';
- load("jstests/replsets/rslib.js");
+'use strict';
+load("jstests/replsets/rslib.js");
- // Replica set nodes started with --shardsvr do not enable key generation until they are added
- // to a sharded cluster and reject commands with gossiped clusterTime from users without the
- // advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
- // briefly authenticates as __system and recieves clusterTime metadata then will fail trying to
- // gossip that time later in setup.
- //
- // TODO SERVER-32672: remove this flag.
- TestData.skipGossipingClusterTime = true;
+// Replica set nodes started with --shardsvr do not enable key generation until they are added
+// to a sharded cluster and reject commands with gossiped clusterTime from users without the
+// advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
+// briefly authenticates as __system and receives clusterTime metadata, then fails when trying to
+// gossip that time later in setup.
+//
+// TODO SERVER-32672: remove this flag.
+TestData.skipGossipingClusterTime = true;
- var adminUser = {db: "admin", username: "foo", password: "bar"};
+var adminUser = {db: "admin", username: "foo", password: "bar"};
- var testUser = {db: "test", username: "bar", password: "baz"};
+var testUser = {db: "test", username: "bar", password: "baz"};
- var testUserReadOnly = {db: "test", username: "sad", password: "bat"};
+var testUserReadOnly = {db: "test", username: "sad", password: "bat"};
- function login(userObj, thingToUse) {
- if (!thingToUse) {
- thingToUse = s;
- }
-
- thingToUse.getDB(userObj.db).auth(userObj.username, userObj.password);
+function login(userObj, thingToUse) {
+ if (!thingToUse) {
+ thingToUse = s;
}
- function logout(userObj, thingToUse) {
- if (!thingToUse)
- thingToUse = s;
+ thingToUse.getDB(userObj.db).auth(userObj.username, userObj.password);
+}
- s.getDB(userObj.db).runCommand({logout: 1});
- }
+function logout(userObj, thingToUse) {
+ if (!thingToUse)
+ thingToUse = s;
- function getShardName(rsTest) {
- var master = rsTest.getPrimary();
- var config = master.getDB("local").system.replset.findOne();
- var members = config.members.map(function(elem) {
- return elem.host;
- });
- return config._id + "/" + members.join(",");
- }
+ s.getDB(userObj.db).runCommand({logout: 1});
+}
- var s = new ShardingTest({
- name: "auth",
- mongos: 1,
- shards: 0,
- other: {keyFile: "jstests/libs/key1", chunkSize: 1, enableAutoSplit: false},
+function getShardName(rsTest) {
+ var master = rsTest.getPrimary();
+ var config = master.getDB("local").system.replset.findOne();
+ var members = config.members.map(function(elem) {
+ return elem.host;
});
+ return config._id + "/" + members.join(",");
+}
+
+var s = new ShardingTest({
+ name: "auth",
+ mongos: 1,
+ shards: 0,
+ other: {keyFile: "jstests/libs/key1", chunkSize: 1, enableAutoSplit: false},
+});
+
+if (s.getDB('admin').runCommand('buildInfo').bits < 64) {
+ print('Skipping test on 32-bit platforms');
+ return;
+}
+
+print("Configuration: Add user " + tojson(adminUser));
+s.getDB(adminUser.db)
+ .createUser({user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
+login(adminUser);
+
+// Set the chunk size, disable the secondary throttle (so the test doesn't run so slow)
+assert.writeOK(
+ s.getDB("config").settings.update({_id: "balancer"},
+ {$set: {"_secondaryThrottle": false, "_waitForDelete": true}},
+ {upsert: true}));
+
+printjson(s.getDB("config").settings.find().toArray());
+
+print("Restart mongos with different auth options");
+s.restartMongos(0);
+login(adminUser);
+
+var d1 = new ReplSetTest({name: "d1", nodes: 3, useHostName: true, waitForKeys: false});
+d1.startSet({keyFile: "jstests/libs/key2", shardsvr: ""});
+d1.initiate();
+
+print("d1 initiated");
+var shardName = authutil.asCluster(d1.nodes, "jstests/libs/key2", function() {
+ return getShardName(d1);
+});
+
+print("adding shard w/out auth " + shardName);
+logout(adminUser);
+
+var result = s.getDB("admin").runCommand({addShard: shardName});
+printjson(result);
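+// Without authentication the addShard command must fail with Unauthorized (error code 13).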
+assert.eq(result.code, 13);
+
+login(adminUser);
+
+print("adding shard w/wrong key " + shardName);
+
+var thrown = false;
+try {
+ result = s.adminCommand({addShard: shardName});
+} catch (e) {
+ thrown = true;
+ printjson(e);
+}
+assert(thrown);
- if (s.getDB('admin').runCommand('buildInfo').bits < 64) {
- print('Skipping test on 32-bit platforms');
- return;
- }
-
- print("Configuration: Add user " + tojson(adminUser));
- s.getDB(adminUser.db).createUser({
- user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles
- });
- login(adminUser);
-
- // Set the chunk size, disable the secondary throttle (so the test doesn't run so slow)
- assert.writeOK(s.getDB("config").settings.update(
- {_id: "balancer"},
- {$set: {"_secondaryThrottle": false, "_waitForDelete": true}},
- {upsert: true}));
-
- printjson(s.getDB("config").settings.find().toArray());
-
- print("Restart mongos with different auth options");
- s.restartMongos(0);
- login(adminUser);
-
- var d1 = new ReplSetTest({name: "d1", nodes: 3, useHostName: true, waitForKeys: false});
- d1.startSet({keyFile: "jstests/libs/key2", shardsvr: ""});
- d1.initiate();
+print("start rs w/correct key");
- print("d1 initiated");
- var shardName = authutil.asCluster(d1.nodes, "jstests/libs/key2", function() {
- return getShardName(d1);
- });
+d1.stopSet();
+d1.startSet({keyFile: "jstests/libs/key1", restart: true});
+d1.initiate();
- print("adding shard w/out auth " + shardName);
- logout(adminUser);
+var master = d1.getPrimary();
- var result = s.getDB("admin").runCommand({addShard: shardName});
- printjson(result);
- assert.eq(result.code, 13);
+print("adding shard w/auth " + shardName);
- login(adminUser);
+result = s.getDB("admin").runCommand({addShard: shardName});
+assert.eq(result.ok, 1, tojson(result));
- print("adding shard w/wrong key " + shardName);
+s.getDB("admin").runCommand({enableSharding: "test"});
+s.getDB("admin").runCommand({shardCollection: "test.foo", key: {x: 1}});
- var thrown = false;
- try {
- result = s.adminCommand({addShard: shardName});
- } catch (e) {
- thrown = true;
- printjson(e);
- }
- assert(thrown);
+d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
- print("start rs w/correct key");
+s.getDB(testUser.db)
+ .createUser({user: testUser.username, pwd: testUser.password, roles: jsTest.basicUserRoles});
+s.getDB(testUserReadOnly.db).createUser({
+ user: testUserReadOnly.username,
+ pwd: testUserReadOnly.password,
+ roles: jsTest.readOnlyUserRoles
+});
- d1.stopSet();
- d1.startSet({keyFile: "jstests/libs/key1", restart: true});
- d1.initiate();
+logout(adminUser);
- var master = d1.getPrimary();
+print("query try");
+var e = assert.throws(function() {
+ s.s.getDB("foo").bar.findOne();
+});
+printjson(e);
- print("adding shard w/auth " + shardName);
+print("cmd try");
+assert.eq(0, s.s.getDB("foo").runCommand({listDatabases: 1}).ok);
- result = s.getDB("admin").runCommand({addShard: shardName});
- assert.eq(result.ok, 1, tojson(result));
+print("insert try 1");
+s.getDB("test").foo.insert({x: 1});
- s.getDB("admin").runCommand({enableSharding: "test"});
- s.getDB("admin").runCommand({shardCollection: "test.foo", key: {x: 1}});
+login(testUser);
+assert.eq(s.getDB("test").foo.findOne(), null);
- d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
-
- s.getDB(testUser.db).createUser({
- user: testUser.username,
- pwd: testUser.password,
- roles: jsTest.basicUserRoles
- });
- s.getDB(testUserReadOnly.db).createUser({
- user: testUserReadOnly.username,
- pwd: testUserReadOnly.password,
- roles: jsTest.readOnlyUserRoles
- });
+print("insert try 2");
+assert.writeOK(s.getDB("test").foo.insert({x: 1}));
+assert.eq(1, s.getDB("test").foo.find().itcount(), tojson(result));
- logout(adminUser);
+logout(testUser);
- print("query try");
- var e = assert.throws(function() {
- s.s.getDB("foo").bar.findOne();
- });
- printjson(e);
+var d2 = new ReplSetTest({name: "d2", nodes: 3, useHostName: true, waitForKeys: false});
+d2.startSet({keyFile: "jstests/libs/key1", shardsvr: ""});
+d2.initiate();
+d2.awaitSecondaryNodes();
- print("cmd try");
- assert.eq(0, s.s.getDB("foo").runCommand({listDatabases: 1}).ok);
+shardName = authutil.asCluster(d2.nodes, "jstests/libs/key1", function() {
+ return getShardName(d2);
+});
- print("insert try 1");
- s.getDB("test").foo.insert({x: 1});
+print("adding shard " + shardName);
+login(adminUser);
+print("logged in");
+result = s.getDB("admin").runCommand({addShard: shardName});
- login(testUser);
- assert.eq(s.getDB("test").foo.findOne(), null);
+awaitRSClientHosts(s.s, d1.nodes, {ok: true});
+awaitRSClientHosts(s.s, d2.nodes, {ok: true});
- print("insert try 2");
- assert.writeOK(s.getDB("test").foo.insert({x: 1}));
- assert.eq(1, s.getDB("test").foo.find().itcount(), tojson(result));
+s.getDB("test").foo.remove({});
- logout(testUser);
+var num = 10000;
+assert.commandWorked(s.s.adminCommand({split: "test.foo", middle: {x: num / 2}}));
+var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
+for (i = 0; i < num; i++) {
+ bulk.insert({_id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market"});
+}
+assert.writeOK(bulk.execute());
- var d2 = new ReplSetTest({name: "d2", nodes: 3, useHostName: true, waitForKeys: false});
- d2.startSet({keyFile: "jstests/libs/key1", shardsvr: ""});
- d2.initiate();
- d2.awaitSecondaryNodes();
+s.startBalancer(60000);
- shardName = authutil.asCluster(d2.nodes, "jstests/libs/key1", function() {
- return getShardName(d2);
- });
+assert.soon(function() {
+ var d1Chunks = s.getDB("config").chunks.count({ns: 'test.foo', shard: "d1"});
+ var d2Chunks = s.getDB("config").chunks.count({ns: 'test.foo', shard: "d2"});
+ var totalChunks = s.getDB("config").chunks.count({ns: 'test.foo'});
- print("adding shard " + shardName);
- login(adminUser);
- print("logged in");
- result = s.getDB("admin").runCommand({addShard: shardName});
+ print("chunks: " + d1Chunks + " " + d2Chunks + " " + totalChunks);
- awaitRSClientHosts(s.s, d1.nodes, {ok: true});
- awaitRSClientHosts(s.s, d2.nodes, {ok: true});
+ return d1Chunks > 0 && d2Chunks > 0 && (d1Chunks + d2Chunks == totalChunks);
+}, "Chunks failed to balance", 60000, 5000);
- s.getDB("test").foo.remove({});
+// SERVER-33753: count() without predicate can be wrong on sharded collections.
+// assert.eq(s.getDB("test").foo.count(), num+1);
+var numDocs = s.getDB("test").foo.find().itcount();
+if (numDocs != num) {
+    // Missing documents. At this point we're already in a failure mode; the code in this
+    // statement is only there to get a better idea of how/why the test is failing.
- var num = 10000;
- assert.commandWorked(s.s.adminCommand({split: "test.foo", middle: {x: num / 2}}));
- var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
- for (i = 0; i < num; i++) {
- bulk.insert(
- {_id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market"});
- }
- assert.writeOK(bulk.execute());
-
- s.startBalancer(60000);
-
- assert.soon(function() {
- var d1Chunks = s.getDB("config").chunks.count({ns: 'test.foo', shard: "d1"});
- var d2Chunks = s.getDB("config").chunks.count({ns: 'test.foo', shard: "d2"});
- var totalChunks = s.getDB("config").chunks.count({ns: 'test.foo'});
-
- print("chunks: " + d1Chunks + " " + d2Chunks + " " + totalChunks);
-
- return d1Chunks > 0 && d2Chunks > 0 && (d1Chunks + d2Chunks == totalChunks);
- }, "Chunks failed to balance", 60000, 5000);
-
- // SERVER-33753: count() without predicate can be wrong on sharded collections.
- // assert.eq(s.getDB("test").foo.count(), num+1);
- var numDocs = s.getDB("test").foo.find().itcount();
- if (numDocs != num) {
- // Missing documents. At this point we're already in a failure mode, the code in this
- // statement
- // is to get a better idea how/why it's failing.
-
- var numDocsSeen = 0;
- var lastDocNumber = -1;
- var missingDocNumbers = [];
- var docs = s.getDB("test").foo.find().sort({x: 1}).toArray();
- for (var i = 0; i < docs.length; i++) {
- if (docs[i].x != lastDocNumber + 1) {
- for (var missing = lastDocNumber + 1; missing < docs[i].x; missing++) {
- missingDocNumbers.push(missing);
- }
+ var numDocsSeen = 0;
+ var lastDocNumber = -1;
+ var missingDocNumbers = [];
+ var docs = s.getDB("test").foo.find().sort({x: 1}).toArray();
+ for (var i = 0; i < docs.length; i++) {
+ if (docs[i].x != lastDocNumber + 1) {
+ for (var missing = lastDocNumber + 1; missing < docs[i].x; missing++) {
+ missingDocNumbers.push(missing);
}
- lastDocNumber = docs[i].x;
- numDocsSeen++;
}
- assert.eq(numDocs, numDocsSeen, "More docs discovered on second find()");
- assert.eq(num - numDocs, missingDocNumbers.length);
-
- load('jstests/libs/trace_missing_docs.js');
-
- for (var i = 0; i < missingDocNumbers.length; i++) {
- jsTest.log("Tracing doc: " + missingDocNumbers[i]);
- traceMissingDoc(s.getDB("test").foo,
- {_id: missingDocNumbers[i], x: missingDocNumbers[i]});
- }
-
- assert(false,
- "Number of docs found does not equal the number inserted. Missing docs: " +
- missingDocNumbers);
+ lastDocNumber = docs[i].x;
+ numDocsSeen++;
}
+ assert.eq(numDocs, numDocsSeen, "More docs discovered on second find()");
+ assert.eq(num - numDocs, missingDocNumbers.length);
- // We're only sure we aren't duplicating documents iff there's no balancing going on here
- // This call also waits for any ongoing balancing to stop
- s.stopBalancer(60000);
-
- var cursor = s.getDB("test").foo.find({x: {$lt: 500}});
+ load('jstests/libs/trace_missing_docs.js');
- var count = 0;
- while (cursor.hasNext()) {
- cursor.next();
- count++;
+ for (var i = 0; i < missingDocNumbers.length; i++) {
+ jsTest.log("Tracing doc: " + missingDocNumbers[i]);
+ traceMissingDoc(s.getDB("test").foo, {_id: missingDocNumbers[i], x: missingDocNumbers[i]});
}
- assert.eq(count, 500);
-
- logout(adminUser);
-
- d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
- d2.waitForState(d2.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
-
- authutil.asCluster(d1.nodes, "jstests/libs/key1", function() {
- d1.awaitReplication();
- });
- authutil.asCluster(d2.nodes, "jstests/libs/key1", function() {
- d2.awaitReplication();
- });
-
- // add admin on shard itself, hack to prevent localhost auth bypass
- d1.getPrimary()
- .getDB(adminUser.db)
- .createUser(
- {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 60000});
- d2.getPrimary()
- .getDB(adminUser.db)
- .createUser(
- {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 60000});
-
- login(testUser);
- print("testing map reduce");
-
- // Sharded map reduce can be tricky since all components talk to each other. For example
- // SERVER-4114 is triggered when 1 mongod connects to another for final reduce it's not
- // properly tested here since addresses are localhost, which is more permissive.
- var res = s.getDB("test").runCommand({
- mapreduce: "foo",
- map: function() {
- emit(this.x, 1);
- },
- reduce: function(key, values) {
- return values.length;
- },
- out: "mrout"
- });
- printjson(res);
- assert.commandWorked(res);
-
- // Check that dump doesn't get stuck with auth
- var exitCode = MongoRunner.runMongoTool("mongodump", {
- host: s.s.host,
- db: testUser.db,
- username: testUser.username,
- password: testUser.password,
- authenticationMechanism: "SCRAM-SHA-1",
- });
- assert.eq(0, exitCode, "mongodump failed to run with authentication enabled");
-
- // Test read only users
- print("starting read only tests");
-
- var readOnlyS = new Mongo(s.getDB("test").getMongo().host);
- var readOnlyDB = readOnlyS.getDB("test");
-
- print(" testing find that should fail");
- assert.throws(function() {
- readOnlyDB.foo.findOne();
- });
-
- print(" logging in");
- login(testUserReadOnly, readOnlyS);
-
- print(" testing find that should work");
+ assert(false,
+ "Number of docs found does not equal the number inserted. Missing docs: " +
+ missingDocNumbers);
+}
+
+// We're only sure we aren't duplicating documents iff there's no balancing going on here
+// This call also waits for any ongoing balancing to stop
+s.stopBalancer(60000);
+
+var cursor = s.getDB("test").foo.find({x: {$lt: 500}});
+
+var count = 0;
+while (cursor.hasNext()) {
+ cursor.next();
+ count++;
+}
+
+assert.eq(count, 500);
+
+logout(adminUser);
+
+d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
+d2.waitForState(d2.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
+
+authutil.asCluster(d1.nodes, "jstests/libs/key1", function() {
+ d1.awaitReplication();
+});
+authutil.asCluster(d2.nodes, "jstests/libs/key1", function() {
+ d2.awaitReplication();
+});
+
+// add admin on shard itself, hack to prevent localhost auth bypass
+d1.getPrimary()
+ .getDB(adminUser.db)
+ .createUser({user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 60000});
+d2.getPrimary()
+ .getDB(adminUser.db)
+ .createUser({user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 60000});
+
+login(testUser);
+print("testing map reduce");
+
+// Sharded map reduce can be tricky since all components talk to each other. For example,
+// SERVER-4114 is triggered when one mongod connects to another for the final reduce; it's not
+// properly tested here since the addresses are localhost, which is more permissive.
+var res = s.getDB("test").runCommand({
+ mapreduce: "foo",
+ map: function() {
+ emit(this.x, 1);
+ },
+ reduce: function(key, values) {
+ return values.length;
+ },
+ out: "mrout"
+});
+printjson(res);
+assert.commandWorked(res);
+
+// Check that dump doesn't get stuck with auth
+var exitCode = MongoRunner.runMongoTool("mongodump", {
+ host: s.s.host,
+ db: testUser.db,
+ username: testUser.username,
+ password: testUser.password,
+ authenticationMechanism: "SCRAM-SHA-1",
+});
+assert.eq(0, exitCode, "mongodump failed to run with authentication enabled");
+
+// Test read only users
+print("starting read only tests");
+
+var readOnlyS = new Mongo(s.getDB("test").getMongo().host);
+var readOnlyDB = readOnlyS.getDB("test");
+
+print(" testing find that should fail");
+assert.throws(function() {
readOnlyDB.foo.findOne();
-
- print(" testing write that should fail");
- assert.writeError(readOnlyDB.foo.insert({eliot: 1}));
-
- print(" testing read command (should succeed)");
- assert.commandWorked(readOnlyDB.runCommand({count: "foo"}));
-
- print("make sure currentOp/killOp fail");
- assert.commandFailed(readOnlyDB.currentOp());
- assert.commandFailed(readOnlyDB.killOp(123));
-
- // fsyncUnlock doesn't work in mongos anyway, so no need check authorization for it
- /*
- broken because of SERVER-4156
- print( " testing write command (should fail)" );
- assert.commandFailed(readOnlyDB.runCommand(
- {mapreduce : "foo",
- map : function() { emit(this.y, 1); },
- reduce : function(key, values) { return values.length; },
- out:"blarg"
- }));
- */
-
- print(" testing logout (should succeed)");
- assert.commandWorked(readOnlyDB.runCommand({logout: 1}));
-
- print("make sure currentOp/killOp fail again");
- assert.commandFailed(readOnlyDB.currentOp());
- assert.commandFailed(readOnlyDB.killOp(123));
-
- s.stop();
- d1.stopSet();
- d2.stopSet();
+});
+
+print(" logging in");
+login(testUserReadOnly, readOnlyS);
+
+print(" testing find that should work");
+readOnlyDB.foo.findOne();
+
+print(" testing write that should fail");
+assert.writeError(readOnlyDB.foo.insert({eliot: 1}));
+
+print(" testing read command (should succeed)");
+assert.commandWorked(readOnlyDB.runCommand({count: "foo"}));
+
+print("make sure currentOp/killOp fail");
+assert.commandFailed(readOnlyDB.currentOp());
+assert.commandFailed(readOnlyDB.killOp(123));
+
+// fsyncUnlock doesn't work in mongos anyway, so no need to check authorization for it
+/*
+broken because of SERVER-4156
+print( " testing write command (should fail)" );
+assert.commandFailed(readOnlyDB.runCommand(
+ {mapreduce : "foo",
+ map : function() { emit(this.y, 1); },
+ reduce : function(key, values) { return values.length; },
+ out:"blarg"
+ }));
+*/
+
+print(" testing logout (should succeed)");
+assert.commandWorked(readOnlyDB.runCommand({logout: 1}));
+
+print("make sure currentOp/killOp fail again");
+assert.commandFailed(readOnlyDB.currentOp());
+assert.commandFailed(readOnlyDB.killOp(123));
+
+s.stop();
+d1.stopSet();
+d2.stopSet();
})();
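
The assert.eq(result.code, 13) check after the unauthenticated addShard attempt above asserts an
Unauthorized error (error code 13 is ErrorCodes.Unauthorized). With the shell's error-code helpers,
the same intent reads roughly as this sketch:

    // addShard issued over a connection that has not authenticated is expected
    // to be rejected with Unauthorized (error code 13).
    assert.commandFailedWithCode(s.getDB("admin").runCommand({addShard: shardName}),
                                 ErrorCodes.Unauthorized);
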
diff --git a/jstests/sharding/auth2.js b/jstests/sharding/auth2.js
index f3ac5caf1c7..d1d6cb20156 100644
--- a/jstests/sharding/auth2.js
+++ b/jstests/sharding/auth2.js
@@ -1,30 +1,26 @@
(function() {
- 'use strict';
+'use strict';
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest({
- shards: 2,
- other: {
- chunkSize: 1,
- useHostname: true,
- keyFile: 'jstests/libs/key1',
- shardAsReplicaSet: false
- },
- });
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest({
+ shards: 2,
+ other:
+ {chunkSize: 1, useHostname: true, keyFile: 'jstests/libs/key1', shardAsReplicaSet: false},
+});
- var mongos = st.s;
- var adminDB = mongos.getDB('admin');
- var db = mongos.getDB('test');
+var mongos = st.s;
+var adminDB = mongos.getDB('admin');
+var db = mongos.getDB('test');
- adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
+adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
- jsTestLog("Add user was successful");
+jsTestLog("Add user was successful");
- // Test for SERVER-6549, make sure that repeatedly logging in always passes.
- for (var i = 0; i < 100; i++) {
- adminDB = new Mongo(mongos.host).getDB('admin');
- assert(adminDB.auth('admin', 'password'), "Auth failed on attempt #: " + i);
- }
+// Test for SERVER-6549, make sure that repeatedly logging in always passes.
+for (var i = 0; i < 100; i++) {
+ adminDB = new Mongo(mongos.host).getDB('admin');
+ assert(adminDB.auth('admin', 'password'), "Auth failed on attempt #: " + i);
+}
- st.stop();
+st.stop();
})();
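
Both auth.js and auth2.js above run against keyfile-authenticated clusters; auth.js additionally
wraps its direct replica-set operations in authutil.asCluster, which authenticates to the given
nodes as the internal __system user via the keyfile, runs the callback, logs out again, and passes
the callback's return value through. A minimal sketch of that idiom, reusing the names from
auth.js:

    // Run privileged replica-set operations under keyfile (__system) auth while
    // the surrounding test connection stays unauthenticated.
    var shardName = authutil.asCluster(d1.nodes, "jstests/libs/key1", function() {
        d1.awaitReplication();
        return getShardName(d1);
    });
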
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index cd75ddd5cec..8afe4facc2e 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -2,308 +2,305 @@
* This tests using DB commands with authentication enabled when sharded.
*/
(function() {
- 'use strict';
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- load("jstests/replsets/rslib.js");
-
- // Replica set nodes started with --shardsvr do not enable key generation until they are added
- // to a sharded cluster and reject commands with gossiped clusterTime from users without the
- // advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
- // briefly authenticates as __system and recieves clusterTime metadata then will fail trying to
- // gossip that time later in setup.
- //
- // TODO SERVER-32672: remove this flag.
- TestData.skipGossipingClusterTime = true;
-
- var st = new ShardingTest({
- shards: 2,
- rs: {oplogSize: 10, useHostname: false},
- other: {keyFile: 'jstests/libs/key1', useHostname: false, chunkSize: 2},
- });
-
- var mongos = st.s;
- var adminDB = mongos.getDB('admin');
- var configDB = mongos.getDB('config');
- var testDB = mongos.getDB('test');
+'use strict';
- jsTestLog('Setting up initial users');
- var rwUser = 'rwUser';
- var roUser = 'roUser';
- var password = 'password';
- var expectedDocs = 1000;
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
- adminDB.createUser({user: rwUser, pwd: password, roles: jsTest.adminUserRoles});
+load("jstests/replsets/rslib.js");
- assert(adminDB.auth(rwUser, password));
+// Replica set nodes started with --shardsvr do not enable key generation until they are added
+// to a sharded cluster and reject commands with gossiped clusterTime from users without the
+// advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
+// briefly authenticates as __system and receives clusterTime metadata, then will fail trying to
+// gossip that time later in setup.
+//
+// TODO SERVER-32672: remove this flag.
+TestData.skipGossipingClusterTime = true;
- // Secondaries should be up here, since we awaitReplication in the ShardingTest, but we *don't*
- // wait for the mongos to explicitly detect them.
- awaitRSClientHosts(mongos, st.rs0.getSecondaries(), {ok: true, secondary: true});
- awaitRSClientHosts(mongos, st.rs1.getSecondaries(), {ok: true, secondary: true});
+var st = new ShardingTest({
+ shards: 2,
+ rs: {oplogSize: 10, useHostname: false},
+ other: {keyFile: 'jstests/libs/key1', useHostname: false, chunkSize: 2},
+});
- testDB.createUser({user: rwUser, pwd: password, roles: jsTest.basicUserRoles});
- testDB.createUser({user: roUser, pwd: password, roles: jsTest.readOnlyUserRoles});
+var mongos = st.s;
+var adminDB = mongos.getDB('admin');
+var configDB = mongos.getDB('config');
+var testDB = mongos.getDB('test');
- var authenticatedConn = new Mongo(mongos.host);
- authenticatedConn.getDB('admin').auth(rwUser, password);
+jsTestLog('Setting up initial users');
+var rwUser = 'rwUser';
+var roUser = 'roUser';
+var password = 'password';
+var expectedDocs = 1000;
- // Add user to shards to prevent localhost connections from having automatic full access
- st.rs0.getPrimary().getDB('admin').createUser(
- {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
- st.rs1.getPrimary().getDB('admin').createUser(
- {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
+adminDB.createUser({user: rwUser, pwd: password, roles: jsTest.adminUserRoles});
- jsTestLog('Creating initial data');
+assert(adminDB.auth(rwUser, password));
- st.adminCommand({enablesharding: "test"});
- st.ensurePrimaryShard('test', st.shard0.shardName);
- st.adminCommand({shardcollection: "test.foo", key: {i: 1, j: 1}});
+// Secondaries should be up here, since we awaitReplication in the ShardingTest, but we *don't*
+// wait for the mongos to explicitly detect them.
+awaitRSClientHosts(mongos, st.rs0.getSecondaries(), {ok: true, secondary: true});
+awaitRSClientHosts(mongos, st.rs1.getSecondaries(), {ok: true, secondary: true});
- // Balancer is stopped by default, so no moveChunks will interfere with the splits we're testing
+testDB.createUser({user: rwUser, pwd: password, roles: jsTest.basicUserRoles});
+testDB.createUser({user: roUser, pwd: password, roles: jsTest.readOnlyUserRoles});
- var str = 'a';
- while (str.length < 8000) {
- str += str;
- }
-
- for (var i = 0; i < 100; i++) {
- var bulk = testDB.foo.initializeUnorderedBulkOp();
- for (var j = 0; j < 10; j++) {
- bulk.insert({i: i, j: j, str: str});
- }
- assert.writeOK(bulk.execute({w: "majority"}));
- // Split the chunk we just inserted so that we have something to balance.
- assert.commandWorked(st.splitFind("test.foo", {i: i, j: 0}));
- }
+var authenticatedConn = new Mongo(mongos.host);
+authenticatedConn.getDB('admin').auth(rwUser, password);
- assert.eq(expectedDocs, testDB.foo.count());
+// Add user to shards to prevent localhost connections from having automatic full access
+st.rs0.getPrimary().getDB('admin').createUser(
+ {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
+st.rs1.getPrimary().getDB('admin').createUser(
+ {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
- // Wait for the balancer to start back up
- assert.writeOK(
- configDB.settings.update({_id: 'balancer'}, {$set: {_waitForDelete: true}}, true));
- st.startBalancer();
+jsTestLog('Creating initial data');
- // Make sure we've done at least some splitting, so the balancer will work
- assert.gt(configDB.chunks.find({ns: 'test.foo'}).count(), 2);
+st.adminCommand({enablesharding: "test"});
+st.ensurePrimaryShard('test', st.shard0.shardName);
+st.adminCommand({shardcollection: "test.foo", key: {i: 1, j: 1}});
- // Make sure we eventually balance all the chunks we've created
- assert.soon(function() {
- var x = st.chunkDiff("foo", "test");
- print("chunk diff: " + x);
- return x < 2 && configDB.locks.findOne({_id: 'test.foo'}).state == 0;
- }, "no balance happened", 5 * 60 * 1000);
+// Balancer is stopped by default, so no moveChunks will interfere with the splits we're testing
- var map = function() {
- emit(this.i, this.j);
- };
+var str = 'a';
+while (str.length < 8000) {
+ str += str;
+}
- var reduce = function(key, values) {
- var jCount = 0;
- values.forEach(function(j) {
- jCount += j;
+for (var i = 0; i < 100; i++) {
+ var bulk = testDB.foo.initializeUnorderedBulkOp();
+ for (var j = 0; j < 10; j++) {
+ bulk.insert({i: i, j: j, str: str});
+ }
+ assert.writeOK(bulk.execute({w: "majority"}));
+ // Split the chunk we just inserted so that we have something to balance.
+ assert.commandWorked(st.splitFind("test.foo", {i: i, j: 0}));
+}
+
+assert.eq(expectedDocs, testDB.foo.count());
+
+// Wait for the balancer to start back up
+assert.writeOK(configDB.settings.update({_id: 'balancer'}, {$set: {_waitForDelete: true}}, true));
+st.startBalancer();
+
+// Make sure we've done at least some splitting, so the balancer will work
+assert.gt(configDB.chunks.find({ns: 'test.foo'}).count(), 2);
+
+// Make sure we eventually balance all the chunks we've created
+assert.soon(function() {
+ var x = st.chunkDiff("foo", "test");
+ print("chunk diff: " + x);
+ return x < 2 && configDB.locks.findOne({_id: 'test.foo'}).state == 0;
+}, "no balance happened", 5 * 60 * 1000);
+
+var map = function() {
+ emit(this.i, this.j);
+};
+
+var reduce = function(key, values) {
+ var jCount = 0;
+ values.forEach(function(j) {
+ jCount += j;
+ });
+ return jCount;
+};
+
+var checkCommandSucceeded = function(db, cmdObj) {
+ print("Running command that should succeed: " + tojson(cmdObj));
+ var resultObj = assert.commandWorked(db.runCommand(cmdObj));
+ printjson(resultObj);
+ return resultObj;
+};
+
+var checkCommandFailed = function(db, cmdObj) {
+ print("Running command that should fail: " + tojson(cmdObj));
+ var resultObj = assert.commandFailed(db.runCommand(cmdObj));
+ printjson(resultObj);
+ return resultObj;
+};
+
+var checkReadOps = function(hasReadAuth) {
+ if (hasReadAuth) {
+ print("Checking read operations, should work");
+ assert.eq(expectedDocs, testDB.foo.find().itcount());
+ assert.eq(expectedDocs, testDB.foo.count());
+
+        // NOTE: This is an explicit check that GLE can be run with read prefs, not the result
+        // of the operations above.
+ assert.eq(null, testDB.runCommand({getlasterror: 1}).err);
+ checkCommandSucceeded(testDB, {dbstats: 1});
+ checkCommandSucceeded(testDB, {collstats: 'foo'});
+
+ // inline map-reduce works read-only
+ var res = checkCommandSucceeded(
+ testDB, {mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
+ assert.eq(100, res.results.length);
+ assert.eq(45, res.results[0].value);
+
+ res = checkCommandSucceeded(testDB, {
+ aggregate: 'foo',
+ pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}],
+ cursor: {}
});
- return jCount;
- };
-
- var checkCommandSucceeded = function(db, cmdObj) {
- print("Running command that should succeed: " + tojson(cmdObj));
- var resultObj = assert.commandWorked(db.runCommand(cmdObj));
- printjson(resultObj);
- return resultObj;
- };
-
- var checkCommandFailed = function(db, cmdObj) {
- print("Running command that should fail: " + tojson(cmdObj));
- var resultObj = assert.commandFailed(db.runCommand(cmdObj));
- printjson(resultObj);
- return resultObj;
- };
-
- var checkReadOps = function(hasReadAuth) {
- if (hasReadAuth) {
- print("Checking read operations, should work");
- assert.eq(expectedDocs, testDB.foo.find().itcount());
- assert.eq(expectedDocs, testDB.foo.count());
-
- // NOTE: This is an explicit check that GLE can be run with read prefs, not the result
- // of above.
- assert.eq(null, testDB.runCommand({getlasterror: 1}).err);
- checkCommandSucceeded(testDB, {dbstats: 1});
- checkCommandSucceeded(testDB, {collstats: 'foo'});
-
- // inline map-reduce works read-only
- var res = checkCommandSucceeded(
- testDB, {mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
- assert.eq(100, res.results.length);
- assert.eq(45, res.results[0].value);
-
- res = checkCommandSucceeded(testDB, {
- aggregate: 'foo',
- pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}],
- cursor: {}
- });
- assert.eq(4500, res.cursor.firstBatch[0].sum);
- } else {
- print("Checking read operations, should fail");
- assert.throws(function() {
- testDB.foo.find().itcount();
- });
- checkCommandFailed(testDB, {dbstats: 1});
- checkCommandFailed(testDB, {collstats: 'foo'});
- checkCommandFailed(testDB,
- {mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
- checkCommandFailed(testDB, {
- aggregate: 'foo',
- pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}],
- cursor: {}
- });
- }
- };
-
- var checkWriteOps = function(hasWriteAuth) {
- if (hasWriteAuth) {
- print("Checking write operations, should work");
- testDB.foo.insert({a: 1, i: 1, j: 1});
- var res = checkCommandSucceeded(
- testDB, {findAndModify: "foo", query: {a: 1, i: 1, j: 1}, update: {$set: {b: 1}}});
- assert.eq(1, res.value.a);
- assert.eq(null, res.value.b);
- assert.eq(1, testDB.foo.findOne({a: 1}).b);
- testDB.foo.remove({a: 1});
- assert.eq(null, testDB.runCommand({getlasterror: 1}).err);
- checkCommandSucceeded(testDB,
- {mapreduce: 'foo', map: map, reduce: reduce, out: 'mrOutput'});
- assert.eq(100, testDB.mrOutput.count());
- assert.eq(45, testDB.mrOutput.findOne().value);
-
- checkCommandSucceeded(testDB, {drop: 'foo'});
- assert.eq(0, testDB.foo.count());
- testDB.foo.insert({a: 1});
- assert.eq(1, testDB.foo.count());
- checkCommandSucceeded(testDB, {dropDatabase: 1});
- assert.eq(0, testDB.foo.count());
- checkCommandSucceeded(testDB, {create: 'baz'});
- } else {
- print("Checking write operations, should fail");
- testDB.foo.insert({a: 1, i: 1, j: 1});
- assert.eq(0, authenticatedConn.getDB('test').foo.count({a: 1, i: 1, j: 1}));
- checkCommandFailed(
- testDB, {findAndModify: "foo", query: {a: 1, i: 1, j: 1}, update: {$set: {b: 1}}});
- checkCommandFailed(testDB,
- {mapreduce: 'foo', map: map, reduce: reduce, out: 'mrOutput'});
- checkCommandFailed(testDB, {drop: 'foo'});
- checkCommandFailed(testDB, {dropDatabase: 1});
- var passed = true;
- try {
- // For some reason when create fails it throws an exception instead of just
- // returning ok:0
- var res = testDB.runCommand({create: 'baz'});
- if (!res.ok) {
- passed = false;
- }
- } catch (e) {
- // expected
- printjson(e);
+ assert.eq(4500, res.cursor.firstBatch[0].sum);
+ } else {
+ print("Checking read operations, should fail");
+ assert.throws(function() {
+ testDB.foo.find().itcount();
+ });
+ checkCommandFailed(testDB, {dbstats: 1});
+ checkCommandFailed(testDB, {collstats: 'foo'});
+ checkCommandFailed(testDB, {mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
+ checkCommandFailed(testDB, {
+ aggregate: 'foo',
+ pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}],
+ cursor: {}
+ });
+ }
+};
+
+var checkWriteOps = function(hasWriteAuth) {
+ if (hasWriteAuth) {
+ print("Checking write operations, should work");
+ testDB.foo.insert({a: 1, i: 1, j: 1});
+ var res = checkCommandSucceeded(
+ testDB, {findAndModify: "foo", query: {a: 1, i: 1, j: 1}, update: {$set: {b: 1}}});
+ assert.eq(1, res.value.a);
+ assert.eq(null, res.value.b);
+ assert.eq(1, testDB.foo.findOne({a: 1}).b);
+ testDB.foo.remove({a: 1});
+ assert.eq(null, testDB.runCommand({getlasterror: 1}).err);
+ checkCommandSucceeded(testDB,
+ {mapreduce: 'foo', map: map, reduce: reduce, out: 'mrOutput'});
+ assert.eq(100, testDB.mrOutput.count());
+ assert.eq(45, testDB.mrOutput.findOne().value);
+
+ checkCommandSucceeded(testDB, {drop: 'foo'});
+ assert.eq(0, testDB.foo.count());
+ testDB.foo.insert({a: 1});
+ assert.eq(1, testDB.foo.count());
+ checkCommandSucceeded(testDB, {dropDatabase: 1});
+ assert.eq(0, testDB.foo.count());
+ checkCommandSucceeded(testDB, {create: 'baz'});
+ } else {
+ print("Checking write operations, should fail");
+ testDB.foo.insert({a: 1, i: 1, j: 1});
+ assert.eq(0, authenticatedConn.getDB('test').foo.count({a: 1, i: 1, j: 1}));
+ checkCommandFailed(
+ testDB, {findAndModify: "foo", query: {a: 1, i: 1, j: 1}, update: {$set: {b: 1}}});
+ checkCommandFailed(testDB, {mapreduce: 'foo', map: map, reduce: reduce, out: 'mrOutput'});
+ checkCommandFailed(testDB, {drop: 'foo'});
+ checkCommandFailed(testDB, {dropDatabase: 1});
+ var passed = true;
+ try {
+ // For some reason when create fails it throws an exception instead of just
+ // returning ok:0
+ var res = testDB.runCommand({create: 'baz'});
+ if (!res.ok) {
passed = false;
}
- assert(!passed);
- }
- };
-
- var checkAdminOps = function(hasAuth) {
- if (hasAuth) {
- checkCommandSucceeded(adminDB, {getCmdLineOpts: 1});
- checkCommandSucceeded(adminDB, {serverStatus: 1});
- checkCommandSucceeded(adminDB, {listShards: 1});
- checkCommandSucceeded(adminDB, {whatsmyuri: 1});
- checkCommandSucceeded(adminDB, {isdbgrid: 1});
- checkCommandSucceeded(adminDB, {ismaster: 1});
- checkCommandSucceeded(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
- var chunk = configDB.chunks.findOne({ns: 'test.foo', shard: st.rs0.name});
- checkCommandSucceeded(
- adminDB,
- {moveChunk: 'test.foo', find: chunk.min, to: st.rs1.name, _waitForDelete: true});
- } else {
- checkCommandFailed(adminDB, {getCmdLineOpts: 1});
- checkCommandFailed(adminDB, {serverStatus: 1});
- checkCommandFailed(adminDB, {listShards: 1});
- // whatsmyuri, isdbgrid, and ismaster don't require any auth
- checkCommandSucceeded(adminDB, {whatsmyuri: 1});
- checkCommandSucceeded(adminDB, {isdbgrid: 1});
- checkCommandSucceeded(adminDB, {ismaster: 1});
- checkCommandFailed(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
- var chunkKey = {i: {$minKey: 1}, j: {$minKey: 1}};
- checkCommandFailed(
- adminDB,
- {moveChunk: 'test.foo', find: chunkKey, to: st.rs1.name, _waitForDelete: true});
+ } catch (e) {
+ // expected
+ printjson(e);
+ passed = false;
}
- };
-
- var checkRemoveShard = function(hasWriteAuth) {
- if (hasWriteAuth) {
- // start draining
- checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
- // Wait for shard to be completely removed
- checkRemoveShard = function() {
- var res = checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
- return res.msg == 'removeshard completed successfully';
- };
- assert.soon(checkRemoveShard, "failed to remove shard");
- } else {
- checkCommandFailed(adminDB, {removeshard: st.rs1.name});
- }
- };
+ assert(!passed);
+ }
+};
+
+var checkAdminOps = function(hasAuth) {
+ if (hasAuth) {
+ checkCommandSucceeded(adminDB, {getCmdLineOpts: 1});
+ checkCommandSucceeded(adminDB, {serverStatus: 1});
+ checkCommandSucceeded(adminDB, {listShards: 1});
+ checkCommandSucceeded(adminDB, {whatsmyuri: 1});
+ checkCommandSucceeded(adminDB, {isdbgrid: 1});
+ checkCommandSucceeded(adminDB, {ismaster: 1});
+ checkCommandSucceeded(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
+ var chunk = configDB.chunks.findOne({ns: 'test.foo', shard: st.rs0.name});
+ checkCommandSucceeded(
+ adminDB,
+ {moveChunk: 'test.foo', find: chunk.min, to: st.rs1.name, _waitForDelete: true});
+ } else {
+ checkCommandFailed(adminDB, {getCmdLineOpts: 1});
+ checkCommandFailed(adminDB, {serverStatus: 1});
+ checkCommandFailed(adminDB, {listShards: 1});
+ // whatsmyuri, isdbgrid, and ismaster don't require any auth
+ checkCommandSucceeded(adminDB, {whatsmyuri: 1});
+ checkCommandSucceeded(adminDB, {isdbgrid: 1});
+ checkCommandSucceeded(adminDB, {ismaster: 1});
+ checkCommandFailed(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
+ var chunkKey = {i: {$minKey: 1}, j: {$minKey: 1}};
+ checkCommandFailed(
+ adminDB,
+ {moveChunk: 'test.foo', find: chunkKey, to: st.rs1.name, _waitForDelete: true});
+ }
+};
+
+var checkRemoveShard = function(hasWriteAuth) {
+ if (hasWriteAuth) {
+ // start draining
+ checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
+ // Wait for shard to be completely removed
+ checkRemoveShard = function() {
+ var res = checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
+ return res.msg == 'removeshard completed successfully';
+ };
+ assert.soon(checkRemoveShard, "failed to remove shard");
+ } else {
+ checkCommandFailed(adminDB, {removeshard: st.rs1.name});
+ }
+};
- var checkAddShard = function(hasWriteAuth) {
- if (hasWriteAuth) {
- checkCommandSucceeded(adminDB, {addshard: st.rs1.getURL()});
- } else {
- checkCommandFailed(adminDB, {addshard: st.rs1.getURL()});
- }
- };
-
- st.stopBalancer();
-
- jsTestLog("Checking admin commands with admin auth credentials");
- checkAdminOps(true);
- assert(adminDB.logout().ok);
-
- jsTestLog("Checking admin commands with no auth credentials");
- checkAdminOps(false);
-
- jsTestLog("Checking commands with no auth credentials");
- checkReadOps(false);
- checkWriteOps(false);
-
- // Authenticate as read-only user
- jsTestLog("Checking commands with read-only auth credentials");
- assert(testDB.auth(roUser, password));
- checkReadOps(true);
- checkWriteOps(false);
-
- // Authenticate as read-write user
- jsTestLog("Checking commands with read-write auth credentials");
- assert(testDB.auth(rwUser, password));
- checkReadOps(true);
- checkWriteOps(true);
-
- jsTestLog("Check drainging/removing a shard");
- assert(testDB.logout().ok);
- checkRemoveShard(false);
- assert(adminDB.auth(rwUser, password));
- assert(testDB.dropDatabase().ok);
- checkRemoveShard(true);
- st.printShardingStatus();
-
- jsTestLog("Check adding a shard");
- assert(adminDB.logout().ok);
- checkAddShard(false);
- assert(adminDB.auth(rwUser, password));
- checkAddShard(true);
- st.printShardingStatus();
-
- st.stop();
+var checkAddShard = function(hasWriteAuth) {
+ if (hasWriteAuth) {
+ checkCommandSucceeded(adminDB, {addshard: st.rs1.getURL()});
+ } else {
+ checkCommandFailed(adminDB, {addshard: st.rs1.getURL()});
+ }
+};
+
+st.stopBalancer();
+
+jsTestLog("Checking admin commands with admin auth credentials");
+checkAdminOps(true);
+assert(adminDB.logout().ok);
+
+jsTestLog("Checking admin commands with no auth credentials");
+checkAdminOps(false);
+
+jsTestLog("Checking commands with no auth credentials");
+checkReadOps(false);
+checkWriteOps(false);
+
+// Authenticate as read-only user
+jsTestLog("Checking commands with read-only auth credentials");
+assert(testDB.auth(roUser, password));
+checkReadOps(true);
+checkWriteOps(false);
+
+// Authenticate as read-write user
+jsTestLog("Checking commands with read-write auth credentials");
+assert(testDB.auth(rwUser, password));
+checkReadOps(true);
+checkWriteOps(true);
+
+jsTestLog("Check drainging/removing a shard");
+assert(testDB.logout().ok);
+checkRemoveShard(false);
+assert(adminDB.auth(rwUser, password));
+assert(testDB.dropDatabase().ok);
+checkRemoveShard(true);
+st.printShardingStatus();
+
+jsTestLog("Check adding a shard");
+assert(adminDB.logout().ok);
+checkAddShard(false);
+assert(adminDB.auth(rwUser, password));
+checkAddShard(true);
+st.printShardingStatus();
+
+st.stop();
})();
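
In the authCommands.js hunk above, checkRemoveShard overwrites itself so that the following
assert.soon polls the shard's drain status. The same polling can be written without the
self-reassignment, roughly as in this sketch (same names as the test):

    // Start draining the shard, then poll removeShard until the server reports
    // that draining has completed and the shard has been removed.
    assert.commandWorked(adminDB.runCommand({removeShard: st.rs1.name}));
    assert.soon(function() {
        var res = assert.commandWorked(adminDB.runCommand({removeShard: st.rs1.name}));
        return res.state === "completed";
    }, "failed to remove shard", 5 * 60 * 1000);
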
diff --git a/jstests/sharding/authConnectionHook.js b/jstests/sharding/authConnectionHook.js
index 6655d4d5248..73e81393d44 100644
--- a/jstests/sharding/authConnectionHook.js
+++ b/jstests/sharding/authConnectionHook.js
@@ -14,55 +14,49 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest({
- shards: 2,
- other: {
- keyFile: 'jstests/libs/key1',
- useHostname: true,
- chunkSize: 1,
- shardAsReplicaSet: false
- }
- });
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest({
+ shards: 2,
+ other: {keyFile: 'jstests/libs/key1', useHostname: true, chunkSize: 1, shardAsReplicaSet: false}
+});
- var mongos = st.s;
- var adminDB = mongos.getDB('admin');
- var db = mongos.getDB('test');
+var mongos = st.s;
+var adminDB = mongos.getDB('admin');
+var db = mongos.getDB('test');
- adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
+adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
- adminDB.auth('admin', 'password');
+adminDB.auth('admin', 'password');
- adminDB.runCommand({enableSharding: "test"});
- st.ensurePrimaryShard('test', 'shard0001');
- adminDB.runCommand({shardCollection: "test.foo", key: {x: 1}});
+adminDB.runCommand({enableSharding: "test"});
+st.ensurePrimaryShard('test', 'shard0001');
+adminDB.runCommand({shardCollection: "test.foo", key: {x: 1}});
- for (var i = 0; i < 100; i++) {
- db.foo.insert({x: i});
- }
+for (var i = 0; i < 100; i++) {
+ db.foo.insert({x: i});
+}
- adminDB.runCommand({split: "test.foo", middle: {x: 50}});
- var curShard = st.getShard("test.foo", {x: 75});
- var otherShard = st.getOther(curShard).name;
- adminDB.runCommand(
- {moveChunk: "test.foo", find: {x: 25}, to: otherShard, _waitForDelete: true});
+adminDB.runCommand({split: "test.foo", middle: {x: 50}});
+var curShard = st.getShard("test.foo", {x: 75});
+var otherShard = st.getOther(curShard).name;
+adminDB.runCommand({moveChunk: "test.foo", find: {x: 25}, to: otherShard, _waitForDelete: true});
- st.printShardingStatus();
+st.printShardingStatus();
- MongoRunner.stopMongod(st.shard0);
- st.shard0 = MongoRunner.runMongod({restart: st.shard0});
+MongoRunner.stopMongod(st.shard0);
+st.shard0 = MongoRunner.runMongod({restart: st.shard0});
- // May fail the first couple times due to socket exceptions
- assert.soon(function() {
- var res = adminDB.runCommand({moveChunk: "test.foo", find: {x: 75}, to: otherShard});
- printjson(res);
- return res.ok;
- });
+// May fail the first couple times due to socket exceptions
+assert.soon(function() {
+ var res = adminDB.runCommand({moveChunk: "test.foo", find: {x: 75}, to: otherShard});
+ printjson(res);
+ return res.ok;
+});
- printjson(db.foo.findOne({x: 25}));
- printjson(db.foo.findOne({x: 75}));
+printjson(db.foo.findOne({x: 25}));
+printjson(db.foo.findOne({x: 75}));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/auth_add_shard.js b/jstests/sharding/auth_add_shard.js
index f898f5caecc..6c3c298b373 100644
--- a/jstests/sharding/auth_add_shard.js
+++ b/jstests/sharding/auth_add_shard.js
@@ -2,100 +2,97 @@
// The purpose of this test is to test authentication when adding/removing a shard. The test sets
// up a sharded system, then adds/removes a shard.
(function() {
- 'use strict';
+'use strict';
- // login method to login into the database
- function login(userObj) {
- var authResult = mongos.getDB(userObj.db).auth(userObj.username, userObj.password);
- printjson(authResult);
- }
+// login method to login into the database
+function login(userObj) {
+ var authResult = mongos.getDB(userObj.db).auth(userObj.username, userObj.password);
+ printjson(authResult);
+}
- // admin user object
- var adminUser = {db: "admin", username: "foo", password: "bar"};
+// admin user object
+var adminUser = {db: "admin", username: "foo", password: "bar"};
- // set up a 2 shard cluster with keyfile
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest(
- {shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
+// set up a 1-shard cluster with a keyfile
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest(
+ {shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
- print("1 shard system setup");
+print("1 shard system setup");
- // add the admin user
- print("adding user");
- mongos.getDB(adminUser.db).createUser({
- user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles
- });
+// add the admin user
+print("adding user");
+mongos.getDB(adminUser.db)
+ .createUser({user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
- // login as admin user
- login(adminUser);
+// login as admin user
+login(adminUser);
- assert.eq(1, st.config.shards.count(), "initial server count wrong");
+assert.eq(1, st.config.shards.count(), "initial server count wrong");
- // start a mongod with NO keyfile
- var conn = MongoRunner.runMongod({shardsvr: ""});
- print(conn);
+// start a mongod with NO keyfile
+var conn = MongoRunner.runMongod({shardsvr: ""});
+print(conn);
- // --------------- Test 1 --------------------
- // Add shard to the existing cluster (should fail because it was added without a keyfile)
- printjson(assert.commandFailed(admin.runCommand({addShard: conn.host})));
+// --------------- Test 1 --------------------
+// Add shard to the existing cluster (should fail because the mongod was started without a keyfile)
+printjson(assert.commandFailed(admin.runCommand({addShard: conn.host})));
- // stop mongod
- MongoRunner.stopMongod(conn);
+// stop mongod
+MongoRunner.stopMongod(conn);
- //--------------- Test 2 --------------------
- // start mongod again, this time with keyfile
- var conn = MongoRunner.runMongod({keyFile: "jstests/libs/key1", shardsvr: ""});
- // try adding the new shard
- assert.commandWorked(admin.runCommand({addShard: conn.host}));
+//--------------- Test 2 --------------------
+// start mongod again, this time with keyfile
+var conn = MongoRunner.runMongod({keyFile: "jstests/libs/key1", shardsvr: ""});
+// try adding the new shard
+assert.commandWorked(admin.runCommand({addShard: conn.host}));
- // Add some data
- var db = mongos.getDB("foo");
- var collA = mongos.getCollection("foo.bar");
+// Add some data
+var db = mongos.getDB("foo");
+var collA = mongos.getCollection("foo.bar");
- // enable sharding on a collection
- assert.commandWorked(admin.runCommand({enableSharding: "" + collA.getDB()}));
- st.ensurePrimaryShard("foo", "shard0000");
+// enable sharding on a collection
+assert.commandWorked(admin.runCommand({enableSharding: "" + collA.getDB()}));
+st.ensurePrimaryShard("foo", "shard0000");
- assert.commandWorked(admin.runCommand({shardCollection: "" + collA, key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({shardCollection: "" + collA, key: {_id: 1}}));
- // add data to the sharded collection
- for (var i = 0; i < 4; i++) {
- db.bar.save({_id: i});
- assert.commandWorked(admin.runCommand({split: "" + collA, middle: {_id: i}}));
- }
+// add data to the sharded collection
+for (var i = 0; i < 4; i++) {
+ db.bar.save({_id: i});
+ assert.commandWorked(admin.runCommand({split: "" + collA, middle: {_id: i}}));
+}
- // move a chunk
- assert.commandWorked(admin.runCommand({moveChunk: "foo.bar", find: {_id: 1}, to: "shard0001"}));
+// move a chunk
+assert.commandWorked(admin.runCommand({moveChunk: "foo.bar", find: {_id: 1}, to: "shard0001"}));
- // verify the chunk was moved
- admin.runCommand({flushRouterConfig: 1});
+// verify the chunk was moved
+admin.runCommand({flushRouterConfig: 1});
- var config = mongos.getDB("config");
- st.printShardingStatus(true);
+var config = mongos.getDB("config");
+st.printShardingStatus(true);
- // start balancer before removing the shard
- st.startBalancer();
+// start balancer before removing the shard
+st.startBalancer();
- //--------------- Test 3 --------------------
- // now drain the shard
- assert.commandWorked(admin.runCommand({removeShard: conn.host}));
+//--------------- Test 3 --------------------
+// now drain the shard
+assert.commandWorked(admin.runCommand({removeShard: conn.host}));
- // give it some time to drain
- assert.soon(function() {
- var result = admin.runCommand({removeShard: conn.host});
- printjson(result);
+// give it some time to drain
+assert.soon(function() {
+ var result = admin.runCommand({removeShard: conn.host});
+ printjson(result);
- return result.ok && result.state == "completed";
- }, "failed to drain shard completely", 5 * 60 * 1000);
+ return result.ok && result.state == "completed";
+}, "failed to drain shard completely", 5 * 60 * 1000);
- assert.eq(1, st.config.shards.count(), "removed server still appears in count");
+assert.eq(1, st.config.shards.count(), "removed server still appears in count");
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/auth_no_config_primary.js b/jstests/sharding/auth_no_config_primary.js
index cb71ca0ef74..57d6f2109b0 100644
--- a/jstests/sharding/auth_no_config_primary.js
+++ b/jstests/sharding/auth_no_config_primary.js
@@ -12,47 +12,47 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
TestData.skipCheckDBHashes = true;
(function() {
- 'use strict';
+'use strict';
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest(
- {shards: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st =
+ new ShardingTest({shards: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
- st.s.getDB('admin').createUser({user: 'root', pwd: 'pass', roles: ['root']});
- st.s.getDB('admin').auth('root', 'pass');
- var testDB = st.s.getDB('test');
- testDB.user.insert({hello: 'world'});
+st.s.getDB('admin').createUser({user: 'root', pwd: 'pass', roles: ['root']});
+st.s.getDB('admin').auth('root', 'pass');
+var testDB = st.s.getDB('test');
+testDB.user.insert({hello: 'world'});
- // Kill all secondaries, forcing the current primary to step down.
- st.configRS.getSecondaries().forEach(function(secondaryConn) {
- MongoRunner.stopMongod(secondaryConn);
- });
+// Kill all secondaries, forcing the current primary to step down.
+st.configRS.getSecondaries().forEach(function(secondaryConn) {
+ MongoRunner.stopMongod(secondaryConn);
+});
- // Test authenticate through a fresh connection.
- var newConn = new Mongo(st.s.host);
+// Test authenticate through a fresh connection.
+var newConn = new Mongo(st.s.host);
- assert.commandFailedWithCode(newConn.getDB('test').runCommand({find: 'user'}),
- ErrorCodes.Unauthorized);
+assert.commandFailedWithCode(newConn.getDB('test').runCommand({find: 'user'}),
+ ErrorCodes.Unauthorized);
- newConn.getDB('admin').auth('root', 'pass');
+newConn.getDB('admin').auth('root', 'pass');
- var res = newConn.getDB('test').user.findOne();
- assert.neq(null, res);
- assert.eq('world', res.hello);
+var res = newConn.getDB('test').user.findOne();
+assert.neq(null, res);
+assert.eq('world', res.hello);
- // Test authenticate through new mongos.
- var otherMongos =
- MongoRunner.runMongos({keyFile: "jstests/libs/key1", configdb: st.s.savedOptions.configdb});
+// Test authenticate through new mongos.
+var otherMongos =
+ MongoRunner.runMongos({keyFile: "jstests/libs/key1", configdb: st.s.savedOptions.configdb});
- assert.commandFailedWithCode(otherMongos.getDB('test').runCommand({find: 'user'}),
- ErrorCodes.Unauthorized);
+assert.commandFailedWithCode(otherMongos.getDB('test').runCommand({find: 'user'}),
+ ErrorCodes.Unauthorized);
- otherMongos.getDB('admin').auth('root', 'pass');
+otherMongos.getDB('admin').auth('root', 'pass');
- var res = otherMongos.getDB('test').user.findOne();
- assert.neq(null, res);
- assert.eq('world', res.hello);
+var res = otherMongos.getDB('test').user.findOne();
+assert.neq(null, res);
+assert.eq('world', res.hello);
- st.stop();
- MongoRunner.stopMongos(otherMongos);
+st.stop();
+MongoRunner.stopMongos(otherMongos);
})();
diff --git a/jstests/sharding/auth_sharding_cmd_metadata.js b/jstests/sharding/auth_sharding_cmd_metadata.js
index 352c31d199c..d4474a26da7 100644
--- a/jstests/sharding/auth_sharding_cmd_metadata.js
+++ b/jstests/sharding/auth_sharding_cmd_metadata.js
@@ -3,45 +3,44 @@
*/
(function() {
- "use strict";
+"use strict";
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest(
- {shards: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st =
+ new ShardingTest({shards: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
- var adminUser = {db: "admin", username: "foo", password: "bar"};
+var adminUser = {db: "admin", username: "foo", password: "bar"};
- st.s.getDB(adminUser.db).createUser({user: 'foo', pwd: 'bar', roles: jsTest.adminUserRoles});
+st.s.getDB(adminUser.db).createUser({user: 'foo', pwd: 'bar', roles: jsTest.adminUserRoles});
- st.s.getDB('admin').auth('foo', 'bar');
+st.s.getDB('admin').auth('foo', 'bar');
- st.adminCommand({enableSharding: 'test'});
- st.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+st.adminCommand({enableSharding: 'test'});
+st.adminCommand({shardCollection: 'test.user', key: {x: 1}});
- st.d0.getDB('admin').createUser({user: 'user', pwd: 'pwd', roles: jsTest.adminUserRoles});
- st.d0.getDB('admin').auth('user', 'pwd');
+st.d0.getDB('admin').createUser({user: 'user', pwd: 'pwd', roles: jsTest.adminUserRoles});
+st.d0.getDB('admin').auth('user', 'pwd');
- var maxSecs = Math.pow(2, 32) - 1;
- var metadata = {$configServerState: {opTime: {ts: Timestamp(maxSecs, 0), t: maxSecs}}};
- var res = st.d0.getDB('test').runCommandWithMetadata({ping: 1}, metadata);
+var maxSecs = Math.pow(2, 32) - 1;
+var metadata = {$configServerState: {opTime: {ts: Timestamp(maxSecs, 0), t: maxSecs}}};
+var res = st.d0.getDB('test').runCommandWithMetadata({ping: 1}, metadata);
- assert.commandFailedWithCode(res.commandReply, ErrorCodes.Unauthorized);
+assert.commandFailedWithCode(res.commandReply, ErrorCodes.Unauthorized);
- // Make sure that the config server optime did not advance.
- var status = st.d0.getDB('test').runCommand({serverStatus: 1});
- assert.neq(null, status.sharding);
- assert.lt(status.sharding.lastSeenConfigServerOpTime.t, maxSecs);
+// Make sure that the config server optime did not advance.
+var status = st.d0.getDB('test').runCommand({serverStatus: 1});
+assert.neq(null, status.sharding);
+assert.lt(status.sharding.lastSeenConfigServerOpTime.t, maxSecs);
- st.d0.getDB('admin').createUser({user: 'internal', pwd: 'pwd', roles: ['__system']});
- st.d0.getDB('admin').auth('internal', 'pwd');
+st.d0.getDB('admin').createUser({user: 'internal', pwd: 'pwd', roles: ['__system']});
+st.d0.getDB('admin').auth('internal', 'pwd');
- res = st.d0.getDB('test').runCommandWithMetadata({ping: 1}, metadata);
- assert.commandWorked(res.commandReply);
+res = st.d0.getDB('test').runCommandWithMetadata({ping: 1}, metadata);
+assert.commandWorked(res.commandReply);
- status = st.d0.getDB('test').runCommand({serverStatus: 1});
- assert.neq(null, status.sharding);
- assert.eq(status.sharding.lastSeenConfigServerOpTime.t, maxSecs);
-
- st.stop();
+status = st.d0.getDB('test').runCommand({serverStatus: 1});
+assert.neq(null, status.sharding);
+assert.eq(status.sharding.lastSeenConfigServerOpTime.t, maxSecs);
+st.stop();
})();
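
Both authCommands.js above and auth_slaveok_routing.js below call awaitRSClientHosts (loaded from
jstests/replsets/rslib.js) after starting or restarting shard nodes; it blocks until the mongos's
ReplicaSetMonitor reports each listed host in the requested state, so later secondary-targeted
reads and chunk moves don't race against the monitor's refresh. A rough usage sketch, with the
names used in authCommands.js:

    // Block until the router has marked every secondary of shard rs0 as a
    // healthy, reachable secondary.
    load("jstests/replsets/rslib.js");
    awaitRSClientHosts(mongos, st.rs0.getSecondaries(), {ok: true, secondary: true});
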
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index 51810f76d3d..9aa9bc8db84 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -10,111 +10,111 @@
* @tags: [requires_persistence, requires_find_command]
*/
(function() {
- 'use strict';
- load("jstests/replsets/rslib.js");
-
- // Replica set nodes started with --shardsvr do not enable key generation until they are added
- // to a sharded cluster and reject commands with gossiped clusterTime from users without the
- // advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
- // briefly authenticates as __system and recieves clusterTime metadata then will fail trying to
- // gossip that time later in setup.
- //
- // TODO SERVER-32672: remove this flag.
- TestData.skipGossipingClusterTime = true;
-
- /**
- * Checks if a query to the given collection will be routed to the secondary. Returns true if
- * query was routed to a secondary node.
- */
- function doesRouteToSec(coll, query) {
- var explain = coll.find(query).explain();
- assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
- var serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
- var conn = new Mongo(serverInfo.host + ":" + serverInfo.port.toString());
- var cmdRes = conn.getDB('admin').runCommand({isMaster: 1});
-
- jsTest.log('isMaster: ' + tojson(cmdRes));
-
- return cmdRes.secondary;
- }
-
- var rsOpts = {oplogSize: 50};
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest(
- {shards: 1, rs: rsOpts, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
-
- var mongos = st.s;
- var replTest = st.rs0;
- var testDB = mongos.getDB('AAAAA');
- var coll = testDB.user;
- var nodeCount = replTest.nodes.length;
-
- /* Add an admin user to the replica member to simulate connecting from
- * remote location. This is because mongod allows unautheticated
- * connections to access the server from localhost connections if there
- * is no admin user.
- */
- var adminDB = mongos.getDB('admin');
- adminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles});
- adminDB.auth('user', 'password');
- var priAdminDB = replTest.getPrimary().getDB('admin');
- replTest.getPrimary().waitForClusterTime(60);
- priAdminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 30000});
-
- coll.drop();
- coll.setSlaveOk(true);
-
- /* Secondaries should be up here, but they can still be in RECOVERY
- * state, which will make the ReplicaSetMonitor mark them as
- * ok = false and not eligible for slaveOk queries.
- */
- awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
-
- var bulk = coll.initializeUnorderedBulkOp();
- for (var x = 0; x < 20; x++) {
- bulk.insert({v: x, k: 10});
- }
- assert.writeOK(bulk.execute({w: nodeCount}));
-
- /* Although mongos never caches query results, try to do a different query
- * everytime just to be sure.
- */
- var vToFind = 0;
-
- jsTest.log('First query to SEC');
- assert(doesRouteToSec(coll, {v: vToFind++}));
-
- var SIG_TERM = 15;
- replTest.stopSet(SIG_TERM, true, {auth: {user: 'user', pwd: 'password'}});
-
- for (var n = 0; n < nodeCount; n++) {
- replTest.restart(n, rsOpts);
- }
-
- replTest.awaitSecondaryNodes();
-
- coll.setSlaveOk(true);
-
- /* replSetMonitor does not refresh the nodes information when getting secondaries.
- * A node that is previously labeled as secondary can now be a primary, so we
- * wait for the replSetMonitorWatcher thread to refresh the nodes information.
- */
- awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
- //
- // We also need to wait for the primary, it's possible that the mongos may think a node is a
- // secondary but it actually changed to a primary before we send our final query.
- //
- awaitRSClientHosts(mongos, replTest.getPrimary(), {ok: true, ismaster: true});
-
- // Recheck if we can still query secondaries after refreshing connections.
- jsTest.log('Final query to SEC');
- assert(doesRouteToSec(coll, {v: vToFind++}));
-
- // Cleanup auth so Windows will be able to shutdown gracefully
- priAdminDB = replTest.getPrimary().getDB('admin');
- priAdminDB.auth('user', 'password');
- priAdminDB.dropUser('user');
-
- st.stop();
+'use strict';
+load("jstests/replsets/rslib.js");
+
+// Replica set nodes started with --shardsvr do not enable key generation until they are added
+// to a sharded cluster and reject commands with gossiped clusterTime from users without the
+// advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
+// briefly authenticates as __system and receives clusterTime metadata, then fails when trying to
+// gossip that time later in setup.
+//
+// TODO SERVER-32672: remove this flag.
+TestData.skipGossipingClusterTime = true;
+
+/**
+ * Checks whether a query to the given collection will be routed to a secondary. Returns true if
+ * the query was routed to a secondary node.
+ */
+function doesRouteToSec(coll, query) {
+ var explain = coll.find(query).explain();
+ assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
+ var serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
+ var conn = new Mongo(serverInfo.host + ":" + serverInfo.port.toString());
+ var cmdRes = conn.getDB('admin').runCommand({isMaster: 1});
+
+ jsTest.log('isMaster: ' + tojson(cmdRes));
+
+ return cmdRes.secondary;
+}
+
+var rsOpts = {oplogSize: 50};
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest(
+ {shards: 1, rs: rsOpts, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
+
+var mongos = st.s;
+var replTest = st.rs0;
+var testDB = mongos.getDB('AAAAA');
+var coll = testDB.user;
+var nodeCount = replTest.nodes.length;
+
+/* Add an admin user to the replica set member to simulate connecting from
+ * a remote location. This is because mongod allows unauthenticated
+ * localhost connections to access the server if there is no admin user.
+ */
+var adminDB = mongos.getDB('admin');
+adminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles});
+adminDB.auth('user', 'password');
+var priAdminDB = replTest.getPrimary().getDB('admin');
+replTest.getPrimary().waitForClusterTime(60);
+priAdminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 30000});
+
+coll.drop();
+coll.setSlaveOk(true);
+
+/* Secondaries should be up here, but they can still be in RECOVERY
+ * state, which will make the ReplicaSetMonitor mark them as
+ * ok = false and not eligible for slaveOk queries.
+ */
+awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
+
+var bulk = coll.initializeUnorderedBulkOp();
+for (var x = 0; x < 20; x++) {
+ bulk.insert({v: x, k: 10});
+}
+assert.writeOK(bulk.execute({w: nodeCount}));
+
+/* Although mongos never caches query results, try to do a different query
+ * every time just to be sure.
+ */
+var vToFind = 0;
+
+jsTest.log('First query to SEC');
+assert(doesRouteToSec(coll, {v: vToFind++}));
+
+var SIG_TERM = 15;
+replTest.stopSet(SIG_TERM, true, {auth: {user: 'user', pwd: 'password'}});
+
+for (var n = 0; n < nodeCount; n++) {
+ replTest.restart(n, rsOpts);
+}
+
+replTest.awaitSecondaryNodes();
+
+coll.setSlaveOk(true);
+
+/* The ReplicaSetMonitor does not refresh node information when getting secondaries.
+ * A node that was previously labeled as a secondary can now be a primary, so we
+ * wait for the replSetMonitorWatcher thread to refresh the node information.
+ */
+awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
+//
+// We also need to wait for the primary; it's possible that the mongos may think a node is a
+// secondary when it has actually changed to a primary before we send our final query.
+//
+awaitRSClientHosts(mongos, replTest.getPrimary(), {ok: true, ismaster: true});
+
+// Recheck if we can still query secondaries after refreshing connections.
+jsTest.log('Final query to SEC');
+assert(doesRouteToSec(coll, {v: vToFind++}));
+
+// Clean up auth so Windows will be able to shut down gracefully.
+priAdminDB = replTest.getPrimary().getDB('admin');
+priAdminDB.auth('user', 'password');
+priAdminDB.dropUser('user');
+
+st.stop();
})();
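As an aside, the explain-based routing check in doesRouteToSec() can be reduced to the piece that
extracts which host answered the query. The helper below is a sketch with a hypothetical name; it
assumes a collection handle obtained from a mongos and a query that targets a single shard.

// Sketch: returns "host:port" of the mongod that served the query, using the
// same explain fields as doesRouteToSec() above.
function hostServingQuery(coll, query) {
    var explain = coll.find(query).explain();
    assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
    var serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
    return serverInfo.host + ":" + serverInfo.port.toString();
}
// Example usage: jsTest.log('Served by ' + hostServingQuery(coll, {v: 0}));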
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index 0f444f6208d..0d1fb713c97 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -3,115 +3,114 @@
(function() {
- //
- // User document declarations. All users in this test are added to the admin database.
- //
-
- var adminUser = {
- user: "admin",
- pwd: "a",
- roles:
- ["readWriteAnyDatabase", "dbAdminAnyDatabase", "userAdminAnyDatabase", "clusterAdmin"]
- };
-
- var test1User = {
- user: "test",
- pwd: "a",
- roles: [{role: 'readWrite', db: 'test1', hasRole: true, canDelegate: false}]
- };
-
- function assertRemove(collection, pattern) {
- assert.writeOK(collection.remove(pattern));
- }
-
- function assertInsert(collection, obj) {
- assert.writeOK(collection.insert(obj));
+//
+// User document declarations. All users in this test are added to the admin database.
+//
+
+var adminUser = {
+ user: "admin",
+ pwd: "a",
+ roles: ["readWriteAnyDatabase", "dbAdminAnyDatabase", "userAdminAnyDatabase", "clusterAdmin"]
+};
+
+var test1User = {
+ user: "test",
+ pwd: "a",
+ roles: [{role: 'readWrite', db: 'test1', hasRole: true, canDelegate: false}]
+};
+
+function assertRemove(collection, pattern) {
+ assert.writeOK(collection.remove(pattern));
+}
+
+function assertInsert(collection, obj) {
+ assert.writeOK(collection.insert(obj));
+}
+
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var cluster = new ShardingTest({
+ name: "authmr",
+ shards: 1,
+ mongos: 1,
+ other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}
+});
+
+// Set up the test data.
+(function() {
+var adminDB = cluster.getDB('admin');
+var test1DB = adminDB.getSiblingDB('test1');
+var test2DB = adminDB.getSiblingDB('test2');
+var ex;
+try {
+ adminDB.createUser(adminUser);
+ assert(adminDB.auth(adminUser.user, adminUser.pwd));
+
+ adminDB.dropUser(test1User.user);
+ adminDB.createUser(test1User);
+
+ assertInsert(test1DB.foo, {a: 1});
+ assertInsert(test1DB.foo, {a: 2});
+ assertInsert(test1DB.foo, {a: 3});
+ assertInsert(test1DB.foo, {a: 4});
+ assertInsert(test2DB.foo, {x: 1});
+} finally {
+ adminDB.logout();
+}
+}());
+
+assert.throws(function() {
+ var adminDB = cluster.getDB('admin');
+ var test1DB;
+ var test2DB;
+ assert(adminDB.auth(test1User.user, test1User.pwd));
+ try {
+ test1DB = adminDB.getSiblingDB("test1");
+ test2DB = adminDB.getSiblingDB("test2");
+
+ // Sanity check. test1User can count (read) test1, but not test2.
+ assert.eq(test1DB.foo.count(), 4);
+ assert.throws(test2DB.foo.count);
+
+ test1DB.foo.mapReduce(
+ function() {
+ emit(0, this.a);
+ var t2 = new Mongo().getDB("test2");
+ t2.ad.insert(this);
+ },
+ function(k, vs) {
+ var t2 = new Mongo().getDB("test2");
+ t2.reductio.insert(this);
+
+ return Array.sum(vs);
+ },
+ {
+ out: "bar",
+ finalize: function(k, v) {
+ for (k in this) {
+ if (this.hasOwnProperty(k))
+ print(k + "=" + v);
+ }
+ var t2 = new Mongo().getDB("test2");
+ t2.absurdum.insert({key: k, value: v});
+ }
+ });
+ } finally {
+ adminDB.logout();
}
+});
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var cluster = new ShardingTest({
- name: "authmr",
- shards: 1,
- mongos: 1,
- other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}
- });
-
- // Set up the test data.
- (function() {
- var adminDB = cluster.getDB('admin');
- var test1DB = adminDB.getSiblingDB('test1');
- var test2DB = adminDB.getSiblingDB('test2');
- var ex;
- try {
- adminDB.createUser(adminUser);
- assert(adminDB.auth(adminUser.user, adminUser.pwd));
-
- adminDB.dropUser(test1User.user);
- adminDB.createUser(test1User);
-
- assertInsert(test1DB.foo, {a: 1});
- assertInsert(test1DB.foo, {a: 2});
- assertInsert(test1DB.foo, {a: 3});
- assertInsert(test1DB.foo, {a: 4});
- assertInsert(test2DB.foo, {x: 1});
- } finally {
- adminDB.logout();
- }
- }());
-
- assert.throws(function() {
- var adminDB = cluster.getDB('admin');
- var test1DB;
- var test2DB;
- assert(adminDB.auth(test1User.user, test1User.pwd));
- try {
- test1DB = adminDB.getSiblingDB("test1");
- test2DB = adminDB.getSiblingDB("test2");
-
- // Sanity check. test1User can count (read) test1, but not test2.
- assert.eq(test1DB.foo.count(), 4);
- assert.throws(test2DB.foo.count);
-
- test1DB.foo.mapReduce(
- function() {
- emit(0, this.a);
- var t2 = new Mongo().getDB("test2");
- t2.ad.insert(this);
- },
- function(k, vs) {
- var t2 = new Mongo().getDB("test2");
- t2.reductio.insert(this);
-
- return Array.sum(vs);
- },
- {
- out: "bar",
- finalize: function(k, v) {
- for (k in this) {
- if (this.hasOwnProperty(k))
- print(k + "=" + v);
- }
- var t2 = new Mongo().getDB("test2");
- t2.absurdum.insert({key: k, value: v});
- }
- });
- } finally {
- adminDB.logout();
- }
- });
-
- (function() {
- var adminDB = cluster.getDB('admin');
- assert(adminDB.auth(adminUser.user, adminUser.pwd));
- try {
- var test2DB = cluster.getDB('test2');
- assert.eq(test2DB.reductio.count(), 0, "reductio");
- assert.eq(test2DB.ad.count(), 0, "ad");
- assert.eq(test2DB.absurdum.count(), 0, "absurdum");
- } finally {
- adminDB.logout();
- }
- }());
-
- cluster.stop();
+(function() {
+var adminDB = cluster.getDB('admin');
+assert(adminDB.auth(adminUser.user, adminUser.pwd));
+try {
+ var test2DB = cluster.getDB('test2');
+ assert.eq(test2DB.reductio.count(), 0, "reductio");
+ assert.eq(test2DB.ad.count(), 0, "ad");
+ assert.eq(test2DB.absurdum.count(), 0, "absurdum");
+} finally {
+ adminDB.logout();
+}
+}());
+
+cluster.stop();
})();
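For contrast with the rejected map-reduce above, here is a hedged sketch of a map-reduce that stays
inside the authorized database, which a user with readWrite on 'test1' is expected to be allowed to
run. The output collection name 'bar_sum' is hypothetical, the snippet assumes the authenticated
test1DB handle from the setup block, and it is not part of the patch.

// Sketch: a mapReduce with no cross-database side effects.
test1DB.foo.mapReduce(
    function() {
        emit(0, this.a);
    },
    function(k, vs) {
        return Array.sum(vs);
    },
    {out: "bar_sum"});
// All four documents emit the same key, so one output document is expected.
assert.eq(1, test1DB.bar_sum.count());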
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index 95e0b0d7b45..3d60fb2ccca 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -3,88 +3,87 @@
(function() {
- //
- // User document declarations. All users in this test are added to the admin database.
- //
+//
+// User document declarations. All users in this test are added to the admin database.
+//
- var adminUser = {
- user: "admin",
- pwd: "a",
- roles:
- ["readWriteAnyDatabase", "dbAdminAnyDatabase", "userAdminAnyDatabase", "clusterAdmin"]
- };
+var adminUser = {
+ user: "admin",
+ pwd: "a",
+ roles: ["readWriteAnyDatabase", "dbAdminAnyDatabase", "userAdminAnyDatabase", "clusterAdmin"]
+};
- var test1Reader = {
- user: "test",
- pwd: "a",
- roles: [{role: 'read', db: 'test1', hasRole: true, canDelegate: false}]
- };
+var test1Reader = {
+ user: "test",
+ pwd: "a",
+ roles: [{role: 'read', db: 'test1', hasRole: true, canDelegate: false}]
+};
- function assertRemove(collection, pattern) {
- assert.writeOK(collection.remove(pattern));
- }
+function assertRemove(collection, pattern) {
+ assert.writeOK(collection.remove(pattern));
+}
- function assertInsert(collection, obj) {
- assert.writeOK(collection.insert(obj));
- }
+function assertInsert(collection, obj) {
+ assert.writeOK(collection.insert(obj));
+}
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var cluster = new ShardingTest({
- name: "authwhere",
- shards: 1,
- mongos: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
- });
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var cluster = new ShardingTest({
+ name: "authwhere",
+ shards: 1,
+ mongos: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+});
- // Set up the test data.
- (function() {
- var adminDB = cluster.getDB('admin');
- var test1DB = adminDB.getSiblingDB('test1');
- var test2DB = adminDB.getSiblingDB('test2');
- var ex;
- try {
- adminDB.createUser(adminUser);
- assert(adminDB.auth(adminUser.user, adminUser.pwd));
+// Set up the test data.
+(function() {
+var adminDB = cluster.getDB('admin');
+var test1DB = adminDB.getSiblingDB('test1');
+var test2DB = adminDB.getSiblingDB('test2');
+var ex;
+try {
+ adminDB.createUser(adminUser);
+ assert(adminDB.auth(adminUser.user, adminUser.pwd));
- adminDB.dropUser(test1Reader.user);
- adminDB.createUser(test1Reader);
+ adminDB.dropUser(test1Reader.user);
+ adminDB.createUser(test1Reader);
- assertInsert(test1DB.foo, {a: 1});
- assertInsert(test2DB.foo, {x: 1});
- } finally {
- adminDB.logout();
- }
- }());
+ assertInsert(test1DB.foo, {a: 1});
+ assertInsert(test2DB.foo, {x: 1});
+} finally {
+ adminDB.logout();
+}
+}());
- (function() {
- var adminDB = cluster.getDB('admin');
- var test1DB;
- var test2DB;
- assert(adminDB.auth(test1Reader.user, test1Reader.pwd));
- try {
- test1DB = adminDB.getSiblingDB("test1");
- test2DB = adminDB.getSiblingDB("test2");
+(function() {
+var adminDB = cluster.getDB('admin');
+var test1DB;
+var test2DB;
+assert(adminDB.auth(test1Reader.user, test1Reader.pwd));
+try {
+ test1DB = adminDB.getSiblingDB("test1");
+ test2DB = adminDB.getSiblingDB("test2");
- // Sanity check. test1Reader can count (read) test1, but not test2.
- assert.eq(test1DB.foo.count(), 1);
- assert.throws(function() {
- test2DB.foo.count();
- });
+ // Sanity check. test1Reader can count (read) test1, but not test2.
+ assert.eq(test1DB.foo.count(), 1);
+ assert.throws(function() {
+ test2DB.foo.count();
+ });
- // Cannot examine second database from a where clause.
- assert.throws(function() {
- test1DB.foo.count("db.getSiblingDB('test2').foo.count() == 1");
- });
+ // Cannot examine second database from a where clause.
+ assert.throws(function() {
+ test1DB.foo.count("db.getSiblingDB('test2').foo.count() == 1");
+ });
- // Cannot write test1 via tricky where clause.
- assert.throws(function() {
- test1DB.foo.count("db.foo.insert({b: 1})");
- });
- assert.eq(test1DB.foo.count(), 1);
- } finally {
- adminDB.logout();
- }
- }());
+ // Cannot write test1 via tricky where clause.
+ assert.throws(function() {
+ test1DB.foo.count("db.foo.insert({b: 1})");
+ });
+ assert.eq(test1DB.foo.count(), 1);
+} finally {
+ adminDB.logout();
+}
+}());
- cluster.stop();
+cluster.stop();
})();
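For contrast, a hedged sketch of a $where-style count that the read role is expected to allow,
because the predicate only reads the current document. It assumes the authenticated test1DB handle
from the block above and that server-side JavaScript is enabled; it is not part of the patch.

// Sketch: a plain JavaScript predicate over the current document only.
assert.eq(1, test1DB.foo.count("this.a === 1"));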
diff --git a/jstests/sharding/auto_rebalance_parallel.js b/jstests/sharding/auto_rebalance_parallel.js
index bb86c1fb9f1..ef6af0d57c5 100644
--- a/jstests/sharding/auto_rebalance_parallel.js
+++ b/jstests/sharding/auto_rebalance_parallel.js
@@ -3,69 +3,69 @@
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 4});
- var config = st.s0.getDB('config');
+var st = new ShardingTest({shards: 4});
+var config = st.s0.getDB('config');
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- function prepareCollectionForBalance(collName) {
- assert.commandWorked(st.s0.adminCommand({shardCollection: collName, key: {Key: 1}}));
+function prepareCollectionForBalance(collName) {
+ assert.commandWorked(st.s0.adminCommand({shardCollection: collName, key: {Key: 1}}));
- var coll = st.s0.getCollection(collName);
+ var coll = st.s0.getCollection(collName);
- // Create 4 chunks initially and ensure they get balanced within 1 balancer round
- assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
- assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
- assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
- assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
+ // Create 4 chunks initially and ensure they get balanced within 1 balancer round
+ assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
+ assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
+ assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
+ assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
- assert.commandWorked(st.splitAt(collName, {Key: 10}));
- assert.commandWorked(st.splitAt(collName, {Key: 20}));
- assert.commandWorked(st.splitAt(collName, {Key: 30}));
+ assert.commandWorked(st.splitAt(collName, {Key: 10}));
+ assert.commandWorked(st.splitAt(collName, {Key: 20}));
+ assert.commandWorked(st.splitAt(collName, {Key: 30}));
- // Move two of the chunks to st.shard1.shardName so we have option to do parallel balancing
- assert.commandWorked(st.moveChunk(collName, {Key: 20}, st.shard1.shardName));
- assert.commandWorked(st.moveChunk(collName, {Key: 30}, st.shard1.shardName));
+    // Move two of the chunks to st.shard1.shardName so we have the option of parallel balancing
+ assert.commandWorked(st.moveChunk(collName, {Key: 20}, st.shard1.shardName));
+ assert.commandWorked(st.moveChunk(collName, {Key: 30}, st.shard1.shardName));
- assert.eq(2, config.chunks.find({ns: collName, shard: st.shard0.shardName}).itcount());
- assert.eq(2, config.chunks.find({ns: collName, shard: st.shard1.shardName}).itcount());
- }
+ assert.eq(2, config.chunks.find({ns: collName, shard: st.shard0.shardName}).itcount());
+ assert.eq(2, config.chunks.find({ns: collName, shard: st.shard1.shardName}).itcount());
+}
- function checkCollectionBalanced(collName) {
- assert.eq(1, config.chunks.find({ns: collName, shard: st.shard0.shardName}).itcount());
- assert.eq(1, config.chunks.find({ns: collName, shard: st.shard1.shardName}).itcount());
- assert.eq(1, config.chunks.find({ns: collName, shard: st.shard2.shardName}).itcount());
- assert.eq(1, config.chunks.find({ns: collName, shard: st.shard3.shardName}).itcount());
- }
+function checkCollectionBalanced(collName) {
+ assert.eq(1, config.chunks.find({ns: collName, shard: st.shard0.shardName}).itcount());
+ assert.eq(1, config.chunks.find({ns: collName, shard: st.shard1.shardName}).itcount());
+ assert.eq(1, config.chunks.find({ns: collName, shard: st.shard2.shardName}).itcount());
+ assert.eq(1, config.chunks.find({ns: collName, shard: st.shard3.shardName}).itcount());
+}
- function countMoves(collName) {
- return config.changelog.find({what: 'moveChunk.start', ns: collName}).itcount();
- }
+function countMoves(collName) {
+ return config.changelog.find({what: 'moveChunk.start', ns: collName}).itcount();
+}
- prepareCollectionForBalance('TestDB.TestColl1');
- prepareCollectionForBalance('TestDB.TestColl2');
+prepareCollectionForBalance('TestDB.TestColl1');
+prepareCollectionForBalance('TestDB.TestColl2');
- // Count the moveChunk start attempts accurately and ensure that only the correct number of
- // migrations are scheduled
- const testColl1InitialMoves = countMoves('TestDB.TestColl1');
- const testColl2InitialMoves = countMoves('TestDB.TestColl2');
+// Count the moveChunk start attempts accurately and ensure that only the correct number of
+// migrations are scheduled
+const testColl1InitialMoves = countMoves('TestDB.TestColl1');
+const testColl2InitialMoves = countMoves('TestDB.TestColl2');
- st.startBalancer();
- st.waitForBalancer(true, 60000);
- st.waitForBalancer(true, 60000);
- st.stopBalancer();
+st.startBalancer();
+st.waitForBalancer(true, 60000);
+st.waitForBalancer(true, 60000);
+st.stopBalancer();
- checkCollectionBalanced('TestDB.TestColl1');
- checkCollectionBalanced('TestDB.TestColl2');
+checkCollectionBalanced('TestDB.TestColl1');
+checkCollectionBalanced('TestDB.TestColl2');
- assert.eq(2, countMoves('TestDB.TestColl1') - testColl1InitialMoves);
- assert.eq(2, countMoves('TestDB.TestColl2') - testColl2InitialMoves);
+assert.eq(2, countMoves('TestDB.TestColl1') - testColl1InitialMoves);
+assert.eq(2, countMoves('TestDB.TestColl2') - testColl2InitialMoves);
- // Ensure there are no migration errors reported
- assert.eq(0, config.changelog.find({what: 'moveChunk.error'}).itcount());
+// Ensure there are no migration errors reported
+assert.eq(0, config.changelog.find({what: 'moveChunk.error'}).itcount());
- st.stop();
+st.stop();
})();
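A hedged generalization of the per-shard chunk counting used above. The helper name is hypothetical
and it assumes the config database handle from the test; it is illustrative only.

// Sketch: builds a map from shard name to chunk count for a namespace, using
// the same config.chunks query as the assertions above.
function chunksPerShard(config, collName) {
    var counts = {};
    config.chunks.find({ns: collName}).forEach(function(chunk) {
        counts[chunk.shard] = (counts[chunk.shard] || 0) + 1;
    });
    return counts;
}
// Example usage: printjson(chunksPerShard(config, 'TestDB.TestColl1'));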
diff --git a/jstests/sharding/auto_rebalance_parallel_replica_sets.js b/jstests/sharding/auto_rebalance_parallel_replica_sets.js
index 35c9132b061..0be9549f3cd 100644
--- a/jstests/sharding/auto_rebalance_parallel_replica_sets.js
+++ b/jstests/sharding/auto_rebalance_parallel_replica_sets.js
@@ -2,66 +2,66 @@
* Tests that the cluster is balanced in parallel in one balancer round (replica sets).
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 4, rs: {nodes: 3}});
+var st = new ShardingTest({shards: 4, rs: {nodes: 3}});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
- var coll = st.s0.getDB('TestDB').TestColl;
+var coll = st.s0.getDB('TestDB').TestColl;
- // Create 4 chunks initially and ensure they get balanced within 1 balancer round
- assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
- assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
- assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
- assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
+// Create 4 chunks initially and ensure they get balanced within 1 balancer round
+assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
+assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
+assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
+assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 10}));
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20}));
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 30}));
+assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 10}));
+assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20}));
+assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 30}));
- // Move two of the chunks to st.shard1.shardName so we have option to do parallel balancing
- assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName));
- assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName));
+// Move two of the chunks to st.shard1.shardName so we have the option of parallel balancing
+assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName));
+assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName));
- assert.eq(2,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
- assert.eq(2,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
+assert.eq(2,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+ .itcount());
+assert.eq(2,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+ .itcount());
- // Do enable the balancer and wait for a single balancer round
- st.startBalancer();
- st.waitForBalancer(true, 60000);
- st.stopBalancer();
+// Enable the balancer and wait for a single balancer round.
+st.startBalancer();
+st.waitForBalancer(true, 60000);
+st.stopBalancer();
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard2.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard3.shardName})
- .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+ .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+ .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard2.shardName})
+ .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard3.shardName})
+ .itcount());
- // Ensure the range deleter quiesces
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
- st.rs2.awaitReplication();
- st.rs3.awaitReplication();
+// Ensure the range deleter quiesces
+st.rs0.awaitReplication();
+st.rs1.awaitReplication();
+st.rs2.awaitReplication();
+st.rs3.awaitReplication();
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/autodiscover_config_rs_from_secondary.js b/jstests/sharding/autodiscover_config_rs_from_secondary.js
index c439f5347d3..390d9bb7aa6 100644
--- a/jstests/sharding/autodiscover_config_rs_from_secondary.js
+++ b/jstests/sharding/autodiscover_config_rs_from_secondary.js
@@ -4,59 +4,59 @@
load('jstests/libs/feature_compatibility_version.js');
(function() {
- 'use strict';
-
- var rst = new ReplSetTest(
- {name: "configRS", nodes: 3, nodeOptions: {configsvr: "", storageEngine: "wiredTiger"}});
- rst.startSet();
- var conf = rst.getReplSetConfig();
- conf.members[1].priority = 0;
- conf.members[2].priority = 0;
- conf.writeConcernMajorityJournalDefault = true;
- rst.initiate(conf);
-
- // Config servers always start at the latest available FCV for the binary. This poses a problem
- // when this test is run in the mixed version suite because mongos will be 'last-stable' and if
- // this node is of the latest binary, it will report itself as the 'latest' FCV, which would
- // cause mongos to refuse to connect to it and shutdown.
- //
- // In order to work around this, in the mixed version suite, be pessimistic and always set this
- // node to the 'last-stable' FCV
- if (jsTestOptions().shardMixedBinVersions) {
- assert.commandWorked(
- rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- rst.awaitReplication();
- }
-
- var seedList = rst.name + "/" + rst.nodes[1].host; // node 1 is guaranteed to not be primary
- {
- // Ensure that mongos can start up when given the CSRS secondary, discover the primary, and
- // perform writes to the config servers.
- var mongos = MongoRunner.runMongos({configdb: seedList});
- var admin = mongos.getDB('admin');
- assert.writeOK(admin.foo.insert({a: 1}));
- assert.eq(1, admin.foo.findOne().a);
- MongoRunner.stopMongos(mongos);
- }
-
- // Wait for replication to all config server replica set members to ensure that mongos
- // will be able to do majority reads when trying to verify if the initial cluster metadata
- // has been properly written.
- rst.awaitLastOpCommitted();
- // Now take down the one electable node
- rst.stop(0);
- rst.awaitNoPrimary();
-
- // Start a mongos when there is no primary
+'use strict';
+
+var rst = new ReplSetTest(
+ {name: "configRS", nodes: 3, nodeOptions: {configsvr: "", storageEngine: "wiredTiger"}});
+rst.startSet();
+var conf = rst.getReplSetConfig();
+conf.members[1].priority = 0;
+conf.members[2].priority = 0;
+conf.writeConcernMajorityJournalDefault = true;
+rst.initiate(conf);
+
+// Config servers always start at the latest available FCV for the binary. This poses a problem
+// when this test is run in the mixed version suite because mongos will be 'last-stable' and, if
+// this node is running the latest binary, it will report itself as the 'latest' FCV, which would
+// cause mongos to refuse to connect to it and shut down.
+//
+// In order to work around this, in the mixed version suite, be pessimistic and always set this
+// node to the 'last-stable' FCV.
+if (jsTestOptions().shardMixedBinVersions) {
+ assert.commandWorked(
+ rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+ rst.awaitReplication();
+}
+
+var seedList = rst.name + "/" + rst.nodes[1].host; // node 1 is guaranteed to not be primary
+{
+ // Ensure that mongos can start up when given the CSRS secondary, discover the primary, and
+ // perform writes to the config servers.
var mongos = MongoRunner.runMongos({configdb: seedList});
- // Take down the one node the mongos knew about to ensure that it autodiscovered the one
- // remaining
- // config server
- rst.stop(1);
-
var admin = mongos.getDB('admin');
- mongos.setSlaveOk(true);
+ assert.writeOK(admin.foo.insert({a: 1}));
assert.eq(1, admin.foo.findOne().a);
MongoRunner.stopMongos(mongos);
- rst.stopSet();
+}
+
+// Wait for replication to all config server replica set members to ensure that mongos
+// will be able to do majority reads when trying to verify if the initial cluster metadata
+// has been properly written.
+rst.awaitLastOpCommitted();
+// Now take down the one electable node
+rst.stop(0);
+rst.awaitNoPrimary();
+
+// Start a mongos when there is no primary
+var mongos = MongoRunner.runMongos({configdb: seedList});
+// Take down the one node the mongos knew about to ensure that it autodiscovered the one
+// remaining config server.
+rst.stop(1);
+
+var admin = mongos.getDB('admin');
+mongos.setSlaveOk(true);
+assert.eq(1, admin.foo.findOne().a);
+MongoRunner.stopMongos(mongos);
+rst.stopSet();
})();
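Note that the seed list used above follows the "<replSetName>/<host:port>" format. The sketch below
points a fresh mongos at a single CSRS member and lets it discover the rest of the config replica
set; it assumes the rst handle from the test and is not part of the patch.

// Sketch: start a mongos from a one-member seed list, then shut it down again.
var seed = rst.name + "/" + rst.nodes[1].host;
var discoveringMongos = MongoRunner.runMongos({configdb: seed});
assert.neq(null, discoveringMongos);
MongoRunner.stopMongos(discoveringMongos);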
diff --git a/jstests/sharding/autosplit.js b/jstests/sharding/autosplit.js
index 7ac5047cc71..58dbe7ece1e 100644
--- a/jstests/sharding/autosplit.js
+++ b/jstests/sharding/autosplit.js
@@ -2,78 +2,78 @@
* This test confirms that chunks get split as they grow due to data insertion.
*/
(function() {
- 'use strict';
- load('jstests/sharding/autosplit_include.js');
-
- var s = new ShardingTest({
- name: "auto1",
- shards: 2,
- mongos: 1,
- other: {enableAutoSplit: true, chunkSize: 10},
- });
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
-
- var bigString = "";
- while (bigString.length < 1024 * 50)
- bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
-
- var db = s.getDB("test");
- var primary = s.getPrimaryShard("test").getDB("test");
- var coll = db.foo;
- var counts = [];
-
- var i = 0;
-
- // Inserts numDocs documents into the collection, waits for any ongoing
- // splits to finish, and then prints some information about the
- // collection's chunks
- function insertDocsAndWaitForSplit(numDocs) {
- var bulk = coll.initializeUnorderedBulkOp();
- var curMaxKey = i;
- // Increment the global 'i' variable to keep 'num' unique across all
- // documents
- for (; i < curMaxKey + numDocs; i++) {
- bulk.insert({num: i, s: bigString});
- }
- assert.writeOK(bulk.execute());
-
- waitForOngoingChunkSplits(s);
-
- s.printChunks();
- s.printChangeLog();
+'use strict';
+load('jstests/sharding/autosplit_include.js');
+
+var s = new ShardingTest({
+ name: "auto1",
+ shards: 2,
+ mongos: 1,
+ other: {enableAutoSplit: true, chunkSize: 10},
+});
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
+
+var bigString = "";
+while (bigString.length < 1024 * 50)
+ bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
+
+var db = s.getDB("test");
+var primary = s.getPrimaryShard("test").getDB("test");
+var coll = db.foo;
+var counts = [];
+
+var i = 0;
+
+// Inserts numDocs documents into the collection, waits for any ongoing
+// splits to finish, and then prints some information about the
+// collection's chunks
+function insertDocsAndWaitForSplit(numDocs) {
+ var bulk = coll.initializeUnorderedBulkOp();
+ var curMaxKey = i;
+ // Increment the global 'i' variable to keep 'num' unique across all
+ // documents
+ for (; i < curMaxKey + numDocs; i++) {
+ bulk.insert({num: i, s: bigString});
}
+ assert.writeOK(bulk.execute());
- insertDocsAndWaitForSplit(100);
+ waitForOngoingChunkSplits(s);
- counts.push(s.config.chunks.count({"ns": "test.foo"}));
- assert.eq(100, db.foo.find().itcount());
+ s.printChunks();
+ s.printChangeLog();
+}
- print("datasize: " +
- tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
+insertDocsAndWaitForSplit(100);
- insertDocsAndWaitForSplit(100);
- counts.push(s.config.chunks.count({"ns": "test.foo"}));
+counts.push(s.config.chunks.count({"ns": "test.foo"}));
+assert.eq(100, db.foo.find().itcount());
- insertDocsAndWaitForSplit(200);
- counts.push(s.config.chunks.count({"ns": "test.foo"}));
+print("datasize: " +
+ tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
- insertDocsAndWaitForSplit(300);
- counts.push(s.config.chunks.count({"ns": "test.foo"}));
+insertDocsAndWaitForSplit(100);
+counts.push(s.config.chunks.count({"ns": "test.foo"}));
- assert(counts[counts.length - 1] > counts[0], "counts 1 : " + tojson(counts));
- var sorted = counts.slice(0);
- // Sort doesn't sort numbers correctly by default, resulting in fail
- sorted.sort(function(a, b) {
- return a - b;
- });
- assert.eq(counts, sorted, "counts 2 : " + tojson(counts));
+insertDocsAndWaitForSplit(200);
+counts.push(s.config.chunks.count({"ns": "test.foo"}));
- print(counts);
+insertDocsAndWaitForSplit(300);
+counts.push(s.config.chunks.count({"ns": "test.foo"}));
- printjson(db.stats());
+assert(counts[counts.length - 1] > counts[0], "counts 1 : " + tojson(counts));
+var sorted = counts.slice(0);
+// Sort doesn't sort numbers correctly by default, which would make this comparison fail.
+sorted.sort(function(a, b) {
+ return a - b;
+});
+assert.eq(counts, sorted, "counts 2 : " + tojson(counts));
- s.stop();
+print(counts);
+
+printjson(db.stats());
+
+s.stop();
})();
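The sorted-copy comparison above can also be expressed as a direct monotonicity check. The helper
below is a hedged sketch with a hypothetical name, shown only as an alternative formulation.

// Sketch: assert that successive chunk counts never decrease.
function assertNonDecreasing(counts) {
    for (var idx = 1; idx < counts.length; idx++) {
        assert.lte(counts[idx - 1], counts[idx], "counts: " + tojson(counts));
    }
}
// Example usage: assertNonDecreasing(counts);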
diff --git a/jstests/sharding/autosplit_heuristics.js b/jstests/sharding/autosplit_heuristics.js
index 140ac3a1a40..1777a82678a 100644
--- a/jstests/sharding/autosplit_heuristics.js
+++ b/jstests/sharding/autosplit_heuristics.js
@@ -7,86 +7,85 @@
* @tags: [resource_intensive]
*/
(function() {
- 'use strict';
- load('jstests/sharding/autosplit_include.js');
+'use strict';
+load('jstests/sharding/autosplit_include.js');
- var st = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
+var st = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
- // The balancer is by default stopped, thus it will NOT interfere unpredictably with the chunk
- // moves/splits depending on the timing.
+// The balancer is stopped by default, so it will NOT interfere unpredictably with the chunk
+// moves/splits depending on the timing.
- // Test is not valid for debug build, heuristics get all mangled by debug reload behavior
- var isDebugBuild = st.s0.getDB("admin").serverBuildInfo().debug;
+// The test is not valid for debug builds; the heuristics get mangled by debug reload behavior.
+var isDebugBuild = st.s0.getDB("admin").serverBuildInfo().debug;
- if (!isDebugBuild) {
- var mongos = st.s0;
- var config = mongos.getDB("config");
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.hashBar");
+if (!isDebugBuild) {
+ var mongos = st.s0;
+ var config = mongos.getDB("config");
+ var admin = mongos.getDB("admin");
+ var coll = mongos.getCollection("foo.hashBar");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- var numChunks = 10;
+ var numChunks = 10;
- // Split off the low and high chunks, to get non-special-case behavior
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: numChunks + 1}}));
+ // Split off the low and high chunks, to get non-special-case behavior
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: numChunks + 1}}));
- // Split all the other chunks, and an extra chunk. We need the extra chunk to compensate for
- // the fact that the chunk differ resets the highest chunk's (i.e. the last-split-chunk's)
- // data count on reload.
- for (var i = 1; i < numChunks + 1; i++) {
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
- }
-
- jsTest.log("Setup collection...");
- st.printShardingStatus(true);
- var pad = (new Array(1024)).join(' ');
- var approxSize = Object.bsonsize({_id: 0.0, pad: pad});
-
- jsTest.log("Starting inserts of approx size: " + approxSize + "...");
-
- var chunkSizeBytes = 1024 * 1024;
-
- // We insert slightly more than the max number of docs per chunk, to test
- // if resetting the chunk size happens during reloads. If the size is
- // reset, we'd expect to split less, since the first split would then
- // disable further splits (statistically, since the decision is randomized).
- // We choose 1.4 since split attempts happen about once every 1/5 chunkSize,
- // and we want to be sure we def get a split attempt at a full chunk.
- var insertsForSplit = Math.ceil((chunkSizeBytes * 1.4) / approxSize);
- var totalInserts = insertsForSplit * numChunks;
-
- printjson({
- chunkSizeBytes: chunkSizeBytes,
- insertsForSplit: insertsForSplit,
- totalInserts: totalInserts
- });
-
- // Insert enough docs to trigger splits into all chunks
- for (var i = 0; i < totalInserts; i++) {
- assert.writeOK(coll.insert({_id: i % numChunks + (i / totalInserts), pad: pad}));
- // Splitting is asynchronous so we should wait after each insert
- // for autosplitting to happen
- waitForOngoingChunkSplits(st);
- }
+ // Split all the other chunks, and an extra chunk. We need the extra chunk to compensate for
+ // the fact that the chunk differ resets the highest chunk's (i.e. the last-split-chunk's)
+ // data count on reload.
+ for (var i = 1; i < numChunks + 1; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
+ }
- jsTest.log("Inserts completed...");
+ jsTest.log("Setup collection...");
+ st.printShardingStatus(true);
+ var pad = (new Array(1024)).join(' ');
+ var approxSize = Object.bsonsize({_id: 0.0, pad: pad});
+
+ jsTest.log("Starting inserts of approx size: " + approxSize + "...");
+
+ var chunkSizeBytes = 1024 * 1024;
+
+    // We insert slightly more than the max number of docs per chunk, to test
+    // whether resetting the chunk size happens during reloads. If the size is
+    // reset, we'd expect fewer splits, since the first split would then
+    // disable further splits (statistically, since the decision is randomized).
+    // We choose 1.4 since split attempts happen about once every 1/5 chunkSize,
+    // and we want to be sure we definitely get a split attempt at a full chunk.
+ var insertsForSplit = Math.ceil((chunkSizeBytes * 1.4) / approxSize);
+ var totalInserts = insertsForSplit * numChunks;
+
+ printjson({
+ chunkSizeBytes: chunkSizeBytes,
+ insertsForSplit: insertsForSplit,
+ totalInserts: totalInserts
+ });
+
+ // Insert enough docs to trigger splits into all chunks
+ for (var i = 0; i < totalInserts; i++) {
+ assert.writeOK(coll.insert({_id: i % numChunks + (i / totalInserts), pad: pad}));
+ // Splitting is asynchronous so we should wait after each insert
+ // for autosplitting to happen
+ waitForOngoingChunkSplits(st);
+ }
- st.printShardingStatus(true);
- printjson(coll.stats());
+ jsTest.log("Inserts completed...");
- // Check that all chunks (except the two extreme chunks)
- // have been split at least once + 1 extra chunk as reload buffer
- assert.gte(config.chunks.count({"ns": "foo.hashBar"}), numChunks * 2 + 3);
+ st.printShardingStatus(true);
+ printjson(coll.stats());
- jsTest.log("DONE!");
+    // Check that all chunks (except the two extreme chunks) have been split at
+    // least once, plus 1 extra chunk as a reload buffer.
+ assert.gte(config.chunks.count({"ns": "foo.hashBar"}), numChunks * 2 + 3);
- } else {
- jsTest.log("Disabled test in debug builds.");
- }
+ jsTest.log("DONE!");
- st.stop();
+} else {
+ jsTest.log("Disabled test in debug builds.");
+}
+st.stop();
})();
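A worked example of the sizing arithmetic above, assuming each padded document is roughly 1 KB (the
real value comes from Object.bsonsize); the numbers are illustrative only.

// Hypothetical numbers, not taken from an actual run.
var exampleApproxSize = 1024;  // assumed ~1 KB per padded document
var exampleInsertsForSplit = Math.ceil((1024 * 1024 * 1.4) / exampleApproxSize);
print("example insertsForSplit: " + exampleInsertsForSplit);    // 1434
print("example totalInserts: " + exampleInsertsForSplit * 10);  // 14340 for numChunks = 10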
diff --git a/jstests/sharding/autosplit_with_balancer.js b/jstests/sharding/autosplit_with_balancer.js
index 8720790596f..0372ca09b9a 100644
--- a/jstests/sharding/autosplit_with_balancer.js
+++ b/jstests/sharding/autosplit_with_balancer.js
@@ -1,166 +1,166 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2, mongos: 2, other: {enableAutoSplit: true}});
+var s = new ShardingTest({shards: 2, mongos: 2, other: {enableAutoSplit: true}});
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
- var bigString = "";
- while (bigString.length < 1024 * 50) {
- bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
- }
+var bigString = "";
+while (bigString.length < 1024 * 50) {
+ bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
+}
- var db = s.getDB("test");
- var coll = db.foo;
+var db = s.getDB("test");
+var coll = db.foo;
- var i = 0;
- for (var j = 0; j < 30; j++) {
- print("j:" + j + " : " + Date.timeFunc(function() {
- var bulk = coll.initializeUnorderedBulkOp();
- for (var k = 0; k < 100; k++) {
- bulk.insert({num: i, s: bigString});
- i++;
- }
- assert.writeOK(bulk.execute());
- }));
- }
+var i = 0;
+for (var j = 0; j < 30; j++) {
+ print("j:" + j + " : " + Date.timeFunc(function() {
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var k = 0; k < 100; k++) {
+ bulk.insert({num: i, s: bigString});
+ i++;
+ }
+ assert.writeOK(bulk.execute());
+ }));
+}
- s.startBalancer();
+s.startBalancer();
- let confirmBalancerSettings = function(expectedBalancerOn, expectedAutoSplitOn) {
- let configSettings = s.s.getDB('config').settings;
+let confirmBalancerSettings = function(expectedBalancerOn, expectedAutoSplitOn) {
+ let configSettings = s.s.getDB('config').settings;
- let balancerSettings = configSettings.findOne({_id: 'balancer'});
- assert.neq(null, balancerSettings);
- assert.eq(expectedBalancerOn, !balancerSettings.stopped);
- assert.eq(expectedBalancerOn, balancerSettings.mode == 'full');
+ let balancerSettings = configSettings.findOne({_id: 'balancer'});
+ assert.neq(null, balancerSettings);
+ assert.eq(expectedBalancerOn, !balancerSettings.stopped);
+ assert.eq(expectedBalancerOn, balancerSettings.mode == 'full');
- let autoSplitSettings = configSettings.findOne({_id: 'autosplit'});
- assert.neq(null, autoSplitSettings);
- assert.eq(expectedAutoSplitOn, autoSplitSettings.enabled);
- };
+ let autoSplitSettings = configSettings.findOne({_id: 'autosplit'});
+ assert.neq(null, autoSplitSettings);
+ assert.eq(expectedAutoSplitOn, autoSplitSettings.enabled);
+};
- confirmBalancerSettings(true, true);
+confirmBalancerSettings(true, true);
- assert.eq(i, j * 100, "setup");
+assert.eq(i, j * 100, "setup");
- // Until SERVER-9715 is fixed, the sync command must be run on a diff connection
- new Mongo(s.s.host).adminCommand("connpoolsync");
+// Until SERVER-9715 is fixed, the sync command must be run on a different connection.
+new Mongo(s.s.host).adminCommand("connpoolsync");
- print("done inserting data");
+print("done inserting data");
- print("datasize: " +
- tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
- s.printChunks();
+print("datasize: " +
+ tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
+s.printChunks();
- var counta, countb;
+var counta, countb;
- function doCountsGlobal() {
- counta = s._connections[0].getDB("test").foo.count();
- countb = s._connections[1].getDB("test").foo.count();
- return counta + countb;
- }
+function doCountsGlobal() {
+ counta = s._connections[0].getDB("test").foo.count();
+ countb = s._connections[1].getDB("test").foo.count();
+ return counta + countb;
+}
- // Wait for the chunks to distribute
- assert.soon(function() {
- doCountsGlobal();
- print("Counts: " + counta + countb);
+// Wait for the chunks to distribute
+assert.soon(function() {
+ doCountsGlobal();
+ print("Counts: " + counta + countb);
- return counta > 0 && countb > 0;
- });
+ return counta > 0 && countb > 0;
+});
- print("checkpoint B");
+print("checkpoint B");
- var missing = [];
+var missing = [];
- for (i = 0; i < j * 100; i++) {
- var x = coll.findOne({num: i});
+for (i = 0; i < j * 100; i++) {
+ var x = coll.findOne({num: i});
+ if (!x) {
+ missing.push(i);
+ print("can't find: " + i);
+ sleep(5000);
+ x = coll.findOne({num: i});
if (!x) {
- missing.push(i);
- print("can't find: " + i);
- sleep(5000);
- x = coll.findOne({num: i});
- if (!x) {
- print("still can't find: " + i);
-
- for (var zzz = 0; zzz < s._connections.length; zzz++) {
- if (s._connections[zzz].getDB("test").foo.findOne({num: i})) {
- print("found on wrong server: " + s._connections[zzz]);
- }
+ print("still can't find: " + i);
+
+ for (var zzz = 0; zzz < s._connections.length; zzz++) {
+ if (s._connections[zzz].getDB("test").foo.findOne({num: i})) {
+ print("found on wrong server: " + s._connections[zzz]);
}
}
}
}
-
- s.printChangeLog();
-
- print("missing: " + tojson(missing));
- assert.soon(function(z) {
- return doCountsGlobal() == j * 100;
- }, "from each a:" + counta + " b:" + countb + " i:" + i);
- print("checkpoint B.a");
- s.printChunks();
- assert.eq(j * 100, coll.find().limit(100000000).itcount(), "itcount A");
- assert.eq(j * 100, counta + countb, "from each 2 a:" + counta + " b:" + countb + " i:" + i);
- assert(missing.length == 0, "missing : " + tojson(missing));
-
- print("checkpoint C");
-
- assert(Array.unique(s.config.chunks.find({ns: 'test.foo'}).toArray().map(function(z) {
- return z.shard;
- })).length == 2,
- "should be using both servers");
-
- for (i = 0; i < 100; i++) {
- cursor = coll.find().batchSize(5);
- cursor.next();
- cursor.close();
- }
-
- print("checkpoint D");
-
- // Test non-sharded cursors
- db = s.getDB("test2");
- var t = db.foobar;
- for (i = 0; i < 100; i++)
- t.save({_id: i});
- for (i = 0; i < 100; i++) {
- var cursor = t.find().batchSize(2);
- cursor.next();
- assert.lt(0, db.serverStatus().metrics.cursor.open.total, "cursor1");
- cursor.close();
- }
-
- assert.eq(0, db.serverStatus().metrics.cursor.open.total, "cursor2");
-
- // Stop the balancer, otherwise it may grab some connections from the pool for itself
- s.stopBalancer();
-
- confirmBalancerSettings(false, false);
-
- print("checkpoint E");
-
- assert(t.findOne(), "check close 0");
-
- for (i = 0; i < 20; i++) {
- var conn = new Mongo(db.getMongo().host);
- var temp2 = conn.getDB("test2").foobar;
- assert.eq(conn._fullNameSpace, t._fullNameSpace, "check close 1");
- assert(temp2.findOne(), "check close 2");
- conn.close();
- }
-
- print("checkpoint F");
-
- assert.throws(function() {
- s.getDB("test").foo.find().sort({s: 1}).forEach(function(x) {
- printjsononeline(x.substring(0, x.length > 30 ? 30 : x.length));
- });
+}
+
+s.printChangeLog();
+
+print("missing: " + tojson(missing));
+assert.soon(function(z) {
+ return doCountsGlobal() == j * 100;
+}, "from each a:" + counta + " b:" + countb + " i:" + i);
+print("checkpoint B.a");
+s.printChunks();
+assert.eq(j * 100, coll.find().limit(100000000).itcount(), "itcount A");
+assert.eq(j * 100, counta + countb, "from each 2 a:" + counta + " b:" + countb + " i:" + i);
+assert(missing.length == 0, "missing : " + tojson(missing));
+
+print("checkpoint C");
+
+assert(Array.unique(s.config.chunks.find({ns: 'test.foo'}).toArray().map(function(z) {
+ return z.shard;
+ })).length == 2,
+ "should be using both servers");
+
+for (i = 0; i < 100; i++) {
+ cursor = coll.find().batchSize(5);
+ cursor.next();
+ cursor.close();
+}
+
+print("checkpoint D");
+
+// Test non-sharded cursors
+db = s.getDB("test2");
+var t = db.foobar;
+for (i = 0; i < 100; i++)
+ t.save({_id: i});
+for (i = 0; i < 100; i++) {
+ var cursor = t.find().batchSize(2);
+ cursor.next();
+ assert.lt(0, db.serverStatus().metrics.cursor.open.total, "cursor1");
+ cursor.close();
+}
+
+assert.eq(0, db.serverStatus().metrics.cursor.open.total, "cursor2");
+
+// Stop the balancer; otherwise it may grab some connections from the pool for itself.
+s.stopBalancer();
+
+confirmBalancerSettings(false, false);
+
+print("checkpoint E");
+
+assert(t.findOne(), "check close 0");
+
+for (i = 0; i < 20; i++) {
+ var conn = new Mongo(db.getMongo().host);
+ var temp2 = conn.getDB("test2").foobar;
+ assert.eq(conn._fullNameSpace, t._fullNameSpace, "check close 1");
+ assert(temp2.findOne(), "check close 2");
+ conn.close();
+}
+
+print("checkpoint F");
+
+assert.throws(function() {
+ s.getDB("test").foo.find().sort({s: 1}).forEach(function(x) {
+ printjsononeline(x.substring(0, x.length > 30 ? 30 : x.length));
});
+});
- print("checkpoint G");
+print("checkpoint G");
- s.stop();
+s.stop();
})();
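The confirmBalancerSettings() helper above reads two documents from config.settings; the hedged
sketch below shows how to inspect them directly, assuming a mongos connection such as s.s.

// Sketch: dump the balancer and autosplit settings documents.
var settingsColl = s.s.getDB('config').settings;
printjson(settingsColl.findOne({_id: 'balancer'}));
printjson(settingsColl.findOne({_id: 'autosplit'}));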
diff --git a/jstests/sharding/balance_repl.js b/jstests/sharding/balance_repl.js
index 02e2c54bbc0..fdc0d15509c 100644
--- a/jstests/sharding/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -3,67 +3,67 @@
* secondaryThrottle is used.
*/
(function() {
- 'use strict';
+'use strict';
- // The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
- // from stepping down during migrations on slow evergreen builders.
- var s = new ShardingTest({
- shards: 2,
- other: {
- chunkSize: 1,
- rs0: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- },
- rs1: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- }
+// The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
+// from stepping down during migrations on slow evergreen builders.
+var s = new ShardingTest({
+ shards: 2,
+ other: {
+ chunkSize: 1,
+ rs0: {
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
+ },
+ rs1: {
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
}
- });
-
- var bulk = s.s0.getDB('TestDB').TestColl.initializeUnorderedBulkOp();
- for (var i = 0; i < 2100; i++) {
- bulk.insert({_id: i, x: i});
}
- assert.writeOK(bulk.execute());
+});
- assert.commandWorked(s.s0.adminCommand({enablesharding: 'TestDB'}));
- s.ensurePrimaryShard('TestDB', s.shard0.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: 'TestDB.TestColl', key: {_id: 1}}));
+var bulk = s.s0.getDB('TestDB').TestColl.initializeUnorderedBulkOp();
+for (var i = 0; i < 2100; i++) {
+ bulk.insert({_id: i, x: i});
+}
+assert.writeOK(bulk.execute());
- for (i = 0; i < 20; i++) {
- assert.commandWorked(s.s0.adminCommand({split: 'TestDB.TestColl', middle: {_id: i * 100}}));
- }
+assert.commandWorked(s.s0.adminCommand({enablesharding: 'TestDB'}));
+s.ensurePrimaryShard('TestDB', s.shard0.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: 'TestDB.TestColl', key: {_id: 1}}));
- var collPrimary = (new Mongo(s.s0.host)).getDB('TestDB').TestColl;
- assert.eq(2100, collPrimary.find().itcount());
+for (i = 0; i < 20; i++) {
+ assert.commandWorked(s.s0.adminCommand({split: 'TestDB.TestColl', middle: {_id: i * 100}}));
+}
- var collSlaveOk = (new Mongo(s.s0.host)).getDB('TestDB').TestColl;
- collSlaveOk.setSlaveOk();
- assert.eq(2100, collSlaveOk.find().itcount());
+var collPrimary = (new Mongo(s.s0.host)).getDB('TestDB').TestColl;
+assert.eq(2100, collPrimary.find().itcount());
- for (i = 0; i < 20; i++) {
- // Needs to waitForDelete because we'll be performing a slaveOk query, and secondaries don't
- // have a chunk manager so it doesn't know how to filter out docs it doesn't own.
- assert.commandWorked(s.s0.adminCommand({
- moveChunk: 'TestDB.TestColl',
- find: {_id: i * 100},
- to: s.shard1.shardName,
- _secondaryThrottle: true,
- writeConcern: {w: 2},
- _waitForDelete: true
- }));
+var collSlaveOk = (new Mongo(s.s0.host)).getDB('TestDB').TestColl;
+collSlaveOk.setSlaveOk();
+assert.eq(2100, collSlaveOk.find().itcount());
- assert.eq(2100,
- collSlaveOk.find().itcount(),
- 'Incorrect count when reading from secondary. Count from primary is ' +
- collPrimary.find().itcount());
- }
+for (i = 0; i < 20; i++) {
+ // Needs to waitForDelete because we'll be performing a slaveOk query, and secondaries don't
+ // have a chunk manager so it doesn't know how to filter out docs it doesn't own.
+ assert.commandWorked(s.s0.adminCommand({
+ moveChunk: 'TestDB.TestColl',
+ find: {_id: i * 100},
+ to: s.shard1.shardName,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ _waitForDelete: true
+ }));
+
+ assert.eq(2100,
+ collSlaveOk.find().itcount(),
+ 'Incorrect count when reading from secondary. Count from primary is ' +
+ collPrimary.find().itcount());
+}
- s.stop();
+s.stop();
}());
diff --git a/jstests/sharding/balancer_shell_commands.js b/jstests/sharding/balancer_shell_commands.js
index 48c5c7c489a..f6d8faf1e99 100644
--- a/jstests/sharding/balancer_shell_commands.js
+++ b/jstests/sharding/balancer_shell_commands.js
@@ -7,20 +7,20 @@
var db;
(function() {
- "use strict";
- var shardingTest = new ShardingTest(
- {name: "shell_commands", shards: 1, mongos: 1, other: {enableBalancer: true}});
- db = shardingTest.getDB("test");
+"use strict";
+var shardingTest =
+ new ShardingTest({name: "shell_commands", shards: 1, mongos: 1, other: {enableBalancer: true}});
+db = shardingTest.getDB("test");
- assert(sh.getBalancerState(), "Balancer should have been enabled during cluster setup");
+assert(sh.getBalancerState(), "Balancer should have been enabled during cluster setup");
- // Test that the balancer can be disabled
- sh.setBalancerState(false);
- assert(!sh.getBalancerState(), "Failed to disable balancer");
+// Test that the balancer can be disabled
+sh.setBalancerState(false);
+assert(!sh.getBalancerState(), "Failed to disable balancer");
- // Test that the balancer can be re-enabled
- sh.setBalancerState(true);
- assert(sh.getBalancerState(), "Failed to re-enable balancer");
+// Test that the balancer can be re-enabled
+sh.setBalancerState(true);
+assert(sh.getBalancerState(), "Failed to re-enable balancer");
- shardingTest.stop();
+shardingTest.stop();
})();
diff --git a/jstests/sharding/balancer_window.js b/jstests/sharding/balancer_window.js
index 422085a537a..ee2d55b1345 100644
--- a/jstests/sharding/balancer_window.js
+++ b/jstests/sharding/balancer_window.js
@@ -11,83 +11,81 @@
* sure that some chunks are moved.
*/
(function() {
- 'use strict';
+'use strict';
- /**
- * Simple representation for wall clock time. Hour and minutes should be integers.
- */
- var HourAndMinute = function(hour, minutes) {
- return {
- /**
- * Returns a new HourAndMinute object with the amount of hours added.
- * Amount can be negative.
- */
- addHour: function(amount) {
- var newHour = (hour + amount) % 24;
- if (newHour < 0) {
- newHour += 24;
- }
+/**
+ * Simple representation for wall clock time. Hour and minutes should be integers.
+ */
+var HourAndMinute = function(hour, minutes) {
+ return {
+ /**
+ * Returns a new HourAndMinute object with the amount of hours added.
+ * Amount can be negative.
+ */
+ addHour: function(amount) {
+ var newHour = (hour + amount) % 24;
+ if (newHour < 0) {
+ newHour += 24;
+ }
- return new HourAndMinute(newHour, minutes);
- },
+ return new HourAndMinute(newHour, minutes);
+ },
- /**
- * Returns a string representation that is compatible with the format for the balancer
- * window settings.
- */
- toString: function() {
- var minStr = (minutes < 10) ? ('0' + minutes) : ('' + minutes);
- var hourStr = (hour < 10) ? ('0' + hour) : ('' + hour);
- return hourStr + ':' + minStr;
- }
- };
+ /**
+ * Returns a string representation that is compatible with the format for the balancer
+ * window settings.
+ */
+ toString: function() {
+ var minStr = (minutes < 10) ? ('0' + minutes) : ('' + minutes);
+ var hourStr = (hour < 10) ? ('0' + hour) : ('' + hour);
+ return hourStr + ':' + minStr;
+ }
};
+};
- var st = new ShardingTest({shards: 2});
- var configDB = st.s.getDB('config');
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
+var st = new ShardingTest({shards: 2});
+var configDB = st.s.getDB('config');
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
- for (var x = 0; x < 150; x += 10) {
- configDB.adminCommand({split: 'test.user', middle: {_id: x}});
- }
+for (var x = 0; x < 150; x += 10) {
+ configDB.adminCommand({split: 'test.user', middle: {_id: x}});
+}
- var shard0Chunks = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
+var shard0Chunks = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
- var startDate = new Date();
- var hourMinStart = new HourAndMinute(startDate.getHours(), startDate.getMinutes());
- assert.writeOK(configDB.settings.update({_id: 'balancer'},
- {
- $set: {
- activeWindow: {
- start: hourMinStart.addHour(-2).toString(),
- stop: hourMinStart.addHour(-1).toString()
- },
- }
- },
- true));
- st.startBalancer();
+var startDate = new Date();
+var hourMinStart = new HourAndMinute(startDate.getHours(), startDate.getMinutes());
+assert.writeOK(configDB.settings.update({_id: 'balancer'},
+ {
+ $set: {
+ activeWindow: {
+ start: hourMinStart.addHour(-2).toString(),
+ stop: hourMinStart.addHour(-1).toString()
+ },
+ }
+ },
+ true));
+st.startBalancer();
- st.waitForBalancer(true, 60000);
+st.waitForBalancer(true, 60000);
- var shard0ChunksAfter =
- configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
- assert.eq(shard0Chunks, shard0ChunksAfter);
+var shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
+assert.eq(shard0Chunks, shard0ChunksAfter);
- assert.writeOK(configDB.settings.update(
- {_id: 'balancer'},
- {
- $set: {
- activeWindow:
- {start: hourMinStart.toString(), stop: hourMinStart.addHour(2).toString()}
- }
- },
- true));
+assert.writeOK(configDB.settings.update(
+ {_id: 'balancer'},
+ {
+ $set: {
+ activeWindow: {start: hourMinStart.toString(), stop: hourMinStart.addHour(2).toString()}
+ }
+ },
+ true));
- st.waitForBalancer(true, 60000);
+st.waitForBalancer(true, 60000);
- shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
- assert.neq(shard0Chunks, shard0ChunksAfter);
+shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
+assert.neq(shard0Chunks, shard0ChunksAfter);
- st.stop();
+st.stop();
})();
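
The balancer window configured above lives in the activeWindow field of the 'balancer' document in config.settings; the HourAndMinute helper only exists to produce the 'HH:MM' strings that field expects. A stripped-down sketch of the knob itself, assuming `st` is a running ShardingTest and the hard-coded times are placeholders:

// Sketch: restrict balancing to a wall-clock window by upserting the
// activeWindow field, using the same update shape as balancer_window.js.
var configDB = st.s.getDB('config');
assert.writeOK(configDB.settings.update(
    {_id: 'balancer'},
    {$set: {activeWindow: {start: '01:00', stop: '03:00'}}},
    true /* upsert */));

// The test then compares per-shard config.chunks counts before and after
// st.waitForBalancer() to decide whether migrations ran inside the window.
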
diff --git a/jstests/sharding/basic_drop_coll.js b/jstests/sharding/basic_drop_coll.js
index 92a37102123..b7fda388e34 100644
--- a/jstests/sharding/basic_drop_coll.js
+++ b/jstests/sharding/basic_drop_coll.js
@@ -3,65 +3,64 @@
* cleaned up properly.
*/
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2});
- var testDB = st.s.getDB('test');
+var testDB = st.s.getDB('test');
- // Test dropping an unsharded collection.
+// Test dropping an unsharded collection.
- assert.writeOK(testDB.bar.insert({x: 1}));
- assert.neq(null, testDB.bar.findOne({x: 1}));
+assert.writeOK(testDB.bar.insert({x: 1}));
+assert.neq(null, testDB.bar.findOne({x: 1}));
- assert.commandWorked(testDB.runCommand({drop: 'bar'}));
- assert.eq(null, testDB.bar.findOne({x: 1}));
+assert.commandWorked(testDB.runCommand({drop: 'bar'}));
+assert.eq(null, testDB.bar.findOne({x: 1}));
- // Test dropping a sharded collection.
+// Test dropping a sharded collection.
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
- st.s.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
- st.s.adminCommand({split: 'test.user', middle: {_id: 0}});
- assert.commandWorked(
- st.s.adminCommand({moveChunk: 'test.user', find: {_id: 0}, to: st.shard1.shardName}));
- assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: 'foo'}));
- assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: 'test.user', min: {_id: 0}, max: {_id: 10}, zone: 'foo'}));
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
+st.s.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
+st.s.adminCommand({split: 'test.user', middle: {_id: 0}});
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: 'test.user', find: {_id: 0}, to: st.shard1.shardName}));
+assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: 'foo'}));
+assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: 'test.user', min: {_id: 0}, max: {_id: 10}, zone: 'foo'}));
- assert.writeOK(testDB.user.insert({_id: 10}));
- assert.writeOK(testDB.user.insert({_id: -10}));
+assert.writeOK(testDB.user.insert({_id: 10}));
+assert.writeOK(testDB.user.insert({_id: -10}));
- assert.neq(null, st.shard0.getDB('test').user.findOne({_id: -10}));
- assert.neq(null, st.shard1.getDB('test').user.findOne({_id: 10}));
+assert.neq(null, st.shard0.getDB('test').user.findOne({_id: -10}));
+assert.neq(null, st.shard1.getDB('test').user.findOne({_id: 10}));
- var configDB = st.s.getDB('config');
- var collDoc = configDB.collections.findOne({_id: 'test.user'});
+var configDB = st.s.getDB('config');
+var collDoc = configDB.collections.findOne({_id: 'test.user'});
- assert(!collDoc.dropped);
+assert(!collDoc.dropped);
- assert.eq(2, configDB.chunks.count({ns: 'test.user'}));
- assert.eq(1, configDB.tags.count({ns: 'test.user'}));
+assert.eq(2, configDB.chunks.count({ns: 'test.user'}));
+assert.eq(1, configDB.tags.count({ns: 'test.user'}));
- assert.commandWorked(testDB.runCommand({drop: 'user'}));
+assert.commandWorked(testDB.runCommand({drop: 'user'}));
- assert.eq(null, st.shard0.getDB('test').user.findOne());
- assert.eq(null, st.shard1.getDB('test').user.findOne());
+assert.eq(null, st.shard0.getDB('test').user.findOne());
+assert.eq(null, st.shard1.getDB('test').user.findOne());
- // Call drop again to verify that the command is idempotent.
- assert.commandWorked(testDB.runCommand({drop: 'user'}));
+// Call drop again to verify that the command is idempotent.
+assert.commandWorked(testDB.runCommand({drop: 'user'}));
- // Check for the collection with majority RC to verify that the write to remove the collection
- // document from the catalog has propagated to the majority snapshot.
- var findColl = configDB.runCommand(
- {find: 'collections', filter: {_id: 'test.user'}, readConcern: {'level': 'majority'}});
- collDoc = findColl.cursor.firstBatch[0];
+// Check for the collection with majority RC to verify that the write to remove the collection
+// document from the catalog has propagated to the majority snapshot.
+var findColl = configDB.runCommand(
+ {find: 'collections', filter: {_id: 'test.user'}, readConcern: {'level': 'majority'}});
+collDoc = findColl.cursor.firstBatch[0];
- assert(collDoc.dropped);
+assert(collDoc.dropped);
- assert.eq(0, configDB.chunks.count({ns: 'test.user'}));
- assert.eq(0, configDB.tags.count({ns: 'test.user'}));
-
- st.stop();
+assert.eq(0, configDB.chunks.count({ns: 'test.user'}));
+assert.eq(0, configDB.tags.count({ns: 'test.user'}));
+st.stop();
})();
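
Beyond the user-visible data, dropping a sharded collection has to clean up its routing metadata, which is what the second half of the test above inspects. A condensed sketch of those checks, assuming `st` is the ShardingTest and test.user has just been dropped as in the file:

// Sketch: verify the config metadata left behind by a dropped collection.
var configDB = st.s.getDB('config');

// Read the catalog entry with majority read concern so the check observes
// the replicated write that flagged the collection as dropped.
var findColl = configDB.runCommand(
    {find: 'collections', filter: {_id: 'test.user'}, readConcern: {level: 'majority'}});
var collDoc = findColl.cursor.firstBatch[0];
assert(collDoc.dropped);

// No chunk documents or zone (tag) ranges should remain for the namespace.
assert.eq(0, configDB.chunks.count({ns: 'test.user'}));
assert.eq(0, configDB.tags.count({ns: 'test.user'}));
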
diff --git a/jstests/sharding/basic_merge.js b/jstests/sharding/basic_merge.js
index 540a0f2355b..9bc75636e5d 100644
--- a/jstests/sharding/basic_merge.js
+++ b/jstests/sharding/basic_merge.js
@@ -2,65 +2,63 @@
* Perform basic tests for the mergeChunks command against mongos.
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({mongos: 2, shards: 2, other: {chunkSize: 1}});
- var mongos = st.s0;
+var st = new ShardingTest({mongos: 2, shards: 2, other: {chunkSize: 1}});
+var mongos = st.s0;
- var kDbName = 'db';
+var kDbName = 'db';
- var shard0 = st.shard0.shardName;
- var shard1 = st.shard1.shardName;
+var shard0 = st.shard0.shardName;
+var shard1 = st.shard1.shardName;
- var ns = kDbName + ".foo";
+var ns = kDbName + ".foo";
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, shard0);
- // Fail if invalid namespace.
- assert.commandFailed(mongos.adminCommand({mergeChunks: '', bounds: [{a: -1}, {a: 1}]}));
+// Fail if invalid namespace.
+assert.commandFailed(mongos.adminCommand({mergeChunks: '', bounds: [{a: -1}, {a: 1}]}));
- // Fail if database does not exist.
- assert.commandFailed(mongos.adminCommand({mergeChunks: 'a.b', bounds: [{a: -1}, {a: 1}]}));
+// Fail if database does not exist.
+assert.commandFailed(mongos.adminCommand({mergeChunks: 'a.b', bounds: [{a: -1}, {a: 1}]}));
- // Fail if collection is unsharded.
- assert.commandFailed(
- mongos.adminCommand({mergeChunks: kDbName + '.xxx', bounds: [{a: -1}, {a: 1}]}));
+// Fail if collection is unsharded.
+assert.commandFailed(
+ mongos.adminCommand({mergeChunks: kDbName + '.xxx', bounds: [{a: -1}, {a: 1}]}));
- // Errors if either bounds is not a valid shard key.
- assert.eq(0, mongos.getDB('config').chunks.count({ns: ns}));
+// Errors if either bounds is not a valid shard key.
+assert.eq(0, mongos.getDB('config').chunks.count({ns: ns}));
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {a: 1}}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns}));
- assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 0}}));
- assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: -1}}));
- assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 1}}));
+assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {a: 1}}));
+assert.eq(1, mongos.getDB('config').chunks.count({ns: ns}));
+assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 0}}));
+assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: -1}}));
+assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 1}}));
- // Fail if a wrong key
- assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{x: -1}, {a: 1}]}));
- assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {x: 1}]}));
+// Fail if a bound uses the wrong shard key
+assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{x: -1}, {a: 1}]}));
+assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {x: 1}]}));
- // Fail if chunks do not contain a bound
- assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 10}]}));
+// Fail if chunks do not contain a bound
+assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 10}]}));
- // Fail if chunks to be merged are not contiguous on the shard
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: ns, bounds: [{a: -1}, {a: 0}], to: shard1, _waitForDelete: true}));
- assert.commandFailed(
- st.s0.adminCommand({mergeChunks: ns, bounds: [{a: MinKey()}, {a: MaxKey()}]}));
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: ns, bounds: [{a: -1}, {a: 0}], to: shard0, _waitForDelete: true}));
+// Fail if chunks to be merged are not contiguous on the shard
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: ns, bounds: [{a: -1}, {a: 0}], to: shard1, _waitForDelete: true}));
+assert.commandFailed(st.s0.adminCommand({mergeChunks: ns, bounds: [{a: MinKey()}, {a: MaxKey()}]}));
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: ns, bounds: [{a: -1}, {a: 0}], to: shard0, _waitForDelete: true}));
- // Validate metadata
- // There are four chunks [{$minKey, -1}, {-1, 0}, {0, 1}, {1, $maxKey}]
- assert.eq(4, st.s0.getDB('config').chunks.count({ns: ns}));
+// Validate metadata
+// There are four chunks [{$minKey, -1}, {-1, 0}, {0, 1}, {1, $maxKey}]
+assert.eq(4, st.s0.getDB('config').chunks.count({ns: ns}));
- // Use the second (stale) mongos to invoke the mergeChunks command so we can exercise the stale
- // shard version refresh logic
- assert.commandWorked(st.s1.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 1}]}));
- assert.eq(3, mongos.getDB('config').chunks.count({ns: ns}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, min: {a: -1}, max: {a: 1}}));
-
- st.stop();
+// Use the second (stale) mongos to invoke the mergeChunks command so we can exercise the stale
+// shard version refresh logic
+assert.commandWorked(st.s1.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 1}]}));
+assert.eq(3, mongos.getDB('config').chunks.count({ns: ns}));
+assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, min: {a: -1}, max: {a: 1}}));
+st.stop();
})();
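
Most of the file covers rejection cases; the successful path is simply splitting a range into contiguous chunks that live on one shard and then collapsing them with mergeChunks. A small sketch under the same setup as the test (sharded db.foo with shard key {a: 1}, `mongos` pointing at st.s0):

// Sketch: create contiguous chunks and merge them back into one.
assert.commandWorked(mongos.adminCommand({split: 'db.foo', middle: {a: -1}}));
assert.commandWorked(mongos.adminCommand({split: 'db.foo', middle: {a: 1}}));

// mergeChunks takes the outer bounds of the contiguous chunks to collapse;
// all chunks in that range must currently live on the same shard.
assert.commandWorked(mongos.adminCommand({mergeChunks: 'db.foo', bounds: [{a: -1}, {a: 1}]}));

// The config metadata now shows a single chunk spanning [{a: -1}, {a: 1}).
assert.eq(1, mongos.getDB('config').chunks.count({ns: 'db.foo', min: {a: -1}, max: {a: 1}}));
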
diff --git a/jstests/sharding/basic_sharding_params.js b/jstests/sharding/basic_sharding_params.js
index c1076ff8941..5aea800c56e 100644
--- a/jstests/sharding/basic_sharding_params.js
+++ b/jstests/sharding/basic_sharding_params.js
@@ -3,80 +3,80 @@
*/
(function() {
- 'use strict';
+'use strict';
- function shardingTestUsingObjects() {
- var st = new ShardingTest({
- mongos: {s0: {verbose: 6}, s1: {verbose: 5}},
- config: {c0: {verbose: 4}},
- shards: {d0: {verbose: 3}, rs1: {nodes: {d0: {verbose: 2}, a1: {verbose: 1}}}}
- });
+function shardingTestUsingObjects() {
+ var st = new ShardingTest({
+ mongos: {s0: {verbose: 6}, s1: {verbose: 5}},
+ config: {c0: {verbose: 4}},
+ shards: {d0: {verbose: 3}, rs1: {nodes: {d0: {verbose: 2}, a1: {verbose: 1}}}}
+ });
- var s0 = st.s0;
- assert.eq(s0, st._mongos[0]);
+ var s0 = st.s0;
+ assert.eq(s0, st._mongos[0]);
- var s1 = st.s1;
- assert.eq(s1, st._mongos[1]);
+ var s1 = st.s1;
+ assert.eq(s1, st._mongos[1]);
- var c0 = st.c0;
- assert.eq(c0, st._configServers[0]);
+ var c0 = st.c0;
+ assert.eq(c0, st._configServers[0]);
- var rs0 = st.rs0;
- assert.eq(rs0, st._rsObjects[0]);
+ var rs0 = st.rs0;
+ assert.eq(rs0, st._rsObjects[0]);
- var rs1 = st.rs1;
- assert.eq(rs1, st._rsObjects[1]);
+ var rs1 = st.rs1;
+ assert.eq(rs1, st._rsObjects[1]);
- var rs0_d0 = rs0.nodes[0];
+ var rs0_d0 = rs0.nodes[0];
- var rs1_d0 = rs1.nodes[0];
- var rs1_a1 = rs1.nodes[1];
+ var rs1_d0 = rs1.nodes[0];
+ var rs1_a1 = rs1.nodes[1];
- assert(s0.commandLine.hasOwnProperty("vvvvvv"));
- assert(s1.commandLine.hasOwnProperty("vvvvv"));
- assert(c0.commandLine.hasOwnProperty("vvvv"));
- assert(rs0_d0.commandLine.hasOwnProperty("vvv"));
- assert(rs1_d0.commandLine.hasOwnProperty("vv"));
- assert(rs1_a1.commandLine.hasOwnProperty("v"));
+ assert(s0.commandLine.hasOwnProperty("vvvvvv"));
+ assert(s1.commandLine.hasOwnProperty("vvvvv"));
+ assert(c0.commandLine.hasOwnProperty("vvvv"));
+ assert(rs0_d0.commandLine.hasOwnProperty("vvv"));
+ assert(rs1_d0.commandLine.hasOwnProperty("vv"));
+ assert(rs1_a1.commandLine.hasOwnProperty("v"));
- st.stop();
- }
+ st.stop();
+}
- function shardingTestUsingArrays() {
- var st = new ShardingTest({
- mongos: [{verbose: 5}, {verbose: 4}],
- config: [{verbose: 3}],
- shards: [{verbose: 2}, {verbose: 1}]
- });
+function shardingTestUsingArrays() {
+ var st = new ShardingTest({
+ mongos: [{verbose: 5}, {verbose: 4}],
+ config: [{verbose: 3}],
+ shards: [{verbose: 2}, {verbose: 1}]
+ });
- var s0 = st.s0;
- assert.eq(s0, st._mongos[0]);
+ var s0 = st.s0;
+ assert.eq(s0, st._mongos[0]);
- var s1 = st.s1;
- assert.eq(s1, st._mongos[1]);
+ var s1 = st.s1;
+ assert.eq(s1, st._mongos[1]);
- var c0 = st.c0;
- assert.eq(c0, st._configServers[0]);
+ var c0 = st.c0;
+ assert.eq(c0, st._configServers[0]);
- var rs0 = st.rs0;
- assert.eq(rs0, st._rsObjects[0]);
+ var rs0 = st.rs0;
+ assert.eq(rs0, st._rsObjects[0]);
- var rs1 = st.rs1;
- assert.eq(rs1, st._rsObjects[1]);
+ var rs1 = st.rs1;
+ assert.eq(rs1, st._rsObjects[1]);
- var rs0_d0 = rs0.nodes[0];
+ var rs0_d0 = rs0.nodes[0];
- var rs1_d0 = rs1.nodes[0];
+ var rs1_d0 = rs1.nodes[0];
- assert(s0.commandLine.hasOwnProperty("vvvvv"));
- assert(s1.commandLine.hasOwnProperty("vvvv"));
- assert(c0.commandLine.hasOwnProperty("vvv"));
- assert(rs0_d0.commandLine.hasOwnProperty("vv"));
- assert(rs1_d0.commandLine.hasOwnProperty("v"));
+ assert(s0.commandLine.hasOwnProperty("vvvvv"));
+ assert(s1.commandLine.hasOwnProperty("vvvv"));
+ assert(c0.commandLine.hasOwnProperty("vvv"));
+ assert(rs0_d0.commandLine.hasOwnProperty("vv"));
+ assert(rs1_d0.commandLine.hasOwnProperty("v"));
- st.stop();
- }
+ st.stop();
+}
- shardingTestUsingObjects();
- shardingTestUsingArrays();
+shardingTestUsingObjects();
+shardingTestUsingArrays();
})();
diff --git a/jstests/sharding/basic_split.js b/jstests/sharding/basic_split.js
index cb86e2d34b0..00a442ac353 100644
--- a/jstests/sharding/basic_split.js
+++ b/jstests/sharding/basic_split.js
@@ -2,106 +2,103 @@
* Perform basic tests for the split command against mongos.
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({mongos: 2, shards: 2, other: {chunkSize: 1}});
- var configDB = st.s0.getDB('config');
+var st = new ShardingTest({mongos: 2, shards: 2, other: {chunkSize: 1}});
+var configDB = st.s0.getDB('config');
- var shard0 = st.shard0.shardName;
- var shard1 = st.shard1.shardName;
+var shard0 = st.shard0.shardName;
+var shard1 = st.shard1.shardName;
- // split on invalid ns.
- assert.commandFailed(configDB.adminCommand({split: 'user', key: {_id: 1}}));
+// split on invalid ns.
+assert.commandFailed(configDB.adminCommand({split: 'user', key: {_id: 1}}));
- // split on unsharded collection (db is not sharding enabled).
- assert.commandFailed(configDB.adminCommand({split: 'test.user', key: {_id: 1}}));
+// split on unsharded collection (db is not sharding enabled).
+assert.commandFailed(configDB.adminCommand({split: 'test.user', key: {_id: 1}}));
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', shard0);
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', shard0);
- // split on unsharded collection (db is sharding enabled).
- assert.commandFailed(configDB.adminCommand({split: 'test.user', key: {_id: 1}}));
+// split on unsharded collection (db is sharding enabled).
+assert.commandFailed(configDB.adminCommand({split: 'test.user', key: {_id: 1}}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
- assert.eq(null, configDB.chunks.findOne({ns: 'test.user', min: {_id: 0}}));
+assert.eq(null, configDB.chunks.findOne({ns: 'test.user', min: {_id: 0}}));
- assert.commandWorked(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));
- assert.neq(null, configDB.chunks.findOne({ns: 'test.user', min: {_id: 0}}));
+assert.commandWorked(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));
+assert.neq(null, configDB.chunks.findOne({ns: 'test.user', min: {_id: 0}}));
- // Cannot split on existing chunk boundary.
- assert.commandFailed(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));
+// Cannot split on existing chunk boundary.
+assert.commandFailed(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));
- // Attempt to split on a value that is not the shard key.
- assert.commandFailed(configDB.adminCommand({split: 'test.user', middle: {x: 100}}));
- assert.commandFailed(configDB.adminCommand({split: 'test.user', find: {x: 100}}));
- assert.commandFailed(
- configDB.adminCommand({split: 'test.user', bounds: [{x: MinKey}, {x: MaxKey}]}));
+// Attempt to split on a value that is not the shard key.
+assert.commandFailed(configDB.adminCommand({split: 'test.user', middle: {x: 100}}));
+assert.commandFailed(configDB.adminCommand({split: 'test.user', find: {x: 100}}));
+assert.commandFailed(
+ configDB.adminCommand({split: 'test.user', bounds: [{x: MinKey}, {x: MaxKey}]}));
- // Insert documents large enough to fill up a chunk, but do it directly in the shard in order
- // to bypass the auto-split logic.
- var kiloDoc = new Array(1024).join('x');
- var testDB = st.rs0.getPrimary().getDB('test');
- var bulk = testDB.user.initializeUnorderedBulkOp();
- for (var x = -1200; x < 1200; x++) {
- bulk.insert({_id: x, val: kiloDoc});
- }
- assert.writeOK(bulk.execute());
+// Insert documents large enough to fill up a chunk, but do it directly in the shard in order
+// to bypass the auto-split logic.
+var kiloDoc = new Array(1024).join('x');
+var testDB = st.rs0.getPrimary().getDB('test');
+var bulk = testDB.user.initializeUnorderedBulkOp();
+for (var x = -1200; x < 1200; x++) {
+ bulk.insert({_id: x, val: kiloDoc});
+}
+assert.writeOK(bulk.execute());
- assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
+assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
- // Errors if bounds do not correspond to existing chunk boundaries.
- assert.commandFailed(
- configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: 1000}]}));
- assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
+// Errors if bounds do not correspond to existing chunk boundaries.
+assert.commandFailed(configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: 1000}]}));
+assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
- assert.commandWorked(
- configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: MaxKey}]}));
- assert.gt(configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount(), 1);
+assert.commandWorked(
+ configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: MaxKey}]}));
+assert.gt(configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount(), 1);
- assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$lt: {_id: 0}}}).itcount());
- assert.commandWorked(configDB.adminCommand({split: 'test.user', middle: {_id: -600}}));
- assert.gt(configDB.chunks.find({ns: 'test.user', min: {$lt: {_id: 0}}}).itcount(), 1);
+assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$lt: {_id: 0}}}).itcount());
+assert.commandWorked(configDB.adminCommand({split: 'test.user', middle: {_id: -600}}));
+assert.gt(configDB.chunks.find({ns: 'test.user', min: {$lt: {_id: 0}}}).itcount(), 1);
- // Mongos must refresh metadata if the chunk version does not match
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: 'test.user', find: {_id: -900}, to: shard1, _waitForDelete: true}));
- assert.commandWorked(st.s1.adminCommand({split: 'test.user', middle: {_id: -900}}));
- assert.commandWorked(st.s1.adminCommand(
- {moveChunk: 'test.user', find: {_id: -900}, to: shard0, _waitForDelete: true}));
- assert.commandWorked(st.s1.adminCommand(
- {moveChunk: 'test.user', find: {_id: -901}, to: shard0, _waitForDelete: true}));
- assert.eq(0, configDB.chunks.find({ns: 'test.user', shard: shard1}).itcount());
+// Mongos must refresh metadata if the chunk version does not match
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: 'test.user', find: {_id: -900}, to: shard1, _waitForDelete: true}));
+assert.commandWorked(st.s1.adminCommand({split: 'test.user', middle: {_id: -900}}));
+assert.commandWorked(st.s1.adminCommand(
+ {moveChunk: 'test.user', find: {_id: -900}, to: shard0, _waitForDelete: true}));
+assert.commandWorked(st.s1.adminCommand(
+ {moveChunk: 'test.user', find: {_id: -901}, to: shard0, _waitForDelete: true}));
+assert.eq(0, configDB.chunks.find({ns: 'test.user', shard: shard1}).itcount());
- //
- // Compound Key
- //
+//
+// Compound Key
+//
- assert.commandWorked(
- configDB.adminCommand({shardCollection: 'test.compound', key: {x: 1, y: 1}}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.compound', key: {x: 1, y: 1}}));
- assert.eq(null, configDB.chunks.findOne({ns: 'test.compound', min: {x: 0, y: 0}}));
- assert.commandWorked(configDB.adminCommand({split: 'test.compound', middle: {x: 0, y: 0}}));
- assert.neq(null, configDB.chunks.findOne({ns: 'test.compound', min: {x: 0, y: 0}}));
+assert.eq(null, configDB.chunks.findOne({ns: 'test.compound', min: {x: 0, y: 0}}));
+assert.commandWorked(configDB.adminCommand({split: 'test.compound', middle: {x: 0, y: 0}}));
+assert.neq(null, configDB.chunks.findOne({ns: 'test.compound', min: {x: 0, y: 0}}));
- // cannot split on existing chunk boundary.
- assert.commandFailed(configDB.adminCommand({split: 'test.compound', middle: {x: 0, y: 0}}));
+// cannot split on existing chunk boundary.
+assert.commandFailed(configDB.adminCommand({split: 'test.compound', middle: {x: 0, y: 0}}));
- bulk = testDB.compound.initializeUnorderedBulkOp();
- for (x = -1200; x < 1200; x++) {
- bulk.insert({x: x, y: x, val: kiloDoc});
- }
- assert.writeOK(bulk.execute());
+bulk = testDB.compound.initializeUnorderedBulkOp();
+for (x = -1200; x < 1200; x++) {
+ bulk.insert({x: x, y: x, val: kiloDoc});
+}
+assert.writeOK(bulk.execute());
- assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount());
- assert.commandWorked(configDB.adminCommand(
- {split: 'test.compound', bounds: [{x: 0, y: 0}, {x: MaxKey, y: MaxKey}]}));
- assert.gt(configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount(), 1);
+assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount());
+assert.commandWorked(configDB.adminCommand(
+ {split: 'test.compound', bounds: [{x: 0, y: 0}, {x: MaxKey, y: MaxKey}]}));
+assert.gt(configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount(), 1);
- assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$lt: {x: 0, y: 0}}}).itcount());
- assert.commandWorked(configDB.adminCommand({split: 'test.compound', find: {x: -1, y: -1}}));
- assert.gt(configDB.chunks.find({ns: 'test.compound', min: {$lt: {x: 0, y: 0}}}).itcount(), 1);
-
- st.stop();
+assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$lt: {x: 0, y: 0}}}).itcount());
+assert.commandWorked(configDB.adminCommand({split: 'test.compound', find: {x: -1, y: -1}}));
+assert.gt(configDB.chunks.find({ns: 'test.compound', min: {$lt: {x: 0, y: 0}}}).itcount(), 1);
+st.stop();
})();
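
As the assertions above show, split accepts exactly one of three selectors: middle names the split point, while find and bounds identify a chunk and let the server choose the point. A short sketch of the three forms, assuming `configDB` is the config database handle from the test, test.user was just sharded, and it already holds data on both sides of {_id: 0} (as the test arranges with its bulk insert):

// Sketch of the three split selectors exercised in basic_split.js.
// 1. Split at an explicit shard key value:
assert.commandWorked(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));

// 2. Split the chunk that owns a given document at a server-chosen point:
assert.commandWorked(configDB.adminCommand({split: 'test.user', find: {_id: -100}}));

// 3. Split the chunk identified by its exact current bounds at a server-chosen point:
assert.commandWorked(
    configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: MaxKey}]}));
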
diff --git a/jstests/sharding/batch_write_command_sharded.js b/jstests/sharding/batch_write_command_sharded.js
index cb3b4cd21d6..60b848dd6de 100644
--- a/jstests/sharding/batch_write_command_sharded.js
+++ b/jstests/sharding/batch_write_command_sharded.js
@@ -9,233 +9,267 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- var st = new ShardingTest({shards: 2, mongos: 1});
-
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var configConnStr = st._configDB;
-
- jsTest.log("Starting sharding batch write tests...");
-
- var request;
- var result;
-
- // NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
-
- //
- //
- // Mongos _id autogeneration tests for sharded collections
-
- var coll = mongos.getCollection("foo.bar");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
-
- //
- // Basic insert no _id
- coll.remove({});
- printjson(request = {insert: coll.getName(), documents: [{a: 1}]});
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert.eq(1, coll.count());
-
- //
- // Multi insert some _ids
- coll.remove({});
- printjson(request = {insert: coll.getName(), documents: [{_id: 0, a: 1}, {a: 2}]});
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(2, result.n);
- assert.eq(2, coll.count());
- assert.eq(1, coll.count({_id: 0}));
-
- //
- // Ensure generating many _ids don't push us over limits
- var maxDocSize = (16 * 1024 * 1024) / 1000;
- var baseDocSize = Object.bsonsize({a: 1, data: ""});
- var dataSize = maxDocSize - baseDocSize;
-
- var data = "";
- for (var i = 0; i < dataSize; i++)
- data += "x";
-
- var documents = [];
- for (var i = 0; i < 1000; i++)
- documents.push({a: i, data: data});
-
- assert.commandWorked(coll.getMongo().getDB("admin").runCommand({setParameter: 1, logLevel: 4}));
- coll.remove({});
- request = {insert: coll.getName(), documents: documents};
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1000, result.n);
- assert.eq(1000, coll.count());
-
- //
- //
- // Config server upserts (against admin db, for example) require _id test
- var adminColl = admin.getCollection(coll.getName());
-
- //
- // Without _id
- adminColl.remove({});
- printjson(
- request = {update: adminColl.getName(), updates: [{q: {a: 1}, u: {a: 1}, upsert: true}]});
- var result = adminColl.runCommand(request);
- assert.commandWorked(result);
- assert.eq(1, result.n);
- assert.eq(1, adminColl.count());
-
- //
- // With _id
- adminColl.remove({});
- printjson(request = {
- update: adminColl.getName(),
- updates: [{q: {_id: 1, a: 1}, u: {a: 1}, upsert: true}]
- });
- assert.commandWorked(adminColl.runCommand(request));
- assert.eq(1, result.n);
- assert.eq(1, adminColl.count());
-
- //
- //
- // Stale config progress tests
- // Set up a new collection across two shards, then revert the chunks to an earlier state to put
- // mongos and mongod permanently out of sync.
-
- // START SETUP
- var brokenColl = mongos.getCollection("broken.coll");
- assert.commandWorked(admin.runCommand({enableSharding: brokenColl.getDB().toString()}));
- st.ensurePrimaryShard(brokenColl.getDB().toString(), st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: brokenColl.toString(), key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: brokenColl.toString(), middle: {_id: 0}}));
-
- var oldChunks = config.chunks.find().toArray();
-
- // Start a new mongos and bring it up-to-date with the chunks so far
-
- var staleMongos = MongoRunner.runMongos({configdb: configConnStr});
- brokenColl = staleMongos.getCollection(brokenColl.toString());
- assert.writeOK(brokenColl.insert({hello: "world"}));
-
- // Modify the chunks to make shards at a higher version
-
- assert.commandWorked(admin.runCommand(
- {moveChunk: brokenColl.toString(), find: {_id: 0}, to: st.shard1.shardName}));
-
- // Rewrite the old chunks back to the config server
-
- assert.writeOK(config.chunks.remove({}));
- for (var i = 0; i < oldChunks.length; i++) {
- assert.writeOK(config.chunks.insert(oldChunks[i]));
- }
-
- // Ensure that the inserts have propagated to all secondary nodes
- st.configRS.awaitReplication();
-
- // Stale mongos can no longer bring itself up-to-date!
- // END SETUP
-
- //
- // Config server insert, repeatedly stale
- printjson(request = {insert: brokenColl.getName(), documents: [{_id: -1}]});
- printjson(result = brokenColl.runCommand(request));
- assert(result.ok);
- assert.eq(0, result.n);
- assert.eq(1, result.writeErrors.length);
- assert.eq(0, result.writeErrors[0].index);
- assert.eq(result.writeErrors[0].code, 82); // No Progress Made
-
- //
- // Config server insert to other shard, repeatedly stale
- printjson(request = {insert: brokenColl.getName(), documents: [{_id: 1}]});
- printjson(result = brokenColl.runCommand(request));
- assert(result.ok);
- assert.eq(0, result.n);
- assert.eq(1, result.writeErrors.length);
- assert.eq(0, result.writeErrors[0].index);
- assert.eq(result.writeErrors[0].code, 82); // No Progress Made
-
- //
- //
- // Tests against config server
- var configColl = config.getCollection("batch_write_protocol_sharded");
-
- //
- // Basic config server insert
- configColl.remove({});
- printjson(request = {insert: configColl.getName(), documents: [{a: 1}]});
- var result = configColl.runCommand(request);
- assert.commandWorked(result);
- assert.eq(1, result.n);
-
- st.configRS.awaitReplication();
- assert.eq(1, st.config0.getCollection(configColl + "").count());
- assert.eq(1, st.config1.getCollection(configColl + "").count());
- assert.eq(1, st.config2.getCollection(configColl + "").count());
-
- //
- // Basic config server update
- configColl.remove({});
- configColl.insert({a: 1});
- printjson(request = {update: configColl.getName(), updates: [{q: {a: 1}, u: {$set: {b: 2}}}]});
- printjson(result = configColl.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
-
- st.configRS.awaitReplication();
- assert.eq(1, st.config0.getCollection(configColl + "").count({b: 2}));
- assert.eq(1, st.config1.getCollection(configColl + "").count({b: 2}));
- assert.eq(1, st.config2.getCollection(configColl + "").count({b: 2}));
-
- //
- // Basic config server delete
- configColl.remove({});
- configColl.insert({a: 1});
- printjson(request = {'delete': configColl.getName(), deletes: [{q: {a: 1}, limit: 0}]});
- printjson(result = configColl.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
-
- st.configRS.awaitReplication();
- assert.eq(0, st.config0.getCollection(configColl + "").count());
- assert.eq(0, st.config1.getCollection(configColl + "").count());
- assert.eq(0, st.config2.getCollection(configColl + "").count());
-
- MongoRunner.stopMongod(st.config1);
- MongoRunner.stopMongod(st.config2);
- st.configRS.awaitNoPrimary();
-
- // Config server insert with no config PRIMARY
- configColl.remove({});
- printjson(request = {insert: configColl.getName(), documents: [{a: 1}]});
- printjson(result = configColl.runCommand(request));
- assert(!result.ok);
- assert(result.errmsg != null);
-
- // Config server insert with no config PRIMARY
- configColl.remove({});
- configColl.insert({a: 1});
- printjson(request = {update: configColl.getName(), updates: [{q: {a: 1}, u: {$set: {b: 2}}}]});
- printjson(result = configColl.runCommand(request));
- assert(!result.ok);
- assert(result.errmsg != null);
-
- // Config server insert with no config PRIMARY
- configColl.remove({});
- configColl.insert({a: 1});
- printjson(request = {delete: configColl.getName(), deletes: [{q: {a: 1}, limit: 0}]});
- printjson(result = configColl.runCommand(request));
- assert(!result.ok);
- assert(result.errmsg != null);
-
- jsTest.log("DONE!");
-
- MongoRunner.stopMongos(staleMongos);
- st.stop();
+"use strict";
+var st = new ShardingTest({shards: 2, mongos: 1});
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var configConnStr = st._configDB;
+
+jsTest.log("Starting sharding batch write tests...");
+
+var request;
+var result;
+
+// NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
+
+//
+//
+// Mongos _id autogeneration tests for sharded collections
+
+var coll = mongos.getCollection("foo.bar");
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
+
+//
+// Basic insert no _id
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}]
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(1, coll.count());
+
+//
+// Multi insert some _ids
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{_id: 0, a: 1}, {a: 2}]
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(2, result.n);
+assert.eq(2, coll.count());
+assert.eq(1, coll.count({_id: 0}));
+
+//
+// Ensure that generating many _ids doesn't push us over the limits
+var maxDocSize = (16 * 1024 * 1024) / 1000;
+var baseDocSize = Object.bsonsize({a: 1, data: ""});
+var dataSize = maxDocSize - baseDocSize;
+
+var data = "";
+for (var i = 0; i < dataSize; i++)
+ data += "x";
+
+var documents = [];
+for (var i = 0; i < 1000; i++)
+ documents.push({a: i, data: data});
+
+assert.commandWorked(coll.getMongo().getDB("admin").runCommand({setParameter: 1, logLevel: 4}));
+coll.remove({});
+request = {
+ insert: coll.getName(),
+ documents: documents
+};
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1000, result.n);
+assert.eq(1000, coll.count());
+
+//
+//
+// Config server upserts (against admin db, for example) require _id test
+var adminColl = admin.getCollection(coll.getName());
+
+//
+// Without _id
+adminColl.remove({});
+printjson(request = {
+ update: adminColl.getName(),
+ updates: [{q: {a: 1}, u: {a: 1}, upsert: true}]
+});
+var result = adminColl.runCommand(request);
+assert.commandWorked(result);
+assert.eq(1, result.n);
+assert.eq(1, adminColl.count());
+
+//
+// With _id
+adminColl.remove({});
+printjson(request = {
+ update: adminColl.getName(),
+ updates: [{q: {_id: 1, a: 1}, u: {a: 1}, upsert: true}]
+});
+assert.commandWorked(adminColl.runCommand(request));
+assert.eq(1, result.n);
+assert.eq(1, adminColl.count());
+
+//
+//
+// Stale config progress tests
+// Set up a new collection across two shards, then revert the chunks to an earlier state to put
+// mongos and mongod permanently out of sync.
+
+// START SETUP
+var brokenColl = mongos.getCollection("broken.coll");
+assert.commandWorked(admin.runCommand({enableSharding: brokenColl.getDB().toString()}));
+st.ensurePrimaryShard(brokenColl.getDB().toString(), st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: brokenColl.toString(), key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: brokenColl.toString(), middle: {_id: 0}}));
+
+var oldChunks = config.chunks.find().toArray();
+
+// Start a new mongos and bring it up-to-date with the chunks so far
+
+var staleMongos = MongoRunner.runMongos({configdb: configConnStr});
+brokenColl = staleMongos.getCollection(brokenColl.toString());
+assert.writeOK(brokenColl.insert({hello: "world"}));
+
+// Modify the chunks to make shards at a higher version
+
+assert.commandWorked(
+ admin.runCommand({moveChunk: brokenColl.toString(), find: {_id: 0}, to: st.shard1.shardName}));
+
+// Rewrite the old chunks back to the config server
+
+assert.writeOK(config.chunks.remove({}));
+for (var i = 0; i < oldChunks.length; i++) {
+ assert.writeOK(config.chunks.insert(oldChunks[i]));
+}
+
+// Ensure that the inserts have propagated to all secondary nodes
+st.configRS.awaitReplication();
+
+// Stale mongos can no longer bring itself up-to-date!
+// END SETUP
+
+//
+// Config server insert, repeatedly stale
+printjson(request = {
+ insert: brokenColl.getName(),
+ documents: [{_id: -1}]
+});
+printjson(result = brokenColl.runCommand(request));
+assert(result.ok);
+assert.eq(0, result.n);
+assert.eq(1, result.writeErrors.length);
+assert.eq(0, result.writeErrors[0].index);
+assert.eq(result.writeErrors[0].code, 82); // No Progress Made
+
+//
+// Config server insert to other shard, repeatedly stale
+printjson(request = {
+ insert: brokenColl.getName(),
+ documents: [{_id: 1}]
+});
+printjson(result = brokenColl.runCommand(request));
+assert(result.ok);
+assert.eq(0, result.n);
+assert.eq(1, result.writeErrors.length);
+assert.eq(0, result.writeErrors[0].index);
+assert.eq(result.writeErrors[0].code, 82); // No Progress Made
+
+//
+//
+// Tests against config server
+var configColl = config.getCollection("batch_write_protocol_sharded");
+
+//
+// Basic config server insert
+configColl.remove({});
+printjson(request = {
+ insert: configColl.getName(),
+ documents: [{a: 1}]
+});
+var result = configColl.runCommand(request);
+assert.commandWorked(result);
+assert.eq(1, result.n);
+
+st.configRS.awaitReplication();
+assert.eq(1, st.config0.getCollection(configColl + "").count());
+assert.eq(1, st.config1.getCollection(configColl + "").count());
+assert.eq(1, st.config2.getCollection(configColl + "").count());
+
+//
+// Basic config server update
+configColl.remove({});
+configColl.insert({a: 1});
+printjson(request = {
+ update: configColl.getName(),
+ updates: [{q: {a: 1}, u: {$set: {b: 2}}}]
+});
+printjson(result = configColl.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+
+st.configRS.awaitReplication();
+assert.eq(1, st.config0.getCollection(configColl + "").count({b: 2}));
+assert.eq(1, st.config1.getCollection(configColl + "").count({b: 2}));
+assert.eq(1, st.config2.getCollection(configColl + "").count({b: 2}));
+
+//
+// Basic config server delete
+configColl.remove({});
+configColl.insert({a: 1});
+printjson(request = {
+ 'delete': configColl.getName(),
+ deletes: [{q: {a: 1}, limit: 0}]
+});
+printjson(result = configColl.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+
+st.configRS.awaitReplication();
+assert.eq(0, st.config0.getCollection(configColl + "").count());
+assert.eq(0, st.config1.getCollection(configColl + "").count());
+assert.eq(0, st.config2.getCollection(configColl + "").count());
+
+MongoRunner.stopMongod(st.config1);
+MongoRunner.stopMongod(st.config2);
+st.configRS.awaitNoPrimary();
+
+// Config server insert with no config PRIMARY
+configColl.remove({});
+printjson(request = {
+ insert: configColl.getName(),
+ documents: [{a: 1}]
+});
+printjson(result = configColl.runCommand(request));
+assert(!result.ok);
+assert(result.errmsg != null);
+
+// Config server insert with no config PRIMARY
+configColl.remove({});
+configColl.insert({a: 1});
+printjson(request = {
+ update: configColl.getName(),
+ updates: [{q: {a: 1}, u: {$set: {b: 2}}}]
+});
+printjson(result = configColl.runCommand(request));
+assert(!result.ok);
+assert(result.errmsg != null);
+
+// Config server insert with no config PRIMARY
+configColl.remove({});
+configColl.insert({a: 1});
+printjson(request = {
+ delete: configColl.getName(),
+ deletes: [{q: {a: 1}, limit: 0}]
+});
+printjson(result = configColl.runCommand(request));
+assert(!result.ok);
+assert(result.errmsg != null);
+
+jsTest.log("DONE!");
+
+MongoRunner.stopMongos(staleMongos);
+st.stop();
}());
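
This test drives the write commands directly through runCommand rather than the shell's insert/update/remove helpers, so the request and response shapes matter. A generic sketch of the three shapes, assuming `coll` is a collection handle obtained through a mongos as in the file:

// Sketch: the raw batch write command shapes exercised above.
var insertReq = {insert: coll.getName(), documents: [{_id: 1, a: 1}]};
var updateReq = {update: coll.getName(), updates: [{q: {a: 1}, u: {$set: {b: 2}}, upsert: false}]};
var deleteReq = {'delete': coll.getName(), deletes: [{q: {a: 1}, limit: 0}]};

var res = coll.runCommand(insertReq);
// A successful batch reports ok: 1 and the number of affected documents in
// n; per-document failures (such as the repeatedly-stale case above, code
// 82) appear in res.writeErrors with the failing index and an error code.
assert.commandWorked(res);
assert.eq(1, res.n);
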
diff --git a/jstests/sharding/bouncing_count.js b/jstests/sharding/bouncing_count.js
index f4deb4335e5..68097fb23a3 100644
--- a/jstests/sharding/bouncing_count.js
+++ b/jstests/sharding/bouncing_count.js
@@ -2,71 +2,67 @@
* Tests whether new sharding is detected on insert by mongos
*/
(function() {
- 'use strict';
+'use strict';
- // TODO: SERVER-33830 remove shardAsReplicaSet: false
- var st = new ShardingTest({shards: 10, mongos: 3, other: {shardAsReplicaSet: false}});
+// TODO: SERVER-33830 remove shardAsReplicaSet: false
+var st = new ShardingTest({shards: 10, mongos: 3, other: {shardAsReplicaSet: false}});
- var mongosA = st.s0;
- var mongosB = st.s1;
- var mongosC = st.s2;
+var mongosA = st.s0;
+var mongosB = st.s1;
+var mongosC = st.s2;
- var admin = mongosA.getDB("admin");
- var config = mongosA.getDB("config");
+var admin = mongosA.getDB("admin");
+var config = mongosA.getDB("config");
- var collA = mongosA.getCollection("foo.bar");
- var collB = mongosB.getCollection("" + collA);
- var collC = mongosB.getCollection("" + collA);
+var collA = mongosA.getCollection("foo.bar");
+var collB = mongosB.getCollection("" + collA);
+var collC = mongosB.getCollection("" + collA);
- var shards = [
- st.shard0,
- st.shard1,
- st.shard2,
- st.shard3,
- st.shard4,
- st.shard5,
- st.shard6,
- st.shard7,
- st.shard8,
- st.shard9
- ];
+var shards = [
+ st.shard0,
+ st.shard1,
+ st.shard2,
+ st.shard3,
+ st.shard4,
+ st.shard5,
+ st.shard6,
+ st.shard7,
+ st.shard8,
+ st.shard9
+];
- assert.commandWorked(admin.runCommand({enableSharding: "" + collA.getDB()}));
- st.ensurePrimaryShard(collA.getDB().getName(), st.shard1.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: "" + collA, key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({enableSharding: "" + collA.getDB()}));
+st.ensurePrimaryShard(collA.getDB().getName(), st.shard1.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: "" + collA, key: {_id: 1}}));
- jsTestLog("Splitting up the collection...");
+jsTestLog("Splitting up the collection...");
- // Split up the collection
- for (var i = 0; i < shards.length; i++) {
- assert.commandWorked(admin.runCommand({split: "" + collA, middle: {_id: i}}));
- assert.commandWorked(
- admin.runCommand({moveChunk: "" + collA, find: {_id: i}, to: shards[i].shardName}));
- }
+// Split up the collection
+for (var i = 0; i < shards.length; i++) {
+ assert.commandWorked(admin.runCommand({split: "" + collA, middle: {_id: i}}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: "" + collA, find: {_id: i}, to: shards[i].shardName}));
+}
- mongosB.getDB("admin").runCommand({flushRouterConfig: 1});
- mongosC.getDB("admin").runCommand({flushRouterConfig: 1});
+mongosB.getDB("admin").runCommand({flushRouterConfig: 1});
+mongosC.getDB("admin").runCommand({flushRouterConfig: 1});
- printjson(collB.count());
- printjson(collC.count());
+printjson(collB.count());
+printjson(collC.count());
- // Change up all the versions...
- for (var i = 0; i < shards.length; i++) {
- assert.commandWorked(admin.runCommand({
- moveChunk: "" + collA,
- find: {_id: i},
- to: shards[(i + 1) % shards.length].shardName
- }));
- }
+// Change up all the versions...
+for (var i = 0; i < shards.length; i++) {
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: "" + collA, find: {_id: i}, to: shards[(i + 1) % shards.length].shardName}));
+}
- // Make sure mongos A is up-to-date
- mongosA.getDB("admin").runCommand({flushRouterConfig: 1});
+// Make sure mongos A is up-to-date
+mongosA.getDB("admin").runCommand({flushRouterConfig: 1});
- jsTestLog("Running count!");
+jsTestLog("Running count!");
- printjson(collB.count());
- printjson(collC.find().toArray());
-
- st.stop();
+printjson(collB.count());
+printjson(collC.find().toArray());
+st.stop();
})();
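
What keeps the counts consistent here is that a router whose cached routing table has gone stale can be told to refetch it with flushRouterConfig, which the test issues against mongosA, mongosB and mongosC after moving chunks around. A minimal sketch of that step, assuming `staleRouter` is any mongos connection in the cluster:

// Sketch: force a mongos to discard its cached routing table so the next
// operation loads fresh chunk metadata from the config servers.
assert.commandWorked(staleRouter.getDB('admin').runCommand({flushRouterConfig: 1}));

// A count or find issued through this router afterwards routes against the
// refreshed metadata, which is what bouncing_count.js depends on.
printjson(staleRouter.getCollection('foo.bar').count());
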
diff --git a/jstests/sharding/bulk_insert.js b/jstests/sharding/bulk_insert.js
index d50830a4665..a2162771492 100644
--- a/jstests/sharding/bulk_insert.js
+++ b/jstests/sharding/bulk_insert.js
@@ -1,284 +1,282 @@
// Tests bulk inserts to mongos
(function() {
- 'use strict';
+'use strict';
- // TODO: SERVER-33601 remove shardAsReplicaSet: false
- var st = new ShardingTest({shards: 2, mongos: 2, other: {shardAsReplicaSet: false}});
+// TODO: SERVER-33601 remove shardAsReplicaSet: false
+var st = new ShardingTest({shards: 2, mongos: 2, other: {shardAsReplicaSet: false}});
- var mongos = st.s;
- var staleMongos = st.s1;
- var admin = mongos.getDB("admin");
+var mongos = st.s;
+var staleMongos = st.s1;
+var admin = mongos.getDB("admin");
- var collSh = mongos.getCollection(jsTestName() + ".collSharded");
- var collUn = mongos.getCollection(jsTestName() + ".collUnsharded");
- var collDi = st.shard0.getCollection(jsTestName() + ".collDirect");
+var collSh = mongos.getCollection(jsTestName() + ".collSharded");
+var collUn = mongos.getCollection(jsTestName() + ".collUnsharded");
+var collDi = st.shard0.getCollection(jsTestName() + ".collDirect");
- jsTest.log('Checking write to config collections...');
- assert.writeOK(admin.TestColl.insert({SingleDoc: 1}));
+jsTest.log('Checking write to config collections...');
+assert.writeOK(admin.TestColl.insert({SingleDoc: 1}));
- jsTest.log("Setting up collections...");
+jsTest.log("Setting up collections...");
- assert.commandWorked(admin.runCommand({enableSharding: collSh.getDB() + ""}));
- st.ensurePrimaryShard(collSh.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({enableSharding: collSh.getDB() + ""}));
+st.ensurePrimaryShard(collSh.getDB() + "", st.shard0.shardName);
- assert.commandWorked(
- admin.runCommand({movePrimary: collUn.getDB() + "", to: st.shard1.shardName}));
+assert.commandWorked(admin.runCommand({movePrimary: collUn.getDB() + "", to: st.shard1.shardName}));
- printjson(collSh.ensureIndex({ukey: 1}, {unique: true}));
- printjson(collUn.ensureIndex({ukey: 1}, {unique: true}));
- printjson(collDi.ensureIndex({ukey: 1}, {unique: true}));
+printjson(collSh.ensureIndex({ukey: 1}, {unique: true}));
+printjson(collUn.ensureIndex({ukey: 1}, {unique: true}));
+printjson(collDi.ensureIndex({ukey: 1}, {unique: true}));
- assert.commandWorked(admin.runCommand({shardCollection: collSh + "", key: {ukey: 1}}));
- assert.commandWorked(admin.runCommand({split: collSh + "", middle: {ukey: 0}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand({shardCollection: collSh + "", key: {ukey: 1}}));
+assert.commandWorked(admin.runCommand({split: collSh + "", middle: {ukey: 0}}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- var resetColls = function() {
- assert.writeOK(collSh.remove({}));
- assert.writeOK(collUn.remove({}));
- assert.writeOK(collDi.remove({}));
- };
+var resetColls = function() {
+ assert.writeOK(collSh.remove({}));
+ assert.writeOK(collUn.remove({}));
+ assert.writeOK(collDi.remove({}));
+};
- var isDupKeyError = function(err) {
- return /dup key/.test(err + "");
- };
+var isDupKeyError = function(err) {
+ return /dup key/.test(err + "");
+};
- jsTest.log("Collections created.");
- st.printShardingStatus();
+jsTest.log("Collections created.");
+st.printShardingStatus();
- //
- // BREAK-ON-ERROR
- //
+//
+// BREAK-ON-ERROR
+//
- jsTest.log("Bulk insert (no ContinueOnError) to single shard...");
+jsTest.log("Bulk insert (no ContinueOnError) to single shard...");
- resetColls();
- var inserts = [{ukey: 0}, {ukey: 1}];
+resetColls();
+var inserts = [{ukey: 0}, {ukey: 1}];
- assert.writeOK(collSh.insert(inserts));
- assert.eq(2, collSh.find().itcount());
+assert.writeOK(collSh.insert(inserts));
+assert.eq(2, collSh.find().itcount());
- assert.writeOK(collUn.insert(inserts));
- assert.eq(2, collUn.find().itcount());
+assert.writeOK(collUn.insert(inserts));
+assert.eq(2, collUn.find().itcount());
- assert.writeOK(collDi.insert(inserts));
- assert.eq(2, collDi.find().itcount());
+assert.writeOK(collDi.insert(inserts));
+assert.eq(2, collDi.find().itcount());
- jsTest.log("Bulk insert (no COE) with mongos error...");
+jsTest.log("Bulk insert (no COE) with mongos error...");
- resetColls();
- var inserts = [{ukey: 0}, {hello: "world"}, {ukey: 1}];
+resetColls();
+var inserts = [{ukey: 0}, {hello: "world"}, {ukey: 1}];
- assert.writeError(collSh.insert(inserts));
- assert.eq(1, collSh.find().itcount());
+assert.writeError(collSh.insert(inserts));
+assert.eq(1, collSh.find().itcount());
- jsTest.log("Bulk insert (no COE) with mongod error...");
+jsTest.log("Bulk insert (no COE) with mongod error...");
- resetColls();
- var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}];
+resetColls();
+var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}];
- assert.writeError(collSh.insert(inserts));
- assert.eq(1, collSh.find().itcount());
+assert.writeError(collSh.insert(inserts));
+assert.eq(1, collSh.find().itcount());
- assert.writeError(collUn.insert(inserts));
- assert.eq(1, collUn.find().itcount());
+assert.writeError(collUn.insert(inserts));
+assert.eq(1, collUn.find().itcount());
- assert.writeError(collDi.insert(inserts));
- assert.eq(1, collDi.find().itcount());
+assert.writeError(collDi.insert(inserts));
+assert.eq(1, collDi.find().itcount());
- jsTest.log("Bulk insert (no COE) with mongod and mongos error...");
+jsTest.log("Bulk insert (no COE) with mongod and mongos error...");
- resetColls();
- var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}, {hello: "world"}];
+resetColls();
+var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}, {hello: "world"}];
- var res = assert.writeError(collSh.insert(inserts));
- assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
- assert.eq(1, collSh.find().itcount());
+var res = assert.writeError(collSh.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+assert.eq(1, collSh.find().itcount());
- res = assert.writeError(collUn.insert(inserts));
- assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
- assert.eq(1, collUn.find().itcount());
+res = assert.writeError(collUn.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+assert.eq(1, collUn.find().itcount());
- res = assert.writeError(collDi.insert(inserts));
- assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
- assert.eq(1, collDi.find().itcount());
+res = assert.writeError(collDi.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+assert.eq(1, collDi.find().itcount());
- jsTest.log("Bulk insert (no COE) on second shard...");
+jsTest.log("Bulk insert (no COE) on second shard...");
- resetColls();
- var inserts = [{ukey: 0}, {ukey: -1}];
+resetColls();
+var inserts = [{ukey: 0}, {ukey: -1}];
- assert.writeOK(collSh.insert(inserts));
- assert.eq(2, collSh.find().itcount());
+assert.writeOK(collSh.insert(inserts));
+assert.eq(2, collSh.find().itcount());
- assert.writeOK(collUn.insert(inserts));
- assert.eq(2, collUn.find().itcount());
+assert.writeOK(collUn.insert(inserts));
+assert.eq(2, collUn.find().itcount());
- assert.writeOK(collDi.insert(inserts));
- assert.eq(2, collDi.find().itcount());
+assert.writeOK(collDi.insert(inserts));
+assert.eq(2, collDi.find().itcount());
- jsTest.log("Bulk insert to second shard (no COE) with mongos error...");
+jsTest.log("Bulk insert to second shard (no COE) with mongos error...");
- resetColls();
- var inserts = [
- {ukey: 0},
- {ukey: 1}, // switches shards
- {ukey: -1},
- {hello: "world"}
- ];
+resetColls();
+var inserts = [
+ {ukey: 0},
+ {ukey: 1}, // switches shards
+ {ukey: -1},
+ {hello: "world"}
+];
- assert.writeError(collSh.insert(inserts));
- assert.eq(3, collSh.find().itcount());
+assert.writeError(collSh.insert(inserts));
+assert.eq(3, collSh.find().itcount());
- jsTest.log("Bulk insert to second shard (no COE) with mongod error...");
+jsTest.log("Bulk insert to second shard (no COE) with mongod error...");
- resetColls();
- var inserts = [{ukey: 0}, {ukey: 1}, {ukey: -1}, {ukey: -2}, {ukey: -2}];
+resetColls();
+var inserts = [{ukey: 0}, {ukey: 1}, {ukey: -1}, {ukey: -2}, {ukey: -2}];
- assert.writeError(collSh.insert(inserts));
- assert.eq(4, collSh.find().itcount());
+assert.writeError(collSh.insert(inserts));
+assert.eq(4, collSh.find().itcount());
- assert.writeError(collUn.insert(inserts));
- assert.eq(4, collUn.find().itcount());
+assert.writeError(collUn.insert(inserts));
+assert.eq(4, collUn.find().itcount());
- assert.writeError(collDi.insert(inserts));
- assert.eq(4, collDi.find().itcount());
+assert.writeError(collDi.insert(inserts));
+assert.eq(4, collDi.find().itcount());
- jsTest.log("Bulk insert to third shard (no COE) with mongod and mongos error...");
+jsTest.log("Bulk insert to third shard (no COE) with mongod and mongos error...");
- resetColls();
- var inserts =
- [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {ukey: 4}, {ukey: 4}, {hello: "world"}];
+resetColls();
+var inserts =
+ [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {ukey: 4}, {ukey: 4}, {hello: "world"}];
- res = assert.writeError(collSh.insert(inserts));
- assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
- assert.eq(5, collSh.find().itcount());
+res = assert.writeError(collSh.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+assert.eq(5, collSh.find().itcount());
- res = assert.writeError(collUn.insert(inserts));
- assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
- assert.eq(5, collUn.find().itcount());
+res = assert.writeError(collUn.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+assert.eq(5, collUn.find().itcount());
- res = assert.writeError(collDi.insert(inserts));
- assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
- assert.eq(5, collDi.find().itcount());
+res = assert.writeError(collDi.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+assert.eq(5, collDi.find().itcount());
- //
- // CONTINUE-ON-ERROR
- //
+//
+// CONTINUE-ON-ERROR
+//
- jsTest.log("Bulk insert (yes COE) with mongos error...");
+jsTest.log("Bulk insert (yes COE) with mongos error...");
- resetColls();
- var inserts = [{ukey: 0}, {hello: "world"}, {ukey: 1}];
+resetColls();
+var inserts = [{ukey: 0}, {hello: "world"}, {ukey: 1}];
- assert.writeError(collSh.insert(inserts, 1)); // COE
- assert.eq(2, collSh.find().itcount());
+assert.writeError(collSh.insert(inserts, 1)); // COE
+assert.eq(2, collSh.find().itcount());
- jsTest.log("Bulk insert (yes COE) with mongod error...");
+jsTest.log("Bulk insert (yes COE) with mongod error...");
- resetColls();
- var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}];
+resetColls();
+var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}];
- assert.writeError(collSh.insert(inserts, 1));
- assert.eq(2, collSh.find().itcount());
+assert.writeError(collSh.insert(inserts, 1));
+assert.eq(2, collSh.find().itcount());
- assert.writeError(collUn.insert(inserts, 1));
- assert.eq(2, collUn.find().itcount());
+assert.writeError(collUn.insert(inserts, 1));
+assert.eq(2, collUn.find().itcount());
- assert.writeError(collDi.insert(inserts, 1));
- assert.eq(2, collDi.find().itcount());
+assert.writeError(collDi.insert(inserts, 1));
+assert.eq(2, collDi.find().itcount());
- jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error...");
+jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error...");
- resetColls();
- var inserts =
- [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {ukey: 4}, {ukey: 4}, {hello: "world"}];
+resetColls();
+var inserts =
+ [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {ukey: 4}, {ukey: 4}, {hello: "world"}];
- // Last error here is mongos error
- res = assert.writeError(collSh.insert(inserts, 1));
- assert(!isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg),
- res.toString());
- assert.eq(5, collSh.find().itcount());
+// Last error here is mongos error
+res = assert.writeError(collSh.insert(inserts, 1));
+assert(!isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
+assert.eq(5, collSh.find().itcount());
- // Extra insert goes through, since mongos error "doesn't count"
- res = assert.writeError(collUn.insert(inserts, 1));
- assert.eq(6, res.nInserted, res.toString());
- assert.eq(6, collUn.find().itcount());
+// Extra insert goes through, since mongos error "doesn't count"
+res = assert.writeError(collUn.insert(inserts, 1));
+assert.eq(6, res.nInserted, res.toString());
+assert.eq(6, collUn.find().itcount());
- res = assert.writeError(collDi.insert(inserts, 1));
- assert.eq(6, res.nInserted, res.toString());
- assert.eq(6, collDi.find().itcount());
+res = assert.writeError(collDi.insert(inserts, 1));
+assert.eq(6, res.nInserted, res.toString());
+assert.eq(6, collDi.find().itcount());
- jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error " +
- "(mongos error first)...");
+jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error " +
+ "(mongos error first)...");
- resetColls();
- var inserts =
- [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {hello: "world"}, {ukey: 4}, {ukey: 4}];
+resetColls();
+var inserts =
+ [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {hello: "world"}, {ukey: 4}, {ukey: 4}];
- // Last error here is mongos error
- res = assert.writeError(collSh.insert(inserts, 1));
- assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
- assert.eq(5, collSh.find().itcount());
+// Last error here is the mongod dup key error; the mongos error comes first
+res = assert.writeError(collSh.insert(inserts, 1));
+assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
+assert.eq(5, collSh.find().itcount());
- // Extra insert goes through, since mongos error "doesn't count"
- res = assert.writeError(collUn.insert(inserts, 1));
- assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
- assert.eq(6, collUn.find().itcount());
+// Extra insert goes through, since mongos error "doesn't count"
+res = assert.writeError(collUn.insert(inserts, 1));
+assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
+assert.eq(6, collUn.find().itcount());
- res = assert.writeError(collDi.insert(inserts, 1));
- assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
- assert.eq(6, collDi.find().itcount());
+res = assert.writeError(collDi.insert(inserts, 1));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+assert.eq(6, collDi.find().itcount());
- //
- // Test when WBL has to be invoked mid-insert
- //
+//
+// Test when WBL has to be invoked mid-insert
+//
- jsTest.log("Testing bulk insert (no COE) with WBL...");
- resetColls();
+jsTest.log("Testing bulk insert (no COE) with WBL...");
+resetColls();
- var inserts = [{ukey: 1}, {ukey: -1}];
+var inserts = [{ukey: 1}, {ukey: -1}];
- var staleCollSh = staleMongos.getCollection(collSh + "");
- assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
+var staleCollSh = staleMongos.getCollection(collSh + "");
+assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
- assert.commandWorked(admin.runCommand(
- {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- assert.writeOK(staleCollSh.insert(inserts));
+assert.writeOK(staleCollSh.insert(inserts));
- //
- // Test when the legacy batch exceeds the BSON object size limit
- //
+//
+// Test when the legacy batch exceeds the BSON object size limit
+//
- jsTest.log("Testing bulk insert (no COE) with large objects...");
- resetColls();
+jsTest.log("Testing bulk insert (no COE) with large objects...");
+resetColls();
- var inserts = (function() {
- var data = 'x'.repeat(10 * 1024 * 1024);
- return [
- {ukey: 1, data: data},
- {ukey: 2, data: data},
- {ukey: -1, data: data},
- {ukey: -2, data: data}
- ];
- })();
+var inserts = (function() {
+ var data = 'x'.repeat(10 * 1024 * 1024);
+ return [
+ {ukey: 1, data: data},
+ {ukey: 2, data: data},
+ {ukey: -1, data: data},
+ {ukey: -2, data: data}
+ ];
+})();
- var staleMongosWithLegacyWrites = new Mongo(staleMongos.name);
- staleMongosWithLegacyWrites.forceWriteMode('legacy');
+var staleMongosWithLegacyWrites = new Mongo(staleMongos.name);
+staleMongosWithLegacyWrites.forceWriteMode('legacy');
- staleCollSh = staleMongos.getCollection(collSh + "");
- assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
+staleCollSh = staleMongosWithLegacyWrites.getCollection(collSh + "");
+assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
- assert.commandWorked(admin.runCommand(
- {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- staleCollSh.insert(inserts);
- staleCollSh.getDB().getLastError();
+staleCollSh.insert(inserts);
+staleCollSh.getDB().getLastError();
- st.stop();
+st.stop();
})();
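
For context, the continue-on-error (COE) flag exercised by bulk_insert.js above corresponds to unordered writes in the write-command API. The following is a minimal sketch of the same semantics, assuming a plain mongo shell with its standard assert helpers; the database, collection, and field names are illustrative and not taken from the patch:

// Sketch: unordered ("continue on error") inserts keep going past a
// duplicate-key failure, while ordered inserts stop at the first error.
var demoColl = db.getSiblingDB("bulkInsertDemo").coe;
demoColl.drop();
assert.commandWorked(demoColl.createIndex({ukey: 1}, {unique: true}));

var docs = [{ukey: 0}, {ukey: 0} /* duplicate */, {ukey: 1}];

// Ordered (default): execution stops at the duplicate, so one document lands.
assert.writeError(demoColl.insert(docs, {ordered: true}));
assert.eq(1, demoColl.find().itcount());

demoColl.remove({});

// Unordered (COE): the duplicate is reported, but the final insert still runs.
assert.writeError(demoColl.insert(docs, {ordered: false}));
assert.eq(2, demoColl.find().itcount());
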
diff --git a/jstests/sharding/bulk_shard_insert.js b/jstests/sharding/bulk_shard_insert.js
index 6db6a62c998..4f3a4626818 100644
--- a/jstests/sharding/bulk_shard_insert.js
+++ b/jstests/sharding/bulk_shard_insert.js
@@ -7,94 +7,92 @@
* @tags: [resource_intensive]
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 4, chunkSize: 1});
+var st = new ShardingTest({shards: 4, chunkSize: 1});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(
- st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Counter: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Counter: 1}}));
- var db = st.s0.getDB('TestDB');
- var coll = db.TestColl;
+var db = st.s0.getDB('TestDB');
+var coll = db.TestColl;
- // Insert lots of bulk documents
- var numDocs = 1000000;
+// Insert lots of bulk documents
+var numDocs = 1000000;
- var bulkSize = 4000;
- var docSize = 128; /* bytes */
- print("\n\n\nBulk size is " + bulkSize);
+var bulkSize = 4000;
+var docSize = 128; /* bytes */
+print("\n\n\nBulk size is " + bulkSize);
- var data = "x";
- while (Object.bsonsize({x: data}) < docSize) {
- data += data;
- }
+var data = "x";
+while (Object.bsonsize({x: data}) < docSize) {
+ data += data;
+}
- print("\n\n\nDocument size is " + Object.bsonsize({x: data}));
+print("\n\n\nDocument size is " + Object.bsonsize({x: data}));
- var docsInserted = 0;
- var balancerOn = false;
+var docsInserted = 0;
+var balancerOn = false;
- /**
- * Ensures that the just inserted documents can be found.
- */
- function checkDocuments() {
- var docsFound = coll.find({}, {_id: 0, Counter: 1}).toArray();
- var count = coll.find().count();
+/**
+ * Ensures that the just inserted documents can be found.
+ */
+function checkDocuments() {
+ var docsFound = coll.find({}, {_id: 0, Counter: 1}).toArray();
+ var count = coll.find().count();
- if (docsFound.length != docsInserted) {
- print("Inserted " + docsInserted + " count : " + count + " doc count : " +
- docsFound.length);
+ if (docsFound.length != docsInserted) {
+ print("Inserted " + docsInserted + " count : " + count +
+ " doc count : " + docsFound.length);
- var allFoundDocsSorted = docsFound.sort(function(a, b) {
- return a.Counter - b.Counter;
- });
+ var allFoundDocsSorted = docsFound.sort(function(a, b) {
+ return a.Counter - b.Counter;
+ });
- var missingValueInfo;
+ var missingValueInfo;
- for (var i = 0; i < docsInserted; i++) {
- if (i != allFoundDocsSorted[i].Counter) {
- missingValueInfo = {expected: i, actual: allFoundDocsSorted[i].Counter};
- break;
- }
+ for (var i = 0; i < docsInserted; i++) {
+ if (i != allFoundDocsSorted[i].Counter) {
+ missingValueInfo = {expected: i, actual: allFoundDocsSorted[i].Counter};
+ break;
}
+ }
- st.printShardingStatus();
+ st.printShardingStatus();
- assert(false,
- 'Inserted number of documents does not match the actual: ' +
- tojson(missingValueInfo));
- }
+ assert(
+ false,
+ 'Inserted number of documents does not match the actual: ' + tojson(missingValueInfo));
}
+}
- while (docsInserted < numDocs) {
- var currBulkSize =
- (numDocs - docsInserted > bulkSize) ? bulkSize : (numDocs - docsInserted);
+while (docsInserted < numDocs) {
+ var currBulkSize = (numDocs - docsInserted > bulkSize) ? bulkSize : (numDocs - docsInserted);
- var bulk = [];
- for (var i = 0; i < currBulkSize; i++) {
- bulk.push({Counter: docsInserted, hi: "there", i: i, x: data});
- docsInserted++;
- }
+ var bulk = [];
+ for (var i = 0; i < currBulkSize; i++) {
+ bulk.push({Counter: docsInserted, hi: "there", i: i, x: data});
+ docsInserted++;
+ }
- assert.writeOK(coll.insert(bulk));
+ assert.writeOK(coll.insert(bulk));
- if (docsInserted % 10000 == 0) {
- print("Inserted " + docsInserted + " documents.");
- st.printShardingStatus();
- }
+ if (docsInserted % 10000 == 0) {
+ print("Inserted " + docsInserted + " documents.");
+ st.printShardingStatus();
+ }
- if (docsInserted > numDocs / 3 && !balancerOn) {
- // Do one check before we turn balancer on
- checkDocuments();
- print('Turning on balancer after ' + docsInserted + ' documents inserted.');
- st.startBalancer();
- balancerOn = true;
- }
+ if (docsInserted > numDocs / 3 && !balancerOn) {
+ // Do one check before we turn balancer on
+ checkDocuments();
+ print('Turning on balancer after ' + docsInserted + ' documents inserted.');
+ st.startBalancer();
+ balancerOn = true;
}
+}
- checkDocuments();
+checkDocuments();
- st.stop();
+st.stop();
})();
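
The batching loop in bulk_shard_insert.js above can also be expressed with the shell's unordered bulk API. A scaled-down sketch follows, assuming a plain mongo shell; the names and sizes are illustrative only:

// Sketch: insert documents in fixed-size batches, then verify the total count.
var demoColl = db.getSiblingDB("bulkShardInsertDemo").docs;
demoColl.drop();

var totalDocs = 10000;  // scaled down from the test's 1,000,000
var batchSize = 1000;
var inserted = 0;

while (inserted < totalDocs) {
    var currBatchSize = Math.min(batchSize, totalDocs - inserted);
    var bulk = demoColl.initializeUnorderedBulkOp();
    for (var i = 0; i < currBatchSize; i++) {
        bulk.insert({Counter: inserted, i: i});
        inserted++;
    }
    assert.writeOK(bulk.execute());
}

assert.eq(totalDocs, demoColl.find().itcount());
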
diff --git a/jstests/sharding/causal_consistency_shell_support.js b/jstests/sharding/causal_consistency_shell_support.js
index f66b772aa9a..8466209d367 100644
--- a/jstests/sharding/causal_consistency_shell_support.js
+++ b/jstests/sharding/causal_consistency_shell_support.js
@@ -4,186 +4,184 @@
* response, and that the server rejects commands with afterClusterTime ahead of cluster time.
*/
(function() {
- "use strict";
-
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
-
- // Verifies the command works and properly updates operation or cluster time.
- function runCommandAndCheckLogicalTimes(cmdObj, db, shouldAdvance) {
- const session = db.getSession();
-
- // Extract initial operation and cluster time.
- let operationTime = session.getOperationTime();
- let clusterTimeObj = session.getClusterTime();
-
- assert.commandWorked(db.runCommand(cmdObj));
-
- // Verify cluster and operation time.
- if (shouldAdvance) {
- assert(bsonWoCompare(session.getOperationTime(), operationTime) > 0,
- "expected the shell's operationTime to increase after running command: " +
- tojson(cmdObj));
- assert(
- bsonWoCompare(session.getClusterTime().clusterTime, clusterTimeObj.clusterTime) > 0,
- "expected the shell's clusterTime value to increase after running command: " +
- tojson(cmdObj));
- } else {
- // Don't expect either clusterTime or operationTime to not change, because they may be
- // incremented by unrelated activity in the cluster.
- }
+"use strict";
+
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+
+// Verifies the command works and properly updates operation or cluster time.
+function runCommandAndCheckLogicalTimes(cmdObj, db, shouldAdvance) {
+ const session = db.getSession();
+
+ // Extract initial operation and cluster time.
+ let operationTime = session.getOperationTime();
+ let clusterTimeObj = session.getClusterTime();
+
+ assert.commandWorked(db.runCommand(cmdObj));
+
+ // Verify cluster and operation time.
+ if (shouldAdvance) {
+ assert(bsonWoCompare(session.getOperationTime(), operationTime) > 0,
+ "expected the shell's operationTime to increase after running command: " +
+ tojson(cmdObj));
+ assert(bsonWoCompare(session.getClusterTime().clusterTime, clusterTimeObj.clusterTime) > 0,
+ "expected the shell's clusterTime value to increase after running command: " +
+ tojson(cmdObj));
+ } else {
+        // Don't assert that clusterTime or operationTime remain unchanged, because they may be
+ // incremented by unrelated activity in the cluster.
}
-
- // Verifies the command works and correctly updates the shell's operationTime.
- function commandWorksAndUpdatesOperationTime(cmdObj, db) {
- const session = db.getSession();
-
- // Use the latest cluster time returned as a new operationTime and run command.
- const clusterTimeObj = session.getClusterTime();
- session.advanceOperationTime(clusterTimeObj.clusterTime);
- assert.commandWorked(testDB.runCommand(cmdObj));
-
- // Verify the response contents and that new operation time is >= passed in time.
- assert(bsonWoCompare(session.getOperationTime(), clusterTimeObj.clusterTime) >= 0,
- "expected the shell's operationTime to be >= to:" + clusterTimeObj.clusterTime +
- " after running command: " + tojson(cmdObj));
- }
-
- // Manually create a shard so tests on storage engines that don't support majority readConcern
- // can exit early.
- const rsName = "causal_consistency_shell_support_rs";
- const rst = new ReplSetTest({
- nodes: 1,
- name: rsName,
- nodeOptions: {
- enableMajorityReadConcern: "",
- shardsvr: "",
- }
- });
-
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- rst.stopSet();
- return;
+}
+
+// Verifies the command works and correctly updates the shell's operationTime.
+function commandWorksAndUpdatesOperationTime(cmdObj, db) {
+ const session = db.getSession();
+
+ // Use the latest cluster time returned as a new operationTime and run command.
+ const clusterTimeObj = session.getClusterTime();
+ session.advanceOperationTime(clusterTimeObj.clusterTime);
+ assert.commandWorked(testDB.runCommand(cmdObj));
+
+ // Verify the response contents and that new operation time is >= passed in time.
+ assert(bsonWoCompare(session.getOperationTime(), clusterTimeObj.clusterTime) >= 0,
+ "expected the shell's operationTime to be >= to:" + clusterTimeObj.clusterTime +
+ " after running command: " + tojson(cmdObj));
+}
+
+// Manually create a shard so tests on storage engines that don't support majority readConcern
+// can exit early.
+const rsName = "causal_consistency_shell_support_rs";
+const rst = new ReplSetTest({
+ nodes: 1,
+ name: rsName,
+ nodeOptions: {
+ enableMajorityReadConcern: "",
+ shardsvr: "",
}
- rst.initiate();
-
- // Start the sharding test and add the majority readConcern enabled replica set.
- const name = "causal_consistency_shell_support";
- const st = new ShardingTest({name: name, shards: 1, manualAddShard: true});
- assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
-
- const testDB = st.s.getDB("test");
- const session = testDB.getSession();
-
- // Verify causal consistency is disabled unless explicitly set.
- assert.eq(testDB.getMongo()._causalConsistency,
- false,
- "causal consistency should be disabled by default");
- testDB.getMongo().setCausalConsistency(true);
-
- // Verify causal consistency is enabled for the connection and for each supported command.
- assert.eq(testDB.getMongo()._causalConsistency,
- true,
- "calling setCausalConsistency() didn't enable causal consistency");
-
- // Verify cluster times are tracked even before causal consistency is set (so the first
- // operation with causal consistency set can use valid cluster times).
- session.resetOperationTime_forTesting();
-
- assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 1}]}));
- assert.neq(session.getOperationTime(), null);
- assert.neq(session.getClusterTime(), null);
-
- session.resetOperationTime_forTesting();
-
- assert.commandWorked(testDB.runCommand({find: "foo"}));
- assert.neq(session.getOperationTime(), null);
- assert.neq(session.getClusterTime(), null);
-
- // Test that write commands advance both operation and cluster time.
- runCommandAndCheckLogicalTimes({insert: "foo", documents: [{x: 2}]}, testDB, true);
- runCommandAndCheckLogicalTimes(
- {update: "foo", updates: [{q: {x: 2}, u: {$set: {x: 3}}}]}, testDB, true);
-
- // Test that each supported command works as expected and the shell's cluster times are properly
- // forwarded to the server and updated based on the response.
- testDB.getMongo().setCausalConsistency(true);
-
- // Aggregate command.
- let aggColl = "aggColl";
- let aggCmd = {aggregate: aggColl, pipeline: [{$match: {x: 1}}], cursor: {}};
-
- runCommandAndCheckLogicalTimes({insert: aggColl, documents: [{_id: 1, x: 1}]}, testDB, true);
- runCommandAndCheckLogicalTimes(aggCmd, testDB, false);
- commandWorksAndUpdatesOperationTime(aggCmd, testDB);
-
- // Count command.
- let countColl = "countColl";
- let countCmd = {count: countColl};
-
- runCommandAndCheckLogicalTimes({insert: countColl, documents: [{_id: 1, x: 1}]}, testDB, true);
- runCommandAndCheckLogicalTimes(countCmd, testDB, false);
- commandWorksAndUpdatesOperationTime(countCmd, testDB);
-
- // Distinct command.
- let distinctColl = "distinctColl";
- let distinctCmd = {distinct: distinctColl, key: "x"};
-
- runCommandAndCheckLogicalTimes(
- {insert: distinctColl, documents: [{_id: 1, x: 1}]}, testDB, true);
- runCommandAndCheckLogicalTimes(distinctCmd, testDB, false);
- commandWorksAndUpdatesOperationTime(distinctCmd, testDB);
-
- // Find command.
- let findColl = "findColl";
- let findCmd = {find: findColl};
-
- runCommandAndCheckLogicalTimes({insert: findColl, documents: [{_id: 1, x: 1}]}, testDB, true);
- runCommandAndCheckLogicalTimes(findCmd, testDB, false);
- commandWorksAndUpdatesOperationTime(findCmd, testDB);
-
- // Aggregate command with $geoNear.
- let geoNearColl = "geoNearColl";
- let geoNearCmd = {
- aggregate: geoNearColl,
- cursor: {},
- pipeline: [
- {
- $geoNear: {
- near: {type: "Point", coordinates: [-10, 10]},
- distanceField: "dist",
- spherical: true
- }
- },
- ],
- };
-
- assert.commandWorked(testDB[geoNearColl].createIndex({loc: "2dsphere"}));
- runCommandAndCheckLogicalTimes(
- {insert: geoNearColl, documents: [{_id: 1, loc: {type: "Point", coordinates: [-10, 10]}}]},
- testDB,
- true);
- runCommandAndCheckLogicalTimes(geoNearCmd, testDB, false);
- commandWorksAndUpdatesOperationTime(geoNearCmd, testDB);
-
- // GeoSearch is not supported for sharded clusters.
-
- // MapReduce doesn't currently support read concern majority.
-
- // Verify that the server rejects commands when operation time is invalid by running a command
- // with an afterClusterTime value one day ahead.
- const invalidTime = new Timestamp(session.getOperationTime().getTime() + (60 * 60 * 24), 0);
- const invalidCmd = {
- find: "foo",
- readConcern: {level: "majority", afterClusterTime: invalidTime}
- };
- assert.commandFailedWithCode(
- testDB.runCommand(invalidCmd),
- ErrorCodes.InvalidOptions,
- "expected command, " + tojson(invalidCmd) + ", to fail with code, " +
- ErrorCodes.InvalidOptions + ", because the afterClusterTime value, " + invalidTime +
- ", should not be ahead of the clusterTime, " + session.getClusterTime().clusterTime);
+});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
rst.stopSet();
- st.stop();
+ return;
+}
+rst.initiate();
+
+// Start the sharding test and add the majority readConcern enabled replica set.
+const name = "causal_consistency_shell_support";
+const st = new ShardingTest({name: name, shards: 1, manualAddShard: true});
+assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
+
+const testDB = st.s.getDB("test");
+const session = testDB.getSession();
+
+// Verify causal consistency is disabled unless explicitly set.
+assert.eq(testDB.getMongo()._causalConsistency,
+ false,
+ "causal consistency should be disabled by default");
+testDB.getMongo().setCausalConsistency(true);
+
+// Verify causal consistency is enabled for the connection and for each supported command.
+assert.eq(testDB.getMongo()._causalConsistency,
+ true,
+ "calling setCausalConsistency() didn't enable causal consistency");
+
+// Verify cluster times are tracked even before causal consistency is set (so the first
+// operation with causal consistency set can use valid cluster times).
+session.resetOperationTime_forTesting();
+
+assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 1}]}));
+assert.neq(session.getOperationTime(), null);
+assert.neq(session.getClusterTime(), null);
+
+session.resetOperationTime_forTesting();
+
+assert.commandWorked(testDB.runCommand({find: "foo"}));
+assert.neq(session.getOperationTime(), null);
+assert.neq(session.getClusterTime(), null);
+
+// Test that write commands advance both operation and cluster time.
+runCommandAndCheckLogicalTimes({insert: "foo", documents: [{x: 2}]}, testDB, true);
+runCommandAndCheckLogicalTimes(
+ {update: "foo", updates: [{q: {x: 2}, u: {$set: {x: 3}}}]}, testDB, true);
+
+// Test that each supported command works as expected and the shell's cluster times are properly
+// forwarded to the server and updated based on the response.
+testDB.getMongo().setCausalConsistency(true);
+
+// Aggregate command.
+let aggColl = "aggColl";
+let aggCmd = {aggregate: aggColl, pipeline: [{$match: {x: 1}}], cursor: {}};
+
+runCommandAndCheckLogicalTimes({insert: aggColl, documents: [{_id: 1, x: 1}]}, testDB, true);
+runCommandAndCheckLogicalTimes(aggCmd, testDB, false);
+commandWorksAndUpdatesOperationTime(aggCmd, testDB);
+
+// Count command.
+let countColl = "countColl";
+let countCmd = {count: countColl};
+
+runCommandAndCheckLogicalTimes({insert: countColl, documents: [{_id: 1, x: 1}]}, testDB, true);
+runCommandAndCheckLogicalTimes(countCmd, testDB, false);
+commandWorksAndUpdatesOperationTime(countCmd, testDB);
+
+// Distinct command.
+let distinctColl = "distinctColl";
+let distinctCmd = {distinct: distinctColl, key: "x"};
+
+runCommandAndCheckLogicalTimes({insert: distinctColl, documents: [{_id: 1, x: 1}]}, testDB, true);
+runCommandAndCheckLogicalTimes(distinctCmd, testDB, false);
+commandWorksAndUpdatesOperationTime(distinctCmd, testDB);
+
+// Find command.
+let findColl = "findColl";
+let findCmd = {find: findColl};
+
+runCommandAndCheckLogicalTimes({insert: findColl, documents: [{_id: 1, x: 1}]}, testDB, true);
+runCommandAndCheckLogicalTimes(findCmd, testDB, false);
+commandWorksAndUpdatesOperationTime(findCmd, testDB);
+
+// Aggregate command with $geoNear.
+let geoNearColl = "geoNearColl";
+let geoNearCmd = {
+ aggregate: geoNearColl,
+ cursor: {},
+ pipeline: [
+ {
+ $geoNear: {
+ near: {type: "Point", coordinates: [-10, 10]},
+ distanceField: "dist",
+ spherical: true
+ }
+ },
+ ],
+};
+
+assert.commandWorked(testDB[geoNearColl].createIndex({loc: "2dsphere"}));
+runCommandAndCheckLogicalTimes(
+ {insert: geoNearColl, documents: [{_id: 1, loc: {type: "Point", coordinates: [-10, 10]}}]},
+ testDB,
+ true);
+runCommandAndCheckLogicalTimes(geoNearCmd, testDB, false);
+commandWorksAndUpdatesOperationTime(geoNearCmd, testDB);
+
+// GeoSearch is not supported for sharded clusters.
+
+// MapReduce doesn't currently support read concern majority.
+
+// Verify that the server rejects commands when operation time is invalid by running a command
+// with an afterClusterTime value one day ahead.
+const invalidTime = new Timestamp(session.getOperationTime().getTime() + (60 * 60 * 24), 0);
+const invalidCmd = {
+ find: "foo",
+ readConcern: {level: "majority", afterClusterTime: invalidTime}
+};
+assert.commandFailedWithCode(
+ testDB.runCommand(invalidCmd),
+ ErrorCodes.InvalidOptions,
+ "expected command, " + tojson(invalidCmd) + ", to fail with code, " +
+ ErrorCodes.InvalidOptions + ", because the afterClusterTime value, " + invalidTime +
+ ", should not be ahead of the clusterTime, " + session.getClusterTime().clusterTime);
+
+rst.stopSet();
+st.stop();
})();
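
For readers unfamiliar with the logical-time bookkeeping that causal_consistency_shell_support.js relies on, a minimal sketch follows. It assumes a shell connected to a cluster that returns operationTime and $clusterTime (for example through a mongos); the database and collection names are hypothetical:

// Sketch: a write advances the session's operationTime, and another causally
// consistent session can wait for that write via advanceOperationTime().
var demoDB = db.getSiblingDB("causalDemo");
var session = demoDB.getSession();

assert.commandWorked(demoDB.runCommand({insert: "times", documents: [{x: 0}]}));
var before = session.getOperationTime();

assert.commandWorked(demoDB.runCommand({insert: "times", documents: [{x: 1}]}));
var after = session.getOperationTime();
assert(bsonWoCompare(after, before) >= 0, "operationTime went backwards");

var reader = db.getMongo().startSession({causalConsistency: true});
reader.advanceOperationTime(after);
assert.commandWorked(reader.getDatabase("causalDemo").runCommand({find: "times"}));
reader.endSession();
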
diff --git a/jstests/sharding/change_stream_chunk_migration.js b/jstests/sharding/change_stream_chunk_migration.js
index 64f7d860c2a..a4e74ed3efd 100644
--- a/jstests/sharding/change_stream_chunk_migration.js
+++ b/jstests/sharding/change_stream_chunk_migration.js
@@ -2,167 +2,163 @@
// it's migrating a chunk to a new shard.
// @tags: [uses_change_streams]
(function() {
- 'use strict';
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- // TODO WT-3864: Re-enable test for LSM once transaction visibility bug in LSM is resolved.
- if (jsTest.options().wiredTigerCollectionConfigString === "type=lsm") {
- jsTestLog("Skipping test because we're running with WiredTiger's LSM tree.");
- return;
- }
-
- const rsNodeOptions = {
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
- };
- const st =
- new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
-
- const mongos = st.s;
- const mongosColl = mongos.getCollection('test.foo');
- const mongosDB = mongos.getDB("test");
-
- // Enable sharding to inform mongos of the database, allowing us to open a cursor.
- assert.commandWorked(mongos.adminCommand({enableSharding: mongosDB.getName()}));
-
- // Make sure all chunks start on shard 0.
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
- // Open a change stream cursor before the collection is sharded.
- const changeStream = mongosColl.aggregate([{$changeStream: {}}]);
- assert(!changeStream.hasNext(), "Do not expect any results yet");
-
- jsTestLog("Sharding collection");
- // Once we have a cursor, actually shard the collection.
- assert.commandWorked(
- mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Insert two documents.
- assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
-
- // Split the collection into two chunks: [MinKey, 10) and [10, MaxKey].
- assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
-
- jsTestLog("Migrating [10, MaxKey] chunk to shard1.");
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: 20},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- for (let id of[0, 20]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.documentKey, {_id: id});
- }
-
- // Insert into both the chunks.
- assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
-
- // Split again, and move a second chunk to the first shard. The new chunks are:
- // [MinKey, 0), [0, 10), and [10, MaxKey].
- jsTestLog("Moving [MinKey, 0] to shard 1");
- assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: 5},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Insert again, into all three chunks.
- assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
-
- // Make sure we can see all the inserts, without any 'retryNeeded' entries.
- for (let nextExpectedId of[1, 21, -2, 2, 22]) {
- assert.soon(() => changeStream.hasNext());
- let item = changeStream.next();
- assert.eq(item.documentKey, {_id: nextExpectedId});
- }
-
- // Make sure we're at the end of the stream.
- assert(!changeStream.hasNext());
-
- // Test that migrating the last chunk to shard 1 (meaning all chunks are now on the same shard)
- // will not invalidate the change stream.
-
- // Insert into all three chunks.
- jsTestLog("Insert into all three chunks");
- assert.writeOK(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
-
- jsTestLog("Move the [Minkey, 0) chunk to shard 1.");
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: -5},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Insert again, into all three chunks.
- assert.writeOK(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
-
- // Make sure we can see all the inserts, without any 'retryNeeded' entries.
- for (let nextExpectedId of[-3, 3, 23, -4, 4, 24]) {
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().documentKey, {_id: nextExpectedId});
- }
-
- // Now test that adding a new shard and migrating a chunk to it will continue to
- // return the correct results.
- const newShard = new ReplSetTest({name: "newShard", nodes: 1, nodeOptions: rsNodeOptions});
- newShard.startSet({shardsvr: ''});
- newShard.initiate();
- assert.commandWorked(mongos.adminCommand({addShard: newShard.getURL(), name: "newShard"}));
-
- // At this point, there haven't been any migrations to that shard; check that the changeStream
- // works normally.
- assert.writeOK(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
-
- for (let nextExpectedId of[-5, 5, 25]) {
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().documentKey, {_id: nextExpectedId});
- }
-
- assert.writeOK(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
-
- // Now migrate a chunk to the new shard and verify the stream continues to return results
- // from both before and after the migration.
- jsTestLog("Migrating [10, MaxKey] chunk to new shard.");
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: 20},
- to: "newShard",
- _waitForDelete: true
- }));
- assert.writeOK(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
-
- for (let nextExpectedId of[16, -6, 6, 26]) {
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().documentKey, {_id: nextExpectedId});
- }
- assert(!changeStream.hasNext());
-
- st.stop();
- newShard.stopSet();
+'use strict';
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+// TODO WT-3864: Re-enable test for LSM once transaction visibility bug in LSM is resolved.
+if (jsTest.options().wiredTigerCollectionConfigString === "type=lsm") {
+ jsTestLog("Skipping test because we're running with WiredTiger's LSM tree.");
+ return;
+}
+
+const rsNodeOptions = {
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
+};
+const st =
+ new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
+
+const mongos = st.s;
+const mongosColl = mongos.getCollection('test.foo');
+const mongosDB = mongos.getDB("test");
+
+// Enable sharding to inform mongos of the database, allowing us to open a cursor.
+assert.commandWorked(mongos.adminCommand({enableSharding: mongosDB.getName()}));
+
+// Make sure all chunks start on shard 0.
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+// Open a change stream cursor before the collection is sharded.
+const changeStream = mongosColl.aggregate([{$changeStream: {}}]);
+assert(!changeStream.hasNext(), "Do not expect any results yet");
+
+jsTestLog("Sharding collection");
+// Once we have a cursor, actually shard the collection.
+assert.commandWorked(
+ mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Insert two documents.
+assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
+
+// Split the collection into two chunks: [MinKey, 10) and [10, MaxKey].
+assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
+
+jsTestLog("Migrating [10, MaxKey] chunk to shard1.");
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: 20},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+for (let id of [0, 20]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.documentKey, {_id: id});
+}
+
+// Insert into both the chunks.
+assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
+
+// Split again, and move a second chunk to shard 1. The new chunks are:
+// [MinKey, 0), [0, 10), and [10, MaxKey].
+jsTestLog("Moving [0, 10) to shard 1");
+assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: 5},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Insert again, into all three chunks.
+assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
+
+// Make sure we can see all the inserts, without any 'retryNeeded' entries.
+for (let nextExpectedId of [1, 21, -2, 2, 22]) {
+ assert.soon(() => changeStream.hasNext());
+ let item = changeStream.next();
+ assert.eq(item.documentKey, {_id: nextExpectedId});
+}
+
+// Make sure we're at the end of the stream.
+assert(!changeStream.hasNext());
+
+// Test that migrating the last chunk to shard 1 (meaning all chunks are now on the same shard)
+// will not invalidate the change stream.
+
+// Insert into all three chunks.
+jsTestLog("Insert into all three chunks");
+assert.writeOK(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
+
+jsTestLog("Move the [Minkey, 0) chunk to shard 1.");
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: -5},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Insert again, into all three chunks.
+assert.writeOK(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
+
+// Make sure we can see all the inserts, without any 'retryNeeded' entries.
+for (let nextExpectedId of [-3, 3, 23, -4, 4, 24]) {
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().documentKey, {_id: nextExpectedId});
+}
+
+// Now test that adding a new shard and migrating a chunk to it will continue to
+// return the correct results.
+const newShard = new ReplSetTest({name: "newShard", nodes: 1, nodeOptions: rsNodeOptions});
+newShard.startSet({shardsvr: ''});
+newShard.initiate();
+assert.commandWorked(mongos.adminCommand({addShard: newShard.getURL(), name: "newShard"}));
+
+// At this point, there haven't been any migrations to that shard; check that the changeStream
+// works normally.
+assert.writeOK(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
+
+for (let nextExpectedId of [-5, 5, 25]) {
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().documentKey, {_id: nextExpectedId});
+}
+
+assert.writeOK(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
+
+// Now migrate a chunk to the new shard and verify the stream continues to return results
+// from both before and after the migration.
+jsTestLog("Migrating [10, MaxKey] chunk to new shard.");
+assert.commandWorked(mongos.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 20}, to: "newShard", _waitForDelete: true}));
+assert.writeOK(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
+
+for (let nextExpectedId of [16, -6, 6, 26]) {
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().documentKey, {_id: nextExpectedId});
+}
+assert(!changeStream.hasNext());
+
+st.stop();
+newShard.stopSet();
})();
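
As background for change_stream_chunk_migration.js above, the open/insert/read loop and the resume token are what allow a stream to keep producing results across chunk migrations. A minimal sketch, assuming a replica set or sharded cluster that supports change streams (majority read concern); the names are illustrative:

// Sketch: open a stream, read one insert event, then resume after its token.
var demoColl = db.getSiblingDB("csDemo").events;
demoColl.drop();

var cs = demoColl.watch([]);
assert.writeOK(demoColl.insert({_id: 1}));

assert.soon(() => cs.hasNext(), "expected an insert event");
var event = cs.next();
assert.eq("insert", event.operationType);
assert.eq({_id: 1}, event.documentKey);

// The event's _id is a resume token; a new cursor picks up after it.
var resumed = demoColl.watch([], {resumeAfter: event._id});
assert.writeOK(demoColl.insert({_id: 2}));
assert.soon(() => resumed.hasNext(), "expected the post-resume insert event");
assert.eq({_id: 2}, resumed.next().documentKey);
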
diff --git a/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js b/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
index d97e88f62a1..5d854fdf44c 100644
--- a/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
+++ b/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
@@ -5,186 +5,183 @@
// shards.
// @tags: [uses_change_streams]
(function() {
- "use strict";
-
- // For supportsMajorityReadConcern.
- load('jstests/multiVersion/libs/causal_consistency_helpers.js');
-
- // This test only works on storage engines that support committed reads, skip it if the
- // configured engine doesn't support it.
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- // Create a 2-shard cluster. Enable 'writePeriodicNoops' and set 'periodicNoopIntervalSecs' to 1
- // second so that each shard is continually advancing its optime, allowing the
- // AsyncResultsMerger to return sorted results even if some shards have not yet produced any
- // data.
- const st = new ShardingTest({
- shards: 2,
- rs: {nodes: 1, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
- });
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- const shard0DB = st.shard0.getDB(jsTestName());
- const shard1DB = st.shard1.getDB(jsTestName());
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
-
- // Move the [0, MaxKey] chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
-
- // Start the profiler on each shard so that we can examine the getMores' maxTimeMS.
- for (let profileDB of[shard0DB, shard1DB]) {
- assert.commandWorked(profileDB.setProfilingLevel(0));
- profileDB.system.profile.drop();
- assert.commandWorked(profileDB.setProfilingLevel(2));
- }
-
- // Returns 'true' if there is at least one getMore profile entry matching the given namespace,
- // identifying comment and maxTimeMS.
- function profilerHasAtLeastOneMatchingGetMore(profileDB, nss, comment, timeout) {
- return profileDB.system.profile.count({
- "originatingCommand.comment": comment,
- "command.maxTimeMS": timeout,
- op: "getmore",
- ns: nss
- }) > 0;
- }
-
- // Asserts that there is at least one getMore profile entry matching the given namespace and
- // identifying comment, and that all such entries have the given maxTimeMS.
- function assertAllGetMoresHaveTimeout(profileDB, nss, comment, timeout) {
- const getMoreTimeouts =
- profileDB.system.profile
- .aggregate([
- {$match: {op: "getmore", ns: nss, "originatingCommand.comment": comment}},
- {$group: {_id: "$command.maxTimeMS"}}
- ])
- .toArray();
- assert.eq(getMoreTimeouts.length, 1);
- assert.eq(getMoreTimeouts[0]._id, timeout);
- }
-
- // Kills the cursor with the given cursor id (if provided). Then opens a new change stream
- // against 'mongosColl' and returns the new change stream's cursor id.
- //
- // We re-open the change stream in between each test case with a batchSize if 0. This is done to
- // ensure that mongos delivers getMores to the shards for the first getMore against the mongos
- // change stream cursor (thus avoiding issues such as SERVER-35084).
- function reopenChangeStream(existingCursorId) {
- if (existingCursorId) {
- assert.commandWorked(mongosDB.runCommand(
- {killCursors: mongosColl.getName(), cursors: [existingCursorId]}));
- }
-
- const csCmdRes = assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [{$changeStream: {}}],
- comment: testComment,
- cursor: {batchSize: 0}
- }));
- assert.eq(csCmdRes.cursor.firstBatch.length, 0);
- assert.neq(csCmdRes.cursor.id, 0);
- return csCmdRes.cursor.id;
- }
-
- // Timeout values used in the subsequent getMore tests.
- const quarterSec = 250;
- const halfSec = 500;
- const oneSec = 2 * halfSec;
- const fiveSecs = 5 * oneSec;
- const fiveMins = 60 * fiveSecs;
- const thirtyMins = 6 * fiveMins;
- const testComment = "change stream sharded maxTimeMS test";
-
- // Open a $changeStream on the empty, inactive collection.
- let csCursorId = reopenChangeStream();
-
- // Confirm that getMores without an explicit maxTimeMS default to one second on the shards.
- assert.commandWorked(
- mongosDB.runCommand({getMore: csCursorId, collection: mongosColl.getName()}));
- for (let shardDB of[shard0DB, shard1DB]) {
- // The mongos is guaranteed to have already delivered getMores to each of the shards.
- // However, the mongos await time can expire prior to the await time on the shards.
- // Therefore, the getMore on mongos succeeding doesn't guarantee that the getMores on the
- // shards have already been profiled. We use an assert.soon() here to wait for the maxTimeMS
- // on the shards to expire, at which point the getMores will appear in the profile
- // collection.
- assert.soon(() => profilerHasAtLeastOneMatchingGetMore(
- shardDB, mongosColl.getFullName(), testComment, oneSec));
+"use strict";
+
+// For supportsMajorityReadConcern.
+load('jstests/multiVersion/libs/causal_consistency_helpers.js');
+
+// This test only works on storage engines that support committed reads, skip it if the
+// configured engine doesn't support it.
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+// Create a 2-shard cluster. Enable 'writePeriodicNoops' and set 'periodicNoopIntervalSecs' to 1
+// second so that each shard is continually advancing its optime, allowing the
+// AsyncResultsMerger to return sorted results even if some shards have not yet produced any
+// data.
+const st = new ShardingTest({
+ shards: 2,
+ rs: {nodes: 1, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
+});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+
+const shard0DB = st.shard0.getDB(jsTestName());
+const shard1DB = st.shard1.getDB(jsTestName());
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Shard the test collection on _id.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+
+// Move the [0, MaxKey] chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+
+// Start the profiler on each shard so that we can examine the getMores' maxTimeMS.
+for (let profileDB of [shard0DB, shard1DB]) {
+ assert.commandWorked(profileDB.setProfilingLevel(0));
+ profileDB.system.profile.drop();
+ assert.commandWorked(profileDB.setProfilingLevel(2));
+}
+
+// Returns 'true' if there is at least one getMore profile entry matching the given namespace,
+// identifying comment and maxTimeMS.
+function profilerHasAtLeastOneMatchingGetMore(profileDB, nss, comment, timeout) {
+ return profileDB.system.profile.count({
+ "originatingCommand.comment": comment,
+ "command.maxTimeMS": timeout,
+ op: "getmore",
+ ns: nss
+ }) > 0;
+}
+
+// Asserts that there is at least one getMore profile entry matching the given namespace and
+// identifying comment, and that all such entries have the given maxTimeMS.
+function assertAllGetMoresHaveTimeout(profileDB, nss, comment, timeout) {
+ const getMoreTimeouts =
+ profileDB.system.profile
+ .aggregate([
+ {$match: {op: "getmore", ns: nss, "originatingCommand.comment": comment}},
+ {$group: {_id: "$command.maxTimeMS"}}
+ ])
+ .toArray();
+ assert.eq(getMoreTimeouts.length, 1);
+ assert.eq(getMoreTimeouts[0]._id, timeout);
+}
+
+// Kills the cursor with the given cursor id (if provided). Then opens a new change stream
+// against 'mongosColl' and returns the new change stream's cursor id.
+//
+// We re-open the change stream in between each test case with a batchSize of 0. This is done to
+// ensure that mongos delivers getMores to the shards for the first getMore against the mongos
+// change stream cursor (thus avoiding issues such as SERVER-35084).
+function reopenChangeStream(existingCursorId) {
+ if (existingCursorId) {
+ assert.commandWorked(
+ mongosDB.runCommand({killCursors: mongosColl.getName(), cursors: [existingCursorId]}));
}
- // Verify that with no activity on the shards, a $changeStream with maxTimeMS waits for the full
- // duration on mongoS. Allow some leniency since the server-side wait may wake spuriously.
- csCursorId = reopenChangeStream(csCursorId);
- let startTime = (new Date()).getTime();
+ const csCmdRes = assert.commandWorked(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [{$changeStream: {}}],
+ comment: testComment,
+ cursor: {batchSize: 0}
+ }));
+ assert.eq(csCmdRes.cursor.firstBatch.length, 0);
+ assert.neq(csCmdRes.cursor.id, 0);
+ return csCmdRes.cursor.id;
+}
+
+// Timeout values used in the subsequent getMore tests.
+const quarterSec = 250;
+const halfSec = 500;
+const oneSec = 2 * halfSec;
+const fiveSecs = 5 * oneSec;
+const fiveMins = 60 * fiveSecs;
+const thirtyMins = 6 * fiveMins;
+const testComment = "change stream sharded maxTimeMS test";
+
+// Open a $changeStream on the empty, inactive collection.
+let csCursorId = reopenChangeStream();
+
+// Confirm that getMores without an explicit maxTimeMS default to one second on the shards.
+assert.commandWorked(mongosDB.runCommand({getMore: csCursorId, collection: mongosColl.getName()}));
+for (let shardDB of [shard0DB, shard1DB]) {
+ // The mongos is guaranteed to have already delivered getMores to each of the shards.
+ // However, the mongos await time can expire prior to the await time on the shards.
+ // Therefore, the getMore on mongos succeeding doesn't guarantee that the getMores on the
+ // shards have already been profiled. We use an assert.soon() here to wait for the maxTimeMS
+ // on the shards to expire, at which point the getMores will appear in the profile
+ // collection.
+ assert.soon(() => profilerHasAtLeastOneMatchingGetMore(
+ shardDB, mongosColl.getFullName(), testComment, oneSec));
+}
+
+// Verify that with no activity on the shards, a $changeStream with maxTimeMS waits for the full
+// duration on mongoS. Allow some leniency since the server-side wait may wake spuriously.
+csCursorId = reopenChangeStream(csCursorId);
+let startTime = (new Date()).getTime();
+assert.commandWorked(mongosDB.runCommand(
+ {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: fiveSecs}));
+assert.gte((new Date()).getTime() - startTime, fiveSecs - halfSec);
+
+// Confirm that each getMore dispatched to the shards during this period had a maxTimeMS of 1s.
+for (let shardDB of [shard0DB, shard1DB]) {
+ assertAllGetMoresHaveTimeout(shardDB, mongosColl.getFullName(), testComment, oneSec);
+}
+
+// Issue a getMore with a sub-second maxTimeMS. This should propagate to the shards as-is.
+csCursorId = reopenChangeStream(csCursorId);
+assert.commandWorked(mongosDB.runCommand(
+ {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: halfSec}));
+
+for (let shardDB of [shard0DB, shard1DB]) {
+ // The mongos is guaranteed to have already delivered getMores to each of the shards.
+ // However, the mongos await time can expire prior to the await time on the shards.
+ // Therefore, the getMore on mongos succeeding doesn't guarantee that the getMores on the
+ // shards have already been profiled. We use an assert.soon() here to wait for the maxTimeMS
+ // on the shards to expire, at which point the getMores will appear in the profile
+ // collection.
+ assert.soon(() => profilerHasAtLeastOneMatchingGetMore(
+ shardDB, mongosColl.getFullName(), testComment, halfSec));
+}
+
+// Write a document to shard0, and confirm that - despite the fact that shard1 is still idle - a
+// getMore with a high maxTimeMS returns the document before this timeout expires.
+csCursorId = reopenChangeStream(csCursorId);
+assert.writeOK(mongosColl.insert({_id: -1}));
+startTime = (new Date()).getTime();
+const csResult = assert.commandWorked(mongosDB.runCommand(
+ {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: thirtyMins}));
+assert.lte((new Date()).getTime() - startTime, fiveMins);
+assert.docEq(csResult.cursor.nextBatch[0].fullDocument, {_id: -1});
+
+// Open a change stream with the default maxTimeMS. Then verify that if the client starts
+// issuing getMores with a subsecond maxTimeMS, mongos eventually schedules getMores on the
+// shards with this subsecond maxTimeMS value.
+csCursorId = reopenChangeStream(csCursorId);
+assert.commandWorked(mongosDB.runCommand({getMore: csCursorId, collection: mongosColl.getName()}));
+assert.soon(function() {
+ // Run a getMore with a 250ms maxTimeMS against mongos.
assert.commandWorked(mongosDB.runCommand(
- {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: fiveSecs}));
- assert.gte((new Date()).getTime() - startTime, fiveSecs - halfSec);
-
- // Confirm that each getMore dispatched to the shards during this period had a maxTimeMS of 1s.
- for (let shardDB of[shard0DB, shard1DB]) {
- assertAllGetMoresHaveTimeout(shardDB, mongosColl.getFullName(), testComment, oneSec);
- }
-
- // Issue a getMore with a sub-second maxTimeMS. This should propagate to the shards as-is.
- csCursorId = reopenChangeStream(csCursorId);
- assert.commandWorked(mongosDB.runCommand(
- {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: halfSec}));
-
- for (let shardDB of[shard0DB, shard1DB]) {
- // The mongos is guaranteed to have already delivered getMores to each of the shards.
- // However, the mongos await time can expire prior to the await time on the shards.
- // Therefore, the getMore on mongos succeeding doesn't guarantee that the getMores on the
- // shards have already been profiled. We use an assert.soon() here to wait for the maxTimeMS
- // on the shards to expire, at which point the getMores will appear in the profile
- // collection.
- assert.soon(() => profilerHasAtLeastOneMatchingGetMore(
- shardDB, mongosColl.getFullName(), testComment, halfSec));
- }
-
- // Write a document to shard0, and confirm that - despite the fact that shard1 is still idle - a
- // getMore with a high maxTimeMS returns the document before this timeout expires.
- csCursorId = reopenChangeStream(csCursorId);
- assert.writeOK(mongosColl.insert({_id: -1}));
- startTime = (new Date()).getTime();
- const csResult = assert.commandWorked(mongosDB.runCommand(
- {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: thirtyMins}));
- assert.lte((new Date()).getTime() - startTime, fiveMins);
- assert.docEq(csResult.cursor.nextBatch[0].fullDocument, {_id: -1});
-
- // Open a change stream with the default maxTimeMS. Then verify that if the client starts
- // issuing getMores with a subsecond maxTimeMS, that mongos eventually schedules getMores on the
- // shards with this subsecond maxTimeMS value.
- csCursorId = reopenChangeStream(csCursorId);
- assert.commandWorked(
- mongosDB.runCommand({getMore: csCursorId, collection: mongosColl.getName()}));
- assert.soon(function() {
- // Run a getMore with a 250ms maxTimeMS against mongos.
- assert.commandWorked(mongosDB.runCommand(
- {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: quarterSec}));
- // Check whether all shards now have a getMore with 250ms maxTimeMS recorded in their
- // profile collections.
- return [shard0DB, shard1DB].every(function(shardDB) {
- return profilerHasAtLeastOneMatchingGetMore(
- shardDB, mongosColl.getFullName(), testComment, quarterSec);
- });
+ {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: quarterSec}));
+ // Check whether all shards now have a getMore with 250ms maxTimeMS recorded in their
+ // profile collections.
+ return [shard0DB, shard1DB].every(function(shardDB) {
+ return profilerHasAtLeastOneMatchingGetMore(
+ shardDB, mongosColl.getFullName(), testComment, quarterSec);
});
+});
- st.stop();
+st.stop();
})();
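The checks above lean on a profiler helper defined earlier in this test file (not shown in this hunk). As a rough reference only, a check of that shape could look like the sketch below; the function name and the field paths ('originatingCommand.comment', 'command.maxTimeMS') are assumptions for illustration, not the helper's actual definition, and it presumes profiling level 2 is already enabled on the shard.

// Illustrative sketch only -- not the helper used by this test. Assumes getMore profile
// entries expose the originating aggregate's comment under 'originatingCommand.comment'
// and the requested timeout under 'command.maxTimeMS'.
function hasMatchingGetMore(shardDB, ns, comment, maxTimeMS) {
    return shardDB.system.profile
               .find({
                   op: "getmore",
                   ns: ns,
                   "originatingCommand.comment": comment,
                   "command.maxTimeMS": maxTimeMS
               })
               .itcount() > 0;
}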
diff --git a/jstests/sharding/change_stream_lookup_single_shard_cluster.js b/jstests/sharding/change_stream_lookup_single_shard_cluster.js
index 60ded0f352d..53fed919125 100644
--- a/jstests/sharding/change_stream_lookup_single_shard_cluster.js
+++ b/jstests/sharding/change_stream_lookup_single_shard_cluster.js
@@ -3,63 +3,63 @@
// sharded collection.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
- // For supportsMajorityReadConcern.
- load('jstests/multiVersion/libs/causal_consistency_helpers.js');
+// For supportsMajorityReadConcern.
+load('jstests/multiVersion/libs/causal_consistency_helpers.js');
- // TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
- if (!jsTestOptions().enableMajorityReadConcern &&
- jsTestOptions().mongosBinVersion === 'last-stable') {
- jsTestLog(
- "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
- return;
- }
+// TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
+if (!jsTestOptions().enableMajorityReadConcern &&
+ jsTestOptions().mongosBinVersion === 'last-stable') {
+ jsTestLog(
+ "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
+ return;
+}
- // This test only works on storage engines that support committed reads, skip it if the
- // configured engine doesn't support it.
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+// This test only works on storage engines that support committed reads, skip it if the
+// configured engine doesn't support it.
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- // Create a cluster with only 1 shard.
- const st = new ShardingTest({
- shards: 1,
- rs: {nodes: 1, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
- });
+// Create a cluster with only 1 shard.
+const st = new ShardingTest({
+ shards: 1,
+ rs: {nodes: 1, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
+});
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
- // Enable sharding, shard on _id, and insert a test document which will be updated later.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
- assert.writeOK(mongosColl.insert({_id: 1}));
+// Enable sharding, shard on _id, and insert a test document which will be updated later.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+assert.writeOK(mongosColl.insert({_id: 1}));
- // Verify that the pipeline splits and merges on mongoS despite only targeting a single shard.
- const explainPlan = assert.commandWorked(
- mongosColl.explain().aggregate([{$changeStream: {fullDocument: "updateLookup"}}]));
- assert.neq(explainPlan.splitPipeline, null);
- assert.eq(explainPlan.mergeType, "mongos");
+// Verify that the pipeline splits and merges on mongoS despite only targeting a single shard.
+const explainPlan = assert.commandWorked(
+ mongosColl.explain().aggregate([{$changeStream: {fullDocument: "updateLookup"}}]));
+assert.neq(explainPlan.splitPipeline, null);
+assert.eq(explainPlan.mergeType, "mongos");
- // Open a $changeStream on the collection with 'updateLookup' and update the test doc.
- const stream = mongosColl.watch([], {fullDocument: "updateLookup"});
- const wholeDbStream = mongosDB.watch([], {fullDocument: "updateLookup"});
+// Open a $changeStream on the collection with 'updateLookup' and update the test doc.
+const stream = mongosColl.watch([], {fullDocument: "updateLookup"});
+const wholeDbStream = mongosDB.watch([], {fullDocument: "updateLookup"});
- mongosColl.update({_id: 1}, {$set: {updated: true}});
+mongosColl.update({_id: 1}, {$set: {updated: true}});
- // Verify that the document is successfully retrieved from the single-collection and whole-db
- // change streams.
- assert.soon(() => stream.hasNext());
- assert.docEq(stream.next().fullDocument, {_id: 1, updated: true});
+// Verify that the document is successfully retrieved from the single-collection and whole-db
+// change streams.
+assert.soon(() => stream.hasNext());
+assert.docEq(stream.next().fullDocument, {_id: 1, updated: true});
- assert.soon(() => wholeDbStream.hasNext());
- assert.docEq(wholeDbStream.next().fullDocument, {_id: 1, updated: true});
+assert.soon(() => wholeDbStream.hasNext());
+assert.docEq(wholeDbStream.next().fullDocument, {_id: 1, updated: true});
- stream.close();
- wholeDbStream.close();
+stream.close();
+wholeDbStream.close();
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/change_stream_metadata_notifications.js b/jstests/sharding/change_stream_metadata_notifications.js
index f535012a7b2..48138d089ec 100644
--- a/jstests/sharding/change_stream_metadata_notifications.js
+++ b/jstests/sharding/change_stream_metadata_notifications.js
@@ -2,154 +2,154 @@
// Legacy getMore fails after dropping the database that the original cursor is on.
// @tags: [requires_find_command]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
- load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
+load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
- // For supportsMajorityReadConcern.
- load('jstests/multiVersion/libs/causal_consistency_helpers.js');
+// For supportsMajorityReadConcern.
+load('jstests/multiVersion/libs/causal_consistency_helpers.js');
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- const st = new ShardingTest({
- shards: 2,
- rs: {
- nodes: 1,
- enableMajorityReadConcern: '',
- }
- });
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Shard the test collection on a field called 'shardKey'.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {shardKey: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: 0}}));
-
- // Move the [0, MaxKey] chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {shardKey: 1}, to: st.rs1.getURL()}));
-
- // Write a document to each chunk.
- assert.writeOK(mongosColl.insert({shardKey: -1, _id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({shardKey: 1, _id: 1}, {writeConcern: {w: "majority"}}));
-
- let changeStream = mongosColl.watch();
-
- // We awaited the replication of the first writes, so the change stream shouldn't return them.
- assert.writeOK(mongosColl.update({shardKey: -1, _id: -1}, {$set: {updated: true}}));
- assert.writeOK(mongosColl.update({shardKey: 1, _id: 1}, {$set: {updated: true}}));
- assert.writeOK(mongosColl.insert({shardKey: 2, _id: 2}));
-
- // Drop the collection and test that we return a "drop" entry, followed by an "invalidate"
- // entry.
- mongosColl.drop();
-
- // Test that we see the two writes that happened before the collection drop.
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey.shardKey, -1);
- const resumeTokenFromFirstUpdate = next._id;
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey.shardKey, 1);
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.documentKey, {_id: 2});
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "drop");
- assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "invalidate");
- assert(changeStream.isExhausted());
-
- // With an explicit collation, test that we can resume from before the collection drop.
- changeStream = mongosColl.watch(
- [], {resumeAfter: resumeTokenFromFirstUpdate, collation: {locale: "simple"}});
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, {shardKey: 1, _id: 1});
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.documentKey, {shardKey: 2, _id: 2});
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "drop");
- assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "invalidate");
- assert(changeStream.isExhausted());
-
- // Test that we can resume the change stream without specifying an explicit collation.
- assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdate}}],
- cursor: {}
- }));
-
- // Recreate and shard the collection.
- assert.commandWorked(mongosDB.createCollection(mongosColl.getName()));
-
- // Shard the test collection on shardKey.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {shardKey: 1}}));
-
- // Test that resuming the change stream on the recreated collection succeeds, since we will not
- // attempt to inherit the collection's default collation and can therefore ignore the new UUID.
- assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdate}}],
- cursor: {}
- }));
-
- // Recreate the collection as unsharded and open a change stream on it.
- assertDropAndRecreateCollection(mongosDB, mongosColl.getName());
-
- changeStream = mongosColl.watch();
-
- // Drop the database and verify that the stream returns a collection drop followed by an
- // invalidate.
- assert.commandWorked(mongosDB.dropDatabase());
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "drop");
- assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "invalidate");
- assert(changeStream.isExhausted());
-
- st.stop();
+const st = new ShardingTest({
+ shards: 2,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+ }
+});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Shard the test collection on a field called 'shardKey'.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {shardKey: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
+assert.commandWorked(
+ mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: 0}}));
+
+// Move the [0, MaxKey] chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {shardKey: 1}, to: st.rs1.getURL()}));
+
+// Write a document to each chunk.
+assert.writeOK(mongosColl.insert({shardKey: -1, _id: -1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({shardKey: 1, _id: 1}, {writeConcern: {w: "majority"}}));
+
+let changeStream = mongosColl.watch();
+
+// We awaited the replication of the first writes, so the change stream shouldn't return them.
+assert.writeOK(mongosColl.update({shardKey: -1, _id: -1}, {$set: {updated: true}}));
+assert.writeOK(mongosColl.update({shardKey: 1, _id: 1}, {$set: {updated: true}}));
+assert.writeOK(mongosColl.insert({shardKey: 2, _id: 2}));
+
+// Drop the collection and test that we return a "drop" entry, followed by an "invalidate"
+// entry.
+mongosColl.drop();
+
+// Test that we see the two writes that happened before the collection drop.
+assert.soon(() => changeStream.hasNext());
+let next = changeStream.next();
+assert.eq(next.operationType, "update");
+assert.eq(next.documentKey.shardKey, -1);
+const resumeTokenFromFirstUpdate = next._id;
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "update");
+assert.eq(next.documentKey.shardKey, 1);
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "insert");
+assert.eq(next.documentKey, {_id: 2});
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "drop");
+assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "invalidate");
+assert(changeStream.isExhausted());
+
+// With an explicit collation, test that we can resume from before the collection drop.
+changeStream =
+ mongosColl.watch([], {resumeAfter: resumeTokenFromFirstUpdate, collation: {locale: "simple"}});
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "update");
+assert.eq(next.documentKey, {shardKey: 1, _id: 1});
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "insert");
+assert.eq(next.documentKey, {shardKey: 2, _id: 2});
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "drop");
+assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "invalidate");
+assert(changeStream.isExhausted());
+
+// Test that we can resume the change stream without specifying an explicit collation.
+assert.commandWorked(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdate}}],
+ cursor: {}
+}));
+
+// Recreate and shard the collection.
+assert.commandWorked(mongosDB.createCollection(mongosColl.getName()));
+
+// Shard the test collection on shardKey.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {shardKey: 1}}));
+
+// Test that resuming the change stream on the recreated collection succeeds, since we will not
+// attempt to inherit the collection's default collation and can therefore ignore the new UUID.
+assert.commandWorked(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdate}}],
+ cursor: {}
+}));
+
+// Recreate the collection as unsharded and open a change stream on it.
+assertDropAndRecreateCollection(mongosDB, mongosColl.getName());
+
+changeStream = mongosColl.watch();
+
+// Drop the database and verify that the stream returns a collection drop followed by an
+// invalidate.
+assert.commandWorked(mongosDB.dropDatabase());
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "drop");
+assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "invalidate");
+assert(changeStream.isExhausted());
+
+st.stop();
})();
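For comparison with the watch() call above, the resume with an explicit simple collation can also be issued as a raw aggregate command. The snippet below is a minimal sketch that mirrors the test's own runCommand calls; 'resumeToken' stands in for the stored resumeTokenFromFirstUpdate.

// Minimal sketch: command-level resume with an explicit simple collation, mirroring the
// watch() call in the test above. 'resumeToken' is a placeholder for the saved token.
assert.commandWorked(mongosDB.runCommand({
    aggregate: mongosColl.getName(),
    pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
    collation: {locale: "simple"},
    cursor: {}
}));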
diff --git a/jstests/sharding/change_stream_no_shards.js b/jstests/sharding/change_stream_no_shards.js
index e92c91d7322..8e682172f9c 100644
--- a/jstests/sharding/change_stream_no_shards.js
+++ b/jstests/sharding/change_stream_no_shards.js
@@ -3,37 +3,37 @@
* set with a cursorID of zero.
*/
(function() {
- const st = new ShardingTest({shards: 0, config: 1});
+const st = new ShardingTest({shards: 0, config: 1});
- const adminDB = st.s.getDB("admin");
- const testDB = st.s.getDB("test");
+const adminDB = st.s.getDB("admin");
+const testDB = st.s.getDB("test");
- // Test that attempting to open a stream on a single collection results in an empty, closed
- // cursor response.
- let csCmdRes = assert.commandWorked(
- testDB.runCommand({aggregate: "testing", pipeline: [{$changeStream: {}}], cursor: {}}));
- assert.docEq(csCmdRes.cursor.firstBatch, []);
- assert.eq(csCmdRes.cursor.id, 0);
+// Test that attempting to open a stream on a single collection results in an empty, closed
+// cursor response.
+let csCmdRes = assert.commandWorked(
+ testDB.runCommand({aggregate: "testing", pipeline: [{$changeStream: {}}], cursor: {}}));
+assert.docEq(csCmdRes.cursor.firstBatch, []);
+assert.eq(csCmdRes.cursor.id, 0);
- // Test that attempting to open a whole-db stream results in an empty, closed cursor response.
- csCmdRes = assert.commandWorked(
- testDB.runCommand({aggregate: 1, pipeline: [{$changeStream: {}}], cursor: {}}));
- assert.docEq(csCmdRes.cursor.firstBatch, []);
- assert.eq(csCmdRes.cursor.id, 0);
+// Test that attempting to open a whole-db stream results in an empty, closed cursor response.
+csCmdRes = assert.commandWorked(
+ testDB.runCommand({aggregate: 1, pipeline: [{$changeStream: {}}], cursor: {}}));
+assert.docEq(csCmdRes.cursor.firstBatch, []);
+assert.eq(csCmdRes.cursor.id, 0);
- // Test that attempting to open a cluster-wide stream results in an empty, closed cursor
- // response.
- csCmdRes = assert.commandWorked(adminDB.runCommand(
- {aggregate: 1, pipeline: [{$changeStream: {allChangesForCluster: true}}], cursor: {}}));
- assert.docEq(csCmdRes.cursor.firstBatch, []);
- assert.eq(csCmdRes.cursor.id, 0);
+// Test that attempting to open a cluster-wide stream results in an empty, closed cursor
+// response.
+csCmdRes = assert.commandWorked(adminDB.runCommand(
+ {aggregate: 1, pipeline: [{$changeStream: {allChangesForCluster: true}}], cursor: {}}));
+assert.docEq(csCmdRes.cursor.firstBatch, []);
+assert.eq(csCmdRes.cursor.id, 0);
- // Test that a regular, non-$changeStream aggregation also results in an empty cursor when no
- // shards are present.
- const nonCsCmdRes = assert.commandWorked(
- testDB.runCommand({aggregate: "testing", pipeline: [{$match: {}}], cursor: {}}));
- assert.docEq(nonCsCmdRes.cursor.firstBatch, []);
- assert.eq(nonCsCmdRes.cursor.id, 0);
+// Test that a regular, non-$changeStream aggregation also results in an empty cursor when no
+// shards are present.
+const nonCsCmdRes = assert.commandWorked(
+ testDB.runCommand({aggregate: "testing", pipeline: [{$match: {}}], cursor: {}}));
+assert.docEq(nonCsCmdRes.cursor.firstBatch, []);
+assert.eq(nonCsCmdRes.cursor.id, 0);
- st.stop();
+st.stop();
})();
\ No newline at end of file
diff --git a/jstests/sharding/change_stream_read_preference.js b/jstests/sharding/change_stream_read_preference.js
index 4f35b42424a..1c4129e9952 100644
--- a/jstests/sharding/change_stream_read_preference.js
+++ b/jstests/sharding/change_stream_read_preference.js
@@ -2,139 +2,136 @@
// user.
// @tags: [uses_change_streams]
(function() {
- "use strict";
-
- load('jstests/libs/profiler.js'); // For various profiler helpers.
-
- // For supportsMajorityReadConcern.
- load('jstests/multiVersion/libs/causal_consistency_helpers.js');
-
- // TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
- if (!jsTestOptions().enableMajorityReadConcern &&
- jsTestOptions().mongosBinVersion === 'last-stable') {
- jsTestLog(
- "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
- return;
- }
-
- // This test only works on storage engines that support committed reads, skip it if the
- // configured engine doesn't support it.
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- const st = new ShardingTest({
- name: "change_stream_read_pref",
- shards: 2,
- rs: {
- nodes: 2,
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
- },
+"use strict";
+
+load('jstests/libs/profiler.js'); // For various profiler helpers.
+
+// For supportsMajorityReadConcern.
+load('jstests/multiVersion/libs/causal_consistency_helpers.js');
+
+// TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
+if (!jsTestOptions().enableMajorityReadConcern &&
+ jsTestOptions().mongosBinVersion === 'last-stable') {
+ jsTestLog(
+ "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
+ return;
+}
+
+// This test only works on storage engines that support committed reads, skip it if the
+// configured engine doesn't support it.
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ name: "change_stream_read_pref",
+ shards: 2,
+ rs: {
+ nodes: 2,
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
+ },
+});
+
+const dbName = jsTestName();
+const mongosDB = st.s0.getDB(dbName);
+const mongosColl = mongosDB[jsTestName()];
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Shard the test collection on _id.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+
+// Move the [0, MaxKey] chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+
+// Turn on the profiler.
+for (let rs of [st.rs0, st.rs1]) {
+ assert.commandWorked(rs.getPrimary().getDB(dbName).setProfilingLevel(2));
+ assert.commandWorked(rs.getSecondary().getDB(dbName).setProfilingLevel(2));
+}
+
+// Write a document to each chunk.
+assert.writeOK(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+
+// Test that change streams go to the primary by default.
+let changeStreamComment = "change stream against primary";
+const primaryStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}],
+ {comment: changeStreamComment});
+
+assert.writeOK(mongosColl.update({_id: -1}, {$set: {updated: true}}));
+assert.writeOK(mongosColl.update({_id: 1}, {$set: {updated: true}}));
+
+assert.soon(() => primaryStream.hasNext());
+assert.eq(primaryStream.next().fullDocument, {_id: -1, updated: true});
+assert.soon(() => primaryStream.hasNext());
+assert.eq(primaryStream.next().fullDocument, {_id: 1, updated: true});
+
+for (let rs of [st.rs0, st.rs1]) {
+ const primaryDB = rs.getPrimary().getDB(dbName);
+ // Test that the change stream itself goes to the primary. There might be more than one if
+ // we needed multiple getMores to retrieve the changes.
+ // TODO SERVER-31650 We have to use 'originatingCommand' here and look for the getMore
+ // because the initial aggregate will not show up.
+ profilerHasAtLeastOneMatchingEntryOrThrow(
+ {profileDB: primaryDB, filter: {'originatingCommand.comment': changeStreamComment}});
+
+ // Test that the update lookup goes to the primary as well.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryDB,
+ filter: {op: "query", ns: mongosColl.getFullName(), "command.comment": changeStreamComment}
});
+}
+
+primaryStream.close();
+
+// Test that change streams go to the secondary when the readPreference is {mode: "secondary"}.
+changeStreamComment = 'change stream against secondary';
+const secondaryStream =
+ mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}],
+ {comment: changeStreamComment, $readPreference: {mode: "secondary"}});
+
+assert.writeOK(mongosColl.update({_id: -1}, {$set: {updatedCount: 2}}));
+assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
+
+assert.soon(() => secondaryStream.hasNext());
+assert.eq(secondaryStream.next().fullDocument, {_id: -1, updated: true, updatedCount: 2});
+assert.soon(() => secondaryStream.hasNext());
+assert.eq(secondaryStream.next().fullDocument, {_id: 1, updated: true, updatedCount: 2});
+
+for (let rs of [st.rs0, st.rs1]) {
+ const secondaryDB = rs.getSecondary().getDB(dbName);
+ // Test that the change stream itself goes to the secondary. There might be more than one if
+ // we needed multiple getMores to retrieve the changes.
+ // TODO SERVER-31650 We have to use 'originatingCommand' here and look for the getMore
+ // because the initial aggregate will not show up.
+ profilerHasAtLeastOneMatchingEntryOrThrow(
+ {profileDB: secondaryDB, filter: {'originatingCommand.comment': changeStreamComment}});
+
+ // Test that the update lookup goes to the secondary as well.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: secondaryDB,
+ filter: {
+ op: "query",
+ ns: mongosColl.getFullName(),
+ "command.comment": changeStreamComment,
+ // We need to filter out any profiler entries with a stale config - this is the
+ // first read on this secondary with a readConcern specified, so it is the first
+ // read on this secondary that will enforce shard version.
+ errCode: {$ne: ErrorCodes.StaleConfig}
+ }
+ });
+}
- const dbName = jsTestName();
- const mongosDB = st.s0.getDB(dbName);
- const mongosColl = mongosDB[jsTestName()];
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
-
- // Move the [0, MaxKey] chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
-
- // Turn on the profiler.
- for (let rs of[st.rs0, st.rs1]) {
- assert.commandWorked(rs.getPrimary().getDB(dbName).setProfilingLevel(2));
- assert.commandWorked(rs.getSecondary().getDB(dbName).setProfilingLevel(2));
- }
-
- // Write a document to each chunk.
- assert.writeOK(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-
- // Test that change streams go to the primary by default.
- let changeStreamComment = "change stream against primary";
- const primaryStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}],
- {comment: changeStreamComment});
-
- assert.writeOK(mongosColl.update({_id: -1}, {$set: {updated: true}}));
- assert.writeOK(mongosColl.update({_id: 1}, {$set: {updated: true}}));
-
- assert.soon(() => primaryStream.hasNext());
- assert.eq(primaryStream.next().fullDocument, {_id: -1, updated: true});
- assert.soon(() => primaryStream.hasNext());
- assert.eq(primaryStream.next().fullDocument, {_id: 1, updated: true});
-
- for (let rs of[st.rs0, st.rs1]) {
- const primaryDB = rs.getPrimary().getDB(dbName);
- // Test that the change stream itself goes to the primary. There might be more than one if
- // we needed multiple getMores to retrieve the changes.
- // TODO SERVER-31650 We have to use 'originatingCommand' here and look for the getMore
- // because the initial aggregate will not show up.
- profilerHasAtLeastOneMatchingEntryOrThrow(
- {profileDB: primaryDB, filter: {'originatingCommand.comment': changeStreamComment}});
-
- // Test that the update lookup goes to the primary as well.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryDB,
- filter: {
- op: "query",
- ns: mongosColl.getFullName(), "command.comment": changeStreamComment
- }
- });
- }
-
- primaryStream.close();
-
- // Test that change streams go to the secondary when the readPreference is {mode: "secondary"}.
- changeStreamComment = 'change stream against secondary';
- const secondaryStream =
- mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}],
- {comment: changeStreamComment, $readPreference: {mode: "secondary"}});
-
- assert.writeOK(mongosColl.update({_id: -1}, {$set: {updatedCount: 2}}));
- assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
-
- assert.soon(() => secondaryStream.hasNext());
- assert.eq(secondaryStream.next().fullDocument, {_id: -1, updated: true, updatedCount: 2});
- assert.soon(() => secondaryStream.hasNext());
- assert.eq(secondaryStream.next().fullDocument, {_id: 1, updated: true, updatedCount: 2});
-
- for (let rs of[st.rs0, st.rs1]) {
- const secondaryDB = rs.getSecondary().getDB(dbName);
- // Test that the change stream itself goes to the secondary. There might be more than one if
- // we needed multiple getMores to retrieve the changes.
- // TODO SERVER-31650 We have to use 'originatingCommand' here and look for the getMore
- // because the initial aggregate will not show up.
- profilerHasAtLeastOneMatchingEntryOrThrow(
- {profileDB: secondaryDB, filter: {'originatingCommand.comment': changeStreamComment}});
-
- // Test that the update lookup goes to the secondary as well.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: secondaryDB,
- filter: {
- op: "query",
- ns: mongosColl.getFullName(), "command.comment": changeStreamComment,
- // We need to filter out any profiler entries with a stale config - this is the
- // first read on this secondary with a readConcern specified, so it is the first
- // read on this secondary that will enforce shard version.
- errCode: {$ne: ErrorCodes.StaleConfig}
- }
- });
- }
-
- secondaryStream.close();
- st.stop();
+secondaryStream.close();
+st.stop();
}());
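The secondary-targeted stream above is opened through the shell's aggregate() helper. Expressed as a raw command against the mongos it would look roughly like the sketch below; placing $readPreference in the command body is an assumption about how the helper forwards that option, not something traced from the shell source.

// Rough sketch of the command shape behind the secondary-targeted aggregate above; the
// top-level $readPreference placement is an assumption for illustration.
assert.commandWorked(mongosDB.runCommand({
    aggregate: mongosColl.getName(),
    pipeline: [{$changeStream: {fullDocument: "updateLookup"}}],
    cursor: {},
    comment: "change stream against secondary",
    $readPreference: {mode: "secondary"}
}));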
diff --git a/jstests/sharding/change_stream_resume_from_different_mongos.js b/jstests/sharding/change_stream_resume_from_different_mongos.js
index 7efe9e06a36..73914b51af1 100644
--- a/jstests/sharding/change_stream_resume_from_different_mongos.js
+++ b/jstests/sharding/change_stream_resume_from_different_mongos.js
@@ -1,99 +1,99 @@
// Test resuming a change stream on a mongos other than the one the change stream was started on.
// @tags: [uses_change_streams]
(function() {
- "use strict";
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
+"use strict";
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ rs: {nodes: 3, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
+});
+
+for (let key of Object.keys(ChangeStreamWatchMode)) {
+ const watchMode = ChangeStreamWatchMode[key];
+ jsTestLog("Running test for mode " + watchMode);
+
+ const s0DB = st.s0.getDB("test");
+ const s1DB = st.s1.getDB("test");
+ const coll = assertDropAndRecreateCollection(s0DB, "change_stream_failover");
+
+ const nDocs = 100;
+
+ // Split so ids < nDocs / 2 are for one shard, ids >= nDocs / 2 + 1 for another.
+ st.shardColl(
+ coll,
+ {_id: 1}, // key
+ {_id: nDocs / 2}, // split
+ {_id: nDocs / 2 + 1}, // move
+ "test", // dbName
+ false // waitForDelete
+ );
+
+ // Open a change stream.
+ const cst = new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, s0DB));
+ let changeStream = cst.getChangeStream({watchMode: watchMode, coll: coll});
+
+ // Be sure we can read from the change stream. Write some documents that will end up on
+ // each shard. Use a bulk write to increase the chance that two of the writes get the same
+ // cluster time on each shard.
+ const kIds = [];
+ const bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < nDocs / 2; i++) {
+ // Interleave elements which will end up on shard 0 with elements that will end up on
+ // shard 1.
+ kIds.push(i);
+ bulk.insert({_id: i});
+ kIds.push(i + nDocs / 2);
+ bulk.insert({_id: i + nDocs / 2});
+ }
+ assert.commandWorked(bulk.execute());
+
+ // Read from the change stream. The order of the documents isn't guaranteed because we
+ // performed a bulk write.
+ const firstChange = cst.getOneChange(changeStream);
+ const docsFoundInOrder = [firstChange];
+ for (let i = 0; i < nDocs - 1; i++) {
+ const change = cst.getOneChange(changeStream);
+ assert.docEq(change.ns, {db: s0DB.getName(), coll: coll.getName()});
+ assert.eq(change.operationType, "insert");
+
+ docsFoundInOrder.push(change);
+ }
+
+ // Assert that we found the documents we inserted (in any order).
+ assert.setEq(new Set(kIds), new Set(docsFoundInOrder.map(doc => doc.fullDocument._id)));
+ cst.cleanUp();
+
+ // Now resume using the resume token from the first change on a different mongos.
+ const otherCst = new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, s1DB));
+
+ const resumeCursor =
+ otherCst.getChangeStream({watchMode: watchMode, coll: coll, resumeAfter: firstChange._id});
+
+ // Get the resume tokens for each change that occurred.
+ const resumeTokens = [firstChange._id];
+ for (let i = 0; i < kIds.length - 1; i++) {
+ resumeTokens.push(otherCst.getOneChange(resumeCursor)._id);
}
- const st = new ShardingTest({
- shards: 2,
- mongos: 2,
- rs: {nodes: 3, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
- });
-
- for (let key of Object.keys(ChangeStreamWatchMode)) {
- const watchMode = ChangeStreamWatchMode[key];
- jsTestLog("Running test for mode " + watchMode);
-
- const s0DB = st.s0.getDB("test");
- const s1DB = st.s1.getDB("test");
- const coll = assertDropAndRecreateCollection(s0DB, "change_stream_failover");
-
- const nDocs = 100;
-
- // Split so ids < nDocs / 2 are for one shard, ids >= nDocs / 2 + 1 for another.
- st.shardColl(coll,
- {_id: 1}, // key
- {_id: nDocs / 2}, // split
- {_id: nDocs / 2 + 1}, // move
- "test", // dbName
- false // waitForDelete
- );
-
- // Open a change stream.
- const cst = new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, s0DB));
- let changeStream = cst.getChangeStream({watchMode: watchMode, coll: coll});
-
- // Be sure we can read from the change stream. Write some documents that will end up on
- // each shard. Use a bulk write to increase the chance that two of the writes get the same
- // cluster time on each shard.
- const kIds = [];
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs / 2; i++) {
- // Interleave elements which will end up on shard 0 with elements that will end up on
- // shard 1.
- kIds.push(i);
- bulk.insert({_id: i});
- kIds.push(i + nDocs / 2);
- bulk.insert({_id: i + nDocs / 2});
- }
- assert.commandWorked(bulk.execute());
-
- // Read from the change stream. The order of the documents isn't guaranteed because we
- // performed a bulk write.
- const firstChange = cst.getOneChange(changeStream);
- const docsFoundInOrder = [firstChange];
- for (let i = 0; i < nDocs - 1; i++) {
- const change = cst.getOneChange(changeStream);
- assert.docEq(change.ns, {db: s0DB.getName(), coll: coll.getName()});
- assert.eq(change.operationType, "insert");
-
- docsFoundInOrder.push(change);
- }
-
- // Assert that we found the documents we inserted (in any order).
- assert.setEq(new Set(kIds), new Set(docsFoundInOrder.map(doc => doc.fullDocument._id)));
- cst.cleanUp();
-
- // Now resume using the resume token from the first change on a different mongos.
- const otherCst =
- new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, s1DB));
-
- const resumeCursor = otherCst.getChangeStream(
- {watchMode: watchMode, coll: coll, resumeAfter: firstChange._id});
-
- // Get the resume tokens for each change that occurred.
- const resumeTokens = [firstChange._id];
- for (let i = 0; i < kIds.length - 1; i++) {
- resumeTokens.push(otherCst.getOneChange(resumeCursor)._id);
- }
-
- // Check that resuming from each possible resume token works.
- for (let i = 0; i < resumeTokens.length; i++) {
- const cursor = otherCst.getChangeStream(
- {watchMode: watchMode, coll: coll, resumeAfter: resumeTokens[i]});
- otherCst.assertNextChangesEqual(
- {cursor: cursor, expectedChanges: docsFoundInOrder.splice(i + 1)});
- }
- otherCst.cleanUp();
+ // Check that resuming from each possible resume token works.
+ for (let i = 0; i < resumeTokens.length; i++) {
+ const cursor = otherCst.getChangeStream(
+ {watchMode: watchMode, coll: coll, resumeAfter: resumeTokens[i]});
+ otherCst.assertNextChangesEqual(
+ {cursor: cursor, expectedChanges: docsFoundInOrder.splice(i + 1)});
}
+ otherCst.cleanUp();
+}
- st.stop();
+st.stop();
}());
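The same cross-mongos resume can be exercised without the ChangeStreamTest helper. The sketch below is an illustrative reduction using the shell's watch() API, with a hypothetical collection name rather than the test's own.

// Illustrative reduction of the pattern above: obtain a resume token on one mongos and
// resume from it on the other. 'resume_sketch' is a hypothetical collection name.
const collOnS0 = st.s0.getDB("test")["resume_sketch"];
const collOnS1 = st.s1.getDB("test")["resume_sketch"];
const stream = collOnS0.watch();
assert.writeOK(collOnS0.insert({_id: 0}));
assert.soon(() => stream.hasNext());
const resumeToken = stream.next()._id;
// Resuming on the second mongos picks up from the same point in the stream.
const resumedStream = collOnS1.watch([], {resumeAfter: resumeToken});
stream.close();
resumedStream.close();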
diff --git a/jstests/sharding/change_stream_shard_failover.js b/jstests/sharding/change_stream_shard_failover.js
index f4b3007bd30..f5675aedd04 100644
--- a/jstests/sharding/change_stream_shard_failover.js
+++ b/jstests/sharding/change_stream_shard_failover.js
@@ -9,104 +9,104 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
+"use strict";
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ shards: 2,
+ rs: {nodes: 2, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
+});
+
+const sDB = st.s.getDB("test");
+const kCollName = "change_stream_failover";
+
+for (let key of Object.keys(ChangeStreamWatchMode)) {
+ const watchMode = ChangeStreamWatchMode[key];
+ jsTestLog("Running test for mode " + watchMode);
+
+ const coll = assertDropAndRecreateCollection(sDB, kCollName);
+
+ const nDocs = 100;
+
+ // Split so ids < nDocs / 2 are for one shard, ids >= nDocs / 2 + 1 for another.
+ st.shardColl(
+ coll,
+ {_id: 1}, // key
+ {_id: nDocs / 2}, // split
+ {_id: nDocs / 2 + 1}, // move
+ "test", // dbName
+ false // waitForDelete
+ );
+
+ // Be sure we'll only read from the primaries.
+ st.s.setReadPref("primary");
+
+ // Open a changeStream.
+ const cst = new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, sDB));
+ let changeStream = cst.getChangeStream({watchMode: watchMode, coll: coll});
+
+ // Be sure we can read from the change stream. Write some documents that will end up on
+ // each shard. Use a bulk write to increase the chance that two of the writes get the same
+ // cluster time on each shard.
+ const bulk = coll.initializeUnorderedBulkOp();
+ const kIds = [];
+ for (let i = 0; i < nDocs / 2; i++) {
+ // Interleave elements which will end up on shard 0 with elements that will end up on
+ // shard 1.
+ kIds.push(i);
+ bulk.insert({_id: i});
+ kIds.push(i + nDocs / 2);
+ bulk.insert({_id: i + nDocs / 2});
}
+ // Use {w: "majority"} so that we're still guaranteed to be able to read after the
+ // failover.
+ assert.commandWorked(bulk.execute({w: "majority"}));
- const st = new ShardingTest({
- shards: 2,
- rs: {nodes: 2, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
- });
-
- const sDB = st.s.getDB("test");
- const kCollName = "change_stream_failover";
-
- for (let key of Object.keys(ChangeStreamWatchMode)) {
- const watchMode = ChangeStreamWatchMode[key];
- jsTestLog("Running test for mode " + watchMode);
-
- const coll = assertDropAndRecreateCollection(sDB, kCollName);
-
- const nDocs = 100;
-
- // Split so ids < nDocs / 2 are for one shard, ids >= nDocs / 2 + 1 for another.
- st.shardColl(coll,
- {_id: 1}, // key
- {_id: nDocs / 2}, // split
- {_id: nDocs / 2 + 1}, // move
- "test", // dbName
- false // waitForDelete
- );
-
- // Be sure we'll only read from the primaries.
- st.s.setReadPref("primary");
-
- // Open a changeStream.
- const cst = new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, sDB));
- let changeStream = cst.getChangeStream({watchMode: watchMode, coll: coll});
-
- // Be sure we can read from the change stream. Write some documents that will end up on
- // each shard. Use a bulk write to increase the chance that two of the writes get the same
- // cluster time on each shard.
- const bulk = coll.initializeUnorderedBulkOp();
- const kIds = [];
- for (let i = 0; i < nDocs / 2; i++) {
- // Interleave elements which will end up on shard 0 with elements that will end up on
- // shard 1.
- kIds.push(i);
- bulk.insert({_id: i});
- kIds.push(i + nDocs / 2);
- bulk.insert({_id: i + nDocs / 2});
- }
- // Use {w: "majority"} so that we're still guaranteed to be able to read after the
- // failover.
- assert.commandWorked(bulk.execute({w: "majority"}));
-
- const firstChange = cst.getOneChange(changeStream);
-
- // Make one of the primaries step down.
- const oldPrimary = st.rs0.getPrimary();
-
- assert.commandWorked(oldPrimary.adminCommand({replSetStepDown: 300, force: true}));
-
- st.rs0.awaitNodesAgreeOnPrimary();
- const newPrimary = st.rs0.getPrimary();
- // Be sure the new primary is not the previous primary.
- assert.neq(newPrimary.port, oldPrimary.port);
-
- // Read the remaining documents from the original stream.
- const docsFoundInOrder = [firstChange];
- for (let i = 0; i < nDocs - 1; i++) {
- const change = cst.getOneChange(changeStream);
- assert.docEq(change.ns, {db: sDB.getName(), coll: coll.getName()});
- assert.eq(change.operationType, "insert");
-
- docsFoundInOrder.push(change);
- }
-
- // Assert that we found the documents we inserted (in any order).
- assert.setEq(new Set(kIds), new Set(docsFoundInOrder.map(doc => doc.fullDocument._id)));
-
- // Now resume using the resume token from the first change (which was read before the
- // failover). The mongos should talk to the new primary.
- const resumeCursor =
- cst.getChangeStream({watchMode: watchMode, coll: coll, resumeAfter: firstChange._id});
-
- // Be sure we can read the remaining changes in the same order as we read them initially.
- cst.assertNextChangesEqual(
- {cursor: resumeCursor, expectedChanges: docsFoundInOrder.splice(1)});
- cst.cleanUp();
-
- // Reset the original primary's election timeout.
- assert.commandWorked(oldPrimary.adminCommand({replSetFreeze: 0}));
+ const firstChange = cst.getOneChange(changeStream);
+
+ // Make one of the primaries step down.
+ const oldPrimary = st.rs0.getPrimary();
+
+ assert.commandWorked(oldPrimary.adminCommand({replSetStepDown: 300, force: true}));
+
+ st.rs0.awaitNodesAgreeOnPrimary();
+ const newPrimary = st.rs0.getPrimary();
+ // Be sure the new primary is not the previous primary.
+ assert.neq(newPrimary.port, oldPrimary.port);
+
+ // Read the remaining documents from the original stream.
+ const docsFoundInOrder = [firstChange];
+ for (let i = 0; i < nDocs - 1; i++) {
+ const change = cst.getOneChange(changeStream);
+ assert.docEq(change.ns, {db: sDB.getName(), coll: coll.getName()});
+ assert.eq(change.operationType, "insert");
+
+ docsFoundInOrder.push(change);
}
- st.stop();
+ // Assert that we found the documents we inserted (in any order).
+ assert.setEq(new Set(kIds), new Set(docsFoundInOrder.map(doc => doc.fullDocument._id)));
+
+ // Now resume using the resume token from the first change (which was read before the
+ // failover). The mongos should talk to the new primary.
+ const resumeCursor =
+ cst.getChangeStream({watchMode: watchMode, coll: coll, resumeAfter: firstChange._id});
+
+ // Be sure we can read the remaining changes in the same order as we read them initially.
+ cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: docsFoundInOrder.splice(1)});
+ cst.cleanUp();
+
+ // Reset the original primary's election timeout.
+ assert.commandWorked(oldPrimary.adminCommand({replSetFreeze: 0}));
+}
+
+st.stop();
}());
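Read in isolation, the failover machinery used above boils down to the short sequence below; this is a condensed restatement of the steps already in this test, not new behavior.

// Condensed restatement of the failover steps above: force a step-down, wait for a new
// primary, and later lift the freeze so the old primary may run for election again.
const oldPrimary = st.rs0.getPrimary();
assert.commandWorked(oldPrimary.adminCommand({replSetStepDown: 300, force: true}));
st.rs0.awaitNodesAgreeOnPrimary();
assert.neq(st.rs0.getPrimary().port, oldPrimary.port);
// ... exercise the change stream against the new primary here ...
assert.commandWorked(oldPrimary.adminCommand({replSetFreeze: 0}));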
diff --git a/jstests/sharding/change_stream_show_migration_events.js b/jstests/sharding/change_stream_show_migration_events.js
index 4c75ca5fc2a..c07e059e4d1 100644
--- a/jstests/sharding/change_stream_show_migration_events.js
+++ b/jstests/sharding/change_stream_show_migration_events.js
@@ -3,265 +3,261 @@
// This test is connecting directly to a shard, and change streams require the getMore command.
// @tags: [requires_find_command, uses_change_streams]
(function() {
- 'use strict';
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode
-
- function checkEvents(changeStream, expectedEvents) {
- expectedEvents.forEach((event) => {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, event["operationType"]);
- assert.eq(next.documentKey, {_id: event["_id"]});
- });
- }
-
- function makeEvent(docId, opType) {
- assert(typeof docId === 'number');
- assert(typeof opType === 'string' && (opType === 'insert' || opType === 'delete'));
- return ({_id: docId, operationType: opType});
- }
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- // TODO WT-3864: Re-enable test for LSM once transaction visibility bug in LSM is resolved.
- if (jsTest.options().wiredTigerCollectionConfigString === "type=lsm") {
- jsTestLog("Skipping test because we're running with WiredTiger's LSM tree.");
- return;
- }
-
- const rsNodeOptions = {
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
- };
- const st =
- new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
-
- const mongos = st.s;
- const mongosColl = mongos.getCollection('test.chunk_mig');
- const mongosDB = mongos.getDB("test");
-
- // Enable sharding to inform mongos of the database, allowing us to open a cursor.
- assert.commandWorked(mongos.adminCommand({enableSharding: mongosDB.getName()}));
-
- // Make sure all chunks start on shard 0.
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
- // Open a change stream cursor before the collection is sharded.
- const changeStreamShardZero = st.shard0.getCollection('test.chunk_mig').aggregate([
- {$changeStream: {showMigrationEvents: true}}
- ]);
- const changeStreamShardOne = st.shard1.getCollection('test.chunk_mig').aggregate([
- {$changeStream: {showMigrationEvents: true}}
- ]);
-
- // Change streams opened on mongos do not allow showMigrationEvents to be set to true.
- assertErrorCode(mongosColl, [{$changeStream: {showMigrationEvents: true}}], 31123);
-
- assert(!changeStreamShardZero.hasNext(), "Do not expect any results yet");
- assert(!changeStreamShardOne.hasNext(), "Do not expect any results yet");
-
- jsTestLog("Sharding collection");
- // Once we have a cursor, actually shard the collection.
- assert.commandWorked(
- mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Insert two documents.
- assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
-
- // Split the collection into two chunks: [MinKey, 10) and [10, MaxKey].
- assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
-
- jsTestLog("Migrating [10, MaxKey] chunk to shard1.");
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: 20},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- var shardZeroEventsBeforeNewShard = [makeEvent(0, "insert"), makeEvent(20, "insert")];
- var shardZeroEventsAfterNewShard = [makeEvent(20, "delete")];
- var shardOneEvents = [makeEvent(20, "insert")];
-
- // Check that each change stream returns the expected events.
- checkEvents(changeStreamShardZero, shardZeroEventsBeforeNewShard);
- assert.soon(() => changeStreamShardZero.hasNext());
- let next = changeStreamShardZero.next();
- assert.eq(next.operationType, "kNewShardDetected");
-
- checkEvents(changeStreamShardZero, shardZeroEventsAfterNewShard);
- checkEvents(changeStreamShardOne, shardOneEvents);
-
- // Insert into both the chunks.
- assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
-
- // Split again, and move a second chunk to the first shard. The new chunks are:
- // [MinKey, 0), [0, 10), and [10, MaxKey].
- jsTestLog("Moving [MinKey, 0] to shard 1");
- assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: 5},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Insert again, into all three chunks.
- assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
-
- var shardZeroEvents = [
- makeEvent(1, "insert"),
- makeEvent(0, "delete"),
- makeEvent(1, "delete"),
- makeEvent(-2, "insert"),
- ];
- shardOneEvents = [
- makeEvent(21, "insert"),
- makeEvent(0, "insert"),
- makeEvent(1, "insert"),
- makeEvent(2, "insert"),
- makeEvent(22, "insert"),
- ];
-
- // Check that each change stream returns the expected events.
- checkEvents(changeStreamShardZero, shardZeroEvents);
- checkEvents(changeStreamShardOne, shardOneEvents);
-
- // Make sure we're at the end of the stream.
- assert(!changeStreamShardZero.hasNext());
- assert(!changeStreamShardOne.hasNext());
-
- // Test that migrating the last chunk to shard 1 (meaning all chunks are now on the same shard)
- // will not invalidate the change stream.
-
- // Insert into all three chunks.
- jsTestLog("Insert into all three chunks");
- assert.writeOK(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
-
- jsTestLog("Move the [Minkey, 0) chunk to shard 1.");
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: -5},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Insert again, into all three chunks.
- assert.writeOK(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
-
- // Check that each change stream returns the expected events.
- shardZeroEvents = [
- makeEvent(-3, "insert"),
- makeEvent(-3, "delete"),
- makeEvent(-2, "delete"),
- ];
- shardOneEvents = [
- makeEvent(3, "insert"),
- makeEvent(23, "insert"),
- makeEvent(-2, "insert"),
- makeEvent(-3, "insert"),
- makeEvent(-4, "insert"),
- makeEvent(4, "insert"),
- makeEvent(24, "insert"),
- ];
-
- checkEvents(changeStreamShardZero, shardZeroEvents);
- checkEvents(changeStreamShardOne, shardOneEvents);
-
- // Now test that adding a new shard and migrating a chunk to it will continue to
- // return the correct results.
- const newShard = new ReplSetTest({name: "newShard", nodes: 1, nodeOptions: rsNodeOptions});
- newShard.startSet({shardsvr: ''});
- newShard.initiate();
- assert.commandWorked(mongos.adminCommand({addShard: newShard.getURL(), name: "newShard"}));
- const changeStreamNewShard = newShard.getPrimary().getCollection('test.chunk_mig').aggregate([
- {$changeStream: {showMigrationEvents: true}}
- ]);
-
- // At this point, there haven't been any migrations to that shard; check that the changeStream
- // works normally.
- assert.writeOK(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
-
- shardOneEvents = [
- makeEvent(-5, "insert"),
- makeEvent(5, "insert"),
- makeEvent(25, "insert"),
- ];
-
- assert(!changeStreamShardZero.hasNext(), "Do not expect any results");
- checkEvents(changeStreamShardOne, shardOneEvents);
- assert(!changeStreamNewShard.hasNext(), "Do not expect any results yet");
-
- assert.writeOK(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
-
- // Now migrate a chunk to the new shard and verify the stream continues to return results
- // from both before and after the migration.
- jsTestLog("Migrating [10, MaxKey] chunk to new shard.");
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: 20},
- to: "newShard",
- _waitForDelete: true
- }));
- assert.writeOK(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
-
- let shardOneEventsBeforeNewShard = [
- makeEvent(16, "insert"),
- ];
- let shardOneEventsAfterNewShard = [
- makeEvent(16, "delete"),
- makeEvent(20, "delete"),
- makeEvent(21, "delete"),
- makeEvent(22, "delete"),
- makeEvent(23, "delete"),
- makeEvent(24, "delete"),
- makeEvent(25, "delete"),
- makeEvent(-6, "insert"),
- makeEvent(6, "insert"),
- ];
- let newShardEvents = [
- makeEvent(20, "insert"),
- makeEvent(21, "insert"),
- makeEvent(22, "insert"),
- makeEvent(23, "insert"),
- makeEvent(24, "insert"),
- makeEvent(25, "insert"),
- makeEvent(16, "insert"),
- makeEvent(26, "insert"),
- ];
-
- // Check that each change stream returns the expected events.
- assert(!changeStreamShardZero.hasNext(), "Do not expect any results");
- checkEvents(changeStreamShardOne, shardOneEventsBeforeNewShard);
- assert.soon(() => changeStreamShardOne.hasNext());
- next = changeStreamShardOne.next();
- assert.eq(next.operationType, "kNewShardDetected");
- checkEvents(changeStreamShardOne, shardOneEventsAfterNewShard);
- checkEvents(changeStreamNewShard, newShardEvents);
-
- // Make sure all change streams are empty.
- assert(!changeStreamShardZero.hasNext());
- assert(!changeStreamShardOne.hasNext());
- assert(!changeStreamNewShard.hasNext());
-
- st.stop();
- newShard.stopSet();
+'use strict';
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode
+
+function checkEvents(changeStream, expectedEvents) {
+ expectedEvents.forEach((event) => {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, event["operationType"]);
+ assert.eq(next.documentKey, {_id: event["_id"]});
+ });
+}
+
+function makeEvent(docId, opType) {
+ assert(typeof docId === 'number');
+ assert(typeof opType === 'string' && (opType === 'insert' || opType === 'delete'));
+ return ({_id: docId, operationType: opType});
+}
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+// TODO WT-3864: Re-enable test for LSM once transaction visibility bug in LSM is resolved.
+if (jsTest.options().wiredTigerCollectionConfigString === "type=lsm") {
+ jsTestLog("Skipping test because we're running with WiredTiger's LSM tree.");
+ return;
+}
+
+const rsNodeOptions = {
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
+};
+const st =
+ new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
+
+const mongos = st.s;
+const mongosColl = mongos.getCollection('test.chunk_mig');
+const mongosDB = mongos.getDB("test");
+
+// Enable sharding to inform mongos of the database, allowing us to open a cursor.
+assert.commandWorked(mongos.adminCommand({enableSharding: mongosDB.getName()}));
+
+// Make sure all chunks start on shard 0.
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+// Open a change stream cursor before the collection is sharded.
+const changeStreamShardZero = st.shard0.getCollection('test.chunk_mig').aggregate([
+ {$changeStream: {showMigrationEvents: true}}
+]);
+const changeStreamShardOne = st.shard1.getCollection('test.chunk_mig').aggregate([
+ {$changeStream: {showMigrationEvents: true}}
+]);
+
+// Change streams opened on mongos do not allow showMigrationEvents to be set to true.
+assertErrorCode(mongosColl, [{$changeStream: {showMigrationEvents: true}}], 31123);
+
+assert(!changeStreamShardZero.hasNext(), "Do not expect any results yet");
+assert(!changeStreamShardOne.hasNext(), "Do not expect any results yet");
+
+jsTestLog("Sharding collection");
+// Once we have a cursor, actually shard the collection.
+assert.commandWorked(
+ mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Insert two documents.
+assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
+
+// Split the collection into two chunks: [MinKey, 10) and [10, MaxKey].
+assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
+
+jsTestLog("Migrating [10, MaxKey] chunk to shard1.");
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: 20},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
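+// Note: because these streams were opened directly on the shards with showMigrationEvents set
+// to true, the writes performed by the migration itself are visible: the recipient reports an
+// 'insert' for each migrated document, and the donor reports a 'delete' once the range deleter
+// removes its copy (made synchronous here by _waitForDelete: true).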
+var shardZeroEventsBeforeNewShard = [makeEvent(0, "insert"), makeEvent(20, "insert")];
+var shardZeroEventsAfterNewShard = [makeEvent(20, "delete")];
+var shardOneEvents = [makeEvent(20, "insert")];
+
+// Check that each change stream returns the expected events.
+checkEvents(changeStreamShardZero, shardZeroEventsBeforeNewShard);
+assert.soon(() => changeStreamShardZero.hasNext());
+let next = changeStreamShardZero.next();
+assert.eq(next.operationType, "kNewShardDetected");
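+// Note: the donor's stream surfaces this internal 'kNewShardDetected' entry because the chunk
+// was migrated to a shard that previously owned no chunks of this collection; a stream opened
+// through mongos would consume it internally (to open a cursor on the new shard) rather than
+// return it to the client.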
+
+checkEvents(changeStreamShardZero, shardZeroEventsAfterNewShard);
+checkEvents(changeStreamShardOne, shardOneEvents);
+
+// Insert into both the chunks.
+assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
+
+// Split again, and move a second chunk to shard 1. The new chunks are:
+// [MinKey, 0), [0, 10), and [10, MaxKey].
+jsTestLog("Moving [0, 10) chunk to shard 1.");
+assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: 5},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Insert again, into all three chunks.
+assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
+
+var shardZeroEvents = [
+ makeEvent(1, "insert"),
+ makeEvent(0, "delete"),
+ makeEvent(1, "delete"),
+ makeEvent(-2, "insert"),
+];
+shardOneEvents = [
+ makeEvent(21, "insert"),
+ makeEvent(0, "insert"),
+ makeEvent(1, "insert"),
+ makeEvent(2, "insert"),
+ makeEvent(22, "insert"),
+];
+
+// Check that each change stream returns the expected events.
+checkEvents(changeStreamShardZero, shardZeroEvents);
+checkEvents(changeStreamShardOne, shardOneEvents);
+
+// Make sure we're at the end of the stream.
+assert(!changeStreamShardZero.hasNext());
+assert(!changeStreamShardOne.hasNext());
+
+// Test that migrating the last chunk to shard 1 (meaning all chunks are now on the same shard)
+// will not invalidate the change stream.
+
+// Insert into all three chunks.
+jsTestLog("Insert into all three chunks");
+assert.writeOK(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
+
+jsTestLog("Move the [Minkey, 0) chunk to shard 1.");
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: -5},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Insert again, into all three chunks.
+assert.writeOK(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
+
+// Check that each change stream returns the expected events.
+shardZeroEvents = [
+ makeEvent(-3, "insert"),
+ makeEvent(-3, "delete"),
+ makeEvent(-2, "delete"),
+];
+shardOneEvents = [
+ makeEvent(3, "insert"),
+ makeEvent(23, "insert"),
+ makeEvent(-2, "insert"),
+ makeEvent(-3, "insert"),
+ makeEvent(-4, "insert"),
+ makeEvent(4, "insert"),
+ makeEvent(24, "insert"),
+];
+
+checkEvents(changeStreamShardZero, shardZeroEvents);
+checkEvents(changeStreamShardOne, shardOneEvents);
+
+// Now test that the existing streams continue to return the correct results after a new shard
+// is added and a chunk is migrated to it.
+const newShard = new ReplSetTest({name: "newShard", nodes: 1, nodeOptions: rsNodeOptions});
+newShard.startSet({shardsvr: ''});
+newShard.initiate();
+assert.commandWorked(mongos.adminCommand({addShard: newShard.getURL(), name: "newShard"}));
+const changeStreamNewShard = newShard.getPrimary().getCollection('test.chunk_mig').aggregate([
+ {$changeStream: {showMigrationEvents: true}}
+]);
+
+// At this point, there haven't been any migrations to that shard; check that the changeStream
+// works normally.
+assert.writeOK(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
+
+shardOneEvents = [
+ makeEvent(-5, "insert"),
+ makeEvent(5, "insert"),
+ makeEvent(25, "insert"),
+];
+
+assert(!changeStreamShardZero.hasNext(), "Do not expect any results");
+checkEvents(changeStreamShardOne, shardOneEvents);
+assert(!changeStreamNewShard.hasNext(), "Do not expect any results yet");
+
+assert.writeOK(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
+
+// Now migrate a chunk to the new shard and verify the stream continues to return results
+// from both before and after the migration.
+jsTestLog("Migrating [10, MaxKey] chunk to new shard.");
+assert.commandWorked(mongos.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 20}, to: "newShard", _waitForDelete: true}));
+assert.writeOK(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
+
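+// Shard 1 is now the donor: it first reports the pre-migration insert of 16, then a
+// 'kNewShardDetected' entry for the brand-new shard, and finally the range-deletion 'delete'
+// entries for the documents it handed off, followed by the post-migration writes.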
+let shardOneEventsBeforeNewShard = [
+ makeEvent(16, "insert"),
+];
+let shardOneEventsAfterNewShard = [
+ makeEvent(16, "delete"),
+ makeEvent(20, "delete"),
+ makeEvent(21, "delete"),
+ makeEvent(22, "delete"),
+ makeEvent(23, "delete"),
+ makeEvent(24, "delete"),
+ makeEvent(25, "delete"),
+ makeEvent(-6, "insert"),
+ makeEvent(6, "insert"),
+];
+let newShardEvents = [
+ makeEvent(20, "insert"),
+ makeEvent(21, "insert"),
+ makeEvent(22, "insert"),
+ makeEvent(23, "insert"),
+ makeEvent(24, "insert"),
+ makeEvent(25, "insert"),
+ makeEvent(16, "insert"),
+ makeEvent(26, "insert"),
+];
+
+// Check that each change stream returns the expected events.
+assert(!changeStreamShardZero.hasNext(), "Do not expect any results");
+checkEvents(changeStreamShardOne, shardOneEventsBeforeNewShard);
+assert.soon(() => changeStreamShardOne.hasNext());
+next = changeStreamShardOne.next();
+assert.eq(next.operationType, "kNewShardDetected");
+checkEvents(changeStreamShardOne, shardOneEventsAfterNewShard);
+checkEvents(changeStreamNewShard, newShardEvents);
+
+// Make sure all change streams are empty.
+assert(!changeStreamShardZero.hasNext());
+assert(!changeStreamShardOne.hasNext());
+assert(!changeStreamNewShard.hasNext());
+
+st.stop();
+newShard.stopSet();
})();
diff --git a/jstests/sharding/change_stream_transaction_sharded.js b/jstests/sharding/change_stream_transaction_sharded.js
index 311f132012f..96e15459ff1 100644
--- a/jstests/sharding/change_stream_transaction_sharded.js
+++ b/jstests/sharding/change_stream_transaction_sharded.js
@@ -6,263 +6,254 @@
// uses_transactions,
// ]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "change_stream_transaction_sharded";
- const namespace = dbName + "." + collName;
-
- const st = new ShardingTest({
- shards: 2,
- rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
+"use strict";
+
+const dbName = "test";
+const collName = "change_stream_transaction_sharded";
+const namespace = dbName + "." + collName;
+
+const st = new ShardingTest({
+ shards: 2,
+ rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
+});
+
+const mongosConn = st.s;
+assert.commandWorked(mongosConn.getDB(dbName).getCollection(collName).createIndex({shard: 1}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+// Shard the test collection and split it into two chunks: one that contains all {shard: 1}
+// documents and one that contains all {shard: 2} documents.
+st.shardColl(collName,
+ {shard: 1} /* shard key */,
+ {shard: 2} /* split at */,
+ {shard: 2} /* move the chunk containing {shard: 2} to its own shard */,
+ dbName,
+ true);
+// Seed each chunk with an initial document.
+assert.commandWorked(mongosConn.getDB(dbName).getCollection(collName).insert(
+ {shard: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosConn.getDB(dbName).getCollection(collName).insert(
+ {shard: 2}, {writeConcern: {w: "majority"}}));
+
+const db = mongosConn.getDB(dbName);
+const coll = db.getCollection(collName);
+let changeListShard1 = [], changeListShard2 = [];
+
+//
+// Start transaction 1.
+//
+const session1 = db.getMongo().startSession({causalConsistency: true});
+const sessionDb1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDb1[collName];
+session1.startTransaction({readConcern: {level: "majority"}});
+
+//
+// Start transaction 2.
+//
+const session2 = db.getMongo().startSession({causalConsistency: true});
+const sessionDb2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDb2[collName];
+session2.startTransaction({readConcern: {level: "majority"}});
+
+/**
+ * Asserts that there are no changes waiting on the change stream cursor.
+ */
+function assertNoChanges(cursor) {
+ assert(!cursor.hasNext(), () => {
+ return "Unexpected change set: " + tojson(cursor.toArray());
});
+}
- const mongosConn = st.s;
- assert.commandWorked(mongosConn.getDB(dbName).getCollection(collName).createIndex({shard: 1}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- // Shard the test collection and split it into two chunks: one that contains all {shard: 1}
- // documents and one that contains all {shard: 2} documents.
- st.shardColl(collName,
- {shard: 1} /* shard key */,
- {shard: 2} /* split at */,
- {shard: 2} /* move the chunk containing {shard: 2} to its own shard */,
- dbName,
- true);
- // Seed each chunk with an initial document.
- assert.commandWorked(mongosConn.getDB(dbName).getCollection(collName).insert(
- {shard: 1}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(mongosConn.getDB(dbName).getCollection(collName).insert(
- {shard: 2}, {writeConcern: {w: "majority"}}));
-
- const db = mongosConn.getDB(dbName);
- const coll = db.getCollection(collName);
- let changeListShard1 = [], changeListShard2 = [];
-
- //
- // Start transaction 1.
- //
- const session1 = db.getMongo().startSession({causalConsistency: true});
- const sessionDb1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDb1[collName];
- session1.startTransaction({readConcern: {level: "majority"}});
-
- //
- // Start transaction 2.
- //
- const session2 = db.getMongo().startSession({causalConsistency: true});
- const sessionDb2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDb2[collName];
- session2.startTransaction({readConcern: {level: "majority"}});
-
- /**
- * Asserts that there are no changes waiting on the change stream cursor.
- */
- function assertNoChanges(cursor) {
- assert(!cursor.hasNext(), () => {
- return "Unexpected change set: " + tojson(cursor.toArray());
- });
+//
+// Perform writes both in and outside of transactions and confirm that the expected changes are
+// returned by the change stream.
+//
+(function() {
+/**
+ * Asserts that the expected changes are found on the change stream cursor. Pushes the
+ * corresponding change stream document (with resume token) to an array. When expected
+ * changes are provided for both shards, we must assume that either shard's changes could
+ * come first or that they are interleaved via applyOps index. This is because a cross shard
+ * transaction may commit at a different cluster time on each shard, which impacts the
+ * ordering of the change stream.
+ */
+function assertWritesVisibleWithCapture(cursor,
+ expectedChangesShard1,
+ expectedChangesShard2,
+ changeCaptureListShard1,
+ changeCaptureListShard2) {
+ function assertChangeEqualWithCapture(changeDoc, expectedChange, changeCaptureList) {
+ assert.eq(expectedChange.operationType, changeDoc.operationType);
+ assert.eq(expectedChange._id, changeDoc.documentKey._id);
+ changeCaptureList.push(changeDoc);
}
- //
- // Perform writes both in and outside of transactions and confirm that the changes expected are
- // returned by the change stream.
- //
- (function() {
- /**
- * Asserts that the expected changes are found on the change stream cursor. Pushes the
- * corresponding change stream document (with resume token) to an array. When expected
- * changes are provided for both shards, we must assume that either shard's changes could
- * come first or that they are interleaved via applyOps index. This is because a cross shard
- * transaction may commit at a different cluster time on each shard, which impacts the
- * ordering of the change stream.
- */
- function assertWritesVisibleWithCapture(cursor,
- expectedChangesShard1,
- expectedChangesShard2,
- changeCaptureListShard1,
- changeCaptureListShard2) {
- function assertChangeEqualWithCapture(changeDoc, expectedChange, changeCaptureList) {
- assert.eq(expectedChange.operationType, changeDoc.operationType);
- assert.eq(expectedChange._id, changeDoc.documentKey._id);
- changeCaptureList.push(changeDoc);
- }
-
- while (expectedChangesShard1.length || expectedChangesShard2.length) {
- assert.soon(() => cursor.hasNext());
- const changeDoc = cursor.next();
-
- if (changeDoc.documentKey.shard === 1) {
- assert(expectedChangesShard1.length);
- assertChangeEqualWithCapture(
- changeDoc, expectedChangesShard1[0], changeCaptureListShard1);
- expectedChangesShard1.shift();
- } else {
- assert(changeDoc.documentKey.shard === 2);
- assert(expectedChangesShard2.length);
- assertChangeEqualWithCapture(
- changeDoc, expectedChangesShard2[0], changeCaptureListShard2);
- expectedChangesShard2.shift();
- }
- }
-
- assertNoChanges(cursor);
+ while (expectedChangesShard1.length || expectedChangesShard2.length) {
+ assert.soon(() => cursor.hasNext());
+ const changeDoc = cursor.next();
+
+ if (changeDoc.documentKey.shard === 1) {
+ assert(expectedChangesShard1.length);
+ assertChangeEqualWithCapture(
+ changeDoc, expectedChangesShard1[0], changeCaptureListShard1);
+ expectedChangesShard1.shift();
+ } else {
+ assert(changeDoc.documentKey.shard === 2);
+ assert(expectedChangesShard2.length);
+ assertChangeEqualWithCapture(
+ changeDoc, expectedChangesShard2[0], changeCaptureListShard2);
+ expectedChangesShard2.shift();
}
+ }
- // Open a change stream on the test collection.
- const changeStreamCursor = coll.watch();
-
- // Insert a document and confirm that the change stream has it.
- assert.commandWorked(
- coll.insert({shard: 1, _id: "no-txn-doc-1"}, {writeConcern: {w: "majority"}}));
- assertWritesVisibleWithCapture(changeStreamCursor,
- [{operationType: "insert", _id: "no-txn-doc-1"}],
- [],
- changeListShard1,
- changeListShard2);
-
- // Insert two documents under each transaction and confirm no change stream updates.
- assert.commandWorked(
- sessionColl1.insert([{shard: 1, _id: "txn1-doc-1"}, {shard: 2, _id: "txn1-doc-2"}]));
- assert.commandWorked(
- sessionColl2.insert([{shard: 1, _id: "txn2-doc-1"}, {shard: 2, _id: "txn2-doc-2"}]));
- assertNoChanges(changeStreamCursor);
-
- // Update one document under each transaction and confirm no change stream updates.
- assert.commandWorked(
- sessionColl1.update({shard: 1, _id: "txn1-doc-1"}, {$set: {"updated": 1}}));
- assert.commandWorked(
- sessionColl2.update({shard: 2, _id: "txn2-doc-2"}, {$set: {"updated": 1}}));
- assertNoChanges(changeStreamCursor);
-
- // Update and then remove second doc under each transaction.
- assert.commandWorked(sessionColl1.update({shard: 2, _id: "txn1-doc-2"},
- {$set: {"update-before-delete": 1}}));
- assert.commandWorked(sessionColl2.update({shard: 1, _id: "txn2-doc-1"},
- {$set: {"update-before-delete": 1}}));
- assert.commandWorked(sessionColl1.remove({shard: 2, _id: "txn1-doc-2"}));
- assert.commandWorked(sessionColl2.remove({shard: 1, _id: "txn2-doc-2"}));
- assertNoChanges(changeStreamCursor);
-
- // Perform a write outside of a transaction and confirm that the change stream sees only
- // this write.
- assert.commandWorked(
- coll.insert({shard: 2, _id: "no-txn-doc-2"}, {writeConcern: {w: "majority"}}));
- assertWritesVisibleWithCapture(changeStreamCursor,
- [],
- [{operationType: "insert", _id: "no-txn-doc-2"}],
- changeListShard1,
- changeListShard2);
- assertNoChanges(changeStreamCursor);
-
- // Perform a write outside of the transaction.
- assert.commandWorked(
- coll.insert({shard: 1, _id: "no-txn-doc-3"}, {writeConcern: {w: "majority"}}));
-
- // Commit first transaction and confirm that the change stream sees the changes expected
- // from each shard.
- assert.commandWorked(session1.commitTransaction_forTesting());
- assertWritesVisibleWithCapture(changeStreamCursor,
- [
- {operationType: "insert", _id: "no-txn-doc-3"},
- {operationType: "insert", _id: "txn1-doc-1"},
- {operationType: "update", _id: "txn1-doc-1"}
- ],
- [
- {operationType: "insert", _id: "txn1-doc-2"},
- {operationType: "update", _id: "txn1-doc-2"},
- {operationType: "delete", _id: "txn1-doc-2"}
- ],
- changeListShard1,
- changeListShard2);
- assertNoChanges(changeStreamCursor);
-
- // Perform a write outside of the transaction.
- assert.commandWorked(
- coll.insert({shard: 2, _id: "no-txn-doc-4"}, {writeConcern: {w: "majority"}}));
-
- // Abort second transaction and confirm that the change stream sees only the previous
- // non-transaction write.
- assert.commandWorked(session2.abortTransaction_forTesting());
- assertWritesVisibleWithCapture(changeStreamCursor,
- [],
- [{operationType: "insert", _id: "no-txn-doc-4"}],
- changeListShard1,
- changeListShard2);
- assertNoChanges(changeStreamCursor);
- changeStreamCursor.close();
- })();
-
- //
- // Open a change stream at each resume point captured for the previous writes. Confirm that the
- // documents returned match what was returned for the initial change stream.
- //
- (function() {
-
- /**
- * Iterates over a list of changes and returns the index of the change whose resume token is
- * higher than that of 'changeDoc'. It is expected that 'changeList' entries at this index
- * and beyond will be included in a change stream resumed at 'changeDoc._id'.
- */
- function getPostTokenChangeIndex(changeDoc, changeList) {
- for (let i = 0; i < changeList.length; ++i) {
- if (changeDoc._id._data < changeList[i]._id._data) {
- return i;
- }
- }
+ assertNoChanges(cursor);
+}
+
+// Open a change stream on the test collection.
+const changeStreamCursor = coll.watch();
+
+// Insert a document and confirm that the change stream has it.
+assert.commandWorked(coll.insert({shard: 1, _id: "no-txn-doc-1"}, {writeConcern: {w: "majority"}}));
+assertWritesVisibleWithCapture(changeStreamCursor,
+ [{operationType: "insert", _id: "no-txn-doc-1"}],
+ [],
+ changeListShard1,
+ changeListShard2);
+
+// Insert two documents under each transaction and confirm no change stream updates.
+assert.commandWorked(
+ sessionColl1.insert([{shard: 1, _id: "txn1-doc-1"}, {shard: 2, _id: "txn1-doc-2"}]));
+assert.commandWorked(
+ sessionColl2.insert([{shard: 1, _id: "txn2-doc-1"}, {shard: 2, _id: "txn2-doc-2"}]));
+assertNoChanges(changeStreamCursor);
+
+// Update one document under each transaction and confirm no change stream updates.
+assert.commandWorked(sessionColl1.update({shard: 1, _id: "txn1-doc-1"}, {$set: {"updated": 1}}));
+assert.commandWorked(sessionColl2.update({shard: 2, _id: "txn2-doc-2"}, {$set: {"updated": 1}}));
+assertNoChanges(changeStreamCursor);
+
+// Update and then remove second doc under each transaction.
+assert.commandWorked(
+ sessionColl1.update({shard: 2, _id: "txn1-doc-2"}, {$set: {"update-before-delete": 1}}));
+assert.commandWorked(
+ sessionColl2.update({shard: 1, _id: "txn2-doc-1"}, {$set: {"update-before-delete": 1}}));
+assert.commandWorked(sessionColl1.remove({shard: 2, _id: "txn1-doc-2"}));
+assert.commandWorked(sessionColl2.remove({shard: 1, _id: "txn2-doc-2"}));
+assertNoChanges(changeStreamCursor);
+
+// Perform a write outside of a transaction and confirm that the change stream sees only
+// this write.
+assert.commandWorked(coll.insert({shard: 2, _id: "no-txn-doc-2"}, {writeConcern: {w: "majority"}}));
+assertWritesVisibleWithCapture(changeStreamCursor,
+ [],
+ [{operationType: "insert", _id: "no-txn-doc-2"}],
+ changeListShard1,
+ changeListShard2);
+assertNoChanges(changeStreamCursor);
+
+// Perform a write outside of the transaction.
+assert.commandWorked(coll.insert({shard: 1, _id: "no-txn-doc-3"}, {writeConcern: {w: "majority"}}));
+
+// Commit the first transaction and confirm that the change stream sees the changes expected
+// from each shard.
+assert.commandWorked(session1.commitTransaction_forTesting());
+assertWritesVisibleWithCapture(changeStreamCursor,
+ [
+ {operationType: "insert", _id: "no-txn-doc-3"},
+ {operationType: "insert", _id: "txn1-doc-1"},
+ {operationType: "update", _id: "txn1-doc-1"}
+ ],
+ [
+ {operationType: "insert", _id: "txn1-doc-2"},
+ {operationType: "update", _id: "txn1-doc-2"},
+ {operationType: "delete", _id: "txn1-doc-2"}
+ ],
+ changeListShard1,
+ changeListShard2);
+assertNoChanges(changeStreamCursor);
+
+// Perform a write outside of the transaction.
+assert.commandWorked(coll.insert({shard: 2, _id: "no-txn-doc-4"}, {writeConcern: {w: "majority"}}));
+
+// Abort the second transaction and confirm that the change stream sees only the previous
+// non-transaction write.
+assert.commandWorked(session2.abortTransaction_forTesting());
+assertWritesVisibleWithCapture(changeStreamCursor,
+ [],
+ [{operationType: "insert", _id: "no-txn-doc-4"}],
+ changeListShard1,
+ changeListShard2);
+assertNoChanges(changeStreamCursor);
+changeStreamCursor.close();
+})();
- return changeList.length;
- }
+//
+// Open a change stream at each resume point captured for the previous writes. Confirm that the
+// documents returned match what was returned for the initial change stream.
+//
+(function() {
- /**
- * Confirms that the change represented by 'changeDoc' exists in 'shardChangeList' at index
- * 'changeListIndex'.
- */
- function shardHasDocumentAtChangeListIndex(changeDoc, shardChangeList, changeListIndex) {
- assert(changeListIndex < shardChangeList.length);
-
- const expectedChangeDoc = shardChangeList[changeListIndex];
- assert.eq(changeDoc, expectedChangeDoc);
- assert.eq(expectedChangeDoc.documentKey,
- changeDoc.documentKey,
- tojson(changeDoc) + ", " + tojson(expectedChangeDoc));
+/**
+ * Iterates over a list of changes and returns the index of the change whose resume token is
+ * higher than that of 'changeDoc'. It is expected that 'changeList' entries at this index
+ * and beyond will be included in a change stream resumed at 'changeDoc._id'.
+ */
+function getPostTokenChangeIndex(changeDoc, changeList) {
+ for (let i = 0; i < changeList.length; ++i) {
+ if (changeDoc._id._data < changeList[i]._id._data) {
+ return i;
}
+ }
- /**
- * Test that change stream returns the expected set of documuments when resumed from each
- * point captured by 'changeList'.
- */
- function confirmResumeForChangeList(changeList, changeListShard1, changeListShard2) {
- for (let i = 0; i < changeList.length; ++i) {
- const resumeDoc = changeList[i];
- let indexShard1 = getPostTokenChangeIndex(resumeDoc, changeListShard1);
- let indexShard2 = getPostTokenChangeIndex(resumeDoc, changeListShard2);
- const resumeCursor = coll.watch([], {startAfter: resumeDoc._id});
-
- while ((indexShard1 + indexShard2) <
- (changeListShard1.length + changeListShard2.length)) {
- assert.soon(() => resumeCursor.hasNext());
- const changeDoc = resumeCursor.next();
-
- if (changeDoc.documentKey.shard === 1) {
- shardHasDocumentAtChangeListIndex(
- changeDoc, changeListShard1, indexShard1++);
- } else {
- assert(changeDoc.documentKey.shard === 2);
- shardHasDocumentAtChangeListIndex(
- changeDoc, changeListShard2, indexShard2++);
- }
- }
-
- assertNoChanges(resumeCursor);
- resumeCursor.close();
+ return changeList.length;
+}
+
+/**
+ * Confirms that the change represented by 'changeDoc' exists in 'shardChangeList' at index
+ * 'changeListIndex'.
+ */
+function shardHasDocumentAtChangeListIndex(changeDoc, shardChangeList, changeListIndex) {
+ assert(changeListIndex < shardChangeList.length);
+
+ const expectedChangeDoc = shardChangeList[changeListIndex];
+ assert.eq(changeDoc, expectedChangeDoc);
+ assert.eq(expectedChangeDoc.documentKey,
+ changeDoc.documentKey,
+ tojson(changeDoc) + ", " + tojson(expectedChangeDoc));
+}
+
+/**
+ * Test that the change stream returns the expected set of documents when resumed from each
+ * point captured by 'changeList'.
+ */
+function confirmResumeForChangeList(changeList, changeListShard1, changeListShard2) {
+ for (let i = 0; i < changeList.length; ++i) {
+ const resumeDoc = changeList[i];
+ let indexShard1 = getPostTokenChangeIndex(resumeDoc, changeListShard1);
+ let indexShard2 = getPostTokenChangeIndex(resumeDoc, changeListShard2);
+ const resumeCursor = coll.watch([], {startAfter: resumeDoc._id});
+
+ while ((indexShard1 + indexShard2) < (changeListShard1.length + changeListShard2.length)) {
+ assert.soon(() => resumeCursor.hasNext());
+ const changeDoc = resumeCursor.next();
+
+ if (changeDoc.documentKey.shard === 1) {
+ shardHasDocumentAtChangeListIndex(changeDoc, changeListShard1, indexShard1++);
+ } else {
+ assert(changeDoc.documentKey.shard === 2);
+ shardHasDocumentAtChangeListIndex(changeDoc, changeListShard2, indexShard2++);
}
}
- // Confirm that the sequence of events returned by the stream is consistent when resuming
- // from any point in the stream on either shard.
- confirmResumeForChangeList(changeListShard1, changeListShard1, changeListShard2);
- confirmResumeForChangeList(changeListShard2, changeListShard1, changeListShard2);
- })();
+ assertNoChanges(resumeCursor);
+ resumeCursor.close();
+ }
+}
+
+// Confirm that the sequence of events returned by the stream is consistent when resuming
+// from any point in the stream on either shard.
+confirmResumeForChangeList(changeListShard1, changeListShard1, changeListShard2);
+confirmResumeForChangeList(changeListShard2, changeListShard1, changeListShard2);
+})();
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/change_stream_update_lookup_collation.js b/jstests/sharding/change_stream_update_lookup_collation.js
index eefff9d463f..9c13f4afac8 100644
--- a/jstests/sharding/change_stream_update_lookup_collation.js
+++ b/jstests/sharding/change_stream_update_lookup_collation.js
@@ -4,164 +4,163 @@
// Collation is only supported with the find command, not with op query.
// @tags: [requires_find_command, uses_change_streams]
(function() {
- "use strict";
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- // TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
- if (!jsTestOptions().enableMajorityReadConcern &&
- jsTestOptions().mongosBinVersion === 'last-stable') {
- jsTestLog(
- "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
- return;
- }
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- const st = new ShardingTest({
- shards: 2,
- config: 1,
- rs: {
- nodes: 1,
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
- }
- });
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- const caseInsensitive = {locale: "en_US", strength: 2};
- assert.commandWorked(
- mongosDB.runCommand({create: mongosColl.getName(), collation: caseInsensitive}));
-
- // Shard the test collection on 'shardKey'. The shard key must use the simple collation.
- assert.commandWorked(mongosDB.adminCommand({
- shardCollection: mongosColl.getFullName(),
- key: {shardKey: 1},
- collation: {locale: "simple"}
- }));
-
- // Split the collection into 2 chunks: [MinKey, "aBC"), ["aBC", MaxKey). Note that there will be
- // documents in each chunk that will have the same shard key according to the collection's
- // default collation, but not according to the simple collation (e.g. "abc" and "ABC").
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: "aBC"}}));
-
- // Move the [MinKey, 'aBC') chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {shardKey: "ABC"}, to: st.rs1.getURL()}));
-
- // Make sure that "ABC" and "abc" go to different shards - we rely on that to make sure the _ids
- // are unique on each shard.
- assert.lte(bsonWoCompare({shardKey: "ABC"}, {shardKey: "aBC"}), -1);
- assert.gte(bsonWoCompare({shardKey: "abc"}, {shardKey: "aBC"}), 1);
-
- // Write some documents to each chunk. Note that the _id is purposefully not unique, since we
- // know the update lookup will use both the _id and the shard key, and we want to make sure it
- // is only targeting a single shard. Also note that _id is a string, since we want to make sure
- // the _id index can only be used if we are using the collection's default collation.
- assert.writeOK(mongosColl.insert({_id: "abc_1", shardKey: "ABC"}));
- assert.writeOK(mongosColl.insert({_id: "abc_2", shardKey: "ABC"}));
- assert.writeOK(mongosColl.insert({_id: "abc_1", shardKey: "abc"}));
- assert.writeOK(mongosColl.insert({_id: "abc_2", shardKey: "abc"}));
-
- // Verify that the post-change lookup uses the simple collation to target to a single shard,
- // then uses the collection-default collation to perform the lookup on the shard.
- const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
-
- // Be sure to include the collation in the updates so that each can be targeted to exactly one
- // shard - this is important to ensure each update only updates one document (since with the
- // default collation their documentKeys are identical). If each operation updates only one, the
- // clusterTime sent from mongos will ensure that each corresponding oplog entry has a distinct
- // timestamp and so will appear in the change stream in the order we expect.
- let updateResult = mongosColl.updateOne({shardKey: "abc", _id: "abc_1"},
- {$set: {updatedCount: 1}},
- {collation: {locale: "simple"}});
- assert.eq(1, updateResult.modifiedCount);
- updateResult = mongosColl.updateOne({shardKey: "ABC", _id: "abc_1"},
- {$set: {updatedCount: 1}},
- {collation: {locale: "simple"}});
- assert.eq(1, updateResult.modifiedCount);
-
- function numIdIndexUsages(host) {
- return host.getCollection(mongosColl.getFullName())
- .aggregate([{$indexStats: {}}, {$match: {name: "_id_"}}])
- .toArray()[0]
- .accesses.ops;
- }
- let idIndexUsagesPreIteration = {
- shard0: numIdIndexUsages(st.rs0.getPrimary()),
- shard1: numIdIndexUsages(st.rs1.getPrimary())
- };
-
- for (let nextDocKey of[{shardKey: "abc", _id: "abc_1"}, {shardKey: "ABC", _id: "abc_1"}]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, nextDocKey, tojson(next));
- assert.docEq(next.fullDocument, Object.merge(nextDocKey, {updatedCount: 1}));
- }
- assert.eq(numIdIndexUsages(st.rs0.getPrimary()), idIndexUsagesPreIteration.shard0 + 1);
- assert.eq(numIdIndexUsages(st.rs1.getPrimary()), idIndexUsagesPreIteration.shard1 + 1);
-
- changeStream.close();
-
- // Now test that a change stream with a non-default collation will still use the simple
- // collation to target the update lookup, and the collection-default collation to do the update
- // lookup on the shard.
-
- // Strength 1 will consider "ç" equal to "c" and "C".
- const strengthOneCollation = {locale: "en_US", strength: 1};
-
- // Insert some documents that might be confused with existing documents under the change
- // stream's collation, but should not be confused during the update lookup.
- assert.writeOK(mongosColl.insert({_id: "abç_1", shardKey: "ABÇ"}));
- assert.writeOK(mongosColl.insert({_id: "abç_2", shardKey: "ABÇ"}));
- assert.writeOK(mongosColl.insert({_id: "abç_1", shardKey: "abç"}));
- assert.writeOK(mongosColl.insert({_id: "abç_2", shardKey: "abç"}));
-
- assert.eq(mongosColl.find({shardKey: "abc"}).collation(strengthOneCollation).itcount(), 8);
-
- const strengthOneChangeStream = mongosColl.aggregate(
- [
- {$changeStream: {fullDocument: "updateLookup"}},
- {$match: {"fullDocument.shardKey": "abc"}}
- ],
- {collation: strengthOneCollation});
-
- updateResult = mongosColl.updateOne({shardKey: "ABC", _id: "abc_1"},
- {$set: {updatedCount: 2}},
- {collation: {locale: "simple"}});
- assert.eq(1, updateResult.modifiedCount);
- updateResult = mongosColl.updateOne({shardKey: "abc", _id: "abc_1"},
- {$set: {updatedCount: 2}},
- {collation: {locale: "simple"}});
- assert.eq(1, updateResult.modifiedCount);
-
- idIndexUsagesPreIteration = {
- shard0: numIdIndexUsages(st.rs0.getPrimary()),
- shard1: numIdIndexUsages(st.rs1.getPrimary())
- };
- for (let nextDocKey of[{shardKey: "ABC", _id: "abc_1"}, {shardKey: "abc", _id: "abc_1"}]) {
- assert.soon(() => strengthOneChangeStream.hasNext());
- let next = strengthOneChangeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, nextDocKey, tojson(next));
- assert.docEq(next.fullDocument, Object.merge(nextDocKey, {updatedCount: 2}));
+"use strict";
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+// TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
+if (!jsTestOptions().enableMajorityReadConcern &&
+ jsTestOptions().mongosBinVersion === 'last-stable') {
+ jsTestLog(
+ "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
+ return;
+}
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ shards: 2,
+ config: 1,
+ rs: {
+ nodes: 1,
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
}
- assert.eq(numIdIndexUsages(st.rs0.getPrimary()), idIndexUsagesPreIteration.shard0 + 1);
- assert.eq(numIdIndexUsages(st.rs1.getPrimary()), idIndexUsagesPreIteration.shard1 + 1);
-
- strengthOneChangeStream.close();
-
- st.stop();
+});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
+assert.commandWorked(
+ mongosDB.runCommand({create: mongosColl.getName(), collation: caseInsensitive}));
+
+// Shard the test collection on 'shardKey'. The shard key must use the simple collation.
+assert.commandWorked(mongosDB.adminCommand({
+ shardCollection: mongosColl.getFullName(),
+ key: {shardKey: 1},
+ collation: {locale: "simple"}
+}));
+
+// Split the collection into 2 chunks: [MinKey, "aBC"), ["aBC", MaxKey). Note that there will be
+// documents in each chunk that will have the same shard key according to the collection's
+// default collation, but not according to the simple collation (e.g. "abc" and "ABC").
+assert.commandWorked(
+ mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: "aBC"}}));
+
+// Move the [MinKey, 'aBC') chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {shardKey: "ABC"}, to: st.rs1.getURL()}));
+
+// Make sure that "ABC" and "abc" go to different shards - we rely on that to make sure the _ids
+// are unique on each shard.
+assert.lte(bsonWoCompare({shardKey: "ABC"}, {shardKey: "aBC"}), -1);
+assert.gte(bsonWoCompare({shardKey: "abc"}, {shardKey: "aBC"}), 1);
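+// (Under the simple binary comparison used for routing, "ABC" < "aBC" < "abc" because uppercase
+// ASCII sorts before lowercase, so the two case variants of each key land in different chunks.)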
+
+// Write some documents to each chunk. Note that the _id is purposefully not unique, since we
+// know the update lookup will use both the _id and the shard key, and we want to make sure it
+// is only targeting a single shard. Also note that _id is a string, since we want to make sure
+// the _id index can only be used if we are using the collection's default collation.
+assert.writeOK(mongosColl.insert({_id: "abc_1", shardKey: "ABC"}));
+assert.writeOK(mongosColl.insert({_id: "abc_2", shardKey: "ABC"}));
+assert.writeOK(mongosColl.insert({_id: "abc_1", shardKey: "abc"}));
+assert.writeOK(mongosColl.insert({_id: "abc_2", shardKey: "abc"}));
+
+// Verify that the post-change lookup uses the simple collation to target a single shard,
+// then uses the collection-default collation to perform the lookup on the shard.
+const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
+
+// Be sure to include the collation in the updates so that each can be targeted to exactly one
+// shard - this is important to ensure each update only updates one document (since with the
+// default collation their documentKeys are identical). If each operation updates only one, the
+// clusterTime sent from mongos will ensure that each corresponding oplog entry has a distinct
+// timestamp and so will appear in the change stream in the order we expect.
+let updateResult = mongosColl.updateOne(
+ {shardKey: "abc", _id: "abc_1"}, {$set: {updatedCount: 1}}, {collation: {locale: "simple"}});
+assert.eq(1, updateResult.modifiedCount);
+updateResult = mongosColl.updateOne(
+ {shardKey: "ABC", _id: "abc_1"}, {$set: {updatedCount: 1}}, {collation: {locale: "simple"}});
+assert.eq(1, updateResult.modifiedCount);
+
+function numIdIndexUsages(host) {
+ return host.getCollection(mongosColl.getFullName())
+ .aggregate([{$indexStats: {}}, {$match: {name: "_id_"}}])
+ .toArray()[0]
+ .accesses.ops;
+}
+let idIndexUsagesPreIteration = {
+ shard0: numIdIndexUsages(st.rs0.getPrimary()),
+ shard1: numIdIndexUsages(st.rs1.getPrimary())
+};
+
+for (let nextDocKey of [{shardKey: "abc", _id: "abc_1"}, {shardKey: "ABC", _id: "abc_1"}]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, nextDocKey, tojson(next));
+ assert.docEq(next.fullDocument, Object.merge(nextDocKey, {updatedCount: 1}));
+}
+assert.eq(numIdIndexUsages(st.rs0.getPrimary()), idIndexUsagesPreIteration.shard0 + 1);
+assert.eq(numIdIndexUsages(st.rs1.getPrimary()), idIndexUsagesPreIteration.shard1 + 1);
+
+changeStream.close();
+
+// Now test that a change stream with a non-default collation will still use the simple
+// collation to target the update lookup, and the collection-default collation to do the update
+// lookup on the shard.
+
+// Strength 1 will consider "ç" equal to "c" and "C".
+const strengthOneCollation = {
+ locale: "en_US",
+ strength: 1
+};
+
+// Insert some documents that might be confused with existing documents under the change
+// stream's collation, but should not be confused during the update lookup.
+assert.writeOK(mongosColl.insert({_id: "abç_1", shardKey: "ABÇ"}));
+assert.writeOK(mongosColl.insert({_id: "abç_2", shardKey: "ABÇ"}));
+assert.writeOK(mongosColl.insert({_id: "abç_1", shardKey: "abç"}));
+assert.writeOK(mongosColl.insert({_id: "abç_2", shardKey: "abç"}));
+
+assert.eq(mongosColl.find({shardKey: "abc"}).collation(strengthOneCollation).itcount(), 8);
+
+const strengthOneChangeStream = mongosColl.aggregate(
+ [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {"fullDocument.shardKey": "abc"}}],
+ {collation: strengthOneCollation});
+
+updateResult = mongosColl.updateOne(
+ {shardKey: "ABC", _id: "abc_1"}, {$set: {updatedCount: 2}}, {collation: {locale: "simple"}});
+assert.eq(1, updateResult.modifiedCount);
+updateResult = mongosColl.updateOne(
+ {shardKey: "abc", _id: "abc_1"}, {$set: {updatedCount: 2}}, {collation: {locale: "simple"}});
+assert.eq(1, updateResult.modifiedCount);
+
+idIndexUsagesPreIteration = {
+ shard0: numIdIndexUsages(st.rs0.getPrimary()),
+ shard1: numIdIndexUsages(st.rs1.getPrimary())
+};
+for (let nextDocKey of [{shardKey: "ABC", _id: "abc_1"}, {shardKey: "abc", _id: "abc_1"}]) {
+ assert.soon(() => strengthOneChangeStream.hasNext());
+ let next = strengthOneChangeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, nextDocKey, tojson(next));
+ assert.docEq(next.fullDocument, Object.merge(nextDocKey, {updatedCount: 2}));
+}
+assert.eq(numIdIndexUsages(st.rs0.getPrimary()), idIndexUsagesPreIteration.shard0 + 1);
+assert.eq(numIdIndexUsages(st.rs1.getPrimary()), idIndexUsagesPreIteration.shard1 + 1);
+
+strengthOneChangeStream.close();
+
+st.stop();
}());
diff --git a/jstests/sharding/change_stream_update_lookup_read_concern.js b/jstests/sharding/change_stream_update_lookup_read_concern.js
index 1b6938589cf..03b9ec86738 100644
--- a/jstests/sharding/change_stream_update_lookup_read_concern.js
+++ b/jstests/sharding/change_stream_update_lookup_read_concern.js
@@ -3,170 +3,170 @@
// change that we're doing the lookup for, and that change will be majority-committed.
// @tags: [uses_change_streams]
(function() {
- "use strict";
-
- load('jstests/replsets/rslib.js'); // For startSetIfSupportsReadMajority.
- load("jstests/libs/profiler.js"); // For profilerHas*OrThrow() helpers.
- load("jstests/replsets/rslib.js"); // For reconfig().
-
- // For stopServerReplication() and restartServerReplication().
- load("jstests/libs/write_concern_util.js");
-
- // Configure a replica set to have nodes with specific tags - we will eventually add this as
- // part of a sharded cluster.
- const rsNodeOptions = {
- setParameter: {
- writePeriodicNoops: true,
- // Note we do not configure the periodic noop writes to be more frequent as we do to
- // speed up other change streams tests, since we provide an array of individually
- // configured nodes, in order to know which nodes have which tags. This requires a step
- // up command to happen, which requires all nodes to agree on an op time. With the
- // periodic noop writer at a high frequency, this can potentially never finish.
- },
- shardsvr: "",
- };
- const replSetName = jsTestName();
-
- // Note that we include {chainingAllowed: false} in the replica set settings, because this test
- // assumes that both secondaries sync from the primary. Without this setting, the
- // TopologyCoordinator would sometimes chain one of the secondaries off the other. The test
- // later disables replication on one secondary, but with chaining, that would effectively
- // disable replication on both secondaries, deadlocking the test.
- const rst = new ReplSetTest({
- name: replSetName,
- nodes: [
- {rsConfig: {priority: 1, tags: {tag: "primary"}}},
- {rsConfig: {priority: 0, tags: {tag: "closestSecondary"}}},
- {rsConfig: {priority: 0, tags: {tag: "fartherSecondary"}}}
- ],
- nodeOptions: rsNodeOptions,
- settings: {chainingAllowed: false},
- });
-
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
- rst.initiate();
- rst.awaitSecondaryNodes();
-
- // Start the sharding test and add the replica set.
- const st = new ShardingTest({manualAddShard: true});
- assert.commandWorked(st.s.adminCommand({addShard: replSetName + "/" + rst.getPrimary().host}));
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- // Shard the collection to ensure the change stream will perform update lookup from mongos.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- assert.writeOK(mongosColl.insert({_id: 1}));
- rst.awaitReplication();
-
- // Make sure reads with read preference tag 'closestSecondary' go to the tagged secondary.
- const closestSecondary = rst.nodes[1];
- const closestSecondaryDB = closestSecondary.getDB(mongosDB.getName());
- assert.commandWorked(closestSecondaryDB.setProfilingLevel(2));
-
- // We expect the tag to ensure there is only one node to choose from, so the actual read
- // preference doesn't really matter - we use 'nearest' throughout.
- assert.eq(mongosColl.find()
- .readPref("nearest", [{tag: "closestSecondary"}])
- .comment("testing targeting")
- .itcount(),
- 1);
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: closestSecondaryDB,
- filter: {ns: mongosColl.getFullName(), "command.comment": "testing targeting"}
- });
-
- const changeStreamComment = "change stream against closestSecondary";
- const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}], {
- comment: changeStreamComment,
- $readPreference: {mode: "nearest", tags: [{tag: "closestSecondary"}]}
- });
- assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 1}}));
- assert.soon(() => changeStream.hasNext());
- let latestChange = changeStream.next();
- assert.eq(latestChange.operationType, "update");
- assert.docEq(latestChange.fullDocument, {_id: 1, updatedCount: 1});
-
- // Test that the change stream itself goes to the secondary. There might be more than one if we
- // needed multiple getMores to retrieve the changes.
- // TODO SERVER-31650 We have to use 'originatingCommand' here and look for the getMore because
- // the initial aggregate will not show up.
- profilerHasAtLeastOneMatchingEntryOrThrow({
- profileDB: closestSecondaryDB,
- filter: {"originatingCommand.comment": changeStreamComment}
- });
-
- // Test that the update lookup goes to the secondary as well.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: closestSecondaryDB,
- filter: {
- op: "query",
- ns: mongosColl.getFullName(),
- "command.filter._id": 1,
- "command.comment": changeStreamComment,
- // We need to filter out any profiler entries with a stale config - this is the first
- // read on this secondary with a readConcern specified, so it is the first read on this
- // secondary that will enforce shard version.
- errCode: {$ne: ErrorCodes.StaleConfig}
- },
- errorMsgFilter: {ns: mongosColl.getFullName()},
- errorMsgProj: {ns: 1, op: 1, command: 1},
- });
-
- // Now add a new secondary which is "closer" (add the "closestSecondary" tag to that secondary,
- // and remove it from the old node with that tag) to force update lookups target a different
- // node than the change stream itself.
- let rsConfig = rst.getReplSetConfig();
- rsConfig.members[1].tags = {tag: "fartherSecondary"};
- rsConfig.members[2].tags = {tag: "closestSecondary"};
- rsConfig.version = rst.getReplSetConfigFromNode().version + 1;
- reconfig(rst, rsConfig);
- rst.awaitSecondaryNodes();
- const newClosestSecondary = rst.nodes[2];
- const newClosestSecondaryDB = newClosestSecondary.getDB(mongosDB.getName());
- const originalClosestSecondaryDB = closestSecondaryDB;
-
- // Wait for the mongos to acknowledge the new tags from our reconfig.
- awaitRSClientHosts(st.s,
- newClosestSecondary,
- {ok: true, secondary: true, tags: {tag: "closestSecondary"}},
- rst);
- awaitRSClientHosts(st.s,
- originalClosestSecondaryDB.getMongo(),
- {ok: true, secondary: true, tags: {tag: "fartherSecondary"}},
- rst);
- assert.commandWorked(newClosestSecondaryDB.setProfilingLevel(2));
-
- // Make sure new queries with read preference tag "closestSecondary" go to the new secondary.
- profilerHasZeroMatchingEntriesOrThrow({profileDB: newClosestSecondaryDB, filter: {}});
- assert.eq(mongosColl.find()
- .readPref("nearest", [{tag: "closestSecondary"}])
- .comment("testing targeting")
- .itcount(),
- 1);
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: newClosestSecondaryDB,
- filter: {ns: mongosColl.getFullName(), "command.comment": "testing targeting"}
- });
-
- // Test that the change stream continues on the original host, but the update lookup now targets
- // the new, lagged secondary. Even though it's lagged, the lookup should use 'afterClusterTime'
- // to ensure it does not return until the node can see the change it's looking up.
- stopServerReplication(newClosestSecondary);
- assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
-
- // Since we stopped replication, we expect the update lookup to block indefinitely until we
- // resume replication, so we resume replication in a parallel shell while this thread is blocked
- // getting the next change from the stream.
- const noConnect = true; // This shell creates its own connection to the host.
- const joinResumeReplicationShell =
+"use strict";
+
+load('jstests/replsets/rslib.js'); // For startSetIfSupportsReadMajority.
+load("jstests/libs/profiler.js"); // For profilerHas*OrThrow() helpers.
+load("jstests/replsets/rslib.js"); // For reconfig().
+
+// For stopServerReplication() and restartServerReplication().
+load("jstests/libs/write_concern_util.js");
+
+// Configure a replica set to have nodes with specific tags - we will eventually add this as
+// part of a sharded cluster.
+const rsNodeOptions = {
+ setParameter: {
+ writePeriodicNoops: true,
+ // Note we do not configure the periodic noop writes to be more frequent as we do to
+ // speed up other change streams tests, since we provide an array of individually
+ // configured nodes, in order to know which nodes have which tags. This requires a step
+ // up command to happen, which requires all nodes to agree on an op time. With the
+ // periodic noop writer at a high frequency, this can potentially never finish.
+ },
+ shardsvr: "",
+};
+const replSetName = jsTestName();
+
+// Note that we include {chainingAllowed: false} in the replica set settings, because this test
+// assumes that both secondaries sync from the primary. Without this setting, the
+// TopologyCoordinator would sometimes chain one of the secondaries off the other. The test
+// later disables replication on one secondary, but with chaining, that would effectively
+// disable replication on both secondaries, deadlocking the test.
+const rst = new ReplSetTest({
+ name: replSetName,
+ nodes: [
+ {rsConfig: {priority: 1, tags: {tag: "primary"}}},
+ {rsConfig: {priority: 0, tags: {tag: "closestSecondary"}}},
+ {rsConfig: {priority: 0, tags: {tag: "fartherSecondary"}}}
+ ],
+ nodeOptions: rsNodeOptions,
+ settings: {chainingAllowed: false},
+});
+
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+rst.initiate();
+rst.awaitSecondaryNodes();
+
+// Start the sharding test and add the replica set.
+const st = new ShardingTest({manualAddShard: true});
+assert.commandWorked(st.s.adminCommand({addShard: replSetName + "/" + rst.getPrimary().host}));
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+
+// Shard the collection to ensure the change stream will perform update lookup from mongos.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+assert.writeOK(mongosColl.insert({_id: 1}));
+rst.awaitReplication();
+
+// Make sure reads with read preference tag 'closestSecondary' go to the tagged secondary.
+const closestSecondary = rst.nodes[1];
+const closestSecondaryDB = closestSecondary.getDB(mongosDB.getName());
+assert.commandWorked(closestSecondaryDB.setProfilingLevel(2));
+
+// We expect the tag to ensure there is only one node to choose from, so the actual read
+// preference doesn't really matter - we use 'nearest' throughout.
+assert.eq(mongosColl.find()
+ .readPref("nearest", [{tag: "closestSecondary"}])
+ .comment("testing targeting")
+ .itcount(),
+ 1);
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: closestSecondaryDB,
+ filter: {ns: mongosColl.getFullName(), "command.comment": "testing targeting"}
+});
+
+const changeStreamComment = "change stream against closestSecondary";
+const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}], {
+ comment: changeStreamComment,
+ $readPreference: {mode: "nearest", tags: [{tag: "closestSecondary"}]}
+});
+assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 1}}));
+assert.soon(() => changeStream.hasNext());
+let latestChange = changeStream.next();
+assert.eq(latestChange.operationType, "update");
+assert.docEq(latestChange.fullDocument, {_id: 1, updatedCount: 1});
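+// The fullDocument above was fetched via a separate update-lookup query that mongos issues for
+// the current majority-committed version of the document; the profiler checks below verify
+// which node served that query.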
+
+// Test that the change stream itself goes to the secondary. There might be more than one if we
+// needed multiple getMores to retrieve the changes.
+// TODO SERVER-31650 We have to use 'originatingCommand' here and look for the getMore because
+// the initial aggregate will not show up.
+profilerHasAtLeastOneMatchingEntryOrThrow(
+ {profileDB: closestSecondaryDB, filter: {"originatingCommand.comment": changeStreamComment}});
+
+// Test that the update lookup goes to the secondary as well.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: closestSecondaryDB,
+ filter: {
+ op: "query",
+ ns: mongosColl.getFullName(),
+ "command.filter._id": 1,
+ "command.comment": changeStreamComment,
+ // We need to filter out any profiler entries with a stale config - this is the first
+ // read on this secondary with a readConcern specified, so it is the first read on this
+ // secondary that will enforce shard version.
+ errCode: {$ne: ErrorCodes.StaleConfig}
+ },
+ errorMsgFilter: {ns: mongosColl.getFullName()},
+ errorMsgProj: {ns: 1, op: 1, command: 1},
+});
+
+// Now add a new secondary which is "closer" (add the "closestSecondary" tag to that secondary,
+// and remove it from the old node with that tag) to force update lookups to target a different
+// node than the change stream itself.
+let rsConfig = rst.getReplSetConfig();
+rsConfig.members[1].tags = {
+ tag: "fartherSecondary"
+};
+rsConfig.members[2].tags = {
+ tag: "closestSecondary"
+};
+rsConfig.version = rst.getReplSetConfigFromNode().version + 1;
+reconfig(rst, rsConfig);
+rst.awaitSecondaryNodes();
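For reference, the reconfig() helper wraps the replSetReconfig command (roughly; the real helper also handles transient errors). A sketch of the raw form, which is harmless to re-issue as long as the config version is bumped again first:

// Raw-command form of the tag swap performed above.
rsConfig.version = rst.getReplSetConfigFromNode().version + 1;
assert.commandWorked(rst.getPrimary().adminCommand({replSetReconfig: rsConfig, force: false}));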
+const newClosestSecondary = rst.nodes[2];
+const newClosestSecondaryDB = newClosestSecondary.getDB(mongosDB.getName());
+const originalClosestSecondaryDB = closestSecondaryDB;
+
+// Wait for the mongos to acknowledge the new tags from our reconfig.
+awaitRSClientHosts(
+ st.s, newClosestSecondary, {ok: true, secondary: true, tags: {tag: "closestSecondary"}}, rst);
+awaitRSClientHosts(st.s,
+ originalClosestSecondaryDB.getMongo(),
+ {ok: true, secondary: true, tags: {tag: "fartherSecondary"}},
+ rst);
+assert.commandWorked(newClosestSecondaryDB.setProfilingLevel(2));
+
+// Make sure new queries with read preference tag "closestSecondary" go to the new secondary.
+profilerHasZeroMatchingEntriesOrThrow({profileDB: newClosestSecondaryDB, filter: {}});
+assert.eq(mongosColl.find()
+ .readPref("nearest", [{tag: "closestSecondary"}])
+ .comment("testing targeting")
+ .itcount(),
+ 1);
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: newClosestSecondaryDB,
+ filter: {ns: mongosColl.getFullName(), "command.comment": "testing targeting"}
+});
+
+// Test that the change stream continues on the original host, but the update lookup now targets
+// the new, lagged secondary. Even though it's lagged, the lookup should use 'afterClusterTime'
+// to ensure it does not return until the node can see the change it's looking up.
+stopServerReplication(newClosestSecondary);
+assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
+
+// Since we stopped replication, we expect the update lookup to block indefinitely until we
+// resume replication, so we resume replication in a parallel shell while this thread is blocked
+// getting the next change from the stream.
+const noConnect = true; // This shell creates its own connection to the host.
+const joinResumeReplicationShell =
startParallelShell(`load('jstests/libs/write_concern_util.js');
const pausedSecondary = new Mongo("${newClosestSecondary.host}");
@@ -194,26 +194,27 @@
restartServerReplication(pausedSecondary);`,
undefined,
noConnect);
- assert.soon(() => changeStream.hasNext());
- latestChange = changeStream.next();
- assert.eq(latestChange.operationType, "update");
- assert.docEq(latestChange.fullDocument, {_id: 1, updatedCount: 2});
- joinResumeReplicationShell();
-
- // Test that the update lookup goes to the new closest secondary.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: newClosestSecondaryDB,
- filter: {
- op: "query",
- ns: mongosColl.getFullName(), "command.comment": changeStreamComment,
- // We need to filter out any profiler entries with a stale config - this is the first
- // read on this secondary with a readConcern specified, so it is the first read on this
- // secondary that will enforce shard version.
- errCode: {$ne: ErrorCodes.StaleConfig}
- }
- });
-
- changeStream.close();
- st.stop();
- rst.stopSet();
+assert.soon(() => changeStream.hasNext());
+latestChange = changeStream.next();
+assert.eq(latestChange.operationType, "update");
+assert.docEq(latestChange.fullDocument, {_id: 1, updatedCount: 2});
+joinResumeReplicationShell();
+
+// Test that the update lookup goes to the new closest secondary.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: newClosestSecondaryDB,
+ filter: {
+ op: "query",
+ ns: mongosColl.getFullName(),
+ "command.comment": changeStreamComment,
+ // We need to filter out any profiler entries with a stale config - this is the first
+ // read on this secondary with a readConcern specified, so it is the first read on this
+ // secondary that will enforce shard version.
+ errCode: {$ne: ErrorCodes.StaleConfig}
+ }
+});
+
+changeStream.close();
+st.stop();
+rst.stopSet();
}());
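The 'afterClusterTime' behaviour described above is the same mechanism that causally consistent sessions rely on. Purely as an illustration (not executed by the test, and reusing the mongosDB/mongosColl names from it), a lagged tagged secondary cannot serve a stale read on such a session:

// Illustrative sketch only: reads on a causally consistent session carry
// afterClusterTime automatically, so they block until the chosen node has
// caught up to the session's last observed operation time. 'illustrativeField'
// is a placeholder field, not part of the test.
const session = mongosDB.getMongo().startSession({causalConsistency: true});
const sessionColl = session.getDatabase(mongosDB.getName())[mongosColl.getName()];
assert.writeOK(sessionColl.update({_id: 1}, {$set: {illustrativeField: 1}}));
assert.eq(1,
          sessionColl.find({_id: 1})
              .readPref("nearest", [{tag: "closestSecondary"}])
              .itcount());
session.endSession();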
diff --git a/jstests/sharding/change_streams.js b/jstests/sharding/change_streams.js
index 98039231687..08c075c1e18 100644
--- a/jstests/sharding/change_streams.js
+++ b/jstests/sharding/change_streams.js
@@ -1,252 +1,249 @@
// Tests the behavior of change streams on sharded collections.
// @tags: [uses_change_streams]
(function() {
- "use strict";
-
- load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
- load('jstests/aggregation/extras/utils.js'); // For assertErrorCode().
- load('jstests/libs/change_stream_util.js'); // For assertChangeStreamEventEq.
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- function runTest(collName, shardKey) {
- const st = new ShardingTest({
- shards: 2,
- rs: {
- nodes: 1,
- enableMajorityReadConcern: '',
- // Intentionally disable the periodic no-op writer in order to allow the test have
- // control of advancing the cluster time. For when it is enabled later in the test,
- // use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: false}
- }
- });
-
- const mongosDB = st.s0.getDB(jsTestName());
- assert.commandWorked(st.s0.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
- const mongosColl = mongosDB[collName];
-
- //
- // Sanity tests
- //
-
- // Test that $sort and $group are banned from running in a $changeStream pipeline.
- assertErrorCode(mongosDB.NegativeTest,
- [{$changeStream: {}}, {$sort: {operationType: 1}}],
- ErrorCodes.IllegalOperation);
- assertErrorCode(mongosDB.NegativeTest,
- [{$changeStream: {}}, {$group: {_id: '$documentKey'}}],
- ErrorCodes.IllegalOperation);
-
- // Test that using change streams with $out results in an error.
- assertErrorCode(
- mongosColl, [{$changeStream: {}}, {$out: "shouldntWork"}], ErrorCodes.IllegalOperation);
-
- //
- // Main tests
- //
-
- function makeShardKey(value) {
- var obj = {};
- obj[shardKey] = value;
- return obj;
- }
-
- function makeShardKeyDocument(value, optExtraFields) {
- var obj = {};
- if (shardKey !== '_id')
- obj['_id'] = value;
- obj[shardKey] = value;
- return Object.assign(obj, optExtraFields);
+"use strict";
+
+load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
+load('jstests/aggregation/extras/utils.js'); // For assertErrorCode().
+load('jstests/libs/change_stream_util.js'); // For assertChangeStreamEventEq.
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+function runTest(collName, shardKey) {
+ const st = new ShardingTest({
+ shards: 2,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+            // Intentionally disable the periodic no-op writer so that the test has control over
+            // advancing the cluster time. When the writer is enabled later in the test, use a
+            // higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: false}
}
+ });
+
+ const mongosDB = st.s0.getDB(jsTestName());
+ assert.commandWorked(st.s0.adminCommand({enableSharding: mongosDB.getName()}));
+ st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+ const mongosColl = mongosDB[collName];
+
+ //
+ // Sanity tests
+ //
+
+ // Test that $sort and $group are banned from running in a $changeStream pipeline.
+ assertErrorCode(mongosDB.NegativeTest,
+ [{$changeStream: {}}, {$sort: {operationType: 1}}],
+ ErrorCodes.IllegalOperation);
+ assertErrorCode(mongosDB.NegativeTest,
+ [{$changeStream: {}}, {$group: {_id: '$documentKey'}}],
+ ErrorCodes.IllegalOperation);
+
+ // Test that using change streams with $out results in an error.
+ assertErrorCode(
+ mongosColl, [{$changeStream: {}}, {$out: "shouldntWork"}], ErrorCodes.IllegalOperation);
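The assertErrorCode helper runs the aggregation and checks the returned error code; written against the raw command interface, the $sort ban above would look roughly like this sketch:

const rawRes = mongosDB.runCommand({
    aggregate: "NegativeTest",
    pipeline: [{$changeStream: {}}, {$sort: {operationType: 1}}],
    cursor: {}
});
assert.commandFailedWithCode(rawRes, ErrorCodes.IllegalOperation);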
+
+ //
+ // Main tests
+ //
+
+ function makeShardKey(value) {
+ var obj = {};
+ obj[shardKey] = value;
+ return obj;
+ }
- jsTestLog('Testing change streams with shard key ' + shardKey);
- // Shard the test collection and split it into 2 chunks:
- // [MinKey, 0) - shard0, [0, MaxKey) - shard1
- st.shardColl(mongosColl,
- makeShardKey(1) /* shard key */,
- makeShardKey(0) /* split at */,
- makeShardKey(1) /* move to shard 1 */);
-
- // Write a document to each chunk.
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1)));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1)));
-
- let changeStream = mongosColl.aggregate([{$changeStream: {}}]);
-
- // Test that a change stream can see inserts on shard 0.
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1000)));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1000)));
-
- assert.soon(() => changeStream.hasNext(), "expected to be able to see the first insert");
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: makeShardKeyDocument(1000),
- fullDocument: makeShardKeyDocument(1000),
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- });
-
- // Because the periodic noop writer is disabled, do another write to shard 0 in order to
- // advance that shard's clock and enabling the stream to return the earlier write to shard 1
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1001)));
-
- assert.soon(() => changeStream.hasNext(), "expected to be able to see the second insert");
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: makeShardKeyDocument(-1000),
- fullDocument: makeShardKeyDocument(-1000),
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- });
-
- // Test that all changes are eventually visible due to the periodic noop writer.
- assert.commandWorked(
- st.rs0.getPrimary().adminCommand({setParameter: 1, writePeriodicNoops: true}));
- assert.commandWorked(
- st.rs1.getPrimary().adminCommand({setParameter: 1, writePeriodicNoops: true}));
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: makeShardKeyDocument(1001),
- fullDocument: makeShardKeyDocument(1001),
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- });
- changeStream.close();
-
- jsTestLog('Testing multi-update change streams with shard key ' + shardKey);
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(10, {a: 0, b: 0})));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-10, {a: 0, b: 0})));
- changeStream = mongosColl.aggregate([{$changeStream: {}}]);
-
- assert.writeOK(mongosColl.update({a: 0}, {$set: {b: 2}}, {multi: true}));
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- operationType: "update",
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- documentKey: makeShardKeyDocument(-10),
- updateDescription: {updatedFields: {b: 2}, removedFields: []},
- });
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- operationType: "update",
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- documentKey: makeShardKeyDocument(10),
- updateDescription: {updatedFields: {b: 2}, removedFields: []},
- });
- changeStream.close();
-
- // Test that it is legal to open a change stream, even if the
- // 'internalQueryProhibitMergingOnMongos' parameter is set.
- assert.commandWorked(
- st.s0.adminCommand({setParameter: 1, internalQueryProhibitMergingOnMongoS: true}));
- let tempCursor = assert.doesNotThrow(() => mongosColl.aggregate([{$changeStream: {}}]));
- tempCursor.close();
- assert.commandWorked(
- st.s0.adminCommand({setParameter: 1, internalQueryProhibitMergingOnMongoS: false}));
-
- assert.writeOK(mongosColl.remove({}));
- // We awaited the replication of the first write, so the change stream shouldn't return it.
- // Use { w: "majority" } to deal with journaling correctly, even though we only have one
- // node.
- assert.writeOK(
- mongosColl.insert(makeShardKeyDocument(0, {a: 1}), {writeConcern: {w: "majority"}}));
-
- changeStream = mongosColl.aggregate([{$changeStream: {}}]);
- assert(!changeStream.hasNext());
-
- // Drop the collection and test that we return a "drop" followed by an "invalidate" entry
- // and close the cursor.
- jsTestLog('Testing getMore command closes cursor for invalidate entries with shard key' +
- shardKey);
- mongosColl.drop();
- // Wait for the drop to actually happen.
- assert.soon(() => !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(
- mongosColl.getDB(), mongosColl.getName()));
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().operationType, "drop");
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().operationType, "invalidate");
- assert(changeStream.isExhausted());
-
- jsTestLog('Testing aggregate command closes cursor for invalidate entries with shard key' +
- shardKey);
- // Shard the test collection and split it into 2 chunks:
- // [MinKey, 0) - shard0, [0, MaxKey) - shard1
- st.shardColl(mongosColl,
- makeShardKey(1) /* shard key */,
- makeShardKey(0) /* split at */,
- makeShardKey(1) /* move to shard 1 */);
-
- // Write one document to each chunk.
- assert.writeOK(
- mongosColl.insert(makeShardKeyDocument(-1), {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1), {writeConcern: {w: "majority"}}));
-
- changeStream = mongosColl.aggregate([{$changeStream: {}}]);
- assert(!changeStream.hasNext());
-
- // Store a valid resume token before dropping the collection, to be used later in the test
- assert.writeOK(
- mongosColl.insert(makeShardKeyDocument(-2), {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(2), {writeConcern: {w: "majority"}}));
-
- assert.soon(() => changeStream.hasNext());
- const resumeToken = changeStream.next()._id;
-
- mongosColl.drop();
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: makeShardKeyDocument(2),
- fullDocument: makeShardKeyDocument(2),
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- });
-
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().operationType, "drop");
-
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().operationType, "invalidate");
-
- // With an explicit collation, test that we can resume from before the collection drop
- changeStream =
- mongosColl.watch([], {resumeAfter: resumeToken, collation: {locale: "simple"}});
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: makeShardKeyDocument(2),
- fullDocument: makeShardKeyDocument(2),
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- });
-
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().operationType, "drop");
-
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().operationType, "invalidate");
-
- // Test that we can resume from before the collection drop without an explicit collation.
- assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {}
- }));
-
- st.stop();
+ function makeShardKeyDocument(value, optExtraFields) {
+ var obj = {};
+ if (shardKey !== '_id')
+ obj['_id'] = value;
+ obj[shardKey] = value;
+ return Object.assign(obj, optExtraFields);
}
- runTest('with_id_shard_key', '_id');
- runTest('with_non_id_shard_key', 'non_id');
+ jsTestLog('Testing change streams with shard key ' + shardKey);
+ // Shard the test collection and split it into 2 chunks:
+ // [MinKey, 0) - shard0, [0, MaxKey) - shard1
+ st.shardColl(mongosColl,
+ makeShardKey(1) /* shard key */,
+ makeShardKey(0) /* split at */,
+ makeShardKey(1) /* move to shard 1 */);
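Shown for reference only, since the call above has already done this: st.shardColl() wraps a handful of admin commands, and the shard/split/move sequence corresponds conceptually to a sketch like the following (details such as implicit enableSharding and waiting for the range deleter may differ in the real helper):

assert.commandWorked(
    mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: makeShardKey(1)}));
assert.commandWorked(
    mongosDB.adminCommand({split: mongosColl.getFullName(), middle: makeShardKey(0)}));
assert.commandWorked(mongosDB.adminCommand({
    moveChunk: mongosColl.getFullName(),
    find: makeShardKey(1),
    to: st.shard1.shardName,
    _waitForDelete: true
}));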
+
+ // Write a document to each chunk.
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1)));
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(1)));
+
+ let changeStream = mongosColl.aggregate([{$changeStream: {}}]);
+
+ // Test that a change stream can see inserts on shard 0.
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(1000)));
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1000)));
+
+ assert.soon(() => changeStream.hasNext(), "expected to be able to see the first insert");
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: makeShardKeyDocument(1000),
+ fullDocument: makeShardKeyDocument(1000),
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ });
+
+    // Because the periodic noop writer is disabled, do another write to shard 0 in order to
+    // advance that shard's clock and enable the stream to return the earlier write to shard 1.
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(1001)));
+
+ assert.soon(() => changeStream.hasNext(), "expected to be able to see the second insert");
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: makeShardKeyDocument(-1000),
+ fullDocument: makeShardKeyDocument(-1000),
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ });
+
+ // Test that all changes are eventually visible due to the periodic noop writer.
+ assert.commandWorked(
+ st.rs0.getPrimary().adminCommand({setParameter: 1, writePeriodicNoops: true}));
+ assert.commandWorked(
+ st.rs1.getPrimary().adminCommand({setParameter: 1, writePeriodicNoops: true}));
+
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: makeShardKeyDocument(1001),
+ fullDocument: makeShardKeyDocument(1001),
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ });
+ changeStream.close();
+
+ jsTestLog('Testing multi-update change streams with shard key ' + shardKey);
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(10, {a: 0, b: 0})));
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(-10, {a: 0, b: 0})));
+ changeStream = mongosColl.aggregate([{$changeStream: {}}]);
+
+ assert.writeOK(mongosColl.update({a: 0}, {$set: {b: 2}}, {multi: true}));
+
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ operationType: "update",
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ documentKey: makeShardKeyDocument(-10),
+ updateDescription: {updatedFields: {b: 2}, removedFields: []},
+ });
+
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ operationType: "update",
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ documentKey: makeShardKeyDocument(10),
+ updateDescription: {updatedFields: {b: 2}, removedFields: []},
+ });
+ changeStream.close();
+
+    // Test that it is legal to open a change stream, even if the
+    // 'internalQueryProhibitMergingOnMongoS' parameter is set.
+ assert.commandWorked(
+ st.s0.adminCommand({setParameter: 1, internalQueryProhibitMergingOnMongoS: true}));
+ let tempCursor = assert.doesNotThrow(() => mongosColl.aggregate([{$changeStream: {}}]));
+ tempCursor.close();
+ assert.commandWorked(
+ st.s0.adminCommand({setParameter: 1, internalQueryProhibitMergingOnMongoS: false}));
+
+ assert.writeOK(mongosColl.remove({}));
+ // We awaited the replication of the first write, so the change stream shouldn't return it.
+ // Use { w: "majority" } to deal with journaling correctly, even though we only have one
+ // node.
+ assert.writeOK(
+ mongosColl.insert(makeShardKeyDocument(0, {a: 1}), {writeConcern: {w: "majority"}}));
+
+ changeStream = mongosColl.aggregate([{$changeStream: {}}]);
+ assert(!changeStream.hasNext());
+
+ // Drop the collection and test that we return a "drop" followed by an "invalidate" entry
+ // and close the cursor.
+    jsTestLog('Testing getMore command closes cursor for invalidate entries with shard key ' +
+              shardKey);
+ mongosColl.drop();
+ // Wait for the drop to actually happen.
+ assert.soon(() => !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(
+ mongosColl.getDB(), mongosColl.getName()));
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().operationType, "drop");
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().operationType, "invalidate");
+ assert(changeStream.isExhausted());
+
+    jsTestLog('Testing aggregate command closes cursor for invalidate entries with shard key ' +
+              shardKey);
+ // Shard the test collection and split it into 2 chunks:
+ // [MinKey, 0) - shard0, [0, MaxKey) - shard1
+ st.shardColl(mongosColl,
+ makeShardKey(1) /* shard key */,
+ makeShardKey(0) /* split at */,
+ makeShardKey(1) /* move to shard 1 */);
+
+ // Write one document to each chunk.
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1), {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(1), {writeConcern: {w: "majority"}}));
+
+ changeStream = mongosColl.aggregate([{$changeStream: {}}]);
+ assert(!changeStream.hasNext());
+
+ // Store a valid resume token before dropping the collection, to be used later in the test
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(-2), {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(2), {writeConcern: {w: "majority"}}));
+
+ assert.soon(() => changeStream.hasNext());
+ const resumeToken = changeStream.next()._id;
+
+ mongosColl.drop();
+
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: makeShardKeyDocument(2),
+ fullDocument: makeShardKeyDocument(2),
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ });
+
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().operationType, "drop");
+
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().operationType, "invalidate");
+
+ // With an explicit collation, test that we can resume from before the collection drop
+ changeStream = mongosColl.watch([], {resumeAfter: resumeToken, collation: {locale: "simple"}});
+
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: makeShardKeyDocument(2),
+ fullDocument: makeShardKeyDocument(2),
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ });
+
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().operationType, "drop");
+
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().operationType, "invalidate");
+
+ // Test that we can resume from before the collection drop without an explicit collation.
+ assert.commandWorked(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
+ cursor: {}
+ }));
+
+ st.stop();
+}
+
+runTest('with_id_shard_key', '_id');
+runTest('with_non_id_shard_key', 'non_id');
})();
diff --git a/jstests/sharding/change_streams_establishment_finds_new_shards.js b/jstests/sharding/change_streams_establishment_finds_new_shards.js
index 45bc1583e46..146fc166d50 100644
--- a/jstests/sharding/change_streams_establishment_finds_new_shards.js
+++ b/jstests/sharding/change_streams_establishment_finds_new_shards.js
@@ -2,50 +2,48 @@
// during cursor establishment.
// @tags: [uses_change_streams]
(function() {
- 'use strict';
+'use strict';
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- const rsNodeOptions = {
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
- };
- const st =
- new ShardingTest({shards: 1, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
+const rsNodeOptions = {
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
+};
+const st =
+ new ShardingTest({shards: 1, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
- jsTestLog("Starting new shard (but not adding to shard set yet)");
- const newShard = new ReplSetTest({name: "newShard", nodes: 1, nodeOptions: rsNodeOptions});
- newShard.startSet({shardsvr: ''});
- newShard.initiate();
+jsTestLog("Starting new shard (but not adding to shard set yet)");
+const newShard = new ReplSetTest({name: "newShard", nodes: 1, nodeOptions: rsNodeOptions});
+newShard.startSet({shardsvr: ''});
+newShard.initiate();
- const mongos = st.s;
- const mongosColl = mongos.getCollection('test.foo');
- const mongosDB = mongos.getDB("test");
+const mongos = st.s;
+const mongosColl = mongos.getCollection('test.foo');
+const mongosDB = mongos.getDB("test");
- // Enable sharding to inform mongos of the database, allowing us to open a cursor.
- assert.commandWorked(mongos.adminCommand({enableSharding: mongosDB.getName()}));
+// Enable sharding to inform mongos of the database, allowing us to open a cursor.
+assert.commandWorked(mongos.adminCommand({enableSharding: mongosDB.getName()}));
- // Shard the collection.
- assert.commandWorked(
- mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+// Shard the collection.
+assert.commandWorked(
+ mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
- // Split the collection into two chunks: [MinKey, 10) and [10, MaxKey].
- assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
+// Split the collection into two chunks: [MinKey, 10) and [10, MaxKey].
+assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
- // Enable the failpoint.
- assert.commandWorked(mongos.adminCommand({
- configureFailPoint: "clusterAggregateHangBeforeEstablishingShardCursors",
- mode: "alwaysOn"
- }));
+// Enable the failpoint.
+assert.commandWorked(mongos.adminCommand(
+ {configureFailPoint: "clusterAggregateHangBeforeEstablishingShardCursors", mode: "alwaysOn"}));
- // While opening the cursor, wait for the failpoint and add the new shard.
- const awaitNewShard = startParallelShell(`
+// While opening the cursor, wait for the failpoint and add the new shard.
+const awaitNewShard = startParallelShell(`
load("jstests/libs/check_log.js");
checkLog.contains(db,
"clusterAggregateHangBeforeEstablishingShardCursors fail point enabled");
@@ -62,27 +60,27 @@
mode: "off"}));`,
mongos.port);
- jsTestLog("Opening $changeStream cursor");
- const changeStream = mongosColl.aggregate([{$changeStream: {}}]);
- assert(!changeStream.hasNext(), "Do not expect any results yet");
+jsTestLog("Opening $changeStream cursor");
+const changeStream = mongosColl.aggregate([{$changeStream: {}}]);
+assert(!changeStream.hasNext(), "Do not expect any results yet");
- // Clean up the parallel shell.
- awaitNewShard();
+// Clean up the parallel shell.
+awaitNewShard();
- // Insert two documents in different shards.
- assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
+// Insert two documents in different shards.
+assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
- // Expect to see them both.
- for (let id of[0, 20]) {
- jsTestLog("Expecting Item " + id);
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.documentKey, {_id: id});
- }
- assert(!changeStream.hasNext());
+// Expect to see them both.
+for (let id of [0, 20]) {
+ jsTestLog("Expecting Item " + id);
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.documentKey, {_id: id});
+}
+assert(!changeStream.hasNext());
- st.stop();
- newShard.stopSet();
+st.stop();
+newShard.stopSet();
})();
diff --git a/jstests/sharding/change_streams_primary_shard_unaware.js b/jstests/sharding/change_streams_primary_shard_unaware.js
index 1fdb86564ae..b325f770585 100644
--- a/jstests/sharding/change_streams_primary_shard_unaware.js
+++ b/jstests/sharding/change_streams_primary_shard_unaware.js
@@ -5,183 +5,180 @@
// SERVER-36321.
// @tags: [requires_persistence, blacklist_from_rhel_67_s390x, uses_change_streams]
(function() {
- "use strict";
-
- load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- // TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
- if (!jsTestOptions().enableMajorityReadConcern &&
- jsTestOptions().mongosBinVersion === 'last-stable') {
- jsTestLog(
- "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
- return;
- }
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- // Returns true if the shard is aware that the collection is sharded.
- function isShardAware(shard, coll) {
- const res = shard.adminCommand({getShardVersion: coll, fullMetadata: true});
- assert.commandWorked(res);
- return res.metadata.collVersion != undefined;
- }
-
- const testName = "change_streams_primary_shard_unaware";
- const st = new ShardingTest({
- shards: 2,
- mongos: 3,
- rs: {
- nodes: 1,
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true},
- },
- });
-
- const mongosDB = st.s0.getDB(testName);
-
- // Ensure that shard0 is the primary shard.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Create unsharded collection on primary shard.
- const mongosColl = mongosDB[testName];
- assert.commandWorked(mongosDB.createCollection(testName));
-
- // Before sharding the collection, issue a write through mongos2 to ensure that it knows the
- // collection exists and believes it is unsharded. This is needed later in the test to avoid
- // triggering a refresh when a change stream is established through mongos2.
- const mongos2DB = st.s2.getDB(testName);
- const mongos2Coll = mongos2DB[testName];
- assert.writeOK(mongos2Coll.insert({_id: 0, a: 0}));
-
- // Create index on the shard key.
- assert.commandWorked(mongos2Coll.createIndex({a: 1}));
-
- // Shard the collection.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {a: 1}}));
-
- // Restart the primary shard and ensure that it is no longer aware that the collection is
- // sharded.
- st.restartShardRS(0);
- assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
-
- const mongos1DB = st.s1.getDB(testName);
- const mongos1Coll = mongos1DB[testName];
-
- // Establish change stream cursor on the second mongos, which is not aware that the
- // collection is sharded.
- let cstMongos1 = new ChangeStreamTest(mongos1DB);
- let cursorMongos1 = cstMongos1.startWatchingChanges(
- {pipeline: [{$changeStream: {fullDocument: "updateLookup"}}], collection: mongos1Coll});
- assert.eq(0, cursorMongos1.firstBatch.length, "Cursor had changes: " + tojson(cursorMongos1));
-
- // Establish a change stream cursor on the now sharded collection through the first mongos.
- let cst = new ChangeStreamTest(mongosDB);
- let cursor = cst.startWatchingChanges(
- {pipeline: [{$changeStream: {fullDocument: "updateLookup"}}], collection: mongosColl});
- assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
-
- // Ensure that the primary shard is still unaware that the collection is sharded.
- assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
-
- // Insert a doc and verify that the primary shard is now aware that the collection is sharded.
- assert.writeOK(mongosColl.insert({_id: 1, a: 1}));
- assert.eq(true, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
-
- // Verify that both cursors are able to pick up an inserted document.
- cst.assertNextChangesEqual({
- cursor: cursor,
- expectedChanges: [{
- documentKey: {_id: 1, a: 1},
- fullDocument: {_id: 1, a: 1},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- }]
- });
- let mongos1ChangeDoc = cstMongos1.getOneChange(cursorMongos1);
- assert.docEq({_id: 1, a: 1}, mongos1ChangeDoc.documentKey);
- assert.docEq({_id: 1, a: 1}, mongos1ChangeDoc.fullDocument);
- assert.eq({db: mongos1DB.getName(), coll: mongos1Coll.getName()}, mongos1ChangeDoc.ns);
- assert.eq("insert", mongos1ChangeDoc.operationType);
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
- assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {a: 0}}));
-
- // Move a chunk to the non-primary shard.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {a: -1},
- to: st.rs1.getURL(),
- _waitForDelete: true
- }));
-
- // Update the document on the primary shard.
- assert.writeOK(mongosColl.update({_id: 1, a: 1}, {$set: {b: 1}}));
- // Insert another document to each shard.
- assert.writeOK(mongosColl.insert({_id: -2, a: -2}));
- assert.writeOK(mongosColl.insert({_id: 2, a: 2}));
-
- // Verify that both cursors pick up the first inserted doc regardless of the moveChunk
- // operation.
- cst.assertNextChangesEqual({
- cursor: cursor,
- expectedChanges: [{
- documentKey: {_id: 1, a: 1},
- fullDocument: {_id: 1, a: 1, b: 1},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "update",
- updateDescription: {removedFields: [], updatedFields: {b: 1}}
- }]
- });
- mongos1ChangeDoc = cstMongos1.getOneChange(cursorMongos1);
- assert.docEq({_id: 1, a: 1}, mongos1ChangeDoc.documentKey);
- assert.docEq({_id: 1, a: 1, b: 1}, mongos1ChangeDoc.fullDocument);
- assert.eq({db: mongos1DB.getName(), coll: mongos1Coll.getName()}, mongos1ChangeDoc.ns);
- assert.eq("update", mongos1ChangeDoc.operationType);
-
- // Restart the primary shard and ensure that it is no longer aware that the collection is
- // sharded.
- st.restartShardRS(0);
- assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
-
- // Establish change stream cursor on mongos2 using the resume token from the change steam on
- // mongos1. Mongos2 is aware that the collection exists and thinks that it's unsharded, so it
- // won't trigger a routing table refresh. This must be done using a resume token from an update
- // otherwise the shard will generate the documentKey based on the assumption that the shard key
- // is _id which will cause the cursor establishment to fail due to SERVER-32085.
- let cstMongos2 = new ChangeStreamTest(mongos2DB);
- let cursorMongos2 = cstMongos2.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: mongos1ChangeDoc._id}}],
- collection: mongos2Coll
- });
-
- cstMongos2.assertNextChangesEqual({
- cursor: cursorMongos2,
- expectedChanges: [{
- documentKey: {_id: -2, a: -2},
- fullDocument: {_id: -2, a: -2},
- ns: {db: mongos2DB.getName(), coll: mongos2Coll.getName()},
- operationType: "insert",
- }]
- });
-
- cstMongos2.assertNextChangesEqual({
- cursor: cursorMongos2,
- expectedChanges: [{
- documentKey: {_id: 2, a: 2},
- fullDocument: {_id: 2, a: 2},
- ns: {db: mongos2DB.getName(), coll: mongos2Coll.getName()},
- operationType: "insert",
- }]
- });
-
- st.stop();
-
+"use strict";
+
+load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+// TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
+if (!jsTestOptions().enableMajorityReadConcern &&
+ jsTestOptions().mongosBinVersion === 'last-stable') {
+ jsTestLog(
+ "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
+ return;
+}
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+// Returns true if the shard is aware that the collection is sharded.
+function isShardAware(shard, coll) {
+ const res = shard.adminCommand({getShardVersion: coll, fullMetadata: true});
+ assert.commandWorked(res);
+ return res.metadata.collVersion != undefined;
+}
+
+const testName = "change_streams_primary_shard_unaware";
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 3,
+ rs: {
+ nodes: 1,
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true},
+ },
+});
+
+const mongosDB = st.s0.getDB(testName);
+
+// Ensure that shard0 is the primary shard.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Create unsharded collection on primary shard.
+const mongosColl = mongosDB[testName];
+assert.commandWorked(mongosDB.createCollection(testName));
+
+// Before sharding the collection, issue a write through mongos2 to ensure that it knows the
+// collection exists and believes it is unsharded. This is needed later in the test to avoid
+// triggering a refresh when a change stream is established through mongos2.
+const mongos2DB = st.s2.getDB(testName);
+const mongos2Coll = mongos2DB[testName];
+assert.writeOK(mongos2Coll.insert({_id: 0, a: 0}));
+
+// Create index on the shard key.
+assert.commandWorked(mongos2Coll.createIndex({a: 1}));
+
+// Shard the collection.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {a: 1}}));
+
+// Restart the primary shard and ensure that it is no longer aware that the collection is
+// sharded.
+st.restartShardRS(0);
+assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
+
+const mongos1DB = st.s1.getDB(testName);
+const mongos1Coll = mongos1DB[testName];
+
+// Establish change stream cursor on the second mongos, which is not aware that the
+// collection is sharded.
+let cstMongos1 = new ChangeStreamTest(mongos1DB);
+let cursorMongos1 = cstMongos1.startWatchingChanges(
+ {pipeline: [{$changeStream: {fullDocument: "updateLookup"}}], collection: mongos1Coll});
+assert.eq(0, cursorMongos1.firstBatch.length, "Cursor had changes: " + tojson(cursorMongos1));
+
+// Establish a change stream cursor on the now sharded collection through the first mongos.
+let cst = new ChangeStreamTest(mongosDB);
+let cursor = cst.startWatchingChanges(
+ {pipeline: [{$changeStream: {fullDocument: "updateLookup"}}], collection: mongosColl});
+assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
+
+// Ensure that the primary shard is still unaware that the collection is sharded.
+assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
+
+// Insert a doc and verify that the primary shard is now aware that the collection is sharded.
+assert.writeOK(mongosColl.insert({_id: 1, a: 1}));
+assert.eq(true, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
+
+// Verify that both cursors are able to pick up an inserted document.
+cst.assertNextChangesEqual({
+ cursor: cursor,
+ expectedChanges: [{
+ documentKey: {_id: 1, a: 1},
+ fullDocument: {_id: 1, a: 1},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ }]
+});
+let mongos1ChangeDoc = cstMongos1.getOneChange(cursorMongos1);
+assert.docEq({_id: 1, a: 1}, mongos1ChangeDoc.documentKey);
+assert.docEq({_id: 1, a: 1}, mongos1ChangeDoc.fullDocument);
+assert.eq({db: mongos1DB.getName(), coll: mongos1Coll.getName()}, mongos1ChangeDoc.ns);
+assert.eq("insert", mongos1ChangeDoc.operationType);
+
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {a: 0}}));
+
+// Move a chunk to the non-primary shard.
+assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {a: -1},
+ to: st.rs1.getURL(),
+ _waitForDelete: true
+}));
+
+// Update the document on the primary shard.
+assert.writeOK(mongosColl.update({_id: 1, a: 1}, {$set: {b: 1}}));
+// Insert another document to each shard.
+assert.writeOK(mongosColl.insert({_id: -2, a: -2}));
+assert.writeOK(mongosColl.insert({_id: 2, a: 2}));
+
+// Verify that both cursors pick up the first inserted doc regardless of the moveChunk
+// operation.
+cst.assertNextChangesEqual({
+ cursor: cursor,
+ expectedChanges: [{
+ documentKey: {_id: 1, a: 1},
+ fullDocument: {_id: 1, a: 1, b: 1},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "update",
+ updateDescription: {removedFields: [], updatedFields: {b: 1}}
+ }]
+});
+mongos1ChangeDoc = cstMongos1.getOneChange(cursorMongos1);
+assert.docEq({_id: 1, a: 1}, mongos1ChangeDoc.documentKey);
+assert.docEq({_id: 1, a: 1, b: 1}, mongos1ChangeDoc.fullDocument);
+assert.eq({db: mongos1DB.getName(), coll: mongos1Coll.getName()}, mongos1ChangeDoc.ns);
+assert.eq("update", mongos1ChangeDoc.operationType);
+
+// Restart the primary shard and ensure that it is no longer aware that the collection is
+// sharded.
+st.restartShardRS(0);
+assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
+
+// Establish change stream cursor on mongos2 using the resume token from the change stream on
+// mongos1. Mongos2 is aware that the collection exists and thinks that it's unsharded, so it
+// won't trigger a routing table refresh. This must be done using a resume token from an update;
+// otherwise the shard will generate the documentKey based on the assumption that the shard key
+// is _id, which will cause the cursor establishment to fail due to SERVER-32085.
+let cstMongos2 = new ChangeStreamTest(mongos2DB);
+let cursorMongos2 = cstMongos2.startWatchingChanges(
+ {pipeline: [{$changeStream: {resumeAfter: mongos1ChangeDoc._id}}], collection: mongos2Coll});
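The same resumption could also be expressed with the collection-level watch() helper; a minimal sketch using the mongos2Coll and mongos1ChangeDoc values from above (this duplicates, rather than replaces, the ChangeStreamTest cursor):

const resumedOnMongos2 = mongos2Coll.watch([], {resumeAfter: mongos1ChangeDoc._id});
assert.soon(() => resumedOnMongos2.hasNext());
assert.eq("insert", resumedOnMongos2.next().operationType);
resumedOnMongos2.close();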
+
+cstMongos2.assertNextChangesEqual({
+ cursor: cursorMongos2,
+ expectedChanges: [{
+ documentKey: {_id: -2, a: -2},
+ fullDocument: {_id: -2, a: -2},
+ ns: {db: mongos2DB.getName(), coll: mongos2Coll.getName()},
+ operationType: "insert",
+ }]
+});
+
+cstMongos2.assertNextChangesEqual({
+ cursor: cursorMongos2,
+ expectedChanges: [{
+ documentKey: {_id: 2, a: 2},
+ fullDocument: {_id: 2, a: 2},
+ ns: {db: mongos2DB.getName(), coll: mongos2Coll.getName()},
+ operationType: "insert",
+ }]
+});
+
+st.stop();
})();
diff --git a/jstests/sharding/change_streams_shards_start_in_sync.js b/jstests/sharding/change_streams_shards_start_in_sync.js
index 9209ad4ea70..3928913a1bb 100644
--- a/jstests/sharding/change_streams_shards_start_in_sync.js
+++ b/jstests/sharding/change_streams_shards_start_in_sync.js
@@ -7,109 +7,108 @@
// and 'B' will be seen in the changestream before 'C'.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ useBridge: true,
+ rs: {
+ nodes: 1,
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
+ }
+});
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- const st = new ShardingTest({
- shards: 2,
- mongos: 2,
- useBridge: true,
- rs: {
- nodes: 1,
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
- }
- });
+// Shard the test collection on _id.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
-
- // Move the [0, MaxKey) chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
-
- function checkStream() {
- load('jstests/libs/change_stream_util.js'); // For assertChangeStreamEventEq.
-
- db = db.getSiblingDB(jsTestName());
- let coll = db[jsTestName()];
- let changeStream = coll.aggregate([{$changeStream: {}}]);
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: {_id: -1000},
- fullDocument: {_id: -1000},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- });
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: {_id: 1001},
- fullDocument: {_id: 1001},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- });
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: {_id: -1002},
- fullDocument: {_id: -1002},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- });
- changeStream.close();
- }
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
- // Start the $changeStream with shard 1 unavailable on the second mongos (s1). We will be
- // writing through the first mongos (s0), which will remain connected to all shards.
- st.rs1.getPrimary().disconnect(st.s1);
- let waitForShell = startParallelShell(checkStream, st.s1.port);
-
- // Wait for the aggregate cursor to appear in currentOp on the current shard.
- function waitForShardCursor(rs) {
- assert.soon(() => st.rs0.getPrimary()
- .getDB('admin')
- .aggregate([
- {"$currentOp": {"idleCursors": true}},
- {"$match": {ns: mongosColl.getFullName(), type: "idleCursor"}}
-
- ])
- .itcount() === 1);
- }
- // Make sure the shard 0 $changeStream cursor is established before doing the first writes.
- waitForShardCursor(st.rs0);
+// Move the [0, MaxKey) chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
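After this moveChunk each shard should own exactly one of the two chunks; a quick sanity sketch against the config database (not part of the original test; in this server version chunk documents are keyed by the 'ns' field):

const configChunks = st.s0.getDB("config").chunks;
assert.eq(2, configChunks.count({ns: mongosColl.getFullName()}));
assert.eq(1, configChunks.count({ns: mongosColl.getFullName(), shard: st.shard0.shardName}));
assert.eq(1, configChunks.count({ns: mongosColl.getFullName(), shard: st.shard1.shardName}));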
- assert.writeOK(mongosColl.insert({_id: -1000}, {writeConcern: {w: "majority"}}));
+function checkStream() {
+ load('jstests/libs/change_stream_util.js'); // For assertChangeStreamEventEq.
- // This write to shard 1 occurs before the $changeStream cursor on shard 1 is open, because the
- // mongos where the $changeStream is running is disconnected from shard 1.
- assert.writeOK(mongosColl.insert({_id: 1001}, {writeConcern: {w: "majority"}}));
+ db = db.getSiblingDB(jsTestName());
+ let coll = db[jsTestName()];
+ let changeStream = coll.aggregate([{$changeStream: {}}]);
- jsTestLog("Reconnecting");
- st.rs1.getPrimary().reconnect(st.s1);
- waitForShardCursor(st.rs1);
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: {_id: -1000},
+ fullDocument: {_id: -1000},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "insert",
+ });
+
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: {_id: 1001},
+ fullDocument: {_id: 1001},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "insert",
+ });
- assert.writeOK(mongosColl.insert({_id: -1002}, {writeConcern: {w: "majority"}}));
- waitForShell();
- st.stop();
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: {_id: -1002},
+ fullDocument: {_id: -1002},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "insert",
+ });
+ changeStream.close();
+}
+
+// Start the $changeStream with shard 1 unavailable on the second mongos (s1). We will be
+// writing through the first mongos (s0), which will remain connected to all shards.
+st.rs1.getPrimary().disconnect(st.s1);
+let waitForShell = startParallelShell(checkStream, st.s1.port);
+
+// Wait for the aggregate cursor to appear in currentOp on the current shard.
+function waitForShardCursor(rs) {
+    assert.soon(() => rs.getPrimary()
+                          .getDB('admin')
+                          .aggregate([
+                              {"$currentOp": {"idleCursors": true}},
+                              {"$match": {ns: mongosColl.getFullName(), type: "idleCursor"}}
+                          ])
+                          .itcount() === 1);
+}
+// Make sure the shard 0 $changeStream cursor is established before doing the first writes.
+waitForShardCursor(st.rs0);
+
+assert.writeOK(mongosColl.insert({_id: -1000}, {writeConcern: {w: "majority"}}));
+
+// This write to shard 1 occurs before the $changeStream cursor on shard 1 is open, because the
+// mongos where the $changeStream is running is disconnected from shard 1.
+assert.writeOK(mongosColl.insert({_id: 1001}, {writeConcern: {w: "majority"}}));
+
+jsTestLog("Reconnecting");
+st.rs1.getPrimary().reconnect(st.s1);
+waitForShardCursor(st.rs1);
+
+assert.writeOK(mongosColl.insert({_id: -1002}, {writeConcern: {w: "majority"}}));
+waitForShell();
+st.stop();
})();
diff --git a/jstests/sharding/change_streams_unsharded_becomes_sharded.js b/jstests/sharding/change_streams_unsharded_becomes_sharded.js
index e865fb709d5..c28e19c9520 100644
--- a/jstests/sharding/change_streams_unsharded_becomes_sharded.js
+++ b/jstests/sharding/change_streams_unsharded_becomes_sharded.js
@@ -5,190 +5,188 @@
// sharded.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
+
+load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const testName = "change_streams_unsharded_becomes_sharded";
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
+ }
+});
- load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
+const mongosDB = st.s0.getDB("test");
+const mongosColl = mongosDB[testName];
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+function testUnshardedBecomesSharded(collToWatch) {
+ mongosColl.drop();
+ mongosDB.createCollection(testName);
+ mongosColl.createIndex({x: 1});
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+ st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+ // Establish a change stream cursor on the unsharded collection.
+ const cst = new ChangeStreamTest(mongosDB);
+
+ // Create a different collection in the same database, and verify that it doesn't affect the
+ // results of the change stream.
+ const mongosCollOther = mongosDB[testName + "other"];
+ mongosCollOther.drop();
+ mongosDB.createCollection(testName + "other");
+ mongosCollOther.createIndex({y: 1});
+
+ let cursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {}}, {$match: {"ns.coll": mongosColl.getName()}}],
+ collection: collToWatch
+ });
+ assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
+
+ // Verify that the cursor picks up documents inserted while the collection is unsharded. The
+ // 'documentKey' at this point is simply the _id field.
+ assert.writeOK(mongosColl.insert({_id: 0, x: 0}));
+ assert.writeOK(mongosCollOther.insert({_id: 0, y: 0}));
+ const [preShardCollectionChange] = cst.assertNextChangesEqual({
+ cursor: cursor,
+ expectedChanges: [{
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, x: 0},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ }]
+ });
- const testName = "change_streams_unsharded_becomes_sharded";
- const st = new ShardingTest({
- shards: 2,
- mongos: 1,
- rs: {
- nodes: 1,
- enableMajorityReadConcern: '',
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
+ // Record the resume token for this change, before the collection is sharded.
+ const preShardCollectionResumeToken = preShardCollectionChange._id;
+
+ // Shard the test collection with shard key {x: 1} and split into 2 chunks.
+ st.shardColl(mongosColl.getName(), {x: 1}, {x: 0}, false, mongosDB.getName());
+
+ // Shard the other collection with shard key {y: 1} and split into 2 chunks.
+ st.shardColl(mongosCollOther.getName(), {y: 1}, {y: 0}, false, mongosDB.getName());
+
+ // List the changes we expect to see for the next two operations on the sharded collection.
+ // Later, we will resume the stream using the token generated before the collection was
+ // sharded, and will need to confirm that we can still see these two changes.
+ const postShardCollectionChanges = [
+ {
+ documentKey: {x: 1, _id: 1},
+ fullDocument: {_id: 1, x: 1},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ },
+ {
+ documentKey: {x: -1, _id: -1},
+ fullDocument: {_id: -1, x: -1},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
}
+ ];
+
+ // Verify that the cursor on the original shard is still valid and sees new inserted
+ // documents. The 'documentKey' field should now include the shard key, even before a
+ // 'kNewShardDetected' operation has been generated by the migration of a chunk to a new
+ // shard.
+ assert.writeOK(mongosColl.insert({_id: 1, x: 1}));
+ assert.writeOK(mongosCollOther.insert({_id: 1, y: 1}));
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [postShardCollectionChanges[0]]});
+
+ // Move the [minKey, 0) chunk to shard1.
+ assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {x: -1},
+ to: st.rs1.getURL(),
+ _waitForDelete: true
+ }));
+ assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosCollOther.getFullName(),
+ find: {y: -1},
+ to: st.rs1.getURL(),
+ _waitForDelete: true
+ }));
+
+ // Make sure the change stream cursor sees a document inserted on the recipient shard.
+ assert.writeOK(mongosColl.insert({_id: -1, x: -1}));
+ assert.writeOK(mongosCollOther.insert({_id: -1, y: -1}));
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [postShardCollectionChanges[1]]});
+
+ // Confirm that we can resume the stream on the sharded collection using the token generated
+ // while the collection was unsharded, whose documentKey contains the _id field but not the
+ // shard key.
+ let resumedCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: preShardCollectionResumeToken}}],
+ collection: mongosColl
});
- const mongosDB = st.s0.getDB("test");
- const mongosColl = mongosDB[testName];
-
- function testUnshardedBecomesSharded(collToWatch) {
- mongosColl.drop();
- mongosDB.createCollection(testName);
- mongosColl.createIndex({x: 1});
-
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Establish a change stream cursor on the unsharded collection.
- const cst = new ChangeStreamTest(mongosDB);
-
- // Create a different collection in the same database, and verify that it doesn't affect the
- // results of the change stream.
- const mongosCollOther = mongosDB[testName + "other"];
- mongosCollOther.drop();
- mongosDB.createCollection(testName + "other");
- mongosCollOther.createIndex({y: 1});
-
- let cursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {}}, {$match: {"ns.coll": mongosColl.getName()}}],
- collection: collToWatch
- });
- assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
-
- // Verify that the cursor picks up documents inserted while the collection is unsharded. The
- // 'documentKey' at this point is simply the _id field.
- assert.writeOK(mongosColl.insert({_id: 0, x: 0}));
- assert.writeOK(mongosCollOther.insert({_id: 0, y: 0}));
- const[preShardCollectionChange] = cst.assertNextChangesEqual({
- cursor: cursor,
- expectedChanges: [{
- documentKey: {_id: 0},
- fullDocument: {_id: 0, x: 0},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- }]
- });
+ // Verify that we see both of the insertions which occurred after the collection was
+ // sharded.
+ cst.assertNextChangesEqual(
+ {cursor: resumedCursor, expectedChanges: postShardCollectionChanges});
- // Record the resume token for this change, before the collection is sharded.
- const preShardCollectionResumeToken = preShardCollectionChange._id;
+ // Test the behavior of a change stream when a sharded collection is dropped and recreated.
+ cursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {}}, {$match: {"ns.coll": mongosColl.getName()}}],
+ collection: collToWatch
+ });
+ assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
- // Shard the test collection with shard key {x: 1} and split into 2 chunks.
- st.shardColl(mongosColl.getName(), {x: 1}, {x: 0}, false, mongosDB.getName());
+ // Insert a couple of documents into shard1, creating a scenario where the getMore to shard0
+ // will indicate that the change stream is invalidated, yet shard1 will still have data to return.
+ assert.writeOK(mongosColl.insert({_id: -2, x: -2}));
+ assert.writeOK(mongosColl.insert({_id: -3, x: -3}));
- // Shard the other collection with shard key {y: 1} and split into 2 chunks.
- st.shardColl(mongosCollOther.getName(), {y: 1}, {y: 0}, false, mongosDB.getName());
+ // Drop and recreate the collection.
+ mongosColl.drop();
+ mongosDB.createCollection(mongosColl.getName());
+ mongosColl.createIndex({z: 1});
- // List the changes we expect to see for the next two operations on the sharded collection.
- // Later, we will resume the stream using the token generated before the collection was
- // sharded, and will need to confirm that we can still see these two changes.
- const postShardCollectionChanges = [
+ // Shard the collection on a different shard key and ensure that each shard has a chunk.
+ st.shardColl(mongosColl.getName(), {z: 1}, {z: 0}, {z: -1}, mongosDB.getName());
+
+ assert.writeOK(mongosColl.insert({_id: -1, z: -1}));
+ assert.writeOK(mongosColl.insert({_id: 1, z: 1}));
+
+ // Verify that the change stream picks up the inserts; however, the shard key is missing
+ // because the collection has since been dropped and recreated.
+ cst.assertNextChangesEqual({
+ cursor: cursor,
+ expectedChanges: [
{
- documentKey: {x: 1, _id: 1},
- fullDocument: {_id: 1, x: 1},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
+ documentKey: {_id: -2},
+ fullDocument: {_id: -2, x: -2},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
},
{
- documentKey: {x: -1, _id: -1},
- fullDocument: {_id: -1, x: -1},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
+ documentKey: {_id: -3},
+ fullDocument: {_id: -3, x: -3},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
}
- ];
-
- // Verify that the cursor on the original shard is still valid and sees new inserted
- // documents. The 'documentKey' field should now include the shard key, even before a
- // 'kNewShardDetected' operation has been generated by the migration of a chunk to a new
- // shard.
- assert.writeOK(mongosColl.insert({_id: 1, x: 1}));
- assert.writeOK(mongosCollOther.insert({_id: 1, y: 1}));
- cst.assertNextChangesEqual(
- {cursor: cursor, expectedChanges: [postShardCollectionChanges[0]]});
-
- // Move the [minKey, 0) chunk to shard1.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {x: -1},
- to: st.rs1.getURL(),
- _waitForDelete: true
- }));
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosCollOther.getFullName(),
- find: {y: -1},
- to: st.rs1.getURL(),
- _waitForDelete: true
- }));
-
- // Make sure the change stream cursor sees a document inserted on the recipient shard.
- assert.writeOK(mongosColl.insert({_id: -1, x: -1}));
- assert.writeOK(mongosCollOther.insert({_id: -1, y: -1}));
- cst.assertNextChangesEqual(
- {cursor: cursor, expectedChanges: [postShardCollectionChanges[1]]});
-
- // Confirm that we can resume the stream on the sharded collection using the token generated
- // while the collection was unsharded, whose documentKey contains the _id field but not the
- // shard key.
- let resumedCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: preShardCollectionResumeToken}}],
- collection: mongosColl
- });
-
- // Verify that we see both of the insertions which occurred after the collection was
- // sharded.
- cst.assertNextChangesEqual(
- {cursor: resumedCursor, expectedChanges: postShardCollectionChanges});
-
- // Test the behavior of a change stream when a sharded collection is dropped and recreated.
- cursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {}}, {$match: {"ns.coll": mongosColl.getName()}}],
- collection: collToWatch
- });
- assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
-
- // Insert a couple documents to shard1, creating a scenario where the getMore to shard0 will
- // indicate that the change stream is invalidated yet shard1 will still have data to return.
- assert.writeOK(mongosColl.insert({_id: -2, x: -2}));
- assert.writeOK(mongosColl.insert({_id: -3, x: -3}));
-
- // Drop and recreate the collection.
- mongosColl.drop();
- mongosDB.createCollection(mongosColl.getName());
- mongosColl.createIndex({z: 1});
-
- // Shard the collection on a different shard key and ensure that each shard has a chunk.
- st.shardColl(mongosColl.getName(), {z: 1}, {z: 0}, {z: -1}, mongosDB.getName());
-
- assert.writeOK(mongosColl.insert({_id: -1, z: -1}));
- assert.writeOK(mongosColl.insert({_id: 1, z: 1}));
-
- // Verify that the change stream picks up the inserts, however the shard key is missing
- // since the collection has since been dropped and recreated.
- cst.assertNextChangesEqual({
- cursor: cursor,
- expectedChanges: [
- {
- documentKey: {_id: -2},
- fullDocument: {_id: -2, x: -2},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- },
- {
- documentKey: {_id: -3},
- fullDocument: {_id: -3, x: -3},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- }
- ]
- });
-
- cst.cleanUp();
- }
+ ]
+ });
+
+ cst.cleanUp();
+}
- // First test against a change stream on a single collection.
- testUnshardedBecomesSharded(mongosColl.getName());
+// First test against a change stream on a single collection.
+testUnshardedBecomesSharded(mongosColl.getName());
- // Test against a change stream on the entire database.
- testUnshardedBecomesSharded(1);
+// Test against a change stream on the entire database.
+testUnshardedBecomesSharded(1);
- st.stop();
+st.stop();
})();
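
// Illustrative aside, not taken from the diff above: a minimal sketch of the resume-token
// pattern this test exercises, written against the plain mongo shell change stream API
// rather than the ChangeStreamTest helper. The namespace "test.example" and the inserted
// values are hypothetical, and a replica set or sharded cluster is assumed (change streams
// are not available on a standalone).
const exampleColl = db.getSiblingDB("test").example;
let stream = exampleColl.watch();                      // open a change stream on the collection
assert.writeOK(exampleColl.insert({_id: 0, x: 0}));
assert.soon(() => stream.hasNext());                   // wait for the insert notification
const firstEvent = stream.next();
assert.eq("insert", firstEvent.operationType);
const savedToken = firstEvent._id;                     // the resume token for this event

assert.writeOK(exampleColl.insert({_id: 1, x: 1}));

// Reopen the stream after the saved token; the {_id: 1} insert, which occurred after the
// tokened event, is returned by the resumed stream.
stream = exampleColl.watch([], {resumeAfter: savedToken});
assert.soon(() => stream.hasNext());
assert.eq(1, stream.next().documentKey._id);
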
diff --git a/jstests/sharding/change_streams_whole_db.js b/jstests/sharding/change_streams_whole_db.js
index 4051493c04f..322be4a19b4 100644
--- a/jstests/sharding/change_streams_whole_db.js
+++ b/jstests/sharding/change_streams_whole_db.js
@@ -1,192 +1,192 @@
// Tests the behavior of a change stream on a whole database in a sharded cluster.
// @tags: [uses_change_streams]
(function() {
- "use strict";
-
- load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
- load('jstests/aggregation/extras/utils.js'); // For assertErrorCode().
- load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
- load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection.
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
+"use strict";
+
+load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
+load('jstests/aggregation/extras/utils.js'); // For assertErrorCode().
+load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection.
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ shards: 2,
+ rs: {
+ nodes: 1,
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
}
-
- const st = new ShardingTest({
- shards: 2,
- rs: {
- nodes: 1,
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
- }
- });
-
- const mongosDB = st.s0.getDB("test");
- const mongosColl = mongosDB[jsTestName()];
-
- let cst = new ChangeStreamTest(mongosDB);
- let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
-
- // Test that if there are no changes, we return an empty batch.
- assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
-
- // Test that the change stream returns operations on the unsharded test collection.
- assert.writeOK(mongosColl.insert({_id: 0}));
- let expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0},
+});
+
+const mongosDB = st.s0.getDB("test");
+const mongosColl = mongosDB[jsTestName()];
+
+let cst = new ChangeStreamTest(mongosDB);
+let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+
+// Test that if there are no changes, we return an empty batch.
+assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
+
+// Test that the change stream returns operations on the unsharded test collection.
+assert.writeOK(mongosColl.insert({_id: 0}));
+let expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+// Create a new sharded collection.
+mongosDB.createCollection(jsTestName() + "_sharded_on_x");
+const mongosCollShardedOnX = mongosDB[jsTestName() + "_sharded_on_x"];
+
+// Shard, split, and move one chunk to shard1.
+st.shardColl(mongosCollShardedOnX.getName(), {x: 1}, {x: 0}, {x: 1}, mongosDB.getName());
+
+// Write a document to each chunk.
+assert.writeOK(mongosCollShardedOnX.insert({_id: 0, x: -1}));
+assert.writeOK(mongosCollShardedOnX.insert({_id: 1, x: 1}));
+
+// Verify that the change stream returns both inserts.
+expected = [
+ {
+ documentKey: {_id: 0, x: -1},
+ fullDocument: {_id: 0, x: -1},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
+ operationType: "insert",
+ },
+ {
+ documentKey: {_id: 1, x: 1},
+ fullDocument: {_id: 1, x: 1},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
+ operationType: "insert",
+ }
+];
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+
+// Now send inserts to both the sharded and unsharded collections, and verify that the change
+// stream returns them in order.
+assert.writeOK(mongosCollShardedOnX.insert({_id: 2, x: 2}));
+assert.writeOK(mongosColl.insert({_id: 1}));
+
+// Verify that the change stream returns both inserts.
+expected = [
+ {
+ documentKey: {_id: 2, x: 2},
+ fullDocument: {_id: 2, x: 2},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
+ operationType: "insert",
+ },
+ {
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1},
ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- // Create a new sharded collection.
- mongosDB.createCollection(jsTestName() + "_sharded_on_x");
- const mongosCollShardedOnX = mongosDB[jsTestName() + "_sharded_on_x"];
-
- // Shard, split, and move one chunk to shard1.
- st.shardColl(mongosCollShardedOnX.getName(), {x: 1}, {x: 0}, {x: 1}, mongosDB.getName());
-
- // Write a document to each chunk.
- assert.writeOK(mongosCollShardedOnX.insert({_id: 0, x: -1}));
- assert.writeOK(mongosCollShardedOnX.insert({_id: 1, x: 1}));
-
- // Verify that the change stream returns both inserts.
- expected = [
- {
- documentKey: {_id: 0, x: -1},
- fullDocument: {_id: 0, x: -1},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
- operationType: "insert",
- },
- {
- documentKey: {_id: 1, x: 1},
- fullDocument: {_id: 1, x: 1},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
- operationType: "insert",
- }
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
-
- // Now send inserts to both the sharded and unsharded collections, and verify that the change
- // streams returns them in order.
- assert.writeOK(mongosCollShardedOnX.insert({_id: 2, x: 2}));
- assert.writeOK(mongosColl.insert({_id: 1}));
-
- // Verify that the change stream returns both inserts.
- expected = [
- {
- documentKey: {_id: 2, x: 2},
- fullDocument: {_id: 2, x: 2},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
- operationType: "insert",
- },
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- }
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
-
- // Create a third sharded collection with a compound shard key.
- mongosDB.createCollection(jsTestName() + "_sharded_compound");
- const mongosCollShardedCompound = mongosDB[jsTestName() + "_sharded_compound"];
-
- // Shard, split, and move one chunk to shard1.
- st.shardColl(mongosCollShardedCompound.getName(),
- {y: 1, x: 1},
- {y: 1, x: MinKey},
- {y: 1, x: MinKey},
- mongosDB.getName());
-
- // Write a document to each chunk.
- assert.writeOK(mongosCollShardedCompound.insert({_id: 0, y: -1, x: 0}));
- assert.writeOK(mongosCollShardedCompound.insert({_id: 1, y: 1, x: 0}));
-
- // Verify that the change stream returns both inserts.
- expected = [
- {
- documentKey: {_id: 0, y: -1, x: 0},
- fullDocument: {_id: 0, y: -1, x: 0},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedCompound.getName()},
- operationType: "insert",
- },
- {
- documentKey: {_id: 1, y: 1, x: 0},
- fullDocument: {_id: 1, y: 1, x: 0},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedCompound.getName()},
- operationType: "insert",
- }
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
-
- // Send inserts to all 3 collections and verify that the results contain the correct
- // documentKeys and are in the correct order.
- assert.writeOK(mongosCollShardedOnX.insert({_id: 3, x: 3}));
- assert.writeOK(mongosColl.insert({_id: 3}));
- assert.writeOK(mongosCollShardedCompound.insert({_id: 2, x: 0, y: -2}));
-
- // Verify that the change stream returns both inserts.
- expected = [
- {
- documentKey: {_id: 3, x: 3},
- fullDocument: {_id: 3, x: 3},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
- operationType: "insert",
- },
- {
- documentKey: {_id: 3},
- fullDocument: {_id: 3},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- },
+ }
+];
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+
+// Create a third sharded collection with a compound shard key.
+mongosDB.createCollection(jsTestName() + "_sharded_compound");
+const mongosCollShardedCompound = mongosDB[jsTestName() + "_sharded_compound"];
+
+// Shard, split, and move one chunk to shard1.
+st.shardColl(mongosCollShardedCompound.getName(),
+ {y: 1, x: 1},
+ {y: 1, x: MinKey},
+ {y: 1, x: MinKey},
+ mongosDB.getName());
+
+// Write a document to each chunk.
+assert.writeOK(mongosCollShardedCompound.insert({_id: 0, y: -1, x: 0}));
+assert.writeOK(mongosCollShardedCompound.insert({_id: 1, y: 1, x: 0}));
+
+// Verify that the change stream returns both inserts.
+expected = [
+ {
+ documentKey: {_id: 0, y: -1, x: 0},
+ fullDocument: {_id: 0, y: -1, x: 0},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedCompound.getName()},
+ operationType: "insert",
+ },
+ {
+ documentKey: {_id: 1, y: 1, x: 0},
+ fullDocument: {_id: 1, y: 1, x: 0},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedCompound.getName()},
+ operationType: "insert",
+ }
+];
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+
+// Send inserts to all 3 collections and verify that the results contain the correct
+// documentKeys and are in the correct order.
+assert.writeOK(mongosCollShardedOnX.insert({_id: 3, x: 3}));
+assert.writeOK(mongosColl.insert({_id: 3}));
+assert.writeOK(mongosCollShardedCompound.insert({_id: 2, x: 0, y: -2}));
+
+// Verify that the change stream returns all three inserts.
+expected = [
+ {
+ documentKey: {_id: 3, x: 3},
+ fullDocument: {_id: 3, x: 3},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
+ operationType: "insert",
+ },
+ {
+ documentKey: {_id: 3},
+ fullDocument: {_id: 3},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ },
+ {
+ documentKey: {_id: 2, x: 0, y: -2},
+ fullDocument: {_id: 2, x: 0, y: -2},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedCompound.getName()},
+ operationType: "insert",
+ },
+];
+
+const results = cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+// Store the resume token of the first insert to use after dropping the collection.
+const resumeTokenBeforeDrop = results[0]._id;
+
+// Write one more document to the collection that will be dropped, to be returned after
+// resuming.
+assert.writeOK(mongosCollShardedOnX.insert({_id: 4, x: 4}));
+
+// Drop the collection, invalidating the open change stream.
+assertDropCollection(mongosDB, mongosCollShardedOnX.getName());
+
+// Resume the change stream from before the collection drop, and verify that the documentKey
+// field contains the extracted shard key from the resume token.
+cursor = cst.startWatchingChanges({
+ pipeline: [
+ {$changeStream: {resumeAfter: resumeTokenBeforeDrop}},
+ {$match: {"ns.coll": mongosCollShardedOnX.getName()}}
+ ],
+ collection: 1
+});
+cst.assertNextChangesEqual({
+ cursor: cursor,
+ expectedChanges: [
{
- documentKey: {_id: 2, x: 0, y: -2},
- fullDocument: {_id: 2, x: 0, y: -2},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedCompound.getName()},
- operationType: "insert",
+ documentKey: {_id: 4, x: 4},
+ fullDocument: {_id: 4, x: 4},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
+ operationType: "insert",
},
- ];
-
- const results = cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
- // Store the resume token of the first insert to use after dropping the collection.
- const resumeTokenBeforeDrop = results[0]._id;
-
- // Write one more document to the collection that will be dropped, to be returned after
- // resuming.
- assert.writeOK(mongosCollShardedOnX.insert({_id: 4, x: 4}));
-
- // Drop the collection, invalidating the open change stream.
- assertDropCollection(mongosDB, mongosCollShardedOnX.getName());
-
- // Resume the change stream from before the collection drop, and verify that the documentKey
- // field contains the extracted shard key from the resume token.
- cursor = cst.startWatchingChanges({
- pipeline: [
- {$changeStream: {resumeAfter: resumeTokenBeforeDrop}},
- {$match: {"ns.coll": mongosCollShardedOnX.getName()}}
- ],
- collection: 1
- });
- cst.assertNextChangesEqual({
- cursor: cursor,
- expectedChanges: [
- {
- documentKey: {_id: 4, x: 4},
- fullDocument: {_id: 4, x: 4},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
- operationType: "insert",
- },
- ]
- });
-
- cst.cleanUp();
-
- st.stop();
+ ]
+});
+
+cst.cleanUp();
+
+st.stop();
})();
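
// Illustrative aside, not taken from the diff above: the same whole-database stream shape,
// opened through the shell's db.watch() helper with a $match on ns.coll -- equivalent to the
// {$changeStream: {}} pipeline the test runs with collection: 1. The collection names
// ("orders", "ignored") are hypothetical placeholders.
const exampleDB = db.getSiblingDB("test");
const wholeDbStream = exampleDB.watch([{$match: {"ns.coll": "orders"}}]);
assert.writeOK(exampleDB.orders.insert({_id: 1, total: 10}));
assert.writeOK(exampleDB.ignored.insert({_id: 1}));    // filtered out by the $match stage
assert.soon(() => wholeDbStream.hasNext());
const evt = wholeDbStream.next();
assert.eq("insert", evt.operationType);
assert.eq({db: "test", coll: "orders"}, evt.ns);
assert.eq({_id: 1}, evt.documentKey);                  // unsharded, so just the _id field
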
diff --git a/jstests/sharding/cleanup_orphaned_auth.js b/jstests/sharding/cleanup_orphaned_auth.js
index f32a66ee242..a54030fbf12 100644
--- a/jstests/sharding/cleanup_orphaned_auth.js
+++ b/jstests/sharding/cleanup_orphaned_auth.js
@@ -3,60 +3,58 @@
//
(function() {
- 'use strict';
+'use strict';
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
- function assertUnauthorized(res, msg) {
- if (assert._debug && msg)
- print("in assert for: " + msg);
+function assertUnauthorized(res, msg) {
+ if (assert._debug && msg)
+ print("in assert for: " + msg);
- if (res.ok == 0 && (res.errmsg.startsWith('not authorized') ||
- res.errmsg.match(/requires authentication/)))
- return;
+ if (res.ok == 0 &&
+ (res.errmsg.startsWith('not authorized') || res.errmsg.match(/requires authentication/)))
+ return;
- var finalMsg = "command worked when it should have been unauthorized: " + tojson(res);
- if (msg) {
- finalMsg += " : " + msg;
- }
- doassert(finalMsg);
+ var finalMsg = "command worked when it should have been unauthorized: " + tojson(res);
+ if (msg) {
+ finalMsg += " : " + msg;
}
+ doassert(finalMsg);
+}
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest({
- auth: true,
- other: {keyFile: 'jstests/libs/key1', useHostname: false, shardAsReplicaSet: false}
- });
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest({
+ auth: true,
+ other: {keyFile: 'jstests/libs/key1', useHostname: false, shardAsReplicaSet: false}
+});
- var shardAdmin = st.shard0.getDB('admin');
- shardAdmin.createUser(
- {user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
- shardAdmin.auth('admin', 'x');
+var shardAdmin = st.shard0.getDB('admin');
+shardAdmin.createUser({user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
+shardAdmin.auth('admin', 'x');
- var mongos = st.s0;
- var mongosAdmin = mongos.getDB('admin');
- var coll = mongos.getCollection('foo.bar');
+var mongos = st.s0;
+var mongosAdmin = mongos.getDB('admin');
+var coll = mongos.getCollection('foo.bar');
- mongosAdmin.createUser(
- {user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
- mongosAdmin.auth('admin', 'x');
+mongosAdmin.createUser({user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
+mongosAdmin.auth('admin', 'x');
- assert.commandWorked(mongosAdmin.runCommand({enableSharding: coll.getDB().getName()}));
+assert.commandWorked(mongosAdmin.runCommand({enableSharding: coll.getDB().getName()}));
- assert.commandWorked(
- mongosAdmin.runCommand({shardCollection: coll.getFullName(), key: {_id: 'hashed'}}));
+assert.commandWorked(
+ mongosAdmin.runCommand({shardCollection: coll.getFullName(), key: {_id: 'hashed'}}));
- // cleanupOrphaned requires auth as admin user.
- assert.commandWorked(shardAdmin.logout());
- assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
+// cleanupOrphaned requires auth as admin user.
+assert.commandWorked(shardAdmin.logout());
+assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
- var fooDB = st.shard0.getDB('foo');
- shardAdmin.auth('admin', 'x');
- fooDB.createUser({user: 'user', pwd: 'x', roles: ['readWrite', 'dbAdmin']});
- shardAdmin.logout();
- fooDB.auth('user', 'x');
- assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
+var fooDB = st.shard0.getDB('foo');
+shardAdmin.auth('admin', 'x');
+fooDB.createUser({user: 'user', pwd: 'x', roles: ['readWrite', 'dbAdmin']});
+shardAdmin.logout();
+fooDB.auth('user', 'x');
+assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/cleanup_orphaned_basic.js b/jstests/sharding/cleanup_orphaned_basic.js
index 66fe7924157..fb8893d5677 100644
--- a/jstests/sharding/cleanup_orphaned_basic.js
+++ b/jstests/sharding/cleanup_orphaned_basic.js
@@ -4,119 +4,117 @@
//
(function() {
- "use strict";
-
- /*****************************************************************************
- * Unsharded mongod.
- ****************************************************************************/
-
- // cleanupOrphaned fails against unsharded mongod.
- var mongod = MongoRunner.runMongod();
- assert.commandFailed(mongod.getDB('admin').runCommand({cleanupOrphaned: 'foo.bar'}));
-
- /*****************************************************************************
- * Bad invocations of cleanupOrphaned command.
- ****************************************************************************/
-
- var st = new ShardingTest({other: {rs: true, rsOptions: {nodes: 2}}});
-
- var mongos = st.s0;
- var mongosAdmin = mongos.getDB('admin');
- var dbName = 'foo';
- var collectionName = 'bar';
- var ns = dbName + '.' + collectionName;
- var coll = mongos.getCollection(ns);
-
- // cleanupOrphaned fails against mongos ('no such command'): it must be run
- // on mongod.
- assert.commandFailed(mongosAdmin.runCommand({cleanupOrphaned: ns}));
-
- // cleanupOrphaned must be run on admin DB.
- var shardFooDB = st.shard0.getDB(dbName);
- assert.commandFailed(shardFooDB.runCommand({cleanupOrphaned: ns}));
-
- // Must be run on primary.
- var secondaryAdmin = st.rs0.getSecondary().getDB('admin');
- var response = secondaryAdmin.runCommand({cleanupOrphaned: ns});
- print('cleanupOrphaned on secondary:');
- printjson(response);
- assert.commandFailed(response);
-
- var shardAdmin = st.shard0.getDB('admin');
- var badNS = ' \\/."*<>:|?';
- assert.commandFailed(shardAdmin.runCommand({cleanupOrphaned: badNS}));
-
- // cleanupOrphaned works on sharded collection.
- assert.commandWorked(mongosAdmin.runCommand({enableSharding: coll.getDB().getName()}));
-
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
-
- assert.commandWorked(mongosAdmin.runCommand({shardCollection: ns, key: {_id: 1}}));
-
- assert.commandWorked(shardAdmin.runCommand({cleanupOrphaned: ns}));
-
- /*****************************************************************************
- * Empty shard.
- ****************************************************************************/
-
- // Ping shard[1] so it will be aware that it is sharded. Otherwise cleanupOrphaned
- // may fail.
- assert.commandWorked(mongosAdmin.runCommand({
- moveChunk: coll.getFullName(),
- find: {_id: 1},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- assert.commandWorked(mongosAdmin.runCommand({
- moveChunk: coll.getFullName(),
- find: {_id: 1},
- to: st.shard0.shardName,
- _waitForDelete: true
- }));
-
- // Collection's home is shard0, there are no chunks assigned to shard1.
- st.shard1.getCollection(ns).insert({});
- assert.eq(null, st.shard1.getDB(dbName).getLastError());
- assert.eq(1, st.shard1.getCollection(ns).count());
- response = st.shard1.getDB('admin').runCommand({cleanupOrphaned: ns});
- assert.commandWorked(response);
- assert.eq({_id: {$maxKey: 1}}, response.stoppedAtKey);
- assert.eq(0,
- st.shard1.getCollection(ns).count(),
- "cleanupOrphaned didn't delete orphan on empty shard.");
-
- /*****************************************************************************
- * Bad startingFromKeys.
- ****************************************************************************/
-
- // startingFromKey of MaxKey.
- response = shardAdmin.runCommand({cleanupOrphaned: ns, startingFromKey: {_id: MaxKey}});
- assert.commandWorked(response);
- assert.eq(null, response.stoppedAtKey);
-
- // startingFromKey doesn't match number of fields in shard key.
- assert.commandFailed(shardAdmin.runCommand(
- {cleanupOrphaned: ns, startingFromKey: {someKey: 'someValue', someOtherKey: 1}}));
-
- // startingFromKey matches number of fields in shard key but not field names.
- assert.commandFailed(
- shardAdmin.runCommand({cleanupOrphaned: ns, startingFromKey: {someKey: 'someValue'}}));
-
- var coll2 = mongos.getCollection('foo.baz');
-
- assert.commandWorked(
- mongosAdmin.runCommand({shardCollection: coll2.getFullName(), key: {a: 1, b: 1}}));
-
- // startingFromKey doesn't match number of fields in shard key.
- assert.commandFailed(shardAdmin.runCommand(
- {cleanupOrphaned: coll2.getFullName(), startingFromKey: {someKey: 'someValue'}}));
-
- // startingFromKey matches number of fields in shard key but not field names.
- assert.commandFailed(shardAdmin.runCommand(
- {cleanupOrphaned: coll2.getFullName(), startingFromKey: {a: 'someValue', c: 1}}));
-
- st.stop();
- MongoRunner.stopMongod(mongod);
-
+"use strict";
+
+/*****************************************************************************
+ * Unsharded mongod.
+ ****************************************************************************/
+
+// cleanupOrphaned fails against unsharded mongod.
+var mongod = MongoRunner.runMongod();
+assert.commandFailed(mongod.getDB('admin').runCommand({cleanupOrphaned: 'foo.bar'}));
+
+/*****************************************************************************
+ * Bad invocations of cleanupOrphaned command.
+ ****************************************************************************/
+
+var st = new ShardingTest({other: {rs: true, rsOptions: {nodes: 2}}});
+
+var mongos = st.s0;
+var mongosAdmin = mongos.getDB('admin');
+var dbName = 'foo';
+var collectionName = 'bar';
+var ns = dbName + '.' + collectionName;
+var coll = mongos.getCollection(ns);
+
+// cleanupOrphaned fails against mongos ('no such command'): it must be run
+// on mongod.
+assert.commandFailed(mongosAdmin.runCommand({cleanupOrphaned: ns}));
+
+// cleanupOrphaned must be run on admin DB.
+var shardFooDB = st.shard0.getDB(dbName);
+assert.commandFailed(shardFooDB.runCommand({cleanupOrphaned: ns}));
+
+// Must be run on primary.
+var secondaryAdmin = st.rs0.getSecondary().getDB('admin');
+var response = secondaryAdmin.runCommand({cleanupOrphaned: ns});
+print('cleanupOrphaned on secondary:');
+printjson(response);
+assert.commandFailed(response);
+
+var shardAdmin = st.shard0.getDB('admin');
+var badNS = ' \\/."*<>:|?';
+assert.commandFailed(shardAdmin.runCommand({cleanupOrphaned: badNS}));
+
+// cleanupOrphaned works on sharded collection.
+assert.commandWorked(mongosAdmin.runCommand({enableSharding: coll.getDB().getName()}));
+
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
+
+assert.commandWorked(mongosAdmin.runCommand({shardCollection: ns, key: {_id: 1}}));
+
+assert.commandWorked(shardAdmin.runCommand({cleanupOrphaned: ns}));
+
+/*****************************************************************************
+ * Empty shard.
+ ****************************************************************************/
+
+// Ping shard1 so that it is aware that the collection is sharded. Otherwise cleanupOrphaned
+// may fail.
+assert.commandWorked(mongosAdmin.runCommand({
+ moveChunk: coll.getFullName(),
+ find: {_id: 1},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+assert.commandWorked(mongosAdmin.runCommand({
+ moveChunk: coll.getFullName(),
+ find: {_id: 1},
+ to: st.shard0.shardName,
+ _waitForDelete: true
+}));
+
+// The collection's home is shard0; there are no chunks assigned to shard1.
+st.shard1.getCollection(ns).insert({});
+assert.eq(null, st.shard1.getDB(dbName).getLastError());
+assert.eq(1, st.shard1.getCollection(ns).count());
+response = st.shard1.getDB('admin').runCommand({cleanupOrphaned: ns});
+assert.commandWorked(response);
+assert.eq({_id: {$maxKey: 1}}, response.stoppedAtKey);
+assert.eq(
+ 0, st.shard1.getCollection(ns).count(), "cleanupOrphaned didn't delete orphan on empty shard.");
+
+/*****************************************************************************
+ * Bad startingFromKeys.
+ ****************************************************************************/
+
+// startingFromKey of MaxKey.
+response = shardAdmin.runCommand({cleanupOrphaned: ns, startingFromKey: {_id: MaxKey}});
+assert.commandWorked(response);
+assert.eq(null, response.stoppedAtKey);
+
+// startingFromKey doesn't match number of fields in shard key.
+assert.commandFailed(shardAdmin.runCommand(
+ {cleanupOrphaned: ns, startingFromKey: {someKey: 'someValue', someOtherKey: 1}}));
+
+// startingFromKey matches number of fields in shard key but not field names.
+assert.commandFailed(
+ shardAdmin.runCommand({cleanupOrphaned: ns, startingFromKey: {someKey: 'someValue'}}));
+
+var coll2 = mongos.getCollection('foo.baz');
+
+assert.commandWorked(
+ mongosAdmin.runCommand({shardCollection: coll2.getFullName(), key: {a: 1, b: 1}}));
+
+// startingFromKey doesn't match number of fields in shard key.
+assert.commandFailed(shardAdmin.runCommand(
+ {cleanupOrphaned: coll2.getFullName(), startingFromKey: {someKey: 'someValue'}}));
+
+// startingFromKey matches number of fields in shard key but not field names.
+assert.commandFailed(shardAdmin.runCommand(
+ {cleanupOrphaned: coll2.getFullName(), startingFromKey: {a: 'someValue', c: 1}}));
+
+st.stop();
+MongoRunner.stopMongod(mongod);
})();
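
// Illustrative aside, not taken from the diff above: the cleanupOrphaned / stoppedAtKey loop
// that these tests depend on, issued directly against a shard primary. The connection
// variable shardPrimaryConn and the namespace "foo.bar" are placeholders.
const shardAdminDB = shardPrimaryConn.getDB("admin");
let res = shardAdminDB.runCommand({cleanupOrphaned: "foo.bar"});
while (res.ok && res.stoppedAtKey) {
    // Each call removes at most one unowned range and reports where the next call should
    // start; a response without stoppedAtKey means the whole key space has been scanned.
    res = shardAdminDB.runCommand(
        {cleanupOrphaned: "foo.bar", startingFromKey: res.stoppedAtKey});
}
assert.commandWorked(res);
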
diff --git a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
index 6341dbb887d..01576805b49 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
@@ -8,151 +8,150 @@ load('./jstests/libs/chunk_manipulation_util.js');
load('./jstests/libs/cleanup_orphaned_util.js');
(function() {
- "use strict";
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- var st = new ShardingTest({shards: 2, other: {separateConfig: true}});
-
- var mongos = st.s0, admin = mongos.getDB('admin'), dbName = 'foo', ns = dbName + '.bar',
- coll = mongos.getCollection(ns), donor = st.shard0, recipient = st.shard1,
- donorColl = donor.getCollection(ns), recipientColl = st.shard1.getCollection(ns);
-
- // Three chunks of 10 documents each, with ids -20, -18, -16, ..., 38.
- // Donor: [minKey, 0) [0, 20)
- // Recipient: [20, maxKey)
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName}));
- assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 20}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: 20}, to: st.shard1.shardName, _waitForDelete: true}));
-
- jsTest.log('Inserting 20 docs into shard 0....');
- for (var i = -20; i < 20; i += 2) {
- coll.insert({_id: i});
- }
- assert.eq(null, coll.getDB().getLastError());
- assert.eq(20, donorColl.count());
-
- jsTest.log('Inserting 10 docs into shard 1....');
- for (i = 20; i < 40; i += 2) {
- coll.insert({_id: i});
- }
- assert.eq(null, coll.getDB().getLastError());
- assert.eq(10, recipientColl.count());
-
- //
- // Start a moveChunk in the background. Move chunk [0, 20), which has 10 docs,
- // from shard 0 to shard 1. Pause it at some points in the donor's and
- // recipient's work flows, and test cleanupOrphaned on shard 0 and shard 1.
- //
-
- jsTest.log('setting failpoint startedMoveChunk (donor) and cloned (recipient)');
- pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
- pauseMigrateAtStep(recipient, migrateStepNames.cloned);
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {_id: 0}, null, coll.getFullName(), st.shard1.shardName);
-
- waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
- waitForMigrateStep(recipient, migrateStepNames.cloned);
- // Recipient has run _recvChunkStart and begun its migration thread; docs have
- // been cloned and chunk [0, 20) is noted as 'pending' on recipient.
-
- // Donor: [minKey, 0) [0, 20)
- // Recipient (pending): [0, 20)
- // Recipient: [20, maxKey)
-
- // Create orphans. I'll show an orphaned doc on donor with _id 26 like {26}:
- //
- // Donor: [minKey, 0) [0, 20) {26}
- // Recipient (pending): [0, 20)
- // Recipient: {-1} [20, maxKey)
- donorColl.insert([{_id: 26}]);
- assert.eq(null, donorColl.getDB().getLastError());
- assert.eq(21, donorColl.count());
- recipientColl.insert([{_id: -1}]);
- assert.eq(null, recipientColl.getDB().getLastError());
- assert.eq(21, recipientColl.count());
-
- cleanupOrphaned(donor, ns, 2);
- assert.eq(20, donorColl.count());
- cleanupOrphaned(recipient, ns, 2);
- assert.eq(20, recipientColl.count());
-
- jsTest.log('Inserting document on donor side');
- // Inserted a new document (not an orphan) with id 19, which belongs in the
- // [0, 20) chunk.
- donorColl.insert({_id: 19});
- assert.eq(null, coll.getDB().getLastError());
- assert.eq(21, donorColl.count());
-
- // Recipient transfers this modification.
- jsTest.log('Let migrate proceed to transferredMods');
- proceedToMigrateStep(recipient, migrateStepNames.catchup);
- jsTest.log('Done letting migrate proceed to transferredMods');
-
- assert.eq(21, recipientColl.count(), "Recipient didn't transfer inserted document.");
-
- cleanupOrphaned(donor, ns, 2);
- assert.eq(21, donorColl.count());
- cleanupOrphaned(recipient, ns, 2);
- assert.eq(21, recipientColl.count());
-
- // Create orphans.
- donorColl.insert([{_id: 26}]);
- assert.eq(null, donorColl.getDB().getLastError());
- assert.eq(22, donorColl.count());
- recipientColl.insert([{_id: -1}]);
- assert.eq(null, recipientColl.getDB().getLastError());
- assert.eq(22, recipientColl.count());
-
- cleanupOrphaned(donor, ns, 2);
- assert.eq(21, donorColl.count());
- cleanupOrphaned(recipient, ns, 2);
- assert.eq(21, recipientColl.count());
-
- // Recipient has been waiting for donor to call _recvChunkCommit.
- pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
- unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
- proceedToMigrateStep(recipient, migrateStepNames.steady);
- proceedToMigrateStep(recipient, migrateStepNames.done);
-
- // Create orphans.
- donorColl.insert([{_id: 26}]);
- assert.eq(null, donorColl.getDB().getLastError());
- assert.eq(22, donorColl.count());
- recipientColl.insert([{_id: -1}]);
- assert.eq(null, recipientColl.getDB().getLastError());
- assert.eq(22, recipientColl.count());
-
- cleanupOrphaned(donor, ns, 2);
- assert.eq(21, donorColl.count());
- cleanupOrphaned(recipient, ns, 2);
- assert.eq(21, recipientColl.count());
-
- // Let recipient side of the migration finish so that the donor can proceed with the commit.
- unpauseMigrateAtStep(recipient, migrateStepNames.done);
- waitForMoveChunkStep(donor, moveChunkStepNames.committed);
-
- // Donor is paused after the migration chunk commit, but before it finishes the cleanup that
- // includes running the range deleter. Thus it technically has orphaned data -- commit is
- // complete, but moved data is still present. cleanupOrphaned can remove the data the donor
- // would otherwise clean up itself in its post-move delete phase.
- cleanupOrphaned(donor, ns, 2);
- assert.eq(10, donorColl.count());
-
- // Let the donor migration finish.
- unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
- joinMoveChunk();
-
- // Donor has finished post-move delete, which had nothing to remove with the range deleter
- // because of the preemptive cleanupOrphaned call.
- assert.eq(10, donorColl.count());
- assert.eq(21, recipientColl.count());
- assert.eq(31, coll.count());
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+"use strict";
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+var st = new ShardingTest({shards: 2, other: {separateConfig: true}});
+
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = 'foo', ns = dbName + '.bar',
+ coll = mongos.getCollection(ns), donor = st.shard0, recipient = st.shard1,
+ donorColl = donor.getCollection(ns), recipientColl = st.shard1.getCollection(ns);
+
+// Three chunks of 10 documents each, with ids -20, -18, -16, ..., 38.
+// Donor: [minKey, 0) [0, 20)
+// Recipient: [20, maxKey)
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 20}}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 20}, to: st.shard1.shardName, _waitForDelete: true}));
+
+jsTest.log('Inserting 20 docs into shard 0....');
+for (var i = -20; i < 20; i += 2) {
+ coll.insert({_id: i});
+}
+assert.eq(null, coll.getDB().getLastError());
+assert.eq(20, donorColl.count());
+
+jsTest.log('Inserting 10 docs into shard 1....');
+for (i = 20; i < 40; i += 2) {
+ coll.insert({_id: i});
+}
+assert.eq(null, coll.getDB().getLastError());
+assert.eq(10, recipientColl.count());
+//
+// Start a moveChunk in the background. Move chunk [0, 20), which has 10 docs,
+// from shard 0 to shard 1. Pause it at some points in the donor's and
+// recipient's work flows, and test cleanupOrphaned on shard 0 and shard 1.
+//
+
+jsTest.log('setting failpoint startedMoveChunk (donor) and cloned (recipient)');
+pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+pauseMigrateAtStep(recipient, migrateStepNames.cloned);
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {_id: 0}, null, coll.getFullName(), st.shard1.shardName);
+
+waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
+waitForMigrateStep(recipient, migrateStepNames.cloned);
+// Recipient has run _recvChunkStart and begun its migration thread; docs have
+// been cloned and chunk [0, 20) is noted as 'pending' on recipient.
+
+// Donor: [minKey, 0) [0, 20)
+// Recipient (pending): [0, 20)
+// Recipient: [20, maxKey)
+
+// Create orphans. I'll show an orphaned doc on donor with _id 26 like {26}:
+//
+// Donor: [minKey, 0) [0, 20) {26}
+// Recipient (pending): [0, 20)
+// Recipient: {-1} [20, maxKey)
+donorColl.insert([{_id: 26}]);
+assert.eq(null, donorColl.getDB().getLastError());
+assert.eq(21, donorColl.count());
+recipientColl.insert([{_id: -1}]);
+assert.eq(null, recipientColl.getDB().getLastError());
+assert.eq(21, recipientColl.count());
+
+cleanupOrphaned(donor, ns, 2);
+assert.eq(20, donorColl.count());
+cleanupOrphaned(recipient, ns, 2);
+assert.eq(20, recipientColl.count());
+
+jsTest.log('Inserting document on donor side');
+// Inserted a new document (not an orphan) with id 19, which belongs in the
+// [0, 20) chunk.
+donorColl.insert({_id: 19});
+assert.eq(null, coll.getDB().getLastError());
+assert.eq(21, donorColl.count());
+
+// Recipient transfers this modification.
+jsTest.log('Let migrate proceed to transferredMods');
+proceedToMigrateStep(recipient, migrateStepNames.catchup);
+jsTest.log('Done letting migrate proceed to transferredMods');
+
+assert.eq(21, recipientColl.count(), "Recipient didn't transfer inserted document.");
+
+cleanupOrphaned(donor, ns, 2);
+assert.eq(21, donorColl.count());
+cleanupOrphaned(recipient, ns, 2);
+assert.eq(21, recipientColl.count());
+
+// Create orphans.
+donorColl.insert([{_id: 26}]);
+assert.eq(null, donorColl.getDB().getLastError());
+assert.eq(22, donorColl.count());
+recipientColl.insert([{_id: -1}]);
+assert.eq(null, recipientColl.getDB().getLastError());
+assert.eq(22, recipientColl.count());
+
+cleanupOrphaned(donor, ns, 2);
+assert.eq(21, donorColl.count());
+cleanupOrphaned(recipient, ns, 2);
+assert.eq(21, recipientColl.count());
+
+// Recipient has been waiting for donor to call _recvChunkCommit.
+pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+proceedToMigrateStep(recipient, migrateStepNames.steady);
+proceedToMigrateStep(recipient, migrateStepNames.done);
+
+// Create orphans.
+donorColl.insert([{_id: 26}]);
+assert.eq(null, donorColl.getDB().getLastError());
+assert.eq(22, donorColl.count());
+recipientColl.insert([{_id: -1}]);
+assert.eq(null, recipientColl.getDB().getLastError());
+assert.eq(22, recipientColl.count());
+
+cleanupOrphaned(donor, ns, 2);
+assert.eq(21, donorColl.count());
+cleanupOrphaned(recipient, ns, 2);
+assert.eq(21, recipientColl.count());
+
+// Let recipient side of the migration finish so that the donor can proceed with the commit.
+unpauseMigrateAtStep(recipient, migrateStepNames.done);
+waitForMoveChunkStep(donor, moveChunkStepNames.committed);
+
+// Donor is paused after the migration chunk commit, but before it finishes the cleanup that
+// includes running the range deleter. Thus it technically has orphaned data -- commit is
+// complete, but moved data is still present. cleanupOrphaned can remove the data the donor
+// would otherwise clean up itself in its post-move delete phase.
+cleanupOrphaned(donor, ns, 2);
+assert.eq(10, donorColl.count());
+
+// Let the donor migration finish.
+unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+joinMoveChunk();
+
+// Donor has finished post-move delete, which had nothing to remove with the range deleter
+// because of the preemptive cleanupOrphaned call.
+assert.eq(10, donorColl.count());
+assert.eq(21, recipientColl.count());
+assert.eq(31, coll.count());
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
index 8fb97e4aa1a..34a5f8d89fc 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
@@ -9,121 +9,119 @@ load('./jstests/libs/chunk_manipulation_util.js');
load('./jstests/libs/cleanup_orphaned_util.js');
(function() {
- "use strict";
+"use strict";
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- var st = new ShardingTest({shards: 2, other: {separateConfig: true}});
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+var st = new ShardingTest({shards: 2, other: {separateConfig: true}});
- var mongos = st.s0, admin = mongos.getDB('admin'), dbName = 'foo', ns = dbName + '.bar',
- coll = mongos.getCollection(ns);
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = 'foo', ns = dbName + '.bar',
+ coll = mongos.getCollection(ns);
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName}));
- assert.commandWorked(admin.runCommand({shardCollection: ns, key: {key: 'hashed'}}));
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: ns, key: {key: 'hashed'}}));
- // Makes four chunks by default, two on each shard.
- var chunks = st.config.chunks.find({ns: ns}).sort({min: 1}).toArray();
- assert.eq(4, chunks.length);
+// Makes four chunks by default, two on each shard.
+var chunks = st.config.chunks.find({ns: ns}).sort({min: 1}).toArray();
+assert.eq(4, chunks.length);
- var chunkWithDoc = chunks[1];
- print('Trying to make doc that hashes to this chunk: ' + tojson(chunkWithDoc));
+var chunkWithDoc = chunks[1];
+print('Trying to make doc that hashes to this chunk: ' + tojson(chunkWithDoc));
- var found = false;
- for (var i = 0; i < 10000; i++) {
- var doc = {key: ObjectId()}, hash = mongos.adminCommand({_hashBSONElement: doc.key}).out;
+var found = false;
+for (var i = 0; i < 10000; i++) {
+ var doc = {key: ObjectId()}, hash = mongos.adminCommand({_hashBSONElement: doc.key}).out;
- print('doc.key ' + doc.key + ' hashes to ' + hash);
+ print('doc.key ' + doc.key + ' hashes to ' + hash);
- if (mongos.getCollection('config.chunks')
- .findOne(
- {_id: chunkWithDoc._id, 'min.key': {$lte: hash}, 'max.key': {$gt: hash}})) {
- found = true;
- break;
- }
+ if (mongos.getCollection('config.chunks')
+ .findOne({_id: chunkWithDoc._id, 'min.key': {$lte: hash}, 'max.key': {$gt: hash}})) {
+ found = true;
+ break;
}
+}
- assert(found, "Couldn't make doc that belongs to chunk 1.");
- print('Doc: ' + tojson(doc));
- coll.insert(doc);
- assert.eq(null, coll.getDB().getLastError());
-
- //
- // Start a moveChunk in the background from shard 0 to shard 1. Pause it at
- // some points in the donor's and recipient's work flows, and test
- // cleanupOrphaned.
- //
-
- var donor, recip;
- if (chunkWithDoc.shard == st.shard0.shardName) {
- donor = st.shard0;
- recip = st.shard1;
- } else {
- recip = st.shard0;
- donor = st.shard1;
- }
+assert(found, "Couldn't make doc that belongs to chunk 1.");
+print('Doc: ' + tojson(doc));
+coll.insert(doc);
+assert.eq(null, coll.getDB().getLastError());
- jsTest.log('setting failpoint startedMoveChunk (donor) and cloned (recipient)');
- pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
- pauseMigrateAtStep(recip, migrateStepNames.cloned);
-
- var joinMoveChunk = moveChunkParallel(staticMongod,
- st.s0.host,
- null,
- [chunkWithDoc.min, chunkWithDoc.max], // bounds
- coll.getFullName(),
- recip.shardName);
-
- waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
- waitForMigrateStep(recip, migrateStepNames.cloned);
- proceedToMigrateStep(recip, migrateStepNames.catchup);
- // recipient has run _recvChunkStart and begun its migration thread;
- // 'doc' has been cloned and chunkWithDoc is noted as 'pending' on recipient.
-
- var donorColl = donor.getCollection(ns), recipColl = recip.getCollection(ns);
-
- assert.eq(1, donorColl.count());
- assert.eq(1, recipColl.count());
-
- // cleanupOrphaned should go through two iterations, since the default chunk
- // setup leaves two unowned ranges on each shard.
- cleanupOrphaned(donor, ns, 2);
- cleanupOrphaned(recip, ns, 2);
- assert.eq(1, donorColl.count());
- assert.eq(1, recipColl.count());
-
- // recip has been waiting for donor to call _recvChunkCommit.
- pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
- unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
- proceedToMigrateStep(recip, migrateStepNames.steady);
- proceedToMigrateStep(recip, migrateStepNames.done);
-
- cleanupOrphaned(donor, ns, 2);
- assert.eq(1, donorColl.count());
- cleanupOrphaned(recip, ns, 2);
- assert.eq(1, recipColl.count());
-
- // Let recip side of the migration finish so that the donor proceeds with the commit.
- unpauseMigrateAtStep(recip, migrateStepNames.done);
- waitForMoveChunkStep(donor, moveChunkStepNames.committed);
-
- // Donor is paused after the migration chunk commit, but before it finishes the cleanup that
- // includes running the range deleter. Thus it technically has orphaned data -- commit is
- // complete, but moved data is still present. cleanupOrphaned can remove the data the donor
- // would otherwise clean up itself in its post-move delete phase.
- cleanupOrphaned(donor, ns, 2);
- assert.eq(0, donorColl.count());
-
- // Let migration thread complete.
- unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
- joinMoveChunk();
-
- // donor has finished post-move delete, which had nothing to remove with the range deleter
- // because of the preemptive cleanupOrphaned call.
- assert.eq(0, donorColl.count());
- assert.eq(1, recipColl.count());
- assert.eq(1, coll.count());
-
- MongoRunner.stopMongod(staticMongod);
- st.stop();
+//
+// Start a moveChunk in the background from shard 0 to shard 1. Pause it at
+// some points in the donor's and recipient's work flows, and test
+// cleanupOrphaned.
+//
+var donor, recip;
+if (chunkWithDoc.shard == st.shard0.shardName) {
+ donor = st.shard0;
+ recip = st.shard1;
+} else {
+ recip = st.shard0;
+ donor = st.shard1;
+}
+
+jsTest.log('setting failpoint startedMoveChunk (donor) and cloned (recipient)');
+pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+pauseMigrateAtStep(recip, migrateStepNames.cloned);
+
+var joinMoveChunk = moveChunkParallel(staticMongod,
+ st.s0.host,
+ null,
+ [chunkWithDoc.min, chunkWithDoc.max], // bounds
+ coll.getFullName(),
+ recip.shardName);
+
+waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
+waitForMigrateStep(recip, migrateStepNames.cloned);
+proceedToMigrateStep(recip, migrateStepNames.catchup);
+// recipient has run _recvChunkStart and begun its migration thread;
+// 'doc' has been cloned and chunkWithDoc is noted as 'pending' on recipient.
+
+var donorColl = donor.getCollection(ns), recipColl = recip.getCollection(ns);
+
+assert.eq(1, donorColl.count());
+assert.eq(1, recipColl.count());
+
+// cleanupOrphaned should go through two iterations, since the default chunk
+// setup leaves two unowned ranges on each shard.
+cleanupOrphaned(donor, ns, 2);
+cleanupOrphaned(recip, ns, 2);
+assert.eq(1, donorColl.count());
+assert.eq(1, recipColl.count());
+
+// recip has been waiting for donor to call _recvChunkCommit.
+pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+proceedToMigrateStep(recip, migrateStepNames.steady);
+proceedToMigrateStep(recip, migrateStepNames.done);
+
+cleanupOrphaned(donor, ns, 2);
+assert.eq(1, donorColl.count());
+cleanupOrphaned(recip, ns, 2);
+assert.eq(1, recipColl.count());
+
+// Let recip side of the migration finish so that the donor proceeds with the commit.
+unpauseMigrateAtStep(recip, migrateStepNames.done);
+waitForMoveChunkStep(donor, moveChunkStepNames.committed);
+
+// Donor is paused after the migration chunk commit, but before it finishes the cleanup that
+// includes running the range deleter. Thus it technically has orphaned data -- commit is
+// complete, but moved data is still present. cleanupOrphaned can remove the data the donor
+// would otherwise clean up itself in its post-move delete phase.
+cleanupOrphaned(donor, ns, 2);
+assert.eq(0, donorColl.count());
+
+// Let migration thread complete.
+unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+joinMoveChunk();
+
+// donor has finished post-move delete, which had nothing to remove with the range deleter
+// because of the preemptive cleanupOrphaned call.
+assert.eq(0, donorColl.count());
+assert.eq(1, recipColl.count());
+assert.eq(1, coll.count());
+
+MongoRunner.stopMongod(staticMongod);
+st.stop();
})();
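
// Illustrative aside, not taken from the diff above: the trick the hashed tests use to land a
// document in one specific chunk -- hash candidate keys with the internal _hashBSONElement
// command and compare the hash against that chunk's bounds in config.chunks. The variables
// mongosConn and targetChunk (one entry from config.chunks) are hypothetical placeholders.
let candidateDoc = null;
for (let i = 0; i < 10000 && candidateDoc === null; i++) {
    const doc = {key: ObjectId()};
    const hash = mongosConn.adminCommand({_hashBSONElement: doc.key}).out;
    const owned = mongosConn.getCollection("config.chunks").findOne({
        _id: targetChunk._id,
        "min.key": {$lte: hash},
        "max.key": {$gt: hash}
    });
    if (owned) {
        candidateDoc = doc;  // this key hashes into targetChunk's [min, max) range
    }
}
assert.neq(null, candidateDoc, "could not generate a key that hashes into the target chunk");
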
diff --git a/jstests/sharding/cleanup_orphaned_cmd_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
index a4f9cfb25eb..bf996dda39b 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
@@ -3,76 +3,75 @@
//
(function() {
- "use strict";
-
- var st = new ShardingTest({shards: 2, mongos: 1});
-
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: "hashed"}}));
-
- // Create two orphaned data holes, one bounded by min or max on each shard
-
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(-100)}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(-50)}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(50)}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(100)}}));
- assert.commandWorked(admin.runCommand({
- moveChunk: coll + "",
- bounds: [{_id: NumberLong(-100)}, {_id: NumberLong(-50)}],
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
- assert.commandWorked(admin.runCommand({
- moveChunk: coll + "",
- bounds: [{_id: NumberLong(50)}, {_id: NumberLong(100)}],
- to: st.shard0.shardName,
- _waitForDelete: true
- }));
- st.printShardingStatus();
-
- jsTest.log("Inserting some docs on each shard, so 1/2 will be orphaned...");
-
- for (var s = 0; s < 2; s++) {
- var shardColl = (s == 0 ? st.shard0 : st.shard1).getCollection(coll + "");
- var bulk = shardColl.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++)
- bulk.insert({_id: i});
- assert.writeOK(bulk.execute());
- }
-
- assert.eq(200,
- st.shard0.getCollection(coll + "").find().itcount() +
- st.shard1.getCollection(coll + "").find().itcount());
- assert.eq(100, coll.find().itcount());
-
- jsTest.log("Cleaning up orphaned data in hashed coll...");
-
- for (var s = 0; s < 2; s++) {
- var shardAdmin = (s == 0 ? st.shard0 : st.shard1).getDB("admin");
-
- var result = shardAdmin.runCommand({cleanupOrphaned: coll + ""});
- while (result.ok && result.stoppedAtKey) {
- printjson(result);
- result = shardAdmin.runCommand(
- {cleanupOrphaned: coll + "", startingFromKey: result.stoppedAtKey});
- }
-
+"use strict";
+
+var st = new ShardingTest({shards: 2, mongos: 1});
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: "hashed"}}));
+
+// Create two orphaned data holes, one bounded by min or max on each shard
+
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(-100)}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(-50)}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(50)}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(100)}}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: coll + "",
+ bounds: [{_id: NumberLong(-100)}, {_id: NumberLong(-50)}],
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: coll + "",
+ bounds: [{_id: NumberLong(50)}, {_id: NumberLong(100)}],
+ to: st.shard0.shardName,
+ _waitForDelete: true
+}));
+st.printShardingStatus();
+
+jsTest.log("Inserting some docs on each shard, so 1/2 will be orphaned...");
+
+for (var s = 0; s < 2; s++) {
+ var shardColl = (s == 0 ? st.shard0 : st.shard1).getCollection(coll + "");
+ var bulk = shardColl.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; i++)
+ bulk.insert({_id: i});
+ assert.writeOK(bulk.execute());
+}
+
+assert.eq(200,
+ st.shard0.getCollection(coll + "").find().itcount() +
+ st.shard1.getCollection(coll + "").find().itcount());
+assert.eq(100, coll.find().itcount());
+
+jsTest.log("Cleaning up orphaned data in hashed coll...");
+
+for (var s = 0; s < 2; s++) {
+ var shardAdmin = (s == 0 ? st.shard0 : st.shard1).getDB("admin");
+
+ var result = shardAdmin.runCommand({cleanupOrphaned: coll + ""});
+ while (result.ok && result.stoppedAtKey) {
printjson(result);
- assert(result.ok);
+ result = shardAdmin.runCommand(
+ {cleanupOrphaned: coll + "", startingFromKey: result.stoppedAtKey});
}
- assert.eq(100,
- st.shard0.getCollection(coll + "").find().itcount() +
- st.shard1.getCollection(coll + "").find().itcount());
- assert.eq(100, coll.find().itcount());
+ printjson(result);
+ assert(result.ok);
+}
- jsTest.log("DONE!");
+assert.eq(100,
+ st.shard0.getCollection(coll + "").find().itcount() +
+ st.shard1.getCollection(coll + "").find().itcount());
+assert.eq(100, coll.find().itcount());
- st.stop();
+jsTest.log("DONE!");
+st.stop();
})();
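
Illustrative sketch (not part of the patch): with {_id: "hashed"} as the shard key, a document inserted directly on a shard is an orphan whenever the hash of its _id falls in a chunk owned by the other shard, which is why roughly half of the directly inserted docs above end up orphaned. Assuming the shell helpers convertShardKeyToHashed() and bsonWoCompare() are available, the owning chunk for a single value can be found through the test's mongos like this:

    var hashedId = convertShardKeyToHashed(42);  // NumberLong hash of the shard key value 42
    mongos.getDB("config").chunks.find({ns: coll + ""}).forEach(function(chunk) {
        // Full BSON ordering handles the MinKey/MaxKey bounds of the edge chunks.
        if (bsonWoCompare({k: chunk.min._id}, {k: hashedId}) <= 0 &&
            bsonWoCompare({k: hashedId}, {k: chunk.max._id}) < 0) {
            print("_id 42 hashes into the chunk owned by " + chunk.shard);
        }
    });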
diff --git a/jstests/sharding/clone_catalog_data.js b/jstests/sharding/clone_catalog_data.js
index c2d25fc9d1f..fae12fbcd56 100644
--- a/jstests/sharding/clone_catalog_data.js
+++ b/jstests/sharding/clone_catalog_data.js
@@ -4,7 +4,6 @@
// Eventually, _movePrimary will use this command.
(() => {
-
function sortByName(a, b) {
if (a.name < b.name)
return -1;
@@ -34,8 +33,8 @@
// Create indexes on each collection.
var coll1Indexes =
[
- {key: {a: 1}, name: 'index1', expireAfterSeconds: 5000},
- {key: {b: -1}, name: 'index2', unique: true},
+ {key: {a: 1}, name: 'index1', expireAfterSeconds: 5000},
+ {key: {b: -1}, name: 'index2', unique: true},
],
coll2Indexes = [
{key: {a: 1, b: 1}, name: 'index3'},
diff --git a/jstests/sharding/coll_epoch_test0.js b/jstests/sharding/coll_epoch_test0.js
index a8745cd3110..ba87929cb4e 100644
--- a/jstests/sharding/coll_epoch_test0.js
+++ b/jstests/sharding/coll_epoch_test0.js
@@ -22,7 +22,6 @@ config.shards.find().forEach(function(doc) {
var createdEpoch = null;
var checkEpochs = function() {
config.chunks.find({ns: coll + ""}).forEach(function(chunk) {
-
// Make sure the epochs exist, are non-zero, and are consistent
assert(chunk.lastmodEpoch);
print(chunk.lastmodEpoch + "");
@@ -31,7 +30,6 @@ var checkEpochs = function() {
createdEpoch = chunk.lastmodEpoch;
else
assert.eq(createdEpoch, chunk.lastmodEpoch);
-
});
};
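
A small sketch (illustration only, not part of the patch) that extends the check above, assuming config.collections records the collection's epoch in a lastmodEpoch field just as the chunk documents do; it cross-checks every chunk epoch against the collection entry using the test's config and coll variables:

    var collEntry = config.collections.findOne({_id: coll + ""});
    config.chunks.find({ns: coll + ""}).forEach(function(chunk) {
        // Every chunk should carry the epoch minted when the collection was (re)sharded.
        assert.eq(collEntry.lastmodEpoch, chunk.lastmodEpoch);
    });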
diff --git a/jstests/sharding/coll_epoch_test1.js b/jstests/sharding/coll_epoch_test1.js
index 5e243b8fff4..d995ee19ab6 100644
--- a/jstests/sharding/coll_epoch_test1.js
+++ b/jstests/sharding/coll_epoch_test1.js
@@ -1,83 +1,83 @@
// Tests various cases of dropping and recreating collections in the same namespace with multiple
// mongoses
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 3, mongos: 3, causallyConsistent: true});
+var st = new ShardingTest({shards: 3, mongos: 3, causallyConsistent: true});
- var config = st.s0.getDB("config");
- var admin = st.s0.getDB("admin");
- var coll = st.s0.getCollection("foo.bar");
+var config = st.s0.getDB("config");
+var admin = st.s0.getDB("admin");
+var coll = st.s0.getCollection("foo.bar");
- // Use separate mongoses for admin, inserting data, and validating results, so no single-mongos
- // tricks will work
- var staleMongos = st.s1;
- var insertMongos = st.s2;
+// Use separate mongoses for admin, inserting data, and validating results, so no single-mongos
+// tricks will work
+var staleMongos = st.s1;
+var insertMongos = st.s2;
- var shards = [st.shard0, st.shard1, st.shard2];
+var shards = [st.shard0, st.shard1, st.shard2];
- //
- // Test that inserts and queries go to the correct shard even when the collection has been
- // sharded from another mongos
- //
+//
+// Test that inserts and queries go to the correct shard even when the collection has been
+// sharded from another mongos
+//
- jsTest.log("Enabling sharding for the first time...");
+jsTest.log("Enabling sharding for the first time...");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- // TODO(PM-85): Make sure we *always* move the primary after collection lifecyle project is
- // complete
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- st.configRS.awaitLastOpCommitted(); // TODO: Remove after collection lifecyle project (PM-85)
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+// TODO(PM-85): Make sure we *always* move the primary after collection lifecycle project is
+// complete
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+st.configRS.awaitLastOpCommitted();  // TODO: Remove after collection lifecycle project (PM-85)
- var bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({_id: i, test: "a"});
- }
- assert.writeOK(bulk.execute());
- assert.eq(100, staleMongos.getCollection(coll + "").find({test: "a"}).itcount());
+var bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({_id: i, test: "a"});
+}
+assert.writeOK(bulk.execute());
+assert.eq(100, staleMongos.getCollection(coll + "").find({test: "a"}).itcount());
- assert(coll.drop());
- st.configRS.awaitLastOpCommitted();
+assert(coll.drop());
+st.configRS.awaitLastOpCommitted();
- //
- // Test that inserts and queries go to the correct shard even when the collection has been
- // resharded from another mongos, with a different key
- //
+//
+// Test that inserts and queries go to the correct shard even when the collection has been
+// resharded from another mongos, with a different key
+//
- jsTest.log("Re-enabling sharding with a different key...");
+jsTest.log("Re-enabling sharding with a different key...");
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
- assert.commandWorked(coll.ensureIndex({notId: 1}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {notId: 1}}));
- st.configRS.awaitLastOpCommitted();
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
+assert.commandWorked(coll.ensureIndex({notId: 1}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {notId: 1}}));
+st.configRS.awaitLastOpCommitted();
- bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({notId: i, test: "b"});
- }
- assert.writeOK(bulk.execute());
- assert.eq(100, staleMongos.getCollection(coll + "").find({test: "b"}).itcount());
- assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a"]}}).itcount());
+bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({notId: i, test: "b"});
+}
+assert.writeOK(bulk.execute());
+assert.eq(100, staleMongos.getCollection(coll + "").find({test: "b"}).itcount());
+assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a"]}}).itcount());
- assert(coll.drop());
- st.configRS.awaitLastOpCommitted();
+assert(coll.drop());
+st.configRS.awaitLastOpCommitted();
- //
- // Test that inserts and queries go to the correct shard even when the collection has been
- // unsharded from another mongos
- //
+//
+// Test that inserts and queries go to the correct shard even when the collection has been
+// unsharded from another mongos
+//
- jsTest.log("Re-creating unsharded collection from a sharded collection...");
+jsTest.log("Re-creating unsharded collection from a sharded collection...");
- bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({test: "c"});
- }
- assert.writeOK(bulk.execute());
+bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({test: "c"});
+}
+assert.writeOK(bulk.execute());
- assert.eq(100, staleMongos.getCollection(coll + "").find({test: "c"}).itcount());
- assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a", "b"]}}).itcount());
+assert.eq(100, staleMongos.getCollection(coll + "").find({test: "c"}).itcount());
+assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a", "b"]}}).itcount());
- st.stop();
+st.stop();
})();
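
The test above relies on each stale mongos noticing an epoch mismatch and refreshing its routing table on its own. As a debugging aid (illustration only, not part of the patch), a refresh can also be forced explicitly with the flushRouterConfig admin command before re-running a query through the suspect mongos:

    assert.commandWorked(staleMongos.getDB("admin").runCommand({flushRouterConfig: 1}));
    // The next query through staleMongos reloads collection metadata from the config servers
    // before targeting shards.
    assert.eq(100, staleMongos.getCollection(coll + "").find({test: "c"}).itcount());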
diff --git a/jstests/sharding/collation_lookup.js b/jstests/sharding/collation_lookup.js
index 6e202b069c6..f9388cf9aa3 100644
--- a/jstests/sharding/collation_lookup.js
+++ b/jstests/sharding/collation_lookup.js
@@ -7,15 +7,15 @@
* collection the "aggregate" command was performed on.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // for arrayEq
- load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
- load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
+load("jstests/aggregation/extras/utils.js"); // for arrayEq
+load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
+load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
- function runTests(withDefaultCollationColl, withoutDefaultCollationColl, collation) {
- // Test that the $lookup stage respects the inherited collation.
- let res = withDefaultCollationColl
+function runTests(withDefaultCollationColl, withoutDefaultCollationColl, collation) {
+ // Test that the $lookup stage respects the inherited collation.
+ let res = withDefaultCollationColl
.aggregate([{
$lookup: {
from: withoutDefaultCollationColl.getName(),
@@ -25,14 +25,14 @@
},
}])
.toArray();
- assert.eq(1, res.length, tojson(res));
+ assert.eq(1, res.length, tojson(res));
- let expected = [{_id: "lowercase", str: "abc"}, {_id: "uppercase", str: "ABC"}];
- assert(arrayEq(expected, res[0].matched),
- "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched) +
- " up to ordering");
+ let expected = [{_id: "lowercase", str: "abc"}, {_id: "uppercase", str: "ABC"}];
+ assert(
+ arrayEq(expected, res[0].matched),
+ "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched) + " up to ordering");
- res = withDefaultCollationColl
+ res = withDefaultCollationColl
.aggregate([{
$lookup: {
from: withoutDefaultCollationColl.getName(),
@@ -52,28 +52,27 @@
},
}])
.toArray();
- assert.eq(1, res.length, tojson(res));
-
- expected = [
- {
- "_id": "lowercase",
- "str": "abc",
- "matched2": [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
- },
- {
- "_id": "uppercase",
- "str": "ABC",
- "matched2":
- [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
- }
- ];
- assert(arrayEq(expected, res[0].matched1),
- "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched1) +
- " up to ordering. " + tojson(res));
-
- // Test that the $lookup stage respects the inherited collation when it optimizes with an
- // $unwind stage.
- res = withDefaultCollationColl
+ assert.eq(1, res.length, tojson(res));
+
+ expected = [
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched2": [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
+ },
+ {
+ "_id": "uppercase",
+ "str": "ABC",
+ "matched2": [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
+ }
+ ];
+ assert(arrayEq(expected, res[0].matched1),
+ "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched1) +
+ " up to ordering. " + tojson(res));
+
+ // Test that the $lookup stage respects the inherited collation when it optimizes with an
+ // $unwind stage.
+ res = withDefaultCollationColl
.aggregate([
{
$lookup: {
@@ -86,16 +85,16 @@
{$unwind: "$matched"},
])
.toArray();
- assert.eq(2, res.length, tojson(res));
+ assert.eq(2, res.length, tojson(res));
- expected = [
- {_id: "lowercase", str: "abc", matched: {_id: "lowercase", str: "abc"}},
- {_id: "lowercase", str: "abc", matched: {_id: "uppercase", str: "ABC"}}
- ];
- assert(arrayEq(expected, res),
- "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
+ expected = [
+ {_id: "lowercase", str: "abc", matched: {_id: "lowercase", str: "abc"}},
+ {_id: "lowercase", str: "abc", matched: {_id: "uppercase", str: "ABC"}}
+ ];
+ assert(arrayEq(expected, res),
+ "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
- res = withDefaultCollationColl
+ res = withDefaultCollationColl
.aggregate([
{
$lookup: {
@@ -119,51 +118,39 @@
{$unwind: "$matched1"},
])
.toArray();
- assert.eq(4, res.length, tojson(res));
-
- expected = [
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "lowercase",
- "str": "abc",
- "matched2": {"_id": "lowercase", "str": "abc"}
- }
- },
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "lowercase",
- "str": "abc",
- "matched2": {"_id": "uppercase", "str": "ABC"}
- }
- },
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "uppercase",
- "str": "ABC",
- "matched2": {"_id": "lowercase", "str": "abc"}
- }
- },
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "uppercase",
- "str": "ABC",
- "matched2": {"_id": "uppercase", "str": "ABC"}
- }
- }
- ];
- assert(arrayEq(expected, res),
- "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
-
- // Test that the $lookup stage respects an explicit collation on the aggregation operation.
- res = withoutDefaultCollationColl
+ assert.eq(4, res.length, tojson(res));
+
+ expected = [
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "lowercase", "str": "abc", "matched2": {"_id": "lowercase", "str": "abc"}}
+ },
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "lowercase", "str": "abc", "matched2": {"_id": "uppercase", "str": "ABC"}}
+ },
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "uppercase", "str": "ABC", "matched2": {"_id": "lowercase", "str": "abc"}}
+ },
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "uppercase", "str": "ABC", "matched2": {"_id": "uppercase", "str": "ABC"}}
+ }
+ ];
+ assert(arrayEq(expected, res),
+ "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
+
+ // Test that the $lookup stage respects an explicit collation on the aggregation operation.
+ res = withoutDefaultCollationColl
.aggregate(
[
{$match: {_id: "lowercase"}},
@@ -178,14 +165,14 @@
],
collation)
.toArray();
- assert.eq(1, res.length, tojson(res));
+ assert.eq(1, res.length, tojson(res));
- expected = [{_id: "lowercase", str: "abc"}, {_id: "uppercase", str: "ABC"}];
- assert(arrayEq(expected, res[0].matched),
- "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched) +
- " up to ordering");
+ expected = [{_id: "lowercase", str: "abc"}, {_id: "uppercase", str: "ABC"}];
+ assert(
+ arrayEq(expected, res[0].matched),
+ "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched) + " up to ordering");
- res = withoutDefaultCollationColl
+ res = withoutDefaultCollationColl
.aggregate(
[
{$match: {_id: "lowercase"}},
@@ -210,29 +197,28 @@
],
collation)
.toArray();
- assert.eq(1, res.length, tojson(res));
-
- expected = [
- {
- "_id": "lowercase",
- "str": "abc",
- "matched2": [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
- },
- {
- "_id": "uppercase",
- "str": "ABC",
- "matched2":
- [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
- }
- ];
- assert(arrayEq(expected, res[0].matched1),
- "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched1) +
- " up to ordering");
-
- // Test that the $lookup stage respects an explicit collation on the aggregation operation
- // when
- // it optimizes with an $unwind stage.
- res = withoutDefaultCollationColl
+ assert.eq(1, res.length, tojson(res));
+
+ expected = [
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched2": [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
+ },
+ {
+ "_id": "uppercase",
+ "str": "ABC",
+ "matched2": [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
+ }
+ ];
+ assert(arrayEq(expected, res[0].matched1),
+ "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched1) +
+ " up to ordering");
+
+    // Test that the $lookup stage respects an explicit collation on the aggregation operation
+    // when it optimizes with an $unwind stage.
+ res = withoutDefaultCollationColl
.aggregate(
[
{$match: {_id: "lowercase"}},
@@ -248,16 +234,16 @@
],
collation)
.toArray();
- assert.eq(2, res.length, tojson(res));
+ assert.eq(2, res.length, tojson(res));
- expected = [
- {_id: "lowercase", str: "abc", matched: {_id: "lowercase", str: "abc"}},
- {_id: "lowercase", str: "abc", matched: {_id: "uppercase", str: "ABC"}}
- ];
- assert(arrayEq(expected, res),
- "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
+ expected = [
+ {_id: "lowercase", str: "abc", matched: {_id: "lowercase", str: "abc"}},
+ {_id: "lowercase", str: "abc", matched: {_id: "uppercase", str: "ABC"}}
+ ];
+ assert(arrayEq(expected, res),
+ "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
- res = withoutDefaultCollationColl
+ res = withoutDefaultCollationColl
.aggregate(
[
{$match: {_id: "lowercase"}},
@@ -284,52 +270,40 @@
],
collation)
.toArray();
- assert.eq(4, res.length, tojson(res));
-
- expected = [
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "lowercase",
- "str": "abc",
- "matched2": {"_id": "lowercase", "str": "abc"}
- }
- },
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "lowercase",
- "str": "abc",
- "matched2": {"_id": "uppercase", "str": "ABC"}
- }
- },
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "uppercase",
- "str": "ABC",
- "matched2": {"_id": "lowercase", "str": "abc"}
- }
- },
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "uppercase",
- "str": "ABC",
- "matched2": {"_id": "uppercase", "str": "ABC"}
- }
- }
- ];
- assert(arrayEq(expected, res),
- "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
-
- // Test that the $lookup stage uses the "simple" collation if a collation isn't set on the
- // collection or the aggregation operation.
- res = withoutDefaultCollationColl
+ assert.eq(4, res.length, tojson(res));
+
+ expected = [
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "lowercase", "str": "abc", "matched2": {"_id": "lowercase", "str": "abc"}}
+ },
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "lowercase", "str": "abc", "matched2": {"_id": "uppercase", "str": "ABC"}}
+ },
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "uppercase", "str": "ABC", "matched2": {"_id": "lowercase", "str": "abc"}}
+ },
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "uppercase", "str": "ABC", "matched2": {"_id": "uppercase", "str": "ABC"}}
+ }
+ ];
+ assert(arrayEq(expected, res),
+ "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
+
+ // Test that the $lookup stage uses the "simple" collation if a collation isn't set on the
+ // collection or the aggregation operation.
+ res = withoutDefaultCollationColl
.aggregate([
{$match: {_id: "lowercase"}},
{
@@ -342,9 +316,9 @@
},
])
.toArray();
- assert.eq([{_id: "lowercase", str: "abc", matched: [{_id: "lowercase", str: "abc"}]}], res);
+ assert.eq([{_id: "lowercase", str: "abc", matched: [{_id: "lowercase", str: "abc"}]}], res);
- res = withoutDefaultCollationColl
+ res = withoutDefaultCollationColl
.aggregate([
{$match: {_id: "lowercase"}},
{
@@ -368,92 +342,92 @@
},
])
.toArray();
- assert.eq([{
- "_id": "lowercase",
- "str": "abc",
- "matched1": [{
- "_id": "lowercase",
- "str": "abc",
- "matched2": {"_id": "lowercase", "str": "abc"}
- }]
- }],
- res);
- }
-
- const st = new ShardingTest({shards: 2, config: 1});
- setParameterOnAllHosts(
- DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", true);
-
- const testName = "collation_lookup";
- const caseInsensitive = {collation: {locale: "en_US", strength: 2}};
-
- const mongosDB = st.s0.getDB(testName);
- const withDefaultCollationColl = mongosDB[testName + "_with_default"];
- const withoutDefaultCollationColl = mongosDB[testName + "_without_default"];
-
- assert.commandWorked(
- mongosDB.createCollection(withDefaultCollationColl.getName(), caseInsensitive));
- assert.writeOK(withDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
-
- assert.writeOK(withoutDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
- assert.writeOK(withoutDefaultCollationColl.insert({_id: "uppercase", str: "ABC"}));
- assert.writeOK(withoutDefaultCollationColl.insert({_id: "unmatched", str: "def"}));
-
- //
- // Sharded collection with default collation and unsharded collection without a default
- // collation.
- //
- assert.commandWorked(
- withDefaultCollationColl.createIndex({str: 1}, {collation: {locale: "simple"}}));
-
- // Enable sharding on the test DB and ensure its primary is shard0000.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
- // Shard the collection with a default collation.
- assert.commandWorked(mongosDB.adminCommand({
- shardCollection: withDefaultCollationColl.getFullName(),
- key: {str: 1},
- collation: {locale: "simple"}
- }));
-
- // Split the collection into 2 chunks.
- assert.commandWorked(mongosDB.adminCommand(
- {split: withDefaultCollationColl.getFullName(), middle: {str: "abc"}}));
-
- // Move the chunk containing {str: "abc"} to shard0001.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: withDefaultCollationColl.getFullName(),
- find: {str: "abc"},
- to: st.shard1.shardName
- }));
-
- runTests(withDefaultCollationColl, withoutDefaultCollationColl, caseInsensitive);
-
- // TODO: Enable the following tests once SERVER-32536 is fixed.
- //
- // Sharded collection with default collation and sharded collection without a default
- // collation.
- //
-
- // Shard the collection without a default collation.
- // assert.commandWorked(mongosDB.adminCommand({
- // shardCollection: withoutDefaultCollationColl.getFullName(),
- // key: {_id: 1},
- // }));
-
- // // Split the collection into 2 chunks.
- // assert.commandWorked(mongosDB.adminCommand(
- // {split: withoutDefaultCollationColl.getFullName(), middle: {_id: "unmatched"}}));
-
- // // Move the chunk containing {_id: "lowercase"} to shard0001.
- // assert.commandWorked(mongosDB.adminCommand({
- // moveChunk: withoutDefaultCollationColl.getFullName(),
- // find: {_id: "lowercase"},
- // to: st.shard1.shardName
- // }));
-
- // runTests(withDefaultCollationColl, withoutDefaultCollationColl, caseInsensitive);
-
- st.stop();
+ assert.eq(
+ [{
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ [{"_id": "lowercase", "str": "abc", "matched2": {"_id": "lowercase", "str": "abc"}}]
+ }],
+ res);
+}
+
+const st = new ShardingTest({shards: 2, config: 1});
+setParameterOnAllHosts(
+ DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", true);
+
+const testName = "collation_lookup";
+const caseInsensitive = {
+ collation: {locale: "en_US", strength: 2}
+};
+
+const mongosDB = st.s0.getDB(testName);
+const withDefaultCollationColl = mongosDB[testName + "_with_default"];
+const withoutDefaultCollationColl = mongosDB[testName + "_without_default"];
+
+assert.commandWorked(
+ mongosDB.createCollection(withDefaultCollationColl.getName(), caseInsensitive));
+assert.writeOK(withDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
+
+assert.writeOK(withoutDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
+assert.writeOK(withoutDefaultCollationColl.insert({_id: "uppercase", str: "ABC"}));
+assert.writeOK(withoutDefaultCollationColl.insert({_id: "unmatched", str: "def"}));
+
+//
+// Sharded collection with default collation and unsharded collection without a default
+// collation.
+//
+assert.commandWorked(
+ withDefaultCollationColl.createIndex({str: 1}, {collation: {locale: "simple"}}));
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+// Shard the collection with a default collation.
+assert.commandWorked(mongosDB.adminCommand({
+ shardCollection: withDefaultCollationColl.getFullName(),
+ key: {str: 1},
+ collation: {locale: "simple"}
+}));
+
+// Split the collection into 2 chunks.
+assert.commandWorked(
+ mongosDB.adminCommand({split: withDefaultCollationColl.getFullName(), middle: {str: "abc"}}));
+
+// Move the chunk containing {str: "abc"} to shard0001.
+assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: withDefaultCollationColl.getFullName(),
+ find: {str: "abc"},
+ to: st.shard1.shardName
+}));
+
+runTests(withDefaultCollationColl, withoutDefaultCollationColl, caseInsensitive);
+
+// TODO: Enable the following tests once SERVER-32536 is fixed.
+//
+// Sharded collection with default collation and sharded collection without a default
+// collation.
+//
+
+// Shard the collection without a default collation.
+// assert.commandWorked(mongosDB.adminCommand({
+// shardCollection: withoutDefaultCollationColl.getFullName(),
+// key: {_id: 1},
+// }));
+
+// // Split the collection into 2 chunks.
+// assert.commandWorked(mongosDB.adminCommand(
+// {split: withoutDefaultCollationColl.getFullName(), middle: {_id: "unmatched"}}));
+
+// // Move the chunk containing {_id: "lowercase"} to shard0001.
+// assert.commandWorked(mongosDB.adminCommand({
+// moveChunk: withoutDefaultCollationColl.getFullName(),
+// find: {_id: "lowercase"},
+// to: st.shard1.shardName
+// }));
+
+// runTests(withDefaultCollationColl, withoutDefaultCollationColl, caseInsensitive);
+
+st.stop();
})();
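
A condensed sketch (illustration only, not part of the patch) of the core behaviour the test above verifies, assuming the foreign collection is unsharded or the internalQueryAllowShardedLookup parameter from the setup is enabled: with a strength-2 en_US collation, $lookup's equality match treats "abc" and "ABC" as the same key.

    var res = withoutDefaultCollationColl.aggregate(
        [{$lookup: {from: withoutDefaultCollationColl.getName(), localField: "str",
                    foreignField: "str", as: "matched"}}],
        {collation: {locale: "en_US", strength: 2}}).toArray();
    // The "abc" document's matched array now also contains the "ABC" document, because case
    // differences are ignored during the $lookup equality match.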
diff --git a/jstests/sharding/collation_targeting.js b/jstests/sharding/collation_targeting.js
index fc2b9c193eb..c58396eaa80 100644
--- a/jstests/sharding/collation_targeting.js
+++ b/jstests/sharding/collation_targeting.js
@@ -1,462 +1,465 @@
// Test shard targeting for queries with collation.
(function() {
- "use strict";
-
- const caseInsensitive = {locale: "en_US", strength: 2};
-
- var explain;
- var writeRes;
-
- // Create a cluster with 3 shards.
- var st = new ShardingTest({shards: 3});
- var testDB = st.s.getDB("test");
- assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
-
- // Create a collection sharded on {a: 1}. Add 2dsphere index to test $geoNear.
- var coll = testDB.getCollection("simple_collation");
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
- assert.commandWorked(testDB.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
-
- // Split the collection.
- // st.shard0.shardName: { "a" : { "$minKey" : 1 } } -->> { "a" : 10 }
- // st.shard1.shardName: { "a" : 10 } -->> { "a" : "a"}
- // shard0002: { "a" : "a" } -->> { "a" : { "$maxKey" : 1 }}
- assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: 10}}));
- assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: "a"}}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {a: 1}, to: st.shard0.shardName}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {a: "FOO"}, to: st.shard1.shardName}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {a: "foo"}, to: st.shard2.shardName}));
-
- // Put data on each shard.
- // Note that the balancer is off by default, so the chunks will stay put.
- // st.shard0.shardName: {a: 1}
- // st.shard1.shardName: {a: 100}, {a: "FOO"}
- // shard0002: {a: "foo"}
- // Include geo field to test $geoNear.
- var a_1 = {_id: 0, a: 1, geo: {type: "Point", coordinates: [0, 0]}};
- var a_100 = {_id: 1, a: 100, geo: {type: "Point", coordinates: [0, 0]}};
- var a_FOO = {_id: 2, a: "FOO", geo: {type: "Point", coordinates: [0, 0]}};
- var a_foo = {_id: 3, a: "foo", geo: {type: "Point", coordinates: [0, 0]}};
- assert.writeOK(coll.insert(a_1));
- assert.writeOK(coll.insert(a_100));
- assert.writeOK(coll.insert(a_FOO));
- assert.writeOK(coll.insert(a_foo));
-
- // Aggregate.
-
- // Test an aggregate command on strings with a non-simple collation. This should be
- // scatter-gather.
- assert.eq(2, coll.aggregate([{$match: {a: "foo"}}], {collation: caseInsensitive}).itcount());
- explain = coll.explain().aggregate([{$match: {a: "foo"}}], {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(3, Object.keys(explain.shards).length);
-
- // Test an aggregate command with a simple collation. This should be single-shard.
- assert.eq(1, coll.aggregate([{$match: {a: "foo"}}]).itcount());
- explain = coll.explain().aggregate([{$match: {a: "foo"}}]);
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Test an aggregate command on numbers with a non-simple collation. This should be
- // single-shard.
- assert.eq(1, coll.aggregate([{$match: {a: 100}}], {collation: caseInsensitive}).itcount());
- explain = coll.explain().aggregate([{$match: {a: 100}}], {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Aggregate with $geoNear.
- const geoJSONPoint = {type: "Point", coordinates: [0, 0]};
-
- // Test $geoNear with a query on strings with a non-simple collation. This should
- // scatter-gather.
- const geoNearStageStringQuery = [{
- $geoNear: {
- near: geoJSONPoint,
- distanceField: "dist",
- spherical: true,
- query: {a: "foo"},
- }
- }];
- assert.eq(2, coll.aggregate(geoNearStageStringQuery, {collation: caseInsensitive}).itcount());
- explain = coll.explain().aggregate(geoNearStageStringQuery, {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(3, Object.keys(explain.shards).length);
-
- // Test $geoNear with a query on strings with a simple collation. This should be single-shard.
- assert.eq(1, coll.aggregate(geoNearStageStringQuery).itcount());
- explain = coll.explain().aggregate(geoNearStageStringQuery);
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Test a $geoNear with a query on numbers with a non-simple collation. This should be
- // single-shard.
- const geoNearStageNumericalQuery = [{
- $geoNear: {
- near: geoJSONPoint,
- distanceField: "dist",
- spherical: true,
- query: {a: 100},
- }
- }];
- assert.eq(1,
- coll.aggregate(geoNearStageNumericalQuery, {collation: caseInsensitive}).itcount());
- explain = coll.explain().aggregate(geoNearStageNumericalQuery, {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Count.
-
- // Test a count command on strings with a non-simple collation. This should be scatter-gather.
- assert.eq(2, coll.find({a: "foo"}).collation(caseInsensitive).count());
- explain = coll.explain().find({a: "foo"}).collation(caseInsensitive).count();
+"use strict";
+
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
+
+var explain;
+var writeRes;
+
+// Create a cluster with 3 shards.
+var st = new ShardingTest({shards: 3});
+var testDB = st.s.getDB("test");
+assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
+
+// Create a collection sharded on {a: 1}. Add 2dsphere index to test $geoNear.
+var coll = testDB.getCollection("simple_collation");
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
+assert.commandWorked(testDB.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
+
+// Split the collection.
+// st.shard0.shardName: { "a" : { "$minKey" : 1 } } -->> { "a" : 10 }
+// st.shard1.shardName: { "a" : 10 } -->> { "a" : "a"}
+// st.shard2.shardName: { "a" : "a" } -->> { "a" : { "$maxKey" : 1 }}
+assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: 10}}));
+assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: "a"}}));
+assert.commandWorked(
+ testDB.adminCommand({moveChunk: coll.getFullName(), find: {a: 1}, to: st.shard0.shardName}));
+assert.commandWorked(testDB.adminCommand(
+ {moveChunk: coll.getFullName(), find: {a: "FOO"}, to: st.shard1.shardName}));
+assert.commandWorked(testDB.adminCommand(
+ {moveChunk: coll.getFullName(), find: {a: "foo"}, to: st.shard2.shardName}));
+
+// Put data on each shard.
+// Note that the balancer is off by default, so the chunks will stay put.
+// st.shard0.shardName: {a: 1}
+// st.shard1.shardName: {a: 100}, {a: "FOO"}
+// st.shard2.shardName: {a: "foo"}
+// Include geo field to test $geoNear.
+var a_1 = {_id: 0, a: 1, geo: {type: "Point", coordinates: [0, 0]}};
+var a_100 = {_id: 1, a: 100, geo: {type: "Point", coordinates: [0, 0]}};
+var a_FOO = {_id: 2, a: "FOO", geo: {type: "Point", coordinates: [0, 0]}};
+var a_foo = {_id: 3, a: "foo", geo: {type: "Point", coordinates: [0, 0]}};
+assert.writeOK(coll.insert(a_1));
+assert.writeOK(coll.insert(a_100));
+assert.writeOK(coll.insert(a_FOO));
+assert.writeOK(coll.insert(a_foo));
+
+// Aggregate.
+
+// Test an aggregate command on strings with a non-simple collation. This should be
+// scatter-gather.
+assert.eq(2, coll.aggregate([{$match: {a: "foo"}}], {collation: caseInsensitive}).itcount());
+explain = coll.explain().aggregate([{$match: {a: "foo"}}], {collation: caseInsensitive});
+assert.commandWorked(explain);
+assert.eq(3, Object.keys(explain.shards).length);
+
+// Test an aggregate command with a simple collation. This should be single-shard.
+assert.eq(1, coll.aggregate([{$match: {a: "foo"}}]).itcount());
+explain = coll.explain().aggregate([{$match: {a: "foo"}}]);
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Test an aggregate command on numbers with a non-simple collation. This should be
+// single-shard.
+assert.eq(1, coll.aggregate([{$match: {a: 100}}], {collation: caseInsensitive}).itcount());
+explain = coll.explain().aggregate([{$match: {a: 100}}], {collation: caseInsensitive});
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Aggregate with $geoNear.
+const geoJSONPoint = {
+ type: "Point",
+ coordinates: [0, 0]
+};
+
+// Test $geoNear with a query on strings with a non-simple collation. This should
+// scatter-gather.
+const geoNearStageStringQuery = [{
+ $geoNear: {
+ near: geoJSONPoint,
+ distanceField: "dist",
+ spherical: true,
+ query: {a: "foo"},
+ }
+}];
+assert.eq(2, coll.aggregate(geoNearStageStringQuery, {collation: caseInsensitive}).itcount());
+explain = coll.explain().aggregate(geoNearStageStringQuery, {collation: caseInsensitive});
+assert.commandWorked(explain);
+assert.eq(3, Object.keys(explain.shards).length);
+
+// Test $geoNear with a query on strings with a simple collation. This should be single-shard.
+assert.eq(1, coll.aggregate(geoNearStageStringQuery).itcount());
+explain = coll.explain().aggregate(geoNearStageStringQuery);
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Test a $geoNear with a query on numbers with a non-simple collation. This should be
+// single-shard.
+const geoNearStageNumericalQuery = [{
+ $geoNear: {
+ near: geoJSONPoint,
+ distanceField: "dist",
+ spherical: true,
+ query: {a: 100},
+ }
+}];
+assert.eq(1, coll.aggregate(geoNearStageNumericalQuery, {collation: caseInsensitive}).itcount());
+explain = coll.explain().aggregate(geoNearStageNumericalQuery, {collation: caseInsensitive});
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Count.
+
+// Test a count command on strings with a non-simple collation. This should be scatter-gather.
+assert.eq(2, coll.find({a: "foo"}).collation(caseInsensitive).count());
+explain = coll.explain().find({a: "foo"}).collation(caseInsensitive).count();
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a count command with a simple collation. This should be single-shard.
+assert.eq(1, coll.find({a: "foo"}).count());
+explain = coll.explain().find({a: "foo"}).count();
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a count command on numbers with a non-simple collation. This should be single-shard.
+assert.eq(1, coll.find({a: 100}).collation(caseInsensitive).count());
+explain = coll.explain().find({a: 100}).collation(caseInsensitive).count();
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Distinct.
+
+// Test a distinct command on strings with a non-simple collation. This should be
+// scatter-gather.
+assert.eq(2, coll.distinct("_id", {a: "foo"}, {collation: caseInsensitive}).length);
+explain = coll.explain().distinct("_id", {a: "foo"}, {collation: caseInsensitive});
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+
+// Test that deduping respects the collation.
+assert.eq(1, coll.distinct("a", {a: "foo"}, {collation: caseInsensitive}).length);
+
+// Test a distinct command with a simple collation. This should be single-shard.
+assert.eq(1, coll.distinct("_id", {a: "foo"}).length);
+explain = coll.explain().distinct("_id", {a: "foo"});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a distinct command on numbers with a non-simple collation. This should be single-shard.
+assert.eq(1, coll.distinct("_id", {a: 100}, {collation: caseInsensitive}).length);
+explain = coll.explain().distinct("_id", {a: 100}, {collation: caseInsensitive});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Find.
+
+// Test a find command on strings with a non-simple collation. This should be scatter-gather.
+if (testDB.getMongo().useReadCommands()) {
+ assert.eq(2, coll.find({a: "foo"}).collation(caseInsensitive).itcount());
+ explain = coll.find({a: "foo"}).collation(caseInsensitive).explain();
assert.commandWorked(explain);
assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a count command with a simple collation. This should be single-shard.
- assert.eq(1, coll.find({a: "foo"}).count());
- explain = coll.explain().find({a: "foo"}).count();
+}
+
+// Test a find command with a simple collation. This should be single-shard.
+assert.eq(1, coll.find({a: "foo"}).itcount());
+explain = coll.find({a: "foo"}).explain();
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a find command on numbers with a non-simple collation. This should be single-shard.
+if (testDB.getMongo().useReadCommands()) {
+ assert.eq(1, coll.find({a: 100}).collation(caseInsensitive).itcount());
+ explain = coll.find({a: 100}).collation(caseInsensitive).explain();
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a count command on numbers with a non-simple collation. This should be single-shard.
- assert.eq(1, coll.find({a: 100}).collation(caseInsensitive).count());
- explain = coll.explain().find({a: 100}).collation(caseInsensitive).count();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Distinct.
-
- // Test a distinct command on strings with a non-simple collation. This should be
- // scatter-gather.
- assert.eq(2, coll.distinct("_id", {a: "foo"}, {collation: caseInsensitive}).length);
- explain = coll.explain().distinct("_id", {a: "foo"}, {collation: caseInsensitive});
+}
+
+// FindAndModify.
+
+// Sharded findAndModify on strings with non-simple collation should fail, because findAndModify
+// must target a single shard.
+assert.throws(function() {
+ coll.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}, collation: caseInsensitive});
+});
+assert.throws(function() {
+ coll.explain().findAndModify(
+ {query: {a: "foo"}, update: {$set: {b: 1}}, collation: caseInsensitive});
+});
+
+// Sharded findAndModify on strings with simple collation should succeed. This should be
+// single-shard.
+assert.eq("foo", coll.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}}).a);
+explain = coll.explain().findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Sharded findAndModify on numbers with non-simple collation should succeed. This should be
+// single-shard.
+assert.eq(
+ 100,
+ coll.findAndModify({query: {a: 100}, update: {$set: {b: 1}}, collation: caseInsensitive}).a);
+explain = coll.explain().findAndModify(
+ {query: {a: 100}, update: {$set: {b: 1}}, collation: caseInsensitive});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// MapReduce.
+
+// Test mapReduce on strings with a non-simple collation.
+assert.eq(2,
+ assert
+ .commandWorked(coll.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: "foo"}, collation: caseInsensitive}))
+ .results.length);
+
+// Test mapReduce on strings with a simple collation.
+assert.eq(1,
+ assert
+ .commandWorked(coll.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: "foo"}}))
+ .results.length);
+
+// Remove.
+
+// Test a remove command on strings with non-simple collation. This should be scatter-gather.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.remove({a: "foo"}, {collation: caseInsensitive});
+ assert.writeOK(writeRes);
+ assert.eq(2, writeRes.nRemoved);
+ explain = coll.explain().remove({a: "foo"}, {collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-
- // Test that deduping respects the collation.
- assert.eq(1, coll.distinct("a", {a: "foo"}, {collation: caseInsensitive}).length);
-
- // Test a distinct command with a simple collation. This should be single-shard.
- assert.eq(1, coll.distinct("_id", {a: "foo"}).length);
- explain = coll.explain().distinct("_id", {a: "foo"});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a distinct command on numbers with a non-simple collation. This should be single-shard.
- assert.eq(1, coll.distinct("_id", {a: 100}, {collation: caseInsensitive}).length);
- explain = coll.explain().distinct("_id", {a: 100}, {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Find.
-
- // Test a find command on strings with a non-simple collation. This should be scatter-gather.
- if (testDB.getMongo().useReadCommands()) {
- assert.eq(2, coll.find({a: "foo"}).collation(caseInsensitive).itcount());
- explain = coll.find({a: "foo"}).collation(caseInsensitive).explain();
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
- }
-
- // Test a find command with a simple collation. This should be single-shard.
- assert.eq(1, coll.find({a: "foo"}).itcount());
- explain = coll.find({a: "foo"}).explain();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a find command on numbers with a non-simple collation. This should be single-shard.
- if (testDB.getMongo().useReadCommands()) {
- assert.eq(1, coll.find({a: 100}).collation(caseInsensitive).itcount());
- explain = coll.find({a: 100}).collation(caseInsensitive).explain();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- }
-
- // FindAndModify.
-
- // Sharded findAndModify on strings with non-simple collation should fail, because findAndModify
- // must target a single shard.
- assert.throws(function() {
- coll.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}, collation: caseInsensitive});
- });
- assert.throws(function() {
- coll.explain().findAndModify(
- {query: {a: "foo"}, update: {$set: {b: 1}}, collation: caseInsensitive});
- });
-
- // Sharded findAndModify on strings with simple collation should succeed. This should be
- // single-shard.
- assert.eq("foo", coll.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}}).a);
- explain = coll.explain().findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Sharded findAndModify on numbers with non-simple collation should succeed. This should be
- // single-shard.
- assert.eq(
- 100,
- coll.findAndModify({query: {a: 100}, update: {$set: {b: 1}}, collation: caseInsensitive})
- .a);
- explain = coll.explain().findAndModify(
- {query: {a: 100}, update: {$set: {b: 1}}, collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // MapReduce.
-
- // Test mapReduce on strings with a non-simple collation.
- assert.eq(2,
- assert
- .commandWorked(coll.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: "foo"}, collation: caseInsensitive}))
- .results.length);
-
- // Test mapReduce on strings with a simple collation.
- assert.eq(1,
- assert
- .commandWorked(coll.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: "foo"}}))
- .results.length);
-
- // Remove.
-
- // Test a remove command on strings with non-simple collation. This should be scatter-gather.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.remove({a: "foo"}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(2, writeRes.nRemoved);
- explain = coll.explain().remove({a: "foo"}, {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_FOO));
- assert.writeOK(coll.insert(a_foo));
- }
-
- // Test a remove command on strings with simple collation. This should be single-shard.
- writeRes = coll.remove({a: "foo"});
+ assert.writeOK(coll.insert(a_FOO));
+ assert.writeOK(coll.insert(a_foo));
+}
+
+// Test a remove command on strings with simple collation. This should be single-shard.
+writeRes = coll.remove({a: "foo"});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+explain = coll.explain().remove({a: "foo"});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+assert.writeOK(coll.insert(a_foo));
+
+// Test a remove command on numbers with non-simple collation. This should be single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.remove({a: 100}, {collation: caseInsensitive});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nRemoved);
- explain = coll.explain().remove({a: "foo"});
+ explain = coll.explain().remove({a: 100}, {collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_foo));
-
- // Test a remove command on numbers with non-simple collation. This should be single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.remove({a: 100}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- explain = coll.explain().remove({a: 100}, {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_100));
- }
-
- // A single remove (justOne: true) must be single-shard or an exact-ID query. A query is
- // exact-ID if it contains an equality on _id and either has the collection default collation or
- // _id is not a string/object/array.
-
- // Single remove on string shard key with non-simple collation should fail, because it is not
- // single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeError(coll.remove({a: "foo"}, {justOne: true, collation: caseInsensitive}));
- }
-
- // Single remove on string shard key with simple collation should succeed, because it is
- // single-shard.
- writeRes = coll.remove({a: "foo"}, {justOne: true});
+ assert.writeOK(coll.insert(a_100));
+}
+
+// A single remove (justOne: true) must be single-shard or an exact-ID query. A query is
+// exact-ID if it contains an equality on _id and either has the collection default collation or
+// _id is not a string/object/array.
+
+// Single remove on string shard key with non-simple collation should fail, because it is not
+// single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ assert.writeError(coll.remove({a: "foo"}, {justOne: true, collation: caseInsensitive}));
+}
+
+// Single remove on string shard key with simple collation should succeed, because it is
+// single-shard.
+writeRes = coll.remove({a: "foo"}, {justOne: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+explain = coll.explain().remove({a: "foo"}, {justOne: true});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+assert.writeOK(coll.insert(a_foo));
+
+// Single remove on number shard key with non-simple collation should succeed, because it is
+// single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.remove({a: 100}, {justOne: true, collation: caseInsensitive});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nRemoved);
- explain = coll.explain().remove({a: "foo"}, {justOne: true});
+ explain = coll.explain().remove({a: 100}, {justOne: true, collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_foo));
-
- // Single remove on number shard key with non-simple collation should succeed, because it is
- // single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.remove({a: 100}, {justOne: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- explain = coll.explain().remove({a: 100}, {justOne: true, collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_100));
- }
-
- // Single remove on string _id with non-collection-default collation should fail, because it is
- // not an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeError(coll.remove({_id: "foo"}, {justOne: true, collation: caseInsensitive}));
- }
-
- // Single remove on string _id with collection-default collation should succeed, because it is
- // an exact-ID query.
+ assert.writeOK(coll.insert(a_100));
+}
+
+// Single remove on string _id with non-collection-default collation should fail, because it is
+// not an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
+ assert.writeError(coll.remove({_id: "foo"}, {justOne: true, collation: caseInsensitive}));
+}
+
+// Single remove on string _id with collection-default collation should succeed, because it is
+// an exact-ID query.
+assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+writeRes = coll.remove({_id: "foo"}, {justOne: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+
+// Single remove on string _id with collection-default collation explicitly given should
+// succeed, because it is an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
- writeRes = coll.remove({_id: "foo"}, {justOne: true});
+ writeRes = coll.remove({_id: "foo"}, {justOne: true, collation: {locale: "simple"}});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nRemoved);
+}
- // Single remove on string _id with collection-default collation explicitly given should
- // succeed, because it is an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
- writeRes = coll.remove({_id: "foo"}, {justOne: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- }
-
- // Single remove on number _id with non-collection-default collation should succeed, because it
- // is an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.remove({_id: a_100._id}, {justOne: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- assert.writeOK(coll.insert(a_100));
- }
+// Single remove on number _id with non-collection-default collation should succeed, because it
+// is an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.remove({_id: a_100._id}, {justOne: true, collation: caseInsensitive});
+ assert.writeOK(writeRes);
+ assert.eq(1, writeRes.nRemoved);
+ assert.writeOK(coll.insert(a_100));
+}
- // Update.
-
- // Test an update command on strings with non-simple collation. This should be scatter-gather.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes =
- coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(2, writeRes.nMatched);
- explain = coll.explain().update(
- {a: "foo"}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
- }
+// Update.
- // Test an update command on strings with simple collation. This should be single-shard.
- writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true});
+// Test an update command on strings with non-simple collation. This should be scatter-gather.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
+ assert.writeOK(writeRes);
+ assert.eq(2, writeRes.nMatched);
+ explain = coll.explain().update(
+ {a: "foo"}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
+ assert.commandWorked(explain);
+ assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+}
+
+// Test an update command on strings with simple collation. This should be single-shard.
+writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+explain = coll.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Test an update command on numbers with non-simple collation. This should be single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.update({a: 100}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
- explain = coll.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true});
+ explain =
+ coll.explain().update({a: 100}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Test an update command on numbers with non-simple collation. This should be single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.update({a: 100}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- explain = coll.explain().update(
- {a: 100}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- }
-
- // A single (non-multi) update must be single-shard or an exact-ID query. A query is exact-ID if
- // it contains an equality on _id and either has the collection default collation or _id is not
- // a string/object/array.
-
- // Single update on string shard key with non-simple collation should fail, because it is not
- // single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeError(coll.update({a: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive}));
- }
-
- // Single update on string shard key with simple collation should succeed, because it is
- // single-shard.
- writeRes = coll.update({a: "foo"}, {$set: {b: 1}});
+}
+
+// A single (non-multi) update must be single-shard or an exact-ID query. A query is exact-ID if
+// it contains an equality on _id and either has the collection default collation or _id is not
+// a string/object/array.
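+// As an illustrative sketch of that rule (not an assertion made by this test): a number _id is
+// exact-ID under any collation, while a string _id also needs the collection-default collation,
+// e.g.
+//     coll.update({_id: a_100._id}, {$set: {b: 1}});                           // exact-ID
+//     coll.update({_id: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive});  // not exact-ID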
+
+// Single update on string shard key with non-simple collation should fail, because it is not
+// single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ assert.writeError(coll.update({a: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive}));
+}
+
+// Single update on string shard key with simple collation should succeed, because it is
+// single-shard.
+writeRes = coll.update({a: "foo"}, {$set: {b: 1}});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+explain = coll.explain().update({a: "foo"}, {$set: {b: 1}});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Single update on number shard key with non-simple collation should succeed, because it is
+// single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.update({a: 100}, {$set: {b: 1}}, {collation: caseInsensitive});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
- explain = coll.explain().update({a: "foo"}, {$set: {b: 1}});
+ explain = coll.explain().update({a: 100}, {$set: {b: 1}}, {collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+}
- // Single update on number shard key with non-simple collation should succeed, because it is
- // single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.update({a: 100}, {$set: {b: 1}}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- explain = coll.explain().update({a: 100}, {$set: {b: 1}}, {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- }
-
- // Single update on string _id with non-collection-default collation should fail, because it is
- // not an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
- assert.writeError(coll.update({_id: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive}));
- assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
- }
-
- // Single update on string _id with collection-default collation should succeed, because it is
- // an exact-ID query.
+// Single update on string _id with non-collection-default collation should fail, because it is
+// not an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+ assert.writeError(coll.update({_id: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive}));
+ assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
+}
+
+// Single update on string _id with collection-default collation should succeed, because it is
+// an exact-ID query.
+assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+writeRes = coll.update({_id: "foo"}, {$set: {b: 1}});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
+
+// Single update on string _id with collection-default collation explicitly given should
+// succeed, because it is an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
- writeRes = coll.update({_id: "foo"}, {$set: {b: 1}});
+ writeRes = coll.update({_id: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
+}
- // Single update on string _id with collection-default collation explicitly given should
- // succeed, because it is an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
- writeRes = coll.update({_id: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
- }
-
- // Single update on number _id with non-collection-default collation should succeed, because it
- // is an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.update({_id: a_foo._id}, {$set: {b: 1}}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- }
-
- // Upsert must always be single-shard.
-
- // Upsert on strings with non-simple collation should fail, because it is not single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeError(coll.update(
- {a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive}));
- }
-
- // Upsert on strings with simple collation should succeed, because it is single-shard.
- writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true});
+// Single update on number _id with non-collection-default collation should succeed, because it
+// is an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.update({_id: a_foo._id}, {$set: {b: 1}}, {collation: caseInsensitive});
+ assert.writeOK(writeRes);
+ assert.eq(1, writeRes.nMatched);
+}
+
+// Upsert must always be single-shard.
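+// Illustrative sketch (not asserted at this point): an upsert may insert a new document, so
+// mongos must be able to route it to the single shard that would own it, e.g. an equality on
+// the shard key such as
+//     coll.update({a: 100}, {$set: {b: 1}}, {upsert: true});
+// can be targeted, whereas a case-insensitive equality on a string shard key cannot.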
+
+// Upsert on strings with non-simple collation should fail, because it is not single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ assert.writeError(coll.update(
+ {a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive}));
+}
+
+// Upsert on strings with simple collation should succeed, because it is single-shard.
+writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+explain = coll.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Upsert on numbers with non-simple collation should succeed, because it is single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.update(
+ {a: 100}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
- explain = coll.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true});
+ explain = coll.explain().update(
+ {a: 100}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+}
- // Upsert on numbers with non-simple collation should succeed, because it is single shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.update(
- {a: 100}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- explain = coll.explain().update(
- {a: 100}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- }
-
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/collation_targeting_inherited.js b/jstests/sharding/collation_targeting_inherited.js
index 4c68e23fbc7..676dadbc972 100644
--- a/jstests/sharding/collation_targeting_inherited.js
+++ b/jstests/sharding/collation_targeting_inherited.js
@@ -1,482 +1,486 @@
// Test shard targeting for queries on a collection with a default collation.
(function() {
- "use strict";
-
- const caseInsensitive = {locale: "en_US", strength: 2};
-
- var explain;
- var writeRes;
-
- // Create a cluster with 3 shards.
- var st = new ShardingTest({shards: 3});
- var testDB = st.s.getDB("test");
- assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
-
- // Create a collection with a case-insensitive default collation sharded on {a: 1}.
- var collCaseInsensitive = testDB.getCollection("case_insensitive");
- collCaseInsensitive.drop();
- assert.commandWorked(testDB.createCollection("case_insensitive", {collation: caseInsensitive}));
- assert.commandWorked(collCaseInsensitive.createIndex({a: 1}, {collation: {locale: "simple"}}));
- assert.commandWorked(collCaseInsensitive.createIndex({geo: "2dsphere"}));
- assert.commandWorked(testDB.adminCommand({
- shardCollection: collCaseInsensitive.getFullName(),
- key: {a: 1},
- collation: {locale: "simple"}
- }));
-
- // Split the collection.
- // st.shard0.shardName: { "a" : { "$minKey" : 1 } } -->> { "a" : 10 }
- // st.shard1.shardName: { "a" : 10 } -->> { "a" : "a"}
- // shard0002: { "a" : "a" } -->> { "a" : { "$maxKey" : 1 }}
- assert.commandWorked(
- testDB.adminCommand({split: collCaseInsensitive.getFullName(), middle: {a: 10}}));
- assert.commandWorked(
- testDB.adminCommand({split: collCaseInsensitive.getFullName(), middle: {a: "a"}}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: collCaseInsensitive.getFullName(), find: {a: 1}, to: st.shard0.shardName}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: collCaseInsensitive.getFullName(), find: {a: "FOO"}, to: st.shard1.shardName}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: collCaseInsensitive.getFullName(), find: {a: "foo"}, to: st.shard2.shardName}));
-
- // Put data on each shard.
- // Note that the balancer is off by default, so the chunks will stay put.
- // st.shard0.shardName: {a: 1}
- // st.shard1.shardName: {a: 100}, {a: "FOO"}
- // shard0002: {a: "foo"}
- // Include geo field to test geoNear.
- var a_1 = {_id: 0, a: 1, geo: {type: "Point", coordinates: [0, 0]}};
- var a_100 = {_id: 1, a: 100, geo: {type: "Point", coordinates: [0, 0]}};
- var a_FOO = {_id: 2, a: "FOO", geo: {type: "Point", coordinates: [0, 0]}};
- var a_foo = {_id: 3, a: "foo", geo: {type: "Point", coordinates: [0, 0]}};
- assert.writeOK(collCaseInsensitive.insert(a_1));
- assert.writeOK(collCaseInsensitive.insert(a_100));
- assert.writeOK(collCaseInsensitive.insert(a_FOO));
- assert.writeOK(collCaseInsensitive.insert(a_foo));
-
- // Aggregate.
-
- // Test an aggregate command on strings with a non-simple collation inherited from the
- // collection default. This should be scatter-gather.
- assert.eq(2, collCaseInsensitive.aggregate([{$match: {a: "foo"}}]).itcount());
- explain = collCaseInsensitive.explain().aggregate([{$match: {a: "foo"}}]);
- assert.commandWorked(explain);
- assert.eq(3, Object.keys(explain.shards).length);
-
- // Test an aggregate command with a simple collation. This should be single-shard.
- assert.eq(1,
- collCaseInsensitive.aggregate([{$match: {a: "foo"}}], {collation: {locale: "simple"}})
- .itcount());
- explain = collCaseInsensitive.explain().aggregate([{$match: {a: "foo"}}],
- {collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Test an aggregate command on numbers with a non-simple collation inherited from the
- // collection default. This should be single-shard.
- assert.eq(1, collCaseInsensitive.aggregate([{$match: {a: 100}}]).itcount());
- explain = collCaseInsensitive.explain().aggregate([{$match: {a: 100}}]);
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Aggregate with $geoNear.
- const geoJSONPoint = {type: "Point", coordinates: [0, 0]};
-
- // Test $geoNear with a query on strings with a non-simple collation inherited from the
- // collection default. This should scatter-gather.
- const geoNearStageStringQuery = [{
- $geoNear: {
- near: geoJSONPoint,
- distanceField: "dist",
- spherical: true,
- query: {a: "foo"},
- }
- }];
- assert.eq(2, collCaseInsensitive.aggregate(geoNearStageStringQuery).itcount());
- explain = collCaseInsensitive.explain().aggregate(geoNearStageStringQuery);
- assert.commandWorked(explain);
- assert.eq(3, Object.keys(explain.shards).length);
-
- // Test $geoNear with a query on strings with a simple collation. This should be single-shard.
- assert.eq(
- 1,
- collCaseInsensitive.aggregate(geoNearStageStringQuery, {collation: {locale: "simple"}})
- .itcount());
- explain = collCaseInsensitive.explain().aggregate(geoNearStageStringQuery,
- {collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Test a $geoNear with a query on numbers with a non-simple collation inherited from the
- // collection default. This should be single-shard.
- const geoNearStageNumericalQuery = [{
- $geoNear: {
- near: geoJSONPoint,
- distanceField: "dist",
- spherical: true,
- query: {a: 100},
- }
- }];
- assert.eq(1, collCaseInsensitive.aggregate(geoNearStageNumericalQuery).itcount());
- explain = collCaseInsensitive.explain().aggregate(geoNearStageNumericalQuery);
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Count.
-
- // Test a count command on strings with a non-simple collation inherited from the collection
- // default. This should be scatter-gather.
- assert.eq(2, collCaseInsensitive.find({a: "foo"}).count());
- explain = collCaseInsensitive.explain().find({a: "foo"}).count();
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a count command with a simple collation. This should be single-shard.
- assert.eq(1, collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).count());
- explain = collCaseInsensitive.explain().find({a: "foo"}).collation({locale: "simple"}).count();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a find command on numbers with a non-simple collation inheritied from the collection
- // default. This should be single-shard.
- assert.eq(1, collCaseInsensitive.find({a: 100}).count());
- explain = collCaseInsensitive.explain().find({a: 100}).count();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Distinct.
-
- // Test a distinct command on strings with a non-simple collation inherited from the collection
- // default. This should be scatter-gather.
- assert.eq(2, collCaseInsensitive.distinct("_id", {a: "foo"}).length);
- explain = collCaseInsensitive.explain().distinct("_id", {a: "foo"});
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-
- // Test that deduping respects the collation inherited from the collection default.
- assert.eq(1, collCaseInsensitive.distinct("a", {a: "foo"}).length);
-
- // Test a distinct command with a simple collation. This should be single-shard.
- assert.eq(
- 1, collCaseInsensitive.distinct("_id", {a: "foo"}, {collation: {locale: "simple"}}).length);
- explain =
- collCaseInsensitive.explain().distinct("_id", {a: "foo"}, {collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a distinct command on numbers with a non-simple collation inherited from the collection
- // default. This should be single-shard.
- assert.eq(1, collCaseInsensitive.distinct("_id", {a: 100}).length);
- explain = collCaseInsensitive.explain().distinct("_id", {a: 100});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Find.
-
- // Test a find command on strings with a non-simple collation inherited from the collection
- // default. This should be scatter-gather.
- assert.eq(2, collCaseInsensitive.find({a: "foo"}).itcount());
- explain = collCaseInsensitive.find({a: "foo"}).explain();
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a find command with a simple collation. This should be single-shard.
- if (testDB.getMongo().useReadCommands()) {
- assert.eq(1, collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).itcount());
- explain = collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).explain();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+"use strict";
+
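+// The collection-default collation used throughout this test: strength 2 (secondary) compares
+// base characters and diacritics but ignores case, so "foo", "FOO", and "Foo" compare equal.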
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
+
+var explain;
+var writeRes;
+
+// Create a cluster with 3 shards.
+var st = new ShardingTest({shards: 3});
+var testDB = st.s.getDB("test");
+assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
+
+// Create a collection with a case-insensitive default collation sharded on {a: 1}.
+var collCaseInsensitive = testDB.getCollection("case_insensitive");
+collCaseInsensitive.drop();
+assert.commandWorked(testDB.createCollection("case_insensitive", {collation: caseInsensitive}));
+assert.commandWorked(collCaseInsensitive.createIndex({a: 1}, {collation: {locale: "simple"}}));
+assert.commandWorked(collCaseInsensitive.createIndex({geo: "2dsphere"}));
+assert.commandWorked(testDB.adminCommand({
+ shardCollection: collCaseInsensitive.getFullName(),
+ key: {a: 1},
+ collation: {locale: "simple"}
+}));
+
+// Split the collection.
+// st.shard0.shardName: { "a" : { "$minKey" : 1 } } -->> { "a" : 10 }
+// st.shard1.shardName: { "a" : 10 } -->> { "a" : "a"}
+// st.shard2.shardName: { "a" : "a" } -->> { "a" : { "$maxKey" : 1 }}
+assert.commandWorked(
+ testDB.adminCommand({split: collCaseInsensitive.getFullName(), middle: {a: 10}}));
+assert.commandWorked(
+ testDB.adminCommand({split: collCaseInsensitive.getFullName(), middle: {a: "a"}}));
+assert.commandWorked(testDB.adminCommand(
+ {moveChunk: collCaseInsensitive.getFullName(), find: {a: 1}, to: st.shard0.shardName}));
+assert.commandWorked(testDB.adminCommand(
+ {moveChunk: collCaseInsensitive.getFullName(), find: {a: "FOO"}, to: st.shard1.shardName}));
+assert.commandWorked(testDB.adminCommand(
+ {moveChunk: collCaseInsensitive.getFullName(), find: {a: "foo"}, to: st.shard2.shardName}));
+
+// Put data on each shard.
+// Note that the balancer is off by default, so the chunks will stay put.
+// st.shard0.shardName: {a: 1}
+// st.shard1.shardName: {a: 100}, {a: "FOO"}
+// st.shard2.shardName: {a: "foo"}
+// Include geo field to test geoNear.
+var a_1 = {_id: 0, a: 1, geo: {type: "Point", coordinates: [0, 0]}};
+var a_100 = {_id: 1, a: 100, geo: {type: "Point", coordinates: [0, 0]}};
+var a_FOO = {_id: 2, a: "FOO", geo: {type: "Point", coordinates: [0, 0]}};
+var a_foo = {_id: 3, a: "foo", geo: {type: "Point", coordinates: [0, 0]}};
+assert.writeOK(collCaseInsensitive.insert(a_1));
+assert.writeOK(collCaseInsensitive.insert(a_100));
+assert.writeOK(collCaseInsensitive.insert(a_FOO));
+assert.writeOK(collCaseInsensitive.insert(a_foo));
+
+// Aggregate.
+
+// Test an aggregate command on strings with a non-simple collation inherited from the
+// collection default. This should be scatter-gather.
+assert.eq(2, collCaseInsensitive.aggregate([{$match: {a: "foo"}}]).itcount());
+explain = collCaseInsensitive.explain().aggregate([{$match: {a: "foo"}}]);
+assert.commandWorked(explain);
+assert.eq(3, Object.keys(explain.shards).length);
+
+// Test an aggregate command with a simple collation. This should be single-shard.
+assert.eq(1,
+ collCaseInsensitive.aggregate([{$match: {a: "foo"}}], {collation: {locale: "simple"}})
+ .itcount());
+explain = collCaseInsensitive.explain().aggregate([{$match: {a: "foo"}}],
+ {collation: {locale: "simple"}});
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Test an aggregate command on numbers with a non-simple collation inherited from the
+// collection default. This should be single-shard.
+assert.eq(1, collCaseInsensitive.aggregate([{$match: {a: 100}}]).itcount());
+explain = collCaseInsensitive.explain().aggregate([{$match: {a: 100}}]);
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Aggregate with $geoNear.
+const geoJSONPoint = {
+ type: "Point",
+ coordinates: [0, 0]
+};
+
+// Test $geoNear with a query on strings with a non-simple collation inherited from the
+// collection default. This should be scatter-gather.
+const geoNearStageStringQuery = [{
+ $geoNear: {
+ near: geoJSONPoint,
+ distanceField: "dist",
+ spherical: true,
+ query: {a: "foo"},
}
-
- // Test a find command on numbers with a non-simple collation inherited from the collection
- // default. This should be single-shard.
- assert.eq(1, collCaseInsensitive.find({a: 100}).itcount());
- explain = collCaseInsensitive.find({a: 100}).explain();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // FindAndModify.
-
- // Sharded findAndModify on strings with non-simple collation inherited from the collection
- // default should fail, because findAndModify must target a single shard.
- assert.throws(function() {
- collCaseInsensitive.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}});
- });
- assert.throws(function() {
- collCaseInsensitive.explain().findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}});
- });
-
- // Sharded findAndModify on strings with simple collation should succeed. This should be
- // single-shard.
- assert.eq("foo",
- collCaseInsensitive
- .findAndModify(
- {query: {a: "foo"}, update: {$set: {b: 1}}, collation: {locale: "simple"}})
- .a);
- explain = collCaseInsensitive.explain().findAndModify(
- {query: {a: "foo"}, update: {$set: {b: 1}}, collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Sharded findAndModify on numbers with non-simple collation inherited from collection default
- // should succeed. This should be single-shard.
- assert.eq(100, collCaseInsensitive.findAndModify({query: {a: 100}, update: {$set: {b: 1}}}).a);
- explain =
- collCaseInsensitive.explain().findAndModify({query: {a: 100}, update: {$set: {b: 1}}});
+}];
+assert.eq(2, collCaseInsensitive.aggregate(geoNearStageStringQuery).itcount());
+explain = collCaseInsensitive.explain().aggregate(geoNearStageStringQuery);
+assert.commandWorked(explain);
+assert.eq(3, Object.keys(explain.shards).length);
+
+// Test $geoNear with a query on strings with a simple collation. This should be single-shard.
+assert.eq(1,
+ collCaseInsensitive.aggregate(geoNearStageStringQuery, {collation: {locale: "simple"}})
+ .itcount());
+explain = collCaseInsensitive.explain().aggregate(geoNearStageStringQuery,
+ {collation: {locale: "simple"}});
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Test a $geoNear with a query on numbers with a non-simple collation inherited from the
+// collection default. This should be single-shard.
+const geoNearStageNumericalQuery = [{
+ $geoNear: {
+ near: geoJSONPoint,
+ distanceField: "dist",
+ spherical: true,
+ query: {a: 100},
+ }
+}];
+assert.eq(1, collCaseInsensitive.aggregate(geoNearStageNumericalQuery).itcount());
+explain = collCaseInsensitive.explain().aggregate(geoNearStageNumericalQuery);
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Count.
+
+// Test a count command on strings with a non-simple collation inherited from the collection
+// default. This should be scatter-gather.
+assert.eq(2, collCaseInsensitive.find({a: "foo"}).count());
+explain = collCaseInsensitive.explain().find({a: "foo"}).count();
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a count command with a simple collation. This should be single-shard.
+assert.eq(1, collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).count());
+explain = collCaseInsensitive.explain().find({a: "foo"}).collation({locale: "simple"}).count();
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a count command on numbers with a non-simple collation inherited from the collection
+// default. This should be single-shard.
+assert.eq(1, collCaseInsensitive.find({a: 100}).count());
+explain = collCaseInsensitive.explain().find({a: 100}).count();
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Distinct.
+
+// Test a distinct command on strings with a non-simple collation inherited from the collection
+// default. This should be scatter-gather.
+assert.eq(2, collCaseInsensitive.distinct("_id", {a: "foo"}).length);
+explain = collCaseInsensitive.explain().distinct("_id", {a: "foo"});
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+
+// Test that deduping respects the collation inherited from the collection default.
+assert.eq(1, collCaseInsensitive.distinct("a", {a: "foo"}).length);
+
+// Test a distinct command with a simple collation. This should be single-shard.
+assert.eq(1,
+ collCaseInsensitive.distinct("_id", {a: "foo"}, {collation: {locale: "simple"}}).length);
+explain =
+ collCaseInsensitive.explain().distinct("_id", {a: "foo"}, {collation: {locale: "simple"}});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a distinct command on numbers with a non-simple collation inherited from the collection
+// default. This should be single-shard.
+assert.eq(1, collCaseInsensitive.distinct("_id", {a: 100}).length);
+explain = collCaseInsensitive.explain().distinct("_id", {a: 100});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Find.
+
+// Test a find command on strings with a non-simple collation inherited from the collection
+// default. This should be scatter-gather.
+assert.eq(2, collCaseInsensitive.find({a: "foo"}).itcount());
+explain = collCaseInsensitive.find({a: "foo"}).explain();
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a find command with a simple collation. This should be single-shard.
+if (testDB.getMongo().useReadCommands()) {
+ assert.eq(1, collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).itcount());
+ explain = collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).explain();
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // MapReduce.
-
- // Test mapReduce on strings with a non-simple collation inherited from collection default.
- assert.eq(2,
- assert
- .commandWorked(collCaseInsensitive.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: "foo"}}))
- .results.length);
-
- // Test mapReduce on strings with a simple collation.
- assert.eq(1,
- assert
- .commandWorked(collCaseInsensitive.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: "foo"}, collation: {locale: "simple"}}))
- .results.length);
-
- // Remove.
-
- // Test a remove command on strings with non-simple collation inherited from collection default.
- // This should be scatter-gather.
- writeRes = collCaseInsensitive.remove({a: "foo"});
- assert.writeOK(writeRes);
- assert.eq(2, writeRes.nRemoved);
- explain = collCaseInsensitive.explain().remove({a: "foo"});
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(collCaseInsensitive.insert(a_FOO));
- assert.writeOK(collCaseInsensitive.insert(a_foo));
-
- // Test a remove command on strings with simple collation. This should be single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = collCaseInsensitive.remove({a: "foo"}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- explain = collCaseInsensitive.explain().remove({a: "foo"}, {collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(collCaseInsensitive.insert(a_foo));
- }
-
- // Test a remove command on numbers with non-simple collation inherited from collection default.
- // This should be single-shard.
- writeRes = collCaseInsensitive.remove({a: 100});
+}
+
+// Test a find command on numbers with a non-simple collation inherited from the collection
+// default. This should be single-shard.
+assert.eq(1, collCaseInsensitive.find({a: 100}).itcount());
+explain = collCaseInsensitive.find({a: 100}).explain();
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// FindAndModify.
+
+// Sharded findAndModify on strings with non-simple collation inherited from the collection
+// default should fail, because findAndModify must target a single shard.
+assert.throws(function() {
+ collCaseInsensitive.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}});
+});
+assert.throws(function() {
+ collCaseInsensitive.explain().findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}});
+});
+
+// Sharded findAndModify on strings with simple collation should succeed. This should be
+// single-shard.
+assert.eq(
+ "foo",
+ collCaseInsensitive
+ .findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}, collation: {locale: "simple"}})
+ .a);
+explain = collCaseInsensitive.explain().findAndModify(
+ {query: {a: "foo"}, update: {$set: {b: 1}}, collation: {locale: "simple"}});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Sharded findAndModify on numbers with non-simple collation inherited from collection default
+// should succeed. This should be single-shard.
+assert.eq(100, collCaseInsensitive.findAndModify({query: {a: 100}, update: {$set: {b: 1}}}).a);
+explain = collCaseInsensitive.explain().findAndModify({query: {a: 100}, update: {$set: {b: 1}}});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// MapReduce.
+
+// Test mapReduce on strings with a non-simple collation inherited from collection default.
+assert.eq(2,
+ assert
+ .commandWorked(collCaseInsensitive.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: "foo"}}))
+ .results.length);
+
+// Test mapReduce on strings with a simple collation.
+assert.eq(1,
+ assert
+ .commandWorked(collCaseInsensitive.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: "foo"}, collation: {locale: "simple"}}))
+ .results.length);
+
+// Remove.
+
+// Test a remove command on strings with non-simple collation inherited from collection default.
+// This should be scatter-gather.
+writeRes = collCaseInsensitive.remove({a: "foo"});
+assert.writeOK(writeRes);
+assert.eq(2, writeRes.nRemoved);
+explain = collCaseInsensitive.explain().remove({a: "foo"});
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+assert.writeOK(collCaseInsensitive.insert(a_FOO));
+assert.writeOK(collCaseInsensitive.insert(a_foo));
+
+// Test a remove command on strings with simple collation. This should be single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = collCaseInsensitive.remove({a: "foo"}, {collation: {locale: "simple"}});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nRemoved);
- explain = collCaseInsensitive.explain().remove({a: 100});
+ explain = collCaseInsensitive.explain().remove({a: "foo"}, {collation: {locale: "simple"}});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(collCaseInsensitive.insert(a_100));
-
- // A single remove (justOne: true) must be single-shard or an exact-ID query. A query is
- // exact-ID if it contains an equality on _id and either has the collection default collation or
- // _id is not a string/object/array.
-
- // Single remove on string shard key with non-simple collation inherited from collection default
- // should fail, because it is not single-shard.
- assert.writeError(collCaseInsensitive.remove({a: "foo"}, {justOne: true}));
-
- // Single remove on string shard key with simple collation should succeed, because it is
- // single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes =
- collCaseInsensitive.remove({a: "foo"}, {justOne: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- explain = collCaseInsensitive.explain().remove(
- {a: "foo"}, {justOne: true, collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(collCaseInsensitive.insert(a_foo));
- }
-
- // Single remove on number shard key with non-simple collation inherited from collection default
- // should succeed, because it is single-shard.
- writeRes = collCaseInsensitive.remove({a: 100}, {justOne: true});
+ assert.writeOK(collCaseInsensitive.insert(a_foo));
+}
+
+// Test a remove command on numbers with non-simple collation inherited from collection default.
+// This should be single-shard.
+writeRes = collCaseInsensitive.remove({a: 100});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+explain = collCaseInsensitive.explain().remove({a: 100});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+assert.writeOK(collCaseInsensitive.insert(a_100));
+
+// A single remove (justOne: true) must be single-shard or an exact-ID query. A query is
+// exact-ID if it contains an equality on _id and either has the collection default collation or
+// _id is not a string/object/array.
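+// Illustrative sketch (not asserted at this point): because this collection's default collation
+// is the case-insensitive one, a remove such as
+//     collCaseInsensitive.remove({_id: "foo"}, {justOne: true});
+// is an exact-ID query, while the same query with {collation: {locale: "simple"}} is not; this
+// is the reverse of the simple-collation collection in collation_targeting.js.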
+
+// Single remove on string shard key with non-simple collation inherited from collection default
+// should fail, because it is not single-shard.
+assert.writeError(collCaseInsensitive.remove({a: "foo"}, {justOne: true}));
+
+// Single remove on string shard key with simple collation should succeed, because it is
+// single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes =
+ collCaseInsensitive.remove({a: "foo"}, {justOne: true, collation: {locale: "simple"}});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nRemoved);
- explain = collCaseInsensitive.explain().remove({a: 100}, {justOne: true});
+ explain = collCaseInsensitive.explain().remove({a: "foo"},
+ {justOne: true, collation: {locale: "simple"}});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(collCaseInsensitive.insert(a_100));
-
- // Single remove on string _id with non-collection-default collation should fail, because it is
- // not an exact-ID query.
- assert.writeError(
- collCaseInsensitive.remove({_id: "foo"}, {justOne: true, collation: {locale: "simple"}}));
-
- // Single remove on string _id with collection-default collation should succeed, because it is
- // an exact-ID query.
+ assert.writeOK(collCaseInsensitive.insert(a_foo));
+}
+
+// Single remove on number shard key with non-simple collation inherited from collection default
+// should succeed, because it is single-shard.
+writeRes = collCaseInsensitive.remove({a: 100}, {justOne: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+explain = collCaseInsensitive.explain().remove({a: 100}, {justOne: true});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+assert.writeOK(collCaseInsensitive.insert(a_100));
+
+// Single remove on string _id with non-collection-default collation should fail, because it is
+// not an exact-ID query.
+assert.writeError(
+ collCaseInsensitive.remove({_id: "foo"}, {justOne: true, collation: {locale: "simple"}}));
+
+// Single remove on string _id with collection-default collation should succeed, because it is
+// an exact-ID query.
+assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
+writeRes = collCaseInsensitive.remove({_id: "foo"}, {justOne: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+
+// Single remove on string _id with collection-default collation explicitly given should
+// succeed, because it is an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
- writeRes = collCaseInsensitive.remove({_id: "foo"}, {justOne: true});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
-
- // Single remove on string _id with collection-default collation explicitly given should
- // succeed, because it is an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
- writeRes =
- collCaseInsensitive.remove({_id: "foo"}, {justOne: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- }
-
- // Single remove on number _id with non-collection-default collation should succeed, because it
- // is an exact-ID query.
- writeRes = collCaseInsensitive.remove({_id: a_100._id},
- {justOne: true, collation: {locale: "simple"}});
+ writeRes =
+ collCaseInsensitive.remove({_id: "foo"}, {justOne: true, collation: caseInsensitive});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nRemoved);
- assert.writeOK(collCaseInsensitive.insert(a_100));
-
- // Update.
-
- // Test an update command on strings with non-simple collation inherited from collection
- // default. This should be scatter-gather.
- writeRes = collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {multi: true});
- assert.writeOK(writeRes);
- assert.eq(2, writeRes.nMatched);
- explain = collCaseInsensitive.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true});
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-
- // Test an update command on strings with simple collation. This should be single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = collCaseInsensitive.update(
- {a: "foo"}, {$set: {b: 1}}, {multi: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- explain = collCaseInsensitive.explain().update(
- {a: "foo"}, {$set: {b: 1}}, {multi: true, collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- }
-
- // Test an update command on numbers with non-simple collation inherited from collection
- // default. This should be single-shard.
- writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}}, {multi: true});
+}
+
+// Single remove on number _id with non-collection-default collation should succeed, because it
+// is an exact-ID query.
+writeRes =
+ collCaseInsensitive.remove({_id: a_100._id}, {justOne: true, collation: {locale: "simple"}});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+assert.writeOK(collCaseInsensitive.insert(a_100));
+
+// Update.
+
+// Test an update command on strings with non-simple collation inherited from collection
+// default. This should be scatter-gather.
+writeRes = collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {multi: true});
+assert.writeOK(writeRes);
+assert.eq(2, writeRes.nMatched);
+explain = collCaseInsensitive.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true});
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+
+// Test an update command on strings with simple collation. This should be single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = collCaseInsensitive.update(
+ {a: "foo"}, {$set: {b: 1}}, {multi: true, collation: {locale: "simple"}});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
- explain = collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}}, {multi: true});
+ explain = collCaseInsensitive.explain().update(
+ {a: "foo"}, {$set: {b: 1}}, {multi: true, collation: {locale: "simple"}});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // A single (non-multi) update must be single-shard or an exact-ID query. A query is exact-ID if
- // it
- // contains an equality on _id and either has the collection default collation or _id is not a
- // string/object/array.
-
- // Single update on string shard key with non-simple collation inherited from collection default
- // should fail, because it is not single-shard.
- assert.writeError(collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}));
-
- // Single update on string shard key with simple collation should succeed, because it is
- // single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes =
- collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- explain = collCaseInsensitive.explain().update(
- {a: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- }
-
- // Single update on number shard key with non-simple collation inherited from collation default
- // should succeed, because it is single-shard.
- writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}});
+}
+
+// Test an update command on numbers with non-simple collation inherited from collection
+// default. This should be single-shard.
+writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}}, {multi: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+explain = collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}}, {multi: true});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// A single (non-multi) update must be single-shard or an exact-ID query. A query is exact-ID if
+// it contains an equality on _id and either has the collection default collation or _id is not
+// a string/object/array.
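+// Relative to collation_targeting.js the roles are reversed here: {_id: "foo"} with no
+// collation option (i.e. the case-insensitive default) is exact-ID, while explicitly passing
+// {collation: {locale: "simple"}} makes the same query non-exact-ID, as the cases below
+// exercise.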
+
+// Single update on string shard key with non-simple collation inherited from collection default
+// should fail, because it is not single-shard.
+assert.writeError(collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}));
+
+// Single update on string shard key with simple collation should succeed, because it is
+// single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes =
+ collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
- explain = collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}});
+ explain = collCaseInsensitive.explain().update(
+ {a: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Single update on string _id with non-collection-default collation should fail, because it is
- // not an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeError(collCaseInsensitive.update(
- {_id: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}}));
- }
-
- // Single update on string _id with collection-default collation should succeed, because it is
- // an exact-ID query.
+}
+
+// Single update on number shard key with non-simple collation inherited from collection default
+// should succeed, because it is single-shard.
+writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+explain = collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Single update on string _id with non-collection-default collation should fail, because it is
+// not an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
+ assert.writeError(
+ collCaseInsensitive.update({_id: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}}));
+}
+
+// Single update on string _id with collection-default collation should succeed, because it is
+// an exact-ID query.
+assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
+writeRes = collCaseInsensitive.update({_id: "foo"}, {$set: {b: 1}});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+assert.writeOK(collCaseInsensitive.remove({_id: "foo"}, {justOne: true}));
+
+// Single update on string _id with collection-default collation explicitly given should
+// succeed, because it is an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
- writeRes = collCaseInsensitive.update({_id: "foo"}, {$set: {b: 1}});
+ writeRes =
+ collCaseInsensitive.update({_id: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
assert.writeOK(collCaseInsensitive.remove({_id: "foo"}, {justOne: true}));
+}
- // Single update on string _id with collection-default collation explicitly given should
- // succeed, because it is an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
- writeRes =
- collCaseInsensitive.update({_id: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- assert.writeOK(collCaseInsensitive.remove({_id: "foo"}, {justOne: true}));
- }
-
- // Single update on number _id with non-collection-default collation inherited from collection
- // default should succeed, because it is an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = collCaseInsensitive.update(
- {_id: a_foo._id}, {$set: {b: 1}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- }
+// Single update on number _id with non-collection-default collation should succeed, because it
+// is an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = collCaseInsensitive.update(
+ {_id: a_foo._id}, {$set: {b: 1}}, {collation: {locale: "simple"}});
+ assert.writeOK(writeRes);
+ assert.eq(1, writeRes.nMatched);
+}
- // Upsert must always be single-shard.
+// Upsert must always be single-shard.
- // Upsert on strings with non-simple collation inherited from collection default should fail,
- // because it is not single-shard.
- assert.writeError(
- collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true}));
-
- // Upsert on strings with simple collation should succeed, because it is single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = collCaseInsensitive.update(
- {a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- explain = collCaseInsensitive.explain().update(
- {a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- }
+// Upsert on strings with non-simple collation inherited from collection default should fail,
+// because it is not single-shard.
+assert.writeError(
+ collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true}));
- // Upsert on numbers with non-simple collation inherited from collection default should succeed,
- // because it is single-shard.
- writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}}, {multi: true, upsert: true});
+// Upsert on strings with simple collation should succeed, because it is single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = collCaseInsensitive.update(
+ {a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: {locale: "simple"}});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
- explain =
- collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}}, {multi: true, upsert: true});
+ explain = collCaseInsensitive.explain().update(
+ {a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: {locale: "simple"}});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- st.stop();
+}
+
+// Upsert on numbers with non-simple collation inherited from collection default should succeed,
+// because it is single-shard.
+writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}}, {multi: true, upsert: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+explain =
+ collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}}, {multi: true, upsert: true});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+st.stop();
})();
diff --git a/jstests/sharding/commands_that_write_accept_wc_configRS.js b/jstests/sharding/commands_that_write_accept_wc_configRS.js
index aac64734c7f..95a84f32532 100644
--- a/jstests/sharding/commands_that_write_accept_wc_configRS.js
+++ b/jstests/sharding/commands_that_write_accept_wc_configRS.js
@@ -15,217 +15,216 @@ load('jstests/libs/write_concern_util.js');
load('jstests/multiVersion/libs/auth_helpers.js');
(function() {
- "use strict";
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- var st = new ShardingTest({
- // Set priority of secondaries to zero to prevent spurious elections.
- shards: {
- rs0: {
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- },
- rs1: {
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- }
- },
- configReplSetTestOptions: {settings: {chainingAllowed: false}},
- mongos: 1
- });
-
- var mongos = st.s;
- var dbName = "wc-test-configRS";
- var db = mongos.getDB(dbName);
- var adminDB = mongos.getDB('admin');
- // A database connection on a local shard, rather than through the mongos.
- var localDB = st.shard0.getDB('localWCTest');
- var collName = 'leaves';
- var coll = db[collName];
- var counter = 0;
-
- function dropTestData() {
- st.configRS.awaitReplication();
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
- db.dropUser('username');
- db.dropUser('user1');
- localDB.dropUser('user2');
- assert(!db.auth("username", "password"), "auth should have failed");
- getNewDB();
- }
-
- // We get new databases because we do not want to reuse dropped databases that may be in a
- // bad state. This test calls dropDatabase when config server secondary nodes are down, so the
- // command fails after only the database metadata is dropped from the config servers, but the
- // data on the shards still remains. This makes future operations, such as moveChunk, fail.
- function getNewDB() {
- db = mongos.getDB(dbName + counter);
- counter++;
- coll = db[collName];
- }
-
- // Commands in 'commands' will accept any valid writeConcern.
- var commands = [];
-
- commands.push({
- req: {createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles},
- setupFunc: function() {},
- confirmFunc: function() {
- assert(db.auth("username", "password"), "auth failed");
- },
- requiresMajority: true,
- runsOnShards: false,
- failsOnShards: false,
- admin: false
- });
+"use strict";
- commands.push({
- req: {updateUser: 'username', pwd: 'password2', roles: jsTest.basicUserRoles},
- setupFunc: function() {
- db.runCommand({createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles});
- },
- confirmFunc: function() {
- assert(!db.auth("username", "password"), "auth should have failed");
- assert(db.auth("username", "password2"), "auth failed");
- },
- requiresMajority: true,
- runsOnShards: false,
- admin: false
- });
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
- commands.push({
- req: {dropUser: 'tempUser'},
- setupFunc: function() {
- db.runCommand({createUser: 'tempUser', pwd: 'password', roles: jsTest.basicUserRoles});
- assert(db.auth("tempUser", "password"), "auth failed");
- },
- confirmFunc: function() {
- assert(!db.auth("tempUser", "password"), "auth should have failed");
+var st = new ShardingTest({
+ // Set priority of secondaries to zero to prevent spurious elections.
+ shards: {
+ rs0: {
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
},
- requiresMajority: true,
- runsOnShards: false,
- failsOnShards: false,
- admin: false
- });
-
- function testInvalidWriteConcern(wc, cmd) {
- if (wc.w === 2 && !cmd.requiresMajority) {
- return;
+ rs1: {
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
}
- cmd.req.writeConcern = wc;
- jsTest.log("Testing " + tojson(cmd.req));
-
- dropTestData();
- cmd.setupFunc();
- var res = runCommandCheckAdmin(db, cmd);
- assert.commandFailed(res);
- assert(!res.writeConcernError,
- 'bad writeConcern on config server had writeConcernError. ' +
- tojson(res.writeConcernError));
+ },
+ configReplSetTestOptions: {settings: {chainingAllowed: false}},
+ mongos: 1
+});
+
+var mongos = st.s;
+var dbName = "wc-test-configRS";
+var db = mongos.getDB(dbName);
+var adminDB = mongos.getDB('admin');
+// A database connection on a local shard, rather than through the mongos.
+var localDB = st.shard0.getDB('localWCTest');
+var collName = 'leaves';
+var coll = db[collName];
+var counter = 0;
+
+function dropTestData() {
+ st.configRS.awaitReplication();
+ st.rs0.awaitReplication();
+ st.rs1.awaitReplication();
+ db.dropUser('username');
+ db.dropUser('user1');
+ localDB.dropUser('user2');
+ assert(!db.auth("username", "password"), "auth should have failed");
+ getNewDB();
+}
+
+// We get new databases because we do not want to reuse dropped databases that may be in a
+// bad state. This test calls dropDatabase when config server secondary nodes are down, so the
+// command fails after only the database metadata is dropped from the config servers, but the
+// data on the shards still remains. This makes future operations, such as moveChunk, fail.
+function getNewDB() {
+ db = mongos.getDB(dbName + counter);
+ counter++;
+ coll = db[collName];
+}
+
+// Commands in 'commands' will accept any valid writeConcern.
+var commands = [];
+
+commands.push({
+ req: {createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles},
+ setupFunc: function() {},
+ confirmFunc: function() {
+ assert(db.auth("username", "password"), "auth failed");
+ },
+ requiresMajority: true,
+ runsOnShards: false,
+ failsOnShards: false,
+ admin: false
+});
+
+commands.push({
+ req: {updateUser: 'username', pwd: 'password2', roles: jsTest.basicUserRoles},
+ setupFunc: function() {
+ db.runCommand({createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles});
+ },
+ confirmFunc: function() {
+ assert(!db.auth("username", "password"), "auth should have failed");
+ assert(db.auth("username", "password2"), "auth failed");
+ },
+ requiresMajority: true,
+ runsOnShards: false,
+ admin: false
+});
+
+commands.push({
+ req: {dropUser: 'tempUser'},
+ setupFunc: function() {
+ db.runCommand({createUser: 'tempUser', pwd: 'password', roles: jsTest.basicUserRoles});
+ assert(db.auth("tempUser", "password"), "auth failed");
+ },
+ confirmFunc: function() {
+ assert(!db.auth("tempUser", "password"), "auth should have failed");
+ },
+ requiresMajority: true,
+ runsOnShards: false,
+ failsOnShards: false,
+ admin: false
+});
+
+function testInvalidWriteConcern(wc, cmd) {
+ if (wc.w === 2 && !cmd.requiresMajority) {
+ return;
}
-
- function runCommandFailOnShardsPassOnConfigs(cmd) {
- var req = cmd.req;
- var res;
- // This command is run on the shards in addition to the config servers.
- if (cmd.runsOnShards) {
- if (cmd.failsOnShards) {
- // This command fails when there is a writeConcernError on the shards.
- // We set the timeout high enough that the command should not time out against the
- // config server, but not exorbitantly high, because it will always time out against
- // shards and so will increase the runtime of this test.
- req.writeConcern.wtimeout = 15 * 1000;
- res = runCommandCheckAdmin(db, cmd);
- restartReplicationOnAllShards(st);
- assert.commandFailed(res);
- assert(
- !res.writeConcernError,
- 'command on config servers with a paused replicaset had writeConcernError: ' +
- tojson(res));
- } else {
- // This command passes and returns a writeConcernError when there is a
- // writeConcernError on the shards.
- // We set the timeout high enough that the command should not time out against the
- // config server, but not exorbitantly high, because it will always time out against
- // shards and so will increase the runtime of this test.
- req.writeConcern.wtimeout = 15 * 1000;
- res = runCommandCheckAdmin(db, cmd);
- restartReplicationOnAllShards(st);
- assert.commandWorked(res);
- cmd.confirmFunc();
- assertWriteConcernError(res);
- }
- } else {
- // This command is only run on the config servers and so should pass when shards are
- // not replicating.
+ cmd.req.writeConcern = wc;
+ jsTest.log("Testing " + tojson(cmd.req));
+
+ dropTestData();
+ cmd.setupFunc();
+ var res = runCommandCheckAdmin(db, cmd);
+ assert.commandFailed(res);
+ assert(!res.writeConcernError,
+ 'bad writeConcern on config server had writeConcernError. ' +
+ tojson(res.writeConcernError));
+}
+
+function runCommandFailOnShardsPassOnConfigs(cmd) {
+ var req = cmd.req;
+ var res;
+ // This command is run on the shards in addition to the config servers.
+ if (cmd.runsOnShards) {
+ if (cmd.failsOnShards) {
+ // This command fails when there is a writeConcernError on the shards.
+ // We set the timeout high enough that the command should not time out against the
+ // config server, but not exorbitantly high, because it will always time out against
+ // shards and so will increase the runtime of this test.
+ req.writeConcern.wtimeout = 15 * 1000;
res = runCommandCheckAdmin(db, cmd);
restartReplicationOnAllShards(st);
- assert.commandWorked(res);
- cmd.confirmFunc();
+ assert.commandFailed(res);
assert(!res.writeConcernError,
'command on config servers with a paused replicaset had writeConcernError: ' +
tojson(res));
+ } else {
+ // This command passes and returns a writeConcernError when there is a
+ // writeConcernError on the shards.
+ // We set the timeout high enough that the command should not time out against the
+ // config server, but not exorbitantly high, because it will always time out against
+ // shards and so will increase the runtime of this test.
+ req.writeConcern.wtimeout = 15 * 1000;
+ res = runCommandCheckAdmin(db, cmd);
+ restartReplicationOnAllShards(st);
+ assert.commandWorked(res);
+ cmd.confirmFunc();
+ assertWriteConcernError(res);
}
- }
-
- function testValidWriteConcern(wc, cmd) {
- var req = cmd.req;
- var setupFunc = cmd.setupFunc;
- var confirmFunc = cmd.confirmFunc;
-
- req.writeConcern = wc;
- jsTest.log("Testing " + tojson(req));
-
- dropTestData();
- setupFunc();
-
- // Command with a full cluster should succeed.
- var res = runCommandCheckAdmin(db, cmd);
- assert.commandWorked(res);
- assert(!res.writeConcernError,
- 'command on a full cluster had writeConcernError: ' + tojson(res));
- confirmFunc();
-
- dropTestData();
- setupFunc();
- // Stop replication at all shard secondaries.
- stopReplicationOnSecondariesOfAllShards(st);
-
- // Command is running on full config server replica set but a majority of a shard's
- // nodes are down.
- runCommandFailOnShardsPassOnConfigs(cmd);
-
- dropTestData();
- setupFunc();
- // Stop replication at all config server secondaries and all shard secondaries.
- stopReplicationOnSecondariesOfAllShards(st);
- st.configRS.awaitReplication();
- stopReplicationOnSecondaries(st.configRS);
-
- // Command should fail after two config servers are not replicating.
- req.writeConcern.wtimeout = 3000;
+ } else {
+ // This command is only run on the config servers and so should pass when shards are
+ // not replicating.
res = runCommandCheckAdmin(db, cmd);
restartReplicationOnAllShards(st);
- assert.commandFailed(res);
+ assert.commandWorked(res);
+ cmd.confirmFunc();
assert(!res.writeConcernError,
'command on config servers with a paused replicaset had writeConcernError: ' +
tojson(res));
}
-
- var majorityWC = {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS};
-
- // Config server commands require w: majority writeConcerns.
- var nonMajorityWCs = [{w: 'invalid'}, {w: 2}];
-
- commands.forEach(function(cmd) {
- nonMajorityWCs.forEach(function(wc) {
- testInvalidWriteConcern(wc, cmd);
- });
- testValidWriteConcern(majorityWC, cmd);
+}
+
+function testValidWriteConcern(wc, cmd) {
+ var req = cmd.req;
+ var setupFunc = cmd.setupFunc;
+ var confirmFunc = cmd.confirmFunc;
+
+ req.writeConcern = wc;
+ jsTest.log("Testing " + tojson(req));
+
+ dropTestData();
+ setupFunc();
+
+ // Command with a full cluster should succeed.
+ var res = runCommandCheckAdmin(db, cmd);
+ assert.commandWorked(res);
+ assert(!res.writeConcernError,
+ 'command on a full cluster had writeConcernError: ' + tojson(res));
+ confirmFunc();
+
+ dropTestData();
+ setupFunc();
+ // Stop replication at all shard secondaries.
+ stopReplicationOnSecondariesOfAllShards(st);
+
+ // Command is running on full config server replica set but a majority of a shard's
+ // nodes are down.
+ runCommandFailOnShardsPassOnConfigs(cmd);
+
+ dropTestData();
+ setupFunc();
+ // Stop replication at all config server secondaries and all shard secondaries.
+ stopReplicationOnSecondariesOfAllShards(st);
+ st.configRS.awaitReplication();
+ stopReplicationOnSecondaries(st.configRS);
+
+ // Command should fail after two config servers are not replicating.
+ req.writeConcern.wtimeout = 3000;
+ res = runCommandCheckAdmin(db, cmd);
+ restartReplicationOnAllShards(st);
+ assert.commandFailed(res);
+ assert(
+ !res.writeConcernError,
+ 'command on config servers with a paused replicaset had writeConcernError: ' + tojson(res));
+}
+
+var majorityWC = {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS};
+
+// Config server commands require w: majority writeConcerns.
+var nonMajorityWCs = [{w: 'invalid'}, {w: 2}];
+
+commands.forEach(function(cmd) {
+ nonMajorityWCs.forEach(function(wc) {
+ testInvalidWriteConcern(wc, cmd);
});
+ testValidWriteConcern(majorityWC, cmd);
+});
- st.stop();
+st.stop();
})();
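The command descriptors in the file above all share one shape: a req document, a setupFunc, a confirmFunc, and flags telling the harness whether the command also runs on the shards and whether it requires a majority write concern. As a minimal sketch of that shape (a hypothetical descriptor, assuming the same db handle and jsTest.basicUserRoles used above; dropAllUsersFromDatabase is chosen purely for illustration):

    commands.push({
        req: {dropAllUsersFromDatabase: 1},
        setupFunc: function() {
            db.runCommand({createUser: 'tmpUser', pwd: 'password', roles: jsTest.basicUserRoles});
        },
        confirmFunc: function() {
            // All users on the database were dropped, so auth must now fail.
            assert(!db.auth('tmpUser', 'password'), 'auth should have failed');
        },
        requiresMajority: true,
        runsOnShards: false,
        failsOnShards: false,
        admin: false
    });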
diff --git a/jstests/sharding/commands_that_write_accept_wc_shards.js b/jstests/sharding/commands_that_write_accept_wc_shards.js
index 80ac26b36e7..ba791154b44 100644
--- a/jstests/sharding/commands_that_write_accept_wc_shards.js
+++ b/jstests/sharding/commands_that_write_accept_wc_shards.js
@@ -12,368 +12,368 @@
load('jstests/libs/write_concern_util.js');
(function() {
- "use strict";
- var st = new ShardingTest({
- // Set priority of secondaries to zero to prevent spurious elections.
- shards: {
- rs0: {
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- },
- rs1: {
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- }
- },
- configReplSetTestOptions: {settings: {chainingAllowed: false}},
- mongos: 1,
- });
+"use strict";
+var st = new ShardingTest({
+ // Set priority of secondaries to zero to prevent spurious elections.
+ shards: {
+ rs0: {
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
+ },
+ rs1: {
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
+ }
+ },
+ configReplSetTestOptions: {settings: {chainingAllowed: false}},
+ mongos: 1,
+});
- var mongos = st.s;
- var dbName = "wc-test-shards";
- var db = mongos.getDB(dbName);
- var collName = 'leaves';
- var coll = db[collName];
+var mongos = st.s;
+var dbName = "wc-test-shards";
+var db = mongos.getDB(dbName);
+var collName = 'leaves';
+var coll = db[collName];
- function dropTestDatabase() {
- db.runCommand({dropDatabase: 1});
- db.extra.insert({a: 1});
- coll = db[collName];
- st.ensurePrimaryShard(db.toString(), st.shard0.shardName);
- assert.eq(0, coll.find().itcount(), "test collection not empty");
- assert.eq(1, db.extra.find().itcount(), "extra collection should have 1 document");
- }
+function dropTestDatabase() {
+ db.runCommand({dropDatabase: 1});
+ db.extra.insert({a: 1});
+ coll = db[collName];
+ st.ensurePrimaryShard(db.toString(), st.shard0.shardName);
+ assert.eq(0, coll.find().itcount(), "test collection not empty");
+ assert.eq(1, db.extra.find().itcount(), "extra collection should have 1 document");
+}
- var commands = [];
+var commands = [];
- // Tests a runOnAllShardsCommand against a sharded collection.
- commands.push({
- req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'sharded_type_index'}]},
- setupFunc: function() {
- shardCollectionWithChunks(st, coll);
- coll.insert({type: 'oak', x: -3});
- coll.insert({type: 'maple', x: 23});
- assert.eq(coll.getIndexes().length, 2);
- },
- confirmFunc: function() {
- assert.eq(coll.getIndexes().length, 3);
- },
- admin: false
- });
+// Tests a runOnAllShardsCommand against a sharded collection.
+commands.push({
+ req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'sharded_type_index'}]},
+ setupFunc: function() {
+ shardCollectionWithChunks(st, coll);
+ coll.insert({type: 'oak', x: -3});
+ coll.insert({type: 'maple', x: 23});
+ assert.eq(coll.getIndexes().length, 2);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.getIndexes().length, 3);
+ },
+ admin: false
+});
- // Tests a runOnAllShardsCommand.
- commands.push({
- req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]},
- setupFunc: function() {
- coll.insert({type: 'oak'});
- st.ensurePrimaryShard(db.toString(), st.shard0.shardName);
- assert.eq(coll.getIndexes().length, 1);
- },
- confirmFunc: function() {
- assert.eq(coll.getIndexes().length, 2);
- },
- admin: false
- });
+// Tests a runOnAllShardsCommand.
+commands.push({
+ req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]},
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ st.ensurePrimaryShard(db.toString(), st.shard0.shardName);
+ assert.eq(coll.getIndexes().length, 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.getIndexes().length, 2);
+ },
+ admin: false
+});
- // Tests a batched write command.
- commands.push({
- req: {insert: collName, documents: [{x: -3, type: 'maple'}, {x: 23, type: 'maple'}]},
- setupFunc: function() {
- shardCollectionWithChunks(st, coll);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'maple'}), 2);
- },
- admin: false
- });
+// Tests a batched write command.
+commands.push({
+ req: {insert: collName, documents: [{x: -3, type: 'maple'}, {x: 23, type: 'maple'}]},
+ setupFunc: function() {
+ shardCollectionWithChunks(st, coll);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'maple'}), 2);
+ },
+ admin: false
+});
- // Tests a passthrough.
- commands.push({
- req: {renameCollection: "renameCollWC.leaves", to: 'renameCollWC.pine_needles'},
- setupFunc: function() {
- db = db.getSiblingDB("renameCollWC");
- // Ensure that database is created.
- db.leaves.insert({type: 'oak'});
- st.ensurePrimaryShard(db.toString(), st.shard0.shardName);
- db.leaves.drop();
- db.pine_needles.drop();
- db.leaves.insert({type: 'oak'});
- assert.eq(db.leaves.count(), 1);
- assert.eq(db.pine_needles.count(), 0);
- },
- confirmFunc: function() {
- assert.eq(db.leaves.count(), 0);
- assert.eq(db.pine_needles.count(), 1);
- },
- admin: true
- });
+// Tests a passthrough.
+commands.push({
+ req: {renameCollection: "renameCollWC.leaves", to: 'renameCollWC.pine_needles'},
+ setupFunc: function() {
+ db = db.getSiblingDB("renameCollWC");
+ // Ensure that database is created.
+ db.leaves.insert({type: 'oak'});
+ st.ensurePrimaryShard(db.toString(), st.shard0.shardName);
+ db.leaves.drop();
+ db.pine_needles.drop();
+ db.leaves.insert({type: 'oak'});
+ assert.eq(db.leaves.count(), 1);
+ assert.eq(db.pine_needles.count(), 0);
+ },
+ confirmFunc: function() {
+ assert.eq(db.leaves.count(), 0);
+ assert.eq(db.pine_needles.count(), 1);
+ },
+ admin: true
+});
- commands.push({
- req: {
- update: collName,
- updates: [{
- q: {type: 'oak'},
- u: [{$set: {type: 'ginkgo'}}],
- }],
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- },
- admin: false
- });
+commands.push({
+ req: {
+ update: collName,
+ updates: [{
+ q: {type: 'oak'},
+ u: [{$set: {type: 'ginkgo'}}],
+ }],
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ },
+ admin: false
+});
- commands.push({
- req: {
- findAndModify: collName,
- query: {type: 'oak'},
- update: {$set: {type: 'ginkgo'}},
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- },
- admin: false
- });
+commands.push({
+ req: {
+ findAndModify: collName,
+ query: {type: 'oak'},
+ update: {$set: {type: 'ginkgo'}},
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ },
+ admin: false
+});
- commands.push({
- req: {
- findAndModify: collName,
- query: {type: 'oak'},
- update: [{$set: {type: 'ginkgo'}}],
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- },
- admin: false
- });
+commands.push({
+ req: {
+ findAndModify: collName,
+ query: {type: 'oak'},
+ update: [{$set: {type: 'ginkgo'}}],
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ },
+ admin: false
+});
- // MapReduce on an unsharded collection.
- commands.push({
- req: {
- mapReduce: collName,
- map: function() {
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- var count = 0;
- values.forEach(function(v) {
- count = count + v;
- });
- return count;
- },
- out: "foo"
- },
- setupFunc: function() {
- coll.insert({x: -3, tags: ["a", "b"]});
- coll.insert({x: -7, tags: ["b", "c"]});
- coll.insert({x: 23, tags: ["c", "a"]});
- coll.insert({x: 27, tags: ["b", "c"]});
- },
- confirmFunc: function() {
- assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
- assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
- assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
- db.foo.drop();
- },
- admin: false
- });
+// MapReduce on an unsharded collection.
+commands.push({
+ req: {
+ mapReduce: collName,
+ map: function() {
+ this.tags.forEach(function(z) {
+ emit(z, 1);
+ });
+ },
+ reduce: function(key, values) {
+ var count = 0;
+ values.forEach(function(v) {
+ count = count + v;
+ });
+ return count;
+ },
+ out: "foo"
+ },
+ setupFunc: function() {
+ coll.insert({x: -3, tags: ["a", "b"]});
+ coll.insert({x: -7, tags: ["b", "c"]});
+ coll.insert({x: 23, tags: ["c", "a"]});
+ coll.insert({x: 27, tags: ["b", "c"]});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
+ assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
+ assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
+ db.foo.drop();
+ },
+ admin: false
+});
- // MapReduce on an unsharded collection with an output to a sharded collection.
- commands.push({
- req: {
- mapReduce: collName,
- map: function() {
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- var count = 0;
- values.forEach(function(v) {
- count = count + v;
- });
- return count;
- },
- out: {replace: "foo", sharded: true}
- },
- setupFunc: function() {
- db.adminCommand({enablesharding: db.toString()});
- coll.insert({x: -3, tags: ["a", "b"]});
- coll.insert({x: -7, tags: ["b", "c"]});
- coll.insert({x: 23, tags: ["c", "a"]});
- coll.insert({x: 27, tags: ["b", "c"]});
- },
- confirmFunc: function() {
- assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
- assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
- assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
- db.foo.drop();
- },
- admin: false
- });
+// MapReduce on an unsharded collection with an output to a sharded collection.
+commands.push({
+ req: {
+ mapReduce: collName,
+ map: function() {
+ this.tags.forEach(function(z) {
+ emit(z, 1);
+ });
+ },
+ reduce: function(key, values) {
+ var count = 0;
+ values.forEach(function(v) {
+ count = count + v;
+ });
+ return count;
+ },
+ out: {replace: "foo", sharded: true}
+ },
+ setupFunc: function() {
+ db.adminCommand({enablesharding: db.toString()});
+ coll.insert({x: -3, tags: ["a", "b"]});
+ coll.insert({x: -7, tags: ["b", "c"]});
+ coll.insert({x: 23, tags: ["c", "a"]});
+ coll.insert({x: 27, tags: ["b", "c"]});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
+ assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
+ assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
+ db.foo.drop();
+ },
+ admin: false
+});
- // MapReduce on a sharded collection.
- commands.push({
- req: {
- mapReduce: collName,
- map: function() {
- if (!this.tags) {
- return;
- }
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- var count = 0;
- values.forEach(function(v) {
- count = count + v;
- });
- return count;
- },
- out: "foo"
- },
- setupFunc: function() {
- shardCollectionWithChunks(st, coll);
- coll.insert({x: -3, tags: ["a", "b"]});
- coll.insert({x: -7, tags: ["b", "c"]});
- coll.insert({x: 23, tags: ["c", "a"]});
- coll.insert({x: 27, tags: ["b", "c"]});
- },
- confirmFunc: function() {
- assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
- assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
- assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
- db.foo.drop();
- },
- admin: false
- });
+// MapReduce on a sharded collection.
+commands.push({
+ req: {
+ mapReduce: collName,
+ map: function() {
+ if (!this.tags) {
+ return;
+ }
+ this.tags.forEach(function(z) {
+ emit(z, 1);
+ });
+ },
+ reduce: function(key, values) {
+ var count = 0;
+ values.forEach(function(v) {
+ count = count + v;
+ });
+ return count;
+ },
+ out: "foo"
+ },
+ setupFunc: function() {
+ shardCollectionWithChunks(st, coll);
+ coll.insert({x: -3, tags: ["a", "b"]});
+ coll.insert({x: -7, tags: ["b", "c"]});
+ coll.insert({x: 23, tags: ["c", "a"]});
+ coll.insert({x: 27, tags: ["b", "c"]});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
+ assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
+ assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
+ db.foo.drop();
+ },
+ admin: false
+});
- // MapReduce on a sharded collection with an output action to an unsharded collection.
- commands.push({
- req: {
- mapReduce: collName,
- map: function() {
- if (!this.tags) {
- return;
- }
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- var count = 0;
- values.forEach(function(v) {
- count = count + v;
- });
- return count;
- },
- out: {replace: "foo", sharded: false}
- },
- setupFunc: function() {
- shardCollectionWithChunks(st, coll);
- coll.insert({x: -3, tags: ["a", "b"]});
- coll.insert({x: -7, tags: ["b", "c"]});
- coll.insert({x: 23, tags: ["c", "a"]});
- coll.insert({x: 27, tags: ["b", "c"]});
- },
- confirmFunc: function() {
- assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
- assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
- assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
- db.foo.drop();
- },
- admin: false
- });
+// MapReduce on a sharded collection with an output action to an unsharded collection.
+commands.push({
+ req: {
+ mapReduce: collName,
+ map: function() {
+ if (!this.tags) {
+ return;
+ }
+ this.tags.forEach(function(z) {
+ emit(z, 1);
+ });
+ },
+ reduce: function(key, values) {
+ var count = 0;
+ values.forEach(function(v) {
+ count = count + v;
+ });
+ return count;
+ },
+ out: {replace: "foo", sharded: false}
+ },
+ setupFunc: function() {
+ shardCollectionWithChunks(st, coll);
+ coll.insert({x: -3, tags: ["a", "b"]});
+ coll.insert({x: -7, tags: ["b", "c"]});
+ coll.insert({x: 23, tags: ["c", "a"]});
+ coll.insert({x: 27, tags: ["b", "c"]});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
+ assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
+ assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
+ db.foo.drop();
+ },
+ admin: false
+});
- // MapReduce from a sharded collection with an output to a sharded collection.
- commands.push({
- req: {
- mapReduce: collName,
- map: function() {
- if (!this.tags) {
- return;
- }
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- var count = 0;
- values.forEach(function(v) {
- count = count + v;
- });
- return count;
- },
- out: {replace: "foo", sharded: true}
- },
- setupFunc: function() {
- shardCollectionWithChunks(st, coll);
- coll.insert({x: -3, tags: ["a", "b"]});
- coll.insert({x: -7, tags: ["b", "c"]});
- coll.insert({x: 23, tags: ["c", "a"]});
- coll.insert({x: 27, tags: ["b", "c"]});
- },
- confirmFunc: function() {
- assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
- assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
- assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
- db.foo.drop();
- },
- admin: false
- });
+// MapReduce from a sharded collection with an output to a sharded collection.
+commands.push({
+ req: {
+ mapReduce: collName,
+ map: function() {
+ if (!this.tags) {
+ return;
+ }
+ this.tags.forEach(function(z) {
+ emit(z, 1);
+ });
+ },
+ reduce: function(key, values) {
+ var count = 0;
+ values.forEach(function(v) {
+ count = count + v;
+ });
+ return count;
+ },
+ out: {replace: "foo", sharded: true}
+ },
+ setupFunc: function() {
+ shardCollectionWithChunks(st, coll);
+ coll.insert({x: -3, tags: ["a", "b"]});
+ coll.insert({x: -7, tags: ["b", "c"]});
+ coll.insert({x: 23, tags: ["c", "a"]});
+ coll.insert({x: 27, tags: ["b", "c"]});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
+ assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
+ assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
+ db.foo.drop();
+ },
+ admin: false
+});
- function testValidWriteConcern(cmd) {
- cmd.req.writeConcern = {w: 'majority', wtimeout: 5 * 60 * 1000};
- jsTest.log("Testing " + tojson(cmd.req));
+function testValidWriteConcern(cmd) {
+ cmd.req.writeConcern = {w: 'majority', wtimeout: 5 * 60 * 1000};
+ jsTest.log("Testing " + tojson(cmd.req));
- dropTestDatabase();
- cmd.setupFunc();
- var res = runCommandCheckAdmin(db, cmd);
- assert.commandWorked(res);
- assert(!res.writeConcernError,
- 'command on a full cluster had writeConcernError: ' + tojson(res));
- cmd.confirmFunc();
- }
+ dropTestDatabase();
+ cmd.setupFunc();
+ var res = runCommandCheckAdmin(db, cmd);
+ assert.commandWorked(res);
+ assert(!res.writeConcernError,
+ 'command on a full cluster had writeConcernError: ' + tojson(res));
+ cmd.confirmFunc();
+}
- function testInvalidWriteConcern(cmd) {
- cmd.req.writeConcern = {w: 'invalid'};
- jsTest.log("Testing " + tojson(cmd.req));
+function testInvalidWriteConcern(cmd) {
+ cmd.req.writeConcern = {w: 'invalid'};
+ jsTest.log("Testing " + tojson(cmd.req));
- dropTestDatabase();
- cmd.setupFunc();
- var res = runCommandCheckAdmin(db, cmd);
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assertWriteConcernError(res, ErrorCodes.UnknownReplWriteConcern);
- cmd.confirmFunc();
- }
+ dropTestDatabase();
+ cmd.setupFunc();
+ var res = runCommandCheckAdmin(db, cmd);
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assertWriteConcernError(res, ErrorCodes.UnknownReplWriteConcern);
+ cmd.confirmFunc();
+}
- commands.forEach(function(cmd) {
- testValidWriteConcern(cmd);
- testInvalidWriteConcern(cmd);
- });
+commands.forEach(function(cmd) {
+ testValidWriteConcern(cmd);
+ testInvalidWriteConcern(cmd);
+});
- st.stop();
+st.stop();
})();
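testInvalidWriteConcern in the file above expects a command with an unrecognized w mode to succeed as a command but report a writeConcernError. A minimal sketch of that check for a single insert, assuming the ShardingTest st from this file and the helpers loaded from jstests/libs/write_concern_util.js:

    // The command itself succeeds; only the write concern wait fails.
    var res = st.s.getDB('wc-test-shards').runCommand({
        insert: 'leaves',
        documents: [{type: 'birch'}],
        writeConcern: {w: 'invalid'}
    });
    assert.commandWorkedIgnoringWriteConcernErrors(res);
    assertWriteConcernError(res, ErrorCodes.UnknownReplWriteConcern);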
diff --git a/jstests/sharding/config_rs_no_primary.js b/jstests/sharding/config_rs_no_primary.js
index 92196425263..6b7c7155a6e 100644
--- a/jstests/sharding/config_rs_no_primary.js
+++ b/jstests/sharding/config_rs_no_primary.js
@@ -6,55 +6,55 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- var st = new ShardingTest({
- shards: 1,
- other: {
- c0: {}, // Make sure 1st config server is primary
- c1: {rsConfig: {priority: 0}},
- c2: {rsConfig: {priority: 0}}
- }
+"use strict";
+
+var st = new ShardingTest({
+ shards: 1,
+ other: {
+ c0: {}, // Make sure 1st config server is primary
+ c1: {rsConfig: {priority: 0}},
+ c2: {rsConfig: {priority: 0}}
+ }
+});
+
+assert.eq(st.config0, st.configRS.getPrimary());
+
+// Create the "test" database while the cluster metadata is still writeable.
+st.s.getDB('test').foo.insert({a: 1});
+
+// Take down two of the config servers so the remaining one goes into SECONDARY state.
+st.configRS.stop(1);
+st.configRS.stop(2);
+st.configRS.awaitNoPrimary();
+
+jsTestLog("Starting a new mongos when the config servers have no primary which should work");
+var mongos2 = MongoRunner.runMongos({configdb: st.configRS.getURL()});
+assert.neq(null, mongos2);
+
+var testOps = function(mongos) {
+ jsTestLog("Doing ops that don't require metadata writes and thus should succeed against: " +
+ mongos);
+ var initialCount = mongos.getDB('test').foo.count();
+ assert.writeOK(mongos.getDB('test').foo.insert({a: 1}));
+ assert.eq(initialCount + 1, mongos.getDB('test').foo.count());
+
+ assert.throws(function() {
+ mongos.getDB('config').shards.findOne();
});
-
- assert.eq(st.config0, st.configRS.getPrimary());
-
- // Create the "test" database while the cluster metadata is still writeable.
- st.s.getDB('test').foo.insert({a: 1});
-
- // Take down two of the config servers so the remaining one goes into SECONDARY state.
- st.configRS.stop(1);
- st.configRS.stop(2);
- st.configRS.awaitNoPrimary();
-
- jsTestLog("Starting a new mongos when the config servers have no primary which should work");
- var mongos2 = MongoRunner.runMongos({configdb: st.configRS.getURL()});
- assert.neq(null, mongos2);
-
- var testOps = function(mongos) {
- jsTestLog("Doing ops that don't require metadata writes and thus should succeed against: " +
- mongos);
- var initialCount = mongos.getDB('test').foo.count();
- assert.writeOK(mongos.getDB('test').foo.insert({a: 1}));
- assert.eq(initialCount + 1, mongos.getDB('test').foo.count());
-
- assert.throws(function() {
- mongos.getDB('config').shards.findOne();
- });
- mongos.setSlaveOk(true);
- var shardDoc = mongos.getDB('config').shards.findOne();
- mongos.setSlaveOk(false);
- assert.neq(null, shardDoc);
-
- jsTestLog("Doing ops that require metadata writes and thus should fail against: " + mongos);
- assert.writeError(mongos.getDB("newDB").foo.insert({a: 1}));
- assert.commandFailed(
- mongos.getDB('admin').runCommand({shardCollection: "test.foo", key: {a: 1}}));
- };
-
- testOps(mongos2);
- testOps(st.s);
-
- st.stop();
- MongoRunner.stopMongos(mongos2);
+ mongos.setSlaveOk(true);
+ var shardDoc = mongos.getDB('config').shards.findOne();
+ mongos.setSlaveOk(false);
+ assert.neq(null, shardDoc);
+
+ jsTestLog("Doing ops that require metadata writes and thus should fail against: " + mongos);
+ assert.writeError(mongos.getDB("newDB").foo.insert({a: 1}));
+ assert.commandFailed(
+ mongos.getDB('admin').runCommand({shardCollection: "test.foo", key: {a: 1}}));
+};
+
+testOps(mongos2);
+testOps(st.s);
+
+st.stop();
+MongoRunner.stopMongos(mongos2);
}());
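The interesting behavior in config_rs_no_primary.js is that metadata reads keep working once the CSRS has lost its primary, but only after opting into secondary reads. A minimal sketch of that read pattern, assuming a mongos connection named conn as in testOps above:

    // Default (primary) reads of config metadata throw while the CSRS has no primary...
    assert.throws(function() {
        conn.getDB('config').shards.findOne();
    });
    // ...while slaveOk reads are served from a config secondary.
    conn.setSlaveOk(true);
    assert.neq(null, conn.getDB('config').shards.findOne());
    conn.setSlaveOk(false);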
diff --git a/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js b/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
index d4be74ed8bc..33e4c5b735b 100644
--- a/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
+++ b/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
@@ -5,193 +5,193 @@
* 2) Issuing a metadata command directly to a config server with non-majority write concern fails.
*/
(function() {
- 'use strict';
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
- const newShardName = "newShard";
-
- // Commands sent directly to the config server should fail with WC < majority.
- const unacceptableWCsForConfig = [
- {writeConcern: {w: 1}},
- {writeConcern: {w: 2}},
- {writeConcern: {w: 3}},
- // TODO: should metadata commands allow j: false? can CSRS have an in-memory storage engine?
- // writeConcern{w: "majority", j: "false"}},
- ];
-
- // Only write concern majority can be sent to the config server.
- const acceptableWCsForConfig = [
- {writeConcern: {w: "majority"}},
- {writeConcern: {w: "majority", wtimeout: 15000}},
- ];
-
- // Any write concern can be sent to a mongos, because mongos will upconvert it to majority.
- const unacceptableWCsForMongos = [];
- const acceptableWCsForMongos = [
- {},
- {writeConcern: {w: 0}},
- {writeConcern: {w: 0, wtimeout: 15000}},
- {writeConcern: {w: 1}},
- {writeConcern: {w: 2}},
- {writeConcern: {w: 3}},
- {writeConcern: {w: "majority"}},
- {writeConcern: {w: "majority", wtimeout: 15000}},
- ];
-
- const setupFuncs = {
- noop: function() {},
- createDatabase: function() {
- // A database is implicitly created when a collection within it is created.
- assert.commandWorked(st.s.getDB(dbName).runCommand({create: collName}));
- },
- enableSharding: function() {
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- },
- addShard: function() {
- assert.commandWorked(st.s.adminCommand({addShard: newShard.name, name: newShardName}));
- },
- };
-
- const cleanupFuncs = {
- noop: function() {},
- dropDatabase: function() {
- assert.commandWorked(st.s.getDB(dbName).runCommand({dropDatabase: 1}));
- },
- removeShardIfExists: function() {
- var res = st.s.adminCommand({removeShard: newShardName});
- if (!res.ok && res.code == ErrorCodes.ShardNotFound) {
- return;
- }
- assert.commandWorked(res);
- assert.eq('started', res.state);
- res = st.s.adminCommand({removeShard: newShardName});
- assert.commandWorked(res);
- assert.eq('completed', res.state);
- },
- };
-
- function checkCommand(
- conn, command, unacceptableWCs, acceptableWCs, adminCommand, setupFunc, cleanupFunc) {
- unacceptableWCs.forEach(function(writeConcern) {
- jsTest.log("testing " + tojson(command) + " with writeConcern " + tojson(writeConcern) +
- " against " + conn + ", expecting the command to fail");
- setupFunc();
- let commandWithWriteConcern = {};
- Object.assign(commandWithWriteConcern, command, writeConcern);
- if (adminCommand) {
- assert.commandFailedWithCode(conn.adminCommand(commandWithWriteConcern),
- ErrorCodes.InvalidOptions);
- } else {
- assert.commandFailedWithCode(conn.runCommand(commandWithWriteConcern),
- ErrorCodes.InvalidOptions);
- }
- cleanupFunc();
- });
-
- acceptableWCs.forEach(function(writeConcern) {
- jsTest.log("testing " + tojson(command) + " with writeConcern " + tojson(writeConcern) +
- " against " + conn + ", expecting the command to succeed");
- setupFunc();
- let commandWithWriteConcern = {};
- Object.assign(commandWithWriteConcern, command, writeConcern);
- if (adminCommand) {
- assert.commandWorked(conn.adminCommand(commandWithWriteConcern));
- } else {
- assert.commandWorked(conn.runCommand(commandWithWriteConcern));
- }
- cleanupFunc();
- });
- }
-
- function checkCommandMongos(command, setupFunc, cleanupFunc) {
- checkCommand(st.s,
- command,
- unacceptableWCsForMongos,
- acceptableWCsForMongos,
- true,
- setupFunc,
- cleanupFunc);
- }
-
- function checkCommandConfigSvr(command, setupFunc, cleanupFunc) {
- checkCommand(st.configRS.getPrimary(),
- command,
- unacceptableWCsForConfig,
- acceptableWCsForConfig,
- true,
- setupFunc,
- cleanupFunc);
- }
-
- var st = new ShardingTest({shards: 1});
-
- // enableSharding
- checkCommandMongos({enableSharding: dbName}, setupFuncs.noop, cleanupFuncs.dropDatabase);
- checkCommandConfigSvr(
- {_configsvrEnableSharding: dbName}, setupFuncs.noop, cleanupFuncs.dropDatabase);
-
- // movePrimary
- checkCommandMongos({movePrimary: dbName, to: st.shard0.name},
- setupFuncs.createDatabase,
- cleanupFuncs.dropDatabase);
- checkCommandConfigSvr({_configsvrMovePrimary: dbName, to: st.shard0.name},
- setupFuncs.createDatabase,
- cleanupFuncs.dropDatabase);
-
- // We are using a different name from ns because it was already created in setupFuncs.
- checkCommandConfigSvr({_configsvrCreateCollection: dbName + '.bar', options: {}},
- setupFuncs.createDatabase,
- cleanupFuncs.dropDatabase);
-
- // shardCollection
- checkCommandMongos(
- {shardCollection: ns, key: {_id: 1}}, setupFuncs.enableSharding, cleanupFuncs.dropDatabase);
- checkCommandConfigSvr({_configsvrShardCollection: ns, key: {_id: 1}},
- setupFuncs.enableSharding,
- cleanupFuncs.dropDatabase);
-
- // createDatabase
- // Don't check createDatabase against mongos: there is no createDatabase command exposed on
- // mongos; a database is created implicitly when a collection in it is created.
- checkCommandConfigSvr({_configsvrCreateDatabase: dbName, to: st.shard0.name},
- setupFuncs.noop,
- cleanupFuncs.dropDatabase);
-
- // addShard
- var newShard = MongoRunner.runMongod({shardsvr: ""});
- checkCommandMongos({addShard: newShard.name, name: newShardName},
- setupFuncs.noop,
- cleanupFuncs.removeShardIfExists);
- checkCommandConfigSvr({_configsvrAddShard: newShard.name, name: newShardName},
- setupFuncs.noop,
- cleanupFuncs.removeShardIfExists);
-
- // removeShard
- checkCommandMongos({removeShard: newShardName}, setupFuncs.addShard, cleanupFuncs.noop);
- checkCommandConfigSvr(
- {_configsvrRemoveShard: newShardName}, setupFuncs.addShard, cleanupFuncs.noop);
-
- // dropCollection
- checkCommandMongos({drop: ns}, setupFuncs.createDatabase, cleanupFuncs.dropDatabase);
- checkCommandConfigSvr(
- {_configsvrDropCollection: ns}, setupFuncs.createDatabase, cleanupFuncs.dropDatabase);
-
- // dropDatabase
-
- // We can't use the checkCommandMongos wrapper because we need a connection to the test
- // database.
- checkCommand(st.s.getDB(dbName),
- {dropDatabase: 1},
+'use strict';
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+const newShardName = "newShard";
+
+// Commands sent directly to the config server should fail with WC < majority.
+const unacceptableWCsForConfig = [
+ {writeConcern: {w: 1}},
+ {writeConcern: {w: 2}},
+ {writeConcern: {w: 3}},
+ // TODO: should metadata commands allow j: false? can CSRS have an in-memory storage engine?
+ // writeConcern{w: "majority", j: "false"}},
+];
+
+// Only write concern majority can be sent to the config server.
+const acceptableWCsForConfig = [
+ {writeConcern: {w: "majority"}},
+ {writeConcern: {w: "majority", wtimeout: 15000}},
+];
+
+// Any write concern can be sent to a mongos, because mongos will upconvert it to majority.
+const unacceptableWCsForMongos = [];
+const acceptableWCsForMongos = [
+ {},
+ {writeConcern: {w: 0}},
+ {writeConcern: {w: 0, wtimeout: 15000}},
+ {writeConcern: {w: 1}},
+ {writeConcern: {w: 2}},
+ {writeConcern: {w: 3}},
+ {writeConcern: {w: "majority"}},
+ {writeConcern: {w: "majority", wtimeout: 15000}},
+];
+
+const setupFuncs = {
+ noop: function() {},
+ createDatabase: function() {
+ // A database is implicitly created when a collection within it is created.
+ assert.commandWorked(st.s.getDB(dbName).runCommand({create: collName}));
+ },
+ enableSharding: function() {
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ },
+ addShard: function() {
+ assert.commandWorked(st.s.adminCommand({addShard: newShard.name, name: newShardName}));
+ },
+};
+
+const cleanupFuncs = {
+ noop: function() {},
+ dropDatabase: function() {
+ assert.commandWorked(st.s.getDB(dbName).runCommand({dropDatabase: 1}));
+ },
+ removeShardIfExists: function() {
+ var res = st.s.adminCommand({removeShard: newShardName});
+ if (!res.ok && res.code == ErrorCodes.ShardNotFound) {
+ return;
+ }
+ assert.commandWorked(res);
+ assert.eq('started', res.state);
+ res = st.s.adminCommand({removeShard: newShardName});
+ assert.commandWorked(res);
+ assert.eq('completed', res.state);
+ },
+};
+
+function checkCommand(
+ conn, command, unacceptableWCs, acceptableWCs, adminCommand, setupFunc, cleanupFunc) {
+ unacceptableWCs.forEach(function(writeConcern) {
+ jsTest.log("testing " + tojson(command) + " with writeConcern " + tojson(writeConcern) +
+ " against " + conn + ", expecting the command to fail");
+ setupFunc();
+ let commandWithWriteConcern = {};
+ Object.assign(commandWithWriteConcern, command, writeConcern);
+ if (adminCommand) {
+ assert.commandFailedWithCode(conn.adminCommand(commandWithWriteConcern),
+ ErrorCodes.InvalidOptions);
+ } else {
+ assert.commandFailedWithCode(conn.runCommand(commandWithWriteConcern),
+ ErrorCodes.InvalidOptions);
+ }
+ cleanupFunc();
+ });
+
+ acceptableWCs.forEach(function(writeConcern) {
+ jsTest.log("testing " + tojson(command) + " with writeConcern " + tojson(writeConcern) +
+ " against " + conn + ", expecting the command to succeed");
+ setupFunc();
+ let commandWithWriteConcern = {};
+ Object.assign(commandWithWriteConcern, command, writeConcern);
+ if (adminCommand) {
+ assert.commandWorked(conn.adminCommand(commandWithWriteConcern));
+ } else {
+ assert.commandWorked(conn.runCommand(commandWithWriteConcern));
+ }
+ cleanupFunc();
+ });
+}
+
+function checkCommandMongos(command, setupFunc, cleanupFunc) {
+ checkCommand(st.s,
+ command,
unacceptableWCsForMongos,
acceptableWCsForMongos,
- false,
- setupFuncs.createDatabase,
- cleanupFuncs.dropDatabase);
- checkCommandConfigSvr(
- {_configsvrDropDatabase: dbName}, setupFuncs.createDatabase, cleanupFuncs.dropDatabase);
-
- MongoRunner.stopMongos(newShard);
- st.stop();
+ true,
+ setupFunc,
+ cleanupFunc);
+}
+
+function checkCommandConfigSvr(command, setupFunc, cleanupFunc) {
+ checkCommand(st.configRS.getPrimary(),
+ command,
+ unacceptableWCsForConfig,
+ acceptableWCsForConfig,
+ true,
+ setupFunc,
+ cleanupFunc);
+}
+
+var st = new ShardingTest({shards: 1});
+
+// enableSharding
+checkCommandMongos({enableSharding: dbName}, setupFuncs.noop, cleanupFuncs.dropDatabase);
+checkCommandConfigSvr(
+ {_configsvrEnableSharding: dbName}, setupFuncs.noop, cleanupFuncs.dropDatabase);
+
+// movePrimary
+checkCommandMongos({movePrimary: dbName, to: st.shard0.name},
+ setupFuncs.createDatabase,
+ cleanupFuncs.dropDatabase);
+checkCommandConfigSvr({_configsvrMovePrimary: dbName, to: st.shard0.name},
+ setupFuncs.createDatabase,
+ cleanupFuncs.dropDatabase);
+
+// We are using a different name from ns because it was already created in setupFuncs.
+checkCommandConfigSvr({_configsvrCreateCollection: dbName + '.bar', options: {}},
+ setupFuncs.createDatabase,
+ cleanupFuncs.dropDatabase);
+
+// shardCollection
+checkCommandMongos(
+ {shardCollection: ns, key: {_id: 1}}, setupFuncs.enableSharding, cleanupFuncs.dropDatabase);
+checkCommandConfigSvr({_configsvrShardCollection: ns, key: {_id: 1}},
+ setupFuncs.enableSharding,
+ cleanupFuncs.dropDatabase);
+
+// createDatabase
+// Don't check createDatabase against mongos: there is no createDatabase command exposed on
+// mongos; a database is created implicitly when a collection in it is created.
+checkCommandConfigSvr({_configsvrCreateDatabase: dbName, to: st.shard0.name},
+ setupFuncs.noop,
+ cleanupFuncs.dropDatabase);
+
+// addShard
+var newShard = MongoRunner.runMongod({shardsvr: ""});
+checkCommandMongos({addShard: newShard.name, name: newShardName},
+ setupFuncs.noop,
+ cleanupFuncs.removeShardIfExists);
+checkCommandConfigSvr({_configsvrAddShard: newShard.name, name: newShardName},
+ setupFuncs.noop,
+ cleanupFuncs.removeShardIfExists);
+
+// removeShard
+checkCommandMongos({removeShard: newShardName}, setupFuncs.addShard, cleanupFuncs.noop);
+checkCommandConfigSvr(
+ {_configsvrRemoveShard: newShardName}, setupFuncs.addShard, cleanupFuncs.noop);
+
+// dropCollection
+checkCommandMongos({drop: ns}, setupFuncs.createDatabase, cleanupFuncs.dropDatabase);
+checkCommandConfigSvr(
+ {_configsvrDropCollection: ns}, setupFuncs.createDatabase, cleanupFuncs.dropDatabase);
+
+// dropDatabase
+
+// We can't use the checkCommandMongos wrapper because we need a connection to the test
+// database.
+checkCommand(st.s.getDB(dbName),
+ {dropDatabase: 1},
+ unacceptableWCsForMongos,
+ acceptableWCsForMongos,
+ false,
+ setupFuncs.createDatabase,
+ cleanupFuncs.dropDatabase);
+checkCommandConfigSvr(
+ {_configsvrDropDatabase: dbName}, setupFuncs.createDatabase, cleanupFuncs.dropDatabase);
+
+MongoRunner.stopMongos(newShard);
+st.stop();
})();
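checkCommand above builds each request by merging the writeConcern document into the base command with Object.assign, and expects ErrorCodes.InvalidOptions when a sub-majority write concern reaches the config server directly. A minimal sketch of one such case, assuming the ShardingTest st and dbName from this file:

    // _configsvrEnableSharding with w: 1 sent straight to the CSRS primary is rejected.
    let commandWithWriteConcern = {};
    Object.assign(commandWithWriteConcern,
                  {_configsvrEnableSharding: dbName},
                  {writeConcern: {w: 1}});
    assert.commandFailedWithCode(st.configRS.getPrimary().adminCommand(commandWithWriteConcern),
                                 ErrorCodes.InvalidOptions);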
diff --git a/jstests/sharding/conn_pool_stats.js b/jstests/sharding/conn_pool_stats.js
index 7c248f383e6..0476d3f7541 100644
--- a/jstests/sharding/conn_pool_stats.js
+++ b/jstests/sharding/conn_pool_stats.js
@@ -1,33 +1,33 @@
// Tests for the connPoolStats command.
(function() {
- "use strict";
- // Create a cluster with 2 shards.
- var cluster = new ShardingTest({shards: 2});
+"use strict";
+// Create a cluster with 2 shards.
+var cluster = new ShardingTest({shards: 2});
- // Needed because the command was expanded post 3.2
- var version = cluster.s.getDB("admin").runCommand({buildinfo: 1}).versionArray;
- var post32 = (version[0] > 4) || ((version[0] == 3) && (version[1] > 2));
+// Needed because the command was expanded post 3.2
+var version = cluster.s.getDB("admin").runCommand({buildinfo: 1}).versionArray;
+var post32 = (version[0] > 4) || ((version[0] == 3) && (version[1] > 2));
- // Run the connPoolStats command
- var stats = cluster.s.getDB("admin").runCommand({connPoolStats: 1});
+// Run the connPoolStats command
+var stats = cluster.s.getDB("admin").runCommand({connPoolStats: 1});
- // Validate output
- printjson(stats);
- assert.commandWorked(stats);
- assert("replicaSets" in stats);
- assert("hosts" in stats);
- assert("numClientConnections" in stats);
- assert("numAScopedConnections" in stats);
- assert("totalInUse" in stats);
- assert("totalAvailable" in stats);
- assert("totalCreated" in stats);
- assert.lte(stats["totalInUse"] + stats["totalAvailable"], stats["totalCreated"], tojson(stats));
- if (post32) {
- assert("pools" in stats);
- assert("totalRefreshing" in stats);
- assert.lte(stats["totalInUse"] + stats["totalAvailable"] + stats["totalRefreshing"],
- stats["totalCreated"],
- tojson(stats));
- }
- cluster.stop();
+// Validate output
+printjson(stats);
+assert.commandWorked(stats);
+assert("replicaSets" in stats);
+assert("hosts" in stats);
+assert("numClientConnections" in stats);
+assert("numAScopedConnections" in stats);
+assert("totalInUse" in stats);
+assert("totalAvailable" in stats);
+assert("totalCreated" in stats);
+assert.lte(stats["totalInUse"] + stats["totalAvailable"], stats["totalCreated"], tojson(stats));
+if (post32) {
+ assert("pools" in stats);
+ assert("totalRefreshing" in stats);
+ assert.lte(stats["totalInUse"] + stats["totalAvailable"] + stats["totalRefreshing"],
+ stats["totalCreated"],
+ tojson(stats));
+}
+cluster.stop();
})();
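The core invariant conn_pool_stats.js asserts is that connections currently in use, available, and (post-3.2) refreshing can never exceed the number ever created. A minimal sketch of that check against any mongos, assuming a connection named conn:

    var stats = conn.getDB('admin').runCommand({connPoolStats: 1});
    assert.commandWorked(stats);
    // totalRefreshing only exists on newer versions, so treat it as optional here.
    assert.lte(stats.totalInUse + stats.totalAvailable + (stats.totalRefreshing || 0),
               stats.totalCreated,
               tojson(stats));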
diff --git a/jstests/sharding/convert_to_and_from_sharded.js b/jstests/sharding/convert_to_and_from_sharded.js
index 96ee9d19a6d..15da2e0cc73 100644
--- a/jstests/sharding/convert_to_and_from_sharded.js
+++ b/jstests/sharding/convert_to_and_from_sharded.js
@@ -4,126 +4,126 @@
* @tags: [requires_persistence]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/feature_compatibility_version.js");
- var NUM_NODES = 3;
+var NUM_NODES = 3;
- /**
- * Checks that basic CRUD operations work as expected. Expects the collection to have a
- * { _id: 'marker' } document.
- */
- var checkBasicCRUD = function(coll) {
- var doc = coll.findOne({_id: 'marker', y: {$exists: false}});
- assert.neq(null, doc);
+/**
+ * Checks that basic CRUD operations work as expected. Expects the collection to have a
+ * { _id: 'marker' } document.
+ */
+var checkBasicCRUD = function(coll) {
+ var doc = coll.findOne({_id: 'marker', y: {$exists: false}});
+ assert.neq(null, doc);
- assert.writeOK(coll.update({_id: 'marker'}, {$set: {y: 2}}));
- assert.eq(2, coll.findOne({_id: 'marker'}).y);
+ assert.writeOK(coll.update({_id: 'marker'}, {$set: {y: 2}}));
+ assert.eq(2, coll.findOne({_id: 'marker'}).y);
- assert.writeOK(coll.remove({_id: 'marker'}));
- assert.eq(null, coll.findOne({_id: 'marker'}));
+ assert.writeOK(coll.remove({_id: 'marker'}));
+ assert.eq(null, coll.findOne({_id: 'marker'}));
- assert.writeOK(coll.insert({_id: 'marker'}, {writeConcern: {w: NUM_NODES}}));
- assert.eq('marker', coll.findOne({_id: 'marker'})._id);
- };
+ assert.writeOK(coll.insert({_id: 'marker'}, {writeConcern: {w: NUM_NODES}}));
+ assert.eq('marker', coll.findOne({_id: 'marker'})._id);
+};
- var st = new ShardingTest({shards: {}});
+var st = new ShardingTest({shards: {}});
- var replShard = new ReplSetTest({nodes: NUM_NODES});
- replShard.startSet({verbose: 1});
- replShard.initiate();
+var replShard = new ReplSetTest({nodes: NUM_NODES});
+replShard.startSet({verbose: 1});
+replShard.initiate();
- var priConn = replShard.getPrimary();
+var priConn = replShard.getPrimary();
- // Starting a brand new replica set without '--shardsvr' will cause the FCV to be written as the
- // latest available for that binary. This poses a problem when this test is run in the mixed
- // version suite because mongos will be 'last-stable' and if this node is of the latest binary,
- // it will report itself as the 'latest' FCV, which would cause mongos to refuse to connect to
- // it and shut down.
- //
- // In order to work around this, in the mixed version suite, be pessimistic and always set this
- // node to the 'last-stable' FCV
- if (jsTestOptions().shardMixedBinVersions) {
- assert.commandWorked(priConn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- replShard.awaitReplication();
- }
+// Starting a brand new replica set without '--shardsvr' will cause the FCV to be written as the
+// latest available for that binary. This poses a problem when this test is run in the mixed
+// version suite because mongos will be 'last-stable' and if this node is of the latest binary,
+// it will report itself as the 'latest' FCV, which would cause mongos to refuse to connect to
+// it and shut down.
+//
+// In order to work around this, in the mixed version suite, be pessimistic and always set this
+// node to the 'last-stable' FCV
+if (jsTestOptions().shardMixedBinVersions) {
+ assert.commandWorked(priConn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+ replShard.awaitReplication();
+}
- assert.writeOK(priConn.getDB('test').unsharded.insert({_id: 'marker'}));
- checkBasicCRUD(priConn.getDB('test').unsharded);
+assert.writeOK(priConn.getDB('test').unsharded.insert({_id: 'marker'}));
+checkBasicCRUD(priConn.getDB('test').unsharded);
- assert.writeOK(priConn.getDB('test').sharded.insert({_id: 'marker'}));
- checkBasicCRUD(priConn.getDB('test').sharded);
+assert.writeOK(priConn.getDB('test').sharded.insert({_id: 'marker'}));
+checkBasicCRUD(priConn.getDB('test').sharded);
- for (var x = 0; x < NUM_NODES; x++) {
- replShard.restart(x, {shardsvr: ''});
- }
+for (var x = 0; x < NUM_NODES; x++) {
+ replShard.restart(x, {shardsvr: ''});
+}
- replShard.awaitNodesAgreeOnPrimary();
- assert.commandWorked(st.s.adminCommand({addShard: replShard.getURL()}));
+replShard.awaitNodesAgreeOnPrimary();
+assert.commandWorked(st.s.adminCommand({addShard: replShard.getURL()}));
- priConn = replShard.getPrimary();
- checkBasicCRUD(priConn.getDB('test').unsharded);
- checkBasicCRUD(priConn.getDB('test').sharded);
+priConn = replShard.getPrimary();
+checkBasicCRUD(priConn.getDB('test').unsharded);
+checkBasicCRUD(priConn.getDB('test').sharded);
- checkBasicCRUD(st.s.getDB('test').unsharded);
- checkBasicCRUD(st.s.getDB('test').sharded);
+checkBasicCRUD(st.s.getDB('test').unsharded);
+checkBasicCRUD(st.s.getDB('test').sharded);
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(st.s.adminCommand({shardCollection: 'test.sharded', key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.sharded', key: {_id: 1}}));
- checkBasicCRUD(st.s.getDB('test').unsharded);
- checkBasicCRUD(st.s.getDB('test').sharded);
+checkBasicCRUD(st.s.getDB('test').unsharded);
+checkBasicCRUD(st.s.getDB('test').sharded);
- for (x = 0; x < 4; x++) {
- assert.writeOK(st.s.getDB('test').sharded.insert({_id: x}));
- assert.commandWorked(st.s.adminCommand({split: 'test.sharded', middle: {_id: x}}));
- }
+for (x = 0; x < 4; x++) {
+ assert.writeOK(st.s.getDB('test').sharded.insert({_id: x}));
+ assert.commandWorked(st.s.adminCommand({split: 'test.sharded', middle: {_id: x}}));
+}
- var newMongod = MongoRunner.runMongod({shardsvr: ''});
+var newMongod = MongoRunner.runMongod({shardsvr: ''});
- assert.commandWorked(st.s.adminCommand({addShard: newMongod.name, name: 'toRemoveLater'}));
+assert.commandWorked(st.s.adminCommand({addShard: newMongod.name, name: 'toRemoveLater'}));
- for (x = 0; x < 2; x++) {
- assert.commandWorked(
- st.s.adminCommand({moveChunk: 'test.sharded', find: {_id: x}, to: 'toRemoveLater'}));
- }
+for (x = 0; x < 2; x++) {
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: 'test.sharded', find: {_id: x}, to: 'toRemoveLater'}));
+}
- checkBasicCRUD(st.s.getDB('test').unsharded);
- checkBasicCRUD(st.s.getDB('test').sharded);
+checkBasicCRUD(st.s.getDB('test').unsharded);
+checkBasicCRUD(st.s.getDB('test').sharded);
- assert.commandWorked(st.s.adminCommand({removeShard: 'toRemoveLater'}));
+assert.commandWorked(st.s.adminCommand({removeShard: 'toRemoveLater'}));
- // Start the balancer to start draining the chunks.
- st.startBalancer();
+// Start the balancer to start draining the chunks.
+st.startBalancer();
- assert.soon(function() {
- var res = st.s.adminCommand({removeShard: 'toRemoveLater'});
- return res.state == 'completed';
- });
+assert.soon(function() {
+ var res = st.s.adminCommand({removeShard: 'toRemoveLater'});
+ return res.state == 'completed';
+});
- MongoRunner.stopMongod(newMongod);
+MongoRunner.stopMongod(newMongod);
- checkBasicCRUD(st.s.getDB('test').unsharded);
- checkBasicCRUD(st.s.getDB('test').sharded);
+checkBasicCRUD(st.s.getDB('test').unsharded);
+checkBasicCRUD(st.s.getDB('test').sharded);
- st.stop();
+st.stop();
- checkBasicCRUD(priConn.getDB('test').unsharded);
- checkBasicCRUD(priConn.getDB('test').sharded);
+checkBasicCRUD(priConn.getDB('test').unsharded);
+checkBasicCRUD(priConn.getDB('test').sharded);
- jsTest.log('About to restart repl w/o shardsvr');
+jsTest.log('About to restart repl w/o shardsvr');
- replShard.nodes.forEach(function(node) {
- delete node.fullOptions.shardsvr;
- });
+replShard.nodes.forEach(function(node) {
+ delete node.fullOptions.shardsvr;
+});
- replShard.restart(replShard.nodes);
- replShard.awaitNodesAgreeOnPrimary();
+replShard.restart(replShard.nodes);
+replShard.awaitNodesAgreeOnPrimary();
- priConn = replShard.getPrimary();
- checkBasicCRUD(priConn.getDB('test').unsharded);
- checkBasicCRUD(priConn.getDB('test').sharded);
+priConn = replShard.getPrimary();
+checkBasicCRUD(priConn.getDB('test').unsharded);
+checkBasicCRUD(priConn.getDB('test').sharded);
- replShard.stopSet();
+replShard.stopSet();
})();
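convert_to_and_from_sharded.js turns a plain replica set into a shard and later back again. The forward direction boils down to restarting every member with the shardsvr option and then registering the set through mongos; a minimal sketch, assuming replShard and st as defined in the file above:

    // Restart each member as a shard server, then add the set as a shard.
    for (var i = 0; i < replShard.nodes.length; i++) {
        replShard.restart(i, {shardsvr: ''});
    }
    replShard.awaitNodesAgreeOnPrimary();
    assert.commandWorked(st.s.adminCommand({addShard: replShard.getURL()}));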
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
index 2275faed656..b712191e6ed 100644
--- a/jstests/sharding/count1.js
+++ b/jstests/sharding/count1.js
@@ -1,181 +1,180 @@
(function() {
- 'use strict';
-
- var s = new ShardingTest({shards: 2});
- var db = s.getDB("test");
-
- // ************** Test Set #1 *************
- // Basic counts on "bar" collections, not yet sharded
-
- db.bar.save({n: 1});
- db.bar.save({n: 2});
- db.bar.save({n: 3});
-
- assert.eq(3, db.bar.find().count(), "bar 1");
- assert.eq(1, db.bar.find({n: 1}).count(), "bar 2");
-
- //************** Test Set #2 *************
- // Basic counts on sharded "foo" collection.
- // 1. Create foo collection, insert 6 docs
- // 2. Divide into three chunks
- // 3. Test counts before chunk migrations
- // 4. Manually move chunks. Now each shard should have 3 docs.
- // 5. i. Test basic counts on foo
- // ii. Test counts with limit
- // iii. Test counts with skip
- // iv. Test counts with skip + limit
- // v. Test counts with skip + limit + sorting
- // 6. Insert 10 more docs. Further limit/skip testing with a find query
- // 7. test invalid queries/values
-
- // part 1
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.foo", key: {name: 1}});
-
- var primary = s.getPrimaryShard("test").getDB("test");
- var secondary = s.getOther(primary).getDB("test");
-
- assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check A");
-
- assert.commandWorked(db.foo.insert({_id: 1, name: "eliot"}));
- assert.commandWorked(db.foo.insert({_id: 2, name: "sara"}));
- assert.commandWorked(db.foo.insert({_id: 3, name: "bob"}));
- assert.commandWorked(db.foo.insert({_id: 4, name: "joe"}));
- assert.commandWorked(db.foo.insert({_id: 5, name: "mark"}));
- assert.commandWorked(db.foo.insert({_id: 6, name: "allan"}));
-
- assert.eq(6, db.foo.find().count(), "basic count");
-
- // part 2
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "allan"}}));
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "sara"}}));
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "eliot"}}));
-
- // MINKEY->allan,bob->eliot,joe,mark->sara,MAXKEY
-
- s.printChunks();
-
- // part 3
- assert.eq(6, db.foo.find().count(), "basic count after split ");
- assert.eq(6, db.foo.find().sort({name: 1}).count(), "basic count after split sorted ");
-
- // part 4
- assert.commandWorked(s.s0.adminCommand({
- moveChunk: "test.foo",
- find: {name: "eliot"},
- to: secondary.getMongo().name,
- _waitForDelete: true
- }));
-
- assert.eq(3, primary.foo.find().toArray().length, "primary count");
- assert.eq(3, secondary.foo.find().toArray().length, "secondary count");
- assert.eq(3, primary.foo.find().sort({name: 1}).toArray().length, "primary count sorted");
- assert.eq(3, secondary.foo.find().sort({name: 1}).toArray().length, "secondary count sorted");
-
- // part 5
- // Some redundant tests, but better safe than sorry. These are fast tests, anyway.
-
- // i.
- assert.eq(6, db.foo.find().count(), "total count after move");
- assert.eq(6, db.foo.find().toArray().length, "total count after move");
- assert.eq(6, db.foo.find().sort({name: 1}).toArray().length, "total count() sorted");
- assert.eq(6, db.foo.find().sort({name: 1}).count(), "total count with count() after move");
-
- // ii.
- assert.eq(2, db.foo.find().limit(2).count(true));
- assert.eq(2, db.foo.find().limit(-2).count(true));
- assert.eq(6, db.foo.find().limit(100).count(true));
- assert.eq(6, db.foo.find().limit(-100).count(true));
- assert.eq(6, db.foo.find().limit(0).count(true));
-
- // iii.
- assert.eq(6, db.foo.find().skip(0).count(true));
- assert.eq(5, db.foo.find().skip(1).count(true));
- assert.eq(4, db.foo.find().skip(2).count(true));
- assert.eq(3, db.foo.find().skip(3).count(true));
- assert.eq(2, db.foo.find().skip(4).count(true));
- assert.eq(1, db.foo.find().skip(5).count(true));
- assert.eq(0, db.foo.find().skip(6).count(true));
- assert.eq(0, db.foo.find().skip(7).count(true));
-
- // iv.
- assert.eq(2, db.foo.find().limit(2).skip(1).count(true));
- assert.eq(2, db.foo.find().limit(-2).skip(1).count(true));
- assert.eq(5, db.foo.find().limit(100).skip(1).count(true));
- assert.eq(5, db.foo.find().limit(-100).skip(1).count(true));
- assert.eq(5, db.foo.find().limit(0).skip(1).count(true));
-
- assert.eq(0, db.foo.find().limit(2).skip(10).count(true));
- assert.eq(0, db.foo.find().limit(-2).skip(10).count(true));
- assert.eq(0, db.foo.find().limit(100).skip(10).count(true));
- assert.eq(0, db.foo.find().limit(-100).skip(10).count(true));
- assert.eq(0, db.foo.find().limit(0).skip(10).count(true));
-
- assert.eq(2, db.foo.find().limit(2).itcount(), "LS1");
- assert.eq(2, db.foo.find().skip(2).limit(2).itcount(), "LS2");
- assert.eq(1, db.foo.find().skip(5).limit(2).itcount(), "LS3");
- assert.eq(6, db.foo.find().limit(2).count(), "LSC1");
- assert.eq(2, db.foo.find().limit(2).size(), "LSC2");
- assert.eq(2, db.foo.find().skip(2).limit(2).size(), "LSC3");
- assert.eq(1, db.foo.find().skip(5).limit(2).size(), "LSC4");
- assert.eq(4, db.foo.find().skip(1).limit(4).size(), "LSC5");
- assert.eq(5, db.foo.find().skip(1).limit(6).size(), "LSC6");
-
- // SERVER-3567 older negative limit tests
- assert.eq(2, db.foo.find().limit(2).itcount(), "N1");
- assert.eq(2, db.foo.find().limit(-2).itcount(), "N2");
- assert.eq(2, db.foo.find().skip(4).limit(2).itcount(), "N3");
- assert.eq(2, db.foo.find().skip(4).limit(-2).itcount(), "N4");
-
- // v.
- function nameString(c) {
- var s = "";
- while (c.hasNext()) {
- var o = c.next();
- if (s.length > 0)
- s += ",";
- s += o.name;
- }
- return s;
+'use strict';
+
+var s = new ShardingTest({shards: 2});
+var db = s.getDB("test");
+
+// ************** Test Set #1 *************
+// Basic counts on "bar" collections, not yet sharded
+
+db.bar.save({n: 1});
+db.bar.save({n: 2});
+db.bar.save({n: 3});
+
+assert.eq(3, db.bar.find().count(), "bar 1");
+assert.eq(1, db.bar.find({n: 1}).count(), "bar 2");
+
+//************** Test Set #2 *************
+// Basic counts on sharded "foo" collection.
+// 1. Create foo collection, insert 6 docs
+// 2. Divide into three chunks
+// 3. Test counts before chunk migrations
+// 4. Manually move chunks. Now each shard should have 3 docs.
+// 5. i. Test basic counts on foo
+// ii. Test counts with limit
+// iii. Test counts with skip
+// iv. Test counts with skip + limit
+// v. Test counts with skip + limit + sorting
+// 6. Insert 10 more docs. Further limit/skip testing with a find query
+// 7. Test invalid queries/values
+
+// part 1
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.foo", key: {name: 1}});
+
+var primary = s.getPrimaryShard("test").getDB("test");
+var secondary = s.getOther(primary).getDB("test");
+
+assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check A");
+
+assert.commandWorked(db.foo.insert({_id: 1, name: "eliot"}));
+assert.commandWorked(db.foo.insert({_id: 2, name: "sara"}));
+assert.commandWorked(db.foo.insert({_id: 3, name: "bob"}));
+assert.commandWorked(db.foo.insert({_id: 4, name: "joe"}));
+assert.commandWorked(db.foo.insert({_id: 5, name: "mark"}));
+assert.commandWorked(db.foo.insert({_id: 6, name: "allan"}));
+
+assert.eq(6, db.foo.find().count(), "basic count");
+
+// part 2
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "allan"}}));
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "sara"}}));
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "eliot"}}));
+
+// MINKEY->allan,bob->eliot,joe,mark->sara,MAXKEY
+
+s.printChunks();
+
+// part 3
+assert.eq(6, db.foo.find().count(), "basic count after split ");
+assert.eq(6, db.foo.find().sort({name: 1}).count(), "basic count after split sorted ");
+
+// part 4
+assert.commandWorked(s.s0.adminCommand({
+ moveChunk: "test.foo",
+ find: {name: "eliot"},
+ to: secondary.getMongo().name,
+ _waitForDelete: true
+}));
+
+assert.eq(3, primary.foo.find().toArray().length, "primary count");
+assert.eq(3, secondary.foo.find().toArray().length, "secondary count");
+assert.eq(3, primary.foo.find().sort({name: 1}).toArray().length, "primary count sorted");
+assert.eq(3, secondary.foo.find().sort({name: 1}).toArray().length, "secondary count sorted");
+
+// part 5
+// Some redundant tests, but better safe than sorry. These are fast tests, anyway.
+
+// i.
+assert.eq(6, db.foo.find().count(), "total count after move");
+assert.eq(6, db.foo.find().toArray().length, "total count after move");
+assert.eq(6, db.foo.find().sort({name: 1}).toArray().length, "total count() sorted");
+assert.eq(6, db.foo.find().sort({name: 1}).count(), "total count with count() after move");
+
+// ii.
+assert.eq(2, db.foo.find().limit(2).count(true));
+assert.eq(2, db.foo.find().limit(-2).count(true));
+assert.eq(6, db.foo.find().limit(100).count(true));
+assert.eq(6, db.foo.find().limit(-100).count(true));
+assert.eq(6, db.foo.find().limit(0).count(true));
+
+// iii.
+assert.eq(6, db.foo.find().skip(0).count(true));
+assert.eq(5, db.foo.find().skip(1).count(true));
+assert.eq(4, db.foo.find().skip(2).count(true));
+assert.eq(3, db.foo.find().skip(3).count(true));
+assert.eq(2, db.foo.find().skip(4).count(true));
+assert.eq(1, db.foo.find().skip(5).count(true));
+assert.eq(0, db.foo.find().skip(6).count(true));
+assert.eq(0, db.foo.find().skip(7).count(true));
+
+// iv.
+assert.eq(2, db.foo.find().limit(2).skip(1).count(true));
+assert.eq(2, db.foo.find().limit(-2).skip(1).count(true));
+assert.eq(5, db.foo.find().limit(100).skip(1).count(true));
+assert.eq(5, db.foo.find().limit(-100).skip(1).count(true));
+assert.eq(5, db.foo.find().limit(0).skip(1).count(true));
+
+assert.eq(0, db.foo.find().limit(2).skip(10).count(true));
+assert.eq(0, db.foo.find().limit(-2).skip(10).count(true));
+assert.eq(0, db.foo.find().limit(100).skip(10).count(true));
+assert.eq(0, db.foo.find().limit(-100).skip(10).count(true));
+assert.eq(0, db.foo.find().limit(0).skip(10).count(true));
+
+assert.eq(2, db.foo.find().limit(2).itcount(), "LS1");
+assert.eq(2, db.foo.find().skip(2).limit(2).itcount(), "LS2");
+assert.eq(1, db.foo.find().skip(5).limit(2).itcount(), "LS3");
+assert.eq(6, db.foo.find().limit(2).count(), "LSC1");
+assert.eq(2, db.foo.find().limit(2).size(), "LSC2");
+assert.eq(2, db.foo.find().skip(2).limit(2).size(), "LSC3");
+assert.eq(1, db.foo.find().skip(5).limit(2).size(), "LSC4");
+assert.eq(4, db.foo.find().skip(1).limit(4).size(), "LSC5");
+assert.eq(5, db.foo.find().skip(1).limit(6).size(), "LSC6");
+
+// SERVER-3567 older negative limit tests
+assert.eq(2, db.foo.find().limit(2).itcount(), "N1");
+assert.eq(2, db.foo.find().limit(-2).itcount(), "N2");
+assert.eq(2, db.foo.find().skip(4).limit(2).itcount(), "N3");
+assert.eq(2, db.foo.find().skip(4).limit(-2).itcount(), "N4");
+
+// v.
+function nameString(c) {
+ var s = "";
+ while (c.hasNext()) {
+ var o = c.next();
+ if (s.length > 0)
+ s += ",";
+ s += o.name;
}
- assert.eq("allan,bob,eliot,joe,mark,sara", nameString(db.foo.find().sort({name: 1})), "sort 1");
- assert.eq(
- "sara,mark,joe,eliot,bob,allan", nameString(db.foo.find().sort({name: -1})), "sort 2");
-
- assert.eq("allan,bob", nameString(db.foo.find().sort({name: 1}).limit(2)), "LSD1");
- assert.eq("bob,eliot", nameString(db.foo.find().sort({name: 1}).skip(1).limit(2)), "LSD2");
- assert.eq("joe,mark", nameString(db.foo.find().sort({name: 1}).skip(3).limit(2)), "LSD3");
-
- assert.eq("eliot,sara", nameString(db.foo.find().sort({_id: 1}).limit(2)), "LSE1");
- assert.eq("sara,bob", nameString(db.foo.find().sort({_id: 1}).skip(1).limit(2)), "LSE2");
- assert.eq("joe,mark", nameString(db.foo.find().sort({_id: 1}).skip(3).limit(2)), "LSE3");
-
- // part 6
- for (var i = 0; i < 10; i++) {
- assert.commandWorked(db.foo.insert({_id: 7 + i, name: "zzz" + i}));
- }
-
- assert.eq(10, db.foo.find({name: {$gt: "z"}}).itcount(), "LSF1");
- assert.eq(10, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).itcount(), "LSF2");
- assert.eq(5, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).skip(5).itcount(), "LSF3");
- assert.eq(3, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).skip(5).limit(3).itcount(), "LSF4");
-
- // part 7
- // Make sure count command returns error for invalid queries
- var badCmdResult = db.runCommand({count: 'foo', query: {$c: {$abc: 3}}});
- assert(!badCmdResult.ok, "invalid query syntax didn't return error");
- assert(badCmdResult.errmsg.length > 0, "no error msg for invalid query");
-
- // Negative skip values should return error
- var negSkipResult = db.runCommand({count: 'foo', skip: -2});
- assert(!negSkipResult.ok, "negative skip value shouldn't work");
- assert(negSkipResult.errmsg.length > 0, "no error msg for negative skip");
-
- // Negative skip values with positive limit should return error
- var negSkipLimitResult = db.runCommand({count: 'foo', skip: -2, limit: 1});
- assert(!negSkipLimitResult.ok, "negative skip value with limit shouldn't work");
- assert(negSkipLimitResult.errmsg.length > 0, "no error msg for negative skip");
-
- s.stop();
+ return s;
+}
+assert.eq("allan,bob,eliot,joe,mark,sara", nameString(db.foo.find().sort({name: 1})), "sort 1");
+assert.eq("sara,mark,joe,eliot,bob,allan", nameString(db.foo.find().sort({name: -1})), "sort 2");
+
+assert.eq("allan,bob", nameString(db.foo.find().sort({name: 1}).limit(2)), "LSD1");
+assert.eq("bob,eliot", nameString(db.foo.find().sort({name: 1}).skip(1).limit(2)), "LSD2");
+assert.eq("joe,mark", nameString(db.foo.find().sort({name: 1}).skip(3).limit(2)), "LSD3");
+
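+// The _id values reflect insertion order: 1=eliot, 2=sara, 3=bob, 4=joe, 5=mark, 6=allan.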
+assert.eq("eliot,sara", nameString(db.foo.find().sort({_id: 1}).limit(2)), "LSE1");
+assert.eq("sara,bob", nameString(db.foo.find().sort({_id: 1}).skip(1).limit(2)), "LSE2");
+assert.eq("joe,mark", nameString(db.foo.find().sort({_id: 1}).skip(3).limit(2)), "LSE3");
+
+// part 6
+for (var i = 0; i < 10; i++) {
+ assert.commandWorked(db.foo.insert({_id: 7 + i, name: "zzz" + i}));
+}
+
+assert.eq(10, db.foo.find({name: {$gt: "z"}}).itcount(), "LSF1");
+assert.eq(10, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).itcount(), "LSF2");
+assert.eq(5, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).skip(5).itcount(), "LSF3");
+assert.eq(3, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).skip(5).limit(3).itcount(), "LSF4");
+
+// part 7
+// Make sure count command returns error for invalid queries
+var badCmdResult = db.runCommand({count: 'foo', query: {$c: {$abc: 3}}});
+assert(!badCmdResult.ok, "invalid query syntax didn't return error");
+assert(badCmdResult.errmsg.length > 0, "no error msg for invalid query");
+
+// Negative skip values should return error
+var negSkipResult = db.runCommand({count: 'foo', skip: -2});
+assert(!negSkipResult.ok, "negative skip value shouldn't work");
+assert(negSkipResult.errmsg.length > 0, "no error msg for negative skip");
+
+// Negative skip values with positive limit should return error
+var negSkipLimitResult = db.runCommand({count: 'foo', skip: -2, limit: 1});
+assert(!negSkipLimitResult.ok, "negative skip value with limit shouldn't work");
+assert(negSkipLimitResult.errmsg.length > 0, "no error msg for negative skip");
+
+s.stop();
})();
diff --git a/jstests/sharding/count2.js b/jstests/sharding/count2.js
index d5bad76a246..b1d6bad4bf3 100644
--- a/jstests/sharding/count2.js
+++ b/jstests/sharding/count2.js
@@ -1,56 +1,55 @@
(function() {
- var s1 = new ShardingTest({name: "count2", shards: 2, mongos: 2});
- var s2 = s1._mongos[1];
+var s1 = new ShardingTest({name: "count2", shards: 2, mongos: 2});
+var s2 = s1._mongos[1];
- s1.adminCommand({enablesharding: "test"});
- s1.ensurePrimaryShard('test', s1.shard1.shardName);
- s1.adminCommand({shardcollection: "test.foo", key: {name: 1}});
+s1.adminCommand({enablesharding: "test"});
+s1.ensurePrimaryShard('test', s1.shard1.shardName);
+s1.adminCommand({shardcollection: "test.foo", key: {name: 1}});
- var db1 = s1.getDB("test").foo;
- var db2 = s2.getDB("test").foo;
+var db1 = s1.getDB("test").foo;
+var db2 = s2.getDB("test").foo;
- assert.eq(1, s1.config.chunks.count({"ns": "test.foo"}), "sanity check A");
+assert.eq(1, s1.config.chunks.count({"ns": "test.foo"}), "sanity check A");
- db1.save({name: "aaa"});
- db1.save({name: "bbb"});
- db1.save({name: "ccc"});
- db1.save({name: "ddd"});
- db1.save({name: "eee"});
- db1.save({name: "fff"});
+db1.save({name: "aaa"});
+db1.save({name: "bbb"});
+db1.save({name: "ccc"});
+db1.save({name: "ddd"});
+db1.save({name: "eee"});
+db1.save({name: "fff"});
- s1.adminCommand({split: "test.foo", middle: {name: "ddd"}});
+s1.adminCommand({split: "test.foo", middle: {name: "ddd"}});
- assert.eq(3, db1.count({name: {$gte: "aaa", $lt: "ddd"}}), "initial count mongos1");
- assert.eq(3, db2.count({name: {$gte: "aaa", $lt: "ddd"}}), "initial count mongos2");
+assert.eq(3, db1.count({name: {$gte: "aaa", $lt: "ddd"}}), "initial count mongos1");
+assert.eq(3, db2.count({name: {$gte: "aaa", $lt: "ddd"}}), "initial count mongos2");
- s1.printChunks("test.foo");
+s1.printChunks("test.foo");
- s1.adminCommand({
- movechunk: "test.foo",
- find: {name: "aaa"},
- to: s1.getOther(s1.getPrimaryShard("test")).name,
- _waitForDelete: true
- });
+s1.adminCommand({
+ movechunk: "test.foo",
+ find: {name: "aaa"},
+ to: s1.getOther(s1.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
- assert.eq(3, db1.count({name: {$gte: "aaa", $lt: "ddd"}}), "post count mongos1");
+assert.eq(3, db1.count({name: {$gte: "aaa", $lt: "ddd"}}), "post count mongos1");
- // The second mongos still thinks its shard mapping is valid and accepts a cound
- print("before sleep: " + Date());
- sleep(2000);
- print("after sleep: " + Date());
- s1.printChunks("test.foo");
- assert.eq(3, db2.find({name: {$gte: "aaa", $lt: "ddd"}}).count(), "post count mongos2");
+// The second mongos still thinks its shard mapping is valid and accepts a count
+print("before sleep: " + Date());
+sleep(2000);
+print("after sleep: " + Date());
+s1.printChunks("test.foo");
+assert.eq(3, db2.find({name: {$gte: "aaa", $lt: "ddd"}}).count(), "post count mongos2");
- db2.findOne();
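+// Running a query through the second mongos should make it refresh its stale chunk mapping.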
+db2.findOne();
- assert.eq(3, db2.count({name: {$gte: "aaa", $lt: "ddd"}}));
+assert.eq(3, db2.count({name: {$gte: "aaa", $lt: "ddd"}}));
- assert.eq(4, db2.find().limit(4).count(true));
- assert.eq(4, db2.find().limit(-4).count(true));
- assert.eq(6, db2.find().limit(0).count(true));
- assert.eq(6, db2.getDB().runCommand({count: db2.getName(), limit: 0}).n);
-
- s1.stop();
+assert.eq(4, db2.find().limit(4).count(true));
+assert.eq(4, db2.find().limit(-4).count(true));
+assert.eq(6, db2.find().limit(0).count(true));
+assert.eq(6, db2.getDB().runCommand({count: db2.getName(), limit: 0}).n);
+s1.stop();
})();
diff --git a/jstests/sharding/count_config_servers.js b/jstests/sharding/count_config_servers.js
index 6f2b244204b..ff7cff2c698 100644
--- a/jstests/sharding/count_config_servers.js
+++ b/jstests/sharding/count_config_servers.js
@@ -8,62 +8,61 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- var st = new ShardingTest({name: 'sync_conn_cmd', shards: 0});
- st.s.setSlaveOk(true);
-
- var configDB = st.config;
- var coll = configDB.test;
-
- for (var x = 0; x < 10; x++) {
- assert.writeOK(coll.insert({v: x}));
- }
-
- if (st.configRS) {
- // Make sure the inserts are replicated to all config servers.
- st.configRS.awaitReplication();
- }
-
- var testNormalCount = function() {
- var cmdRes = configDB.runCommand({count: coll.getName()});
- assert(cmdRes.ok);
- assert.eq(10, cmdRes.n);
- };
-
- var testCountWithQuery = function() {
- var cmdRes = configDB.runCommand({count: coll.getName(), query: {v: {$gt: 6}}});
- assert(cmdRes.ok);
- assert.eq(3, cmdRes.n);
- };
-
- // Use invalid query operator to make the count return error
- var testInvalidCount = function() {
- var cmdRes = configDB.runCommand({count: coll.getName(), query: {$c: {$abc: 3}}});
- assert(!cmdRes.ok);
- assert(cmdRes.errmsg.length > 0);
- };
-
- // Test with all config servers up
- testNormalCount();
- testCountWithQuery();
- testInvalidCount();
-
- // Test with the first config server down
- MongoRunner.stopMongod(st.c0);
-
- testNormalCount();
- testCountWithQuery();
- testInvalidCount();
-
- // Test with the first and second config server down
- MongoRunner.stopMongod(st.c1);
- jsTest.log('Second server is down');
-
- testNormalCount();
- testCountWithQuery();
- testInvalidCount();
-
- st.stop();
-
+"use strict";
+
+var st = new ShardingTest({name: 'sync_conn_cmd', shards: 0});
+st.s.setSlaveOk(true);
+
+var configDB = st.config;
+var coll = configDB.test;
+
+for (var x = 0; x < 10; x++) {
+ assert.writeOK(coll.insert({v: x}));
+}
+
+if (st.configRS) {
+ // Make sure the inserts are replicated to all config servers.
+ st.configRS.awaitReplication();
+}
+
+var testNormalCount = function() {
+ var cmdRes = configDB.runCommand({count: coll.getName()});
+ assert(cmdRes.ok);
+ assert.eq(10, cmdRes.n);
+};
+
+var testCountWithQuery = function() {
+ var cmdRes = configDB.runCommand({count: coll.getName(), query: {v: {$gt: 6}}});
+ assert(cmdRes.ok);
+ assert.eq(3, cmdRes.n);
+};
+
+// Use invalid query operator to make the count return error
+var testInvalidCount = function() {
+ var cmdRes = configDB.runCommand({count: coll.getName(), query: {$c: {$abc: 3}}});
+ assert(!cmdRes.ok);
+ assert(cmdRes.errmsg.length > 0);
+};
+
+// Test with all config servers up
+testNormalCount();
+testCountWithQuery();
+testInvalidCount();
+
+// Test with the first config server down
+MongoRunner.stopMongod(st.c0);
+
+testNormalCount();
+testCountWithQuery();
+testInvalidCount();
+
+// Test with the first and second config server down
+MongoRunner.stopMongod(st.c1);
+jsTest.log('Second server is down');
+
+testNormalCount();
+testCountWithQuery();
+testInvalidCount();
+
+st.stop();
}());
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
index f93ed7e0fa6..596509c1c2d 100644
--- a/jstests/sharding/count_slaveok.js
+++ b/jstests/sharding/count_slaveok.js
@@ -7,67 +7,67 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- var st = new ShardingTest({shards: 1, mongos: 1, other: {rs: true, rs0: {nodes: 2}}});
- var rst = st.rs0;
+var st = new ShardingTest({shards: 1, mongos: 1, other: {rs: true, rs0: {nodes: 2}}});
+var rst = st.rs0;
- // Insert data into replica set
- var conn = new Mongo(st.s.host);
+// Insert data into replica set
+var conn = new Mongo(st.s.host);
- var coll = conn.getCollection('test.countSlaveOk');
- coll.drop();
+var coll = conn.getCollection('test.countSlaveOk');
+coll.drop();
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 300; i++) {
- bulk.insert({i: i % 10});
- }
- assert.writeOK(bulk.execute());
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 300; i++) {
+ bulk.insert({i: i % 10});
+}
+assert.writeOK(bulk.execute());
- var connA = conn;
- var connB = new Mongo(st.s.host);
- var connC = new Mongo(st.s.host);
+var connA = conn;
+var connB = new Mongo(st.s.host);
+var connC = new Mongo(st.s.host);
- st.printShardingStatus();
+st.printShardingStatus();
- // Wait for client to update itself and replication to finish
- rst.awaitReplication();
+// Wait for client to update itself and replication to finish
+rst.awaitReplication();
- var primary = rst.getPrimary();
- var sec = rst.getSecondary();
+var primary = rst.getPrimary();
+var sec = rst.getSecondary();
- // Data now inserted... stop the master, since only two in set, other will still be secondary
- rst.stop(rst.getPrimary());
- printjson(rst.status());
+// Data now inserted... stop the master, since only two in set, other will still be secondary
+rst.stop(rst.getPrimary());
+printjson(rst.status());
- // Wait for the mongos to recognize the slave
- awaitRSClientHosts(conn, sec, {ok: true, secondary: true});
+// Wait for the mongos to recognize the slave
+awaitRSClientHosts(conn, sec, {ok: true, secondary: true});
- // Make sure that mongos realizes that primary is already down
- awaitRSClientHosts(conn, primary, {ok: false});
+// Make sure that mongos realizes that primary is already down
+awaitRSClientHosts(conn, primary, {ok: false});
- // Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
- // master is down
- conn.setSlaveOk();
+// Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
+// master is down
+conn.setSlaveOk();
- // count using the command path
- assert.eq(30, coll.find({i: 0}).count());
- // count using the query path
- assert.eq(30, coll.find({i: 0}).itcount());
- assert.eq(10, coll.distinct("i").length);
+// count using the command path
+assert.eq(30, coll.find({i: 0}).count());
+// count using the query path
+assert.eq(30, coll.find({i: 0}).itcount());
+assert.eq(10, coll.distinct("i").length);
- try {
- conn.setSlaveOk(false);
- // Should throw exception, since not slaveOk'd
- coll.find({i: 0}).count();
+try {
+ conn.setSlaveOk(false);
+ // Should throw exception, since not slaveOk'd
+ coll.find({i: 0}).count();
- print("Should not reach here!");
- assert(false);
- } catch (e) {
- print("Non-slaveOk'd connection failed.");
- }
+ print("Should not reach here!");
+ assert(false);
+} catch (e) {
+ print("Non-slaveOk'd connection failed.");
+}
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/covered_shard_key_indexes.js b/jstests/sharding/covered_shard_key_indexes.js
index ce6851cafe5..b68c4bf1bca 100644
--- a/jstests/sharding/covered_shard_key_indexes.js
+++ b/jstests/sharding/covered_shard_key_indexes.js
@@ -6,148 +6,142 @@
load("jstests/libs/analyze_plan.js");
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 1});
- var coll = st.s0.getCollection("foo.bar");
-
- assert.commandWorked(st.s0.adminCommand({enableSharding: coll.getDB() + ""}));
-
- jsTest.log('Tests with _id : 1 shard key');
- coll.drop();
- assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {_id: 1}}));
- st.printShardingStatus();
-
- assert.commandWorked(
- st.shard0.adminCommand({setParameter: 1, logComponentVerbosity: {query: {verbosity: 5}}}));
-
- // Insert some data
- assert.writeOK(coll.insert({_id: true, a: true, b: true}));
-
- // Index without shard key query - not covered
- assert.commandWorked(coll.ensureIndex({a: 1}));
- assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(1,
- coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
-
- // Index with shard key query - covered when projecting
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.ensureIndex({a: 1, _id: 1}));
- assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(0,
- coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
-
- // Compound index with shard key query - covered when projecting
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.ensureIndex({a: 1, b: 1, _id: 1}));
- assert.eq(1, coll.find({a: true, b: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(0,
- coll.find({a: true, b: true}, {_id: 1, a: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
- jsTest.log('Tests with _id : hashed shard key');
- coll.drop();
- assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {_id: "hashed"}}));
- st.printShardingStatus();
-
- // Insert some data
- assert.writeOK(coll.insert({_id: true, a: true, b: true}));
-
- // Index without shard key query - not covered
- assert.commandWorked(coll.ensureIndex({a: 1}));
- assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(1,
- coll.find({a: true}, {_id: 0, a: 1}).explain(true).executionStats.totalDocsExamined);
-
- // Index with shard key query - can't be covered since hashed index
- assert.commandWorked(coll.dropIndex({a: 1}));
- assert.eq(1, coll.find({_id: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(1, coll.find({_id: true}, {_id: 0}).explain(true).executionStats.totalDocsExamined);
-
- jsTest.log('Tests with compound shard key');
- coll.drop();
- assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {a: 1, b: 1}}));
- st.printShardingStatus();
-
- // Insert some data
- assert.writeOK(coll.insert({_id: true, a: true, b: true, c: true, d: true}));
-
- // Index without shard key query - not covered
- assert.commandWorked(coll.ensureIndex({c: 1}));
- assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(1,
- coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
- // Index with shard key query - covered when projecting
- assert.commandWorked(coll.dropIndex({c: 1}));
- assert.commandWorked(coll.ensureIndex({c: 1, b: 1, a: 1}));
- assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(0,
- coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
- // Compound index with shard key query - covered when projecting
- assert.commandWorked(coll.dropIndex({c: 1, b: 1, a: 1}));
- assert.commandWorked(coll.ensureIndex({c: 1, d: 1, a: 1, b: 1, _id: 1}));
- assert.eq(1, coll.find({c: true, d: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(0,
- coll.find({c: true, d: true}, {a: 1, b: 1, c: 1, d: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
- jsTest.log('Tests with nested shard key');
- coll.drop();
- assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {'a.b': 1}}));
- st.printShardingStatus();
-
- // Insert some data
- assert.writeOK(coll.insert({_id: true, a: {b: true}, c: true}));
-
- // Index without shard key query - not covered
- assert.commandWorked(coll.ensureIndex({c: 1}));
- assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(1,
- coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
- // Index with shard key query - can be covered given the appropriate projection.
- assert.commandWorked(coll.dropIndex({c: 1}));
- assert.commandWorked(coll.ensureIndex({c: 1, 'a.b': 1}));
- assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(0,
- coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
- jsTest.log('Tests with bad data with no shard key');
- coll.drop();
- assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {a: 1}}));
- st.printShardingStatus();
-
- // Insert some bad data manually on the shard
- assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: "bad data", c: true}));
-
- // Index without shard key query - not covered but succeeds
- assert.commandWorked(coll.ensureIndex({c: 1}));
- var explain = coll.find({c: true}).explain(true).executionStats;
- assert.eq(0, explain.nReturned);
- assert.eq(1, explain.totalDocsExamined);
- assert.eq(1, getChunkSkips(explain.executionStages.shards[0].executionStages));
-
- // Index with shard key query - covered and succeeds and returns result
- //
- // NOTE: This is weird and only a result of the fact that we don't have a dedicated "does not
- // exist" value for indexes
- assert.commandWorked(coll.ensureIndex({c: 1, a: 1}));
- var explain = coll.find({c: true}, {_id: 0, a: 1, c: 1}).explain(true).executionStats;
- assert.eq(1, explain.nReturned);
- assert.eq(0, explain.totalDocsExamined);
- assert.eq(0, getChunkSkips(explain.executionStages.shards[0].executionStages));
-
- st.stop();
+'use strict';
+
+var st = new ShardingTest({shards: 1});
+var coll = st.s0.getCollection("foo.bar");
+
+assert.commandWorked(st.s0.adminCommand({enableSharding: coll.getDB() + ""}));
+
+jsTest.log('Tests with _id : 1 shard key');
+coll.drop();
+assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {_id: 1}}));
+st.printShardingStatus();
+
+assert.commandWorked(
+ st.shard0.adminCommand({setParameter: 1, logComponentVerbosity: {query: {verbosity: 5}}}));
+
+// Insert some data
+assert.writeOK(coll.insert({_id: true, a: true, b: true}));
+
+// Index without shard key query - not covered
+assert.commandWorked(coll.ensureIndex({a: 1}));
+assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1, coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
+
+// Index with shard key query - covered when projecting
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(coll.ensureIndex({a: 1, _id: 1}));
+assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(0, coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
+
+// Compound index with shard key query - covered when projecting
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1, _id: 1}));
+assert.eq(1, coll.find({a: true, b: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(
+ 0,
+ coll.find({a: true, b: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
+
+jsTest.log('Tests with _id : hashed shard key');
+coll.drop();
+assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {_id: "hashed"}}));
+st.printShardingStatus();
+
+// Insert some data
+assert.writeOK(coll.insert({_id: true, a: true, b: true}));
+
+// Index without shard key query - not covered
+assert.commandWorked(coll.ensureIndex({a: 1}));
+assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1, coll.find({a: true}, {_id: 0, a: 1}).explain(true).executionStats.totalDocsExamined);
+
+// Index with shard key query - can't be covered since hashed index
+assert.commandWorked(coll.dropIndex({a: 1}));
+assert.eq(1, coll.find({_id: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1, coll.find({_id: true}, {_id: 0}).explain(true).executionStats.totalDocsExamined);
+
+jsTest.log('Tests with compound shard key');
+coll.drop();
+assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {a: 1, b: 1}}));
+st.printShardingStatus();
+
+// Insert some data
+assert.writeOK(coll.insert({_id: true, a: true, b: true, c: true, d: true}));
+
+// Index without shard key query - not covered
+assert.commandWorked(coll.ensureIndex({c: 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1,
+ coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
+
+// Index with shard key query - covered when projecting
+assert.commandWorked(coll.dropIndex({c: 1}));
+assert.commandWorked(coll.ensureIndex({c: 1, b: 1, a: 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(0,
+ coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
+
+// Compound index with shard key query - covered when projecting
+assert.commandWorked(coll.dropIndex({c: 1, b: 1, a: 1}));
+assert.commandWorked(coll.ensureIndex({c: 1, d: 1, a: 1, b: 1, _id: 1}));
+assert.eq(1, coll.find({c: true, d: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(0,
+ coll.find({c: true, d: true}, {a: 1, b: 1, c: 1, d: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
+
+jsTest.log('Tests with nested shard key');
+coll.drop();
+assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {'a.b': 1}}));
+st.printShardingStatus();
+
+// Insert some data
+assert.writeOK(coll.insert({_id: true, a: {b: true}, c: true}));
+
+// Index without shard key query - not covered
+assert.commandWorked(coll.ensureIndex({c: 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(
+ 1,
+ coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1}).explain(true).executionStats.totalDocsExamined);
+
+// Index with shard key query - can be covered given the appropriate projection.
+assert.commandWorked(coll.dropIndex({c: 1}));
+assert.commandWorked(coll.ensureIndex({c: 1, 'a.b': 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(
+ 0,
+ coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1}).explain(true).executionStats.totalDocsExamined);
+
+jsTest.log('Tests with bad data with no shard key');
+coll.drop();
+assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {a: 1}}));
+st.printShardingStatus();
+
+// Insert some bad data manually on the shard
+assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: "bad data", c: true}));
+
+// Index without shard key query - not covered but succeeds
+assert.commandWorked(coll.ensureIndex({c: 1}));
+var explain = coll.find({c: true}).explain(true).executionStats;
+assert.eq(0, explain.nReturned);
+assert.eq(1, explain.totalDocsExamined);
+assert.eq(1, getChunkSkips(explain.executionStages.shards[0].executionStages));
+
+// Index with shard key query - covered and succeeds and returns result
+//
+// NOTE: This is weird and only a result of the fact that we don't have a dedicated "does not
+// exist" value for indexes
+assert.commandWorked(coll.ensureIndex({c: 1, a: 1}));
+var explain = coll.find({c: true}, {_id: 0, a: 1, c: 1}).explain(true).executionStats;
+assert.eq(1, explain.nReturned);
+assert.eq(0, explain.totalDocsExamined);
+assert.eq(0, getChunkSkips(explain.executionStages.shards[0].executionStages));
+
+st.stop();
})();
diff --git a/jstests/sharding/create_database.js b/jstests/sharding/create_database.js
index 441582aa201..04dfeff7877 100644
--- a/jstests/sharding/create_database.js
+++ b/jstests/sharding/create_database.js
@@ -3,69 +3,68 @@
* databaseVersion if FCV > 3.6, but not if FCV <= 3.6.
*/
(function() {
- 'use strict';
+'use strict';
- function createDatabase(mongos, dbName) {
- // A database is implicitly created when a collection inside it is created.
- assert.commandWorked(mongos.getDB(dbName).runCommand({create: collName}));
- }
+function createDatabase(mongos, dbName) {
+ // A database is implicitly created when a collection inside it is created.
+ assert.commandWorked(mongos.getDB(dbName).runCommand({create: collName}));
+}
- function cleanUp(mongos, dbName) {
- assert.commandWorked(mongos.getDB(dbName).runCommand({dropDatabase: 1}));
- }
+function cleanUp(mongos, dbName) {
+ assert.commandWorked(mongos.getDB(dbName).runCommand({dropDatabase: 1}));
+}
- function assertDbVersionAssigned(mongos, dbName) {
- createDatabase(mongos, dbName);
+function assertDbVersionAssigned(mongos, dbName) {
+ createDatabase(mongos, dbName);
- // Check that the entry in the sharding catalog contains a dbVersion.
- const dbEntry = mongos.getDB("config").getCollection("databases").findOne({_id: dbName});
- assert.neq(null, dbEntry);
- assert.neq(null, dbEntry.version);
- assert.neq(null, dbEntry.version.uuid);
- assert.eq(1, dbEntry.version.lastMod);
+ // Check that the entry in the sharding catalog contains a dbVersion.
+ const dbEntry = mongos.getDB("config").getCollection("databases").findOne({_id: dbName});
+ assert.neq(null, dbEntry);
+ assert.neq(null, dbEntry.version);
+ assert.neq(null, dbEntry.version.uuid);
+ assert.eq(1, dbEntry.version.lastMod);
- // Check that the catalog cache on the mongos contains the same dbVersion.
- const cachedDbEntry = mongos.adminCommand({getShardVersion: dbName});
- assert.commandWorked(cachedDbEntry);
- assert.eq(dbEntry.version.uuid, cachedDbEntry.version.uuid);
- assert.eq(dbEntry.version.lastMod, cachedDbEntry.version.lastMod);
+ // Check that the catalog cache on the mongos contains the same dbVersion.
+ const cachedDbEntry = mongos.adminCommand({getShardVersion: dbName});
+ assert.commandWorked(cachedDbEntry);
+ assert.eq(dbEntry.version.uuid, cachedDbEntry.version.uuid);
+ assert.eq(dbEntry.version.lastMod, cachedDbEntry.version.lastMod);
- cleanUp(mongos, dbName);
+ cleanUp(mongos, dbName);
- return dbEntry;
- }
+ return dbEntry;
+}
- function assertDbVersionNotAssigned(mongos, dbName) {
- createDatabase(mongos, dbName);
+function assertDbVersionNotAssigned(mongos, dbName) {
+ createDatabase(mongos, dbName);
- // Check that the entry in the sharding catalog *does not* contain a dbVersion.
- const dbEntry = mongos.getDB("config").getCollection("databases").findOne({_id: dbName});
- assert.neq(null, dbEntry);
- assert.eq(null, dbEntry.version);
+ // Check that the entry in the sharding catalog *does not* contain a dbVersion.
+ const dbEntry = mongos.getDB("config").getCollection("databases").findOne({_id: dbName});
+ assert.neq(null, dbEntry);
+ assert.eq(null, dbEntry.version);
- // Check that the catalog cache on the mongos *does not* contain a dbVersion.
- const cachedDbEntry = mongos.adminCommand({getShardVersion: dbName});
- assert.commandWorked(cachedDbEntry);
- assert.eq(null, cachedDbEntry.version);
+ // Check that the catalog cache on the mongos *does not* contain a dbVersion.
+ const cachedDbEntry = mongos.adminCommand({getShardVersion: dbName});
+ assert.commandWorked(cachedDbEntry);
+ assert.eq(null, cachedDbEntry.version);
- cleanUp(mongos, dbName);
+ cleanUp(mongos, dbName);
- return dbEntry;
- }
+ return dbEntry;
+}
- const dbName = "db1";
- const collName = "foo";
- const ns = dbName + "." + collName;
+const dbName = "db1";
+const collName = "foo";
+const ns = dbName + "." + collName;
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- // A new database is given a databaseVersion.
- let dbEntry1 = assertDbVersionAssigned(st.s, dbName);
+// A new database is given a databaseVersion.
+let dbEntry1 = assertDbVersionAssigned(st.s, dbName);
- // A new incarnation of a database that was previously dropped is given a fresh databaseVersion.
- let dbEntry2 = assertDbVersionAssigned(st.s, dbName);
- assert.neq(dbEntry1.version.uuid, dbEntry2.version.uuid);
-
- st.stop();
+// A new incarnation of a database that was previously dropped is given a fresh databaseVersion.
+let dbEntry2 = assertDbVersionAssigned(st.s, dbName);
+assert.neq(dbEntry1.version.uuid, dbEntry2.version.uuid);
+st.stop();
})();
diff --git a/jstests/sharding/create_idx_empty_primary.js b/jstests/sharding/create_idx_empty_primary.js
index 1610c1fef44..f11ffd13f2a 100644
--- a/jstests/sharding/create_idx_empty_primary.js
+++ b/jstests/sharding/create_idx_empty_primary.js
@@ -2,33 +2,32 @@
* Test to make sure that the createIndex command gets sent to all shards.
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2});
- assert.commandWorked(st.s.adminCommand({enablesharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
+var st = new ShardingTest({shards: 2});
+assert.commandWorked(st.s.adminCommand({enablesharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
- var testDB = st.s.getDB('test');
- assert.commandWorked(testDB.adminCommand({shardcollection: 'test.user', key: {_id: 1}}));
+var testDB = st.s.getDB('test');
+assert.commandWorked(testDB.adminCommand({shardcollection: 'test.user', key: {_id: 1}}));
- // Move only chunk out of primary shard.
- assert.commandWorked(
- testDB.adminCommand({movechunk: 'test.user', find: {_id: 0}, to: st.shard0.shardName}));
+// Move only chunk out of primary shard.
+assert.commandWorked(
+ testDB.adminCommand({movechunk: 'test.user', find: {_id: 0}, to: st.shard0.shardName}));
- assert.writeOK(testDB.user.insert({_id: 0}));
+assert.writeOK(testDB.user.insert({_id: 0}));
- var res = testDB.user.ensureIndex({i: 1});
- assert.commandWorked(res);
+var res = testDB.user.ensureIndex({i: 1});
+assert.commandWorked(res);
- var indexes = testDB.user.getIndexes();
- assert.eq(2, indexes.length);
+var indexes = testDB.user.getIndexes();
+assert.eq(2, indexes.length);
- indexes = st.rs0.getPrimary().getDB('test').user.getIndexes();
- assert.eq(2, indexes.length);
+indexes = st.rs0.getPrimary().getDB('test').user.getIndexes();
+assert.eq(2, indexes.length);
- indexes = st.rs1.getPrimary().getDB('test').user.getIndexes();
- assert.eq(2, indexes.length);
-
- st.stop();
+indexes = st.rs1.getPrimary().getDB('test').user.getIndexes();
+assert.eq(2, indexes.length);
+st.stop();
})();
diff --git a/jstests/sharding/current_op_no_shards.js b/jstests/sharding/current_op_no_shards.js
index 926b032e229..6d6dd3bdfcb 100644
--- a/jstests/sharding/current_op_no_shards.js
+++ b/jstests/sharding/current_op_no_shards.js
@@ -3,16 +3,15 @@
* set, and does not cause the mongoS floating point failure described in SERVER-30084.
*/
(function() {
- const st = new ShardingTest({shards: 0, config: 1});
+const st = new ShardingTest({shards: 0, config: 1});
- const adminDB = st.s.getDB("admin");
+const adminDB = st.s.getDB("admin");
- assert.commandWorked(
- adminDB.runCommand({aggregate: 1, pipeline: [{$currentOp: {}}], cursor: {}}));
- assert.commandWorked(adminDB.currentOp());
+assert.commandWorked(adminDB.runCommand({aggregate: 1, pipeline: [{$currentOp: {}}], cursor: {}}));
+assert.commandWorked(adminDB.currentOp());
- assert.eq(adminDB.aggregate([{$currentOp: {}}]).itcount(), 0);
- assert.eq(adminDB.currentOp().inprog.length, 0);
+assert.eq(adminDB.aggregate([{$currentOp: {}}]).itcount(), 0);
+assert.eq(adminDB.currentOp().inprog.length, 0);
- st.stop();
+st.stop();
})();
\ No newline at end of file
diff --git a/jstests/sharding/current_op_with_drop_shard.js b/jstests/sharding/current_op_with_drop_shard.js
index aaadca2dc3f..c6f9e7cad90 100644
--- a/jstests/sharding/current_op_with_drop_shard.js
+++ b/jstests/sharding/current_op_with_drop_shard.js
@@ -1,25 +1,25 @@
// Tests that currentOp is resilient to drop shard.
(function() {
- 'use strict';
+'use strict';
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
- // We need the balancer to remove a shard.
- st.startBalancer();
+// We need the balancer to remove a shard.
+st.startBalancer();
- const mongosDB = st.s.getDB(jsTestName());
- const shardName = st.shard0.shardName;
+const mongosDB = st.s.getDB(jsTestName());
+const shardName = st.shard0.shardName;
- var res = st.s.adminCommand({removeShard: shardName});
+var res = st.s.adminCommand({removeShard: shardName});
+assert.commandWorked(res);
+assert.eq('started', res.state);
+assert.soon(function() {
+ res = st.s.adminCommand({removeShard: shardName});
assert.commandWorked(res);
- assert.eq('started', res.state);
- assert.soon(function() {
- res = st.s.adminCommand({removeShard: shardName});
- assert.commandWorked(res);
- return ('completed' === res.state);
- }, "removeShard never completed for shard " + shardName);
+ return ('completed' === res.state);
+}, "removeShard never completed for shard " + shardName);
- assert.commandWorked(mongosDB.currentOp());
+assert.commandWorked(mongosDB.currentOp());
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index 92e321eac27..6b66c9cf130 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -2,69 +2,68 @@
// checks that cursors survive a chunk's move
(function() {
- var s = new ShardingTest({name: "sharding_cursor1", shards: 2});
+var s = new ShardingTest({name: "sharding_cursor1", shards: 2});
- s.config.settings.find().forEach(printjson);
+s.config.settings.find().forEach(printjson);
- // create a sharded 'test.foo', for the moment with just one chunk
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+// create a sharded 'test.foo', for the moment with just one chunk
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
- db = s.getDB("test");
- primary = s.getPrimaryShard("test").getDB("test");
- secondary = s.getOther(primary).getDB("test");
+db = s.getDB("test");
+primary = s.getPrimaryShard("test").getDB("test");
+secondary = s.getOther(primary).getDB("test");
- var numObjs = 30;
- var bulk = db.foo.initializeUnorderedBulkOp();
- for (i = 0; i < numObjs; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
- assert.eq(1,
- s.config.chunks.count({"ns": "test.foo"}),
- "test requires collection to have one chunk initially");
-
- // we'll split the collection in two and move the second chunk while three cursors are open
- // cursor1 still has more data in the first chunk, the one that didn't move
- // cursor2 buffered the last obj of the first chunk
- // cursor3 buffered data that was moved on the second chunk
- var cursor1 = db.foo.find().batchSize(3);
- assert.eq(3, cursor1.objsLeftInBatch());
- var cursor2 = db.foo.find().batchSize(5);
- assert.eq(5, cursor2.objsLeftInBatch());
- var cursor3 = db.foo.find().batchSize(7);
- assert.eq(7, cursor3.objsLeftInBatch());
+var numObjs = 30;
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (i = 0; i < numObjs; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
+assert.eq(1,
+ s.config.chunks.count({"ns": "test.foo"}),
+ "test requires collection to have one chunk initially");
- s.adminCommand({split: "test.foo", middle: {_id: 5}});
- s.adminCommand({movechunk: "test.foo", find: {_id: 5}, to: secondary.getMongo().name});
- assert.eq(2, s.config.chunks.count({"ns": "test.foo"}));
+// we'll split the collection in two and move the second chunk while three cursors are open
+// cursor1 still has more data in the first chunk, the one that didn't move
+// cursor2 buffered the last obj of the first chunk
+// cursor3 buffered data that was moved on the second chunk
+var cursor1 = db.foo.find().batchSize(3);
+assert.eq(3, cursor1.objsLeftInBatch());
+var cursor2 = db.foo.find().batchSize(5);
+assert.eq(5, cursor2.objsLeftInBatch());
+var cursor3 = db.foo.find().batchSize(7);
+assert.eq(7, cursor3.objsLeftInBatch());
- // the cursors should not have been affected
- assert.eq(numObjs, cursor1.itcount(), "c1");
- assert.eq(numObjs, cursor2.itcount(), "c2");
- assert.eq(numObjs, cursor3.itcount(), "c3");
+s.adminCommand({split: "test.foo", middle: {_id: 5}});
+s.adminCommand({movechunk: "test.foo", find: {_id: 5}, to: secondary.getMongo().name});
+assert.eq(2, s.config.chunks.count({"ns": "test.foo"}));
- // Test that a cursor with a 1 second timeout eventually times out.
- var cur = db.foo.find().batchSize(2);
- assert(cur.next(), "T1");
- assert(cur.next(), "T2");
- assert.commandWorked(s.admin.runCommand({
- setParameter: 1,
- cursorTimeoutMillis: 1000 // 1 second.
- }));
+// the cursors should not have been affected
+assert.eq(numObjs, cursor1.itcount(), "c1");
+assert.eq(numObjs, cursor2.itcount(), "c2");
+assert.eq(numObjs, cursor3.itcount(), "c3");
- assert.soon(function() {
- try {
- cur.next();
- cur.next();
- print("cursor still alive");
- return false;
- } catch (e) {
- return true;
- }
- }, "cursor failed to time out", /*timeout*/ 30000, /*interval*/ 5000);
+// Test that a cursor with a 1 second timeout eventually times out.
+var cur = db.foo.find().batchSize(2);
+assert(cur.next(), "T1");
+assert(cur.next(), "T2");
+assert.commandWorked(s.admin.runCommand({
+ setParameter: 1,
+ cursorTimeoutMillis: 1000 // 1 second.
+}));
- s.stop();
+assert.soon(function() {
+ try {
+ cur.next();
+ cur.next();
+ print("cursor still alive");
+ return false;
+ } catch (e) {
+ return true;
+ }
+}, "cursor failed to time out", /*timeout*/ 30000, /*interval*/ 5000);
+s.stop();
})();
diff --git a/jstests/sharding/cursor_timeout.js b/jstests/sharding/cursor_timeout.js
index a6be1762245..7c43fd8f99a 100644
--- a/jstests/sharding/cursor_timeout.js
+++ b/jstests/sharding/cursor_timeout.js
@@ -9,112 +9,111 @@
// After a period of inactivity, the test asserts that cursors #1 and #2 are still alive, and that
// #3 and #4 have been killed.
(function() {
- 'use strict';
-
- // Cursor timeout on mongod is handled by a single thread/timer that will sleep for
- // "clientCursorMonitorFrequencySecs" and add the sleep value to each operation's duration when
- // it wakes up, timing out those whose "now() - last accessed since" time exceeds. A cursor
- // timeout of 5 seconds with a monitor frequency of 1 second means an effective timeout period
- // of 4 to 5 seconds.
- const mongodCursorTimeoutMs = 5000;
-
- // Cursor timeout on mongos is handled by checking whether the "last accessed" cursor time stamp
- // is older than "now() - cursorTimeoutMillis" and is checked every
- // "clientCursorMonitorFrequencySecs" by a global thread/timer. A timeout of 4 seconds with a
- // monitor frequency of 1 second means an effective timeout period of 4 to 5 seconds.
- const mongosCursorTimeoutMs = 4000;
-
- const cursorMonitorFrequencySecs = 1;
-
- const st = new ShardingTest({
- shards: 2,
- other: {
- shardOptions: {
- verbose: 1,
- setParameter: {
- cursorTimeoutMillis: mongodCursorTimeoutMs,
- clientCursorMonitorFrequencySecs: cursorMonitorFrequencySecs
- }
- },
- mongosOptions: {
- verbose: 1,
- setParameter: {
- cursorTimeoutMillis: mongosCursorTimeoutMs,
- clientCursorMonitorFrequencySecs: cursorMonitorFrequencySecs
- }
- },
+'use strict';
+
+// Cursor timeout on mongod is handled by a single thread/timer that will sleep for
+// "clientCursorMonitorFrequencySecs" and add the sleep value to each operation's duration when
+// it wakes up, timing out those whose "now() - last accessed" time exceeds the timeout. A cursor
+// timeout of 5 seconds with a monitor frequency of 1 second means an effective timeout period
+// of 4 to 5 seconds.
+const mongodCursorTimeoutMs = 5000;
+
+// Cursor timeout on mongos is handled by checking whether the "last accessed" cursor time stamp
+// is older than "now() - cursorTimeoutMillis" and is checked every
+// "clientCursorMonitorFrequencySecs" by a global thread/timer. A timeout of 4 seconds with a
+// monitor frequency of 1 second means an effective timeout period of 4 to 5 seconds.
+const mongosCursorTimeoutMs = 4000;
+
+const cursorMonitorFrequencySecs = 1;
+
+const st = new ShardingTest({
+ shards: 2,
+ other: {
+ shardOptions: {
+ verbose: 1,
+ setParameter: {
+ cursorTimeoutMillis: mongodCursorTimeoutMs,
+ clientCursorMonitorFrequencySecs: cursorMonitorFrequencySecs
+ }
},
- enableBalancer: false
- });
-
- const adminDB = st.admin;
- const routerColl = st.s.getDB('test').user;
-
- const shardHost = st.config.shards.findOne({_id: st.shard1.shardName}).host;
- const mongod = new Mongo(shardHost);
- const shardColl = mongod.getCollection(routerColl.getFullName());
-
- assert.commandWorked(adminDB.runCommand({enableSharding: routerColl.getDB().getName()}));
- st.ensurePrimaryShard(routerColl.getDB().getName(), st.shard0.shardName);
-
- assert.commandWorked(
- adminDB.runCommand({shardCollection: routerColl.getFullName(), key: {x: 1}}));
- assert.commandWorked(adminDB.runCommand({split: routerColl.getFullName(), middle: {x: 10}}));
- assert.commandWorked(adminDB.runCommand({
- moveChunk: routerColl.getFullName(),
- find: {x: 11},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- for (let x = 0; x < 20; x++) {
- assert.writeOK(routerColl.insert({x: x}));
- }
-
- // Open both a normal and a no-timeout cursor on mongos. Batch size is 1 to ensure that
- // cursor.next() performs only a single operation.
- const routerCursorWithTimeout = routerColl.find().batchSize(1);
- const routerCursorWithNoTimeout = routerColl.find().batchSize(1);
- routerCursorWithNoTimeout.addOption(DBQuery.Option.noTimeout);
-
- // Open both a normal and a no-timeout cursor on mongod. Batch size is 1 to ensure that
- // cursor.next() performs only a single operation.
- const shardCursorWithTimeout = shardColl.find().batchSize(1);
- const shardCursorWithNoTimeout = shardColl.find().batchSize(1);
- shardCursorWithNoTimeout.addOption(DBQuery.Option.noTimeout);
-
- // Execute initial find on each cursor.
- routerCursorWithTimeout.next();
- routerCursorWithNoTimeout.next();
- shardCursorWithTimeout.next();
- shardCursorWithNoTimeout.next();
-
- // Wait until the idle cursor background job has killed the cursors that do not have the "no
- // timeout" flag set. We use the "cursorTimeoutMillis" and "clientCursorMonitorFrequencySecs"
- // setParameters above to reduce the amount of time we need to wait here.
- assert.soon(function() {
- return routerColl.getDB().serverStatus().metrics.cursor.timedOut > 0;
- }, "sharded cursor failed to time out");
-
- // Wait for the shard to have two open cursors on it (routerCursorWithNoTimeout and
- // shardCursorWithNoTimeout).
- // We cannot reliably use metrics.cursor.timedOut here, because this will be 2 if
- // routerCursorWithTimeout is killed for timing out on the shard, and 1 if
- // routerCursorWithTimeout is killed by a killCursors command from the mongos.
- assert.soon(function() {
- return shardColl.getDB().serverStatus().metrics.cursor.open.total == 2;
- }, "cursor failed to time out");
-
- assert.throws(function() {
- routerCursorWithTimeout.itcount();
- });
- assert.throws(function() {
- shardCursorWithTimeout.itcount();
- });
-
- // +1 because we already advanced once
- assert.eq(routerColl.count(), routerCursorWithNoTimeout.itcount() + 1);
- assert.eq(shardColl.count(), shardCursorWithNoTimeout.itcount() + 1);
-
- st.stop();
+ mongosOptions: {
+ verbose: 1,
+ setParameter: {
+ cursorTimeoutMillis: mongosCursorTimeoutMs,
+ clientCursorMonitorFrequencySecs: cursorMonitorFrequencySecs
+ }
+ },
+ },
+ enableBalancer: false
+});
+
+const adminDB = st.admin;
+const routerColl = st.s.getDB('test').user;
+
+const shardHost = st.config.shards.findOne({_id: st.shard1.shardName}).host;
+const mongod = new Mongo(shardHost);
+const shardColl = mongod.getCollection(routerColl.getFullName());
+
+assert.commandWorked(adminDB.runCommand({enableSharding: routerColl.getDB().getName()}));
+st.ensurePrimaryShard(routerColl.getDB().getName(), st.shard0.shardName);
+
+assert.commandWorked(adminDB.runCommand({shardCollection: routerColl.getFullName(), key: {x: 1}}));
+assert.commandWorked(adminDB.runCommand({split: routerColl.getFullName(), middle: {x: 10}}));
+assert.commandWorked(adminDB.runCommand({
+ moveChunk: routerColl.getFullName(),
+ find: {x: 11},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+for (let x = 0; x < 20; x++) {
+ assert.writeOK(routerColl.insert({x: x}));
+}
+
+// Open both a normal and a no-timeout cursor on mongos. Batch size is 1 to ensure that
+// cursor.next() performs only a single operation.
+const routerCursorWithTimeout = routerColl.find().batchSize(1);
+const routerCursorWithNoTimeout = routerColl.find().batchSize(1);
+routerCursorWithNoTimeout.addOption(DBQuery.Option.noTimeout);
+
+// Open both a normal and a no-timeout cursor on mongod. Batch size is 1 to ensure that
+// cursor.next() performs only a single operation.
+const shardCursorWithTimeout = shardColl.find().batchSize(1);
+const shardCursorWithNoTimeout = shardColl.find().batchSize(1);
+shardCursorWithNoTimeout.addOption(DBQuery.Option.noTimeout);
+
+// Execute initial find on each cursor.
+routerCursorWithTimeout.next();
+routerCursorWithNoTimeout.next();
+shardCursorWithTimeout.next();
+shardCursorWithNoTimeout.next();
+
+// Wait until the idle cursor background job has killed the cursors that do not have the "no
+// timeout" flag set. We use the "cursorTimeoutMillis" and "clientCursorMonitorFrequencySecs"
+// setParameters above to reduce the amount of time we need to wait here.
+assert.soon(function() {
+ return routerColl.getDB().serverStatus().metrics.cursor.timedOut > 0;
+}, "sharded cursor failed to time out");
+
+// Wait for the shard to have two open cursors on it (routerCursorWithNoTimeout and
+// shardCursorWithNoTimeout).
+// We cannot reliably use metrics.cursor.timedOut here, because this will be 2 if
+// routerCursorWithTimeout is killed for timing out on the shard, and 1 if
+// routerCursorWithTimeout is killed by a killCursors command from the mongos.
+assert.soon(function() {
+ return shardColl.getDB().serverStatus().metrics.cursor.open.total == 2;
+}, "cursor failed to time out");
+
+assert.throws(function() {
+ routerCursorWithTimeout.itcount();
+});
+assert.throws(function() {
+ shardCursorWithTimeout.itcount();
+});
+
+// +1 because we already advanced once
+assert.eq(routerColl.count(), routerCursorWithNoTimeout.itcount() + 1);
+assert.eq(shardColl.count(), shardCursorWithNoTimeout.itcount() + 1);
+
+st.stop();
})();
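(Note, illustrative only and not part of the patch: the test above shortens the idle-cursor timeout via the cursorTimeoutMillis and clientCursorMonitorFrequencySecs startup setParameters. A minimal mongo-shell sketch of the same knob applied to an already running node, assuming the standard jstests helpers and a hypothetical standalone 'conn' started purely for illustration:)

// Minimal sketch (assumptions noted above): lower the idle-cursor timeout at runtime.
const conn = MongoRunner.runMongod({});
const testDB = conn.getDB("test");
assert.writeOK(testDB.c.insert([{x: 1}, {x: 2}]));
assert.commandWorked(conn.adminCommand({setParameter: 1, cursorTimeoutMillis: 1000}));
const idleCursor = testDB.c.find().batchSize(1);
idleCursor.next();  // leaves a cursor open and idle on the server, eligible for timeout
MongoRunner.stopMongod(conn);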
diff --git a/jstests/sharding/cursor_valid_after_shard_stepdown.js b/jstests/sharding/cursor_valid_after_shard_stepdown.js
index b717d73cac8..c26de68b8c6 100644
--- a/jstests/sharding/cursor_valid_after_shard_stepdown.js
+++ b/jstests/sharding/cursor_valid_after_shard_stepdown.js
@@ -4,43 +4,42 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 1, rs: {nodes: 2}});
+var st = new ShardingTest({shards: 1, rs: {nodes: 2}});
- assert.commandWorked(st.s0.adminCommand({enablesharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardcollection: 'TestDB.TestColl', key: {x: 1}}));
+assert.commandWorked(st.s0.adminCommand({enablesharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardcollection: 'TestDB.TestColl', key: {x: 1}}));
- var db = st.s0.getDB('TestDB');
- var coll = db.TestColl;
+var db = st.s0.getDB('TestDB');
+var coll = db.TestColl;
- // Insert documents for the test
- assert.writeOK(coll.insert({x: 1, value: 'Test value 1'}));
- assert.writeOK(coll.insert({x: 2, value: 'Test value 2'}));
+// Insert documents for the test
+assert.writeOK(coll.insert({x: 1, value: 'Test value 1'}));
+assert.writeOK(coll.insert({x: 2, value: 'Test value 2'}));
- // Establish a cursor on the primary (by not using slaveOk read)
- var findCursor = assert.commandWorked(db.runCommand({find: 'TestColl', batchSize: 1})).cursor;
+// Establish a cursor on the primary (by not using slaveOk read)
+var findCursor = assert.commandWorked(db.runCommand({find: 'TestColl', batchSize: 1})).cursor;
- var shardVersionBeforeStepdown =
- assert.commandWorked(st.rs0.getPrimary().adminCommand({getShardVersion: 'TestDB.TestColl'}))
- .global;
- assert.neq(Timestamp(0, 0), shardVersionBeforeStepdown);
+var shardVersionBeforeStepdown =
+ assert.commandWorked(st.rs0.getPrimary().adminCommand({getShardVersion: 'TestDB.TestColl'}))
+ .global;
+assert.neq(Timestamp(0, 0), shardVersionBeforeStepdown);
- // Stepdown the primary of the shard and ensure that that cursor can still be read
- assert.commandWorked(st.rs0.getPrimary().adminCommand({replSetStepDown: 60, force: 1}));
+// Step down the primary of the shard and ensure that the cursor can still be read
+assert.commandWorked(st.rs0.getPrimary().adminCommand({replSetStepDown: 60, force: 1}));
- var getMoreCursor =
- assert.commandWorked(db.runCommand({getMore: findCursor.id, collection: 'TestColl'}))
- .cursor;
- assert.eq(0, getMoreCursor.id);
- assert.eq(2, getMoreCursor.nextBatch[0].x);
+var getMoreCursor =
+ assert.commandWorked(db.runCommand({getMore: findCursor.id, collection: 'TestColl'})).cursor;
+assert.eq(0, getMoreCursor.id);
+assert.eq(2, getMoreCursor.nextBatch[0].x);
- // After stepdown, the shard version will be reset
- var shardVersionAfterStepdown =
- assert.commandWorked(st.rs0.getPrimary().adminCommand({getShardVersion: 'TestDB.TestColl'}))
- .global;
- assert.eq("UNKNOWN", shardVersionAfterStepdown);
+// After stepdown, the shard version will be reset
+var shardVersionAfterStepdown =
+ assert.commandWorked(st.rs0.getPrimary().adminCommand({getShardVersion: 'TestDB.TestColl'}))
+ .global;
+assert.eq("UNKNOWN", shardVersionAfterStepdown);
- st.stop();
+st.stop();
})();
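(Note, illustrative only and not part of the patch: the command-level cursor protocol the test above exercises. A find with a batchSize returns {cursor: {id, firstBatch}}, and getMore on that id returns {cursor: {id, nextBatch}}, with id going to 0 once the cursor is exhausted. A minimal sketch, assuming 'testDB' is any connected database whose 'TestColl' collection holds exactly two documents:)

const findRes = assert.commandWorked(testDB.runCommand({find: 'TestColl', batchSize: 1}));
assert.eq(1, findRes.cursor.firstBatch.length);  // first document
const getMoreRes =
    assert.commandWorked(testDB.runCommand({getMore: findRes.cursor.id, collection: 'TestColl'}));
assert.eq(1, getMoreRes.cursor.nextBatch.length);  // remaining document
assert.eq(0, getMoreRes.cursor.id);                // cursor id 0 means it is exhausted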
diff --git a/jstests/sharding/database_and_shard_versioning_all_commands.js b/jstests/sharding/database_and_shard_versioning_all_commands.js
index d78133d1386..a0fe0fa5da6 100644
--- a/jstests/sharding/database_and_shard_versioning_all_commands.js
+++ b/jstests/sharding/database_and_shard_versioning_all_commands.js
@@ -3,622 +3,615 @@
* verifies that the commands match the specification.
*/
(function() {
- 'use strict';
-
- load('jstests/libs/profiler.js');
- load('jstests/sharding/libs/last_stable_mongos_commands.js');
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- const SHARD_VERSION_UNSHARDED = [Timestamp(0, 0), ObjectId("000000000000000000000000")];
-
- function validateTestCase(testCase) {
- assert(testCase.skip || testCase.command,
- "must specify exactly one of 'skip' or 'command' for test case " + tojson(testCase));
-
- if (testCase.skip) {
- for (let key of Object.keys(testCase)) {
- assert(
- key === "skip" || key === "conditional",
- "if a test case specifies 'skip', it must not specify any other fields besides 'conditional': " +
- key + ": " + tojson(testCase));
- }
- return;
+'use strict';
+
+load('jstests/libs/profiler.js');
+load('jstests/sharding/libs/last_stable_mongos_commands.js');
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+const SHARD_VERSION_UNSHARDED = [Timestamp(0, 0), ObjectId("000000000000000000000000")];
+
+function validateTestCase(testCase) {
+ assert(testCase.skip || testCase.command,
+ "must specify exactly one of 'skip' or 'command' for test case " + tojson(testCase));
+
+ if (testCase.skip) {
+ for (let key of Object.keys(testCase)) {
+ assert(
+ key === "skip" || key === "conditional",
+ "if a test case specifies 'skip', it must not specify any other fields besides 'conditional': " +
+ key + ": " + tojson(testCase));
}
-
- // Check that required fields are present.
- assert(testCase.hasOwnProperty("sendsDbVersion"),
- "must specify 'sendsDbVersion' for test case " + tojson(testCase));
- assert(testCase.hasOwnProperty("sendsShardVersion"),
- "must specify 'sendsShardVersion' for test case " + tojson(testCase));
-
- // Check that all present fields are of the correct type.
- assert(typeof(testCase.command) === "object");
- assert(testCase.runsAgainstAdminDb ? typeof(testCase.runsAgainstAdminDb) === "boolean"
- : true);
- assert(testCase.skipProfilerCheck ? typeof(testCase.skipProfilerCheck) === "boolean"
- : true);
- assert(typeof(testCase.sendsDbVersion) === "boolean");
- assert(typeof(testCase.sendsShardVersion) === "boolean");
- assert(testCase.setUp ? typeof(testCase.setUp) === "function" : true,
- "setUp must be a function: " + tojson(testCase));
- assert(testCase.cleanUp ? typeof(testCase.cleanUp) === "function" : true,
- "cleanUp must be a function: " + tojson(testCase));
+ return;
}
- let testCases = {
- _hashBSONElement: {skip: "executes locally on mongos (not sent to any remote node)"},
- _isSelf: {skip: "executes locally on mongos (not sent to any remote node)"},
- _mergeAuthzCollections: {skip: "always targets the config server"},
- abortTransaction: {skip: "unversioned and uses special targetting rules"},
- addShard: {skip: "not on a user database"},
- addShardToZone: {skip: "not on a user database"},
- aggregate: {
- sendsDbVersion: false,
- sendsShardVersion: true,
- command: {aggregate: collName, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
- },
- authenticate: {skip: "does not forward command to primary shard"},
- availableQueryOptions: {skip: "executes locally on mongos (not sent to any remote node)"},
- balancerStart: {skip: "not on a user database"},
- balancerStatus: {skip: "not on a user database"},
- balancerStop: {skip: "not on a user database"},
- buildInfo: {skip: "executes locally on mongos (not sent to any remote node)"},
- clearLog: {skip: "executes locally on mongos (not sent to any remote node)"},
- collMod: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {collMod: collName},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
- },
- collStats: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {collStats: collName},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
- },
- commitTransaction: {skip: "unversioned and uses special targetting rules"},
- compact: {skip: "not allowed through mongos"},
- configureFailPoint: {skip: "executes locally on mongos (not sent to any remote node)"},
- connPoolStats: {skip: "executes locally on mongos (not sent to any remote node)"},
- connPoolSync: {skip: "executes locally on mongos (not sent to any remote node)"},
- connectionStatus: {skip: "executes locally on mongos (not sent to any remote node)"},
- convertToCapped: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {convertToCapped: collName, size: 8192},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
- },
- count: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- command: {count: collName, query: {x: 1}},
- },
- create: {
- sendsDbVersion: false,
- // The collection doesn't exist yet, so no shardVersion is sent.
- sendsShardVersion: false,
- command: {create: collName},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
- },
- createIndexes: {
- skipProfilerCheck: true,
- sendsDbVersion: true,
- sendsShardVersion: false,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {createIndexes: collName, indexes: [{key: {a: 1}, name: "index"}]},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- },
- },
- createRole: {skip: "always targets the config server"},
- createUser: {skip: "always targets the config server"},
- currentOp: {skip: "not on a user database"},
- dataSize: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {dataSize: ns},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
- },
- dbStats: {
- sendsDbVersion: false,
- // dbStats is always broadcast to all shards
- sendsShardVersion: false,
- command: {dbStats: 1, scale: 1}
- },
- delete: {
- skipProfilerCheck: true,
- sendsDbVersion: false,
- // The profiler extracts the individual deletes from the 'deletes' array, and so loses
- // the overall delete command's attached shardVersion, though one is sent.
- sendsShardVersion: true,
- command: {delete: collName, deletes: [{q: {_id: 1}, limit: 1}]}
+ // Check that required fields are present.
+ assert(testCase.hasOwnProperty("sendsDbVersion"),
+ "must specify 'sendsDbVersion' for test case " + tojson(testCase));
+ assert(testCase.hasOwnProperty("sendsShardVersion"),
+ "must specify 'sendsShardVersion' for test case " + tojson(testCase));
+
+ // Check that all present fields are of the correct type.
+ assert(typeof (testCase.command) === "object");
+ assert(testCase.runsAgainstAdminDb ? typeof (testCase.runsAgainstAdminDb) === "boolean" : true);
+ assert(testCase.skipProfilerCheck ? typeof (testCase.skipProfilerCheck) === "boolean" : true);
+ assert(typeof (testCase.sendsDbVersion) === "boolean");
+ assert(typeof (testCase.sendsShardVersion) === "boolean");
+ assert(testCase.setUp ? typeof (testCase.setUp) === "function" : true,
+ "setUp must be a function: " + tojson(testCase));
+ assert(testCase.cleanUp ? typeof (testCase.cleanUp) === "function" : true,
+ "cleanUp must be a function: " + tojson(testCase));
+}
+
+let testCases = {
+ _hashBSONElement: {skip: "executes locally on mongos (not sent to any remote node)"},
+ _isSelf: {skip: "executes locally on mongos (not sent to any remote node)"},
+ _mergeAuthzCollections: {skip: "always targets the config server"},
+    abortTransaction: {skip: "unversioned and uses special targeting rules"},
+ addShard: {skip: "not on a user database"},
+ addShardToZone: {skip: "not on a user database"},
+ aggregate: {
+ sendsDbVersion: false,
+ sendsShardVersion: true,
+ command: {aggregate: collName, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
+ },
+ authenticate: {skip: "does not forward command to primary shard"},
+ availableQueryOptions: {skip: "executes locally on mongos (not sent to any remote node)"},
+ balancerStart: {skip: "not on a user database"},
+ balancerStatus: {skip: "not on a user database"},
+ balancerStop: {skip: "not on a user database"},
+ buildInfo: {skip: "executes locally on mongos (not sent to any remote node)"},
+ clearLog: {skip: "executes locally on mongos (not sent to any remote node)"},
+ collMod: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- distinct: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- command: {distinct: collName, key: "x"},
+ command: {collMod: collName},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ collStats: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- drop: {
- skipProfilerCheck: true,
- sendsDbVersion: false,
- sendsShardVersion: false,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {drop: collName},
+ command: {collStats: collName},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+    commitTransaction: {skip: "unversioned and uses special targeting rules"},
+ compact: {skip: "not allowed through mongos"},
+ configureFailPoint: {skip: "executes locally on mongos (not sent to any remote node)"},
+ connPoolStats: {skip: "executes locally on mongos (not sent to any remote node)"},
+ connPoolSync: {skip: "executes locally on mongos (not sent to any remote node)"},
+ connectionStatus: {skip: "executes locally on mongos (not sent to any remote node)"},
+ convertToCapped: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- dropAllRolesFromDatabase: {skip: "always targets the config server"},
- dropAllUsersFromDatabase: {skip: "always targets the config server"},
- dropConnections: {skip: "not on a user database"},
- dropDatabase: {skip: "drops the database from the cluster, changing the UUID"},
- dropIndexes: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {dropIndexes: collName, index: "*"},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
+ command: {convertToCapped: collName, size: 8192},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ count: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ command: {count: collName, query: {x: 1}},
+ },
+ create: {
+ sendsDbVersion: false,
+ // The collection doesn't exist yet, so no shardVersion is sent.
+ sendsShardVersion: false,
+ command: {create: collName},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ createIndexes: {
+ skipProfilerCheck: true,
+ sendsDbVersion: true,
+ sendsShardVersion: false,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- dropRole: {skip: "always targets the config server"},
- dropUser: {skip: "always targets the config server"},
- echo: {skip: "does not forward command to primary shard"},
- enableSharding: {skip: "does not forward command to primary shard"},
- endSessions: {skip: "goes through the cluster write path"},
- explain: {skip: "TODO SERVER-31226"},
- features: {skip: "executes locally on mongos (not sent to any remote node)"},
- filemd5: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- command: {filemd5: ObjectId(), root: collName}
+ command: {createIndexes: collName, indexes: [{key: {a: 1}, name: "index"}]},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
},
- find: {
- sendsDbVersion: false,
- sendsShardVersion: true,
- command: {find: collName, filter: {x: 1}},
+ },
+ createRole: {skip: "always targets the config server"},
+ createUser: {skip: "always targets the config server"},
+ currentOp: {skip: "not on a user database"},
+ dataSize: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- findAndModify: {
- sendsDbVersion: false,
- sendsShardVersion: true,
- command: {findAndModify: collName, query: {_id: 0}, remove: true}
+ command: {dataSize: ns},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ dbStats: {
+ sendsDbVersion: false,
+ // dbStats is always broadcast to all shards
+ sendsShardVersion: false,
+ command: {dbStats: 1, scale: 1}
+ },
+ delete: {
+ skipProfilerCheck: true,
+ sendsDbVersion: false,
+ // The profiler extracts the individual deletes from the 'deletes' array, and so loses
+ // the overall delete command's attached shardVersion, though one is sent.
+ sendsShardVersion: true,
+ command: {delete: collName, deletes: [{q: {_id: 1}, limit: 1}]}
+ },
+ distinct: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ command: {distinct: collName, key: "x"},
+ },
+ drop: {
+ skipProfilerCheck: true,
+ sendsDbVersion: false,
+ sendsShardVersion: false,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- flushRouterConfig: {skip: "executes locally on mongos (not sent to any remote node)"},
- fsync: {skip: "broadcast to all shards"},
- getCmdLineOpts: {skip: "executes locally on mongos (not sent to any remote node)"},
- getDiagnosticData: {skip: "executes locally on mongos (not sent to any remote node)"},
- getLastError: {skip: "does not forward command to primary shard"},
- getLog: {skip: "executes locally on mongos (not sent to any remote node)"},
- getMore: {skip: "requires a previously established cursor"},
- getParameter: {skip: "executes locally on mongos (not sent to any remote node)"},
- getShardMap: {skip: "executes locally on mongos (not sent to any remote node)"},
- getShardVersion: {skip: "executes locally on mongos (not sent to any remote node)"},
- getnonce: {skip: "not on a user database"},
- grantPrivilegesToRole: {skip: "always targets the config server"},
- grantRolesToRole: {skip: "always targets the config server"},
- grantRolesToUser: {skip: "always targets the config server"},
- hostInfo: {skip: "executes locally on mongos (not sent to any remote node)"},
- insert: {
- sendsDbVersion: false,
- sendsShardVersion: true,
- command: {insert: collName, documents: [{_id: 1}]},
- cleanUp: function(mongosConn) {
- // Implicitly creates the collection.
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
+ command: {drop: collName},
+ },
+ dropAllRolesFromDatabase: {skip: "always targets the config server"},
+ dropAllUsersFromDatabase: {skip: "always targets the config server"},
+ dropConnections: {skip: "not on a user database"},
+ dropDatabase: {skip: "drops the database from the cluster, changing the UUID"},
+ dropIndexes: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- invalidateUserCache: {skip: "executes locally on mongos (not sent to any remote node)"},
- isdbgrid: {skip: "executes locally on mongos (not sent to any remote node)"},
- isMaster: {skip: "executes locally on mongos (not sent to any remote node)"},
- killCursors: {skip: "requires a previously established cursor"},
- killAllSessions: {skip: "always broadcast to all hosts in the cluster"},
- killAllSessionsByPattern: {skip: "always broadcast to all hosts in the cluster"},
- killOp: {skip: "does not forward command to primary shard"},
- killSessions: {skip: "always broadcast to all hosts in the cluster"},
- listCollections: {
- skipProfilerCheck: true,
- sendsDbVersion: true,
- sendsShardVersion: true,
- command: {listCollections: 1},
+ command: {dropIndexes: collName, index: "*"},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ dropRole: {skip: "always targets the config server"},
+ dropUser: {skip: "always targets the config server"},
+ echo: {skip: "does not forward command to primary shard"},
+ enableSharding: {skip: "does not forward command to primary shard"},
+ endSessions: {skip: "goes through the cluster write path"},
+ explain: {skip: "TODO SERVER-31226"},
+ features: {skip: "executes locally on mongos (not sent to any remote node)"},
+ filemd5: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ command: {filemd5: ObjectId(), root: collName}
+ },
+ find: {
+ sendsDbVersion: false,
+ sendsShardVersion: true,
+ command: {find: collName, filter: {x: 1}},
+ },
+ findAndModify: {
+ sendsDbVersion: false,
+ sendsShardVersion: true,
+ command: {findAndModify: collName, query: {_id: 0}, remove: true}
+ },
+ flushRouterConfig: {skip: "executes locally on mongos (not sent to any remote node)"},
+ fsync: {skip: "broadcast to all shards"},
+ getCmdLineOpts: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getDiagnosticData: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getLastError: {skip: "does not forward command to primary shard"},
+ getLog: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getMore: {skip: "requires a previously established cursor"},
+ getParameter: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getShardMap: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getShardVersion: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getnonce: {skip: "not on a user database"},
+ grantPrivilegesToRole: {skip: "always targets the config server"},
+ grantRolesToRole: {skip: "always targets the config server"},
+ grantRolesToUser: {skip: "always targets the config server"},
+ hostInfo: {skip: "executes locally on mongos (not sent to any remote node)"},
+ insert: {
+ sendsDbVersion: false,
+ sendsShardVersion: true,
+ command: {insert: collName, documents: [{_id: 1}]},
+ cleanUp: function(mongosConn) {
+ // Implicitly creates the collection.
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ invalidateUserCache: {skip: "executes locally on mongos (not sent to any remote node)"},
+ isdbgrid: {skip: "executes locally on mongos (not sent to any remote node)"},
+ isMaster: {skip: "executes locally on mongos (not sent to any remote node)"},
+ killCursors: {skip: "requires a previously established cursor"},
+ killAllSessions: {skip: "always broadcast to all hosts in the cluster"},
+ killAllSessionsByPattern: {skip: "always broadcast to all hosts in the cluster"},
+ killOp: {skip: "does not forward command to primary shard"},
+ killSessions: {skip: "always broadcast to all hosts in the cluster"},
+ listCollections: {
+ skipProfilerCheck: true,
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ command: {listCollections: 1},
+ },
+ listCommands: {skip: "executes locally on mongos (not sent to any remote node)"},
+ listDatabases: {skip: "does not forward command to primary shard"},
+ listIndexes: {
+ sendsDbVersion: true,
+ sendsShardVersion: false,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- listCommands: {skip: "executes locally on mongos (not sent to any remote node)"},
- listDatabases: {skip: "does not forward command to primary shard"},
- listIndexes: {
- sendsDbVersion: true,
- sendsShardVersion: false,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {listIndexes: collName},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
+ command: {listIndexes: collName},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ listShards: {skip: "does not forward command to primary shard"},
+ logApplicationMessage: {skip: "not on a user database", conditional: true},
+ logRotate: {skip: "executes locally on mongos (not sent to any remote node)"},
+ logout: {skip: "not on a user database"},
+ mapReduce: {
+ sendsDbVersion: false,
+ // mapReduce uses connection versioning rather than sending shardVersion in the command.
+ sendsShardVersion: false,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- listShards: {skip: "does not forward command to primary shard"},
- logApplicationMessage: {skip: "not on a user database", conditional: true},
- logRotate: {skip: "executes locally on mongos (not sent to any remote node)"},
- logout: {skip: "not on a user database"},
- mapReduce: {
- sendsDbVersion: false,
- // mapReduce uses connection versioning rather than sending shardVersion in the command.
- sendsShardVersion: false,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
+ command: {
+ mapReduce: collName,
+ map: function() {
+ emit(this.x, 1);
},
- command: {
- mapReduce: collName,
- map: function() {
- emit(this.x, 1);
- },
- reduce: function(key, values) {
- return Array.sum(values);
- },
- out: {inline: 1}
+ reduce: function(key, values) {
+ return Array.sum(values);
},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- assert(mongosConn.getDB(dbName).getCollection(collName + "_renamed").drop());
- }
- },
- mergeChunks: {skip: "does not forward command to primary shard"},
- moveChunk: {skip: "does not forward command to primary shard"},
- movePrimary: {skip: "reads primary shard from sharding catalog with readConcern: local"},
- multicast: {skip: "does not forward command to primary shard"},
- netstat: {skip: "executes locally on mongos (not sent to any remote node)"},
- ping: {skip: "executes locally on mongos (not sent to any remote node)"},
- planCacheClear: {
- sendsDbVersion: false,
- // Uses connection versioning.
- sendsShardVersion: false,
- command: {planCacheClear: collName}
+ out: {inline: 1}
},
- planCacheClearFilters: {
- sendsDbVersion: false,
- // Uses connection versioning.
- sendsShardVersion: false,
- command: {planCacheClearFilters: collName}
- },
- planCacheListFilters: {
- sendsDbVersion: false,
- // Uses connection versioning.
- sendsShardVersion: false,
- command: {planCacheListFilters: collName}
- },
- planCacheListPlans: {
- sendsDbVersion: false,
- // Uses connection versioning.
- sendsShardVersion: false,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {planCacheListPlans: collName, query: {_id: "A"}},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ assert(mongosConn.getDB(dbName).getCollection(collName + "_renamed").drop());
+ }
+ },
+ mergeChunks: {skip: "does not forward command to primary shard"},
+ moveChunk: {skip: "does not forward command to primary shard"},
+ movePrimary: {skip: "reads primary shard from sharding catalog with readConcern: local"},
+ multicast: {skip: "does not forward command to primary shard"},
+ netstat: {skip: "executes locally on mongos (not sent to any remote node)"},
+ ping: {skip: "executes locally on mongos (not sent to any remote node)"},
+ planCacheClear: {
+ sendsDbVersion: false,
+ // Uses connection versioning.
+ sendsShardVersion: false,
+ command: {planCacheClear: collName}
+ },
+ planCacheClearFilters: {
+ sendsDbVersion: false,
+ // Uses connection versioning.
+ sendsShardVersion: false,
+ command: {planCacheClearFilters: collName}
+ },
+ planCacheListFilters: {
+ sendsDbVersion: false,
+ // Uses connection versioning.
+ sendsShardVersion: false,
+ command: {planCacheListFilters: collName}
+ },
+ planCacheListPlans: {
+ sendsDbVersion: false,
+ // Uses connection versioning.
+ sendsShardVersion: false,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- planCacheListQueryShapes: {
- sendsDbVersion: false,
- // Uses connection versioning.
- sendsShardVersion: false,
- command: {planCacheListQueryShapes: collName}
+ command: {planCacheListPlans: collName, query: {_id: "A"}},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ planCacheListQueryShapes: {
+ sendsDbVersion: false,
+ // Uses connection versioning.
+ sendsShardVersion: false,
+ command: {planCacheListQueryShapes: collName}
+ },
+ planCacheSetFilter: {
+ sendsDbVersion: false,
+ // Uses connection versioning.
+ sendsShardVersion: false,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- planCacheSetFilter: {
- sendsDbVersion: false,
- // Uses connection versioning.
- sendsShardVersion: false,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {planCacheSetFilter: collName, query: {_id: "A"}, indexes: [{_id: 1}]},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
+ command: {planCacheSetFilter: collName, query: {_id: "A"}, indexes: [{_id: 1}]},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ profile: {skip: "not supported in mongos"},
+ reapLogicalSessionCacheNow: {skip: "is a no-op on mongos"},
+ refreshLogicalSessionCacheNow: {skip: "goes through the cluster write path"},
+ refreshSessions: {skip: "executes locally on mongos (not sent to any remote node)"},
+ refreshSessionsInternal:
+ {skip: "executes locally on mongos (not sent to any remote node)", conditional: true},
+ removeShard: {skip: "not on a user database"},
+ removeShardFromZone: {skip: "not on a user database"},
+ renameCollection: {
+ runsAgainstAdminDb: true,
+ skipProfilerCheck: true,
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- profile: {skip: "not supported in mongos"},
- reapLogicalSessionCacheNow: {skip: "is a no-op on mongos"},
- refreshLogicalSessionCacheNow: {skip: "goes through the cluster write path"},
- refreshSessions: {skip: "executes locally on mongos (not sent to any remote node)"},
- refreshSessionsInternal:
- {skip: "executes locally on mongos (not sent to any remote node)", conditional: true},
- removeShard: {skip: "not on a user database"},
- removeShardFromZone: {skip: "not on a user database"},
- renameCollection: {
- runsAgainstAdminDb: true,
- skipProfilerCheck: true,
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {
- renameCollection: dbName + "." + collName,
- to: dbName + "." + collName + "_renamed"
- },
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName + "_renamed").drop());
- }
+ command:
+ {renameCollection: dbName + "." + collName, to: dbName + "." + collName + "_renamed"},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName + "_renamed").drop());
+ }
+ },
+ replSetGetStatus: {skip: "not supported in mongos"},
+ resetError: {skip: "not on a user database"},
+ restartCatalog: {skip: "not on a user database"},
+ revokePrivilegesFromRole: {skip: "always targets the config server"},
+ revokeRolesFromRole: {skip: "always targets the config server"},
+ revokeRolesFromUser: {skip: "always targets the config server"},
+ rolesInfo: {skip: "always targets the config server"},
+ saslContinue: {skip: "not on a user database"},
+ saslStart: {skip: "not on a user database"},
+ serverStatus: {skip: "executes locally on mongos (not sent to any remote node)"},
+ setIndexCommitQuorum: {
+ skipProfilerCheck: true,
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- replSetGetStatus: {skip: "not supported in mongos"},
- resetError: {skip: "not on a user database"},
- restartCatalog: {skip: "not on a user database"},
- revokePrivilegesFromRole: {skip: "always targets the config server"},
- revokeRolesFromRole: {skip: "always targets the config server"},
- revokeRolesFromUser: {skip: "always targets the config server"},
- rolesInfo: {skip: "always targets the config server"},
- saslContinue: {skip: "not on a user database"},
- saslStart: {skip: "not on a user database"},
- serverStatus: {skip: "executes locally on mongos (not sent to any remote node)"},
- setIndexCommitQuorum: {
- skipProfilerCheck: true,
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command:
- {setIndexCommitQuorum: collName, indexNames: ["index"], commitQuorum: "majority"},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- },
+ command: {setIndexCommitQuorum: collName, indexNames: ["index"], commitQuorum: "majority"},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
},
- setFeatureCompatibilityVersion: {skip: "not on a user database"},
- setFreeMonitoring:
- {skip: "explicitly fails for mongos, primary mongod only", conditional: true},
- setParameter: {skip: "executes locally on mongos (not sent to any remote node)"},
- shardCollection: {skip: "does not forward command to primary shard"},
- shardConnPoolStats: {skip: "does not forward command to primary shard"},
- shutdown: {skip: "does not forward command to primary shard"},
- split: {skip: "does not forward command to primary shard"},
- splitVector: {skip: "does not forward command to primary shard"},
- startRecordingTraffic: {skip: "executes locally on mongos (not sent to any remote node)"},
- startSession: {skip: "executes locally on mongos (not sent to any remote node)"},
- stopRecordingTraffic: {skip: "executes locally on mongos (not sent to any remote node)"},
- update: {
- skipProfilerCheck: true,
- sendsDbVersion: false,
- // The profiler extracts the individual updates from the 'updates' array, and so loses
- // the overall update command's attached shardVersion, though one is sent.
- sendsShardVersion: true,
- command: {
- update: collName,
- updates: [{q: {_id: 2}, u: {_id: 2}, upsert: true, multi: false}]
- }
+ },
+ setFeatureCompatibilityVersion: {skip: "not on a user database"},
+ setFreeMonitoring:
+ {skip: "explicitly fails for mongos, primary mongod only", conditional: true},
+ setParameter: {skip: "executes locally on mongos (not sent to any remote node)"},
+ shardCollection: {skip: "does not forward command to primary shard"},
+ shardConnPoolStats: {skip: "does not forward command to primary shard"},
+ shutdown: {skip: "does not forward command to primary shard"},
+ split: {skip: "does not forward command to primary shard"},
+ splitVector: {skip: "does not forward command to primary shard"},
+ startRecordingTraffic: {skip: "executes locally on mongos (not sent to any remote node)"},
+ startSession: {skip: "executes locally on mongos (not sent to any remote node)"},
+ stopRecordingTraffic: {skip: "executes locally on mongos (not sent to any remote node)"},
+ update: {
+ skipProfilerCheck: true,
+ sendsDbVersion: false,
+ // The profiler extracts the individual updates from the 'updates' array, and so loses
+ // the overall update command's attached shardVersion, though one is sent.
+ sendsShardVersion: true,
+ command:
+ {update: collName, updates: [{q: {_id: 2}, u: {_id: 2}, upsert: true, multi: false}]}
+ },
+ updateRole: {skip: "always targets the config server"},
+ updateUser: {skip: "always targets the config server"},
+ updateZoneKeyRange: {skip: "not on a user database"},
+ usersInfo: {skip: "always targets the config server"},
+ validate: {
+ skipProfilerCheck: true,
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- updateRole: {skip: "always targets the config server"},
- updateUser: {skip: "always targets the config server"},
- updateZoneKeyRange: {skip: "not on a user database"},
- usersInfo: {skip: "always targets the config server"},
- validate: {
- skipProfilerCheck: true,
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {validate: collName},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- },
+ command: {validate: collName},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
},
- whatsmyuri: {skip: "executes locally on mongos (not sent to any remote node)"},
- };
-
- commandsRemovedFromMongosIn42.forEach(function(cmd) {
- testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
- });
-
- class AllCommandsTestRunner {
- constructor() {
- this.st = new ShardingTest(this.getShardingTestOptions());
- let db = this.st.s.getDB(dbName);
- // We do this create and drop so that we create an entry for the database in the
- // sharding catalog.
- assert.commandWorked(db.createCollection(collName));
- assert.commandWorked(db.runCommand({drop: collName}));
- this.primaryShard = this.st.shard0;
- this.st.ensurePrimaryShard(dbName, this.primaryShard.shardName);
-
- this.dbVersion =
- this.st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
- this.previousDbVersion = null;
-
- let res = this.st.s.adminCommand({listCommands: 1});
- assert.commandWorked(res);
- this.commands = Object.keys(res.commands);
- }
+ },
+ whatsmyuri: {skip: "executes locally on mongos (not sent to any remote node)"},
+};
+
+commandsRemovedFromMongosIn42.forEach(function(cmd) {
+ testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
+});
+
+class AllCommandsTestRunner {
+ constructor() {
+ this.st = new ShardingTest(this.getShardingTestOptions());
+ let db = this.st.s.getDB(dbName);
+ // We do this create and drop so that we create an entry for the database in the
+ // sharding catalog.
+ assert.commandWorked(db.createCollection(collName));
+ assert.commandWorked(db.runCommand({drop: collName}));
+ this.primaryShard = this.st.shard0;
+ this.st.ensurePrimaryShard(dbName, this.primaryShard.shardName);
+
+ this.dbVersion =
+ this.st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
+ this.previousDbVersion = null;
+
+ let res = this.st.s.adminCommand({listCommands: 1});
+ assert.commandWorked(res);
+ this.commands = Object.keys(res.commands);
+ }
- shutdown() {
- this.st.stop();
- }
+ shutdown() {
+ this.st.stop();
+ }
- getShardingTestOptions() {
- throw new Error("not implemented");
+ getShardingTestOptions() {
+ throw new Error("not implemented");
+ }
+ makeShardDatabaseCacheStale() {
+ throw new Error("not implemented");
+ }
+
+ assertSentDatabaseVersion(testCase, commandProfile) {
+ const res = this.primaryShard.adminCommand({getDatabaseVersion: dbName});
+ assert.commandWorked(res);
+ assert.eq(this.dbVersion, res.dbVersion);
+
+ // If the test case is marked as not tracked by the profiler, then we won't be able to
+        // verify that the version was sent here. Any test cases marked with this flag should be
+ // fixed in SERVER-33499.
+ if (!testCase.skipProfilerCheck) {
+ commandProfile["command.databaseVersion"] = this.dbVersion;
+ profilerHasSingleMatchingEntryOrThrow(
+ {profileDB: this.primaryShard.getDB(dbName), filter: commandProfile});
}
- makeShardDatabaseCacheStale() {
- throw new Error("not implemented");
+ }
+
+ assertDidNotSendDatabaseVersion(testCase, commandProfile) {
+ const res = this.primaryShard.adminCommand({getDatabaseVersion: dbName});
+ assert.commandWorked(res);
+ assert.eq({}, res.dbVersion);
+
+ // If the test case is marked as not tracked by the profiler, then we won't be able to
+ // verify the version was not sent here. Any test cases marked with this flag should be
+ // fixed in SERVER-33499.
+ if (!testCase.skipProfilerCheck) {
+ commandProfile["command.databaseVersion"] = {$exists: false};
+ profilerHasSingleMatchingEntryOrThrow(
+ {profileDB: this.primaryShard.getDB(dbName), filter: commandProfile});
}
+ }
- assertSentDatabaseVersion(testCase, commandProfile) {
- const res = this.primaryShard.adminCommand({getDatabaseVersion: dbName});
- assert.commandWorked(res);
- assert.eq(this.dbVersion, res.dbVersion);
-
- // If the test case is marked as not tracked by the profiler, then we won't be able to
- // verify the version was not sent here. Any test cases marked with this flag should be
- // fixed in SERVER-33499.
- if (!testCase.skipProfilerCheck) {
- commandProfile["command.databaseVersion"] = this.dbVersion;
- profilerHasSingleMatchingEntryOrThrow(
- {profileDB: this.primaryShard.getDB(dbName), filter: commandProfile});
+ runCommands() {
+ // Use the profiler to check that the command was received with or without a
+ // databaseVersion and shardVersion as expected by the 'testCase' for the command.
+ for (let command of this.commands) {
+ let testCase = testCases[command];
+ assert(testCase !== undefined,
+ "coverage failure: must define a test case for " + command);
+ if (!testCases[command].validated) {
+ validateTestCase(testCase);
+ testCases[command].validated = true;
}
- }
- assertDidNotSendDatabaseVersion(testCase, commandProfile) {
- const res = this.primaryShard.adminCommand({getDatabaseVersion: dbName});
- assert.commandWorked(res);
- assert.eq({}, res.dbVersion);
-
- // If the test case is marked as not tracked by the profiler, then we won't be able to
- // verify the version was not sent here. Any test cases marked with this flag should be
- // fixed in SERVER-33499.
- if (!testCase.skipProfilerCheck) {
- commandProfile["command.databaseVersion"] = {$exists: false};
- profilerHasSingleMatchingEntryOrThrow(
- {profileDB: this.primaryShard.getDB(dbName), filter: commandProfile});
+ if (testCase.skip) {
+ print("skipping " + command + ": " + testCase.skip);
+ continue;
}
- }
- runCommands() {
- // Use the profiler to check that the command was received with or without a
- // databaseVersion and shardVersion as expected by the 'testCase' for the command.
- for (let command of this.commands) {
- let testCase = testCases[command];
- assert(testCase !== undefined,
- "coverage failure: must define a test case for " + command);
- if (!testCases[command].validated) {
- validateTestCase(testCase);
- testCases[command].validated = true;
- }
-
- if (testCase.skip) {
- print("skipping " + command + ": " + testCase.skip);
- continue;
- }
-
- this.primaryShard.getDB(dbName).setProfilingLevel(2);
-
- jsTest.log("testing command " + tojson(testCase.command));
-
- if (testCase.setUp) {
- testCase.setUp(this.st.s);
- }
-
- let commandProfile = buildCommandProfile(testCase.command, false);
- commandProfile["command.shardVersion"] =
- testCase.sendsShardVersion ? SHARD_VERSION_UNSHARDED : {$exists: false};
-
- if (testCase.runsAgainstAdminDb) {
- assert.commandWorked(this.st.s.adminCommand(testCase.command));
- } else {
- assert.commandWorked(this.st.s.getDB(dbName).runCommand(testCase.command));
- }
-
- if (testCase.sendsDbVersion) {
- this.assertSentDatabaseVersion(testCase, commandProfile);
- } else {
- this.assertDidNotSendDatabaseVersion(testCase, commandProfile);
- }
-
- if (testCase.cleanUp) {
- testCase.cleanUp(this.st.s);
- }
-
- // Clear the profiler collection in between testing each command.
- this.primaryShard.getDB(dbName).setProfilingLevel(0);
- assert(this.primaryShard.getDB(dbName).getCollection("system.profile").drop());
-
- this.makeShardDatabaseCacheStale();
+ this.primaryShard.getDB(dbName).setProfilingLevel(2);
+
+ jsTest.log("testing command " + tojson(testCase.command));
+
+ if (testCase.setUp) {
+ testCase.setUp(this.st.s);
}
- // After iterating through all the existing commands, ensure there were no additional
- // test cases that did not correspond to any mongos command.
- for (let key of Object.keys(testCases)) {
- // We have defined real test cases for commands added in 4.2 so that the test cases
- // are exercised in the regular suites, but because these test cases can't run in
- // the last stable suite, we skip processing them here to avoid failing the below
- // assertion. We have defined "skip" test cases for commands removed in 4.2 so the
- // test case is defined in last stable suites (in which these commands still exist
- // on the mongos), but these test cases won't be run in regular suites, so we skip
- // processing them below as well.
- if (commandsAddedToMongosIn42.includes(key) ||
- commandsRemovedFromMongosIn42.includes(key)) {
- continue;
- }
- assert(testCases[key].validated || testCases[key].conditional,
- "you defined a test case for a command '" + key +
- "' that does not exist on mongos: " + tojson(testCases[key]));
+ let commandProfile = buildCommandProfile(testCase.command, false);
+ commandProfile["command.shardVersion"] =
+ testCase.sendsShardVersion ? SHARD_VERSION_UNSHARDED : {$exists: false};
+
+ if (testCase.runsAgainstAdminDb) {
+ assert.commandWorked(this.st.s.adminCommand(testCase.command));
+ } else {
+ assert.commandWorked(this.st.s.getDB(dbName).runCommand(testCase.command));
}
- }
- }
- class DropDatabaseTestRunner extends AllCommandsTestRunner {
- getShardingTestOptions() {
- return {shards: 1};
+ if (testCase.sendsDbVersion) {
+ this.assertSentDatabaseVersion(testCase, commandProfile);
+ } else {
+ this.assertDidNotSendDatabaseVersion(testCase, commandProfile);
+ }
+
+ if (testCase.cleanUp) {
+ testCase.cleanUp(this.st.s);
+ }
+
+ // Clear the profiler collection in between testing each command.
+ this.primaryShard.getDB(dbName).setProfilingLevel(0);
+ assert(this.primaryShard.getDB(dbName).getCollection("system.profile").drop());
+
+ this.makeShardDatabaseCacheStale();
}
- makeShardDatabaseCacheStale() {
- // Drop the database from the shard to clear the shard's cached in-memory database info.
- assert.commandWorked(this.primaryShard.getDB(dbName).runCommand({dropDatabase: 1}));
+ // After iterating through all the existing commands, ensure there were no additional
+ // test cases that did not correspond to any mongos command.
+ for (let key of Object.keys(testCases)) {
+ // We have defined real test cases for commands added in 4.2 so that the test cases
+ // are exercised in the regular suites, but because these test cases can't run in
+ // the last stable suite, we skip processing them here to avoid failing the below
+ // assertion. We have defined "skip" test cases for commands removed in 4.2 so the
+ // test case is defined in last stable suites (in which these commands still exist
+ // on the mongos), but these test cases won't be run in regular suites, so we skip
+ // processing them below as well.
+ if (commandsAddedToMongosIn42.includes(key) ||
+ commandsRemovedFromMongosIn42.includes(key)) {
+ continue;
+ }
+ assert(testCases[key].validated || testCases[key].conditional,
+ "you defined a test case for a command '" + key +
+ "' that does not exist on mongos: " + tojson(testCases[key]));
}
}
+}
- class MovePrimaryTestRunner extends AllCommandsTestRunner {
- getShardingTestOptions() {
- return {shards: 2};
- }
+class DropDatabaseTestRunner extends AllCommandsTestRunner {
+ getShardingTestOptions() {
+ return {shards: 1};
+ }
- makeShardDatabaseCacheStale() {
- let fromShard = this.st.getPrimaryShard(dbName);
- let toShard = this.st.getOther(fromShard);
+ makeShardDatabaseCacheStale() {
+ // Drop the database from the shard to clear the shard's cached in-memory database info.
+ assert.commandWorked(this.primaryShard.getDB(dbName).runCommand({dropDatabase: 1}));
+ }
+}
- this.primaryShard = toShard;
- this.previousDbVersion = this.dbVersion;
+class MovePrimaryTestRunner extends AllCommandsTestRunner {
+ getShardingTestOptions() {
+ return {shards: 2};
+ }
- assert.commandWorked(this.st.s0.adminCommand({movePrimary: dbName, to: toShard.name}));
- this.dbVersion =
- this.st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
+ makeShardDatabaseCacheStale() {
+ let fromShard = this.st.getPrimaryShard(dbName);
+ let toShard = this.st.getOther(fromShard);
- // The dbVersion should have changed due to the movePrimary operation.
- assert.eq(this.dbVersion.lastMod, this.previousDbVersion.lastMod + 1);
- }
+ this.primaryShard = toShard;
+ this.previousDbVersion = this.dbVersion;
+
+ assert.commandWorked(this.st.s0.adminCommand({movePrimary: dbName, to: toShard.name}));
+ this.dbVersion =
+ this.st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
+
+ // The dbVersion should have changed due to the movePrimary operation.
+ assert.eq(this.dbVersion.lastMod, this.previousDbVersion.lastMod + 1);
}
+}
- let dropDatabaseTestRunner = new DropDatabaseTestRunner();
- dropDatabaseTestRunner.runCommands();
- dropDatabaseTestRunner.shutdown();
+let dropDatabaseTestRunner = new DropDatabaseTestRunner();
+dropDatabaseTestRunner.runCommands();
+dropDatabaseTestRunner.shutdown();
- let movePrimaryTestRunner = new MovePrimaryTestRunner();
- movePrimaryTestRunner.runCommands();
- movePrimaryTestRunner.shutdown();
+let movePrimaryTestRunner = new MovePrimaryTestRunner();
+movePrimaryTestRunner.runCommands();
+movePrimaryTestRunner.shutdown();
})();
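(Note, illustrative only and not part of the patch: the profiler mechanism the test runner above builds on. With profiling level 2 enabled on the primary shard, every command the shard receives is written to system.profile, so the routing metadata attached by mongos can be inspected directly. A minimal sketch, assuming 'st' is a ShardingTest whose 'test' database has its primary on st.shard0 and that 'count' is forwarded as described in the table above:)

const shardDB = st.shard0.getDB("test");
shardDB.setProfilingLevel(2);
assert.commandWorked(st.s.getDB("test").runCommand({count: "foo", query: {x: 1}}));
const entry = shardDB.system.profile.findOne({"command.count": "foo"});
assert.neq(null, entry);
// Per the table above, 'count' is versioned, so the profiled command should carry both fields.
printjson({shardVersion: entry.command.shardVersion, databaseVersion: entry.command.databaseVersion});
shardDB.setProfilingLevel(0);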
diff --git a/jstests/sharding/database_versioning_cache_entry_without_version_updated_with_version.js b/jstests/sharding/database_versioning_cache_entry_without_version_updated_with_version.js
index f6dc4560462..9beaa8c2f0c 100644
--- a/jstests/sharding/database_versioning_cache_entry_without_version_updated_with_version.js
+++ b/jstests/sharding/database_versioning_cache_entry_without_version_updated_with_version.js
@@ -4,58 +4,56 @@
// its on-disk cache so that the version can also be picked up by secondaries.
(function() {
- const st = new ShardingTest({shards: 1, rs: {nodes: 2}, other: {verbose: 2}});
-
- assert.commandWorked(st.s.getDB("test").getCollection("foo").insert({x: 1}));
-
- // The database is created with a version.
- const versionOnConfig =
- st.s.getDB("config").getCollection("databases").findOne({_id: "test"}).version;
- assert.neq(null, versionOnConfig);
-
- // Before the shard refreshes, it does not have a cache entry for the database.
- assert.eq(null,
- st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}));
-
- // After the shard refreshes, it has a cache entry for the database with version matching the
- // version on the config server.
- assert.commandWorked(st.shard0.adminCommand({_flushDatabaseCacheUpdates: "test"}));
- const versionOnShard =
- st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}).version;
- assert.docEq(versionOnConfig, versionOnShard);
-
- // The shard primary's in-memory version matches the on-disk version.
- assert.eq(versionOnShard, st.shard0.adminCommand({getDatabaseVersion: "test"}).dbVersion);
-
- jsTest.log("Remove the database version from the shard's cache entry");
- assert.commandWorked(
- st.shard0.getDB("config").getCollection("cache.databases").update({_id: "test"}, {
- $unset: {version: ""}
- }));
- assert.eq(
- null,
- st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}).version);
-
- // Deleting the version field from the on-disk entry did not affect the in-memory version.
- assert.eq(versionOnShard, st.shard0.adminCommand({getDatabaseVersion: "test"}).dbVersion);
-
- // The shard secondary does not have a version cached in memory.
- assert.eq({}, st.rs0.getSecondary().adminCommand({getDatabaseVersion: "test"}).dbVersion);
-
- // A versioned request against the shard secondary makes the shard primary refresh and update
- // the on-disk cache entry with a version, even though it already had an on-disk cache entry and
- // had the up-to-date version cached in memory.
- // Use readConcern 'local' because the default on secondaries is 'available'.
- assert.commandWorked(st.s.getDB("test").runCommand(
- {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
- const versionOnShard2 =
- st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}).version;
- assert.docEq(versionOnConfig, versionOnShard2);
-
- // The shard secondary's in-memory version now matches the on-disk version.
- assert.eq(versionOnShard,
- st.rs0.getSecondary().adminCommand({getDatabaseVersion: "test"}).dbVersion);
-
- st.stop();
-
+const st = new ShardingTest({shards: 1, rs: {nodes: 2}, other: {verbose: 2}});
+
+assert.commandWorked(st.s.getDB("test").getCollection("foo").insert({x: 1}));
+
+// The database is created with a version.
+const versionOnConfig =
+ st.s.getDB("config").getCollection("databases").findOne({_id: "test"}).version;
+assert.neq(null, versionOnConfig);
+
+// Before the shard refreshes, it does not have a cache entry for the database.
+assert.eq(null, st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}));
+
+// After the shard refreshes, it has a cache entry for the database with version matching the
+// version on the config server.
+assert.commandWorked(st.shard0.adminCommand({_flushDatabaseCacheUpdates: "test"}));
+const versionOnShard =
+ st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}).version;
+assert.docEq(versionOnConfig, versionOnShard);
+
+// The shard primary's in-memory version matches the on-disk version.
+assert.eq(versionOnShard, st.shard0.adminCommand({getDatabaseVersion: "test"}).dbVersion);
+
+jsTest.log("Remove the database version from the shard's cache entry");
+assert.commandWorked(
+ st.shard0.getDB("config").getCollection("cache.databases").update({_id: "test"}, {
+ $unset: {version: ""}
+ }));
+assert.eq(
+ null,
+ st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}).version);
+
+// Deleting the version field from the on-disk entry did not affect the in-memory version.
+assert.eq(versionOnShard, st.shard0.adminCommand({getDatabaseVersion: "test"}).dbVersion);
+
+// The shard secondary does not have a version cached in memory.
+assert.eq({}, st.rs0.getSecondary().adminCommand({getDatabaseVersion: "test"}).dbVersion);
+
+// A versioned request against the shard secondary makes the shard primary refresh and update
+// the on-disk cache entry with a version, even though it already had an on-disk cache entry and
+// had the up-to-date version cached in memory.
+// Use readConcern 'local' because the default on secondaries is 'available'.
+assert.commandWorked(st.s.getDB("test").runCommand(
+ {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
+const versionOnShard2 =
+ st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}).version;
+assert.docEq(versionOnConfig, versionOnShard2);
+
+// The shard secondary's in-memory version now matches the on-disk version.
+assert.eq(versionOnShard,
+ st.rs0.getSecondary().adminCommand({getDatabaseVersion: "test"}).dbVersion);
+
+st.stop();
})();
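
As a side note, the refresh-and-compare pattern this test builds on can be reduced to a few lines. This is an illustrative sketch only, assuming the jstest shell harness; the "test"/"foo" names mirror the test above.

(function() {
'use strict';
const st = new ShardingTest({shards: 1});

// Creating the database assigns it a version on the config server.
assert.commandWorked(st.s.getDB("test").getCollection("foo").insert({x: 1}));
const onConfig = st.s.getDB("config").getCollection("databases").findOne({_id: "test"}).version;

// A refresh copies that version into the shard's persisted cache collection.
assert.commandWorked(st.shard0.adminCommand({_flushDatabaseCacheUpdates: "test"}));
const onShard =
    st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}).version;
assert.docEq(onConfig, onShard);

// The shard primary's in-memory view agrees with the persisted entry.
assert.eq(onShard, st.shard0.adminCommand({getDatabaseVersion: "test"}).dbVersion);
st.stop();
})();
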
diff --git a/jstests/sharding/database_versioning_safe_secondary_reads.js b/jstests/sharding/database_versioning_safe_secondary_reads.js
index 301f246f4e7..a4062f68686 100644
--- a/jstests/sharding/database_versioning_safe_secondary_reads.js
+++ b/jstests/sharding/database_versioning_safe_secondary_reads.js
@@ -6,217 +6,217 @@
* - the movePrimary critical section is entered on the primary node
*/
(function() {
- "use strict";
- load("jstests/libs/database_versioning.js");
-
- const dbName = "test";
-
- const st = new ShardingTest({
- mongos: 2,
- rs0: {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]},
- rs1: {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]},
- verbose: 2
- });
-
- // Before creating the database, none of the nodes have a cached entry for the database either
- // in memory or on disk.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
-
- // Use 'enableSharding' to create the database only in the sharding catalog (the database will
- // not exist on any shards).
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-
- // Check that a command that attaches databaseVersion returns empty results, even though the
- // database does not actually exist on any shard (because the version won't be checked).
- assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
-
- // Once SERVER-34431 goes in, this should have caused the primary shard's primary to refresh its
- // in-memory and on-disk caches.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
-
- assert.commandWorked(st.s.getDB(dbName).runCommand(
- {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
-
- // Once SERVER-34431 goes in, this should have caused the primary shard's secondary to refresh
- // its in-memory cache (its on-disk cache was updated when the primary refreshed, above).
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
-
- // Use 'movePrimary' to ensure shard0 is the primary shard. This will create the database on the
- // shards only if shard0 was not already the primary shard.
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- const dbEntry1 = st.s.getDB("config").getCollection("databases").findOne({_id: dbName});
-
- // Ensure the database actually gets created on the primary shard by creating a collection in
- // it.
- assert.commandWorked(st.s.getDB(dbName).runCommand({create: "foo"}));
-
- // Run a command that attaches databaseVersion to cause the current primary shard's primary to
- // refresh its in-memory cached database version.
- jsTest.log("About to do listCollections with readPref=primary");
- assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
-
- // Ensure the current primary shard's primary has written the new database entry to disk.
- st.rs0.getPrimary().adminCommand({_flushDatabaseCacheUpdates: dbName, syncFromConfig: false});
-
- // Ensure the database entry on the current primary shard has replicated to the secondary.
- st.rs0.awaitReplication();
-
- // The primary shard's primary should have refreshed.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1.version);
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
-
- // Now run a command that attaches databaseVersion with readPref=secondary to make the current
- // primary shard's secondary refresh its in-memory database version from its on-disk entry.
- jsTest.log("About to do listCollections with readPref=secondary");
- assert.commandWorked(st.s.getDB(dbName).runCommand(
- {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
-
- // The primary shard's secondary should have refreshed.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1.version);
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1.version);
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
-
- // Make "staleMongos" load the stale database info into memory.
- const freshMongos = st.s0;
- const staleMongos = st.s1;
- staleMongos.getDB(dbName).runCommand({listCollections: 1});
-
- // Run movePrimary to ensure the movePrimary critical section clears the in-memory cache on the
- // old primary shard.
- jsTest.log("About to do movePrimary");
- assert.commandWorked(freshMongos.adminCommand({movePrimary: dbName, to: st.shard1.shardName}));
- const dbEntry2 = freshMongos.getDB("config").getCollection("databases").findOne({_id: dbName});
- assert.eq(dbEntry2.version.uuid, dbEntry1.version.uuid);
- assert.eq(dbEntry2.version.lastMod, dbEntry1.version.lastMod + 1);
-
- // Ensure the old primary shard's primary has written the 'enterCriticalSectionSignal' flag to
- // its on-disk database entry.
- st.rs0.getPrimary().adminCommand({_flushDatabaseCacheUpdates: dbName, syncFromConfig: false});
-
- // Ensure 'enterCriticalSectionSignal' flag has replicated to the secondary.
- st.rs0.awaitReplication();
-
- // The in-memory cached version should have been cleared on the old primary shard's primary and
- // secondary nodes.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
-
- // Run listCollections with readPref=secondary from the stale mongos. First, this should cause
- // the old primary shard's secondary to provoke the old primary shard's primary to refresh. Then
- // once the stale mongos refreshes, it should cause the new primary shard's secondary to provoke
- // the new primary shard's primary to refresh.
- jsTest.log("About to do listCollections with readPref=secondary after movePrimary");
- assert.commandWorked(staleMongos.getDB(dbName).runCommand(
- {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
-
- // All nodes should have refreshed.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2.version);
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2.version);
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2.version);
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2.version);
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
-
- // Ensure that dropping the database drops it from all shards, which clears their in-memory
- // caches but not their on-disk caches.
- jsTest.log("About to drop database from the cluster");
- assert.commandWorked(freshMongos.getDB(dbName).runCommand({dropDatabase: 1}));
-
- // Ensure the drop has replicated to all nodes.
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
-
- // Once SERVER-34431 goes in, this should not have caused the in-memory versions to be cleared.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
-
- // Confirm that we have a bug (SERVER-34431), where if a database is dropped and recreated on a
- // different shard, a stale mongos that has cached the old database's primary shard will *not*
- // be routed to the new database's primary shard (and will see an empty database).
-
- // Use 'enableSharding' to create the database only in the sharding catalog (the database will
- // not exist on any shards).
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-
- // Simulate that the database was created on 'shard0' by directly modifying the database entry
- // (we cannot use movePrimary, since movePrimary creates the database on the shards).
- const dbEntry = st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
- assert.writeOK(st.s.getDB("config").getCollection("databases").update({_id: dbName}, {
- $set: {primary: st.shard0.shardName}
- }));
-
- assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
-
- // Once SERVER-34431 goes in, this should have caused the primary shard's primary to refresh its
- // in-memory and on-disk caches.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
-
- assert.commandWorked(st.s.getDB(dbName).runCommand(
- {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
-
- // Once SERVER-34431 goes in, this should have caused the primary shard's secondary to refresh
- // its in-memory cache (its on-disk cache was already updated when the primary refreshed).
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
-
- st.stop();
+"use strict";
+load("jstests/libs/database_versioning.js");
+
+const dbName = "test";
+
+const st = new ShardingTest({
+ mongos: 2,
+ rs0: {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]},
+ rs1: {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]},
+ verbose: 2
+});
+
+// Before creating the database, none of the nodes have a cached entry for the database either
+// in memory or on disk.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+
+// Use 'enableSharding' to create the database only in the sharding catalog (the database will
+// not exist on any shards).
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+
+// Check that a command that attaches databaseVersion returns empty results, even though the
+// database does not actually exist on any shard (because the version won't be checked).
+assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
+
+// Once SERVER-34431 goes in, this should have caused the primary shard's primary to refresh its
+// in-memory and on-disk caches.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+
+assert.commandWorked(st.s.getDB(dbName).runCommand(
+ {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
+
+// Once SERVER-34431 goes in, this should have caused the primary shard's secondary to refresh
+// its in-memory cache (its on-disk cache was updated when the primary refreshed, above).
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+
+// Use 'movePrimary' to ensure shard0 is the primary shard. This will create the database on the
+// shards only if shard0 was not already the primary shard.
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+const dbEntry1 = st.s.getDB("config").getCollection("databases").findOne({_id: dbName});
+
+// Ensure the database actually gets created on the primary shard by creating a collection in
+// it.
+assert.commandWorked(st.s.getDB(dbName).runCommand({create: "foo"}));
+
+// Run a command that attaches databaseVersion to cause the current primary shard's primary to
+// refresh its in-memory cached database version.
+jsTest.log("About to do listCollections with readPref=primary");
+assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
+
+// Ensure the current primary shard's primary has written the new database entry to disk.
+st.rs0.getPrimary().adminCommand({_flushDatabaseCacheUpdates: dbName, syncFromConfig: false});
+
+// Ensure the database entry on the current primary shard has replicated to the secondary.
+st.rs0.awaitReplication();
+
+// The primary shard's primary should have refreshed.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1.version);
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+
+// Now run a command that attaches databaseVersion with readPref=secondary to make the current
+// primary shard's secondary refresh its in-memory database version from its on-disk entry.
+jsTest.log("About to do listCollections with readPref=secondary");
+assert.commandWorked(st.s.getDB(dbName).runCommand(
+ {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
+
+// The primary shard's secondary should have refreshed.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1.version);
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1.version);
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+
+// Make "staleMongos" load the stale database info into memory.
+const freshMongos = st.s0;
+const staleMongos = st.s1;
+staleMongos.getDB(dbName).runCommand({listCollections: 1});
+
+// Run movePrimary to ensure the movePrimary critical section clears the in-memory cache on the
+// old primary shard.
+jsTest.log("About to do movePrimary");
+assert.commandWorked(freshMongos.adminCommand({movePrimary: dbName, to: st.shard1.shardName}));
+const dbEntry2 = freshMongos.getDB("config").getCollection("databases").findOne({_id: dbName});
+assert.eq(dbEntry2.version.uuid, dbEntry1.version.uuid);
+assert.eq(dbEntry2.version.lastMod, dbEntry1.version.lastMod + 1);
+
+// Ensure the old primary shard's primary has written the 'enterCriticalSectionSignal' flag to
+// its on-disk database entry.
+st.rs0.getPrimary().adminCommand({_flushDatabaseCacheUpdates: dbName, syncFromConfig: false});
+
+// Ensure 'enterCriticalSectionSignal' flag has replicated to the secondary.
+st.rs0.awaitReplication();
+
+// The in-memory cached version should have been cleared on the old primary shard's primary and
+// secondary nodes.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+
+// Run listCollections with readPref=secondary from the stale mongos. First, this should cause
+// the old primary shard's secondary to provoke the old primary shard's primary to refresh. Then
+// once the stale mongos refreshes, it should cause the new primary shard's secondary to provoke
+// the new primary shard's primary to refresh.
+jsTest.log("About to do listCollections with readPref=secondary after movePrimary");
+assert.commandWorked(staleMongos.getDB(dbName).runCommand(
+ {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
+
+// All nodes should have refreshed.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2.version);
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2.version);
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2.version);
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2.version);
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
+
+// Ensure that dropping the database drops it from all shards, which clears their in-memory
+// caches but not their on-disk caches.
+jsTest.log("About to drop database from the cluster");
+assert.commandWorked(freshMongos.getDB(dbName).runCommand({dropDatabase: 1}));
+
+// Ensure the drop has replicated to all nodes.
+st.rs0.awaitReplication();
+st.rs1.awaitReplication();
+
+// Once SERVER-34431 goes in, this should not have caused the in-memory versions to be cleared.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
+
+// Confirm that we have a bug (SERVER-34431), where if a database is dropped and recreated on a
+// different shard, a stale mongos that has cached the old database's primary shard will *not*
+// be routed to the new database's primary shard (and will see an empty database).
+
+// Use 'enableSharding' to create the database only in the sharding catalog (the database will
+// not exist on any shards).
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+
+// Simulate that the database was created on 'shard0' by directly modifying the database entry
+// (we cannot use movePrimary, since movePrimary creates the database on the shards).
+const dbEntry = st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
+assert.writeOK(st.s.getDB("config").getCollection("databases").update({_id: dbName}, {
+ $set: {primary: st.shard0.shardName}
+}));
+
+assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
+
+// Once SERVER-34431 goes in, this should have caused the primary shard's primary to refresh its
+// in-memory and on-disk caches.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
+
+assert.commandWorked(st.s.getDB(dbName).runCommand(
+ {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
+
+// Once SERVER-34431 goes in, this should have caused the primary shard's secondary to refresh
+// its in-memory cache (its on-disk cache was already updated when the primary refreshed).
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
+
+st.stop();
})();
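
For orientation, the core sequence that makes a secondary pick up a database version (primary refresh, persist, replicate, then a versioned secondary read) is sketched below. Illustrative only; it assumes the jstest shell harness and a hypothetical database name "verDB".

(function() {
'use strict';
load("jstests/libs/database_versioning.js");  // checkInMemoryDatabaseVersion, checkOnDiskDatabaseVersion

const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
const dbName = "verDB";  // hypothetical database name

assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
assert.commandWorked(st.s.getDB(dbName).runCommand({create: "foo"}));
const entry = st.s.getDB("config").getCollection("databases").findOne({_id: dbName});

// Refresh the primary, persist its cache entry, and let it replicate.
assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
st.rs0.getPrimary().adminCommand({_flushDatabaseCacheUpdates: dbName, syncFromConfig: false});
st.rs0.awaitReplication();

// A versioned read on the secondary (readConcern 'local', since secondaries default to
// 'available') makes it load the replicated cache entry into memory.
assert.commandWorked(st.s.getDB(dbName).runCommand(
    {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));

checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, entry.version);
checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, entry);
st.stop();
})();
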
diff --git a/jstests/sharding/delete_during_migrate.js b/jstests/sharding/delete_during_migrate.js
index 04c3075b1f1..87b13519678 100644
--- a/jstests/sharding/delete_during_migrate.js
+++ b/jstests/sharding/delete_during_migrate.js
@@ -8,38 +8,38 @@
* @tags: [resource_intensive]
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 1});
+var st = new ShardingTest({shards: 2, mongos: 1});
- var dbname = "test";
- var coll = "foo";
- var ns = dbname + "." + coll;
+var dbname = "test";
+var coll = "foo";
+var ns = dbname + "." + coll;
- assert.commandWorked(st.s0.adminCommand({enablesharding: dbname}));
- st.ensurePrimaryShard(dbname, st.shard1.shardName);
+assert.commandWorked(st.s0.adminCommand({enablesharding: dbname}));
+st.ensurePrimaryShard(dbname, st.shard1.shardName);
- var t = st.s0.getDB(dbname).getCollection(coll);
+var t = st.s0.getDB(dbname).getCollection(coll);
- var bulk = t.initializeUnorderedBulkOp();
- for (var i = 0; i < 200000; i++) {
- bulk.insert({a: i});
- }
- assert.writeOK(bulk.execute());
+var bulk = t.initializeUnorderedBulkOp();
+for (var i = 0; i < 200000; i++) {
+ bulk.insert({a: i});
+}
+assert.writeOK(bulk.execute());
- // enable sharding of the collection. Only 1 chunk.
- t.ensureIndex({a: 1});
+// enable sharding of the collection. Only 1 chunk.
+t.ensureIndex({a: 1});
- assert.commandWorked(st.s0.adminCommand({shardcollection: ns, key: {a: 1}}));
+assert.commandWorked(st.s0.adminCommand({shardcollection: ns, key: {a: 1}}));
- // start a parallel shell that deletes things
- var join = startParallelShell("db." + coll + ".remove({});", st.s0.port);
+// start a parallel shell that deletes things
+var join = startParallelShell("db." + coll + ".remove({});", st.s0.port);
- // migrate while deletions are happening
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: ns, find: {a: 1}, to: st.getOther(st.getPrimaryShard(dbname)).name}));
+// migrate while deletions are happening
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: ns, find: {a: 1}, to: st.getOther(st.getPrimaryShard(dbname)).name}));
- join();
+join();
- st.stop();
+st.stop();
})();
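
As an aside, the delete-while-migrating pattern the test exercises is easy to reproduce at a smaller scale. Illustrative sketch only, assuming the jstest shell harness; the document count is deliberately scaled down from the test's 200000.

(function() {
'use strict';
const st = new ShardingTest({shards: 2, mongos: 1});
const dbname = "test";
const ns = dbname + ".foo";

assert.commandWorked(st.s0.adminCommand({enablesharding: dbname}));
st.ensurePrimaryShard(dbname, st.shard1.shardName);

const t = st.s0.getDB(dbname).foo;
const bulk = t.initializeUnorderedBulkOp();
for (let i = 0; i < 1000; i++) {  // scaled down for illustration
    bulk.insert({a: i});
}
assert.writeOK(bulk.execute());
t.ensureIndex({a: 1});
assert.commandWorked(st.s0.adminCommand({shardcollection: ns, key: {a: 1}}));

// Deletions run in a parallel shell while the single chunk migrates to the other shard.
const join = startParallelShell("db.foo.remove({});", st.s0.port);
assert.commandWorked(st.s0.adminCommand(
    {moveChunk: ns, find: {a: 1}, to: st.getOther(st.getPrimaryShard(dbname)).name}));
join();
st.stop();
})();
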
diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js
index 0dbfa0b9502..26347ec8330 100644
--- a/jstests/sharding/diffservers1.js
+++ b/jstests/sharding/diffservers1.js
@@ -1,25 +1,25 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2});
+var s = new ShardingTest({shards: 2});
- assert.eq(2, s.config.shards.count(), "server count wrong");
+assert.eq(2, s.config.shards.count(), "server count wrong");
- var test1 = s.getDB("test1").foo;
- assert.writeOK(test1.insert({a: 1}));
- assert.writeOK(test1.insert({a: 2}));
- assert.writeOK(test1.insert({a: 3}));
- assert.eq(3, test1.count());
+var test1 = s.getDB("test1").foo;
+assert.writeOK(test1.insert({a: 1}));
+assert.writeOK(test1.insert({a: 2}));
+assert.writeOK(test1.insert({a: 3}));
+assert.eq(3, test1.count());
- assert.commandFailed(s.s0.adminCommand({addshard: "sdd$%", maxTimeMS: 60000}), "Bad hostname");
+assert.commandFailed(s.s0.adminCommand({addshard: "sdd$%", maxTimeMS: 60000}), "Bad hostname");
- var portWithoutHostRunning = allocatePort();
- assert.commandFailed(
- s.s0.adminCommand({addshard: "127.0.0.1:" + portWithoutHostRunning, maxTimeMS: 60000}),
- "Host which is not up");
- assert.commandFailed(
- s.s0.adminCommand({addshard: "10.0.0.1:" + portWithoutHostRunning, maxTimeMS: 60000}),
- "Allowed shard in IP when config is localhost");
+var portWithoutHostRunning = allocatePort();
+assert.commandFailed(
+ s.s0.adminCommand({addshard: "127.0.0.1:" + portWithoutHostRunning, maxTimeMS: 60000}),
+ "Host which is not up");
+assert.commandFailed(
+ s.s0.adminCommand({addshard: "10.0.0.1:" + portWithoutHostRunning, maxTimeMS: 60000}),
+ "Allowed shard in IP when config is localhost");
- s.stop();
+s.stop();
})();
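
For completeness, the addShard validation failures exercised above can be checked with just the calls used in the test. Illustrative sketch, assuming the jstest shell harness (allocatePort reserves a port that nothing listens on).

(function() {
'use strict';
const s = new ShardingTest({shards: 1});

// A malformed shard name and an unreachable host:port are both rejected,
// so config.shards keeps only the original shard.
assert.commandFailed(s.s0.adminCommand({addshard: "sdd$%", maxTimeMS: 60000}));
const deadPort = allocatePort();
assert.commandFailed(
    s.s0.adminCommand({addshard: "127.0.0.1:" + deadPort, maxTimeMS: 60000}));
assert.eq(1, s.config.shards.count());
s.stop();
})();
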
diff --git a/jstests/sharding/disable_autosplit.js b/jstests/sharding/disable_autosplit.js
index d6982c3280a..03d14cc970c 100644
--- a/jstests/sharding/disable_autosplit.js
+++ b/jstests/sharding/disable_autosplit.js
@@ -1,34 +1,33 @@
// Tests disabling of autosplit.
(function() {
- 'use strict';
+'use strict';
- var chunkSizeMB = 1;
+var chunkSizeMB = 1;
- // Autosplit is disabled by default, but specify it anyway in case the default changes,
- // especially since it defaults to the enableBalancer setting.
- var st = new ShardingTest(
- {shards: 1, mongos: 1, other: {chunkSize: chunkSizeMB, enableAutoSplit: false}});
+// Autosplit is disabled by default, but specify it anyway in case the default changes,
+// especially since it defaults to the enableBalancer setting.
+var st = new ShardingTest(
+ {shards: 1, mongos: 1, other: {chunkSize: chunkSizeMB, enableAutoSplit: false}});
- var data = "x";
- while (data.length < chunkSizeMB * 1024 * 1024) {
- data += data;
- }
+var data = "x";
+while (data.length < chunkSizeMB * 1024 * 1024) {
+ data += data;
+}
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var coll = mongos.getCollection("foo.bar");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var coll = mongos.getCollection("foo.bar");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- for (var i = 0; i < 20; i++) {
- coll.insert({data: data});
- }
+for (var i = 0; i < 20; i++) {
+ coll.insert({data: data});
+}
- // Make sure we haven't split
- assert.eq(1, config.chunks.find({ns: coll + ""}).count());
-
- st.stop();
+// Make sure we haven't split
+assert.eq(1, config.chunks.find({ns: coll + ""}).count());
+st.stop();
})();
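
A condensed sketch of the same idea: with autosplit off, the chunk count stays at one no matter how much data is inserted. Illustrative only, assuming the jstest shell harness and a hypothetical namespace "noSplit.bar".

(function() {
'use strict';
const chunkSizeMB = 1;
const st = new ShardingTest(
    {shards: 1, mongos: 1, other: {chunkSize: chunkSizeMB, enableAutoSplit: false}});
const coll = st.s0.getCollection("noSplit.bar");  // hypothetical namespace

assert.commandWorked(st.s0.adminCommand({enableSharding: coll.getDB() + ""}));
assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {_id: 1}}));

// Insert several chunkSizeMB worth of data; autosplit being off keeps a single chunk.
let data = "x";
while (data.length < chunkSizeMB * 1024 * 1024) {
    data += data;
}
for (let i = 0; i < 20; i++) {
    assert.writeOK(coll.insert({data: data}));
}
assert.eq(1, st.s0.getDB("config").chunks.find({ns: coll + ""}).count());
st.stop();
})();
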
diff --git a/jstests/sharding/drop_configdb.js b/jstests/sharding/drop_configdb.js
index 63a3b533597..180741530b9 100644
--- a/jstests/sharding/drop_configdb.js
+++ b/jstests/sharding/drop_configdb.js
@@ -1,35 +1,35 @@
// Test that dropping the config database is completely disabled via
// mongos and via mongod, if started with --configsvr
(function() {
- "use strict";
+"use strict";
- var getConfigsvrToWriteTo = function(st) {
- if (st.configRS) {
- return st.configRS.getPrimary();
- } else {
- return st._configServers[0];
- }
- };
+var getConfigsvrToWriteTo = function(st) {
+ if (st.configRS) {
+ return st.configRS.getPrimary();
+ } else {
+ return st._configServers[0];
+ }
+};
- var st = new ShardingTest({shards: 2});
- var mongos = st.s;
- var config = getConfigsvrToWriteTo(st).getDB('config');
+var st = new ShardingTest({shards: 2});
+var mongos = st.s;
+var config = getConfigsvrToWriteTo(st).getDB('config');
- // Try to drop config db via configsvr
+// Try to drop config db via configsvr
- print("1: Try to drop config database via configsvr");
- assert.eq(0, config.dropDatabase().ok);
- assert.eq("Cannot drop 'config' database if mongod started with --configsvr",
- config.dropDatabase().errmsg);
+print("1: Try to drop config database via configsvr");
+assert.eq(0, config.dropDatabase().ok);
+assert.eq("Cannot drop 'config' database if mongod started with --configsvr",
+ config.dropDatabase().errmsg);
- // Try to drop config db via mongos
- var config = mongos.getDB("config");
+// Try to drop config db via mongos
+var config = mongos.getDB("config");
- print("1: Try to drop config database via mongos");
- assert.eq(0, config.dropDatabase().ok);
+print("1: Try to drop config database via mongos");
+assert.eq(0, config.dropDatabase().ok);
- // 20 = ErrorCodes::IllegalOperation
- assert.eq(20, config.dropDatabase().code);
+// 20 = ErrorCodes::IllegalOperation
+assert.eq(20, config.dropDatabase().code);
- st.stop();
+st.stop();
}());
\ No newline at end of file
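
The two rejection paths above can be summarized as follows. Illustrative sketch only, assuming the jstest shell harness and a CSRS config server (so st.configRS is set).

(function() {
"use strict";
const st = new ShardingTest({shards: 1});

// Via mongos the drop is refused with IllegalOperation (code 20).
const viaMongos = st.s.getDB("config").dropDatabase();
assert.eq(0, viaMongos.ok);
assert.eq(20, viaMongos.code);

// Directly against a config server mongod it is refused because of --configsvr.
const viaConfigsvr = st.configRS.getPrimary().getDB("config").dropDatabase();
assert.eq(0, viaConfigsvr.ok);
st.stop();
})();
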
diff --git a/jstests/sharding/drop_sharded_db.js b/jstests/sharding/drop_sharded_db.js
index 95ca3abd500..9de2ecb6d4a 100644
--- a/jstests/sharding/drop_sharded_db.js
+++ b/jstests/sharding/drop_sharded_db.js
@@ -1,67 +1,65 @@
// Tests the dropping of a sharded database SERVER-3471 SERVER-1726
(function() {
- var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2});
- var mongos = st.s0;
- var config = mongos.getDB("config");
+var mongos = st.s0;
+var config = mongos.getDB("config");
- var dbA = mongos.getDB("DropSharded_A");
- var dbB = mongos.getDB("DropSharded_B");
- var dbC = mongos.getDB("DropSharded_C");
+var dbA = mongos.getDB("DropSharded_A");
+var dbB = mongos.getDB("DropSharded_B");
+var dbC = mongos.getDB("DropSharded_C");
- // Dropping a database that doesn't exist will result in an info field in the response.
- var res = assert.commandWorked(dbA.dropDatabase());
- assert.eq('database does not exist', res.info);
+// Dropping a database that doesn't exist will result in an info field in the response.
+var res = assert.commandWorked(dbA.dropDatabase());
+assert.eq('database does not exist', res.info);
- var numDocs = 3000;
- var numColls = 10;
- for (var i = 0; i < numDocs; i++) {
- dbA.getCollection("data" + (i % numColls)).insert({_id: i});
- dbB.getCollection("data" + (i % numColls)).insert({_id: i});
- dbC.getCollection("data" + (i % numColls)).insert({_id: i});
- }
+var numDocs = 3000;
+var numColls = 10;
+for (var i = 0; i < numDocs; i++) {
+ dbA.getCollection("data" + (i % numColls)).insert({_id: i});
+ dbB.getCollection("data" + (i % numColls)).insert({_id: i});
+ dbC.getCollection("data" + (i % numColls)).insert({_id: i});
+}
- var key = {_id: 1};
- for (var i = 0; i < numColls; i++) {
- st.shardColl(dbA.getCollection("data" + i), key);
- st.shardColl(dbB.getCollection("data" + i), key);
- st.shardColl(dbC.getCollection("data" + i), key);
- }
+var key = {_id: 1};
+for (var i = 0; i < numColls; i++) {
+ st.shardColl(dbA.getCollection("data" + i), key);
+ st.shardColl(dbB.getCollection("data" + i), key);
+ st.shardColl(dbC.getCollection("data" + i), key);
+}
- // Insert a document to an unsharded collection and make sure that the document is there.
- assert.writeOK(dbA.unsharded.insert({dummy: 1}));
- var shardName = config.databases.findOne({_id: dbA.getName()}).primary;
- var shardHostConn = new Mongo(config.shards.findOne({_id: shardName}).host);
- var dbAOnShard = shardHostConn.getDB(dbA.getName());
- assert.neq(null, dbAOnShard.unsharded.findOne({dummy: 1}));
+// Insert a document into an unsharded collection and make sure that the document is there.
+assert.writeOK(dbA.unsharded.insert({dummy: 1}));
+var shardName = config.databases.findOne({_id: dbA.getName()}).primary;
+var shardHostConn = new Mongo(config.shards.findOne({_id: shardName}).host);
+var dbAOnShard = shardHostConn.getDB(dbA.getName());
+assert.neq(null, dbAOnShard.unsharded.findOne({dummy: 1}));
- // Drop the non-suffixed db and ensure that it is the only one that was dropped.
- assert.commandWorked(dbA.dropDatabase());
- var dbs = mongos.getDBNames();
- for (var i = 0; i < dbs.length; i++) {
- assert.neq(dbs[i], "" + dbA);
- }
+// Drop dbA and ensure that it is the only database that was dropped.
+assert.commandWorked(dbA.dropDatabase());
+var dbs = mongos.getDBNames();
+for (var i = 0; i < dbs.length; i++) {
+ assert.neq(dbs[i], "" + dbA);
+}
- assert.eq(0, config.databases.count({_id: dbA.getName()}));
- assert.eq(1, config.databases.count({_id: dbB.getName()}));
- assert.eq(1, config.databases.count({_id: dbC.getName()}));
+assert.eq(0, config.databases.count({_id: dbA.getName()}));
+assert.eq(1, config.databases.count({_id: dbB.getName()}));
+assert.eq(1, config.databases.count({_id: dbC.getName()}));
- // 10 dropped collections
- assert.eq(numColls,
- config.collections.count({_id: RegExp("^" + dbA + "\\..*"), dropped: true}));
+// 10 dropped collections
+assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbA + "\\..*"), dropped: true}));
- // 20 active (dropped is missing)
- assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbB + "\\..*")}));
- assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbC + "\\..*")}));
+// 20 collections still active in dbB and dbC (their entries lack the 'dropped' flag)
+assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbB + "\\..*")}));
+assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbC + "\\..*")}));
- for (var i = 0; i < numColls; i++) {
- assert.eq(numDocs / numColls, dbB.getCollection("data" + (i % numColls)).find().itcount());
- assert.eq(numDocs / numColls, dbC.getCollection("data" + (i % numColls)).find().itcount());
- }
+for (var i = 0; i < numColls; i++) {
+ assert.eq(numDocs / numColls, dbB.getCollection("data" + (i % numColls)).find().itcount());
+ assert.eq(numDocs / numColls, dbC.getCollection("data" + (i % numColls)).find().itcount());
+}
- // Check that the unsharded collection should have been dropped.
- assert.eq(null, dbAOnShard.unsharded.findOne());
-
- st.stop();
+// Check that the unsharded collection has also been dropped.
+assert.eq(null, dbAOnShard.unsharded.findOne());
+st.stop();
})();
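
A minimal sketch of the bookkeeping the test checks: dropping a nonexistent database only reports 'info', and dropping a sharded database removes its config.databases entry while marking its config.collections entries as dropped. Illustrative only; assumes the jstest shell harness and a hypothetical database name "DropMe".

(function() {
'use strict';
const st = new ShardingTest({shards: 2});
const config = st.s0.getDB("config");
const dbA = st.s0.getDB("DropMe");  // hypothetical database name

// Dropping a database that was never created just reports it in 'info'.
const res = assert.commandWorked(dbA.dropDatabase());
assert.eq('database does not exist', res.info);

// Shard one collection, drop the database, and inspect the config metadata.
assert.writeOK(dbA.data.insert({_id: 1}));
st.shardColl(dbA.data, {_id: 1});
assert.commandWorked(dbA.dropDatabase());
assert.eq(0, config.databases.count({_id: "DropMe"}));
assert.eq(1, config.collections.count({_id: "DropMe.data", dropped: true}));
st.stop();
})();
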
diff --git a/jstests/sharding/drop_sharded_db_tags_cleanup.js b/jstests/sharding/drop_sharded_db_tags_cleanup.js
index e58b21d39eb..e5c89b7a551 100644
--- a/jstests/sharding/drop_sharded_db_tags_cleanup.js
+++ b/jstests/sharding/drop_sharded_db_tags_cleanup.js
@@ -1,29 +1,29 @@
// Tests that dropping a database also removes the zones associated with the
// collections in the database.
(function() {
- var st = new ShardingTest({shards: 1});
- var configDB = st.s.getDB('config');
- var shardName = configDB.shards.findOne()._id;
+var st = new ShardingTest({shards: 1});
+var configDB = st.s.getDB('config');
+var shardName = configDB.shards.findOne()._id;
- assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: 'x'}));
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(
- st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: 'x'}));
+assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: 'x'}));
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: 'x'}));
- var tagDoc = configDB.tags.findOne();
- assert.eq(1, configDB.tags.find().length());
- assert.eq('test.user', tagDoc.ns);
- assert.eq({x: 0}, tagDoc.min);
- assert.eq({x: 10}, tagDoc.max);
- assert.eq('x', tagDoc.tag);
+var tagDoc = configDB.tags.findOne();
+assert.eq(1, configDB.tags.find().length());
+assert.eq('test.user', tagDoc.ns);
+assert.eq({x: 0}, tagDoc.min);
+assert.eq({x: 10}, tagDoc.max);
+assert.eq('x', tagDoc.tag);
- var db = st.s.getDB("test");
- db.dropDatabase();
+var db = st.s.getDB("test");
+db.dropDatabase();
- assert.eq(null, configDB.tags.findOne());
- assert.commandWorked(st.s.adminCommand({removeShardFromZone: shardName, zone: 'x'}));
- assert.commandWorked(st.removeRangeFromZone('test.user', {x: 0}, {x: 10}));
+assert.eq(null, configDB.tags.findOne());
+assert.commandWorked(st.s.adminCommand({removeShardFromZone: shardName, zone: 'x'}));
+assert.commandWorked(st.removeRangeFromZone('test.user', {x: 0}, {x: 10}));
- st.stop();
+st.stop();
})();
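
The zone cleanup above boils down to: register a zone range for a collection, drop the database, and observe config.tags emptied. Illustrative sketch, assuming the jstest shell harness; "zdb" and zone "z" are hypothetical names.

(function() {
'use strict';
const st = new ShardingTest({shards: 1});
const configDB = st.s.getDB('config');
const shardName = configDB.shards.findOne()._id;

assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: 'z'}));
assert.commandWorked(st.s.adminCommand({enableSharding: 'zdb'}));
assert.commandWorked(st.s.adminCommand({shardCollection: 'zdb.user', key: {x: 1}}));
assert.commandWorked(
    st.s.adminCommand({updateZoneKeyRange: 'zdb.user', min: {x: 0}, max: {x: 10}, zone: 'z'}));
assert.eq(1, configDB.tags.find().itcount());

// Dropping the database also removes the zone ranges for its collections.
st.s.getDB('zdb').dropDatabase();
assert.eq(null, configDB.tags.findOne());
assert.commandWorked(st.s.adminCommand({removeShardFromZone: shardName, zone: 'z'}));
st.stop();
})();
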
diff --git a/jstests/sharding/dump_coll_metadata.js b/jstests/sharding/dump_coll_metadata.js
index c1254985dd6..f8292262249 100644
--- a/jstests/sharding/dump_coll_metadata.js
+++ b/jstests/sharding/dump_coll_metadata.js
@@ -2,59 +2,58 @@
// Tests that we can dump collection metadata via getShardVersion()
//
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 2, mongos: 1});
-
- var mongos = st.s0;
- var coll = mongos.getCollection("foo.bar");
- var admin = mongos.getDB("admin");
- var shardAdmin = st.shard0.getDB("admin");
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-
- assert.commandWorked(shardAdmin.runCommand({getShardVersion: coll + ""}));
-
- // Make sure we have chunks information on the shard after the shard collection call
- var result = assert.commandWorked(
- shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true}));
- printjson(result);
- var metadata = result.metadata;
-
- assert.eq(metadata.chunks.length, 1);
- assert.eq(metadata.pending.length, 0);
- assert.eq(metadata.chunks[0][0]._id, MinKey);
- assert.eq(metadata.chunks[0][1]._id, MaxKey);
- assert.eq(metadata.shardVersion, result.global);
-
- // Make sure a collection with no metadata still returns the metadata field
- assert.neq(shardAdmin.runCommand({getShardVersion: coll + "xyz", fullMetadata: true}).metadata,
- undefined);
-
- // Make sure we get multiple chunks after a split and refresh -- splits by themselves do not
- // cause the shard to refresh.
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
- assert.commandWorked(
- st.shard0.getDB('admin').runCommand({_flushRoutingTableCacheUpdates: coll + ""}));
-
- assert.commandWorked(shardAdmin.runCommand({getShardVersion: coll + ""}));
- printjson(shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true}));
-
- // Make sure we have chunks info
- result = shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true});
- assert.commandWorked(result);
- metadata = result.metadata;
-
- assert.eq(metadata.chunks.length, 2);
- assert.eq(metadata.pending.length, 0);
- assert(metadata.chunks[0][0]._id + "" == MinKey + "");
- assert(metadata.chunks[0][1]._id == 0);
- assert(metadata.chunks[1][0]._id == 0);
- assert(metadata.chunks[1][1]._id + "" == MaxKey + "");
- assert(metadata.shardVersion + "" == result.global + "");
-
- st.stop();
-
+'use strict';
+
+var st = new ShardingTest({shards: 2, mongos: 1});
+
+var mongos = st.s0;
+var coll = mongos.getCollection("foo.bar");
+var admin = mongos.getDB("admin");
+var shardAdmin = st.shard0.getDB("admin");
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+
+assert.commandWorked(shardAdmin.runCommand({getShardVersion: coll + ""}));
+
+// Make sure we have chunks information on the shard after the shard collection call
+var result =
+ assert.commandWorked(shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true}));
+printjson(result);
+var metadata = result.metadata;
+
+assert.eq(metadata.chunks.length, 1);
+assert.eq(metadata.pending.length, 0);
+assert.eq(metadata.chunks[0][0]._id, MinKey);
+assert.eq(metadata.chunks[0][1]._id, MaxKey);
+assert.eq(metadata.shardVersion, result.global);
+
+// Make sure a collection with no metadata still returns the metadata field
+assert.neq(shardAdmin.runCommand({getShardVersion: coll + "xyz", fullMetadata: true}).metadata,
+ undefined);
+
+// Make sure we get multiple chunks after a split and refresh -- splits by themselves do not
+// cause the shard to refresh.
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+assert.commandWorked(
+ st.shard0.getDB('admin').runCommand({_flushRoutingTableCacheUpdates: coll + ""}));
+
+assert.commandWorked(shardAdmin.runCommand({getShardVersion: coll + ""}));
+printjson(shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true}));
+
+// Make sure we have chunks info
+result = shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true});
+assert.commandWorked(result);
+metadata = result.metadata;
+
+assert.eq(metadata.chunks.length, 2);
+assert.eq(metadata.pending.length, 0);
+assert(metadata.chunks[0][0]._id + "" == MinKey + "");
+assert(metadata.chunks[0][1]._id == 0);
+assert(metadata.chunks[1][0]._id == 0);
+assert(metadata.chunks[1][1]._id + "" == MaxKey + "");
+assert(metadata.shardVersion + "" == result.global + "");
+
+st.stop();
})();
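
For reference, the metadata dump the test inspects can be fetched with a single getShardVersion call. Illustrative sketch, assuming the jstest shell harness and a hypothetical namespace "meta.bar".

(function() {
'use strict';
const st = new ShardingTest({shards: 1});
const coll = st.s.getCollection("meta.bar");  // hypothetical namespace
assert.commandWorked(st.s.adminCommand({enableSharding: "meta"}));
assert.commandWorked(st.s.adminCommand({shardCollection: coll + "", key: {_id: 1}}));

// fullMetadata exposes the shard's cached chunk ranges and its shard version.
const res = assert.commandWorked(
    st.shard0.getDB("admin").runCommand({getShardVersion: coll + "", fullMetadata: true}));
assert.eq(1, res.metadata.chunks.length);          // single chunk covering (MinKey, MaxKey)
assert.eq(res.metadata.shardVersion, res.global);  // cached version matches the global version
st.stop();
})();
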
diff --git a/jstests/sharding/empty_doc_results.js b/jstests/sharding/empty_doc_results.js
index 0ee44a76988..65fe1cccd7f 100644
--- a/jstests/sharding/empty_doc_results.js
+++ b/jstests/sharding/empty_doc_results.js
@@ -1,59 +1,59 @@
// Verifies that mongos correctly handles empty documents when all fields are projected out
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 2});
-
- var mongos = st.s0;
- var coll = mongos.getCollection("foo.bar");
- var admin = mongos.getDB("admin");
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
- printjson(admin.runCommand({movePrimary: coll.getDB().getName(), to: st.shard0.shardName}));
- assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
-
- assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(
- admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
-
- st.printShardingStatus();
-
- // Insert 100 documents, half of which have an extra field
- for (var i = -50; i < 50; i++) {
- var doc = {};
- if (i >= 0)
- doc.positiveId = true;
- assert.writeOK(coll.insert(doc));
- }
-
- //
- //
- // Ensure projecting out all fields still returns the same number of documents
- assert.eq(100, coll.find({}).itcount());
- assert.eq(100, coll.find({}).sort({positiveId: 1}).itcount());
- assert.eq(100, coll.find({}, {_id: 0, positiveId: 0}).itcount());
- // Can't remove sort key from projection (SERVER-11877) but some documents will still be empty
- assert.eq(100, coll.find({}, {_id: 0}).sort({positiveId: 1}).itcount());
-
- //
- //
- // Ensure projecting out all fields still returns the same ordering of documents
- var assertLast50Positive = function(sortedDocs) {
- assert.eq(100, sortedDocs.length);
- var positiveCount = 0;
- for (var i = 0; i < sortedDocs.length; ++i) {
- if (sortedDocs[i].positiveId) {
- positiveCount++;
- } else {
- // Make sure only the last set of documents have "positiveId" set
- assert.eq(positiveCount, 0);
- }
+'use strict';
+
+var st = new ShardingTest({shards: 2});
+
+var mongos = st.s0;
+var coll = mongos.getCollection("foo.bar");
+var admin = mongos.getDB("admin");
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
+printjson(admin.runCommand({movePrimary: coll.getDB().getName(), to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+
+assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
+
+st.printShardingStatus();
+
+// Insert 100 documents, half of which have an extra field
+for (var i = -50; i < 50; i++) {
+ var doc = {};
+ if (i >= 0)
+ doc.positiveId = true;
+ assert.writeOK(coll.insert(doc));
+}
+
+//
+//
+// Ensure projecting out all fields still returns the same number of documents
+assert.eq(100, coll.find({}).itcount());
+assert.eq(100, coll.find({}).sort({positiveId: 1}).itcount());
+assert.eq(100, coll.find({}, {_id: 0, positiveId: 0}).itcount());
+// Can't remove sort key from projection (SERVER-11877) but some documents will still be empty
+assert.eq(100, coll.find({}, {_id: 0}).sort({positiveId: 1}).itcount());
+
+//
+//
+// Ensure projecting out all fields still returns the same ordering of documents
+var assertLast50Positive = function(sortedDocs) {
+ assert.eq(100, sortedDocs.length);
+ var positiveCount = 0;
+ for (var i = 0; i < sortedDocs.length; ++i) {
+ if (sortedDocs[i].positiveId) {
+ positiveCount++;
+ } else {
+ // Make sure only the last set of documents have "positiveId" set
+ assert.eq(positiveCount, 0);
}
- assert.eq(positiveCount, 50);
- };
+ }
+ assert.eq(positiveCount, 50);
+};
- assertLast50Positive(coll.find({}).sort({positiveId: 1}).toArray());
- assertLast50Positive(coll.find({}, {_id: 0}).sort({positiveId: 1}).toArray());
+assertLast50Positive(coll.find({}).sort({positiveId: 1}).toArray());
+assertLast50Positive(coll.find({}, {_id: 0}).sort({positiveId: 1}).toArray());
- st.stop();
+st.stop();
})();
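
The projection behavior above reduces to one assertion: excluding every user field still returns one (now empty) document per match. Illustrative sketch, assuming the jstest shell harness and a hypothetical namespace "proj.bar".

(function() {
'use strict';
const st = new ShardingTest({shards: 2});
const coll = st.s0.getCollection("proj.bar");  // hypothetical namespace
assert.commandWorked(st.s0.adminCommand({enableSharding: "proj"}));
assert.commandWorked(st.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));

for (let i = 0; i < 10; i++) {
    assert.writeOK(coll.insert({_id: i, positiveId: true}));
}

// Projecting every user field away still yields one document per match.
assert.eq(10, coll.find({}, {_id: 0, positiveId: 0}).itcount());
st.stop();
})();
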
diff --git a/jstests/sharding/enable_sharding_basic.js b/jstests/sharding/enable_sharding_basic.js
index fb0cbdbdb4a..d185ff11b6e 100644
--- a/jstests/sharding/enable_sharding_basic.js
+++ b/jstests/sharding/enable_sharding_basic.js
@@ -3,57 +3,56 @@
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({mongos: 2, shards: 2});
+var st = new ShardingTest({mongos: 2, shards: 2});
- // enableSharding can run only on mongos.
- assert.commandFailedWithCode(
- st.rs0.getPrimary().getDB('admin').runCommand({enableSharding: 'db'}),
- ErrorCodes.CommandNotFound);
+// enableSharding can run only on mongos.
+assert.commandFailedWithCode(st.rs0.getPrimary().getDB('admin').runCommand({enableSharding: 'db'}),
+ ErrorCodes.CommandNotFound);
- // enableSharding can run only against the admin database.
- assert.commandFailedWithCode(st.s0.getDB('test').runCommand({enableSharding: 'db'}),
- ErrorCodes.Unauthorized);
+// enableSharding can run only against the admin database.
+assert.commandFailedWithCode(st.s0.getDB('test').runCommand({enableSharding: 'db'}),
+ ErrorCodes.Unauthorized);
- // Can't shard 'local' database.
- assert.commandFailed(st.s0.adminCommand({enableSharding: 'local'}));
+// Can't shard 'local' database.
+assert.commandFailed(st.s0.adminCommand({enableSharding: 'local'}));
- // Can't shard 'admin' database.
- assert.commandFailed(st.s0.adminCommand({enableSharding: 'admin'}));
+// Can't shard 'admin' database.
+assert.commandFailed(st.s0.adminCommand({enableSharding: 'admin'}));
- // Can't shard db with the name that just differ on case.
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'db'}));
- assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
+// Can't enable sharding on a db whose name differs from an existing db only by case.
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'db'}));
+assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
- assert.commandFailedWithCode(st.s0.adminCommand({enableSharding: 'DB'}),
- ErrorCodes.DatabaseDifferCase);
+assert.commandFailedWithCode(st.s0.adminCommand({enableSharding: 'DB'}),
+ ErrorCodes.DatabaseDifferCase);
- // Can't shard invalid db name.
- assert.commandFailed(st.s0.adminCommand({enableSharding: 'a.b'}));
- assert.commandFailed(st.s0.adminCommand({enableSharding: ''}));
+// Can't shard invalid db name.
+assert.commandFailed(st.s0.adminCommand({enableSharding: 'a.b'}));
+assert.commandFailed(st.s0.adminCommand({enableSharding: ''}));
- // Attempting to shard already sharded database returns success.
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'db'}));
- assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
+// Attempting to enable sharding on an already-enabled database returns success.
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'db'}));
+assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
- // Verify config.databases metadata.
- assert.writeOK(st.s0.getDB('unsharded').foo.insert({aKey: "aValue"}));
- assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, false);
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'unsharded'}));
- assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, true);
+// Verify config.databases metadata.
+assert.writeOK(st.s0.getDB('unsharded').foo.insert({aKey: "aValue"}));
+assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, false);
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'unsharded'}));
+assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, true);
- // Sharding a collection before 'enableSharding' is called fails
- assert.commandFailed(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
- assert.commandFailed(st.s1.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
+// Sharding a collection before 'enableSharding' is called fails
+assert.commandFailed(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
+assert.commandFailed(st.s1.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
- assert.writeOK(st.s0.getDB('TestDB').TestColl.insert({_id: 0}));
- assert.writeOK(st.s1.getDB('TestDB').TestColl.insert({_id: 1}));
+assert.writeOK(st.s0.getDB('TestDB').TestColl.insert({_id: 0}));
+assert.writeOK(st.s1.getDB('TestDB').TestColl.insert({_id: 1}));
- // Calling 'enableSharding' on one mongos and 'shardCollection' through another must work
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- assert.commandWorked(st.s1.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
+// Calling 'enableSharding' on one mongos and 'shardCollection' through another must work
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+assert.commandWorked(st.s1.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/enforce_zone_policy.js b/jstests/sharding/enforce_zone_policy.js
index 4c57885ce10..259d05ff716 100644
--- a/jstests/sharding/enforce_zone_policy.js
+++ b/jstests/sharding/enforce_zone_policy.js
@@ -1,105 +1,103 @@
// Tests that changing the zones on a shard at runtime results in a correct distribution of chunks
// across the cluster
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 3, mongos: 1});
+var st = new ShardingTest({shards: 3, mongos: 1});
- assert.commandWorked(st.s0.adminCommand({enablesharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
+assert.commandWorked(st.s0.adminCommand({enablesharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
- var testDB = st.s0.getDB('test');
- var configDB = st.s0.getDB('config');
+var testDB = st.s0.getDB('test');
+var configDB = st.s0.getDB('config');
- var bulk = testDB.foo.initializeUnorderedBulkOp();
- for (var i = 0; i < 9; i++) {
- bulk.insert({_id: i, x: i});
- }
- assert.writeOK(bulk.execute());
+var bulk = testDB.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 9; i++) {
+ bulk.insert({_id: i, x: i});
+}
+assert.writeOK(bulk.execute());
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));
- // Produce 9 chunks with min value at the documents just inserted
- for (var i = 0; i < 8; i++) {
- assert.commandWorked(st.s0.adminCommand({split: 'test.foo', middle: {_id: i}}));
- }
+// Produce 9 chunks with min value at the documents just inserted
+for (var i = 0; i < 8; i++) {
+ assert.commandWorked(st.s0.adminCommand({split: 'test.foo', middle: {_id: i}}));
+}
- /**
- * Waits for the balancer state described by the checking logic function (checkFunc) to be
- * reached and ensures that it does not change from that state at the next balancer round.
- */
- function assertBalanceCompleteAndStable(checkFunc, stepName) {
- st.printShardingStatus(true);
+/**
+ * Waits for the balancer state described by the checking logic function (checkFunc) to be
+ * reached and ensures that it does not change from that state at the next balancer round.
+ */
+function assertBalanceCompleteAndStable(checkFunc, stepName) {
+ st.printShardingStatus(true);
- assert.soon(
- checkFunc, 'Balance at step ' + stepName + ' did not happen', 3 * 60 * 1000, 2000);
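+    // assert.soon polls checkFunc every 2 seconds for up to 3 minutes before failing this step.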
+ assert.soon(checkFunc, 'Balance at step ' + stepName + ' did not happen', 3 * 60 * 1000, 2000);
- st.waitForBalancer(true, 60000);
- st.printShardingStatus(true);
- assert(checkFunc());
+ st.waitForBalancer(true, 60000);
+ st.printShardingStatus(true);
+ assert(checkFunc());
- jsTestLog('Completed step ' + stepName);
- }
+ jsTestLog('Completed step ' + stepName);
+}
- /**
- * Checker function to be used with assertBalanceCompleteAndStable, which ensures that the
- * cluster is evenly balanced.
- */
- function checkClusterEvenlyBalanced() {
- var maxChunkDiff = st.chunkDiff('foo', 'test');
- return maxChunkDiff <= 1;
- }
+/**
+ * Checker function to be used with assertBalanceCompleteAndStable, which ensures that the
+ * cluster is evenly balanced.
+ */
+function checkClusterEvenlyBalanced() {
+ var maxChunkDiff = st.chunkDiff('foo', 'test');
+ return maxChunkDiff <= 1;
+}
- st.startBalancer();
+st.startBalancer();
- // Initial balance
- assertBalanceCompleteAndStable(checkClusterEvenlyBalanced, 'initial');
+// Initial balance
+assertBalanceCompleteAndStable(checkClusterEvenlyBalanced, 'initial');
- // Spread chunks correctly across zones
- st.addShardTag(st.shard0.shardName, 'a');
- st.addShardTag(st.shard1.shardName, 'a');
- st.addTagRange('test.foo', {_id: -100}, {_id: 100}, 'a');
+// Spread chunks correctly across zones
+st.addShardTag(st.shard0.shardName, 'a');
+st.addShardTag(st.shard1.shardName, 'a');
+st.addTagRange('test.foo', {_id: -100}, {_id: 100}, 'a');
- st.addShardTag(st.shard2.shardName, 'b');
- st.addTagRange('test.foo', {_id: MinKey}, {_id: -100}, 'b');
- st.addTagRange('test.foo', {_id: 100}, {_id: MaxKey}, 'b');
+st.addShardTag(st.shard2.shardName, 'b');
+st.addTagRange('test.foo', {_id: MinKey}, {_id: -100}, 'b');
+st.addTagRange('test.foo', {_id: 100}, {_id: MaxKey}, 'b');
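+// Zone 'a' (shard0 and shard1) now covers [-100, 100); zone 'b' (shard2) covers both tail ranges.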
- assertBalanceCompleteAndStable(function() {
- var chunksOnShard2 = configDB.chunks.find({ns: 'test.foo', shard: st.shard2.shardName})
- .sort({min: 1})
- .toArray();
+assertBalanceCompleteAndStable(function() {
+ var chunksOnShard2 =
+ configDB.chunks.find({ns: 'test.foo', shard: st.shard2.shardName}).sort({min: 1}).toArray();
- jsTestLog('Chunks on shard2: ' + tojson(chunksOnShard2));
+ jsTestLog('Chunks on shard2: ' + tojson(chunksOnShard2));
- if (chunksOnShard2.length != 2) {
- return false;
- }
+ if (chunksOnShard2.length != 2) {
+ return false;
+ }
- return chunksOnShard2[0].min._id == MinKey && chunksOnShard2[0].max._id == -100 &&
- chunksOnShard2[1].min._id == 100 && chunksOnShard2[1].max._id == MaxKey;
- }, 'chunks to zones a and b');
+ return chunksOnShard2[0].min._id == MinKey && chunksOnShard2[0].max._id == -100 &&
+ chunksOnShard2[1].min._id == 100 && chunksOnShard2[1].max._id == MaxKey;
+}, 'chunks to zones a and b');
- // Tag the entire collection to shard0 and wait for everything to move to that shard
- st.removeTagRange('test.foo', {_id: -100}, {_id: 100}, 'a');
- st.removeTagRange('test.foo', {_id: MinKey}, {_id: -100}, 'b');
- st.removeTagRange('test.foo', {_id: 100}, {_id: MaxKey}, 'b');
+// Tag the entire collection to shard0 and wait for everything to move to that shard
+st.removeTagRange('test.foo', {_id: -100}, {_id: 100}, 'a');
+st.removeTagRange('test.foo', {_id: MinKey}, {_id: -100}, 'b');
+st.removeTagRange('test.foo', {_id: 100}, {_id: MaxKey}, 'b');
- st.removeShardTag(st.shard1.shardName, 'a');
- st.removeShardTag(st.shard2.shardName, 'b');
- st.addTagRange('test.foo', {_id: MinKey}, {_id: MaxKey}, 'a');
+st.removeShardTag(st.shard1.shardName, 'a');
+st.removeShardTag(st.shard2.shardName, 'b');
+st.addTagRange('test.foo', {_id: MinKey}, {_id: MaxKey}, 'a');
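+// Only shard0 still carries tag 'a' and the single 'a' range spans the whole key space, so the
+// balancer is expected to drain every chunk to shard0.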
- assertBalanceCompleteAndStable(function() {
- var counts = st.chunkCounts('foo');
- printjson(counts);
- return counts[st.shard0.shardName] == 11 && counts[st.shard1.shardName] == 0 &&
- counts[st.shard2.shardName] == 0;
- }, 'all chunks to zone a');
+assertBalanceCompleteAndStable(function() {
+ var counts = st.chunkCounts('foo');
+ printjson(counts);
+ return counts[st.shard0.shardName] == 11 && counts[st.shard1.shardName] == 0 &&
+ counts[st.shard2.shardName] == 0;
+}, 'all chunks to zone a');
- // Remove all zones and ensure collection is correctly redistributed
- st.removeShardTag(st.shard0.shardName, 'a');
- st.removeTagRange('test.foo', {_id: MinKey}, {_id: MaxKey}, 'a');
+// Remove all zones and ensure collection is correctly redistributed
+st.removeShardTag(st.shard0.shardName, 'a');
+st.removeTagRange('test.foo', {_id: MinKey}, {_id: MaxKey}, 'a');
- assertBalanceCompleteAndStable(checkClusterEvenlyBalanced, 'final');
+assertBalanceCompleteAndStable(checkClusterEvenlyBalanced, 'final');
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/error_during_agg_getmore.js b/jstests/sharding/error_during_agg_getmore.js
index d6f3f8a2f90..74933437c16 100644
--- a/jstests/sharding/error_during_agg_getmore.js
+++ b/jstests/sharding/error_during_agg_getmore.js
@@ -1,52 +1,51 @@
// This test was designed to reproduce SERVER-31475. It issues sharded aggregations with an error
// returned from one shard, and a delayed response from another shard.
(function() {
- "use strict";
-
- const st = new ShardingTest({shards: 2, useBridge: true});
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
-
- // Move the [0, MaxKey] chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
-
- // Write a document to each chunk.
- assert.writeOK(mongosColl.insert({_id: -1}));
- assert.writeOK(mongosColl.insert({_id: 1}));
-
- // Delay messages between shard 1 and the mongos, long enough that shard 1's responses will
- // likely arrive after the response from shard 0, but not so long that the background cluster
- // client cleanup job will have been given a chance to run.
- const delayMillis = 100;
- st.rs1.getPrimary().delayMessagesFrom(st.s, delayMillis);
-
- const nTrials = 10;
- for (let i = 1; i < 10; ++i) {
- // This will trigger an error on shard 0, but not shard 1. We set up a delay from shard 1,
- // so the response should get back after the error has been returned to the client. We use a
- // batch size of 0 to ensure the error happens during a getMore.
- assert.throws(
- () => mongosColl
- .aggregate([{$project: {_id: 0, x: {$divide: [2, {$add: ["$_id", 1]}]}}}],
- {cursor: {batchSize: 0}})
- .itcount());
- }
-
- st.stop();
+"use strict";
+
+const st = new ShardingTest({shards: 2, useBridge: true});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+// Shard the test collection on _id.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+
+// Move the [0, MaxKey] chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
+
+// Write a document to each chunk.
+assert.writeOK(mongosColl.insert({_id: -1}));
+assert.writeOK(mongosColl.insert({_id: 1}));
+
+// Delay messages between shard 1 and the mongos, long enough that shard 1's responses will
+// likely arrive after the response from shard 0, but not so long that the background cluster
+// client cleanup job will have been given a chance to run.
+const delayMillis = 100;
+st.rs1.getPrimary().delayMessagesFrom(st.s, delayMillis);
+
+const nTrials = 10;
+for (let i = 0; i < nTrials; ++i) {
+ // This will trigger an error on shard 0, but not shard 1. We set up a delay from shard 1,
+ // so the response should get back after the error has been returned to the client. We use a
+ // batch size of 0 to ensure the error happens during a getMore.
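+    // ({_id: -1} makes the $divide denominator zero, so the error originates on shard 0.)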
+ assert.throws(() =>
+ mongosColl
+ .aggregate([{$project: {_id: 0, x: {$divide: [2, {$add: ["$_id", 1]}]}}}],
+ {cursor: {batchSize: 0}})
+ .itcount());
+}
+
+st.stop();
}());
diff --git a/jstests/sharding/error_propagation.js b/jstests/sharding/error_propagation.js
index 596534bf094..7fe4822e295 100644
--- a/jstests/sharding/error_propagation.js
+++ b/jstests/sharding/error_propagation.js
@@ -1,24 +1,24 @@
(function() {
- // Tests that errors encountered on shards are correctly returned to the client when mongos uses
- // the legacy DBClientCursor method of executing commands on shards. We use aggregation here
- // specifically because it is one of the few query paths that still uses the legacy DBClient
- // classes in mongos.
- "use strict";
+// Tests that errors encountered on shards are correctly returned to the client when mongos uses
+// the legacy DBClientCursor method of executing commands on shards. We use aggregation here
+// specifically because it is one of the few query paths that still uses the legacy DBClient
+// classes in mongos.
+"use strict";
- var st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 3}});
+var st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 3}});
- var db = st.getDB('test');
- db.setSlaveOk(true);
+var db = st.getDB('test');
+db.setSlaveOk(true);
- assert.writeOK(db.foo.insert({a: 1}, {writeConcern: {w: 3}}));
- assert.commandWorked(db.runCommand(
- {aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}}));
+assert.writeOK(db.foo.insert({a: 1}, {writeConcern: {w: 3}}));
+assert.commandWorked(db.runCommand(
+ {aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}}));
- assert.writeOK(db.foo.insert({a: [1, 2]}, {writeConcern: {w: 3}}));
+assert.writeOK(db.foo.insert({a: [1, 2]}, {writeConcern: {w: 3}}));
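+// The array value for 'a' makes $add fail on the shard; that error must propagate through mongos.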
- var res = db.runCommand(
- {aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}});
- assert.commandFailed(res);
- assert.eq("$add only supports numeric or date types, not array", res.errmsg, printjson(res));
- st.stop();
+var res = db.runCommand(
+ {aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}});
+assert.commandFailed(res);
+assert.eq("$add only supports numeric or date types, not array", res.errmsg, printjson(res));
+st.stop();
}());
diff --git a/jstests/sharding/explainFind_stale_mongos.js b/jstests/sharding/explainFind_stale_mongos.js
index d4ed2972541..93a5d1489cc 100644
--- a/jstests/sharding/explainFind_stale_mongos.js
+++ b/jstests/sharding/explainFind_stale_mongos.js
@@ -3,31 +3,31 @@
* find sent using the legacy query mode (it retries on the stale shardVersion error internally).
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
- const st = new ShardingTest({mongos: 2, shards: 1, verbose: 2});
+const st = new ShardingTest({mongos: 2, shards: 1, verbose: 2});
- let staleMongos = st.s0;
- let freshMongos = st.s1;
+let staleMongos = st.s0;
+let freshMongos = st.s1;
- jsTest.log("Make the stale mongos load a cache entry for db " + dbName + " once");
- assert.writeOK(staleMongos.getDB(dbName).getCollection(collName).insert({_id: 1}));
+jsTest.log("Make the stale mongos load a cache entry for db " + dbName + " once");
+assert.writeOK(staleMongos.getDB(dbName).getCollection(collName).insert({_id: 1}));
- jsTest.log("Call shardCollection on " + ns + " from the fresh mongos");
- assert.commandWorked(freshMongos.adminCommand({enableSharding: dbName}));
- assert.commandWorked(freshMongos.adminCommand({shardCollection: ns, key: {"_id": 1}}));
+jsTest.log("Call shardCollection on " + ns + " from the fresh mongos");
+assert.commandWorked(freshMongos.adminCommand({enableSharding: dbName}));
+assert.commandWorked(freshMongos.adminCommand({shardCollection: ns, key: {"_id": 1}}));
- jsTest.log("Ensure the shard knows " + ns + " is sharded");
- assert.commandWorked(
- st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns, syncFromConfig: true}));
+jsTest.log("Ensure the shard knows " + ns + " is sharded");
+assert.commandWorked(
+ st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns, syncFromConfig: true}));
- jsTest.log("Run explain find on " + ns + " from the stale mongos");
- staleMongos.getDB(dbName).getMongo().forceReadMode("legacy");
- staleMongos.getDB(dbName).getCollection(collName).find({$query: {}, $explain: true}).next();
+jsTest.log("Run explain find on " + ns + " from the stale mongos");
+staleMongos.getDB(dbName).getMongo().forceReadMode("legacy");
+staleMongos.getDB(dbName).getCollection(collName).find({$query: {}, $explain: true}).next();
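+// The $query/$explain form is the legacy OP_QUERY syntax; the stale mongos is expected to refresh
+// its routing information and retry internally rather than surface a stale shardVersion error.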
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/explain_agg_read_pref.js b/jstests/sharding/explain_agg_read_pref.js
index 820ab9799cc..0e774e4d8a8 100644
--- a/jstests/sharding/explain_agg_read_pref.js
+++ b/jstests/sharding/explain_agg_read_pref.js
@@ -2,160 +2,153 @@
* Tests that readPref applies on an explain for an aggregation command.
*/
(function() {
- "use strict";
-
- load("jstests/libs/profiler.js"); // For profilerHasAtLeastOneMatchingEntryOrThrow.
-
- const st = new ShardingTest({
- name: "agg_explain_readPref",
- shards: 2,
- other: {
- rs0: {
- nodes: [
- {rsConfig: {priority: 1, tags: {"tag": "primary"}}},
- {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}
- ]
- },
- rs1: {
- nodes: [
- {rsConfig: {priority: 1, tags: {"tag": "primary"}}},
- {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}
- ]
- },
- enableBalancer: false
- }
- });
-
- const mongos = st.s;
- const config = mongos.getDB("config");
- const mongosDB = mongos.getDB("agg_explain_readPref");
- assert.commandWorked(mongosDB.dropDatabase());
-
- const coll = mongosDB.getCollection("coll");
-
- assert.commandWorked(config.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), "agg_explain_readPref-rs0");
- const rs0Primary = st.rs0.getPrimary();
- const rs0Secondary = st.rs0.getSecondary();
- const rs1Primary = st.rs1.getPrimary();
- const rs1Secondary = st.rs1.getSecondary();
-
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+"use strict";
+
+load("jstests/libs/profiler.js"); // For profilerHasAtLeastOneMatchingEntryOrThrow.
+
+const st = new ShardingTest({
+ name: "agg_explain_readPref",
+ shards: 2,
+ other: {
+ rs0: {
+ nodes: [
+ {rsConfig: {priority: 1, tags: {"tag": "primary"}}},
+ {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}
+ ]
+ },
+ rs1: {
+ nodes: [
+ {rsConfig: {priority: 1, tags: {"tag": "primary"}}},
+ {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}
+ ]
+ },
+ enableBalancer: false
}
+});
+
+const mongos = st.s;
+const config = mongos.getDB("config");
+const mongosDB = mongos.getDB("agg_explain_readPref");
+assert.commandWorked(mongosDB.dropDatabase());
+
+const coll = mongosDB.getCollection("coll");
+
+assert.commandWorked(config.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), "agg_explain_readPref-rs0");
+const rs0Primary = st.rs0.getPrimary();
+const rs0Secondary = st.rs0.getSecondary();
+const rs1Primary = st.rs1.getPrimary();
+const rs1Secondary = st.rs1.getSecondary();
+
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+//
+// Confirms that aggregations with explain run against mongos are executed against a tagged
+// secondary or primary, as per readPreference setting.
+//
+function confirmReadPreference(primary, secondary) {
+ assert.commandWorked(secondary.setProfilingLevel(2));
+ assert.commandWorked(primary.setProfilingLevel(2));
+
+ // [<pref>, <tags>, <target>, <comment>]
+ [['primary', [{}], primary, "primary"],
+ ['primaryPreferred', [{tag: 'secondary'}], primary, "primaryPreferred"],
+ ['secondary', [{}], secondary, "secondary"],
+ ['secondary', [{tag: 'secondary'}], secondary, "secondaryTag"],
+ ['secondaryPreferred', [{tag: 'secondary'}], secondary, "secondaryPreferred"],
+ ['secondaryPreferred', [{tag: 'primary'}], primary, "secondaryPreferredTagPrimary"]]
+ .forEach(function(args) {
+ const pref = args[0], tagSets = args[1], target = args[2], name = args[3];
+
+ //
+ // Tests that explain within an aggregate command and an explicit $readPreference
+ // targets the correct node in the replica set given by 'target'.
+ //
+ let comment = name + "_explain_within_query";
+ assert.commandWorked(mongosDB.runCommand({
+ query:
+ {aggregate: "coll", pipeline: [], comment: comment, cursor: {}, explain: true},
+ $readPreference: {mode: pref, tags: tagSets}
+ }));
+
+ // Look for an operation without an exception, since the shard throws a stale config
+ // exception if the shard or mongos has stale routing metadata, and the operation
+ // gets retried.
+ // Note, we look for *at least one* (not exactly one) matching entry: Mongos cancels
+ // requests to all shards on receiving a stale version error from any shard.
+ // However, the requests may have reached the other shards before they are canceled.
+ // If the other shards were already fresh, they will re-receive the request in the
+ // next attempt, meaning the request can show up more than once in the profiler.
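+        // When the mode is 'primary', mongos does not attach $readPreference, so the filter
+        // below matches on the field being absent (null).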
+ profilerHasAtLeastOneMatchingEntryOrThrow({
+ profileDB: target,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.explain.aggregate": coll.getName(),
+ "command.explain.comment": comment,
+ "command.$readPreference.mode": pref == 'primary' ? null : pref,
+ "errMsg": {"$exists": false}
+ }
+ });
- //
- // Confirms that aggregations with explain run against mongos are executed against a tagged
- // secondary or primary, as per readPreference setting.
- //
- function confirmReadPreference(primary, secondary) {
- assert.commandWorked(secondary.setProfilingLevel(2));
- assert.commandWorked(primary.setProfilingLevel(2));
-
- // [<pref>, <tags>, <target>, <comment>]
- [['primary', [{}], primary, "primary"],
- ['primaryPreferred', [{tag: 'secondary'}], primary, "primaryPreferred"],
- ['secondary', [{}], secondary, "secondary"],
- ['secondary', [{tag: 'secondary'}], secondary, "secondaryTag"],
- ['secondaryPreferred', [{tag: 'secondary'}], secondary, "secondaryPreferred"],
- ['secondaryPreferred', [{tag: 'primary'}], primary, "secondaryPreferredTagPrimary"]]
- .forEach(function(args) {
- const pref = args[0], tagSets = args[1], target = args[2], name = args[3];
-
- //
- // Tests that explain within an aggregate command and an explicit $readPreference
- // targets the correct node in the replica set given by 'target'.
- //
- let comment = name + "_explain_within_query";
- assert.commandWorked(mongosDB.runCommand({
- query: {
+ //
+ // Tests that an aggregation command wrapped in an explain with explicit
+ // $queryOptions targets the correct node in the replica set given by 'target'.
+ //
+ comment = name + "_explain_wrapped_agg";
+ assert.commandWorked(mongosDB.runCommand({
+ $query: {
+ explain: {
aggregate: "coll",
pipeline: [],
comment: comment,
cursor: {},
- explain: true
- },
- $readPreference: {mode: pref, tags: tagSets}
- }));
-
- // Look for an operation without an exception, since the shard throws a stale config
- // exception if the shard or mongos has stale routing metadata, and the operation
- // gets retried.
- // Note, we look for *at least one* (not exactly one) matching entry: Mongos cancels
- // requests to all shards on receiving a stale version error from any shard.
- // However, the requests may have reached the other shards before they are canceled.
- // If the other shards were already fresh, they will re-receive the request in the
- // next attempt, meaning the request can show up more than once in the profiler.
- profilerHasAtLeastOneMatchingEntryOrThrow({
- profileDB: target,
- filter: {
- "ns": coll.getFullName(),
- "command.explain.aggregate": coll.getName(),
- "command.explain.comment": comment,
- "command.$readPreference.mode": pref == 'primary' ? null : pref,
- "errMsg": {"$exists": false}
- }
- });
-
- //
- // Tests that an aggregation command wrapped in an explain with explicit
- // $queryOptions targets the correct node in the replica set given by 'target'.
- //
- comment = name + "_explain_wrapped_agg";
- assert.commandWorked(mongosDB.runCommand({
- $query: {
- explain: {
- aggregate: "coll",
- pipeline: [],
- comment: comment,
- cursor: {},
- }
- },
- $readPreference: {mode: pref, tags: tagSets}
- }));
-
- // Look for an operation without an exception, since the shard throws a stale config
- // exception if the shard or mongos has stale routing metadata, and the operation
- // gets retried.
- // Note, we look for *at least one* (not exactly one) matching entry: Mongos cancels
- // requests to all shards on receiving a stale version error from any shard.
- // However, the requests may have reached the other shards before they are canceled.
- // If the other shards were already fresh, they will re-receive the request in the
- // next attempt, meaning the request can show up more than once in the profiler.
- profilerHasAtLeastOneMatchingEntryOrThrow({
- profileDB: target,
- filter: {
- "ns": coll.getFullName(),
- "command.explain.aggregate": coll.getName(),
- "command.explain.comment": comment,
- "command.$readPreference.mode": pref == 'primary' ? null : pref,
- "errMsg": {"$exists": false}
}
- });
+ },
+ $readPreference: {mode: pref, tags: tagSets}
+ }));
+
+ // Look for an operation without an exception, since the shard throws a stale config
+ // exception if the shard or mongos has stale routing metadata, and the operation
+ // gets retried.
+ // Note, we look for *at least one* (not exactly one) matching entry: Mongos cancels
+ // requests to all shards on receiving a stale version error from any shard.
+ // However, the requests may have reached the other shards before they are canceled.
+ // If the other shards were already fresh, they will re-receive the request in the
+ // next attempt, meaning the request can show up more than once in the profiler.
+ profilerHasAtLeastOneMatchingEntryOrThrow({
+ profileDB: target,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.explain.aggregate": coll.getName(),
+ "command.explain.comment": comment,
+ "command.$readPreference.mode": pref == 'primary' ? null : pref,
+ "errMsg": {"$exists": false}
+ }
});
-
- assert.commandWorked(secondary.setProfilingLevel(0));
- assert.commandWorked(primary.setProfilingLevel(0));
- }
-
- //
- // Test aggregate explains run against an unsharded collection.
- //
- confirmReadPreference(rs0Primary.getDB(mongosDB.getName()),
- rs0Secondary.getDB(mongosDB.getName()));
-
- //
- // Test aggregate explains run against a sharded collection.
- //
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(config.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
- assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {a: 6}}));
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {a: 25}, to: "agg_explain_readPref-rs1"}));
-
- // Sharded tests are run against the non-primary shard for the "agg_explain_readPref" db.
- confirmReadPreference(rs1Primary.getDB(mongosDB.getName()),
- rs1Secondary.getDB(mongosDB.getName()));
-
- st.stop();
+ });
+
+ assert.commandWorked(secondary.setProfilingLevel(0));
+ assert.commandWorked(primary.setProfilingLevel(0));
+}
+
+//
+// Test aggregate explains run against an unsharded collection.
+//
+confirmReadPreference(rs0Primary.getDB(mongosDB.getName()), rs0Secondary.getDB(mongosDB.getName()));
+
+//
+// Test aggregate explains run against a sharded collection.
+//
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(config.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
+assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {a: 6}}));
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: coll.getFullName(), find: {a: 25}, to: "agg_explain_readPref-rs1"}));
+
+// Sharded tests are run against the non-primary shard for the "agg_explain_readPref" db.
+confirmReadPreference(rs1Primary.getDB(mongosDB.getName()), rs1Secondary.getDB(mongosDB.getName()));
+
+st.stop();
})();
diff --git a/jstests/sharding/explain_cmd.js b/jstests/sharding/explain_cmd.js
index 5a984f5f610..2e3cb631f85 100644
--- a/jstests/sharding/explain_cmd.js
+++ b/jstests/sharding/explain_cmd.js
@@ -1,160 +1,155 @@
// Tests for the mongos explain command.
(function() {
- 'use strict';
-
- // Create a cluster with 3 shards.
- var st = new ShardingTest({shards: 2});
-
- var db = st.s.getDB("test");
- var explain;
-
- // Setup a collection that will be sharded. The shard key will be 'a'. There's also an index on
- // 'b'.
- var collSharded = db.getCollection("mongos_explain_cmd");
- collSharded.drop();
- collSharded.ensureIndex({a: 1});
- collSharded.ensureIndex({b: 1});
-
- // Enable sharding.
- assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
- st.ensurePrimaryShard(db.getName(), st.shard1.shardName);
- db.adminCommand({shardCollection: collSharded.getFullName(), key: {a: 1}});
-
- // Pre-split the collection to ensure that both shards have chunks. Explicitly
- // move chunks since the balancer is disabled.
- assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: 1}}));
- printjson(db.adminCommand(
- {moveChunk: collSharded.getFullName(), find: {a: 1}, to: st.shard0.shardName}));
-
- assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: 2}}));
- printjson(db.adminCommand(
- {moveChunk: collSharded.getFullName(), find: {a: 2}, to: st.shard1.shardName}));
-
- // Put data on each shard.
- for (var i = 0; i < 3; i++) {
- collSharded.insert({_id: i, a: i, b: 1});
- }
-
- st.printShardingStatus();
-
- // Test a scatter-gather count command.
- assert.eq(3, collSharded.count({b: 1}));
-
- // Explain the scatter-gather count.
- explain = db.runCommand(
- {explain: {count: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
-
- // Validate some basic properties of the result.
- printjson(explain);
- assert.commandWorked(explain);
- assert("queryPlanner" in explain);
- assert("executionStats" in explain);
- assert.eq(2, explain.queryPlanner.winningPlan.shards.length);
- assert.eq(2, explain.executionStats.executionStages.shards.length);
-
- // An explain of a command that doesn't exist should fail gracefully.
- explain = db.runCommand({
- explain: {nonexistent: collSharded.getName(), query: {b: 1}},
- verbosity: "allPlansExecution"
- });
- printjson(explain);
- assert.commandFailed(explain);
-
- // -------
-
- // Setup a collection that is not sharded.
- var collUnsharded = db.getCollection("mongos_explain_cmd_unsharded");
- collUnsharded.drop();
- collUnsharded.ensureIndex({a: 1});
- collUnsharded.ensureIndex({b: 1});
-
- for (var i = 0; i < 3; i++) {
- collUnsharded.insert({_id: i, a: i, b: 1});
- }
- assert.eq(3, collUnsharded.count({b: 1}));
-
- // -------
-
- // Explain a delete operation and verify that it hits all shards without the shard key
- explain = db.runCommand({
- explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
- verbosity: "allPlansExecution"
- });
- assert.commandWorked(explain, tojson(explain));
- assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
- assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
- assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "DELETE");
- assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "DELETE");
- // Check that the deletes didn't actually happen.
- assert.eq(3, collSharded.count({b: 1}));
-
- // Explain a delete operation and verify that it hits only one shard with the shard key
- explain = db.runCommand({
- explain: {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 0}]},
- verbosity: "allPlansExecution"
- });
- assert.commandWorked(explain, tojson(explain));
- assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
- // Check that the deletes didn't actually happen.
- assert.eq(3, collSharded.count({b: 1}));
-
- // Check that we fail gracefully if we try to do an explain of a write batch that has more
- // than one operation in it.
- explain = db.runCommand({
- explain: {
- delete: collSharded.getName(),
- deletes: [{q: {a: 1}, limit: 1}, {q: {a: 2}, limit: 1}]
- },
- verbosity: "allPlansExecution"
- });
- assert.commandFailed(explain, tojson(explain));
-
- // Explain a multi upsert operation and verify that it hits all shards
- explain = db.runCommand({
- explain:
- {update: collSharded.getName(), updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]},
- verbosity: "allPlansExecution"
- });
- assert.commandWorked(explain, tojson(explain));
- assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
- assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
- assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
- assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "UPDATE");
- assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "UPDATE");
- // Check that the update didn't actually happen.
- assert.eq(0, collSharded.count({b: 10}));
-
- // Explain an upsert operation and verify that it hits only a single shard
- explain = db.runCommand({
- explain: {update: collSharded.getName(), updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]},
- verbosity: "allPlansExecution"
- });
- assert.commandWorked(explain, tojson(explain));
- assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
- // Check that the upsert didn't actually happen.
- assert.eq(0, collSharded.count({a: 10}));
-
- // Explain an upsert operation which cannot be targeted, ensure an error is thrown
- explain = db.runCommand({
- explain: {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]},
- verbosity: "allPlansExecution"
- });
- assert.commandFailed(explain, tojson(explain));
-
- // Explain a changeStream, ensure an error is thrown under snapshot read concern.
- const session = db.getMongo().startSession();
- const sessionDB = session.getDatabase(db.getName());
- explain = sessionDB.runCommand({
- aggregate: "coll",
- pipeline: [{$changeStream: {}}],
- explain: true,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(0),
- startTransaction: true,
- autocommit: false
- });
- assert.commandFailedWithCode(
- explain, ErrorCodes.OperationNotSupportedInTransaction, tojson(explain));
-
- st.stop();
+'use strict';
+
+// Create a cluster with 2 shards.
+var st = new ShardingTest({shards: 2});
+
+var db = st.s.getDB("test");
+var explain;
+
+// Set up a collection that will be sharded. The shard key will be 'a'. There's also an index on
+// 'b'.
+var collSharded = db.getCollection("mongos_explain_cmd");
+collSharded.drop();
+collSharded.ensureIndex({a: 1});
+collSharded.ensureIndex({b: 1});
+
+// Enable sharding.
+assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
+st.ensurePrimaryShard(db.getName(), st.shard1.shardName);
+db.adminCommand({shardCollection: collSharded.getFullName(), key: {a: 1}});
+
+// Pre-split the collection to ensure that both shards have chunks. Explicitly
+// move chunks since the balancer is disabled.
+assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: 1}}));
+printjson(
+ db.adminCommand({moveChunk: collSharded.getFullName(), find: {a: 1}, to: st.shard0.shardName}));
+
+assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: 2}}));
+printjson(
+ db.adminCommand({moveChunk: collSharded.getFullName(), find: {a: 2}, to: st.shard1.shardName}));
+
+// Put data on each shard.
+for (var i = 0; i < 3; i++) {
+ collSharded.insert({_id: i, a: i, b: 1});
+}
+
+st.printShardingStatus();
+
+// Test a scatter-gather count command.
+assert.eq(3, collSharded.count({b: 1}));
+
+// Explain the scatter-gather count.
+explain = db.runCommand(
+ {explain: {count: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
+
+// Validate some basic properties of the result.
+printjson(explain);
+assert.commandWorked(explain);
+assert("queryPlanner" in explain);
+assert("executionStats" in explain);
+assert.eq(2, explain.queryPlanner.winningPlan.shards.length);
+assert.eq(2, explain.executionStats.executionStages.shards.length);
+
+// An explain of a command that doesn't exist should fail gracefully.
+explain = db.runCommand(
+ {explain: {nonexistent: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
+printjson(explain);
+assert.commandFailed(explain);
+
+// -------
+
+// Set up a collection that is not sharded.
+var collUnsharded = db.getCollection("mongos_explain_cmd_unsharded");
+collUnsharded.drop();
+collUnsharded.ensureIndex({a: 1});
+collUnsharded.ensureIndex({b: 1});
+
+for (var i = 0; i < 3; i++) {
+ collUnsharded.insert({_id: i, a: i, b: 1});
+}
+assert.eq(3, collUnsharded.count({b: 1}));
+
+// -------
+
+// Explain a delete operation and verify that it hits all shards without the shard key
+explain = db.runCommand({
+ explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
+ verbosity: "allPlansExecution"
+});
+assert.commandWorked(explain, tojson(explain));
+assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
+assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
+assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "DELETE");
+assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "DELETE");
+// Check that the deletes didn't actually happen.
+assert.eq(3, collSharded.count({b: 1}));
+
+// Explain a delete operation and verify that it hits only one shard with the shard key
+explain = db.runCommand({
+ explain: {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 0}]},
+ verbosity: "allPlansExecution"
+});
+assert.commandWorked(explain, tojson(explain));
+assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
+// Check that the deletes didn't actually happen.
+assert.eq(3, collSharded.count({b: 1}));
+
+// Check that we fail gracefully if we try to do an explain of a write batch that has more
+// than one operation in it.
+explain = db.runCommand({
+ explain:
+ {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 1}, {q: {a: 2}, limit: 1}]},
+ verbosity: "allPlansExecution"
+});
+assert.commandFailed(explain, tojson(explain));
+
+// Explain a multi-update operation and verify that it hits all shards
+explain = db.runCommand({
+ explain: {update: collSharded.getName(), updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]},
+ verbosity: "allPlansExecution"
+});
+assert.commandWorked(explain, tojson(explain));
+assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
+assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
+assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
+assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "UPDATE");
+assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "UPDATE");
+// Check that the update didn't actually happen.
+assert.eq(0, collSharded.count({b: 10}));
+
+// Explain an upsert operation and verify that it hits only a single shard
+explain = db.runCommand({
+ explain: {update: collSharded.getName(), updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]},
+ verbosity: "allPlansExecution"
+});
+assert.commandWorked(explain, tojson(explain));
+assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
+// Check that the upsert didn't actually happen.
+assert.eq(0, collSharded.count({a: 10}));
+
+// Explain an upsert operation which cannot be targeted, and ensure an error is thrown
+explain = db.runCommand({
+ explain: {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]},
+ verbosity: "allPlansExecution"
+});
+assert.commandFailed(explain, tojson(explain));
+
+// Explain a changeStream, and ensure an error is thrown under snapshot read concern.
+const session = db.getMongo().startSession();
+const sessionDB = session.getDatabase(db.getName());
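+// readConcern 'snapshot' is only accepted inside a multi-document transaction, hence the
+// txnNumber/startTransaction/autocommit fields; $changeStream is then rejected in the transaction.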
+explain = sessionDB.runCommand({
+ aggregate: "coll",
+ pipeline: [{$changeStream: {}}],
+ explain: true,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(0),
+ startTransaction: true,
+ autocommit: false
+});
+assert.commandFailedWithCode(
+ explain, ErrorCodes.OperationNotSupportedInTransaction, tojson(explain));
+
+st.stop();
})();
diff --git a/jstests/sharding/explain_find_and_modify_sharded.js b/jstests/sharding/explain_find_and_modify_sharded.js
index 3066666c82d..a8dad43a201 100644
--- a/jstests/sharding/explain_find_and_modify_sharded.js
+++ b/jstests/sharding/explain_find_and_modify_sharded.js
@@ -3,88 +3,86 @@
* and the collection is sharded.
*/
(function() {
- 'use strict';
+'use strict';
- var collName = 'explain_find_and_modify';
+var collName = 'explain_find_and_modify';
- // Create a cluster with 2 shards.
- var st = new ShardingTest({shards: 2});
+// Create a cluster with 2 shards.
+var st = new ShardingTest({shards: 2});
- var testDB = st.s.getDB('test');
- var shardKey = {a: 1};
+var testDB = st.s.getDB('test');
+var shardKey = {a: 1};
- // Create a collection with an index on the intended shard key.
- var shardedColl = testDB.getCollection(collName);
- shardedColl.drop();
- assert.commandWorked(testDB.createCollection(collName));
- assert.commandWorked(shardedColl.ensureIndex(shardKey));
+// Create a collection with an index on the intended shard key.
+var shardedColl = testDB.getCollection(collName);
+shardedColl.drop();
+assert.commandWorked(testDB.createCollection(collName));
+assert.commandWorked(shardedColl.ensureIndex(shardKey));
- // Enable sharding on the database and shard the collection.
- // Use "st.shard0.shardName" as the primary shard.
- assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.toString(), st.shard0.shardName);
- assert.commandWorked(
- testDB.adminCommand({shardCollection: shardedColl.getFullName(), key: shardKey}));
+// Enable sharding on the database and shard the collection.
+// Use "st.shard0.shardName" as the primary shard.
+assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.toString(), st.shard0.shardName);
+assert.commandWorked(
+ testDB.adminCommand({shardCollection: shardedColl.getFullName(), key: shardKey}));
- // Split and move the chunks so that
- // chunk { "a" : { "$minKey" : 1 } } -->> { "a" : 10 } is on
- // st.shard0.shardName
- // chunk { "a" : 10 } -->> { "a" : { "$maxKey" : 1 } } is on
- // st.shard1.shardName
- assert.commandWorked(testDB.adminCommand({split: shardedColl.getFullName(), middle: {a: 10}}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: shardedColl.getFullName(), find: {a: 10}, to: st.shard1.shardName}));
+// Split and move the chunks so that:
+//   chunk { "a" : { "$minKey" : 1 } } -->> { "a" : 10 } is on st.shard0.shardName
+//   chunk { "a" : 10 } -->> { "a" : { "$maxKey" : 1 } } is on st.shard1.shardName
+assert.commandWorked(testDB.adminCommand({split: shardedColl.getFullName(), middle: {a: 10}}));
+assert.commandWorked(testDB.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {a: 10}, to: st.shard1.shardName}));
- var res;
+var res;
- // Queries that do not involve the shard key are invalid.
- res = testDB.runCommand({
- explain: {findAndModify: collName, query: {b: 1}, remove: true},
- verbosity: 'queryPlanner'
- });
- assert.commandFailed(res);
+// Queries that do not involve the shard key are invalid.
+res = testDB.runCommand(
+ {explain: {findAndModify: collName, query: {b: 1}, remove: true}, verbosity: 'queryPlanner'});
+assert.commandFailed(res);
- // Queries that have non-equality queries on the shard key are invalid.
- res = testDB.runCommand({
- explain: {
- findAndModify: collName,
- query: {a: {$gt: 5}},
- update: {$inc: {b: 7}},
- },
- verbosity: 'allPlansExecution'
- });
- assert.commandFailed(res);
+// Queries with non-equality predicates on the shard key are invalid.
+res = testDB.runCommand({
+ explain: {
+ findAndModify: collName,
+ query: {a: {$gt: 5}},
+ update: {$inc: {b: 7}},
+ },
+ verbosity: 'allPlansExecution'
+});
+assert.commandFailed(res);
- // Asserts that the explain command ran on the specified shard and used the given stage
- // for performing the findAndModify command.
- function assertExplainResult(explainOut, outerKey, innerKey, shardName, expectedStage) {
- assert(explainOut.hasOwnProperty(outerKey));
- assert(explainOut[outerKey].hasOwnProperty(innerKey));
+// Asserts that the explain command ran on the specified shard and used the given stage
+// for performing the findAndModify command.
+function assertExplainResult(explainOut, outerKey, innerKey, shardName, expectedStage) {
+ assert(explainOut.hasOwnProperty(outerKey));
+ assert(explainOut[outerKey].hasOwnProperty(innerKey));
- var shardStage = explainOut[outerKey][innerKey];
- assert.eq('SINGLE_SHARD', shardStage.stage);
- assert.eq(1, shardStage.shards.length);
- assert.eq(shardName, shardStage.shards[0].shardName);
- assert.eq(expectedStage, shardStage.shards[0][innerKey].stage);
- }
+ var shardStage = explainOut[outerKey][innerKey];
+ assert.eq('SINGLE_SHARD', shardStage.stage);
+ assert.eq(1, shardStage.shards.length);
+ assert.eq(shardName, shardStage.shards[0].shardName);
+ assert.eq(expectedStage, shardStage.shards[0][innerKey].stage);
+}
- // Test that the explain command is routed to "st.shard0.shardName" when targeting the lower
- // chunk range.
- res = testDB.runCommand({
- explain: {findAndModify: collName, query: {a: 0}, update: {$inc: {b: 7}}, upsert: true},
- verbosity: 'queryPlanner'
- });
- assert.commandWorked(res);
- assertExplainResult(res, 'queryPlanner', 'winningPlan', st.shard0.shardName, 'UPDATE');
+// Test that the explain command is routed to "st.shard0.shardName" when targeting the lower
+// chunk range.
+res = testDB.runCommand({
+ explain: {findAndModify: collName, query: {a: 0}, update: {$inc: {b: 7}}, upsert: true},
+ verbosity: 'queryPlanner'
+});
+assert.commandWorked(res);
+assertExplainResult(res, 'queryPlanner', 'winningPlan', st.shard0.shardName, 'UPDATE');
- // Test that the explain command is routed to "st.shard1.shardName" when targeting the higher
- // chunk range.
- res = testDB.runCommand({
- explain: {findAndModify: collName, query: {a: 20, c: 5}, remove: true},
- verbosity: 'executionStats'
- });
- assert.commandWorked(res);
- assertExplainResult(res, 'executionStats', 'executionStages', st.shard1.shardName, 'DELETE');
+// Test that the explain command is routed to "st.shard1.shardName" when targeting the higher
+// chunk range.
+res = testDB.runCommand({
+ explain: {findAndModify: collName, query: {a: 20, c: 5}, remove: true},
+ verbosity: 'executionStats'
+});
+assert.commandWorked(res);
+assertExplainResult(res, 'executionStats', 'executionStages', st.shard1.shardName, 'DELETE');
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/explain_read_pref.js b/jstests/sharding/explain_read_pref.js
index e84393607d3..ce5e2cf47af 100644
--- a/jstests/sharding/explain_read_pref.js
+++ b/jstests/sharding/explain_read_pref.js
@@ -27,7 +27,6 @@ var assertCorrectTargeting = function(explain, isMongos, secExpected) {
};
var testAllModes = function(conn, isMongos) {
-
// The primary is tagged with { tag: 'one' } and the secondary with
// { tag: 'two' } so we can test the interaction of modes and tags. Test
// a bunch of combinations.
diff --git a/jstests/sharding/failcommand_failpoint_not_parallel.js b/jstests/sharding/failcommand_failpoint_not_parallel.js
index c759986a11c..18117e0e4b8 100644
--- a/jstests/sharding/failcommand_failpoint_not_parallel.js
+++ b/jstests/sharding/failcommand_failpoint_not_parallel.js
@@ -1,21 +1,21 @@
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 3, mongos: 1});
- const db = st.s.getDB("test_failcommand_noparallel");
+const st = new ShardingTest({shards: 3, mongos: 1});
+const db = st.s.getDB("test_failcommand_noparallel");
- // Test times when closing connection.
- // Sharding tests require failInternalCommands: true, since the mongos appears to mongod to be
- // an internal client.
- assert.commandWorked(st.s.adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 2},
- data: {closeConnection: true, failCommands: ["find"], failInternalCommands: true}
- }));
- assert.throws(() => db.runCommand({find: "c"}));
- assert.throws(() => db.runCommand({find: "c"}));
- assert.commandWorked(db.runCommand({find: "c"}));
- assert.commandWorked(st.s.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Test times when closing connection.
+// Sharding tests require failInternalCommands: true, since the mongos appears to mongod to be
+// an internal client.
+assert.commandWorked(st.s.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 2},
+ data: {closeConnection: true, failCommands: ["find"], failInternalCommands: true}
+}));
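+// The first two finds hit the failpoint and have their connections closed (hence the throws);
+// the third succeeds once the failpoint's "times" budget is exhausted.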
+assert.throws(() => db.runCommand({find: "c"}));
+assert.throws(() => db.runCommand({find: "c"}));
+assert.commandWorked(db.runCommand({find: "c"}));
+assert.commandWorked(st.s.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
- st.stop();
+st.stop();
}());
diff --git a/jstests/sharding/failcommand_ignores_internal.js b/jstests/sharding/failcommand_ignores_internal.js
index 64789c64c96..7e4f0413cb1 100644
--- a/jstests/sharding/failcommand_ignores_internal.js
+++ b/jstests/sharding/failcommand_ignores_internal.js
@@ -1,54 +1,55 @@
// Tests that the "failCommand" failpoint ignores commands from internal clients: SERVER-34943.
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 1});
- const mongosDB = st.s0.getDB("test_failcommand_ignores_internal");
+const st = new ShardingTest({shards: 1});
+const mongosDB = st.s0.getDB("test_failcommand_ignores_internal");
- // Enough documents for three getMores.
- assert.commandWorked(mongosDB.collection.insertMany([{}, {}, {}]));
- const findReply = assert.commandWorked(mongosDB.runCommand({find: "collection", batchSize: 0}));
- const cursorId = findReply.cursor.id;
+// Enough documents for three getMores.
+assert.commandWorked(mongosDB.collection.insertMany([{}, {}, {}]));
+const findReply = assert.commandWorked(mongosDB.runCommand({find: "collection", batchSize: 0}));
+const cursorId = findReply.cursor.id;
- // Test failing "getMore" twice with a particular error code.
- assert.commandWorked(mongosDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 2},
- data: {errorCode: ErrorCodes.BadValue, failCommands: ["getMore"]}
- }));
- const getMore = {getMore: cursorId, collection: "collection", batchSize: 1};
- assert.commandFailedWithCode(mongosDB.runCommand(getMore), ErrorCodes.BadValue);
- assert.commandFailedWithCode(mongosDB.runCommand(getMore), ErrorCodes.BadValue);
- assert.commandWorked(mongosDB.runCommand(getMore));
+// Test failing "getMore" twice with a particular error code.
+assert.commandWorked(mongosDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 2},
+ data: {errorCode: ErrorCodes.BadValue, failCommands: ["getMore"]}
+}));
+const getMore = {
+ getMore: cursorId,
+ collection: "collection",
+ batchSize: 1
+};
+assert.commandFailedWithCode(mongosDB.runCommand(getMore), ErrorCodes.BadValue);
+assert.commandFailedWithCode(mongosDB.runCommand(getMore), ErrorCodes.BadValue);
+assert.commandWorked(mongosDB.runCommand(getMore));
- // Setting a failpoint for "distinct" on a shard has no effect on mongos.
- assert.commandWorked(st.shard0.getDB("admin").runCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {errorCode: ErrorCodes.BadValue, failCommands: ["distinct"]}
- }));
- const distinct = {distinct: "collection", key: "x"};
- assert.commandFailedWithCode(
- st.shard0.getDB("test_failcommand_ignores_internal").runCommand(distinct),
- ErrorCodes.BadValue);
- assert.commandWorked(mongosDB.runCommand(distinct));
- assert.commandWorked(
- st.shard0.getDB("admin").runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Setting a failpoint for "distinct" on a shard has no effect on mongos.
+assert.commandWorked(st.shard0.getDB("admin").runCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.BadValue, failCommands: ["distinct"]}
+}));
+const distinct = {
+ distinct: "collection",
+ key: "x"
+};
+assert.commandFailedWithCode(
+ st.shard0.getDB("test_failcommand_ignores_internal").runCommand(distinct), ErrorCodes.BadValue);
+assert.commandWorked(mongosDB.runCommand(distinct));
+assert.commandWorked(
+ st.shard0.getDB("admin").runCommand({configureFailPoint: "failCommand", mode: "off"}));
- // Setting a failpoint for "distinct" on a shard with failInternalCommands DOES affect mongos.
- assert.commandWorked(st.shard0.getDB("admin").runCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- errorCode: ErrorCodes.BadValue,
- failCommands: ["distinct"],
- failInternalCommands: true
- }
- }));
- assert.commandFailedWithCode(mongosDB.runCommand(distinct), ErrorCodes.BadValue);
- assert.commandFailedWithCode(
- st.shard0.getDB("test_failcommand_ignores_internal").runCommand(distinct),
- ErrorCodes.BadValue);
+// Setting a failpoint for "distinct" on a shard with failInternalCommands DOES affect mongos.
+assert.commandWorked(st.shard0.getDB("admin").runCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.BadValue, failCommands: ["distinct"], failInternalCommands: true}
+}));
+assert.commandFailedWithCode(mongosDB.runCommand(distinct), ErrorCodes.BadValue);
+assert.commandFailedWithCode(
+ st.shard0.getDB("test_failcommand_ignores_internal").runCommand(distinct), ErrorCodes.BadValue);
- st.stop();
+st.stop();
}());
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index 3f3e86056b1..e92e9c4d713 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -1,132 +1,131 @@
(function() {
- 'use strict';
-
- var s = new ShardingTest({name: "features1", shards: 2, mongos: 1});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- // ---- can't shard system namespaces ----
- assert.commandFailed(s.s0.adminCommand({shardcollection: "test.system.blah", key: {num: 1}}),
- "shard system namespace");
-
- // ---- setup test.foo -----
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
- let db = s.s0.getDB("test");
-
- assert.commandWorked(db.foo.createIndex({y: 1}));
-
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 10}}));
- assert.commandWorked(s.s0.adminCommand(
- {movechunk: "test.foo", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name}));
-
- assert.writeOK(db.foo.insert({num: 5}));
- assert.writeOK(db.foo.save({num: 15}));
-
- let a = s.rs0.getPrimary().getDB("test");
- let b = s.rs1.getPrimary().getDB("test");
-
- // ---- make sure shard key index is everywhere ----
- assert.eq(3, a.foo.getIndexKeys().length, "a index 1");
- assert.eq(3, b.foo.getIndexKeys().length, "b index 1");
-
- // ---- make sure if you add an index it goes everywhere ------
- assert.commandWorked(db.foo.createIndex({x: 1}));
- assert.eq(4, a.foo.getIndexKeys().length, "a index 2");
- assert.eq(4, b.foo.getIndexKeys().length, "b index 2");
-
- // ---- no unique indexes allowed that do not include the shard key ------
- assert.commandFailed(db.foo.createIndex({z: 1}, true));
- assert.eq(4, a.foo.getIndexKeys().length, "a index 3");
- assert.eq(4, b.foo.getIndexKeys().length, "b index 3");
-
- // ---- unique indexes that include the shard key are allowed ------
- assert.commandWorked(db.foo.createIndex({num: 1, bar: 1}, true));
- assert.eq(5, b.foo.getIndexKeys().length, "c index 3");
-
- // ---- can't shard thing with unique indexes ------
- assert.commandWorked(db.foo2.createIndex({a: 1}));
- printjson(db.foo2.getIndexes());
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo2", key: {num: 1}}),
- "shard with index");
-
- assert.commandWorked(db.foo3.createIndex({a: 1}, true));
- printjson(db.foo3.getIndexes());
- assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo3", key: {num: 1}}),
- "shard with unique index");
-
- assert.commandWorked(db.foo7.createIndex({num: 1, a: 1}, true));
- printjson(db.foo7.getIndexes());
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo7", key: {num: 1}}),
- "shard with ok unique index");
-
- // ---- unique shard key ----
- assert.commandWorked(
- s.s0.adminCommand({shardcollection: "test.foo4", key: {num: 1}, unique: true}),
- "shard with index and unique");
- assert.commandWorked(s.s0.adminCommand({split: "test.foo4", middle: {num: 10}}));
- assert.commandWorked(s.s0.adminCommand(
- {movechunk: "test.foo4", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name}));
-
- assert.writeOK(db.foo4.insert({num: 5}));
- assert.writeOK(db.foo4.insert({num: 15}));
-
- assert.eq(1, a.foo4.count(), "ua1");
- assert.eq(1, b.foo4.count(), "ub1");
-
- assert.eq(2, a.foo4.getIndexes().length, "ua2");
- assert.eq(2, b.foo4.getIndexes().length, "ub2");
-
- assert(a.foo4.getIndexes()[1].unique, "ua3");
- assert(b.foo4.getIndexes()[1].unique, "ub3");
-
- assert.eq(2, db.foo4.count(), "uc1");
- assert.writeOK(db.foo4.insert({num: 7}));
- assert.eq(3, db.foo4.count(), "uc2");
- assert.writeError(db.foo4.insert({num: 7}));
- assert.eq(3, db.foo4.count(), "uc4");
-
- // --- don't let you convertToCapped ----
- assert(!db.foo4.isCapped(), "ca1");
- assert(!a.foo4.isCapped(), "ca2");
- assert(!b.foo4.isCapped(), "ca3");
-
- assert.commandFailed(db.foo4.convertToCapped(30000), "ca30");
- assert(!db.foo4.isCapped(), "ca4");
- assert(!a.foo4.isCapped(), "ca5");
- assert(!b.foo4.isCapped(), "ca6");
-
- // make sure i didn't break anything
- db.foo4a.save({a: 1});
- assert(!db.foo4a.isCapped(), "ca7");
- db.foo4a.convertToCapped(30000);
- assert(db.foo4a.isCapped(), "ca8");
-
- // --- don't let you shard a capped collection
- db.createCollection("foo5", {capped: true, size: 30000});
- assert(db.foo5.isCapped(), "cb1");
- assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo5", key: {num: 1}}));
-
- // ---- can't shard non-empty collection without index -----
- assert.writeOK(db.foo8.insert({a: 1}));
- assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo8", key: {a: 1}}),
- "non-empty collection");
-
- // ---- can't shard non-empty collection with null values in shard key ----
- assert.writeOK(db.foo9.insert({b: 1}));
- assert.commandWorked(db.foo9.createIndex({a: 1}));
- assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo9", key: {a: 1}}),
- "entry with null value");
-
- // --- listDatabases ---
- var r = db.getMongo().getDBs();
- assert.eq(3, r.databases.length, tojson(r));
- assert.eq("number", typeof(r.totalSize), "listDatabases 3 : " + tojson(r));
-
- // --- flushRouterconfig ---
- assert.commandWorked(s.s0.adminCommand({flushRouterConfig: 1}));
- assert.commandWorked(s.s0.adminCommand({flushRouterConfig: true}));
- assert.commandWorked(s.s0.adminCommand({flushRouterConfig: 'TestDB'}));
- assert.commandWorked(s.s0.adminCommand({flushRouterConfig: 'TestDB.TestColl'}));
-
- s.stop();
+'use strict';
+
+var s = new ShardingTest({name: "features1", shards: 2, mongos: 1});
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+// ---- can't shard system namespaces ----
+assert.commandFailed(s.s0.adminCommand({shardcollection: "test.system.blah", key: {num: 1}}),
+ "shard system namespace");
+
+// ---- set up test.foo -----
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
+let db = s.s0.getDB("test");
+
+assert.commandWorked(db.foo.createIndex({y: 1}));
+
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 10}}));
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name}));
+
+assert.writeOK(db.foo.insert({num: 5}));
+assert.writeOK(db.foo.save({num: 15}));
+
+let a = s.rs0.getPrimary().getDB("test");
+let b = s.rs1.getPrimary().getDB("test");
+
+// ---- make sure shard key index is everywhere ----
+assert.eq(3, a.foo.getIndexKeys().length, "a index 1");
+assert.eq(3, b.foo.getIndexKeys().length, "b index 1");
+
+// ---- make sure if you add an index it goes everywhere ------
+assert.commandWorked(db.foo.createIndex({x: 1}));
+assert.eq(4, a.foo.getIndexKeys().length, "a index 2");
+assert.eq(4, b.foo.getIndexKeys().length, "b index 2");
+
+// ---- no unique indexes allowed that do not include the shard key ------
+assert.commandFailed(db.foo.createIndex({z: 1}, true));
+assert.eq(4, a.foo.getIndexKeys().length, "a index 3");
+assert.eq(4, b.foo.getIndexKeys().length, "b index 3");
+
+// ---- unique indexes that include the shard key are allowed ------
+assert.commandWorked(db.foo.createIndex({num: 1, bar: 1}, true));
+assert.eq(5, b.foo.getIndexKeys().length, "c index 3");
+
+// ---- can't shard collections with unique indexes that don't include the shard key ------
+assert.commandWorked(db.foo2.createIndex({a: 1}));
+printjson(db.foo2.getIndexes());
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo2", key: {num: 1}}),
+ "shard with index");
+
+assert.commandWorked(db.foo3.createIndex({a: 1}, true));
+printjson(db.foo3.getIndexes());
+assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo3", key: {num: 1}}),
+ "shard with unique index");
+
+assert.commandWorked(db.foo7.createIndex({num: 1, a: 1}, true));
+printjson(db.foo7.getIndexes());
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo7", key: {num: 1}}),
+ "shard with ok unique index");
+
+// ---- unique shard key ----
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo4", key: {num: 1}, unique: true}),
+ "shard with index and unique");
+assert.commandWorked(s.s0.adminCommand({split: "test.foo4", middle: {num: 10}}));
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo4", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name}));
+
+assert.writeOK(db.foo4.insert({num: 5}));
+assert.writeOK(db.foo4.insert({num: 15}));
+
+assert.eq(1, a.foo4.count(), "ua1");
+assert.eq(1, b.foo4.count(), "ub1");
+
+assert.eq(2, a.foo4.getIndexes().length, "ua2");
+assert.eq(2, b.foo4.getIndexes().length, "ub2");
+
+assert(a.foo4.getIndexes()[1].unique, "ua3");
+assert(b.foo4.getIndexes()[1].unique, "ub3");
+
+assert.eq(2, db.foo4.count(), "uc1");
+assert.writeOK(db.foo4.insert({num: 7}));
+assert.eq(3, db.foo4.count(), "uc2");
+assert.writeError(db.foo4.insert({num: 7}));
+assert.eq(3, db.foo4.count(), "uc4");
+
+// --- don't let you convertToCapped ----
+assert(!db.foo4.isCapped(), "ca1");
+assert(!a.foo4.isCapped(), "ca2");
+assert(!b.foo4.isCapped(), "ca3");
+
+assert.commandFailed(db.foo4.convertToCapped(30000), "ca30");
+assert(!db.foo4.isCapped(), "ca4");
+assert(!a.foo4.isCapped(), "ca5");
+assert(!b.foo4.isCapped(), "ca6");
+
+// make sure convertToCapped still works on an unsharded collection
+db.foo4a.save({a: 1});
+assert(!db.foo4a.isCapped(), "ca7");
+db.foo4a.convertToCapped(30000);
+assert(db.foo4a.isCapped(), "ca8");
+
+// --- don't let you shard a capped collection
+db.createCollection("foo5", {capped: true, size: 30000});
+assert(db.foo5.isCapped(), "cb1");
+assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo5", key: {num: 1}}));
+
+// ---- can't shard non-empty collection without index -----
+assert.writeOK(db.foo8.insert({a: 1}));
+assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo8", key: {a: 1}}),
+ "non-empty collection");
+
+// ---- can't shard non-empty collection with null values in shard key ----
+assert.writeOK(db.foo9.insert({b: 1}));
+assert.commandWorked(db.foo9.createIndex({a: 1}));
+assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo9", key: {a: 1}}),
+ "entry with null value");
+
+// --- listDatabases ---
+var r = db.getMongo().getDBs();
+assert.eq(3, r.databases.length, tojson(r));
+assert.eq("number", typeof (r.totalSize), "listDatabases 3 : " + tojson(r));
+
+// --- flushRouterConfig ---
+assert.commandWorked(s.s0.adminCommand({flushRouterConfig: 1}));
+assert.commandWorked(s.s0.adminCommand({flushRouterConfig: true}));
+assert.commandWorked(s.s0.adminCommand({flushRouterConfig: 'TestDB'}));
+assert.commandWorked(s.s0.adminCommand({flushRouterConfig: 'TestDB.TestColl'}));
+
+s.stop();
})();
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index 16d28c4d1ba..374acb15518 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -1,187 +1,185 @@
(function() {
- "use strict";
+"use strict";
- var s = new ShardingTest({name: "features2", shards: 2, mongos: 1});
+var s = new ShardingTest({name: "features2", shards: 2, mongos: 1});
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
- let a = s._connections[0].getDB("test");
- let b = s._connections[1].getDB("test");
+let a = s._connections[0].getDB("test");
+let b = s._connections[1].getDB("test");
- let db = s.getDB("test");
+let db = s.getDB("test");
- // ---- distinct ----
+// ---- distinct ----
- db.foo.save({x: 1});
- db.foo.save({x: 2});
- db.foo.save({x: 3});
- db.foo.ensureIndex({x: 1});
+db.foo.save({x: 1});
+db.foo.save({x: 2});
+db.foo.save({x: 3});
+db.foo.ensureIndex({x: 1});
- assert.eq("1,2,3", db.foo.distinct("x"), "distinct 1");
- assert(a.foo.distinct("x").length == 3 || b.foo.distinct("x").length == 3, "distinct 2");
- assert(a.foo.distinct("x").length == 0 || b.foo.distinct("x").length == 0, "distinct 3");
+assert.eq("1,2,3", db.foo.distinct("x"), "distinct 1");
+assert(a.foo.distinct("x").length == 3 || b.foo.distinct("x").length == 3, "distinct 2");
+assert(a.foo.distinct("x").length == 0 || b.foo.distinct("x").length == 0, "distinct 3");
- assert.eq(1, s.onNumShards("foo"), "A1");
+assert.eq(1, s.onNumShards("foo"), "A1");
- s.shardColl("foo", {x: 1}, {x: 2}, {x: 3}, null, true /* waitForDelete */);
+s.shardColl("foo", {x: 1}, {x: 2}, {x: 3}, null, true /* waitForDelete */);
- assert.eq(2, s.onNumShards("foo"), "A2");
+assert.eq(2, s.onNumShards("foo"), "A2");
- assert.eq("1,2,3", db.foo.distinct("x"), "distinct 4");
+assert.eq("1,2,3", db.foo.distinct("x"), "distinct 4");
- // ----- delete ---
+// ----- delete ---
- assert.eq(3, db.foo.count(), "D1");
+assert.eq(3, db.foo.count(), "D1");
- db.foo.remove({x: 3});
- assert.eq(2, db.foo.count(), "D2");
+db.foo.remove({x: 3});
+assert.eq(2, db.foo.count(), "D2");
- db.foo.save({x: 3});
- assert.eq(3, db.foo.count(), "D3");
+db.foo.save({x: 3});
+assert.eq(3, db.foo.count(), "D3");
- db.foo.remove({x: {$gt: 2}});
- assert.eq(2, db.foo.count(), "D4");
+db.foo.remove({x: {$gt: 2}});
+assert.eq(2, db.foo.count(), "D4");
- db.foo.remove({x: {$gt: -1}});
- assert.eq(0, db.foo.count(), "D5");
+db.foo.remove({x: {$gt: -1}});
+assert.eq(0, db.foo.count(), "D5");
- db.foo.save({x: 1});
- db.foo.save({x: 2});
- db.foo.save({x: 3});
- assert.eq(3, db.foo.count(), "D6");
- db.foo.remove({});
- assert.eq(0, db.foo.count(), "D7");
+db.foo.save({x: 1});
+db.foo.save({x: 2});
+db.foo.save({x: 3});
+assert.eq(3, db.foo.count(), "D6");
+db.foo.remove({});
+assert.eq(0, db.foo.count(), "D7");
- // --- _id key ---
+// --- _id key ---
- db.foo2.save({_id: new ObjectId()});
- db.foo2.save({_id: new ObjectId()});
- db.foo2.save({_id: new ObjectId()});
+db.foo2.save({_id: new ObjectId()});
+db.foo2.save({_id: new ObjectId()});
+db.foo2.save({_id: new ObjectId()});
- assert.eq(1, s.onNumShards("foo2"), "F1");
+assert.eq(1, s.onNumShards("foo2"), "F1");
- printjson(db.foo2.getIndexes());
- s.adminCommand({shardcollection: "test.foo2", key: {_id: 1}});
+printjson(db.foo2.getIndexes());
+s.adminCommand({shardcollection: "test.foo2", key: {_id: 1}});
- assert.eq(3, db.foo2.count(), "F2");
- db.foo2.insert({});
- assert.eq(4, db.foo2.count(), "F3");
+assert.eq(3, db.foo2.count(), "F2");
+db.foo2.insert({});
+assert.eq(4, db.foo2.count(), "F3");
- // --- map/reduce
+// --- map/reduce
- db.mr.save({x: 1, tags: ["a", "b"]});
- db.mr.save({x: 2, tags: ["b", "c"]});
- db.mr.save({x: 3, tags: ["c", "a"]});
- db.mr.save({x: 4, tags: ["b", "c"]});
- db.mr.ensureIndex({x: 1});
+db.mr.save({x: 1, tags: ["a", "b"]});
+db.mr.save({x: 2, tags: ["b", "c"]});
+db.mr.save({x: 3, tags: ["c", "a"]});
+db.mr.save({x: 4, tags: ["b", "c"]});
+db.mr.ensureIndex({x: 1});
- let m = function() {
- this.tags.forEach(function(z) {
- emit(z, {count: 1});
- });
- };
+let m = function() {
+ this.tags.forEach(function(z) {
+ emit(z, {count: 1});
+ });
+};
- let r = function(key, values) {
- var total = 0;
- for (var i = 0; i < values.length; i++) {
- total += values[i].count;
- }
- return {count: total};
- };
+let r = function(key, values) {
+ var total = 0;
+ for (var i = 0; i < values.length; i++) {
+ total += values[i].count;
+ }
+ return {count: total};
+};
- let doMR = function(n) {
- print(n);
+let doMR = function(n) {
+ print(n);
- // on-disk
+ // on-disk
- var res = db.mr.mapReduce(m, r, "smr1_out");
- printjson(res);
- assert.eq(4, res.counts.input, "MR T0 " + n);
+ var res = db.mr.mapReduce(m, r, "smr1_out");
+ printjson(res);
+ assert.eq(4, res.counts.input, "MR T0 " + n);
- var x = db[res.result];
- assert.eq(3, x.find().count(), "MR T1 " + n);
+ var x = db[res.result];
+ assert.eq(3, x.find().count(), "MR T1 " + n);
- var z = {};
- x.find().forEach(function(a) {
- z[a._id] = a.value.count;
- });
- assert.eq(3, Object.keySet(z).length, "MR T2 " + n);
- assert.eq(2, z.a, "MR T3 " + n);
- assert.eq(3, z.b, "MR T4 " + n);
- assert.eq(3, z.c, "MR T5 " + n);
+ var z = {};
+ x.find().forEach(function(a) {
+ z[a._id] = a.value.count;
+ });
+ assert.eq(3, Object.keySet(z).length, "MR T2 " + n);
+ assert.eq(2, z.a, "MR T3 " + n);
+ assert.eq(3, z.b, "MR T4 " + n);
+ assert.eq(3, z.c, "MR T5 " + n);
- x.drop();
+ x.drop();
- // inline
+ // inline
- var res = db.mr.mapReduce(m, r, {out: {inline: 1}});
- printjson(res);
- assert.eq(4, res.counts.input, "MR T6 " + n);
+ var res = db.mr.mapReduce(m, r, {out: {inline: 1}});
+ printjson(res);
+ assert.eq(4, res.counts.input, "MR T6 " + n);
- var z = {};
- res.find().forEach(function(a) {
- z[a._id] = a.value.count;
- });
- printjson(z);
- assert.eq(3, Object.keySet(z).length, "MR T7 " + n);
- assert.eq(2, z.a, "MR T8 " + n);
- assert.eq(3, z.b, "MR T9 " + n);
- assert.eq(3, z.c, "MR TA " + n);
+ var z = {};
+ res.find().forEach(function(a) {
+ z[a._id] = a.value.count;
+ });
+ printjson(z);
+ assert.eq(3, Object.keySet(z).length, "MR T7 " + n);
+ assert.eq(2, z.a, "MR T8 " + n);
+ assert.eq(3, z.b, "MR T9 " + n);
+ assert.eq(3, z.c, "MR TA " + n);
+};
- };
+doMR("before");
- doMR("before");
+assert.eq(1, s.onNumShards("mr"), "E1");
+s.shardColl("mr", {x: 1}, {x: 2}, {x: 3}, null, true /* waitForDelete */);
+assert.eq(2, s.onNumShards("mr"), "E2");
- assert.eq(1, s.onNumShards("mr"), "E1");
- s.shardColl("mr", {x: 1}, {x: 2}, {x: 3}, null, true /* waitForDelete */);
- assert.eq(2, s.onNumShards("mr"), "E1");
-
- doMR("after");
-
- s.adminCommand({split: 'test.mr', middle: {x: 3}});
- s.adminCommand({split: 'test.mr', middle: {x: 4}});
- s.adminCommand({movechunk: 'test.mr', find: {x: 3}, to: s.getPrimaryShard('test').name});
-
- doMR("after extra split");
-
- let cmd = {mapreduce: "mr", map: "emit( ", reduce: "fooz + ", out: "broken1"};
-
- let x = db.runCommand(cmd);
- let y = s._connections[0].getDB("test").runCommand(cmd);
-
- printjson(x);
- printjson(y);
-
- // count
-
- db.countaa.save({"regex": /foo/i});
- db.countaa.save({"regex": /foo/i});
- db.countaa.save({"regex": /foo/i});
- assert.eq(3, db.countaa.count(), "counta1");
- assert.eq(3, db.countaa.find().itcount(), "counta1");
-
- // isMaster and query-wrapped-command
- let isMaster = db.runCommand({isMaster: 1});
- assert(isMaster.ismaster);
- assert.eq('isdbgrid', isMaster.msg);
- delete isMaster.localTime;
- delete isMaster.$clusterTime;
- delete isMaster.operationTime;
-
- let im2 = db.runCommand({query: {isMaster: 1}});
- delete im2.localTime;
- delete im2.$clusterTime;
- delete im2.operationTime;
- assert.eq(isMaster, im2);
-
- im2 = db.runCommand({$query: {isMaster: 1}});
- delete im2.localTime;
- delete im2.$clusterTime;
- delete im2.operationTime;
- assert.eq(isMaster, im2);
-
- s.stop();
+doMR("after");
+
+s.adminCommand({split: 'test.mr', middle: {x: 3}});
+s.adminCommand({split: 'test.mr', middle: {x: 4}});
+s.adminCommand({movechunk: 'test.mr', find: {x: 3}, to: s.getPrimaryShard('test').name});
+
+doMR("after extra split");
+
+let cmd = {mapreduce: "mr", map: "emit( ", reduce: "fooz + ", out: "broken1"};
+
+let x = db.runCommand(cmd);
+let y = s._connections[0].getDB("test").runCommand(cmd);
+
+printjson(x);
+printjson(y);
+
+// count
+
+db.countaa.save({"regex": /foo/i});
+db.countaa.save({"regex": /foo/i});
+db.countaa.save({"regex": /foo/i});
+assert.eq(3, db.countaa.count(), "counta1");
+assert.eq(3, db.countaa.find().itcount(), "counta2");
+
+// isMaster and query-wrapped-command
+let isMaster = db.runCommand({isMaster: 1});
+assert(isMaster.ismaster);
+assert.eq('isdbgrid', isMaster.msg);
+delete isMaster.localTime;
+delete isMaster.$clusterTime;
+delete isMaster.operationTime;
+
+let im2 = db.runCommand({query: {isMaster: 1}});
+delete im2.localTime;
+delete im2.$clusterTime;
+delete im2.operationTime;
+assert.eq(isMaster, im2);
+
+im2 = db.runCommand({$query: {isMaster: 1}});
+delete im2.localTime;
+delete im2.$clusterTime;
+delete im2.operationTime;
+assert.eq(isMaster, im2);
+
+s.stop();
})();
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index cee22543b7e..65b3ba2019d 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -6,153 +6,152 @@
// - Verifies a $where query can be killed on multiple DBs
// - Tests fsync and fsync+lock permissions on sharded db
(function() {
- 'use strict';
-
- var s = new ShardingTest({shards: 2, mongos: 1});
- var dbForTest = s.getDB("test");
- var admin = s.getDB("admin");
- dbForTest.foo.drop();
-
- var numDocs = 10000;
-
- // shard test.foo and add a split point
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
- s.adminCommand({split: "test.foo", middle: {_id: numDocs / 2}});
-
- // move a chunk range to the non-primary shard
- s.adminCommand({
- moveChunk: "test.foo",
- find: {_id: 3},
- to: s.getNonPrimaries("test")[0],
- _waitForDelete: true
- });
-
- // restart balancer
- s.startBalancer();
-
- // insert 10k small documents into the sharded collection
- var bulk = dbForTest.foo.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
-
- var x = dbForTest.foo.stats();
-
- // verify the colleciton has been sharded and documents are evenly distributed
- assert.eq("test.foo", x.ns, "namespace mismatch");
- assert(x.sharded, "collection is not sharded");
- assert.eq(numDocs, x.count, "total count");
- assert.eq(numDocs / 2, x.shards[s.shard0.shardName].count, "count on " + s.shard0.shardName);
- assert.eq(numDocs / 2, x.shards[s.shard1.shardName].count, "count on " + s.shard1.shardName);
- assert(x.totalIndexSize > 0);
-
- // insert one doc into a non-sharded collection
- dbForTest.bar.insert({x: 1});
- var x = dbForTest.bar.stats();
- assert.eq(1, x.count, "XXX1");
- assert.eq("test.bar", x.ns, "XXX2");
- assert(!x.sharded, "XXX3: " + tojson(x));
-
- // fork shell and start querying the data
- var start = new Date();
-
- var whereKillSleepTime = 1000;
- var parallelCommand = "db.foo.find(function() { " + " sleep(" + whereKillSleepTime + "); " +
- " return false; " + "}).itcount(); ";
-
- // fork a parallel shell, but do not wait for it to start
- print("about to fork new shell at: " + Date());
- var awaitShell = startParallelShell(parallelCommand, s.s.port);
- print("done forking shell at: " + Date());
-
- // Get all current $where operations
- function getInProgWhereOps() {
- let inProgressOps = admin.aggregate([{$currentOp: {'allUsers': true}}]);
- let inProgressStr = '';
-
- // Find all the where queries
- var myProcs = [];
- while (inProgressOps.hasNext()) {
- let op = inProgressOps.next();
- inProgressStr += tojson(op);
- if (op.command && op.command.filter && op.command.filter.$where) {
- myProcs.push(op);
- }
- }
-
- if (myProcs.length == 0) {
- print('No $where operations found: ' + inProgressStr);
- } else {
- print('Found ' + myProcs.length + ' $where operations: ' + tojson(myProcs));
+'use strict';
+
+var s = new ShardingTest({shards: 2, mongos: 1});
+var dbForTest = s.getDB("test");
+var admin = s.getDB("admin");
+dbForTest.foo.drop();
+
+var numDocs = 10000;
+
+// shard test.foo and add a split point
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+s.adminCommand({split: "test.foo", middle: {_id: numDocs / 2}});
+
+// move a chunk range to the non-primary shard
+s.adminCommand({
+ moveChunk: "test.foo",
+ find: {_id: 3},
+ to: s.getNonPrimaries("test")[0],
+ _waitForDelete: true
+});
+
+// restart balancer
+s.startBalancer();
+
+// insert 10k small documents into the sharded collection
+var bulk = dbForTest.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
+
+var x = dbForTest.foo.stats();
+
+// verify the collection has been sharded and documents are evenly distributed
+assert.eq("test.foo", x.ns, "namespace mismatch");
+assert(x.sharded, "collection is not sharded");
+assert.eq(numDocs, x.count, "total count");
+assert.eq(numDocs / 2, x.shards[s.shard0.shardName].count, "count on " + s.shard0.shardName);
+assert.eq(numDocs / 2, x.shards[s.shard1.shardName].count, "count on " + s.shard1.shardName);
+assert(x.totalIndexSize > 0);
+
+// insert one doc into a non-sharded collection
+dbForTest.bar.insert({x: 1});
+var x = dbForTest.bar.stats();
+assert.eq(1, x.count, "XXX1");
+assert.eq("test.bar", x.ns, "XXX2");
+assert(!x.sharded, "XXX3: " + tojson(x));
+
+// fork shell and start querying the data
+var start = new Date();
+
+var whereKillSleepTime = 1000;
+var parallelCommand = "db.foo.find(function() { " +
+ " sleep(" + whereKillSleepTime + "); " +
+ " return false; " +
+ "}).itcount(); ";
+
+// fork a parallel shell, but do not wait for it to start
+print("about to fork new shell at: " + Date());
+var awaitShell = startParallelShell(parallelCommand, s.s.port);
+print("done forking shell at: " + Date());
+
+// Get all current $where operations
+function getInProgWhereOps() {
+ let inProgressOps = admin.aggregate([{$currentOp: {'allUsers': true}}]);
+ let inProgressStr = '';
+
+ // Find all the where queries
+ var myProcs = [];
+ while (inProgressOps.hasNext()) {
+ let op = inProgressOps.next();
+ inProgressStr += tojson(op);
+ if (op.command && op.command.filter && op.command.filter.$where) {
+ myProcs.push(op);
}
-
- return myProcs;
}
- var curOpState = 0; // 0 = not found, 1 = killed
- var killTime = null;
- var mine;
-
- assert.soon(function() {
- // Get all the current operations
- mine = getInProgWhereOps();
-
- // Wait for the queries to start (one per shard, so 2 total)
- if (curOpState == 0 && mine.length == 2) {
- // queries started
- curOpState = 1;
- // kill all $where
- mine.forEach(function(z) {
- printjson(dbForTest.getSisterDB("admin").killOp(z.opid));
- });
- killTime = new Date();
- }
- // Wait for killed queries to end
- else if (curOpState == 1 && mine.length == 0) {
- // Queries ended
- curOpState = 2;
- return true;
- }
-
- }, "Couldn't kill the $where operations.", 2 * 60 * 1000);
+ if (myProcs.length == 0) {
+ print('No $where operations found: ' + inProgressStr);
+ } else {
+ print('Found ' + myProcs.length + ' $where operations: ' + tojson(myProcs));
+ }
- print("after loop: " + Date());
- assert(killTime, "timed out waiting too kill last mine:" + tojson(mine));
+ return myProcs;
+}
+
+var curOpState = 0;  // 0 = not found, 1 = killed, 2 = ended
+var killTime = null;
+var mine;
+
+assert.soon(function() {
+ // Get all the current operations
+ mine = getInProgWhereOps();
+
+ // Wait for the queries to start (one per shard, so 2 total)
+ if (curOpState == 0 && mine.length == 2) {
+ // queries started
+ curOpState = 1;
+ // kill all $where
+ mine.forEach(function(z) {
+ printjson(dbForTest.getSisterDB("admin").killOp(z.opid));
+ });
+ killTime = new Date();
+ }
+ // Wait for killed queries to end
+ else if (curOpState == 1 && mine.length == 0) {
+ // Queries ended
+ curOpState = 2;
+ return true;
+ }
+}, "Couldn't kill the $where operations.", 2 * 60 * 1000);
- assert.eq(2, curOpState, "failed killing");
+print("after loop: " + Date());
+assert(killTime, "timed out waiting to kill last mine:" + tojson(mine));
- killTime = new Date().getTime() - killTime.getTime();
- print("killTime: " + killTime);
- print("time if run full: " + (numDocs * whereKillSleepTime));
- assert.gt(whereKillSleepTime * numDocs / 20, killTime, "took too long to kill");
+assert.eq(2, curOpState, "failed killing");
- // wait for the parallel shell we spawned to complete
- var exitCode = awaitShell({checkExitSuccess: false});
- assert.neq(
- 0, exitCode, "expected shell to exit abnormally due to JS execution being terminated");
+killTime = new Date().getTime() - killTime.getTime();
+print("killTime: " + killTime);
+print("time if run full: " + (numDocs * whereKillSleepTime));
+assert.gt(whereKillSleepTime * numDocs / 20, killTime, "took too long to kill");
- var end = new Date();
- print("elapsed: " + (end.getTime() - start.getTime()));
+// wait for the parallel shell we spawned to complete
+var exitCode = awaitShell({checkExitSuccess: false});
+assert.neq(0, exitCode, "expected shell to exit abnormally due to JS execution being terminated");
- // test fsync command on non-admin db
- x = dbForTest.runCommand("fsync");
- assert(!x.ok, "fsync on non-admin namespace should fail : " + tojson(x));
- assert(x.code == 13, "fsync on non-admin succeeded, but should have failed: " + tojson(x));
+var end = new Date();
+print("elapsed: " + (end.getTime() - start.getTime()));
- // test fsync on admin db
- x = dbForTest._adminCommand("fsync");
- assert(x.ok == 1, "fsync failed: " + tojson(x));
- if (x.all[s.shard0.shardName] > 0) {
- assert(x.numFiles > 0, "fsync failed: " + tojson(x));
- }
+// test fsync command on non-admin db
+x = dbForTest.runCommand("fsync");
+assert(!x.ok, "fsync on non-admin namespace should fail: " + tojson(x));
+assert(x.code == 13, "fsync on non-admin succeeded, but should have failed: " + tojson(x));
- // test fsync+lock on admin db
- x = dbForTest._adminCommand({"fsync": 1, lock: true});
- assert(!x.ok, "lock should fail: " + tojson(x));
+// test fsync on admin db
+x = dbForTest._adminCommand("fsync");
+assert(x.ok == 1, "fsync failed: " + tojson(x));
+if (x.all[s.shard0.shardName] > 0) {
+ assert(x.numFiles > 0, "fsync failed: " + tojson(x));
+}
- s.stop();
+// test fsync+lock on admin db
+x = dbForTest._adminCommand({"fsync": 1, lock: true});
+assert(!x.ok, "lock should fail: " + tojson(x));
+s.stop();
})();
diff --git a/jstests/sharding/find_and_modify_after_multi_write.js b/jstests/sharding/find_and_modify_after_multi_write.js
index 749f999c54c..81c6db44a3f 100644
--- a/jstests/sharding/find_and_modify_after_multi_write.js
+++ b/jstests/sharding/find_and_modify_after_multi_write.js
@@ -1,73 +1,72 @@
(function() {
- "use strict";
+"use strict";
- /**
- * Test that a targetted findAndModify will be properly routed after executing a write that
- * does not perform any shard version checks.
- */
- var runTest = function(writeFunc) {
- var st = new ShardingTest({shards: 2, mongos: 2});
+/**
+ * Test that a targeted findAndModify will be properly routed after executing a write that
+ * does not perform any shard version checks.
+ */
+var runTest = function(writeFunc) {
+ var st = new ShardingTest({shards: 2, mongos: 2});
- var testDB = st.s.getDB('test');
+ var testDB = st.s.getDB('test');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
+ assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+ assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- var testDB2 = st.s1.getDB('test');
- testDB2.user.insert({x: 123456});
+ var testDB2 = st.s1.getDB('test');
+ testDB2.user.insert({x: 123456});
- // Move chunk to bump version on a different mongos.
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+ // Move chunk to bump version on a different mongos.
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- // Issue a targetted findAndModify and check that it was upserted to the right shard.
- assert.commandWorked(testDB2.runCommand(
- {findAndModify: 'user', query: {x: 100}, update: {$set: {y: 1}}, upsert: true}));
+    // Issue a targeted findAndModify and check that it was upserted to the right shard.
+ assert.commandWorked(testDB2.runCommand(
+ {findAndModify: 'user', query: {x: 100}, update: {$set: {y: 1}}, upsert: true}));
- assert.neq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 100}));
- assert.eq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 100}));
+ assert.neq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 100}));
+ assert.eq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 100}));
- // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
- // incremented to 3
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+ // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
+ // incremented to 3
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- assert.commandWorked(testDB2.runCommand(
- {findAndModify: 'user', query: {x: 200}, update: {$set: {y: 1}}, upsert: true}));
+ assert.commandWorked(testDB2.runCommand(
+ {findAndModify: 'user', query: {x: 200}, update: {$set: {y: 1}}, upsert: true}));
- assert.eq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 200}));
- assert.neq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 200}));
+ assert.eq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 200}));
+ assert.neq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 200}));
- // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
- // incremented to 4
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+ // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
+ // incremented to 4
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- // Ensure that write commands with multi version do not reset the connection shard version
- // to
- // ignored.
- writeFunc(testDB2);
+    // Ensure that write commands with multi version do not reset the connection shard
+    // version to ignored.
+ writeFunc(testDB2);
- assert.commandWorked(testDB2.runCommand(
- {findAndModify: 'user', query: {x: 300}, update: {$set: {y: 1}}, upsert: true}));
+ assert.commandWorked(testDB2.runCommand(
+ {findAndModify: 'user', query: {x: 300}, update: {$set: {y: 1}}, upsert: true}));
- assert.neq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 300}));
- assert.eq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 300}));
+ assert.neq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 300}));
+ assert.eq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 300}));
- st.stop();
- };
+ st.stop();
+};
- runTest(function(db) {
- db.user.update({}, {$inc: {y: 987654}}, false, true);
- });
-
- runTest(function(db) {
- db.user.remove({y: 'noMatch'}, false);
- });
+runTest(function(db) {
+ db.user.update({}, {$inc: {y: 987654}}, false, true);
+});
+
+runTest(function(db) {
+ db.user.remove({y: 'noMatch'}, false);
+});
})();
diff --git a/jstests/sharding/find_collname_uuid_test.js b/jstests/sharding/find_collname_uuid_test.js
index 59f2e4e7674..846a24b0e21 100644
--- a/jstests/sharding/find_collname_uuid_test.js
+++ b/jstests/sharding/find_collname_uuid_test.js
@@ -2,20 +2,20 @@
* Test ClusterFindCmd with UUID for collection name fails (but does not crash)
*/
(function() {
- "use strict";
+"use strict";
- var cmdRes;
- var cursorId;
+var cmdRes;
+var cursorId;
- var st = new ShardingTest({shards: 2});
- st.stopBalancer();
+var st = new ShardingTest({shards: 2});
+st.stopBalancer();
- var db = st.s.getDB("test");
+var db = st.s.getDB("test");
- assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
+assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
- cmdRes = db.adminCommand({find: UUID()});
- assert.commandFailed(cmdRes);
+cmdRes = db.adminCommand({find: UUID()});
+assert.commandFailed(cmdRes);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/find_getmore_cmd.js b/jstests/sharding/find_getmore_cmd.js
index e688fc0f9ca..d711c7cda53 100644
--- a/jstests/sharding/find_getmore_cmd.js
+++ b/jstests/sharding/find_getmore_cmd.js
@@ -2,163 +2,163 @@
* Test issuing raw find and getMore commands to mongos using db.runCommand().
*/
(function() {
- "use strict";
-
- var cmdRes;
- var cursorId;
-
- var st = new ShardingTest({shards: 2});
- st.stopBalancer();
-
- // Set up a collection sharded by "_id" with one chunk on each of the two shards.
- var db = st.s.getDB("test");
- var coll = db.getCollection("find_getmore_cmd");
-
- coll.drop();
- assert.writeOK(coll.insert({_id: -9, a: 4, b: "foo foo"}));
- assert.writeOK(coll.insert({_id: -5, a: 8}));
- assert.writeOK(coll.insert({_id: -1, a: 10, b: "foo"}));
- assert.writeOK(coll.insert({_id: 1, a: 5}));
- assert.writeOK(coll.insert({_id: 5, a: 20, b: "foo foo foo"}));
- assert.writeOK(coll.insert({_id: 9, a: 3}));
-
- assert.commandWorked(coll.ensureIndex({b: "text"}));
-
- assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
- st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
- db.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}});
- assert.commandWorked(db.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(
- db.adminCommand({moveChunk: coll.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
-
- // Find with no options.
- cmdRes = db.runCommand({find: coll.getName()});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 6);
-
- // Find with batchSize greater than the number of docs residing on each shard. This means that a
- // getMore is required between mongos and the shell, but no getMores are issued between mongos
- // and mongod.
- cmdRes = db.runCommand({find: coll.getName(), batchSize: 4});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 4);
- cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 2);
-
- // Find with batchSize less than the number of docs residing on each shard. This time getMores
- // will be issued between mongos and mongod.
- cmdRes = db.runCommand({find: coll.getName(), batchSize: 2});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 2);
- cursorId = cmdRes.cursor.id;
- cmdRes = db.runCommand({getMore: cursorId, collection: coll.getName(), batchSize: 2});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, cursorId);
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 2);
- cmdRes = db.runCommand({getMore: cursorId, collection: coll.getName()});
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 2);
-
- // Combine skip, limit, and sort.
- cmdRes = db.runCommand({find: coll.getName(), skip: 4, limit: 1, sort: {_id: -1}});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 1);
- assert.eq(cmdRes.cursor.firstBatch[0], {_id: -5, a: 8});
-
- // Find where adding limit/ntoreturn and skip overflows.
- var largeInt = new NumberLong('9223372036854775807');
- cmdRes = db.runCommand({find: coll.getName(), skip: largeInt, limit: largeInt});
- assert.commandFailed(cmdRes);
- cmdRes = db.runCommand({find: coll.getName(), skip: largeInt, ntoreturn: largeInt});
- assert.commandFailed(cmdRes);
- cmdRes = db.runCommand(
- {find: coll.getName(), skip: largeInt, ntoreturn: largeInt, singleBatch: true});
- assert.commandFailed(cmdRes);
-
- // A predicate with $where.
- cmdRes = db.runCommand({find: coll.getName(), filter: {$where: "this._id == 5"}});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 1);
- assert.eq(cmdRes.cursor.firstBatch[0], {_id: 5, a: 20, b: "foo foo foo"});
-
- // Tailable option should result in a failure because the collection is not capped.
- cmdRes = db.runCommand({find: coll.getName(), tailable: true});
- assert.commandFailed(cmdRes);
-
- // $natural sort.
- cmdRes = db.runCommand({find: coll.getName(), sort: {$natural: 1}});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 6);
-
- // Should be able to sort despite projecting out the sort key.
- cmdRes = db.runCommand({find: coll.getName(), sort: {a: 1}, projection: {_id: 1}});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 6);
- assert.eq(cmdRes.cursor.firstBatch[0], {_id: 9});
- assert.eq(cmdRes.cursor.firstBatch[1], {_id: -9});
- assert.eq(cmdRes.cursor.firstBatch[2], {_id: 1});
- assert.eq(cmdRes.cursor.firstBatch[3], {_id: -5});
- assert.eq(cmdRes.cursor.firstBatch[4], {_id: -1});
- assert.eq(cmdRes.cursor.firstBatch[5], {_id: 5});
-
- // Ensure textScore meta-sort works in mongos.
- cmdRes = db.runCommand({
- find: coll.getName(),
- filter: {$text: {$search: "foo"}},
- sort: {score: {$meta: "textScore"}},
- projection: {score: {$meta: "textScore"}}
- });
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 3);
- assert.eq(cmdRes.cursor.firstBatch[0]["_id"], 5);
- assert.eq(cmdRes.cursor.firstBatch[1]["_id"], -9);
- assert.eq(cmdRes.cursor.firstBatch[2]["_id"], -1);
-
- // User projection on $sortKey is illegal.
- cmdRes = db.runCommand({find: coll.getName(), projection: {$sortKey: 1}, sort: {_id: 1}});
- assert.commandFailed(cmdRes);
- cmdRes = db.runCommand(
- {find: coll.getName(), projection: {$sortKey: {$meta: 'sortKey'}}, sort: {_id: 1}});
- assert.commandFailed(cmdRes);
-
- // User should be able to issue a sortKey meta-projection, as long as it's not on the reserved
- // $sortKey field.
- cmdRes = db.runCommand({
- find: coll.getName(),
- projection: {_id: 0, a: 0, b: 0, key: {$meta: 'sortKey'}},
- sort: {_id: 1}
- });
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 6);
- assert.eq(cmdRes.cursor.firstBatch[0], {key: {"": -9}});
- assert.eq(cmdRes.cursor.firstBatch[1], {key: {"": -5}});
- assert.eq(cmdRes.cursor.firstBatch[2], {key: {"": -1}});
- assert.eq(cmdRes.cursor.firstBatch[3], {key: {"": 1}});
- assert.eq(cmdRes.cursor.firstBatch[4], {key: {"": 5}});
- assert.eq(cmdRes.cursor.firstBatch[5], {key: {"": 9}});
-
- st.stop();
+"use strict";
+
+var cmdRes;
+var cursorId;
+
+var st = new ShardingTest({shards: 2});
+st.stopBalancer();
+
+// Set up a collection sharded by "_id" with one chunk on each of the two shards.
+var db = st.s.getDB("test");
+var coll = db.getCollection("find_getmore_cmd");
+
+coll.drop();
+assert.writeOK(coll.insert({_id: -9, a: 4, b: "foo foo"}));
+assert.writeOK(coll.insert({_id: -5, a: 8}));
+assert.writeOK(coll.insert({_id: -1, a: 10, b: "foo"}));
+assert.writeOK(coll.insert({_id: 1, a: 5}));
+assert.writeOK(coll.insert({_id: 5, a: 20, b: "foo foo foo"}));
+assert.writeOK(coll.insert({_id: 9, a: 3}));
+
+assert.commandWorked(coll.ensureIndex({b: "text"}));
+
+assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
+st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+db.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}});
+assert.commandWorked(db.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(
+ db.adminCommand({moveChunk: coll.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
+
+// Find with no options.
+cmdRes = db.runCommand({find: coll.getName()});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 6);
+
+// Find with batchSize greater than the number of docs residing on each shard. This means that a
+// getMore is required between mongos and the shell, but no getMores are issued between mongos
+// and mongod.
+cmdRes = db.runCommand({find: coll.getName(), batchSize: 4});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 4);
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.nextBatch.length, 2);
+
+// Find with batchSize less than the number of docs residing on each shard. This time getMores
+// will be issued between mongos and mongod.
+cmdRes = db.runCommand({find: coll.getName(), batchSize: 2});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 2);
+cursorId = cmdRes.cursor.id;
+cmdRes = db.runCommand({getMore: cursorId, collection: coll.getName(), batchSize: 2});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, cursorId);
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.nextBatch.length, 2);
+cmdRes = db.runCommand({getMore: cursorId, collection: coll.getName()});
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.nextBatch.length, 2);
+
+// Combine skip, limit, and sort.
+cmdRes = db.runCommand({find: coll.getName(), skip: 4, limit: 1, sort: {_id: -1}});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 1);
+assert.eq(cmdRes.cursor.firstBatch[0], {_id: -5, a: 8});
+
+// Find where adding limit/ntoreturn and skip overflows.
+var largeInt = new NumberLong('9223372036854775807');
+cmdRes = db.runCommand({find: coll.getName(), skip: largeInt, limit: largeInt});
+assert.commandFailed(cmdRes);
+cmdRes = db.runCommand({find: coll.getName(), skip: largeInt, ntoreturn: largeInt});
+assert.commandFailed(cmdRes);
+cmdRes =
+ db.runCommand({find: coll.getName(), skip: largeInt, ntoreturn: largeInt, singleBatch: true});
+assert.commandFailed(cmdRes);
+
+// A predicate with $where.
+cmdRes = db.runCommand({find: coll.getName(), filter: {$where: "this._id == 5"}});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 1);
+assert.eq(cmdRes.cursor.firstBatch[0], {_id: 5, a: 20, b: "foo foo foo"});
+
+// Tailable option should result in a failure because the collection is not capped.
+cmdRes = db.runCommand({find: coll.getName(), tailable: true});
+assert.commandFailed(cmdRes);
+
+// $natural sort.
+cmdRes = db.runCommand({find: coll.getName(), sort: {$natural: 1}});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 6);
+
+// Should be able to sort despite projecting out the sort key.
+cmdRes = db.runCommand({find: coll.getName(), sort: {a: 1}, projection: {_id: 1}});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 6);
+assert.eq(cmdRes.cursor.firstBatch[0], {_id: 9});
+assert.eq(cmdRes.cursor.firstBatch[1], {_id: -9});
+assert.eq(cmdRes.cursor.firstBatch[2], {_id: 1});
+assert.eq(cmdRes.cursor.firstBatch[3], {_id: -5});
+assert.eq(cmdRes.cursor.firstBatch[4], {_id: -1});
+assert.eq(cmdRes.cursor.firstBatch[5], {_id: 5});
+
+// Ensure textScore meta-sort works in mongos.
+cmdRes = db.runCommand({
+ find: coll.getName(),
+ filter: {$text: {$search: "foo"}},
+ sort: {score: {$meta: "textScore"}},
+ projection: {score: {$meta: "textScore"}}
+});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 3);
+assert.eq(cmdRes.cursor.firstBatch[0]["_id"], 5);
+assert.eq(cmdRes.cursor.firstBatch[1]["_id"], -9);
+assert.eq(cmdRes.cursor.firstBatch[2]["_id"], -1);
+
+// User projection on $sortKey is illegal.
+cmdRes = db.runCommand({find: coll.getName(), projection: {$sortKey: 1}, sort: {_id: 1}});
+assert.commandFailed(cmdRes);
+cmdRes = db.runCommand(
+ {find: coll.getName(), projection: {$sortKey: {$meta: 'sortKey'}}, sort: {_id: 1}});
+assert.commandFailed(cmdRes);
+
+// User should be able to issue a sortKey meta-projection, as long as it's not on the reserved
+// $sortKey field.
+cmdRes = db.runCommand({
+ find: coll.getName(),
+ projection: {_id: 0, a: 0, b: 0, key: {$meta: 'sortKey'}},
+ sort: {_id: 1}
+});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 6);
+assert.eq(cmdRes.cursor.firstBatch[0], {key: {"": -9}});
+assert.eq(cmdRes.cursor.firstBatch[1], {key: {"": -5}});
+assert.eq(cmdRes.cursor.firstBatch[2], {key: {"": -1}});
+assert.eq(cmdRes.cursor.firstBatch[3], {key: {"": 1}});
+assert.eq(cmdRes.cursor.firstBatch[4], {key: {"": 5}});
+assert.eq(cmdRes.cursor.firstBatch[5], {key: {"": 9}});
+
+st.stop();
})();
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
index 59d2745861f..001a9a386d7 100644
--- a/jstests/sharding/findandmodify1.js
+++ b/jstests/sharding/findandmodify1.js
@@ -1,80 +1,78 @@
(function() {
- 'use strict';
-
- var s = new ShardingTest({shards: 2});
-
- // Make sure that findAndModify with upsert against a non-existent database and collection will
- // implicitly create them both
- assert.eq(undefined,
- assert.commandWorked(s.s0.adminCommand({listDatabases: 1, nameOnly: 1}))
- .databases.find((dbInfo) => {
- return (dbInfo.name === 'NewUnshardedDB');
- }));
-
- var newlyCreatedDb = s.getDB('NewUnshardedDB');
- assert.eq(0, newlyCreatedDb.unsharded_coll.find({}).itcount());
- newlyCreatedDb.unsharded_coll.findAndModify(
- {query: {_id: 1}, update: {$set: {Value: 'Value'}}, upsert: true});
- assert.eq(1, newlyCreatedDb.unsharded_coll.find({}).itcount());
-
- // Tests with sharded database
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.sharded_coll", key: {_id: 1}}));
-
- var db = s.getDB('test');
-
- var numObjs = 20;
-
- // Pre-split the collection so to avoid interference from auto-split
- assert.commandWorked(
- s.s0.adminCommand({split: "test.sharded_coll", middle: {_id: numObjs / 2}}));
- assert.commandWorked(s.s0.adminCommand(
- {movechunk: "test.sharded_coll", find: {_id: numObjs / 2}, to: s.shard0.shardName}));
-
- var bulk = db.sharded_coll.initializeUnorderedBulkOp();
- for (var i = 0; i < numObjs; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
-
- // Put two docs in each chunk (avoid the split in 0, since there are no docs less than 0)
- for (var i = 2; i < numObjs; i += 2) {
- if (i == numObjs / 2)
- continue;
-
- assert.commandWorked(s.s0.adminCommand({split: "test.sharded_coll", middle: {_id: i}}));
- }
-
- s.printChunks();
- assert.eq(
- numObjs / 2, s.config.chunks.count({"ns": "test.sharded_coll"}), 'Split was incorrect');
- assert.eq(numObjs / 4,
- s.config.chunks.count({shard: s.shard0.shardName, "ns": "test.sharded_coll"}));
- assert.eq(numObjs / 4,
- s.config.chunks.count({shard: s.shard1.shardName, "ns": "test.sharded_coll"}));
-
- // update
- for (var i = 0; i < numObjs; i++) {
- assert.eq(db.sharded_coll.count({b: 1}), i, "2 A");
-
- var out = db.sharded_coll.findAndModify({query: {_id: i, b: null}, update: {$set: {b: 1}}});
- assert.eq(out._id, i, "2 E");
-
- assert.eq(db.sharded_coll.count({b: 1}), i + 1, "2 B");
- }
-
- // remove
- for (var i = 0; i < numObjs; i++) {
- assert.eq(db.sharded_coll.count(), numObjs - i, "3 A");
- assert.eq(db.sharded_coll.count({_id: i}), 1, "3 B");
-
- var out = db.sharded_coll.findAndModify({remove: true, query: {_id: i}});
-
- assert.eq(db.sharded_coll.count(), numObjs - i - 1, "3 C");
- assert.eq(db.sharded_coll.count({_id: i}), 0, "3 D");
- assert.eq(out._id, i, "3 E");
- }
-
- s.stop();
+'use strict';
+
+var s = new ShardingTest({shards: 2});
+
+// Make sure that findAndModify with upsert against a non-existent database and collection will
+// implicitly create them both
+assert.eq(undefined,
+ assert.commandWorked(s.s0.adminCommand({listDatabases: 1, nameOnly: 1}))
+ .databases.find((dbInfo) => {
+ return (dbInfo.name === 'NewUnshardedDB');
+ }));
+
+var newlyCreatedDb = s.getDB('NewUnshardedDB');
+assert.eq(0, newlyCreatedDb.unsharded_coll.find({}).itcount());
+newlyCreatedDb.unsharded_coll.findAndModify(
+ {query: {_id: 1}, update: {$set: {Value: 'Value'}}, upsert: true});
+assert.eq(1, newlyCreatedDb.unsharded_coll.find({}).itcount());
+
+// Tests with sharded database
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.sharded_coll", key: {_id: 1}}));
+
+var db = s.getDB('test');
+
+var numObjs = 20;
+
+// Pre-split the collection to avoid interference from auto-split
+assert.commandWorked(s.s0.adminCommand({split: "test.sharded_coll", middle: {_id: numObjs / 2}}));
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.sharded_coll", find: {_id: numObjs / 2}, to: s.shard0.shardName}));
+
+var bulk = db.sharded_coll.initializeUnorderedBulkOp();
+for (var i = 0; i < numObjs; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
+
+// Put two docs in each chunk (avoid the split in 0, since there are no docs less than 0)
+for (var i = 2; i < numObjs; i += 2) {
+ if (i == numObjs / 2)
+ continue;
+
+ assert.commandWorked(s.s0.adminCommand({split: "test.sharded_coll", middle: {_id: i}}));
+}
+
+s.printChunks();
+assert.eq(numObjs / 2, s.config.chunks.count({"ns": "test.sharded_coll"}), 'Split was incorrect');
+assert.eq(numObjs / 4,
+ s.config.chunks.count({shard: s.shard0.shardName, "ns": "test.sharded_coll"}));
+assert.eq(numObjs / 4,
+ s.config.chunks.count({shard: s.shard1.shardName, "ns": "test.sharded_coll"}));
+
+// update
+for (var i = 0; i < numObjs; i++) {
+ assert.eq(db.sharded_coll.count({b: 1}), i, "2 A");
+
+ var out = db.sharded_coll.findAndModify({query: {_id: i, b: null}, update: {$set: {b: 1}}});
+ assert.eq(out._id, i, "2 E");
+
+ assert.eq(db.sharded_coll.count({b: 1}), i + 1, "2 B");
+}
+
+// remove
+for (var i = 0; i < numObjs; i++) {
+ assert.eq(db.sharded_coll.count(), numObjs - i, "3 A");
+ assert.eq(db.sharded_coll.count({_id: i}), 1, "3 B");
+
+ var out = db.sharded_coll.findAndModify({remove: true, query: {_id: i}});
+
+ assert.eq(db.sharded_coll.count(), numObjs - i - 1, "3 C");
+ assert.eq(db.sharded_coll.count({_id: i}), 0, "3 D");
+ assert.eq(out._id, i, "3 E");
+}
+
+s.stop();
})();
diff --git a/jstests/sharding/findandmodify2.js b/jstests/sharding/findandmodify2.js
index a1aa58ffefb..17af6c1a685 100644
--- a/jstests/sharding/findandmodify2.js
+++ b/jstests/sharding/findandmodify2.js
@@ -1,124 +1,124 @@
(function() {
- 'use strict';
- load('jstests/sharding/autosplit_include.js');
-
- var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
-
- var db = s.getDB("test");
- s.ensurePrimaryShard('test', s.shard1.shardName);
- var primary = s.getPrimaryShard("test").getDB("test");
- var secondary = s.getOther(primary).getDB("test");
-
- var n = 100;
- var collection = "stuff";
- var minChunks = 2;
-
- var col_update = collection + '_col_update';
- var col_update_upsert = col_update + '_upsert';
- var col_fam = collection + '_col_fam';
- var col_fam_upsert = col_fam + '_upsert';
-
- var big = "x";
- for (var i = 0; i < 15; i++) {
- big += big;
+'use strict';
+load('jstests/sharding/autosplit_include.js');
+
+var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+
+var db = s.getDB("test");
+s.ensurePrimaryShard('test', s.shard1.shardName);
+var primary = s.getPrimaryShard("test").getDB("test");
+var secondary = s.getOther(primary).getDB("test");
+
+var n = 100;
+var collection = "stuff";
+var minChunks = 2;
+
+var col_update = collection + '_col_update';
+var col_update_upsert = col_update + '_upsert';
+var col_fam = collection + '_col_fam';
+var col_fam_upsert = col_fam + '_upsert';
+
+var big = "x";
+for (var i = 0; i < 15; i++) {
+ big += big;
+}
+
+// drop the collection
+db[col_update].drop();
+db[col_update_upsert].drop();
+db[col_fam].drop();
+db[col_fam_upsert].drop();
+
+// shard the collection on _id
+s.adminCommand({shardcollection: 'test.' + col_update, key: {_id: 1}});
+s.adminCommand({shardcollection: 'test.' + col_update_upsert, key: {_id: 1}});
+s.adminCommand({shardcollection: 'test.' + col_fam, key: {_id: 1}});
+s.adminCommand({shardcollection: 'test.' + col_fam_upsert, key: {_id: 1}});
+
+// update via findAndModify
+function via_fam() {
+ for (var i = 0; i < n; i++) {
+ db[col_fam].save({_id: i});
}
- // drop the collection
- db[col_update].drop();
- db[col_update_upsert].drop();
- db[col_fam].drop();
- db[col_fam_upsert].drop();
-
- // shard the collection on _id
- s.adminCommand({shardcollection: 'test.' + col_update, key: {_id: 1}});
- s.adminCommand({shardcollection: 'test.' + col_update_upsert, key: {_id: 1}});
- s.adminCommand({shardcollection: 'test.' + col_fam, key: {_id: 1}});
- s.adminCommand({shardcollection: 'test.' + col_fam_upsert, key: {_id: 1}});
-
- // update via findAndModify
- function via_fam() {
- for (var i = 0; i < n; i++) {
- db[col_fam].save({_id: i});
- }
-
- for (var i = 0; i < n; i++) {
- db[col_fam].findAndModify({query: {_id: i}, update: {$set: {big: big}}});
- }
+ for (var i = 0; i < n; i++) {
+ db[col_fam].findAndModify({query: {_id: i}, update: {$set: {big: big}}});
}
+}
- // upsert via findAndModify
- function via_fam_upsert() {
- for (var i = 0; i < n; i++) {
- db[col_fam_upsert].findAndModify(
- {query: {_id: i}, update: {$set: {big: big}}, upsert: true});
- }
+// upsert via findAndModify
+function via_fam_upsert() {
+ for (var i = 0; i < n; i++) {
+ db[col_fam_upsert].findAndModify(
+ {query: {_id: i}, update: {$set: {big: big}}, upsert: true});
}
+}
- // update data using basic update
- function via_update() {
- for (var i = 0; i < n; i++) {
- db[col_update].save({_id: i});
- }
+// update data using basic update
+function via_update() {
+ for (var i = 0; i < n; i++) {
+ db[col_update].save({_id: i});
+ }
- for (var i = 0; i < n; i++) {
- db[col_update].update({_id: i}, {$set: {big: big}});
- }
+ for (var i = 0; i < n; i++) {
+ db[col_update].update({_id: i}, {$set: {big: big}});
}
+}
- // upsert data using basic update
- function via_update_upsert() {
- for (var i = 0; i < n; i++) {
- db[col_update_upsert].update({_id: i}, {$set: {big: big}}, true);
- }
+// upsert data using basic update
+function via_update_upsert() {
+ for (var i = 0; i < n; i++) {
+ db[col_update_upsert].update({_id: i}, {$set: {big: big}}, true);
}
+}
- print("---------- Update via findAndModify...");
- via_fam();
- waitForOngoingChunkSplits(s);
+print("---------- Update via findAndModify...");
+via_fam();
+waitForOngoingChunkSplits(s);
- print("---------- Done.");
+print("---------- Done.");
- print("---------- Upsert via findAndModify...");
- via_fam_upsert();
- waitForOngoingChunkSplits(s);
+print("---------- Upsert via findAndModify...");
+via_fam_upsert();
+waitForOngoingChunkSplits(s);
- print("---------- Done.");
+print("---------- Done.");
- print("---------- Basic update...");
- via_update();
- waitForOngoingChunkSplits(s);
+print("---------- Basic update...");
+via_update();
+waitForOngoingChunkSplits(s);
- print("---------- Done.");
+print("---------- Done.");
- print("---------- Basic update with upsert...");
- via_update_upsert();
- waitForOngoingChunkSplits(s);
+print("---------- Basic update with upsert...");
+via_update_upsert();
+waitForOngoingChunkSplits(s);
- print("---------- Done.");
+print("---------- Done.");
- print("---------- Printing chunks:");
- s.printChunks();
+print("---------- Printing chunks:");
+s.printChunks();
- print("---------- Verifying that both codepaths resulted in splits...");
- assert.gte(s.config.chunks.count({"ns": "test." + col_fam}),
- minChunks,
- "findAndModify update code path didn't result in splits");
- assert.gte(s.config.chunks.count({"ns": "test." + col_fam_upsert}),
- minChunks,
- "findAndModify upsert code path didn't result in splits");
- assert.gte(s.config.chunks.count({"ns": "test." + col_update}),
- minChunks,
- "update code path didn't result in splits");
- assert.gte(s.config.chunks.count({"ns": "test." + col_update_upsert}),
- minChunks,
- "upsert code path didn't result in splits");
+print("---------- Verifying that both codepaths resulted in splits...");
+assert.gte(s.config.chunks.count({"ns": "test." + col_fam}),
+ minChunks,
+ "findAndModify update code path didn't result in splits");
+assert.gte(s.config.chunks.count({"ns": "test." + col_fam_upsert}),
+ minChunks,
+ "findAndModify upsert code path didn't result in splits");
+assert.gte(s.config.chunks.count({"ns": "test." + col_update}),
+ minChunks,
+ "update code path didn't result in splits");
+assert.gte(s.config.chunks.count({"ns": "test." + col_update_upsert}),
+ minChunks,
+ "upsert code path didn't result in splits");
- printjson(db[col_update].stats());
+printjson(db[col_update].stats());
- // ensure that all chunks are smaller than chunkSize
- // make sure not teensy
- // test update without upsert and with upsert
+// ensure that all chunks are smaller than chunkSize
+// make sure the chunks are not too small
+// test update without upsert and with upsert
- s.stop();
+s.stop();
})();
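A small sketch for eyeballing how the auto-splits verified above ended up distributed; it uses one of the namespaces this test creates ("test.stuff_col_fam") and the ShardingTest handle s:

    s.config.chunks.find({ns: "test.stuff_col_fam"}).sort({min: 1}).forEach(function(chunk) {
        print("chunk " + tojson(chunk.min) + " -> " + tojson(chunk.max) + " on " + chunk.shard);
    });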
diff --git a/jstests/sharding/geo_near_random1.js b/jstests/sharding/geo_near_random1.js
index 260c37fea0d..0548e74f027 100644
--- a/jstests/sharding/geo_near_random1.js
+++ b/jstests/sharding/geo_near_random1.js
@@ -5,47 +5,47 @@
load("jstests/libs/geo_near_random.js");
(function() {
- 'use strict';
-
- var testName = "geo_near_random1";
- var s = new ShardingTest({shards: 3});
-
- var db = s.getDB("test");
-
- var test = new GeoNearRandomTest(testName, db);
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: 'test'}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: ('test.' + testName), key: {_id: 1}}));
-
- test.insertPts(50);
- var shardList = [s.shard0.shardName, s.shard1.shardName, s.shard2.shardName];
- for (var i = (test.nPts / 10); i < test.nPts; i += (test.nPts / 10)) {
- assert.commandWorked(s.s0.adminCommand({split: ('test.' + testName), middle: {_id: i}}));
- try {
- assert.commandWorked(s.s0.adminCommand({
- moveChunk: ('test.' + testName),
- find: {_id: i - 1},
- to: (shardList[i % 3]),
- _waitForDelete: true
- }));
- } catch (e) {
- // ignore this error
- if (!e.message.match(/that chunk is already on that shard/)) {
- throw e;
- }
+'use strict';
+
+var testName = "geo_near_random1";
+var s = new ShardingTest({shards: 3});
+
+var db = s.getDB("test");
+
+var test = new GeoNearRandomTest(testName, db);
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: 'test'}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: ('test.' + testName), key: {_id: 1}}));
+
+test.insertPts(50);
+var shardList = [s.shard0.shardName, s.shard1.shardName, s.shard2.shardName];
+for (var i = (test.nPts / 10); i < test.nPts; i += (test.nPts / 10)) {
+ assert.commandWorked(s.s0.adminCommand({split: ('test.' + testName), middle: {_id: i}}));
+ try {
+ assert.commandWorked(s.s0.adminCommand({
+ moveChunk: ('test.' + testName),
+ find: {_id: i - 1},
+ to: (shardList[i % 3]),
+ _waitForDelete: true
+ }));
+ } catch (e) {
+ // ignore this error
+ if (!e.message.match(/that chunk is already on that shard/)) {
+ throw e;
}
}
+}
- // Turn balancer back on, for actual tests
- // s.startBalancer(); // SERVER-13365
+// Turn balancer back on, for actual tests
+// s.startBalancer(); // SERVER-13365
- var opts = {};
- test.testPt([0, 0], opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
+var opts = {};
+test.testPt([0, 0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/geo_near_random2.js b/jstests/sharding/geo_near_random2.js
index 0b874e5aafe..43b89b77392 100644
--- a/jstests/sharding/geo_near_random2.js
+++ b/jstests/sharding/geo_near_random2.js
@@ -5,54 +5,54 @@
load("jstests/libs/geo_near_random.js");
(function() {
- 'use strict';
-
- var testName = "geo_near_random2";
- var s = new ShardingTest({shards: 3});
-
- var db = s.getDB("test");
-
- var test = new GeoNearRandomTest(testName, db);
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: 'test'}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: ('test.' + testName), key: {_id: 1}}));
-
- test.insertPts(5000);
- var shardList = [s.shard0.shardName, s.shard1.shardName, s.shard2.shardName];
- for (var i = (test.nPts / 10); i < test.nPts; i += (test.nPts / 10)) {
- assert.commandWorked(s.s0.adminCommand({split: ('test.' + testName), middle: {_id: i}}));
- try {
- assert.commandWorked(s.s0.adminCommand({
- moveChunk: ('test.' + testName),
- find: {_id: i - 1},
- to: shardList[i % 3],
- _waitForDelete: true
- }));
- } catch (e) {
- // ignore this error
- if (!e.message.match(/that chunk is already on that shard/)) {
- throw e;
- }
+'use strict';
+
+var testName = "geo_near_random2";
+var s = new ShardingTest({shards: 3});
+
+var db = s.getDB("test");
+
+var test = new GeoNearRandomTest(testName, db);
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: 'test'}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: ('test.' + testName), key: {_id: 1}}));
+
+test.insertPts(5000);
+var shardList = [s.shard0.shardName, s.shard1.shardName, s.shard2.shardName];
+for (var i = (test.nPts / 10); i < test.nPts; i += (test.nPts / 10)) {
+ assert.commandWorked(s.s0.adminCommand({split: ('test.' + testName), middle: {_id: i}}));
+ try {
+ assert.commandWorked(s.s0.adminCommand({
+ moveChunk: ('test.' + testName),
+ find: {_id: i - 1},
+ to: shardList[i % 3],
+ _waitForDelete: true
+ }));
+ } catch (e) {
+ // ignore this error
+ if (!e.message.match(/that chunk is already on that shard/)) {
+ throw e;
}
}
-
- // Turn balancer back on, for actual tests
- // s.startBalancer(); // SERVER-13365
-
- var opts = {sphere: 0, nToTest: test.nPts * 0.01};
- test.testPt([0, 0], opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
-
- opts.sphere = 1;
- test.testPt([0, 0], opts);
- test.testPt(test.mkPt(0.8), opts);
- test.testPt(test.mkPt(0.8), opts);
- test.testPt(test.mkPt(0.8), opts);
- test.testPt(test.mkPt(0.8), opts);
-
- s.stop();
+}
+
+// Turn balancer back on, for actual tests
+// s.startBalancer(); // SERVER-13365
+
+var opts = {sphere: 0, nToTest: test.nPts * 0.01};
+test.testPt([0, 0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+
+opts.sphere = 1;
+test.testPt([0, 0], opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+
+s.stop();
})();
diff --git a/jstests/sharding/geo_near_sharded.js b/jstests/sharding/geo_near_sharded.js
index 361468cec18..714876e7fda 100644
--- a/jstests/sharding/geo_near_sharded.js
+++ b/jstests/sharding/geo_near_sharded.js
@@ -1,67 +1,67 @@
// SERVER-7906
(function() {
- 'use strict';
+'use strict';
- var coll = 'points';
+var coll = 'points';
- function test(st, db, sharded, indexType) {
- printjson(db);
+function test(st, db, sharded, indexType) {
+ printjson(db);
- if (sharded) {
- var shards = [st.shard0, st.shard1, st.shard2];
+ if (sharded) {
+ var shards = [st.shard0, st.shard1, st.shard2];
+ assert.commandWorked(
+ st.s0.adminCommand({shardCollection: db[coll].getFullName(), key: {rand: 1}}));
+ for (var i = 1; i < 10; i++) {
+ // split at 0.1, 0.2, ... 0.9
assert.commandWorked(
- st.s0.adminCommand({shardCollection: db[coll].getFullName(), key: {rand: 1}}));
- for (var i = 1; i < 10; i++) {
- // split at 0.1, 0.2, ... 0.9
- assert.commandWorked(
- st.s0.adminCommand({split: db[coll].getFullName(), middle: {rand: i / 10}}));
- assert.commandWorked(st.s0.adminCommand({
- moveChunk: db[coll].getFullName(),
- find: {rand: i / 10},
- to: shards[i % shards.length].shardName
- }));
- }
-
- var config = db.getSiblingDB("config");
- assert.eq(config.chunks.count({'ns': db[coll].getFullName()}), 10);
+ st.s0.adminCommand({split: db[coll].getFullName(), middle: {rand: i / 10}}));
+ assert.commandWorked(st.s0.adminCommand({
+ moveChunk: db[coll].getFullName(),
+ find: {rand: i / 10},
+ to: shards[i % shards.length].shardName
+ }));
}
- Random.setRandomSeed();
-
- var bulk = db[coll].initializeUnorderedBulkOp();
- var numPts = 10 * 1000;
- for (var i = 0; i < numPts; i++) {
- var lat = 90 - Random.rand() * 180;
- var lng = 180 - Random.rand() * 360;
- bulk.insert({rand: Math.random(), loc: [lng, lat]});
- }
- assert.writeOK(bulk.execute());
- assert.eq(db[coll].count(), numPts);
+ var config = db.getSiblingDB("config");
+ assert.eq(config.chunks.count({'ns': db[coll].getFullName()}), 10);
+ }
- assert.commandWorked(db[coll].ensureIndex({loc: indexType}));
+ Random.setRandomSeed();
- let res = assert.commandWorked(db.runCommand({
- aggregate: coll,
- cursor: {},
- pipeline: [{
- $geoNear: {
- near: [0, 0],
- spherical: true,
- includeLocs: "match",
- distanceField: "dist",
- }
- }]
- }),
- tojson({sharded: sharded, indexType: indexType}));
- assert.gt(res.cursor.firstBatch.length, 0, tojson(res));
+ var bulk = db[coll].initializeUnorderedBulkOp();
+ var numPts = 10 * 1000;
+ for (var i = 0; i < numPts; i++) {
+ var lat = 90 - Random.rand() * 180;
+ var lng = 180 - Random.rand() * 360;
+ bulk.insert({rand: Math.random(), loc: [lng, lat]});
}
+ assert.writeOK(bulk.execute());
+ assert.eq(db[coll].count(), numPts);
+
+ assert.commandWorked(db[coll].ensureIndex({loc: indexType}));
+
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: coll,
+ cursor: {},
+ pipeline: [{
+ $geoNear: {
+ near: [0, 0],
+ spherical: true,
+ includeLocs: "match",
+ distanceField: "dist",
+ }
+ }]
+ }),
+ tojson({sharded: sharded, indexType: indexType}));
+ assert.gt(res.cursor.firstBatch.length, 0, tojson(res));
+}
- // TODO: SERVER-33954 Remove shardAsReplicaSet: false
- var st = new ShardingTest({shards: 3, mongos: 1, other: {shardAsReplicaSet: false}});
- assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
+// TODO: SERVER-33954 Remove shardAsReplicaSet: false
+var st = new ShardingTest({shards: 3, mongos: 1, other: {shardAsReplicaSet: false}});
+assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
- test(st, st.getDB('test'), true, '2dsphere');
- st.stop();
+test(st, st.getDB('test'), true, '2dsphere');
+st.stop();
})();
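For reference, a stripped-down version of the $geoNear aggregation this test runs; it assumes db is a mongos handle for the "test" database and that the "points" collection already has the 2dsphere index on "loc" created above. $geoNear has to be the first stage of the pipeline:

    var res = db.points.aggregate([{
        $geoNear: {
            near: {type: "Point", coordinates: [0, 0]},
            distanceField: "dist",  // computed distance is written into this field
            spherical: true
        }
    }]).toArray();
    printjson(res.slice(0, 3));  // nearest documents come back first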
diff --git a/jstests/sharding/geo_near_sort.js b/jstests/sharding/geo_near_sort.js
index fa839a78551..e2f0292904e 100644
--- a/jstests/sharding/geo_near_sort.js
+++ b/jstests/sharding/geo_near_sort.js
@@ -1,69 +1,77 @@
// Tests that the sort specification is obeyed when the query contains $near/$nearSphere.
(function() {
- 'use strict';
+'use strict';
- const st = new ShardingTest({shards: 2});
- const db = st.getDB("test");
- const coll = db.geo_near_sort;
- const caseInsensitive = {locale: "en_US", strength: 2};
+const st = new ShardingTest({shards: 2});
+const db = st.getDB("test");
+const coll = db.geo_near_sort;
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
- assert.commandWorked(st.s0.adminCommand({enableSharding: db.getName()}));
- st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: db.getName()}));
+st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
- // Split the data into 2 chunks and move the chunk with _id > 0 to shard 1.
- assert.commandWorked(st.s0.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(st.s0.adminCommand(
- {movechunk: coll.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
+// Split the data into 2 chunks and move the chunk with _id > 0 to shard 1.
+assert.commandWorked(st.s0.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(
+ st.s0.adminCommand({movechunk: coll.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
- // Insert some documents. The sort order by distance from the origin is [-2, 1, -1, 2] (under 2d
- // or 2dsphere geometry). The sort order by {a: 1} under the case-insensitive collation is [2,
- // -1, 1, -2]. The sort order by {b: 1} is [2. -1, 1, -2].
- const docMinus2 = {_id: -2, geo: [0, 0], a: "BB", b: 3};
- const docMinus1 = {_id: -1, geo: [0, 2], a: "aB", b: 1};
- const doc1 = {_id: 1, geo: [0, 1], a: "Ba", b: 2};
- const doc2 = {_id: 2, geo: [0, 3], a: "aa", b: 0};
- assert.writeOK(coll.insert(docMinus2));
- assert.writeOK(coll.insert(docMinus1));
- assert.writeOK(coll.insert(doc1));
- assert.writeOK(coll.insert(doc2));
+// Insert some documents. The sort order by distance from the origin is [-2, 1, -1, 2] (under 2d
+// or 2dsphere geometry). The sort order by {a: 1} under the case-insensitive collation is [2,
+// -1, 1, -2]. The sort order by {b: 1} is [2, -1, 1, -2].
+const docMinus2 = {
+ _id: -2,
+ geo: [0, 0],
+ a: "BB",
+ b: 3
+};
+const docMinus1 = {
+ _id: -1,
+ geo: [0, 2],
+ a: "aB",
+ b: 1
+};
+const doc1 = {
+ _id: 1,
+ geo: [0, 1],
+ a: "Ba",
+ b: 2
+};
+const doc2 = {
+ _id: 2,
+ geo: [0, 3],
+ a: "aa",
+ b: 0
+};
+assert.writeOK(coll.insert(docMinus2));
+assert.writeOK(coll.insert(docMinus1));
+assert.writeOK(coll.insert(doc1));
+assert.writeOK(coll.insert(doc2));
- function testSortOrders(query, indexSpec) {
- assert.commandWorked(coll.createIndex(indexSpec));
+function testSortOrders(query, indexSpec) {
+ assert.commandWorked(coll.createIndex(indexSpec));
- // Test a $near/$nearSphere query without a specified sort. The results should be sorted by
- // distance from the origin.
- let res = coll.find(query).toArray();
- assert.eq(res.length, 4, tojson(res));
- assert.eq(res[0], docMinus2, tojson(res));
- assert.eq(res[1], doc1, tojson(res));
- assert.eq(res[2], docMinus1, tojson(res));
- assert.eq(res[3], doc2, tojson(res));
-
- // Test with a limit.
- res = coll.find(query).limit(2).toArray();
- assert.eq(res.length, 2, tojson(res));
- assert.eq(res[0], docMinus2, tojson(res));
- assert.eq(res[1], doc1, tojson(res));
-
- if (db.getMongo().useReadCommands()) {
- // Test a $near/$nearSphere query sorted by {a: 1} with the case-insensitive collation.
- res = coll.find(query).collation(caseInsensitive).sort({a: 1}).toArray();
- assert.eq(res.length, 4, tojson(res));
- assert.eq(res[0], doc2, tojson(res));
- assert.eq(res[1], docMinus1, tojson(res));
- assert.eq(res[2], doc1, tojson(res));
- assert.eq(res[3], docMinus2, tojson(res));
+ // Test a $near/$nearSphere query without a specified sort. The results should be sorted by
+ // distance from the origin.
+ let res = coll.find(query).toArray();
+ assert.eq(res.length, 4, tojson(res));
+ assert.eq(res[0], docMinus2, tojson(res));
+ assert.eq(res[1], doc1, tojson(res));
+ assert.eq(res[2], docMinus1, tojson(res));
+ assert.eq(res[3], doc2, tojson(res));
- // Test with a limit.
- res = coll.find(query).collation(caseInsensitive).sort({a: 1}).limit(2).toArray();
- assert.eq(res.length, 2, tojson(res));
- assert.eq(res[0], doc2, tojson(res));
- assert.eq(res[1], docMinus1, tojson(res));
- }
+ // Test with a limit.
+ res = coll.find(query).limit(2).toArray();
+ assert.eq(res.length, 2, tojson(res));
+ assert.eq(res[0], docMinus2, tojson(res));
+ assert.eq(res[1], doc1, tojson(res));
- // Test a $near/$nearSphere query sorted by {b: 1}.
- res = coll.find(query).sort({b: 1}).toArray();
+ if (db.getMongo().useReadCommands()) {
+ // Test a $near/$nearSphere query sorted by {a: 1} with the case-insensitive collation.
+ res = coll.find(query).collation(caseInsensitive).sort({a: 1}).toArray();
assert.eq(res.length, 4, tojson(res));
assert.eq(res[0], doc2, tojson(res));
assert.eq(res[1], docMinus1, tojson(res));
@@ -71,20 +79,35 @@
assert.eq(res[3], docMinus2, tojson(res));
// Test with a limit.
- res = coll.find(query).sort({b: 1}).limit(2).toArray();
+ res = coll.find(query).collation(caseInsensitive).sort({a: 1}).limit(2).toArray();
assert.eq(res.length, 2, tojson(res));
assert.eq(res[0], doc2, tojson(res));
assert.eq(res[1], docMinus1, tojson(res));
-
- assert.commandWorked(coll.dropIndex(indexSpec));
}
- testSortOrders({geo: {$near: [0, 0]}}, {geo: "2d"});
- testSortOrders({geo: {$nearSphere: [0, 0]}}, {geo: "2d"});
- testSortOrders({geo: {$near: {$geometry: {type: "Point", coordinates: [0, 0]}}}},
- {geo: "2dsphere"});
- testSortOrders({geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}},
- {geo: "2dsphere"});
+ // Test a $near/$nearSphere query sorted by {b: 1}.
+ res = coll.find(query).sort({b: 1}).toArray();
+ assert.eq(res.length, 4, tojson(res));
+ assert.eq(res[0], doc2, tojson(res));
+ assert.eq(res[1], docMinus1, tojson(res));
+ assert.eq(res[2], doc1, tojson(res));
+ assert.eq(res[3], docMinus2, tojson(res));
+
+ // Test with a limit.
+ res = coll.find(query).sort({b: 1}).limit(2).toArray();
+ assert.eq(res.length, 2, tojson(res));
+ assert.eq(res[0], doc2, tojson(res));
+ assert.eq(res[1], docMinus1, tojson(res));
+
+ assert.commandWorked(coll.dropIndex(indexSpec));
+}
+
+testSortOrders({geo: {$near: [0, 0]}}, {geo: "2d"});
+testSortOrders({geo: {$nearSphere: [0, 0]}}, {geo: "2d"});
+testSortOrders({geo: {$near: {$geometry: {type: "Point", coordinates: [0, 0]}}}},
+ {geo: "2dsphere"});
+testSortOrders({geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}},
+ {geo: "2dsphere"});
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/graph_lookup.js b/jstests/sharding/graph_lookup.js
index bd3236f2c84..4678ba2f9a9 100644
--- a/jstests/sharding/graph_lookup.js
+++ b/jstests/sharding/graph_lookup.js
@@ -1,18 +1,18 @@
// Test aggregating a sharded collection while using $graphLookup on an unsharded collection.
(function() {
- 'use strict';
+'use strict';
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
- assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
- assert.commandWorked(st.s0.adminCommand({shardCollection: "test.foo", key: {_id: "hashed"}}));
+assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: "test.foo", key: {_id: "hashed"}}));
- let db = st.s0.getDB("test");
+let db = st.s0.getDB("test");
- assert.writeOK(db.foo.insert([{}, {}, {}, {}]));
- assert.writeOK(db.bar.insert({_id: 1, x: 1}));
+assert.writeOK(db.foo.insert([{}, {}, {}, {}]));
+assert.writeOK(db.bar.insert({_id: 1, x: 1}));
- const res = db.foo
+const res = db.foo
.aggregate([{
$graphLookup: {
from: "bar",
@@ -24,17 +24,17 @@
}])
.toArray();
- assert.eq(res.length, 4);
- res.forEach(function(c) {
- assert.eq(c.res.length, 1);
- assert.eq(c.res[0]._id, 1);
- assert.eq(c.res[0].x, 1);
- });
-
- // Be sure $graphLookup is banned on sharded foreign collection.
- assert.commandWorked(st.s0.adminCommand({shardCollection: "test.baz", key: {_id: "hashed"}}));
- assert.commandWorked(db.baz.insert({_id: 1, x: 1}));
- const err = assert.throws(() => db.foo.aggregate([{
+assert.eq(res.length, 4);
+res.forEach(function(c) {
+ assert.eq(c.res.length, 1);
+ assert.eq(c.res[0]._id, 1);
+ assert.eq(c.res[0].x, 1);
+});
+
+// Be sure $graphLookup is banned on sharded foreign collection.
+assert.commandWorked(st.s0.adminCommand({shardCollection: "test.baz", key: {_id: "hashed"}}));
+assert.commandWorked(db.baz.insert({_id: 1, x: 1}));
+const err = assert.throws(() => db.foo.aggregate([{
$graphLookup: {
from: "baz",
startWith: {$literal: 1},
@@ -43,7 +43,7 @@
as: "res"
}
}]));
- assert.eq(28769, err.code);
+assert.eq(28769, err.code);
- st.stop();
+st.stop();
})();
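A sketch of the successful lookup shape from this test with the arguments spelled out; it assumes db is the mongos "test" handle used above and that "bar" is still unsharded. The connectFromField/connectToField choices are illustrative rather than a copy of the original pipeline:

    var docs = db.foo.aggregate([{
        $graphLookup: {
            from: "bar",               // unsharded foreign collection
            startWith: {$literal: 1},  // seed value for the traversal
            connectFromField: "x",     // field followed on each hop
            connectToField: "_id",     // field matched against in "bar"
            as: "res"                  // matches are collected into this array
        }
    }]).toArray();
    assert.eq(4, docs.length);  // one result per document in "foo"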
diff --git a/jstests/sharding/hash_basic.js b/jstests/sharding/hash_basic.js
index bffada99eef..1435611e7fb 100644
--- a/jstests/sharding/hash_basic.js
+++ b/jstests/sharding/hash_basic.js
@@ -1,57 +1,56 @@
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, chunkSize: 1});
+var st = new ShardingTest({shards: 2, chunkSize: 1});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 'hashed'}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 'hashed'}}));
- var configDB = st.s0.getDB('config');
- var chunkCountBefore = configDB.chunks.count({ns: 'test.user'});
- assert.gt(chunkCountBefore, 1);
+var configDB = st.s0.getDB('config');
+var chunkCountBefore = configDB.chunks.count({ns: 'test.user'});
+assert.gt(chunkCountBefore, 1);
- var testDB = st.s0.getDB('test');
- for (var x = 0; x < 1000; x++) {
- testDB.user.insert({x: x});
- }
+var testDB = st.s0.getDB('test');
+for (var x = 0; x < 1000; x++) {
+ testDB.user.insert({x: x});
+}
- var chunkDoc = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).next();
- var min = chunkDoc.min;
- var max = chunkDoc.max;
+var chunkDoc = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).next();
+var min = chunkDoc.min;
+var max = chunkDoc.max;
- // Assumption: There are documents in the MinKey chunk, otherwise, splitVector will fail.
- //
- // Note: This chunk will have 267 documents if collection was presplit to 4.
- var cmdRes =
- assert.commandWorked(st.s0.adminCommand({split: 'test.user', bounds: [min, max]}),
- 'Split on bounds failed for chunk [' + tojson(chunkDoc) + ']');
+// Assumption: There are documents in the MinKey chunk; otherwise, splitVector will fail.
+//
+// Note: This chunk will have 267 documents if the collection was presplit into 4 chunks.
+var cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', bounds: [min, max]}),
+ 'Split on bounds failed for chunk [' + tojson(chunkDoc) + ']');
- chunkDoc = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).skip(1).next();
+chunkDoc = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).skip(1).next();
- var middle = NumberLong(chunkDoc.min.x + 1000000);
- cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: middle}}),
- 'Split failed with middle [' + middle + ']');
+var middle = NumberLong(chunkDoc.min.x + 1000000);
+cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: middle}}),
+ 'Split failed with middle [' + middle + ']');
- cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', find: {x: 7}}),
- 'Split failed with find.');
+cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', find: {x: 7}}),
+ 'Split failed with find.');
- var chunkList = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).toArray();
- assert.eq(chunkCountBefore + 3, chunkList.length);
+var chunkList = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).toArray();
+assert.eq(chunkCountBefore + 3, chunkList.length);
- chunkList.forEach(function(chunkToMove) {
- var toShard = configDB.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
+chunkList.forEach(function(chunkToMove) {
+ var toShard = configDB.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
- print('Moving chunk ' + chunkToMove._id + ' from shard ' + chunkToMove.shard + ' to ' +
- toShard + ' ...');
+ print('Moving chunk ' + chunkToMove._id + ' from shard ' + chunkToMove.shard + ' to ' +
+ toShard + ' ...');
- assert.commandWorked(st.s0.adminCommand({
- moveChunk: 'test.user',
- bounds: [chunkToMove.min, chunkToMove.max],
- to: toShard,
- _waitForDelete: true
- }));
- });
+ assert.commandWorked(st.s0.adminCommand({
+ moveChunk: 'test.user',
+ bounds: [chunkToMove.min, chunkToMove.max],
+ to: toShard,
+ _waitForDelete: true
+ }));
+});
- st.stop();
+st.stop();
})();
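The splits above are requested three ways: by explicit bounds, by an explicit middle document, and by a find document whose hashed value selects the chunk. A sketch for seeing how a given key value relates to the hashed chunk ranges, assuming the shell's convertShardKeyToHashed() helper and the st handle from this test:

    var hashedValue = convertShardKeyToHashed(7);  // the hash used to route {x: 7}
    print("x: 7 is routed by hash value " + hashedValue);
    st.s0.getDB('config').chunks.find({ns: 'test.user'}).sort({min: 1}).forEach(function(c) {
        print("chunk " + tojson(c.min) + " -> " + tojson(c.max) + " on " + c.shard);
    });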
diff --git a/jstests/sharding/hash_shard_num_chunks.js b/jstests/sharding/hash_shard_num_chunks.js
index b551ad53df2..03bd2da845d 100644
--- a/jstests/sharding/hash_shard_num_chunks.js
+++ b/jstests/sharding/hash_shard_num_chunks.js
@@ -1,35 +1,34 @@
// Hash sharding with initial chunk count set.
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 3});
+var s = new ShardingTest({shards: 3});
- var dbname = "test";
- var coll = "foo";
- var db = s.getDB(dbname);
+var dbname = "test";
+var coll = "foo";
+var db = s.getDB(dbname);
- assert.commandWorked(db.adminCommand({enablesharding: dbname}));
- s.ensurePrimaryShard(dbname, s.shard1.shardName);
+assert.commandWorked(db.adminCommand({enablesharding: dbname}));
+s.ensurePrimaryShard(dbname, s.shard1.shardName);
- assert.commandWorked(db.adminCommand(
- {shardcollection: dbname + "." + coll, key: {a: "hashed"}, numInitialChunks: 500}));
+assert.commandWorked(db.adminCommand(
+ {shardcollection: dbname + "." + coll, key: {a: "hashed"}, numInitialChunks: 500}));
- s.printShardingStatus();
+s.printShardingStatus();
- var numChunks = s.config.chunks.count({"ns": "test.foo"});
- assert.eq(numChunks, 500, "should be exactly 500 chunks");
+var numChunks = s.config.chunks.count({"ns": "test.foo"});
+assert.eq(numChunks, 500, "should be exactly 500 chunks");
- s.config.shards.find().forEach(
- // Check that each shard has one third the numInitialChunks
- function(shard) {
- var numChunksOnShard = s.config.chunks.find({"shard": shard._id}).count();
- assert.gte(numChunksOnShard, Math.floor(500 / 3));
- });
+s.config.shards.find().forEach(
+    // Check that each shard has one third of the numInitialChunks
+ function(shard) {
+ var numChunksOnShard = s.config.chunks.find({"shard": shard._id}).count();
+ assert.gte(numChunksOnShard, Math.floor(500 / 3));
+ });
- // Check that the collection gets dropped correctly (which doesn't happen if pre-splitting fails
- // to create the collection on all shards).
- assert.commandWorked(db.runCommand({"drop": coll}),
- "couldn't drop empty, pre-split collection");
+// Check that the collection gets dropped correctly (which doesn't happen if pre-splitting fails
+// to create the collection on all shards).
+assert.commandWorked(db.runCommand({"drop": coll}), "couldn't drop empty, pre-split collection");
- s.stop();
+s.stop();
})();
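A minimal sketch of the numInitialChunks option exercised above: for a hashed shard key it pre-creates empty chunks at shardCollection time and spreads them across the shards, so no data has to be moved later. The namespace "test.presplit_demo" is hypothetical; db and s are the handles used in the test:

    assert.commandWorked(db.adminCommand({
        shardcollection: "test.presplit_demo",
        key: {a: "hashed"},
        numInitialChunks: 12  // chunks are created empty, before any inserts
    }));
    print("initial chunks: " + s.config.chunks.count({ns: "test.presplit_demo"}));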
diff --git a/jstests/sharding/hash_shard_unique_compound.js b/jstests/sharding/hash_shard_unique_compound.js
index 51adb85f1d5..056bf2bdbca 100644
--- a/jstests/sharding/hash_shard_unique_compound.js
+++ b/jstests/sharding/hash_shard_unique_compound.js
@@ -7,41 +7,41 @@
// SERVER-36321.
// @tags: [blacklist_from_rhel_67_s390x]
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 1, mongos: 1});
- var dbName = "test";
- var collName = "foo";
- var ns = dbName + "." + collName;
- var db = s.getDB(dbName);
- var coll = db.getCollection(collName);
+var s = new ShardingTest({shards: 1, mongos: 1});
+var dbName = "test";
+var collName = "foo";
+var ns = dbName + "." + collName;
+var db = s.getDB(dbName);
+var coll = db.getCollection(collName);
- // Enable sharding on DB
- assert.commandWorked(db.adminCommand({enablesharding: dbName}));
+// Enable sharding on DB
+assert.commandWorked(db.adminCommand({enablesharding: dbName}));
- // Shard a fresh collection using a hashed shard key
- assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}));
+// Shard a fresh collection using a hashed shard key
+assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}));
- // Create unique index
- assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
+// Create unique index
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
- jsTest.log("------ indexes -------");
- jsTest.log(tojson(coll.getIndexes()));
+jsTest.log("------ indexes -------");
+jsTest.log(tojson(coll.getIndexes()));
- // Second Part
- jsTest.log("------ dropping sharded collection to start part 2 -------");
- coll.drop();
+// Second Part
+jsTest.log("------ dropping sharded collection to start part 2 -------");
+coll.drop();
- // Create unique index
- assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
+// Create unique index
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
- // shard a fresh collection using a hashed shard key
- assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}),
- "shardcollection didn't worked 2");
+// shard a fresh collection using a hashed shard key
+assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}),
+                     "second shardcollection didn't work");
- s.printShardingStatus();
- jsTest.log("------ indexes 2-------");
- jsTest.log(tojson(coll.getIndexes()));
+s.printShardingStatus();
+jsTest.log("------ indexes 2-------");
+jsTest.log(tojson(coll.getIndexes()));
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/implicit_db_creation.js b/jstests/sharding/implicit_db_creation.js
index 9ec911e6f52..0f45dbb94ae 100644
--- a/jstests/sharding/implicit_db_creation.js
+++ b/jstests/sharding/implicit_db_creation.js
@@ -2,45 +2,44 @@
* This tests the basic cases for implicit database creation in a sharded cluster.
*/
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 2});
- var configDB = st.s.getDB('config');
+var st = new ShardingTest({shards: 2});
+var configDB = st.s.getDB('config');
- assert.eq(null, configDB.databases.findOne());
+assert.eq(null, configDB.databases.findOne());
- var testDB = st.s.getDB('test');
+var testDB = st.s.getDB('test');
- // Test that reads will not result into a new config.databases entry.
- assert.eq(null, testDB.user.findOne());
- assert.eq(null, configDB.databases.findOne({_id: 'test'}));
+// Test that reads will not result in a new config.databases entry.
+assert.eq(null, testDB.user.findOne());
+assert.eq(null, configDB.databases.findOne({_id: 'test'}));
- assert.writeOK(testDB.user.insert({x: 1}));
+assert.writeOK(testDB.user.insert({x: 1}));
- var testDBDoc = configDB.databases.findOne();
- assert.eq('test', testDBDoc._id, tojson(testDBDoc));
+var testDBDoc = configDB.databases.findOne();
+assert.eq('test', testDBDoc._id, tojson(testDBDoc));
- // Test that inserting to another collection in the same database will not modify the existing
- // config.databases entry.
- assert.writeOK(testDB.bar.insert({y: 1}));
- assert.eq(testDBDoc, configDB.databases.findOne());
+// Test that inserting to another collection in the same database will not modify the existing
+// config.databases entry.
+assert.writeOK(testDB.bar.insert({y: 1}));
+assert.eq(testDBDoc, configDB.databases.findOne());
- st.s.adminCommand({enableSharding: 'foo'});
- var fooDBDoc = configDB.databases.findOne({_id: 'foo'});
+st.s.adminCommand({enableSharding: 'foo'});
+var fooDBDoc = configDB.databases.findOne({_id: 'foo'});
- assert.neq(null, fooDBDoc);
- assert(fooDBDoc.partitioned);
+assert.neq(null, fooDBDoc);
+assert(fooDBDoc.partitioned);
- var newShardConn = MongoRunner.runMongod({'shardsvr': ""});
- var unshardedDB = newShardConn.getDB('unshardedDB');
+var newShardConn = MongoRunner.runMongod({'shardsvr': ""});
+var unshardedDB = newShardConn.getDB('unshardedDB');
- unshardedDB.user.insert({z: 1});
+unshardedDB.user.insert({z: 1});
- assert.commandWorked(st.s.adminCommand({addShard: newShardConn.name}));
+assert.commandWorked(st.s.adminCommand({addShard: newShardConn.name}));
- assert.neq(null, configDB.databases.findOne({_id: 'unshardedDB'}));
-
- MongoRunner.stopMongod(newShardConn);
- st.stop();
+assert.neq(null, configDB.databases.findOne({_id: 'unshardedDB'}));
+MongoRunner.stopMongod(newShardConn);
+st.stop();
})();
diff --git a/jstests/sharding/in_memory_sort_limit.js b/jstests/sharding/in_memory_sort_limit.js
index 328ae508f5a..0d4971c796c 100644
--- a/jstests/sharding/in_memory_sort_limit.js
+++ b/jstests/sharding/in_memory_sort_limit.js
@@ -2,49 +2,48 @@
// doesn't cause the in-memory sort limit to be reached, then make sure the same limit also doesn't
// cause the in-memory sort limit to be reached when running through a mongos.
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 2});
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
-
- // Make sure that at least 1 chunk is on another shard so that mongos doesn't treat this as a
- // single-shard query (which doesn't exercise the bug)
- assert.commandWorked(st.s.adminCommand(
- {shardCollection: 'test.skip', key: {_id: 'hashed'}, numInitialChunks: 64}));
-
- var mongosCol = st.s.getDB('test').getCollection('skip');
- var shardCol = st.shard0.getDB('test').getCollection('skip');
-
- // Create enough data to exceed the 32MB in-memory sort limit (per shard)
- var filler = new Array(10240).toString();
- var bulkOp = mongosCol.initializeOrderedBulkOp();
- for (var i = 0; i < 12800; i++) {
- bulkOp.insert({x: i, str: filler});
- }
- assert.writeOK(bulkOp.execute());
-
- var passLimit = 2000;
- var failLimit = 4000;
-
- // Test on MongoD
- jsTestLog("Test no error with limit of " + passLimit + " on mongod");
- assert.eq(passLimit, shardCol.find().sort({x: 1}).limit(passLimit).itcount());
-
- jsTestLog("Test error with limit of " + failLimit + " on mongod");
- assert.throws(function() {
- shardCol.find().sort({x: 1}).limit(failLimit).itcount();
- });
-
- // Test on MongoS
- jsTestLog("Test no error with limit of " + passLimit + " on mongos");
- assert.eq(passLimit, mongosCol.find().sort({x: 1}).limit(passLimit).itcount());
-
- jsTestLog("Test error with limit of " + failLimit + " on mongos");
- assert.throws(function() {
- mongosCol.find().sort({x: 1}).limit(failLimit).itcount();
- });
-
- st.stop();
-
+'use strict';
+
+var st = new ShardingTest({shards: 2});
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
+
+// Make sure that at least 1 chunk is on another shard so that mongos doesn't treat this as a
+// single-shard query (which doesn't exercise the bug)
+assert.commandWorked(
+ st.s.adminCommand({shardCollection: 'test.skip', key: {_id: 'hashed'}, numInitialChunks: 64}));
+
+var mongosCol = st.s.getDB('test').getCollection('skip');
+var shardCol = st.shard0.getDB('test').getCollection('skip');
+
+// Create enough data to exceed the 32MB in-memory sort limit (per shard)
+var filler = new Array(10240).toString();
+var bulkOp = mongosCol.initializeOrderedBulkOp();
+for (var i = 0; i < 12800; i++) {
+ bulkOp.insert({x: i, str: filler});
+}
+assert.writeOK(bulkOp.execute());
+
+var passLimit = 2000;
+var failLimit = 4000;
+
+// Test on MongoD
+jsTestLog("Test no error with limit of " + passLimit + " on mongod");
+assert.eq(passLimit, shardCol.find().sort({x: 1}).limit(passLimit).itcount());
+
+jsTestLog("Test error with limit of " + failLimit + " on mongod");
+assert.throws(function() {
+ shardCol.find().sort({x: 1}).limit(failLimit).itcount();
+});
+
+// Test on MongoS
+jsTestLog("Test no error with limit of " + passLimit + " on mongos");
+assert.eq(passLimit, mongosCol.find().sort({x: 1}).limit(passLimit).itcount());
+
+jsTestLog("Test error with limit of " + failLimit + " on mongos");
+assert.throws(function() {
+ mongosCol.find().sort({x: 1}).limit(failLimit).itcount();
+});
+
+st.stop();
})();
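Both assertions above rely on the limit being applied while sorting; otherwise the per-shard 32MB in-memory sort cap would be hit in the passing cases too. A sketch for inspecting this, reusing the mongosCol and passLimit variables from the test; the per-shard winning plans should show a SORT stage that only keeps the limited number of documents:

    var explainRes = mongosCol.find().sort({x: 1}).limit(passLimit).explain("executionStats");
    printjson(explainRes);  // look at the SORT stage in each shard's winning plan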
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
index 482137a9d03..b440c535230 100644
--- a/jstests/sharding/index1.js
+++ b/jstests/sharding/index1.js
@@ -1,363 +1,360 @@
// SERVER-2326 - make sure that sharding only works with unique indices
(function() {
- var s = new ShardingTest({name: "shard_index", shards: 2, mongos: 1});
+var s = new ShardingTest({name: "shard_index", shards: 2, mongos: 1});
- // Regenerate fully because of SERVER-2782
- for (var i = 0; i < 22; i++) {
- var coll = s.admin._mongo.getDB("test").getCollection("foo" + i);
- coll.drop();
+// Regenerate fully because of SERVER-2782
+for (var i = 0; i < 22; i++) {
+ var coll = s.admin._mongo.getDB("test").getCollection("foo" + i);
+ coll.drop();
- var bulk = coll.initializeUnorderedBulkOp();
- for (var j = 0; j < 300; j++) {
- bulk.insert({num: j, x: 1});
- }
- assert.writeOK(bulk.execute());
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var j = 0; j < 300; j++) {
+ bulk.insert({num: j, x: 1});
+ }
+ assert.writeOK(bulk.execute());
- if (i == 0) {
- s.adminCommand({enablesharding: "" + coll._db});
- s.ensurePrimaryShard(coll.getDB().getName(), s.shard1.shardName);
- }
+ if (i == 0) {
+ s.adminCommand({enablesharding: "" + coll._db});
+ s.ensurePrimaryShard(coll.getDB().getName(), s.shard1.shardName);
+ }
+
+ print("\n\n\n\n\nTest # " + i);
+
+ if (i == 0) {
+ // Unique index exists, but not the right one.
+ coll.ensureIndex({num: 1}, {unique: true});
+ coll.ensureIndex({x: 1});
- print("\n\n\n\n\nTest # " + i);
-
- if (i == 0) {
- // Unique index exists, but not the right one.
- coll.ensureIndex({num: 1}, {unique: true});
- coll.ensureIndex({x: 1});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not shard collection when another unique index exists!");
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 1) {
- // Unique index exists as prefix, also index exists
- coll.ensureIndex({x: 1});
- coll.ensureIndex({x: 1, num: 1}, {unique: true});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
- } catch (e) {
- print(e);
- assert(false, "Should be able to shard non-unique index without unique option.");
- }
+ assert(!passed, "Should not shard collection when another unique index exists!");
+ }
+ if (i == 1) {
+ // Unique index exists as prefix, also index exists
+ coll.ensureIndex({x: 1});
+ coll.ensureIndex({x: 1, num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard non-unique index without unique option.");
}
- if (i == 2) {
- // Non-unique index exists as prefix, also index exists. No unique index.
- coll.ensureIndex({x: 1});
- coll.ensureIndex({x: 1, num: 1});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
- passed = true;
-
- } catch (e) {
- print(e);
- assert(
- !passed,
- "Should be able to shard collection with no unique index if unique not specified.");
- }
+ }
+ if (i == 2) {
+ // Non-unique index exists as prefix, also index exists. No unique index.
+ coll.ensureIndex({x: 1});
+ coll.ensureIndex({x: 1, num: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+
+ } catch (e) {
+ print(e);
+ assert(
+ !passed,
+ "Should be able to shard collection with no unique index if unique not specified.");
}
- if (i == 3) {
- // Unique index exists as prefix, also unique index exists
- coll.ensureIndex({num: 1}, {unique: true});
- coll.ensureIndex({num: 1, x: 1}, {unique: true});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
- } catch (e) {
- print(e);
- assert(false, "Should be able to shard collection with unique prefix index.");
- }
+ }
+ if (i == 3) {
+ // Unique index exists as prefix, also unique index exists
+ coll.ensureIndex({num: 1}, {unique: true});
+ coll.ensureIndex({num: 1, x: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with unique prefix index.");
}
- if (i == 4) {
- // Unique index exists as id, also unique prefix index exists
- coll.ensureIndex({_id: 1, num: 1}, {unique: true});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {_id: 1}, unique: true});
- } catch (e) {
- print(e);
- assert(false, "Should be able to shard collection with unique id index.");
- }
+ }
+ if (i == 4) {
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex({_id: 1, num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {_id: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with unique id index.");
}
- if (i == 5) {
- // Unique index exists as id, also unique prefix index exists
- coll.ensureIndex({_id: 1, num: 1}, {unique: true});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {_id: 1, num: 1}, unique: true});
- } catch (e) {
- print(e);
- assert(false,
- "Should be able to shard collection with unique combination id index.");
- }
+ }
+ if (i == 5) {
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex({_id: 1, num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {_id: 1, num: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with unique combination id index.");
+ }
+ }
+ if (i == 6) {
+ coll.remove({});
+
+ // Unique index does not exist, also unique prefix index exists
+ coll.ensureIndex({num: 1, _id: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(
+ false,
+ "Should be able to shard collection with no unique index but with a unique prefix index.");
}
- if (i == 6) {
- coll.remove({});
-
- // Unique index does not exist, also unique prefix index exists
- coll.ensureIndex({num: 1, _id: 1}, {unique: true});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
- } catch (e) {
- print(e);
- assert(
- false,
- "Should be able to shard collection with no unique index but with a unique prefix index.");
- }
-
- printjson(coll.getIndexes());
-
- // Make sure the index created is unique!
- assert.eq(1,
- coll.getIndexes()
- .filter(function(z) {
- return friendlyEqual(z.key, {num: 1}) && z.unique;
- })
- .length);
+
+ printjson(coll.getIndexes());
+
+ // Make sure the index created is unique!
+ assert.eq(1,
+ coll.getIndexes()
+ .filter(function(z) {
+ return friendlyEqual(z.key, {num: 1}) && z.unique;
+ })
+ .length);
+ }
+ if (i == 7) {
+ coll.remove({});
+
+ // No index exists
+
+ try {
+ assert.eq(coll.find().itcount(), 0);
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with no index on shard key.");
}
- if (i == 7) {
- coll.remove({});
-
- // No index exists
-
- try {
- assert.eq(coll.find().itcount(), 0);
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
- } catch (e) {
- print(e);
- assert(false, "Should be able to shard collection with no index on shard key.");
- }
+ }
+ if (i == 8) {
+ coll.remove({});
+
+ // No index exists
+
+ passed = false;
+ try {
+ assert.eq(coll.find().itcount(), 0);
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 8) {
- coll.remove({});
-
- // No index exists
-
- passed = false;
- try {
- assert.eq(coll.find().itcount(), 0);
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(
- passed,
- "Should be able to shard collection with unique flag but with no unique index on shard key, if coll empty.");
-
- printjson(coll.getIndexes());
-
- // Make sure the index created is unique!
- assert.eq(1,
- coll.getIndexes()
- .filter(function(z) {
- return friendlyEqual(z.key, {num: 1}) && z.unique;
- })
- .length);
+ assert(
+ passed,
+ "Should be able to shard collection with unique flag but with no unique index on shard key, if coll empty.");
+
+ printjson(coll.getIndexes());
+
+ // Make sure the index created is unique!
+ assert.eq(1,
+ coll.getIndexes()
+ .filter(function(z) {
+ return friendlyEqual(z.key, {num: 1}) && z.unique;
+ })
+ .length);
+ }
+ if (i == 9) {
+ // Unique index exists on a different field as well
+ coll.ensureIndex({num: 1}, {unique: true});
+ coll.ensureIndex({x: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 9) {
- // Unique index exists on a different field as well
- coll.ensureIndex({num: 1}, {unique: true});
- coll.ensureIndex({x: 1});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not shard collection when another unique index exists!");
+ assert(!passed, "Should not shard collection when another unique index exists!");
+ }
+ if (i == 10) {
+ // try sharding non-empty collection without any index
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 10) {
- // try sharding non-empty collection without any index
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not be able to shard without index");
-
- // now add containing index and try sharding by prefix
- coll.ensureIndex({num: 1, x: 1});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(passed, "Should be able to shard collection with prefix of existing index");
-
- printjson(coll.getIndexes());
-
- // make sure no extra index is created
- assert.eq(2, coll.getIndexes().length);
+ assert(!passed, "Should not be able to shard without index");
+
+ // now add containing index and try sharding by prefix
+ coll.ensureIndex({num: 1, x: 1});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 11) {
- coll.remove({});
+ assert(passed, "Should be able to shard collection with prefix of existing index");
- // empty collection with useful index. check new index not created
- coll.ensureIndex({num: 1, x: 1});
+ printjson(coll.getIndexes());
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(passed, "Should be able to shard collection with prefix of existing index");
+ // make sure no extra index is created
+ assert.eq(2, coll.getIndexes().length);
+ }
+ if (i == 11) {
+ coll.remove({});
- printjson(coll.getIndexes());
+ // empty collection with useful index. check new index not created
+ coll.ensureIndex({num: 1, x: 1});
- // make sure no extra index is created
- assert.eq(2, coll.getIndexes().length);
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 12) {
- // check multikey values for x make index unusable for shard key
- coll.save({num: 100, x: [2, 3]});
- coll.ensureIndex({num: 1, x: 1});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not be able to shard collection with mulikey index");
+ assert(passed, "Should be able to shard collection with prefix of existing index");
+
+ printjson(coll.getIndexes());
+
+ // make sure no extra index is created
+ assert.eq(2, coll.getIndexes().length);
+ }
+ if (i == 12) {
+        // check that multikey values for x make the index unusable for the shard key
+ coll.save({num: 100, x: [2, 3]});
+ coll.ensureIndex({num: 1, x: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 13) {
- coll.save({num: [100, 200], x: 10});
- coll.ensureIndex({num: 1, x: 1});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not be able to shard collection with mulikey index");
+ assert(!passed, "Should not be able to shard collection with mulikey index");
+ }
+ if (i == 13) {
+ coll.save({num: [100, 200], x: 10});
+ coll.ensureIndex({num: 1, x: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 14) {
- coll.save({num: 100, x: 10, y: [1, 2]});
- coll.ensureIndex({num: 1, x: 1, y: 1});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not be able to shard collection with mulikey index");
+ assert(!passed, "Should not be able to shard collection with mulikey index");
+ }
+ if (i == 14) {
+ coll.save({num: 100, x: 10, y: [1, 2]});
+ coll.ensureIndex({num: 1, x: 1, y: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 15) {
- // try sharding with a hashed index
- coll.ensureIndex({num: "hashed"});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}});
- } catch (e) {
- print(e);
- assert(false, "Should be able to shard collection with hashed index.");
- }
+ assert(!passed, "Should not be able to shard collection with mulikey index");
+ }
+ if (i == 15) {
+ // try sharding with a hashed index
+ coll.ensureIndex({num: "hashed"});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with hashed index.");
}
- if (i == 16) {
- // create hashed index, but try to declare it unique when sharding
- coll.ensureIndex({num: "hashed"});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}, unique: true});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not be able to declare hashed shard key unique.");
+ }
+ if (i == 16) {
+ // create hashed index, but try to declare it unique when sharding
+ coll.ensureIndex({num: "hashed"});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}, unique: true});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 17) {
- // create hashed index, but unrelated unique index present
- coll.ensureIndex({x: "hashed"});
- coll.ensureIndex({num: 1}, {unique: true});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: "hashed"}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed,
- "Should not be able to shard on hashed index with another unique index");
+ assert(!passed, "Should not be able to declare hashed shard key unique.");
+ }
+ if (i == 17) {
+ // create hashed index, but unrelated unique index present
+ coll.ensureIndex({x: "hashed"});
+ coll.ensureIndex({num: 1}, {unique: true});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: "hashed"}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 18) {
- // create hashed index, and a regular unique index exists on same field
- coll.ensureIndex({num: "hashed"});
- coll.ensureIndex({num: 1}, {unique: true});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}});
- } catch (e) {
- print(e);
- assert(false, "Should be able to shard coll with hashed and regular unique index");
- }
+ assert(!passed, "Should not be able to shard on hashed index with another unique index");
+ }
+ if (i == 18) {
+ // create hashed index, and a regular unique index exists on same field
+ coll.ensureIndex({num: "hashed"});
+ coll.ensureIndex({num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard coll with hashed and regular unique index");
}
- if (i == 19) {
- // Create sparse index.
- coll.ensureIndex({x: 1}, {sparse: true});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not be able to shard coll with sparse index");
+ }
+ if (i == 19) {
+ // Create sparse index.
+ coll.ensureIndex({x: 1}, {sparse: true});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 20) {
- // Create partial index.
- coll.ensureIndex({x: 1}, {filter: {num: {$gt: 1}}});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not be able to shard coll with partial index");
+ assert(!passed, "Should not be able to shard coll with sparse index");
+ }
+ if (i == 20) {
+ // Create partial index.
+ coll.ensureIndex({x: 1}, {filter: {num: {$gt: 1}}});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 21) {
- // Ensure that a collection with a normal index and a partial index can be sharded,
- // where
- // both are prefixed by the shard key.
-
- coll.ensureIndex({x: 1, num: 1}, {filter: {num: {$gt: 1}}});
- coll.ensureIndex({x: 1, num: -1});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
- } catch (e) {
- print(e);
- assert(false, "Should be able to shard coll with regular and partial index");
- }
+ assert(!passed, "Should not be able to shard coll with partial index");
+ }
+ if (i == 21) {
+ // Ensure that a collection with a normal index and a partial index can be sharded,
+        // where both are prefixed by the shard key.
+
+ coll.ensureIndex({x: 1, num: 1}, {filter: {num: {$gt: 1}}});
+ coll.ensureIndex({x: 1, num: -1});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard coll with regular and partial index");
}
}
+}
- s.stop();
-
+s.stop();
})();
diff --git a/jstests/sharding/index_and_collection_option_propagation.js b/jstests/sharding/index_and_collection_option_propagation.js
index 7e50856014f..114d3c57cf5 100644
--- a/jstests/sharding/index_and_collection_option_propagation.js
+++ b/jstests/sharding/index_and_collection_option_propagation.js
@@ -14,214 +14,216 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- // Helper function that runs listIndexes against shards to check for the existence of an index.
- function checkShardIndexes(indexKey, shardsWithIndex, shardsWithoutIndex) {
- function shardHasIndex(indexKey, shard) {
- const res = shard.getDB(dbName).runCommand({listIndexes: collName});
- if (res.code === ErrorCodes.NamespaceNotFound) {
- return [res, false];
- }
- assert.commandWorked(res);
- for (index of res.cursor.firstBatch) {
- if (index.key.hasOwnProperty(indexKey)) {
- return [res, true];
- }
- }
+// Helper function that runs listIndexes against shards to check for the existence of an index.
+function checkShardIndexes(indexKey, shardsWithIndex, shardsWithoutIndex) {
+ function shardHasIndex(indexKey, shard) {
+ const res = shard.getDB(dbName).runCommand({listIndexes: collName});
+ if (res.code === ErrorCodes.NamespaceNotFound) {
return [res, false];
}
-
- for (shard of shardsWithIndex) {
- [listIndexesRes, foundIndex] = shardHasIndex(indexKey, shard);
- assert(foundIndex,
- "expected to see index with key " + indexKey + " in listIndexes response from " +
- shard + ": " + tojson(listIndexesRes));
+ assert.commandWorked(res);
+ for (index of res.cursor.firstBatch) {
+ if (index.key.hasOwnProperty(indexKey)) {
+ return [res, true];
+ }
}
+ return [res, false];
+ }
- for (shard of shardsWithoutIndex) {
- [listIndexesRes, foundIndex] = shardHasIndex(indexKey, shard);
- assert(!foundIndex,
- "expected not to see index with key " + indexKey +
- " in listIndexes response from " + shard + ": " + tojson(listIndexesRes));
- }
+ for (shard of shardsWithIndex) {
+ [listIndexesRes, foundIndex] = shardHasIndex(indexKey, shard);
+ assert(foundIndex,
+ "expected to see index with key " + indexKey + " in listIndexes response from " +
+ shard + ": " + tojson(listIndexesRes));
}
- // Helper function that runs listCollections against shards to check for the existence of a
- // collection option.
- function checkShardCollOption(optionKey, optionValue, shardsWithOption, shardsWithoutOption) {
- function shardHasOption(optionKey, optionValue, shard) {
- const res =
- shard.getDB(dbName).runCommand({listCollections: 1, filter: {name: collName}});
- assert.commandWorked(res);
- if (res.cursor.firstBatch.length === 0) {
- return [res, false];
- }
- assert.eq(1, res.cursor.firstBatch.length);
- if (friendlyEqual(res.cursor.firstBatch[0].options[optionKey], optionValue)) {
- return [res, true];
- }
+ for (shard of shardsWithoutIndex) {
+ [listIndexesRes, foundIndex] = shardHasIndex(indexKey, shard);
+ assert(!foundIndex,
+ "expected not to see index with key " + indexKey + " in listIndexes response from " +
+ shard + ": " + tojson(listIndexesRes));
+ }
+}
+
+// Helper function that runs listCollections against shards to check for the existence of a
+// collection option.
+function checkShardCollOption(optionKey, optionValue, shardsWithOption, shardsWithoutOption) {
+ function shardHasOption(optionKey, optionValue, shard) {
+ const res = shard.getDB(dbName).runCommand({listCollections: 1, filter: {name: collName}});
+ assert.commandWorked(res);
+ if (res.cursor.firstBatch.length === 0) {
return [res, false];
}
-
- for (shard of shardsWithOption) {
- [listCollsRes, foundOption] = shardHasOption(optionKey, optionValue, shard);
- assert(foundOption,
- "expected to see option " + optionKey + " in listCollections response from " +
- shard + ": " + tojson(listCollsRes));
+ assert.eq(1, res.cursor.firstBatch.length);
+ if (friendlyEqual(res.cursor.firstBatch[0].options[optionKey], optionValue)) {
+ return [res, true];
}
+ return [res, false];
+ }
- for (shard of shardsWithoutOption) {
- [listOptionsRes, foundOption] = shardHasOption(optionKey, optionValue, shard);
- assert(!foundOption,
- "expected not to see option " + optionKey +
- " in listCollections response from " + shard + ": " + tojson(listCollsRes));
- }
+ for (shard of shardsWithOption) {
+ [listCollsRes, foundOption] = shardHasOption(optionKey, optionValue, shard);
+ assert(foundOption,
+ "expected to see option " + optionKey + " in listCollections response from " +
+ shard + ": " + tojson(listCollsRes));
}
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- var st = new ShardingTest(
- {shards: {rs0: {nodes: 1}, rs1: {nodes: 1}, rs2: {nodes: 1}}, other: {config: 3}});
-
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.name);
-
- // When creating index or setting a collection option on an unsharded collection, only the
- // primary shard is affected.
-
- assert.commandWorked(st.s.getDB(dbName).getCollection(collName).createIndex({"idx1": 1}));
- checkShardIndexes("idx1", [st.shard0], [st.shard1, st.shard2]);
-
- const validationOption1 = {dummyField1: {$type: "string"}};
- assert.commandWorked(st.s.getDB(dbName).runCommand({
- collMod: collName,
- validator: validationOption1,
- validationLevel: "moderate",
- validationAction: "warn"
- }));
- checkShardCollOption("validator", validationOption1, [st.shard0], [st.shard1, st.shard2]);
-
- // After sharding the collection but before any migrations, only the primary shard has the
- // index and collection option.
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
- checkShardIndexes("idx1", [st.shard0], [st.shard1, st.shard2]);
- checkShardCollOption("validator", validationOption1, [st.shard0], [st.shard1, st.shard2]);
-
- // After a migration, only shards that own data for the collection have the index and collection
- // option.
- assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {x: 0}, to: st.shard1.shardName}));
- checkShardIndexes("idx1", [st.shard0, st.shard1], [st.shard2]);
- checkShardCollOption("validator", validationOption1, [st.shard0, st.shard1], [st.shard2]);
-
- // Though some shards don't own data for the sharded collection, createIndex, reIndex,
- // dropIndex, and collMod (which are broadcast to all shards) report overall success (that is,
- // NamespaceNotFound-type errors from shards are ignored, and they are not included in the 'raw'
- // shard responses).
-
- var res;
-
- // createIndex
- res = st.s.getDB(dbName).getCollection(collName).createIndex({"idx2": 1});
- assert.commandWorked(res);
- assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
- assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
- assert.eq(undefined,
- res.raw[st.shard2.host],
- tojson(res)); // CannotImplicitlyCreateCollection is ignored
- checkShardIndexes("idx2", [st.shard0, st.shard1], [st.shard2]);
-
- // dropIndex
- res = st.s.getDB(dbName).getCollection(collName).dropIndex("idx1_1");
- assert.commandWorked(res);
- assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
- assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
- assert.eq(undefined, res.raw[st.shard2.host], tojson(res)); // NamespaceNotFound is ignored
- checkShardIndexes("idx1", [], [st.shard0, st.shard1, st.shard2]);
-
- // collMod
- const validationOption2 = {dummyField2: {$type: "string"}};
- res = st.s.getDB(dbName).runCommand({
- collMod: collName,
- validator: validationOption2,
- validationLevel: "moderate",
- validationAction: "warn"
- });
- assert.commandWorked(res);
- assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
- assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
- assert.eq(undefined, res.raw[st.shard2.host], tojson(res)); // NamespaceNotFound is ignored
- checkShardCollOption("validator", validationOption2, [st.shard0, st.shard1], [st.shard2]);
-
- // Check that errors from shards are aggregated correctly.
-
- // If no shard returns success, then errors that are usually ignored should be reported.
- res = st.s.getDB(dbName).getCollection("unshardedColl").dropIndex("nonexistentIndex");
- assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
- assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
- assert.eq(res.code, ErrorCodes.NamespaceNotFound, tojson(res));
- assert.eq("NamespaceNotFound", res.codeName, tojson(res));
- assert.neq(null, res.errmsg, tojson(res));
-
- // If all shards report the same error, the overall command error should be set to that error.
- res = st.s.getDB(dbName).getCollection(collName).createIndex({});
- assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
- assert.eq(res.raw[st.shard1.host].ok, 0, tojson(res));
- assert.eq(res.raw[st.shard2.host].ok, 0, tojson(res));
- assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
- assert.eq(res.code, res.raw[st.shard1.host].code, tojson(res));
- assert.eq(res.code, res.raw[st.shard2.host].code, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard1.host].codeName, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard2.host].codeName, tojson(res));
- assert.eq(res.code, ErrorCodes.CannotCreateIndex, tojson(res));
- assert.eq("CannotCreateIndex", res.codeName, tojson(res));
- assert.neq(null, res.errmsg, tojson(res));
-
- // If all the non-ignorable errors reported by shards are the same, the overall command error
- // should be set to that error.
- res = st.s.getDB(dbName).getCollection(collName).createIndex({z: 1}, {unique: true});
- assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
- assert.eq(res.raw[st.shard1.host].ok, 0, tojson(res));
- assert.eq(null, res.raw[st.shard2.host], tojson(res));
- assert.eq(ErrorCodes.CannotCreateIndex, res.raw[st.shard0.host].code, tojson(res));
- assert.eq(ErrorCodes.CannotCreateIndex, res.raw[st.shard1.host].code, tojson(res));
- assert.eq("CannotCreateIndex", res.raw[st.shard0.host].codeName, tojson(res));
- assert.eq("CannotCreateIndex", res.raw[st.shard1.host].codeName, tojson(res));
- assert.eq(res.code, ErrorCodes.CannotCreateIndex, tojson(res));
- assert.eq("CannotCreateIndex", res.codeName, tojson(res));
- assert.neq(null, res.errmsg, tojson(res));
-
- st.rs0.stopSet();
-
- // If we receive a non-ignorable error, it should be reported as the command error.
- res = st.s.getDB(dbName).getCollection("unshardedColl").createIndex({"validIdx": 1});
- assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
- assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
- // We might see 'HostUnreachable' the first time if the mongos's ReplicaSetMonitor does not yet
- // know that the shard is down.
- assert(res.code === ErrorCodes.HostUnreachable ||
- res.code === ErrorCodes.FailedToSatisfyReadPreference,
- tojson(res));
- assert(res.codeName === "HostUnreachable" || res.codeName === "FailedToSatisfyReadPreference",
- tojson(res));
-
- // If some shard returns a non-ignorable error, it should be reported as the command error, even
- // if other shards returned ignorable errors.
- res = st.s.getDB(dbName).getCollection(collName).createIndex({"validIdx": 1});
- assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res)); // shard was down
- assert.eq(
- res.raw[st.shard1.host].ok, 1, tojson(res)); // gets created on shard that owns chunks
- assert.eq(undefined, res.raw[st.shard2.host], tojson(res)); // shard does not own chunks
- assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
- // We can expect to see 'FailedToSatisfyReadPreference' this time, because after the previous
- // createIndexes attempt, mongos's ReplicaSetMonitor should have been updated.
- assert.eq(res.code, ErrorCodes.FailedToSatisfyReadPreference, tojson(res));
- assert.eq("FailedToSatisfyReadPreference", res.codeName, tojson(res));
- assert.neq(null, res.errmsg, tojson(res));
-
- st.stop();
+ for (shard of shardsWithoutOption) {
+ [listOptionsRes, foundOption] = shardHasOption(optionKey, optionValue, shard);
+ assert(!foundOption,
+ "expected not to see option " + optionKey + " in listCollections response from " +
+ shard + ": " + tojson(listCollsRes));
+ }
+}
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+var st = new ShardingTest(
+ {shards: {rs0: {nodes: 1}, rs1: {nodes: 1}, rs2: {nodes: 1}}, other: {config: 3}});
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.name);
+
+// When creating an index or setting a collection option on an unsharded collection, only the
+// primary shard is affected.
+
+assert.commandWorked(st.s.getDB(dbName).getCollection(collName).createIndex({"idx1": 1}));
+checkShardIndexes("idx1", [st.shard0], [st.shard1, st.shard2]);
+
+const validationOption1 = {
+ dummyField1: {$type: "string"}
+};
+assert.commandWorked(st.s.getDB(dbName).runCommand({
+ collMod: collName,
+ validator: validationOption1,
+ validationLevel: "moderate",
+ validationAction: "warn"
+}));
+checkShardCollOption("validator", validationOption1, [st.shard0], [st.shard1, st.shard2]);
+
+// After sharding the collection but before any migrations, only the primary shard has the
+// index and collection option.
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
+checkShardIndexes("idx1", [st.shard0], [st.shard1, st.shard2]);
+checkShardCollOption("validator", validationOption1, [st.shard0], [st.shard1, st.shard2]);
+
+// After a migration, only shards that own data for the collection have the index and collection
+// option.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {x: 0}, to: st.shard1.shardName}));
+checkShardIndexes("idx1", [st.shard0, st.shard1], [st.shard2]);
+checkShardCollOption("validator", validationOption1, [st.shard0, st.shard1], [st.shard2]);
+
+// Though some shards don't own data for the sharded collection, createIndex, reIndex,
+// dropIndex, and collMod (which are broadcast to all shards) report overall success (that is,
+// NamespaceNotFound-type errors from shards are ignored, and they are not included in the 'raw'
+// shard responses).
+
+var res;
+
+// createIndex
+res = st.s.getDB(dbName).getCollection(collName).createIndex({"idx2": 1});
+assert.commandWorked(res);
+assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
+assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
+assert.eq(undefined,
+ res.raw[st.shard2.host],
+ tojson(res)); // CannotImplicitlyCreateCollection is ignored
+checkShardIndexes("idx2", [st.shard0, st.shard1], [st.shard2]);
+
+// dropIndex
+res = st.s.getDB(dbName).getCollection(collName).dropIndex("idx1_1");
+assert.commandWorked(res);
+assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
+assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
+assert.eq(undefined, res.raw[st.shard2.host], tojson(res)); // NamespaceNotFound is ignored
+checkShardIndexes("idx1", [], [st.shard0, st.shard1, st.shard2]);
+
+// collMod
+const validationOption2 = {
+ dummyField2: {$type: "string"}
+};
+res = st.s.getDB(dbName).runCommand({
+ collMod: collName,
+ validator: validationOption2,
+ validationLevel: "moderate",
+ validationAction: "warn"
+});
+assert.commandWorked(res);
+assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
+assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
+assert.eq(undefined, res.raw[st.shard2.host], tojson(res)); // NamespaceNotFound is ignored
+checkShardCollOption("validator", validationOption2, [st.shard0, st.shard1], [st.shard2]);
+
+// Check that errors from shards are aggregated correctly.
+
+// If no shard returns success, then errors that are usually ignored should be reported.
+res = st.s.getDB(dbName).getCollection("unshardedColl").dropIndex("nonexistentIndex");
+assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
+assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
+assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
+assert.eq(res.code, ErrorCodes.NamespaceNotFound, tojson(res));
+assert.eq("NamespaceNotFound", res.codeName, tojson(res));
+assert.neq(null, res.errmsg, tojson(res));
+
+// If all shards report the same error, the overall command error should be set to that error.
+res = st.s.getDB(dbName).getCollection(collName).createIndex({});
+assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
+assert.eq(res.raw[st.shard1.host].ok, 0, tojson(res));
+assert.eq(res.raw[st.shard2.host].ok, 0, tojson(res));
+assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
+assert.eq(res.code, res.raw[st.shard1.host].code, tojson(res));
+assert.eq(res.code, res.raw[st.shard2.host].code, tojson(res));
+assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
+assert.eq(res.codeName, res.raw[st.shard1.host].codeName, tojson(res));
+assert.eq(res.codeName, res.raw[st.shard2.host].codeName, tojson(res));
+assert.eq(res.code, ErrorCodes.CannotCreateIndex, tojson(res));
+assert.eq("CannotCreateIndex", res.codeName, tojson(res));
+assert.neq(null, res.errmsg, tojson(res));
+
+// If all the non-ignorable errors reported by shards are the same, the overall command error
+// should be set to that error.
+res = st.s.getDB(dbName).getCollection(collName).createIndex({z: 1}, {unique: true});
+assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
+assert.eq(res.raw[st.shard1.host].ok, 0, tojson(res));
+assert.eq(null, res.raw[st.shard2.host], tojson(res));
+assert.eq(ErrorCodes.CannotCreateIndex, res.raw[st.shard0.host].code, tojson(res));
+assert.eq(ErrorCodes.CannotCreateIndex, res.raw[st.shard1.host].code, tojson(res));
+assert.eq("CannotCreateIndex", res.raw[st.shard0.host].codeName, tojson(res));
+assert.eq("CannotCreateIndex", res.raw[st.shard1.host].codeName, tojson(res));
+assert.eq(res.code, ErrorCodes.CannotCreateIndex, tojson(res));
+assert.eq("CannotCreateIndex", res.codeName, tojson(res));
+assert.neq(null, res.errmsg, tojson(res));
+
+st.rs0.stopSet();
+
+// If we receive a non-ignorable error, it should be reported as the command error.
+res = st.s.getDB(dbName).getCollection("unshardedColl").createIndex({"validIdx": 1});
+assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
+assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
+assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
+// We might see 'HostUnreachable' the first time if the mongos's ReplicaSetMonitor does not yet
+// know that the shard is down.
+assert(res.code === ErrorCodes.HostUnreachable ||
+ res.code === ErrorCodes.FailedToSatisfyReadPreference,
+ tojson(res));
+assert(res.codeName === "HostUnreachable" || res.codeName === "FailedToSatisfyReadPreference",
+ tojson(res));
+
+// If some shard returns a non-ignorable error, it should be reported as the command error, even
+// if other shards returned ignorable errors.
+res = st.s.getDB(dbName).getCollection(collName).createIndex({"validIdx": 1});
+assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res)); // shard was down
+assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res)); // gets created on shard that owns chunks
+assert.eq(undefined, res.raw[st.shard2.host], tojson(res)); // shard does not own chunks
+assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
+assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
+// We can expect to see 'FailedToSatisfyReadPreference' this time, because after the previous
+// createIndexes attempt, mongos's ReplicaSetMonitor should have been updated.
+assert.eq(res.code, ErrorCodes.FailedToSatisfyReadPreference, tojson(res));
+assert.eq("FailedToSatisfyReadPreference", res.codeName, tojson(res));
+assert.neq(null, res.errmsg, tojson(res));
+
+st.stop();
})();
diff --git a/jstests/sharding/initial_split_validate_shard_collections.js b/jstests/sharding/initial_split_validate_shard_collections.js
index a2bc2070622..537b03a8783 100644
--- a/jstests/sharding/initial_split_validate_shard_collections.js
+++ b/jstests/sharding/initial_split_validate_shard_collections.js
@@ -7,68 +7,68 @@ load("jstests/libs/feature_compatibility_version.js");
load("jstests/libs/uuid_util.js");
(function() {
- 'use strict';
-
- let st = new ShardingTest({shards: 2});
- let mongos = st.s0;
-
- assert.commandWorked(mongos.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
-
- assert.commandWorked(mongos.adminCommand(
- {shardCollection: 'test.user', key: {x: 'hashed'}, numInitialChunks: 2}));
-
- // Ensure that all the pending (received chunks) have been incorporated in the shard's filtering
- // metadata so they will show up in the getShardVersion command
- assert.eq(0, mongos.getDB('test').user.find({}).itcount());
-
- st.printShardingStatus();
-
- function checkMetadata(metadata) {
- jsTestLog(tojson(metadata));
-
- assert.eq(1, metadata.chunks.length);
- assert.eq(0, metadata.pending.length);
-
- // Check that the single chunk on the shard's metadata is a valid chunk (getShardVersion
- // represents chunks as an array of [min, max])
- let chunks = metadata.chunks;
- assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0);
- }
-
- // Check that the shards' in-memory catalog caches were refreshed
- checkMetadata(assert
- .commandWorked(st.rs0.getPrimary().adminCommand(
- {getShardVersion: 'test.user', fullMetadata: true}))
- .metadata);
- checkMetadata(assert
- .commandWorked(st.rs1.getPrimary().adminCommand(
- {getShardVersion: 'test.user', fullMetadata: true}))
- .metadata);
-
- // Check that the shards' catalogs have the correct UUIDs
- const configUUID = getUUIDFromConfigCollections(mongos, 'test.user');
- const shard0UUID = getUUIDFromListCollections(st.shard0.getDB('test'), 'user');
- const shard1UUID = getUUIDFromListCollections(st.shard1.getDB('test'), 'user');
- assert.eq(configUUID, shard0UUID);
- assert.eq(configUUID, shard1UUID);
-
- // Check that the shards' on-disk caches have the correct number of chunks
- assert.commandWorked(st.shard0.adminCommand(
- {_flushRoutingTableCacheUpdates: 'test.user', syncFromConfig: false}));
- assert.commandWorked(st.shard1.adminCommand(
- {_flushRoutingTableCacheUpdates: 'test.user', syncFromConfig: false}));
-
- const chunksOnConfigCount = st.config.chunks.count({ns: 'test.user'});
- assert.eq(2, chunksOnConfigCount);
-
- const cacheChunksOnShard0 =
- st.shard0.getDB("config").getCollection("cache.chunks.test.user").find().toArray();
- const cacheChunksOnShard1 =
- st.shard1.getDB("config").getCollection("cache.chunks.test.user").find().toArray();
- assert.eq(chunksOnConfigCount, cacheChunksOnShard0.length);
- assert.eq(chunksOnConfigCount, cacheChunksOnShard1.length);
- assert.eq(cacheChunksOnShard0, cacheChunksOnShard1);
-
- st.stop();
+'use strict';
+
+let st = new ShardingTest({shards: 2});
+let mongos = st.s0;
+
+assert.commandWorked(mongos.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
+
+assert.commandWorked(
+ mongos.adminCommand({shardCollection: 'test.user', key: {x: 'hashed'}, numInitialChunks: 2}));
+
+// Ensure that all the pending (received) chunks have been incorporated in the shard's filtering
+// metadata so they will show up in the getShardVersion command
+assert.eq(0, mongos.getDB('test').user.find({}).itcount());
+
+st.printShardingStatus();
+
+function checkMetadata(metadata) {
+ jsTestLog(tojson(metadata));
+
+ assert.eq(1, metadata.chunks.length);
+ assert.eq(0, metadata.pending.length);
+
+ // Check that the single chunk on the shard's metadata is a valid chunk (getShardVersion
+ // represents chunks as an array of [min, max])
+ let chunks = metadata.chunks;
+ assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0);
+}
+
+// Check that the shards' in-memory catalog caches were refreshed
+checkMetadata(assert
+ .commandWorked(st.rs0.getPrimary().adminCommand(
+ {getShardVersion: 'test.user', fullMetadata: true}))
+ .metadata);
+checkMetadata(assert
+ .commandWorked(st.rs1.getPrimary().adminCommand(
+ {getShardVersion: 'test.user', fullMetadata: true}))
+ .metadata);
+
+// Check that the shards' catalogs have the correct UUIDs
+const configUUID = getUUIDFromConfigCollections(mongos, 'test.user');
+const shard0UUID = getUUIDFromListCollections(st.shard0.getDB('test'), 'user');
+const shard1UUID = getUUIDFromListCollections(st.shard1.getDB('test'), 'user');
+assert.eq(configUUID, shard0UUID);
+assert.eq(configUUID, shard1UUID);
+
+// Check that the shards' on-disk caches have the correct number of chunks
+assert.commandWorked(
+ st.shard0.adminCommand({_flushRoutingTableCacheUpdates: 'test.user', syncFromConfig: false}));
+assert.commandWorked(
+ st.shard1.adminCommand({_flushRoutingTableCacheUpdates: 'test.user', syncFromConfig: false}));
+
+const chunksOnConfigCount = st.config.chunks.count({ns: 'test.user'});
+assert.eq(2, chunksOnConfigCount);
+
+const cacheChunksOnShard0 =
+ st.shard0.getDB("config").getCollection("cache.chunks.test.user").find().toArray();
+const cacheChunksOnShard1 =
+ st.shard1.getDB("config").getCollection("cache.chunks.test.user").find().toArray();
+assert.eq(chunksOnConfigCount, cacheChunksOnShard0.length);
+assert.eq(chunksOnConfigCount, cacheChunksOnShard1.length);
+assert.eq(cacheChunksOnShard0, cacheChunksOnShard1);
+
+st.stop();
})();
diff --git a/jstests/sharding/inserts_consistent.js b/jstests/sharding/inserts_consistent.js
index 86db9ae4e82..4bea7d95474 100644
--- a/jstests/sharding/inserts_consistent.js
+++ b/jstests/sharding/inserts_consistent.js
@@ -1,71 +1,71 @@
// Test write re-routing on version mismatch.
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 2});
+var st = new ShardingTest({shards: 2, mongos: 2});
- var mongos = st.s;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var coll = st.s.getCollection('TestDB.coll');
+var mongos = st.s;
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var coll = st.s.getCollection('TestDB.coll');
- assert.commandWorked(mongos.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(mongos.adminCommand({shardCollection: 'TestDB.coll', key: {_id: 1}}));
+assert.commandWorked(mongos.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(mongos.adminCommand({shardCollection: 'TestDB.coll', key: {_id: 1}}));
- jsTest.log("Refreshing second mongos...");
+jsTest.log("Refreshing second mongos...");
- var mongosB = st.s1;
- var adminB = mongosB.getDB("admin");
- var collB = mongosB.getCollection(coll + "");
+var mongosB = st.s1;
+var adminB = mongosB.getDB("admin");
+var collB = mongosB.getCollection(coll + "");
- // Make sure mongosB knows about the coll
- assert.eq(0, collB.find().itcount());
+// Make sure mongosB knows about the coll
+assert.eq(0, collB.find().itcount());
- jsTest.log("Moving chunk to create stale mongos...");
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
+jsTest.log("Moving chunk to create stale mongos...");
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
- jsTest.log("Inserting docs that needs to be retried...");
+jsTest.log("Inserting docs that needs to be retried...");
- var nextId = -1;
- for (var i = 0; i < 2; i++) {
- printjson("Inserting " + nextId);
- assert.writeOK(collB.insert({_id: nextId--, hello: "world"}));
- }
+var nextId = -1;
+for (var i = 0; i < 2; i++) {
+ printjson("Inserting " + nextId);
+ assert.writeOK(collB.insert({_id: nextId--, hello: "world"}));
+}
- jsTest.log("Inserting doc which successfully goes through...");
+jsTest.log("Inserting doc which successfully goes through...");
- // Do second write
- assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
+// Do second write
+assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
- // Assert that write went through
- assert.eq(coll.find().itcount(), 3);
+// Assert that write went through
+assert.eq(coll.find().itcount(), 3);
- jsTest.log("Now try moving the actual chunk we're writing to...");
+jsTest.log("Now try moving the actual chunk we're writing to...");
- // Now move the actual chunk we're writing to
- printjson(admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName}));
+// Now move the actual chunk we're writing to
+printjson(admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName}));
- jsTest.log("Inserting second docs to get written back...");
+jsTest.log("Inserting second docs to get written back...");
- // Will fail entirely if too many of these, waiting for write to get applied can get too long.
- for (var i = 0; i < 2; i++) {
- collB.insert({_id: nextId--, hello: "world"});
- }
+// Will fail entirely if too many of these; waiting for writes to get applied can take too long.
+for (var i = 0; i < 2; i++) {
+ collB.insert({_id: nextId--, hello: "world"});
+}
- // Refresh server
- printjson(adminB.runCommand({flushRouterConfig: 1}));
+// Refresh server
+printjson(adminB.runCommand({flushRouterConfig: 1}));
- jsTest.log("Inserting second doc which successfully goes through...");
+jsTest.log("Inserting second doc which successfully goes through...");
- // Do second write
- assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
+// Do second write
+assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
- jsTest.log("All docs written this time!");
+jsTest.log("All docs written this time!");
- // Assert that writes went through.
- assert.eq(coll.find().itcount(), 6);
+// Assert that writes went through.
+assert.eq(coll.find().itcount(), 6);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/invalid_system_views_sharded_collection.js b/jstests/sharding/invalid_system_views_sharded_collection.js
index 1248a7aee3a..899d4482987 100644
--- a/jstests/sharding/invalid_system_views_sharded_collection.js
+++ b/jstests/sharding/invalid_system_views_sharded_collection.js
@@ -4,76 +4,72 @@
*/
(function() {
- "use strict";
-
- function runTest(st, badViewDefinition) {
- const mongos = st.s;
- const config = mongos.getDB("config");
- const db = mongos.getDB("invalid_system_views");
- assert.commandWorked(db.dropDatabase());
-
- assert.commandWorked(config.adminCommand({enableSharding: db.getName()}));
- st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
-
- // Create sharded and unsharded collections, then insert an invalid view into system.views.
- const viewsCollection = db.getCollection("coll");
- const staticCollection = db.getCollection("staticCollection");
- assert.commandWorked(
- config.adminCommand({shardCollection: viewsCollection.getFullName(), key: {a: 1}}));
- assert.commandWorked(
- config.adminCommand({shardCollection: staticCollection.getFullName(), key: {a: 1}}));
-
- assert.commandWorked(viewsCollection.createIndex({x: 1}));
-
- const unshardedColl = db.getCollection("unshardedColl");
- assert.writeOK(unshardedColl.insert({b: "boo"}));
-
- assert.writeOK(db.system.views.insert(badViewDefinition),
- "failed to insert " + tojson(badViewDefinition));
-
- // Test that a command involving views properly fails with a views-specific error code.
- assert.commandFailedWithCode(
- db.runCommand({listCollections: 1}),
- ErrorCodes.InvalidViewDefinition,
- "listCollections should have failed in the presence of an invalid view");
-
- // Helper function to create a message to use if an assertion fails.
- function makeErrorMessage(msg) {
- return msg +
- " should work on a valid, existing collection, despite the presence of bad views" +
- " in system.views";
- }
+"use strict";
+
+function runTest(st, badViewDefinition) {
+ const mongos = st.s;
+ const config = mongos.getDB("config");
+ const db = mongos.getDB("invalid_system_views");
+ assert.commandWorked(db.dropDatabase());
+
+ assert.commandWorked(config.adminCommand({enableSharding: db.getName()}));
+ st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+
+ // Create sharded and unsharded collections, then insert an invalid view into system.views.
+ const viewsCollection = db.getCollection("coll");
+ const staticCollection = db.getCollection("staticCollection");
+ assert.commandWorked(
+ config.adminCommand({shardCollection: viewsCollection.getFullName(), key: {a: 1}}));
+ assert.commandWorked(
+ config.adminCommand({shardCollection: staticCollection.getFullName(), key: {a: 1}}));
+
+ assert.commandWorked(viewsCollection.createIndex({x: 1}));
+
+ const unshardedColl = db.getCollection("unshardedColl");
+ assert.writeOK(unshardedColl.insert({b: "boo"}));
+
+ assert.writeOK(db.system.views.insert(badViewDefinition),
+ "failed to insert " + tojson(badViewDefinition));
+
+ // Test that a command involving views properly fails with a views-specific error code.
+ assert.commandFailedWithCode(
+ db.runCommand({listCollections: 1}),
+ ErrorCodes.InvalidViewDefinition,
+ "listCollections should have failed in the presence of an invalid view");
+
+ // Helper function to create a message to use if an assertion fails.
+ function makeErrorMessage(msg) {
+ return msg +
+ " should work on a valid, existing collection, despite the presence of bad views" +
+ " in system.views";
+ }
- assert.writeOK(viewsCollection.insert({y: "baz", a: 5}), makeErrorMessage("insert"));
+ assert.writeOK(viewsCollection.insert({y: "baz", a: 5}), makeErrorMessage("insert"));
- assert.writeOK(viewsCollection.update({y: "baz"}, {$set: {y: "qux"}}),
- makeErrorMessage("update"));
+ assert.writeOK(viewsCollection.update({y: "baz"}, {$set: {y: "qux"}}),
+ makeErrorMessage("update"));
- assert.writeOK(viewsCollection.remove({y: "baz"}), makeErrorMessage("remove"));
+ assert.writeOK(viewsCollection.remove({y: "baz"}), makeErrorMessage("remove"));
- assert.commandWorked(
- db.runCommand(
- {findAndModify: viewsCollection.getName(), query: {x: 1, a: 1}, update: {x: 2}}),
- makeErrorMessage("findAndModify with update"));
+ assert.commandWorked(
+ db.runCommand(
+ {findAndModify: viewsCollection.getName(), query: {x: 1, a: 1}, update: {x: 2}}),
+ makeErrorMessage("findAndModify with update"));
- assert.commandWorked(
- db.runCommand(
- {findAndModify: viewsCollection.getName(), query: {x: 2, a: 1}, remove: true}),
- makeErrorMessage("findAndModify with remove"));
+ assert.commandWorked(
+ db.runCommand(
+ {findAndModify: viewsCollection.getName(), query: {x: 2, a: 1}, remove: true}),
+ makeErrorMessage("findAndModify with remove"));
- const lookup = {
- $lookup: {
- from: unshardedColl.getName(),
- localField: "_id",
- foreignField: "_id",
- as: "match"
- }
- };
- assert.commandWorked(
- db.runCommand({aggregate: viewsCollection.getName(), pipeline: [lookup], cursor: {}}),
- makeErrorMessage("aggregate with $lookup"));
+ const lookup = {
+ $lookup:
+ {from: unshardedColl.getName(), localField: "_id", foreignField: "_id", as: "match"}
+ };
+ assert.commandWorked(
+ db.runCommand({aggregate: viewsCollection.getName(), pipeline: [lookup], cursor: {}}),
+ makeErrorMessage("aggregate with $lookup"));
- const graphLookup = {
+ const graphLookup = {
$graphLookup: {
from: unshardedColl.getName(),
startWith: "$_id",
@@ -82,54 +78,46 @@
as: "match"
}
};
- assert.commandWorked(
- db.runCommand(
- {aggregate: viewsCollection.getName(), pipeline: [graphLookup], cursor: {}}),
- makeErrorMessage("aggregate with $graphLookup"));
-
- assert.commandWorked(db.runCommand({dropIndexes: viewsCollection.getName(), index: "x_1"}),
- makeErrorMessage("dropIndexes"));
-
- assert.commandWorked(viewsCollection.createIndex({x: 1}),
- makeErrorMessage("createIndexes"));
-
- assert.commandWorked(
- db.runCommand({collMod: viewsCollection.getName(), validator: {x: {$type: "string"}}}),
- makeErrorMessage("collMod"));
-
- assert.commandWorked(db.runCommand({drop: viewsCollection.getName()}),
- makeErrorMessage("drop"));
- assert.commandWorked(db.runCommand({drop: staticCollection.getName()}),
- makeErrorMessage("drop"));
- assert.commandWorked(db.runCommand({drop: unshardedColl.getName()}),
- makeErrorMessage("drop"));
-
- // Drop the offending view so that the validate hook succeeds.
- db.system.views.remove(badViewDefinition);
- }
-
- const st = new ShardingTest({name: "views_sharded", shards: 2, other: {enableBalancer: false}});
-
- runTest(st,
- {_id: "invalid_system_views.badViewStringPipeline", viewOn: "coll", pipeline: "bad"});
- runTest(st,
- {_id: "invalid_system_views.badViewEmptyObjectPipeline", viewOn: "coll", pipeline: {}});
- runTest(st,
- {_id: "invalid_system_views.badViewNumericalPipeline", viewOn: "coll", pipeline: 7});
- runTest(st, {
- _id: "invalid_system_views.badViewArrayWithIntegerPipeline",
- viewOn: "coll",
- pipeline: [1]
- });
- runTest(st, {
- _id: "invalid_system_views.badViewArrayWithEmptyArrayPipeline",
- viewOn: "coll",
- pipeline: [[]]
- });
- runTest(st, {_id: 7, viewOn: "coll", pipeline: []});
- runTest(st, {_id: "invalid_system_views.embedded\0null", viewOn: "coll", pipeline: []});
- runTest(st, {_id: "invalidNotFullyQualifiedNs", viewOn: "coll", pipeline: []});
- runTest(st, {_id: "invalid_system_views.missingViewOnField", pipeline: []});
-
- st.stop();
+ assert.commandWorked(
+ db.runCommand({aggregate: viewsCollection.getName(), pipeline: [graphLookup], cursor: {}}),
+ makeErrorMessage("aggregate with $graphLookup"));
+
+ assert.commandWorked(db.runCommand({dropIndexes: viewsCollection.getName(), index: "x_1"}),
+ makeErrorMessage("dropIndexes"));
+
+ assert.commandWorked(viewsCollection.createIndex({x: 1}), makeErrorMessage("createIndexes"));
+
+ assert.commandWorked(
+ db.runCommand({collMod: viewsCollection.getName(), validator: {x: {$type: "string"}}}),
+ makeErrorMessage("collMod"));
+
+ assert.commandWorked(db.runCommand({drop: viewsCollection.getName()}),
+ makeErrorMessage("drop"));
+ assert.commandWorked(db.runCommand({drop: staticCollection.getName()}),
+ makeErrorMessage("drop"));
+ assert.commandWorked(db.runCommand({drop: unshardedColl.getName()}), makeErrorMessage("drop"));
+
+ // Drop the offending view so that the validate hook succeeds.
+ db.system.views.remove(badViewDefinition);
+}
+
+const st = new ShardingTest({name: "views_sharded", shards: 2, other: {enableBalancer: false}});
+
+runTest(st, {_id: "invalid_system_views.badViewStringPipeline", viewOn: "coll", pipeline: "bad"});
+runTest(st, {_id: "invalid_system_views.badViewEmptyObjectPipeline", viewOn: "coll", pipeline: {}});
+runTest(st, {_id: "invalid_system_views.badViewNumericalPipeline", viewOn: "coll", pipeline: 7});
+runTest(
+ st,
+ {_id: "invalid_system_views.badViewArrayWithIntegerPipeline", viewOn: "coll", pipeline: [1]});
+runTest(st, {
+ _id: "invalid_system_views.badViewArrayWithEmptyArrayPipeline",
+ viewOn: "coll",
+ pipeline: [[]]
+});
+runTest(st, {_id: 7, viewOn: "coll", pipeline: []});
+runTest(st, {_id: "invalid_system_views.embedded\0null", viewOn: "coll", pipeline: []});
+runTest(st, {_id: "invalidNotFullyQualifiedNs", viewOn: "coll", pipeline: []});
+runTest(st, {_id: "invalid_system_views.missingViewOnField", pipeline: []});
+
+st.stop();
}());
diff --git a/jstests/sharding/json_schema.js b/jstests/sharding/json_schema.js
index 1c24f427eed..5a4a68102b7 100644
--- a/jstests/sharding/json_schema.js
+++ b/jstests/sharding/json_schema.js
@@ -2,61 +2,64 @@
* Tests for $jsonSchema queries in a sharded cluster.
*/
(function() {
- "use strict";
-
- const dbName = "json_schema_sharding";
-
- var st = new ShardingTest({shards: 2, mongos: 1, config: 1});
-
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.name);
-
- const testDB = st.s.getDB(dbName);
- const coll = testDB.json_schema_sharding;
- coll.drop();
-
- // Shard the collection on _id.
- assert.commandWorked(testDB.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 4 chunks: [MinKey, -100), [-100, 0), [0, 100), [100, MaxKey).
- assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {_id: -100}}));
- assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {_id: 100}}));
-
- // Move the [0, 100) and [100, MaxKey) chunks to st.shard1.shardName.
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {_id: 50}, to: st.shard1.shardName}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
-
- // Write one document into each of the chunks.
- assert.writeOK(coll.insert({_id: -150, a: 1}));
- assert.writeOK(coll.insert({_id: -50, a: 10}));
- assert.writeOK(coll.insert({_id: 50, a: "str"}));
- assert.writeOK(coll.insert({_id: 150}));
-
- // Test that $jsonSchema in a find command returns the correct results.
- assert.eq(4, coll.find({$jsonSchema: {}}).itcount());
- assert.eq(3, coll.find({$jsonSchema: {properties: {a: {type: "number"}}}}).itcount());
- assert.eq(4, coll.find({$jsonSchema: {required: ["_id"]}}).itcount());
- assert.eq(1, coll.find({$jsonSchema: {properties: {_id: {minimum: 150}}}}).itcount());
-
- // Test that $jsonSchema works correctly in an update command.
- let res = coll.update(
- {$jsonSchema: {properties: {_id: {type: "number", minimum: 100}, a: {type: "number"}}}},
- {$inc: {a: 1}},
- {multi: true});
- assert.writeOK(res);
- assert.eq(1, res.nModified);
-
- const schema = {properties: {_id: {type: "number", minimum: 100}}, required: ["_id"]};
- res = coll.update({$jsonSchema: schema}, {$set: {b: 1}}, {multi: true});
- assert.writeOK(res);
- assert.eq(1, res.nModified);
-
- // Test that $jsonSchema works correctly in a findAndModify command.
- res = coll.findAndModify({query: {_id: 150, $jsonSchema: schema}, update: {$set: {b: 1}}});
- assert.eq(1, res.b);
-
- st.stop();
+"use strict";
+
+const dbName = "json_schema_sharding";
+
+var st = new ShardingTest({shards: 2, mongos: 1, config: 1});
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.name);
+
+const testDB = st.s.getDB(dbName);
+const coll = testDB.json_schema_sharding;
+coll.drop();
+
+// Shard the collection on _id.
+assert.commandWorked(testDB.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 4 chunks: [MinKey, -100), [-100, 0), [0, 100), [100, MaxKey).
+assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {_id: -100}}));
+assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {_id: 100}}));
+
+// Move the [0, 100) and [100, MaxKey) chunks to st.shard1.shardName.
+assert.commandWorked(
+ testDB.adminCommand({moveChunk: coll.getFullName(), find: {_id: 50}, to: st.shard1.shardName}));
+assert.commandWorked(testDB.adminCommand(
+ {moveChunk: coll.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
+
+// Write one document into each of the chunks.
+assert.writeOK(coll.insert({_id: -150, a: 1}));
+assert.writeOK(coll.insert({_id: -50, a: 10}));
+assert.writeOK(coll.insert({_id: 50, a: "str"}));
+assert.writeOK(coll.insert({_id: 150}));
+
+// Test that $jsonSchema in a find command returns the correct results.
+assert.eq(4, coll.find({$jsonSchema: {}}).itcount());
+assert.eq(3, coll.find({$jsonSchema: {properties: {a: {type: "number"}}}}).itcount());
+assert.eq(4, coll.find({$jsonSchema: {required: ["_id"]}}).itcount());
+assert.eq(1, coll.find({$jsonSchema: {properties: {_id: {minimum: 150}}}}).itcount());
+
+// Test that $jsonSchema works correctly in an update command.
+let res = coll.update(
+ {$jsonSchema: {properties: {_id: {type: "number", minimum: 100}, a: {type: "number"}}}},
+ {$inc: {a: 1}},
+ {multi: true});
+assert.writeOK(res);
+assert.eq(1, res.nModified);
+
+const schema = {
+ properties: {_id: {type: "number", minimum: 100}},
+ required: ["_id"]
+};
+res = coll.update({$jsonSchema: schema}, {$set: {b: 1}}, {multi: true});
+assert.writeOK(res);
+assert.eq(1, res.nModified);
+
+// Test that $jsonSchema works correctly in a findAndModify command.
+res = coll.findAndModify({query: {_id: 150, $jsonSchema: schema}, update: {$set: {b: 1}}});
+assert.eq(1, res.b);
+
+st.stop();
})();
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index 573d7b1e5f7..fe8e04e7492 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -1,58 +1,57 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
+var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
- var db = s.getDB("test");
+var db = s.getDB("test");
- const big = 'X'.repeat(10000);
+const big = 'X'.repeat(10000);
- // Create sufficient documents to create a jumbo chunk, and use the same shard key in all of
- // them so that the chunk cannot be split.
- var x = 0;
- var bulk = db.foo.initializeUnorderedBulkOp();
- for (var i = 0; i < 500; i++) {
- bulk.insert({x: x, big: big});
- }
+// Create sufficient documents to create a jumbo chunk, and use the same shard key in all of
+// them so that the chunk cannot be split.
+var x = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 500; i++) {
+ bulk.insert({x: x, big: big});
+}
- // Create documents with different shard keys that can be split and moved without issue.
- for (; x < 1500; x++) {
- bulk.insert({x: x, big: big});
- }
+// Create documents with different shard keys that can be split and moved without issue.
+for (; x < 1500; x++) {
+ bulk.insert({x: x, big: big});
+}
- assert.writeOK(bulk.execute());
+assert.writeOK(bulk.execute());
+s.printShardingStatus(true);
+
+s.startBalancer();
+
+function diff1() {
+ var x = s.chunkCounts("foo");
+ printjson(x);
+ return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
+ Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
+}
+
+assert.soon(function() {
+ var d = diff1();
+ print("diff: " + d);
s.printShardingStatus(true);
+ return d < 5;
+}, "balance didn't happen", 1000 * 60 * 10, 5000);
+
+// Check that the jumbo chunk did not move, since moving a jumbo chunk shouldn't be possible.
+var jumboChunk =
+ s.getDB('config').chunks.findOne({ns: 'test.foo', min: {$lte: {x: 0}}, max: {$gt: {x: 0}}});
+assert.eq(s.shard1.shardName, jumboChunk.shard, 'jumbo chunk ' + tojson(jumboChunk) + ' was moved');
+
+// TODO: SERVER-26531 Make sure that balancer marked the first chunk as jumbo.
+// Assumption: balancer favors moving the lowest valued chunk out of a shard.
+// assert(jumboChunk.jumbo, tojson(jumboChunk));
- s.startBalancer();
-
- function diff1() {
- var x = s.chunkCounts("foo");
- printjson(x);
- return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
- Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
- }
-
- assert.soon(function() {
- var d = diff1();
- print("diff: " + d);
- s.printShardingStatus(true);
- return d < 5;
- }, "balance didn't happen", 1000 * 60 * 10, 5000);
-
- // Check that the jumbo chunk did not move, which shouldn't be possible.
- var jumboChunk =
- s.getDB('config').chunks.findOne({ns: 'test.foo', min: {$lte: {x: 0}}, max: {$gt: {x: 0}}});
- assert.eq(
- s.shard1.shardName, jumboChunk.shard, 'jumbo chunk ' + tojson(jumboChunk) + ' was moved');
-
- // TODO: SERVER-26531 Make sure that balancer marked the first chunk as jumbo.
- // Assumption: balancer favors moving the lowest valued chunk out of a shard.
- // assert(jumboChunk.jumbo, tojson(jumboChunk));
-
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index c671e691f94..e0d19e8874b 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -1,252 +1,240 @@
(function() {
- 'use strict';
-
- // Values have to be sorted - you must have exactly 6 values in each array
- var types = [
- {name: "string", values: ["allan", "bob", "eliot", "joe", "mark", "sara"], keyfield: "k"},
- {name: "double", values: [1.2, 3.5, 4.5, 4.6, 6.7, 9.9], keyfield: "a"},
- {
- name: "date",
- values: [
- new Date(1000000),
- new Date(2000000),
- new Date(3000000),
- new Date(4000000),
- new Date(5000000),
- new Date(6000000)
- ],
- keyfield: "a"
- },
- {
- name: "string_id",
- values: ["allan", "bob", "eliot", "joe", "mark", "sara"],
- keyfield: "_id"
- },
- {
- name: "embedded 1",
- values: ["allan", "bob", "eliot", "joe", "mark", "sara"],
- keyfield: "a.b"
- },
- {
- name: "embedded 2",
- values: ["allan", "bob", "eliot", "joe", "mark", "sara"],
- keyfield: "a.b.c"
- },
- {
- name: "object",
- values: [
- {a: 1, b: 1.2},
- {a: 1, b: 3.5},
- {a: 1, b: 4.5},
- {a: 2, b: 1.2},
- {a: 2, b: 3.5},
- {a: 2, b: 4.5}
- ],
- keyfield: "o"
- },
- {
- name: "compound",
- values: [
- {a: 1, b: 1.2},
- {a: 1, b: 3.5},
- {a: 1, b: 4.5},
- {a: 2, b: 1.2},
- {a: 2, b: 3.5},
- {a: 2, b: 4.5}
- ],
- keyfield: "o",
- compound: true
- },
- {
- name: "oid_id",
- values: [ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId()],
- keyfield: "_id"
- },
- {
- name: "oid_other",
- values: [ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId()],
- keyfield: "o"
- },
- ];
-
- var s = new ShardingTest({name: "key_many", shards: 2});
-
- assert.commandWorked(s.s0.adminCommand({enableSharding: 'test'}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- var db = s.getDB('test');
- var primary = s.getPrimaryShard("test").getDB("test");
- var secondary = s.getOther(primary).getDB("test");
-
- var curT;
-
- function makeObjectDotted(v) {
- var o = {};
- if (curT.compound) {
- var prefix = curT.keyfield + '.';
- if (typeof(v) == 'object') {
- for (var key in v)
- o[prefix + key] = v[key];
- } else {
- for (var key in curT.values[0])
- o[prefix + key] = v;
- }
+'use strict';
+
+// Values have to be sorted - you must have exactly 6 values in each array
+var types = [
+ {name: "string", values: ["allan", "bob", "eliot", "joe", "mark", "sara"], keyfield: "k"},
+ {name: "double", values: [1.2, 3.5, 4.5, 4.6, 6.7, 9.9], keyfield: "a"},
+ {
+ name: "date",
+ values: [
+ new Date(1000000),
+ new Date(2000000),
+ new Date(3000000),
+ new Date(4000000),
+ new Date(5000000),
+ new Date(6000000)
+ ],
+ keyfield: "a"
+ },
+ {name: "string_id", values: ["allan", "bob", "eliot", "joe", "mark", "sara"], keyfield: "_id"},
+ {name: "embedded 1", values: ["allan", "bob", "eliot", "joe", "mark", "sara"], keyfield: "a.b"},
+ {
+ name: "embedded 2",
+ values: ["allan", "bob", "eliot", "joe", "mark", "sara"],
+ keyfield: "a.b.c"
+ },
+ {
+ name: "object",
+ values: [
+ {a: 1, b: 1.2},
+ {a: 1, b: 3.5},
+ {a: 1, b: 4.5},
+ {a: 2, b: 1.2},
+ {a: 2, b: 3.5},
+ {a: 2, b: 4.5}
+ ],
+ keyfield: "o"
+ },
+ {
+ name: "compound",
+ values: [
+ {a: 1, b: 1.2},
+ {a: 1, b: 3.5},
+ {a: 1, b: 4.5},
+ {a: 2, b: 1.2},
+ {a: 2, b: 3.5},
+ {a: 2, b: 4.5}
+ ],
+ keyfield: "o",
+ compound: true
+ },
+ {
+ name: "oid_id",
+ values: [ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId()],
+ keyfield: "_id"
+ },
+ {
+ name: "oid_other",
+ values: [ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId()],
+ keyfield: "o"
+ },
+];
+
+var s = new ShardingTest({name: "key_many", shards: 2});
+
+assert.commandWorked(s.s0.adminCommand({enableSharding: 'test'}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+var db = s.getDB('test');
+var primary = s.getPrimaryShard("test").getDB("test");
+var secondary = s.getOther(primary).getDB("test");
+
+var curT;
+
+function makeObjectDotted(v) {
+ var o = {};
+ if (curT.compound) {
+ var prefix = curT.keyfield + '.';
+ if (typeof (v) == 'object') {
+ for (var key in v)
+ o[prefix + key] = v[key];
} else {
- o[curT.keyfield] = v;
+ for (var key in curT.values[0])
+ o[prefix + key] = v;
}
- return o;
+ } else {
+ o[curT.keyfield] = v;
}
+ return o;
+}
- function makeObject(v) {
- var o = {};
- var p = o;
+function makeObject(v) {
+ var o = {};
+ var p = o;
- var keys = curT.keyfield.split('.');
- for (var i = 0; i < keys.length - 1; i++) {
- p[keys[i]] = {};
- p = p[keys[i]];
- }
+ var keys = curT.keyfield.split('.');
+ for (var i = 0; i < keys.length - 1; i++) {
+ p[keys[i]] = {};
+ p = p[keys[i]];
+ }
- p[keys[i]] = v;
+ p[keys[i]] = v;
- return o;
- }
+ return o;
+}
- function makeInQuery() {
- if (curT.compound) {
- // cheating a bit...
- return {'o.a': {$in: [1, 2]}};
- } else {
- return makeObjectDotted({$in: curT.values});
- }
+function makeInQuery() {
+ if (curT.compound) {
+ // cheating a bit...
+ return {'o.a': {$in: [1, 2]}};
+ } else {
+ return makeObjectDotted({$in: curT.values});
}
+}
- function getKey(o) {
- var keys = curT.keyfield.split('.');
- for (var i = 0; i < keys.length; i++) {
- o = o[keys[i]];
- }
- return o;
+function getKey(o) {
+ var keys = curT.keyfield.split('.');
+ for (var i = 0; i < keys.length; i++) {
+ o = o[keys[i]];
}
+ return o;
+}
- Random.setRandomSeed();
+Random.setRandomSeed();
- for (var i = 0; i < types.length; i++) {
- curT = types[i];
+for (var i = 0; i < types.length; i++) {
+ curT = types[i];
- print("\n\n#### Now Testing " + curT.name + " ####\n\n");
+ print("\n\n#### Now Testing " + curT.name + " ####\n\n");
- var shortName = "foo_" + curT.name;
- var longName = "test." + shortName;
+ var shortName = "foo_" + curT.name;
+ var longName = "test." + shortName;
- var c = db[shortName];
- s.adminCommand({shardcollection: longName, key: makeObjectDotted(1)});
+ var c = db[shortName];
+ s.adminCommand({shardcollection: longName, key: makeObjectDotted(1)});
- assert.eq(1, s.config.chunks.find({ns: longName}).count(), curT.name + " sanity check A");
+ assert.eq(1, s.config.chunks.find({ns: longName}).count(), curT.name + " sanity check A");
- var unsorted = Array.shuffle(Object.extend([], curT.values));
- c.insert(makeObject(unsorted[0]));
- for (var x = 1; x < unsorted.length; x++) {
- c.save(makeObject(unsorted[x]));
- }
-
- assert.eq(6, c.find().count(), curT.name + " basic count");
-
- s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[0])});
- s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[2])});
- s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[5])});
-
- s.adminCommand({
- movechunk: longName,
- find: makeObjectDotted(curT.values[2]),
- to: secondary.getMongo().name,
- _waitForDelete: true
- });
-
- s.printChunks();
-
- assert.eq(3, primary[shortName].find().toArray().length, curT.name + " primary count");
- assert.eq(3, secondary[shortName].find().toArray().length, curT.name + " secondary count");
-
- assert.eq(6, c.find().toArray().length, curT.name + " total count");
- assert.eq(6,
- c.find().sort(makeObjectDotted(1)).toArray().length,
- curT.name + " total count sorted");
-
- assert.eq(
- 6, c.find().sort(makeObjectDotted(1)).count(), curT.name + " total count with count()");
-
- assert.eq(2,
- c.find({
- $or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
- }).count(),
- curT.name + " $or count()");
- assert.eq(2,
- c.find({
- $or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
- }).itcount(),
- curT.name + " $or itcount()");
- assert.eq(4,
- c.find({
- $nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
- }).count(),
- curT.name + " $nor count()");
- assert.eq(4,
- c.find({
- $nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
- }).itcount(),
- curT.name + " $nor itcount()");
-
- var stats = c.stats();
- printjson(stats);
- assert.eq(6, stats.count, curT.name + " total count with stats()");
-
- var count = 0;
- for (var shard in stats.shards) {
- count += stats.shards[shard].count;
- }
- assert.eq(6, count, curT.name + " total count with stats() sum");
-
- assert.eq(curT.values,
- c.find().sort(makeObjectDotted(1)).toArray().map(getKey),
- curT.name + " sort 1");
- assert.eq(curT.values,
- c.find(makeInQuery()).sort(makeObjectDotted(1)).toArray().map(getKey),
- curT.name + " sort 1 - $in");
- assert.eq(curT.values.reverse(),
- c.find().sort(makeObjectDotted(-1)).toArray().map(getKey),
- curT.name + " sort 2");
-
- assert.eq(0, c.find({xx: 17}).sort({zz: 1}).count(), curT.name + " xx 0a ");
- assert.eq(0, c.find({xx: 17}).sort(makeObjectDotted(1)).count(), curT.name + " xx 0b ");
- assert.eq(0, c.find({xx: 17}).count(), curT.name + " xx 0c ");
- assert.eq(0, c.find({xx: {$exists: true}}).count(), curT.name + " xx 1 ");
-
- c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}});
- assert.eq(1, c.find({xx: {$exists: true}}).count(), curT.name + " xx 2 ");
- assert.eq(curT.values[3], getKey(c.findOne({xx: 17})), curT.name + " xx 3 ");
-
- assert.writeOK(
- c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}}, {upsert: true}));
-
- assert.commandWorked(c.ensureIndex({_id: 1}));
-
- // multi update
- var mysum = 0;
- c.find().forEach(function(z) {
- mysum += z.xx || 0;
- });
- assert.eq(17, mysum, curT.name + " multi update pre");
-
- c.update({}, {$inc: {xx: 1}}, false, true);
-
- var mysum = 0;
- c.find().forEach(function(z) {
- mysum += z.xx || 0;
- });
- assert.eq(23, mysum, curT.name + " multi update");
+ var unsorted = Array.shuffle(Object.extend([], curT.values));
+ c.insert(makeObject(unsorted[0]));
+ for (var x = 1; x < unsorted.length; x++) {
+ c.save(makeObject(unsorted[x]));
}
- s.stop();
-
+ assert.eq(6, c.find().count(), curT.name + " basic count");
+
+ s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[0])});
+ s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[2])});
+ s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[5])});
+
+ s.adminCommand({
+ movechunk: longName,
+ find: makeObjectDotted(curT.values[2]),
+ to: secondary.getMongo().name,
+ _waitForDelete: true
+ });
+
+ s.printChunks();
+
+ assert.eq(3, primary[shortName].find().toArray().length, curT.name + " primary count");
+ assert.eq(3, secondary[shortName].find().toArray().length, curT.name + " secondary count");
+
+ assert.eq(6, c.find().toArray().length, curT.name + " total count");
+ assert.eq(
+ 6, c.find().sort(makeObjectDotted(1)).toArray().length, curT.name + " total count sorted");
+
+ assert.eq(
+ 6, c.find().sort(makeObjectDotted(1)).count(), curT.name + " total count with count()");
+
+ assert.eq(
+ 2,
+ c.find({$or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).count(),
+ curT.name + " $or count()");
+ assert.eq(2,
+ c.find({
+ $or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
+ }).itcount(),
+ curT.name + " $or itcount()");
+ assert.eq(4,
+ c.find({
+ $nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
+ }).count(),
+ curT.name + " $nor count()");
+ assert.eq(4,
+ c.find({
+ $nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
+ }).itcount(),
+ curT.name + " $nor itcount()");
+
+ var stats = c.stats();
+ printjson(stats);
+ assert.eq(6, stats.count, curT.name + " total count with stats()");
+
+ var count = 0;
+ for (var shard in stats.shards) {
+ count += stats.shards[shard].count;
+ }
+ assert.eq(6, count, curT.name + " total count with stats() sum");
+
+ assert.eq(curT.values,
+ c.find().sort(makeObjectDotted(1)).toArray().map(getKey),
+ curT.name + " sort 1");
+ assert.eq(curT.values,
+ c.find(makeInQuery()).sort(makeObjectDotted(1)).toArray().map(getKey),
+ curT.name + " sort 1 - $in");
+ assert.eq(curT.values.reverse(),
+ c.find().sort(makeObjectDotted(-1)).toArray().map(getKey),
+ curT.name + " sort 2");
+
+ assert.eq(0, c.find({xx: 17}).sort({zz: 1}).count(), curT.name + " xx 0a ");
+ assert.eq(0, c.find({xx: 17}).sort(makeObjectDotted(1)).count(), curT.name + " xx 0b ");
+ assert.eq(0, c.find({xx: 17}).count(), curT.name + " xx 0c ");
+ assert.eq(0, c.find({xx: {$exists: true}}).count(), curT.name + " xx 1 ");
+
+ c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}});
+ assert.eq(1, c.find({xx: {$exists: true}}).count(), curT.name + " xx 2 ");
+ assert.eq(curT.values[3], getKey(c.findOne({xx: 17})), curT.name + " xx 3 ");
+
+ assert.writeOK(c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}}, {upsert: true}));
+
+ assert.commandWorked(c.ensureIndex({_id: 1}));
+
+ // multi update
+ var mysum = 0;
+ c.find().forEach(function(z) {
+ mysum += z.xx || 0;
+ });
+ assert.eq(17, mysum, curT.name + " multi update pre");
+
+ c.update({}, {$inc: {xx: 1}}, false, true);
+
+ var mysum = 0;
+ c.find().forEach(function(z) {
+ mysum += z.xx || 0;
+ });
+ assert.eq(23, mysum, curT.name + " multi update");
+}
+
+s.stop();
})();
diff --git a/jstests/sharding/key_rotation.js b/jstests/sharding/key_rotation.js
index 7067efa1cd5..da969e087f5 100644
--- a/jstests/sharding/key_rotation.js
+++ b/jstests/sharding/key_rotation.js
@@ -14,79 +14,79 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- let st = new ShardingTest({shards: {rs0: {nodes: 2}}});
-
- // Verify after startup there is a new key in admin.system.keys.
- jsTestLog("Verify the admin.system.keys collection after startup.");
-
- let startupKeys = st.s.getDB("admin").system.keys.find();
- assert(startupKeys.count() >= 2); // Should be at least two generations of keys available.
- startupKeys.toArray().forEach(function(key, i) {
- assert.hasFields(
- key,
- ["purpose", "key", "expiresAt"],
- "key document " + i + ": " + tojson(key) + ", did not have all of the expected fields");
- });
-
- // Verify there is a $clusterTime with a signature in the response.
- jsTestLog("Verify a signature is included in the cluster time in a response.");
-
- let res = assert.commandWorked(st.s.getDB("test").runCommand({isMaster: 1}));
- assert.hasFields(res, ["$clusterTime"]);
- assert.hasFields(res.$clusterTime, ["signature"]);
- assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
-
- // Verify manual key rotation.
- jsTestLog("Verify manual key rotation.");
-
- // Pause key generation on the config server primary.
- for (let i = 0; i < st.configRS.nodes.length; i++) {
- st.configRS.nodes[i].adminCommand(
- {configureFailPoint: "disableKeyGeneration", mode: "alwaysOn"});
- }
-
- // Delete all existing keys.
- res = st.configRS.getPrimary().getDB("admin").system.keys.remove({purpose: "HMAC"});
- assert(res.nRemoved >= 2);
- assert(st.s.getDB("admin").system.keys.find().count() == 0);
-
- // Restart the config servers, so they will create new keys once the failpoint is disabled.
- st.configRS.stopSet(null /* signal */, true /* forRestart */);
- st.configRS.startSet(
- {restart: true, setParameter: {"failpoint.disableKeyGeneration": "{'mode':'alwaysOn'}"}});
-
- // Limit the max time between refreshes on the config server, so new keys are created quickly.
- st.configRS.getPrimary().adminCommand({
- "configureFailPoint": "maxKeyRefreshWaitTimeOverrideMS",
- "mode": "alwaysOn",
- "data": {"overrideMS": 1000}
- });
-
- // Kill and restart all shards and mongos processes so they have no keys in memory.
- st.rs0.stopSet(null /* signal */, true /* forRestart */);
- st.rs0.startSet({restart: true});
-
- // The shard primary should return a dummy signed cluster time, because there are no keys.
- res = assert.commandWorked(st.rs0.getPrimary().getDB("test").runCommand({isMaster: 1}));
- assert.hasFields(res, ["$clusterTime", "operationTime"]);
- assert.eq(res.$clusterTime.signature.keyId, NumberLong(0));
-
- // Resume key generation.
- for (let i = 0; i < st.configRS.nodes.length; i++) {
- st.configRS.getPrimary().adminCommand(
- {configureFailPoint: "disableKeyGeneration", mode: "off"});
- }
-
- st.restartMongos(0);
-
- // Wait for config server primary to create new keys.
- assert.soonNoExcept(function() {
- let keys = st.s.getDB("admin").system.keys.find();
- assert(keys.count() >= 2);
- return true;
- }, "expected the config server primary to create new keys");
-
- st.stop();
+"use strict";
+
+let st = new ShardingTest({shards: {rs0: {nodes: 2}}});
+
+// Verify after startup there is a new key in admin.system.keys.
+jsTestLog("Verify the admin.system.keys collection after startup.");
+
+let startupKeys = st.s.getDB("admin").system.keys.find();
+assert(startupKeys.count() >= 2); // Should be at least two generations of keys available.
+startupKeys.toArray().forEach(function(key, i) {
+ assert.hasFields(
+ key,
+ ["purpose", "key", "expiresAt"],
+ "key document " + i + ": " + tojson(key) + ", did not have all of the expected fields");
+});
+
+// Verify there is a $clusterTime with a signature in the response.
+jsTestLog("Verify a signature is included in the cluster time in a response.");
+
+let res = assert.commandWorked(st.s.getDB("test").runCommand({isMaster: 1}));
+assert.hasFields(res, ["$clusterTime"]);
+assert.hasFields(res.$clusterTime, ["signature"]);
+assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
+
+// Verify manual key rotation.
+jsTestLog("Verify manual key rotation.");
+
+// Pause key generation on the config server primary.
+for (let i = 0; i < st.configRS.nodes.length; i++) {
+ st.configRS.nodes[i].adminCommand(
+ {configureFailPoint: "disableKeyGeneration", mode: "alwaysOn"});
+}
+
+// Delete all existing keys.
+res = st.configRS.getPrimary().getDB("admin").system.keys.remove({purpose: "HMAC"});
+assert(res.nRemoved >= 2);
+assert(st.s.getDB("admin").system.keys.find().count() == 0);
+
+// Restart the config servers, so they will create new keys once the failpoint is disabled.
+st.configRS.stopSet(null /* signal */, true /* forRestart */);
+st.configRS.startSet(
+ {restart: true, setParameter: {"failpoint.disableKeyGeneration": "{'mode':'alwaysOn'}"}});
+
+// Limit the max time between refreshes on the config server, so new keys are created quickly.
+st.configRS.getPrimary().adminCommand({
+ "configureFailPoint": "maxKeyRefreshWaitTimeOverrideMS",
+ "mode": "alwaysOn",
+ "data": {"overrideMS": 1000}
+});
+
+// Kill and restart all shards and mongos processes so they have no keys in memory.
+st.rs0.stopSet(null /* signal */, true /* forRestart */);
+st.rs0.startSet({restart: true});
+
+// The shard primary should return a dummy signed cluster time, because there are no keys.
+res = assert.commandWorked(st.rs0.getPrimary().getDB("test").runCommand({isMaster: 1}));
+assert.hasFields(res, ["$clusterTime", "operationTime"]);
+assert.eq(res.$clusterTime.signature.keyId, NumberLong(0));
+
+// Resume key generation.
+for (let i = 0; i < st.configRS.nodes.length; i++) {
+ st.configRS.getPrimary().adminCommand(
+ {configureFailPoint: "disableKeyGeneration", mode: "off"});
+}
+
+st.restartMongos(0);
+
+// Wait for config server primary to create new keys.
+assert.soonNoExcept(function() {
+ let keys = st.s.getDB("admin").system.keys.find();
+ assert(keys.count() >= 2);
+ return true;
+}, "expected the config server primary to create new keys");
+
+st.stop();
})();
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
index c3fc654bf11..78fa4c4d5dc 100644
--- a/jstests/sharding/key_string.js
+++ b/jstests/sharding/key_string.js
@@ -1,68 +1,67 @@
(function() {
- var s = new ShardingTest({name: "keystring", shards: 2});
+var s = new ShardingTest({name: "keystring", shards: 2});
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.foo", key: {name: 1}});
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.foo", key: {name: 1}});
- primary = s.getPrimaryShard("test").getDB("test");
- seconday = s.getOther(primary).getDB("test");
+primary = s.getPrimaryShard("test").getDB("test");
+seconday = s.getOther(primary).getDB("test");
- assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check A");
+assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check A");
- var db = s.getDB("test");
+var db = s.getDB("test");
- db.foo.save({name: "eliot"});
- db.foo.save({name: "sara"});
- db.foo.save({name: "bob"});
- db.foo.save({name: "joe"});
- db.foo.save({name: "mark"});
- db.foo.save({name: "allan"});
+db.foo.save({name: "eliot"});
+db.foo.save({name: "sara"});
+db.foo.save({name: "bob"});
+db.foo.save({name: "joe"});
+db.foo.save({name: "mark"});
+db.foo.save({name: "allan"});
- assert.eq(6, db.foo.find().count(), "basic count");
+assert.eq(6, db.foo.find().count(), "basic count");
+s.adminCommand({split: "test.foo", middle: {name: "allan"}});
+s.adminCommand({split: "test.foo", middle: {name: "sara"}});
+s.adminCommand({split: "test.foo", middle: {name: "eliot"}});
+
+s.adminCommand({
+ movechunk: "test.foo",
+ find: {name: "eliot"},
+ to: seconday.getMongo().name,
+ _waitForDelete: true
+});
+
+s.printChunks();
+
+assert.eq(3, primary.foo.find().toArray().length, "primary count");
+assert.eq(3, seconday.foo.find().toArray().length, "secondary count");
+
+assert.eq(6, db.foo.find().toArray().length, "total count");
+assert.eq(6, db.foo.find().sort({name: 1}).toArray().length, "total count sorted");
+
+assert.eq(6, db.foo.find().sort({name: 1}).count(), "total count with count()");
+
+assert.eq("allan,bob,eliot,joe,mark,sara",
+ db.foo.find().sort({name: 1}).toArray().map(function(z) {
+ return z.name;
+ }),
+ "sort 1");
+assert.eq("sara,mark,joe,eliot,bob,allan",
+ db.foo.find().sort({name: -1}).toArray().map(function(z) {
+ return z.name;
+ }),
+ "sort 2");
+
+// make sure we can't force a split on an extreme key
+// [allan->joe)
+assert.throws(function() {
s.adminCommand({split: "test.foo", middle: {name: "allan"}});
- s.adminCommand({split: "test.foo", middle: {name: "sara"}});
+});
+assert.throws(function() {
s.adminCommand({split: "test.foo", middle: {name: "eliot"}});
+});
- s.adminCommand({
- movechunk: "test.foo",
- find: {name: "eliot"},
- to: seconday.getMongo().name,
- _waitForDelete: true
- });
-
- s.printChunks();
-
- assert.eq(3, primary.foo.find().toArray().length, "primary count");
- assert.eq(3, seconday.foo.find().toArray().length, "secondary count");
-
- assert.eq(6, db.foo.find().toArray().length, "total count");
- assert.eq(6, db.foo.find().sort({name: 1}).toArray().length, "total count sorted");
-
- assert.eq(6, db.foo.find().sort({name: 1}).count(), "total count with count()");
-
- assert.eq("allan,bob,eliot,joe,mark,sara",
- db.foo.find().sort({name: 1}).toArray().map(function(z) {
- return z.name;
- }),
- "sort 1");
- assert.eq("sara,mark,joe,eliot,bob,allan",
- db.foo.find().sort({name: -1}).toArray().map(function(z) {
- return z.name;
- }),
- "sort 2");
-
- // make sure we can't foce a split on an extreme key
- // [allan->joe)
- assert.throws(function() {
- s.adminCommand({split: "test.foo", middle: {name: "allan"}});
- });
- assert.throws(function() {
- s.adminCommand({split: "test.foo", middle: {name: "eliot"}});
- });
-
- s.stop();
-
+s.stop();
})();
diff --git a/jstests/sharding/keys_rotation_interval_sec.js b/jstests/sharding/keys_rotation_interval_sec.js
index 310b3cd612e..4234786df35 100644
--- a/jstests/sharding/keys_rotation_interval_sec.js
+++ b/jstests/sharding/keys_rotation_interval_sec.js
@@ -3,28 +3,28 @@
*/
(function() {
- "use strict";
- const kRotationInterval = 30;
- let st = new ShardingTest({
- mongos: 1,
- shards: {rs0: {nodes: 2}},
- other: {configOptions: {setParameter: "KeysRotationIntervalSec=30"}}
- });
+"use strict";
+const kRotationInterval = 30;
+let st = new ShardingTest({
+ mongos: 1,
+ shards: {rs0: {nodes: 2}},
+ other: {configOptions: {setParameter: "KeysRotationIntervalSec=30"}}
+});
- let keys = st.s.getDB("admin").system.keys.find();
- // add a few seconds to the expire timestamp to account for rounding that may happen.
- let maxExpireTime = Timestamp(Date.now() / 1000 + kRotationInterval * 2 + 5, 0);
+let keys = st.s.getDB("admin").system.keys.find();
+// add a few seconds to the expire timestamp to account for rounding that may happen.
+let maxExpireTime = Timestamp(Date.now() / 1000 + kRotationInterval * 2 + 5, 0);
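+// For example, with kRotationInterval = 30 this allows expiresAt values up to roughly
+// now + 65 seconds (two rotation intervals plus the 5-second rounding allowance).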
- assert(keys.count() >= 2);
- keys.toArray().forEach(function(key, i) {
- assert.hasFields(
- key,
- ["purpose", "key", "expiresAt"],
- "key document " + i + ": " + tojson(key) + ", did not have all of the expected fields");
- assert.lte(bsonWoCompare(key.expiresAt, maxExpireTime),
- 0,
- "key document " + i + ": " + tojson(key) + "expiresAt value is greater than: " +
- maxExpireTime);
- });
- st.stop();
+assert(keys.count() >= 2);
+keys.toArray().forEach(function(key, i) {
+ assert.hasFields(
+ key,
+ ["purpose", "key", "expiresAt"],
+ "key document " + i + ": " + tojson(key) + ", did not have all of the expected fields");
+ assert.lte(bsonWoCompare(key.expiresAt, maxExpireTime),
+ 0,
+ "key document " + i + ": " + tojson(key) +
+ "expiresAt value is greater than: " + maxExpireTime);
+});
+st.stop();
})();
diff --git a/jstests/sharding/kill_op_overflow.js b/jstests/sharding/kill_op_overflow.js
index 6ca5c236bab..b433ba60702 100644
--- a/jstests/sharding/kill_op_overflow.js
+++ b/jstests/sharding/kill_op_overflow.js
@@ -3,10 +3,10 @@
* failure being propagated back to the client.
*/
(function() {
- "use strict";
- var st = new ShardingTest({name: "shard1", shards: 1, mongos: 1});
+"use strict";
+var st = new ShardingTest({name: "shard1", shards: 1, mongos: 1});
- assert.commandFailed(st.s.getDB("admin").runCommand(
- {killOp: 1, op: st.shard0.shardName + ":99999999999999999999999"}));
- st.stop();
+assert.commandFailed(st.s.getDB("admin").runCommand(
+ {killOp: 1, op: st.shard0.shardName + ":99999999999999999999999"}));
+st.stop();
})();
diff --git a/jstests/sharding/kill_pinned_cursor.js b/jstests/sharding/kill_pinned_cursor.js
index d7e4017d273..1c19626ab77 100644
--- a/jstests/sharding/kill_pinned_cursor.js
+++ b/jstests/sharding/kill_pinned_cursor.js
@@ -7,234 +7,234 @@
*/
(function() {
- "use strict";
-
- // This test manually simulates a session, which is not compatible with implicit sessions.
- TestData.disableImplicitSessions = true;
-
- const kFailPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
- const kFailpointOptions = {shouldCheckForInterrupt: true};
-
- const st = new ShardingTest({shards: 2});
- const kDBName = "test";
- const mongosDB = st.s.getDB(kDBName);
- const shard0DB = st.shard0.getDB(kDBName);
- const shard1DB = st.shard1.getDB(kDBName);
-
- let coll = mongosDB.jstest_kill_pinned_cursor;
- coll.drop();
-
- for (let i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({_id: i}));
+"use strict";
+
+// This test manually simulates a session, which is not compatible with implicit sessions.
+TestData.disableImplicitSessions = true;
+
+const kFailPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
+const kFailpointOptions = {
+ shouldCheckForInterrupt: true
+};
+
+const st = new ShardingTest({shards: 2});
+const kDBName = "test";
+const mongosDB = st.s.getDB(kDBName);
+const shard0DB = st.shard0.getDB(kDBName);
+const shard1DB = st.shard1.getDB(kDBName);
+
+let coll = mongosDB.jstest_kill_pinned_cursor;
+coll.drop();
+
+for (let i = 0; i < 10; i++) {
+ assert.writeOK(coll.insert({_id: i}));
+}
+
+st.shardColl(coll, {_id: 1}, {_id: 5}, {_id: 6}, kDBName, false);
+st.ensurePrimaryShard(kDBName, st.shard0.name);
+
+// The startParallelShell function will take the string it's given and serialize it into a
+// string. This means that we can't pass it functions which capture variables. Instead we use
+// the trick below, by putting the values for the variables we'd like to capture inside the
+// string. Kudos to Dave Storch for coming up with this idea.
+function makeParallelShellFunctionString(cursorId, getMoreErrCodes, useSession, sessionId) {
+ let code = `const cursorId = ${cursorId.toString()};`;
+ code += `const kDBName = "${kDBName}";`;
+ code += `let collName = "${coll.getName()}";`;
+ code += `const useSession = ${useSession};`;
+
+ TestData.getMoreErrCodes = getMoreErrCodes;
+ if (useSession) {
+ TestData.sessionId = sessionId;
}
- st.shardColl(coll, {_id: 1}, {_id: 5}, {_id: 6}, kDBName, false);
- st.ensurePrimaryShard(kDBName, st.shard0.name);
-
- // The startParallelShell function will take the string it's given and serialize it into a
- // string. This means that we can't pass it functions which capture variables. Instead we use
- // the trick below, by putting the values for the variables we'd like to capture inside the
- // string. Kudos to Dave Storch for coming up with this idea.
- function makeParallelShellFunctionString(cursorId, getMoreErrCodes, useSession, sessionId) {
- let code = `const cursorId = ${cursorId.toString()};`;
- code += `const kDBName = "${kDBName}";`;
- code += `let collName = "${coll.getName()}";`;
- code += `const useSession = ${useSession};`;
+ const runGetMore = function() {
+ let getMoreCmd = {getMore: cursorId, collection: collName, batchSize: 4};
- TestData.getMoreErrCodes = getMoreErrCodes;
if (useSession) {
- TestData.sessionId = sessionId;
+ getMoreCmd.lsid = TestData.sessionId;
}
- const runGetMore = function() {
- let getMoreCmd = {getMore: cursorId, collection: collName, batchSize: 4};
-
- if (useSession) {
- getMoreCmd.lsid = TestData.sessionId;
- }
-
- // We expect that the operation will get interrupted and fail.
- assert.commandFailedWithCode(db.runCommand(getMoreCmd), TestData.getMoreErrCodes);
-
- if (useSession) {
- assert.commandWorked(db.adminCommand({endSessions: [TestData.sessionId]}));
- }
- };
-
- code += `(${runGetMore.toString()})();`;
- return code;
- }
-
- // Tests that the various cursors involved in a sharded query can be killed, even when pinned.
- //
- // Sets up a sharded cursor, opens a mongos cursor, and uses failpoints to cause the mongos
- // cursor to hang due to getMore commands hanging on each of the shards. Then invokes
- // 'killFunc', and verifies the cursors on the shards and the mongos cursor get cleaned up.
- //
- // 'getMoreErrCodes' are the error codes with which we expect the getMore to fail (e.g. a
- // killCursors command should cause getMore to fail with "CursorKilled", but killOp should cause
- // a getMore to fail with "Interrupted").
- function testShardedKillPinned(
- {killFunc: killFunc, getMoreErrCodes: getMoreErrCodes, useSession: useSession}) {
- let getMoreJoiner = null;
- let cursorId;
- let sessionId;
-
- try {
- // Set up the mongods to hang on a getMore request. ONLY set the failpoint on the
- // mongods. Setting the failpoint on the mongos will only cause it to spin, and not
- // actually send any requests out.
- assert.commandWorked(shard0DB.adminCommand(
- {configureFailPoint: kFailPointName, mode: "alwaysOn", data: kFailpointOptions}));
- assert.commandWorked(shard1DB.adminCommand(
- {configureFailPoint: kFailPointName, mode: "alwaysOn", data: kFailpointOptions}));
-
- // Run a find against mongos. This should open cursors on both of the shards.
- let findCmd = {find: coll.getName(), batchSize: 2};
-
- if (useSession) {
- // Manually start a session so it can be continued from inside a parallel shell.
- sessionId = assert.commandWorked(mongosDB.adminCommand({startSession: 1})).id;
- findCmd.lsid = sessionId;
- }
-
- let cmdRes = mongosDB.runCommand(findCmd);
- assert.commandWorked(cmdRes);
- cursorId = cmdRes.cursor.id;
- assert.neq(cursorId, NumberLong(0));
-
- const parallelShellFn =
- makeParallelShellFunctionString(cursorId, getMoreErrCodes, useSession, sessionId);
- getMoreJoiner = startParallelShell(parallelShellFn, st.s.port);
+ // We expect that the operation will get interrupted and fail.
+ assert.commandFailedWithCode(db.runCommand(getMoreCmd), TestData.getMoreErrCodes);
- // Sleep until we know the mongod cursors are pinned.
- assert.soon(() => shard0DB.serverStatus().metrics.cursor.open.pinned > 0);
- assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.pinned > 0);
+ if (useSession) {
+ assert.commandWorked(db.adminCommand({endSessions: [TestData.sessionId]}));
+ }
+ };
+
+ code += `(${runGetMore.toString()})();`;
+ return code;
+}
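+
+// The same capture-by-interpolation idea in isolation (hypothetical names, shown purely as
+// an illustration of the pattern above, not exercised by this test):
+//     const exampleValue = 42;
+//     let exampleCode = `const capturedValue = ${exampleValue};`;
+//     exampleCode += `assert.eq(capturedValue, 42);`;
+//     const joinShell = startParallelShell(exampleCode, st.s.port);  // runs with 42 baked in
+//     joinShell();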
+
+// Tests that the various cursors involved in a sharded query can be killed, even when pinned.
+//
+// Sets up a sharded cursor, opens a mongos cursor, and uses failpoints to cause the mongos
+// cursor to hang due to getMore commands hanging on each of the shards. Then invokes
+// 'killFunc', and verifies the cursors on the shards and the mongos cursor get cleaned up.
+//
+// 'getMoreErrCodes' are the error codes with which we expect the getMore to fail (e.g. a
+// killCursors command should cause getMore to fail with "CursorKilled", but killOp should cause
+// a getMore to fail with "Interrupted").
+function testShardedKillPinned(
+ {killFunc: killFunc, getMoreErrCodes: getMoreErrCodes, useSession: useSession}) {
+ let getMoreJoiner = null;
+ let cursorId;
+ let sessionId;
+
+ try {
+ // Set up the mongods to hang on a getMore request. ONLY set the failpoint on the
+ // mongods. Setting the failpoint on the mongos will only cause it to spin, and not
+ // actually send any requests out.
+ assert.commandWorked(shard0DB.adminCommand(
+ {configureFailPoint: kFailPointName, mode: "alwaysOn", data: kFailpointOptions}));
+ assert.commandWorked(shard1DB.adminCommand(
+ {configureFailPoint: kFailPointName, mode: "alwaysOn", data: kFailpointOptions}));
+
+ // Run a find against mongos. This should open cursors on both of the shards.
+ let findCmd = {find: coll.getName(), batchSize: 2};
- // Use the function provided by the caller to kill the sharded query.
- killFunc(cursorId);
+ if (useSession) {
+ // Manually start a session so it can be continued from inside a parallel shell.
+ sessionId = assert.commandWorked(mongosDB.adminCommand({startSession: 1})).id;
+ findCmd.lsid = sessionId;
+ }
- // The getMore should finish now that we've killed the cursor (even though the failpoint
- // is still enabled).
+ let cmdRes = mongosDB.runCommand(findCmd);
+ assert.commandWorked(cmdRes);
+ cursorId = cmdRes.cursor.id;
+ assert.neq(cursorId, NumberLong(0));
+
+ const parallelShellFn =
+ makeParallelShellFunctionString(cursorId, getMoreErrCodes, useSession, sessionId);
+ getMoreJoiner = startParallelShell(parallelShellFn, st.s.port);
+
+ // Sleep until we know the mongod cursors are pinned.
+ assert.soon(() => shard0DB.serverStatus().metrics.cursor.open.pinned > 0);
+ assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.pinned > 0);
+
+ // Use the function provided by the caller to kill the sharded query.
+ killFunc(cursorId);
+
+ // The getMore should finish now that we've killed the cursor (even though the failpoint
+ // is still enabled).
+ getMoreJoiner();
+ getMoreJoiner = null;
+
+ // By now, the getMore run against the mongos has returned with an indication that the
+ // cursor has been killed. Verify that the cursor is really gone by running a
+ // killCursors command, and checking that the cursor is reported as "not found".
+ let killRes = mongosDB.runCommand({killCursors: coll.getName(), cursors: [cursorId]});
+ assert.commandWorked(killRes);
+ assert.eq(killRes.cursorsAlive, []);
+ assert.eq(killRes.cursorsNotFound, [cursorId]);
+ assert.eq(killRes.cursorsUnknown, []);
+
+ // Eventually the cursors on the mongods should also be cleaned up. They should be
+ // killed by mongos when the mongos cursor gets killed.
+ assert.soon(() => shard0DB.serverStatus().metrics.cursor.open.pinned == 0);
+ assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.pinned == 0);
+ assert.eq(shard0DB.serverStatus().metrics.cursor.open.total, 0);
+ assert.eq(shard1DB.serverStatus().metrics.cursor.open.total, 0);
+ } finally {
+ assert.commandWorked(
+ shard0DB.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
+ assert.commandWorked(
+ shard1DB.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
+ if (getMoreJoiner) {
getMoreJoiner();
- getMoreJoiner = null;
-
- // By now, the getMore run against the mongos has returned with an indication that the
- // cursor has been killed. Verify that the cursor is really gone by running a
- // killCursors command, and checking that the cursor is reported as "not found".
- let killRes = mongosDB.runCommand({killCursors: coll.getName(), cursors: [cursorId]});
- assert.commandWorked(killRes);
- assert.eq(killRes.cursorsAlive, []);
- assert.eq(killRes.cursorsNotFound, [cursorId]);
- assert.eq(killRes.cursorsUnknown, []);
-
- // Eventually the cursors on the mongods should also be cleaned up. They should be
- // killed by mongos when the mongos cursor gets killed.
- assert.soon(() => shard0DB.serverStatus().metrics.cursor.open.pinned == 0);
- assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.pinned == 0);
- assert.eq(shard0DB.serverStatus().metrics.cursor.open.total, 0);
- assert.eq(shard1DB.serverStatus().metrics.cursor.open.total, 0);
- } finally {
- assert.commandWorked(
- shard0DB.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
- assert.commandWorked(
- shard1DB.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
- if (getMoreJoiner) {
- getMoreJoiner();
- }
}
}
+}
- for (let useSession of[true, false]) {
- // Test that running 'killCursors' against a pinned mongos cursor (with pinned mongod
- // cursors) correctly cleans up all of the involved cursors.
- testShardedKillPinned({
- killFunc: function(mongosCursorId) {
- // Run killCursors against the mongos cursor. Verify that the cursor is reported as
- // killed successfully, and does not hang or return a "CursorInUse" error.
- let cmdRes =
- mongosDB.runCommand({killCursors: coll.getName(), cursors: [mongosCursorId]});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursorsKilled, [mongosCursorId]);
- assert.eq(cmdRes.cursorsAlive, []);
- assert.eq(cmdRes.cursorsNotFound, []);
- assert.eq(cmdRes.cursorsUnknown, []);
- },
- getMoreErrCodes: ErrorCodes.CursorKilled,
- useSession: useSession
- });
-
- // Test that running killOp against one of the cursors pinned on mongod causes all involved
- // cursors to be killed.
- testShardedKillPinned({
- // This function ignores the mongos cursor id, since it instead uses currentOp to
- // obtain an op id to kill.
- killFunc: function() {
- let currentGetMoresArray =
- shard0DB.getSiblingDB("admin")
- .aggregate(
- [{$currentOp: {}}, {$match: {"command.getMore": {$exists: true}}}])
- .toArray();
- assert.eq(1, currentGetMoresArray.length);
- let currentGetMore = currentGetMoresArray[0];
- let killOpResult = shard0DB.killOp(currentGetMore.opid);
- assert.commandWorked(killOpResult);
- },
- getMoreErrCodes: ErrorCodes.Interrupted,
- useSession: useSession
- });
-
- // Test that running killCursors against one of the cursors pinned on mongod causes all
- // involved cursors to be killed.
- testShardedKillPinned({
- // This function ignores the mongos cursor id, since it instead uses currentOp to
- // obtain the cursor id of one of the shard cursors.
- killFunc: function() {
- let currentGetMoresArray =
- shard0DB.getSiblingDB("admin")
- .aggregate(
- [{$currentOp: {}}, {$match: {"command.getMore": {$exists: true}}}])
- .toArray();
- assert.eq(1, currentGetMoresArray.length);
- let currentGetMore = currentGetMoresArray[0];
- let shardCursorId = currentGetMore.command.getMore;
- let cmdRes =
- shard0DB.runCommand({killCursors: coll.getName(), cursors: [shardCursorId]});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursorsKilled, [shardCursorId]);
- assert.eq(cmdRes.cursorsAlive, []);
- assert.eq(cmdRes.cursorsNotFound, []);
- assert.eq(cmdRes.cursorsUnknown, []);
- },
- getMoreErrCodes: ErrorCodes.CursorKilled,
- useSession: useSession
- });
- }
+for (let useSession of [true, false]) {
+ // Test that running 'killCursors' against a pinned mongos cursor (with pinned mongod
+ // cursors) correctly cleans up all of the involved cursors.
+ testShardedKillPinned({
+ killFunc: function(mongosCursorId) {
+ // Run killCursors against the mongos cursor. Verify that the cursor is reported as
+ // killed successfully, and does not hang or return a "CursorInUse" error.
+ let cmdRes =
+ mongosDB.runCommand({killCursors: coll.getName(), cursors: [mongosCursorId]});
+ assert.commandWorked(cmdRes);
+ assert.eq(cmdRes.cursorsKilled, [mongosCursorId]);
+ assert.eq(cmdRes.cursorsAlive, []);
+ assert.eq(cmdRes.cursorsNotFound, []);
+ assert.eq(cmdRes.cursorsUnknown, []);
+ },
+ getMoreErrCodes: ErrorCodes.CursorKilled,
+ useSession: useSession
+ });
- // Test that running killSessions on the session which is running the getMore causes the
- // cursor to be killed.
+ // Test that running killOp against one of the cursors pinned on mongod causes all involved
+ // cursors to be killed.
testShardedKillPinned({
- // This function ignores the mongos cursor id, since it instead uses listLocalSessions
- // to obtain the session id of the session running the getMore.
+ // This function ignores the mongos cursor id, since it instead uses currentOp to
+ // obtain an op id to kill.
killFunc: function() {
- // Must sort by 'lastUse' because there may be sessions left over on the server from
- // the previous runs. We will only call killSessions on the most recently used one.
- const localSessions = mongosDB
- .aggregate([
- {$listLocalSessions: {allUsers: true}},
- {$sort: {"lastUse": -1}},
- ])
- .toArray();
-
- const sessionUUID = localSessions[0]._id.id;
- assert.commandWorked(mongosDB.runCommand({killSessions: [{id: sessionUUID}]}));
+ let currentGetMoresArray =
+ shard0DB.getSiblingDB("admin")
+ .aggregate([{$currentOp: {}}, {$match: {"command.getMore": {$exists: true}}}])
+ .toArray();
+ assert.eq(1, currentGetMoresArray.length);
+ let currentGetMore = currentGetMoresArray[0];
+ let killOpResult = shard0DB.killOp(currentGetMore.opid);
+ assert.commandWorked(killOpResult);
},
- // Killing a session on mongos kills all matching remote cursors (through KillCursors) then
- // all matching local operations (through KillOp), so the getMore can fail with either
- // CursorKilled or Interrupted depending on which response is returned first.
- getMoreErrCodes: [ErrorCodes.CursorKilled, ErrorCodes.Interrupted],
- useSession: true,
+ getMoreErrCodes: ErrorCodes.Interrupted,
+ useSession: useSession
});
- st.stop();
+ // Test that running killCursors against one of the cursors pinned on mongod causes all
+ // involved cursors to be killed.
+ testShardedKillPinned({
+ // This function ignores the mongos cursor id, since it instead uses currentOp to
+ // obtain the cursor id of one of the shard cursors.
+ killFunc: function() {
+ let currentGetMoresArray =
+ shard0DB.getSiblingDB("admin")
+ .aggregate([{$currentOp: {}}, {$match: {"command.getMore": {$exists: true}}}])
+ .toArray();
+ assert.eq(1, currentGetMoresArray.length);
+ let currentGetMore = currentGetMoresArray[0];
+ let shardCursorId = currentGetMore.command.getMore;
+ let cmdRes =
+ shard0DB.runCommand({killCursors: coll.getName(), cursors: [shardCursorId]});
+ assert.commandWorked(cmdRes);
+ assert.eq(cmdRes.cursorsKilled, [shardCursorId]);
+ assert.eq(cmdRes.cursorsAlive, []);
+ assert.eq(cmdRes.cursorsNotFound, []);
+ assert.eq(cmdRes.cursorsUnknown, []);
+ },
+ getMoreErrCodes: ErrorCodes.CursorKilled,
+ useSession: useSession
+ });
+}
+
+// Test that running killSessions on the session which is running the getMore causes the
+// cursor to be killed.
+testShardedKillPinned({
+ // This function ignores the mongos cursor id, since it instead uses listLocalSessions
+ // to obtain the session id of the session running the getMore.
+ killFunc: function() {
+ // Must sort by 'lastUse' because there may be sessions left over on the server from
+ // the previous runs. We will only call killSessions on the most recently used one.
+ const localSessions = mongosDB
+ .aggregate([
+ {$listLocalSessions: {allUsers: true}},
+ {$sort: {"lastUse": -1}},
+ ])
+ .toArray();
+
+ const sessionUUID = localSessions[0]._id.id;
+ assert.commandWorked(mongosDB.runCommand({killSessions: [{id: sessionUUID}]}));
+ },
+ // Killing a session on mongos kills all matching remote cursors (through KillCursors) then
+ // all matching local operations (through KillOp), so the getMore can fail with either
+ // CursorKilled or Interrupted depending on which response is returned first.
+ getMoreErrCodes: [ErrorCodes.CursorKilled, ErrorCodes.Interrupted],
+ useSession: true,
+});
+
+st.stop();
})();
diff --git a/jstests/sharding/kill_sessions.js b/jstests/sharding/kill_sessions.js
index a3ad23139a0..b96bf4bd326 100644
--- a/jstests/sharding/kill_sessions.js
+++ b/jstests/sharding/kill_sessions.js
@@ -1,63 +1,63 @@
load("jstests/libs/kill_sessions.js");
(function() {
- 'use strict';
-
- // TODO SERVER-35447: This test involves killing all sessions, which will not work as expected
- // if the kill command is sent with an implicit session.
- TestData.disableImplicitSessions = true;
-
- function runTests(needAuth) {
- var other = {
- rs: true,
- rs0: {nodes: 3},
- rs1: {nodes: 3},
- };
- if (needAuth) {
- other.keyFile = 'jstests/libs/key1';
- }
+'use strict';
+
+// TODO SERVER-35447: This test involves killing all sessions, which will not work as expected
+// if the kill command is sent with an implicit session.
+TestData.disableImplicitSessions = true;
+
+function runTests(needAuth) {
+ var other = {
+ rs: true,
+ rs0: {nodes: 3},
+ rs1: {nodes: 3},
+ };
+ if (needAuth) {
+ other.keyFile = 'jstests/libs/key1';
+ }
- var st = new ShardingTest({shards: 2, mongos: 1, config: 1, other: other});
+ var st = new ShardingTest({shards: 2, mongos: 1, config: 1, other: other});
- var forExec = st.s0;
+ var forExec = st.s0;
- if (needAuth) {
- KillSessionsTestHelper.initializeAuth(forExec);
- }
+ if (needAuth) {
+ KillSessionsTestHelper.initializeAuth(forExec);
+ }
- var forKill = new Mongo(forExec.host);
+ var forKill = new Mongo(forExec.host);
- var r = forExec.getDB("admin").runCommand({
- multicast: {ping: 1},
- db: "admin",
- });
- assert(r.ok);
-
- var hosts = [];
- for (var host in r["hosts"]) {
- var host = new Mongo(host);
- if (needAuth) {
- host.getDB("local").auth("__system", "foopdedoop");
- }
- hosts.push(host);
-
- assert.soon(function() {
- var fcv = host.getDB("admin").runCommand(
- {getParameter: 1, featureCompatibilityVersion: 1});
- return fcv["ok"] && fcv["featureCompatibilityVersion"] != "3.4";
- });
- }
+ var r = forExec.getDB("admin").runCommand({
+ multicast: {ping: 1},
+ db: "admin",
+ });
+ assert(r.ok);
- var args = [forExec, forKill, hosts];
+ var hosts = [];
+ for (var host in r["hosts"]) {
+ var host = new Mongo(host);
if (needAuth) {
- KillSessionsTestHelper.runAuth.apply({}, args);
- } else {
- KillSessionsTestHelper.runNoAuth.apply({}, args);
+ host.getDB("local").auth("__system", "foopdedoop");
}
+ hosts.push(host);
+
+ assert.soon(function() {
+ var fcv =
+ host.getDB("admin").runCommand({getParameter: 1, featureCompatibilityVersion: 1});
+ return fcv["ok"] && fcv["featureCompatibilityVersion"] != "3.4";
+ });
+ }
- st.stop();
+ var args = [forExec, forKill, hosts];
+ if (needAuth) {
+ KillSessionsTestHelper.runAuth.apply({}, args);
+ } else {
+ KillSessionsTestHelper.runNoAuth.apply({}, args);
}
- runTests(true);
- runTests(false);
+ st.stop();
+}
+
+runTests(true);
+runTests(false);
})();
diff --git a/jstests/sharding/killop.js b/jstests/sharding/killop.js
index 39c9c36538b..7f2e4d23173 100644
--- a/jstests/sharding/killop.js
+++ b/jstests/sharding/killop.js
@@ -2,67 +2,66 @@
// @tags: [requires_replication, requires_sharding]
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 2});
- const conn = st.s;
+const st = new ShardingTest({shards: 2});
+const conn = st.s;
- const db = conn.getDB("killOp");
- const coll = db.test;
- assert.writeOK(db.getCollection(coll.getName()).insert({x: 1}));
+const db = conn.getDB("killOp");
+const coll = db.test;
+assert.writeOK(db.getCollection(coll.getName()).insert({x: 1}));
- const kFailPointName = "waitInFindBeforeMakingBatch";
- assert.commandWorked(
- conn.adminCommand({"configureFailPoint": kFailPointName, "mode": "alwaysOn"}));
+const kFailPointName = "waitInFindBeforeMakingBatch";
+assert.commandWorked(conn.adminCommand({"configureFailPoint": kFailPointName, "mode": "alwaysOn"}));
- const queryToKill = `assert.commandFailedWithCode(db.getSiblingDB('${db.getName()}')` +
- `.runCommand({find: '${coll.getName()}', filter: {x: 1}}), ErrorCodes.Interrupted);`;
- const awaitShell = startParallelShell(queryToKill, conn.port);
+const queryToKill = `assert.commandFailedWithCode(db.getSiblingDB('${db.getName()}')` +
+ `.runCommand({find: '${coll.getName()}', filter: {x: 1}}), ErrorCodes.Interrupted);`;
+const awaitShell = startParallelShell(queryToKill, conn.port);
- function runCurOp() {
- const filter = {"ns": coll.getFullName(), "command.filter": {x: 1}};
- return db.getSiblingDB("admin")
- .aggregate([{$currentOp: {localOps: true}}, {$match: filter}])
- .toArray();
- }
+function runCurOp() {
+ const filter = {"ns": coll.getFullName(), "command.filter": {x: 1}};
+ return db.getSiblingDB("admin")
+ .aggregate([{$currentOp: {localOps: true}}, {$match: filter}])
+ .toArray();
+}
- let opId;
+let opId;
- // Wait for the operation to start.
- assert.soon(
- function() {
- const result = runCurOp();
+// Wait for the operation to start.
+assert.soon(
+ function() {
+ const result = runCurOp();
- // Check the 'msg' field to be sure that the failpoint has been reached.
- if (result.length === 1 && result[0].msg === kFailPointName) {
- opId = result[0].opid;
+ // Check the 'msg' field to be sure that the failpoint has been reached.
+ if (result.length === 1 && result[0].msg === kFailPointName) {
+ opId = result[0].opid;
- return true;
- }
+ return true;
+ }
- return false;
- },
- function() {
- return "Failed to find operation in currentOp() output: " +
- tojson(db.currentOp({"ns": coll.getFullName()}));
- });
+ return false;
+ },
+ function() {
+ return "Failed to find operation in currentOp() output: " +
+ tojson(db.currentOp({"ns": coll.getFullName()}));
+ });
- // Kill the operation.
- assert.commandWorked(db.killOp(opId));
+// Kill the operation.
+assert.commandWorked(db.killOp(opId));
- // Ensure that the operation gets marked kill pending while it's still hanging.
- let result = runCurOp();
- assert(result.length === 1, tojson(result));
- assert(result[0].hasOwnProperty("killPending"));
- assert.eq(true, result[0].killPending);
+// Ensure that the operation gets marked kill pending while it's still hanging.
+let result = runCurOp();
+assert(result.length === 1, tojson(result));
+assert(result[0].hasOwnProperty("killPending"));
+assert.eq(true, result[0].killPending);
- // Release the failpoint. The operation should check for interrupt and then finish.
- assert.commandWorked(conn.adminCommand({"configureFailPoint": kFailPointName, "mode": "off"}));
+// Release the failpoint. The operation should check for interrupt and then finish.
+assert.commandWorked(conn.adminCommand({"configureFailPoint": kFailPointName, "mode": "off"}));
- awaitShell();
+awaitShell();
- result = runCurOp();
- assert(result.length === 0, tojson(result));
+result = runCurOp();
+assert(result.length === 0, tojson(result));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/lagged_config_secondary.js b/jstests/sharding/lagged_config_secondary.js
index df23946dee4..35e38722edb 100644
--- a/jstests/sharding/lagged_config_secondary.js
+++ b/jstests/sharding/lagged_config_secondary.js
@@ -8,64 +8,64 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- var st = new ShardingTest(
- {shards: 1, configReplSetTestOptions: {settings: {chainingAllowed: false}}});
- var testDB = st.s.getDB('test');
+var st =
+ new ShardingTest({shards: 1, configReplSetTestOptions: {settings: {chainingAllowed: false}}});
+var testDB = st.s.getDB('test');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
+assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
- // Ensures that all metadata writes thus far have been replicated to all nodes
- st.configRS.awaitReplication();
+// Ensures that all metadata writes thus far have been replicated to all nodes
+st.configRS.awaitReplication();
- var configSecondaryList = st.configRS.getSecondaries();
- var configSecondaryToKill = configSecondaryList[0];
- var delayedConfigSecondary = configSecondaryList[1];
+var configSecondaryList = st.configRS.getSecondaries();
+var configSecondaryToKill = configSecondaryList[0];
+var delayedConfigSecondary = configSecondaryList[1];
- assert.writeOK(testDB.user.insert({_id: 1}));
+assert.writeOK(testDB.user.insert({_id: 1}));
- delayedConfigSecondary.getDB('admin').adminCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+delayedConfigSecondary.getDB('admin').adminCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
- // Do one metadata write in order to bump the optime on mongos
- assert.writeOK(st.getDB('config').TestConfigColl.insert({TestKey: 'Test value'}));
+// Do one metadata write in order to bump the optime on mongos
+assert.writeOK(st.getDB('config').TestConfigColl.insert({TestKey: 'Test value'}));
- st.configRS.stopMaster();
- MongoRunner.stopMongod(configSecondaryToKill);
+st.configRS.stopMaster();
+MongoRunner.stopMongod(configSecondaryToKill);
- // Clears all cached info so mongos will be forced to query from the config.
- st.s.adminCommand({flushRouterConfig: 1});
+// Clears all cached info so mongos will be forced to query from the config.
+st.s.adminCommand({flushRouterConfig: 1});
- print('Attempting read on a sharded collection...');
- var exception = assert.throws(function() {
- testDB.user.find({}).maxTimeMS(15000).itcount();
- });
+print('Attempting read on a sharded collection...');
+var exception = assert.throws(function() {
+ testDB.user.find({}).maxTimeMS(15000).itcount();
+});
- assert(ErrorCodes.isExceededTimeLimitError(exception.code));
+assert(ErrorCodes.isExceededTimeLimitError(exception.code));
- let msgAA = 'command config.$cmd command: find { find: "databases"';
- let msgAB = 'errCode:' + ErrorCodes.ClientDisconnect;
- let msgB = 'Command on database config timed out waiting for read concern to be satisfied.';
- assert.soon(
- function() {
- var logMessages =
- assert.commandWorked(delayedConfigSecondary.adminCommand({getLog: 'global'})).log;
- for (var i = 0; i < logMessages.length; i++) {
- if ((logMessages[i].indexOf(msgAA) != -1 && logMessages[i].indexOf(msgAB) != -1) ||
- logMessages[i].indexOf(msgB) != -1) {
- return true;
- }
+let msgAA = 'command config.$cmd command: find { find: "databases"';
+let msgAB = 'errCode:' + ErrorCodes.ClientDisconnect;
+let msgB = 'Command on database config timed out waiting for read concern to be satisfied.';
+assert.soon(
+ function() {
+ var logMessages =
+ assert.commandWorked(delayedConfigSecondary.adminCommand({getLog: 'global'})).log;
+ for (var i = 0; i < logMessages.length; i++) {
+ if ((logMessages[i].indexOf(msgAA) != -1 && logMessages[i].indexOf(msgAB) != -1) ||
+ logMessages[i].indexOf(msgB) != -1) {
+ return true;
}
- return false;
- },
- 'Did not see any log entries containing the following message: ' + msgAA + ' ... ' + msgAB +
- ' or ' + msgB,
- 60000,
- 300);
+ }
+ return false;
+ },
+ 'Did not see any log entries containing the following message: ' + msgAA + ' ... ' + msgAB +
+ ' or ' + msgB,
+ 60000,
+ 300);
- // Can't do clean shutdown with this failpoint on.
- delayedConfigSecondary.getDB('admin').adminCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+// Can't do clean shutdown with this failpoint on.
+delayedConfigSecondary.getDB('admin').adminCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
- st.stop();
+st.stop();
}());
diff --git a/jstests/sharding/large_chunk.js b/jstests/sharding/large_chunk.js
index 5862483138a..c3df1b4baf2 100644
--- a/jstests/sharding/large_chunk.js
+++ b/jstests/sharding/large_chunk.js
@@ -6,65 +6,65 @@
* @tags: [resource_intensive]
*/
(function() {
- 'use strict';
+'use strict';
- // Starts a new sharding environment limiting the chunk size to 1GB (highest value allowed).
+// Note that early splitting currently starts at 1/4 of the max chunk size.
- var s = new ShardingTest({name: 'large_chunk', shards: 2, other: {chunkSize: 1024}});
- var db = s.getDB("test");
+// Starts a new sharding environment limiting the chunk size to 1GB (highest value allowed).
+// Note that early splitting will start with a 1/4 of max size currently.
+var s = new ShardingTest({name: 'large_chunk', shards: 2, other: {chunkSize: 1024}});
+var db = s.getDB("test");
- //
- // Step 1 - Test moving a large chunk
- //
+//
+// Step 1 - Test moving a large chunk
+//
- // Turn on sharding on the 'test.foo' collection and generate a large chunk
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
+// Turn on sharding on the 'test.foo' collection and generate a large chunk
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
- var bigString = "";
- while (bigString.length < 10000) {
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
- }
+var bigString = "";
+while (bigString.length < 10000) {
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+}
- var inserted = 0;
- var num = 0;
- var bulk = db.foo.initializeUnorderedBulkOp();
- while (inserted < (400 * 1024 * 1024)) {
- bulk.insert({_id: num++, s: bigString});
- inserted += bigString.length;
- }
- assert.writeOK(bulk.execute());
+var inserted = 0;
+var num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+while (inserted < (400 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+}
+assert.writeOK(bulk.execute());
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "step 1 - need one large chunk");
+assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "step 1 - need one large chunk");
- var primary = s.getPrimaryShard("test").getDB("test");
- var secondary = s.getOther(primary).getDB("test");
+var primary = s.getPrimaryShard("test").getDB("test");
+var secondary = s.getOther(primary).getDB("test");
- // Make sure that we don't move that chunk if it goes past what we consider the maximum chunk
- // size
- print("Checkpoint 1a");
- var max = 200 * 1024 * 1024;
- assert.throws(function() {
- s.adminCommand({
- movechunk: "test.foo",
- find: {_id: 1},
- to: secondary.getMongo().name,
- maxChunkSizeBytes: max
- });
+// Make sure that we don't move that chunk if it goes past what we consider the maximum chunk
+// size
+print("Checkpoint 1a");
+var max = 200 * 1024 * 1024;
+assert.throws(function() {
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {_id: 1},
+ to: secondary.getMongo().name,
+ maxChunkSizeBytes: max
});
+});
- // Move the chunk
- print("checkpoint 1b");
- var before = s.config.chunks.find({ns: 'test.foo'}).toArray();
- assert.commandWorked(
- s.s0.adminCommand({movechunk: "test.foo", find: {_id: 1}, to: secondary.getMongo().name}));
+// Move the chunk
+print("checkpoint 1b");
+var before = s.config.chunks.find({ns: 'test.foo'}).toArray();
+assert.commandWorked(
+ s.s0.adminCommand({movechunk: "test.foo", find: {_id: 1}, to: secondary.getMongo().name}));
- var after = s.config.chunks.find({ns: 'test.foo'}).toArray();
- assert.neq(before[0].shard, after[0].shard, "move chunk did not work");
+var after = s.config.chunks.find({ns: 'test.foo'}).toArray();
+assert.neq(before[0].shard, after[0].shard, "move chunk did not work");
- s.config.changelog.find().forEach(printjson);
+s.config.changelog.find().forEach(printjson);
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/libs/sharded_transactions_helpers.js b/jstests/sharding/libs/sharded_transactions_helpers.js
index 1355e9610f0..dc49b839e30 100644
--- a/jstests/sharding/libs/sharded_transactions_helpers.js
+++ b/jstests/sharding/libs/sharded_transactions_helpers.js
@@ -13,27 +13,27 @@ function getCoordinatorFailpoints() {
const coordinatorFailpointDataArr = [
{failpoint: "hangBeforeWritingParticipantList", numTimesShouldBeHit: 1},
{
- // Test targeting remote nodes for prepare
- failpoint: "hangWhileTargetingRemoteHost",
- numTimesShouldBeHit: 2 /* once per remote participant */
+ // Test targeting remote nodes for prepare
+ failpoint: "hangWhileTargetingRemoteHost",
+ numTimesShouldBeHit: 2 /* once per remote participant */
},
{
- // Test targeting local node for prepare
- failpoint: "hangWhileTargetingLocalHost",
- numTimesShouldBeHit: 1
+ // Test targeting local node for prepare
+ failpoint: "hangWhileTargetingLocalHost",
+ numTimesShouldBeHit: 1
},
{failpoint: "hangBeforeWritingDecision", numTimesShouldBeHit: 1},
{
- // Test targeting remote nodes for decision
- failpoint: "hangWhileTargetingRemoteHost",
- numTimesShouldBeHit: 2, /* once per remote participant */
- skip: 2 /* to skip when the failpoint is hit for prepare */
+ // Test targeting remote nodes for decision
+ failpoint: "hangWhileTargetingRemoteHost",
+ numTimesShouldBeHit: 2, /* once per remote participant */
+ skip: 2 /* to skip when the failpoint is hit for prepare */
},
{
- // Test targeting local node for decision
- failpoint: "hangWhileTargetingLocalHost",
- numTimesShouldBeHit: 1,
- skip: 1 /* to skip when the failpoint is hit for prepare */
+ // Test targeting local node for decision
+ failpoint: "hangWhileTargetingLocalHost",
+ numTimesShouldBeHit: 1,
+ skip: 1 /* to skip when the failpoint is hit for prepare */
},
{failpoint: "hangBeforeDeletingCoordinatorDoc", numTimesShouldBeHit: 1},
];
@@ -70,16 +70,16 @@ function assertNoSuchTransactionOnAllShards(st, lsid, txnNumber) {
}
function assertNoSuchTransactionOnConn(conn, lsid, txnNumber) {
- assert.commandFailedWithCode(conn.getDB("foo").runCommand({
- find: "bar",
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction,
- "expected there to be no active transaction on shard, lsid: " +
- tojson(lsid) + ", txnNumber: " + tojson(txnNumber) +
- ", connection: " + tojson(conn));
+ assert.commandFailedWithCode(
+ conn.getDB("foo").runCommand({
+ find: "bar",
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ }),
+ ErrorCodes.NoSuchTransaction,
+ "expected there to be no active transaction on shard, lsid: " + tojson(lsid) +
+ ", txnNumber: " + tojson(txnNumber) + ", connection: " + tojson(conn));
}
function waitForFailpoint(hitFailpointStr, numTimes) {
diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js
index ec744207e97..ef6b7a1c903 100644
--- a/jstests/sharding/limit_push.js
+++ b/jstests/sharding/limit_push.js
@@ -2,59 +2,60 @@
// See: http://jira.mongodb.org/browse/SERVER-1896
(function() {
- var s = new ShardingTest({name: "limit_push", shards: 2, mongos: 1});
- var db = s.getDB("test");
-
- // Create some data
- for (i = 0; i < 100; i++) {
- db.limit_push.insert({_id: i, x: i});
- }
- db.limit_push.ensureIndex({x: 1});
- assert.eq(100, db.limit_push.find().length(), "Incorrect number of documents");
-
- // Shard the collection
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.limit_push", key: {x: 1}});
-
- // Now split the and move the data between the shards
- s.adminCommand({split: "test.limit_push", middle: {x: 50}});
- s.adminCommand({
- moveChunk: "test.limit_push",
- find: {x: 51},
- to: s.getOther(s.getPrimaryShard("test")).name,
- _waitForDelete: true
- });
-
- // Check that the chunck have split correctly
- assert.eq(2, s.config.chunks.count({"ns": "test.limit_push"}), "wrong number of chunks");
-
- // The query is asking for the maximum value below a given value
- // db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1)
- q = {x: {$lt: 60}};
-
- // Make sure the basic queries are correct
- assert.eq(60, db.limit_push.find(q).count(), "Did not find 60 documents");
- // rs = db.limit_push.find( q ).sort( { x:-1} ).limit(1)
- // assert.eq( rs , { _id : "1" , x : 59 } , "Did not find document with value 59" );
-
- // Now make sure that the explain shos that each shard is returning a single document as
- // indicated
- // by the "n" element for each shard
- exp = db.limit_push.find(q).sort({x: -1}).limit(1).explain("executionStats");
- printjson(exp);
-
- var execStages = exp.executionStats.executionStages;
- assert.eq("SHARD_MERGE_SORT", execStages.stage, "Expected SHARD_MERGE_SORT as root stage");
-
- var k = 0;
- for (var j in execStages.shards) {
- assert.eq(1,
- execStages.shards[j].executionStages.nReturned,
- "'n' is not 1 from shard000" + k.toString());
- k++;
- }
-
- s.stop();
-
+var s = new ShardingTest({name: "limit_push", shards: 2, mongos: 1});
+var db = s.getDB("test");
+
+// Create some data
+for (i = 0; i < 100; i++) {
+ db.limit_push.insert({_id: i, x: i});
+}
+db.limit_push.ensureIndex({x: 1});
+assert.eq(100, db.limit_push.find().length(), "Incorrect number of documents");
+
+// Shard the collection
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.limit_push", key: {x: 1}});
+
+// Now split the chunk and move the data between the shards
+s.adminCommand({split: "test.limit_push", middle: {x: 50}});
+s.adminCommand({
+ moveChunk: "test.limit_push",
+ find: {x: 51},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
+
+// Check that the chunk has been split correctly
+assert.eq(2, s.config.chunks.count({"ns": "test.limit_push"}), "wrong number of chunks");
+
+// The query is asking for the maximum value below a given value
+// db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1)
+q = {
+ x: {$lt: 60}
+};
+
+// Make sure the basic queries are correct
+assert.eq(60, db.limit_push.find(q).count(), "Did not find 60 documents");
+// rs = db.limit_push.find( q ).sort( { x:-1} ).limit(1)
+// assert.eq( rs , { _id : "1" , x : 59 } , "Did not find document with value 59" );
+
+// Now make sure that the explain output shows that each shard is returning a
+// single document, as indicated by the "nReturned" value for each shard in
+// executionStats
+exp = db.limit_push.find(q).sort({x: -1}).limit(1).explain("executionStats");
+printjson(exp);
+
+var execStages = exp.executionStats.executionStages;
+assert.eq("SHARD_MERGE_SORT", execStages.stage, "Expected SHARD_MERGE_SORT as root stage");
+
+var k = 0;
+for (var j in execStages.shards) {
+ assert.eq(1,
+ execStages.shards[j].executionStages.nReturned,
+ "'n' is not 1 from shard000" + k.toString());
+ k++;
+}
+
+s.stop();
})();
diff --git a/jstests/sharding/linearizable_read_concern.js b/jstests/sharding/linearizable_read_concern.js
index 1269bc5c4a3..9b3ac62acce 100644
--- a/jstests/sharding/linearizable_read_concern.js
+++ b/jstests/sharding/linearizable_read_concern.js
@@ -25,104 +25,104 @@ load("jstests/replsets/rslib.js");
load("jstests/libs/write_concern_util.js");
(function() {
- "use strict";
-
- // Skip db hash check and shard replication since this test leaves a replica set shard
- // partitioned.
- TestData.skipCheckDBHashes = true;
- TestData.skipAwaitingReplicationOnShardsBeforeCheckingUUIDs = true;
-
- var testName = "linearizable_read_concern";
-
- var st = new ShardingTest({
- name: testName,
- shards: 2,
- other: {rs0: {nodes: 3}, rs1: {nodes: 3}, useBridge: true},
- mongos: 1,
- config: 1,
- enableBalancer: false
- });
-
- jsTestLog("Setting up sharded cluster.");
-
- // Set up the sharded cluster.
- var dbName = testName;
- var collName = "test";
- var collNamespace = dbName + "." + collName;
- var shard0ReplTest = st.rs0;
- var shard1ReplTest = st.rs1;
- var testDB = st.s.getDB(dbName);
-
- // Set high election timeout so that primary doesn't step down during linearizable read test.
- var cfg = shard0ReplTest.getReplSetConfigFromNode(0);
- cfg.settings.electionTimeoutMillis = shard0ReplTest.kDefaultTimeoutMS;
- reconfig(shard0ReplTest, cfg, true);
-
- // Set up sharded collection. Put 5 documents on each shard, with keys {x: 0...9}.
- var numDocs = 10;
- shardCollectionWithChunks(st, testDB[collName], numDocs);
-
- // Make sure the 'shardIdentity' document on each shard is replicated to all secondary nodes
- // before issuing reads against them.
- shard0ReplTest.awaitReplication();
- shard1ReplTest.awaitReplication();
-
- // Print current sharding stats for debugging.
- st.printShardingStatus(5);
-
- // Filter to target one document in each shard.
- var shard0DocKey = 2;
- var shard1DocKey = 7;
- var dualShardQueryFilter = {$or: [{x: shard0DocKey}, {x: shard1DocKey}]};
-
- jsTestLog("Testing linearizable read from secondaries");
-
- // Execute a linearizable read from secondaries (targeting both shards) which should fail.
- st.s.setReadPref("secondary");
- var res = assert.commandFailed(testDB.runReadCommand({
- find: collName,
- filter: dualShardQueryFilter,
- readConcern: {level: "linearizable"},
- maxTimeMS: shard0ReplTest.kDefaultTimeoutMS
- }));
- assert.eq(res.code, ErrorCodes.NotMaster);
-
- jsTestLog("Testing linearizable read from primaries.");
-
- // Execute a linearizable read from primaries (targeting both shards) which should succeed.
- st.s.setReadPref("primary");
- var res = assert.writeOK(testDB.runReadCommand({
- find: collName,
- sort: {x: 1},
- filter: dualShardQueryFilter,
- readConcern: {level: "linearizable"},
- maxTimeMS: shard0ReplTest.kDefaultTimeoutMS
- }));
-
- // Make sure data was returned from both shards correctly.
- assert.eq(res.cursor.firstBatch[0].x, shard0DocKey);
- assert.eq(res.cursor.firstBatch[1].x, shard1DocKey);
-
- jsTestLog("Testing linearizable read targeting partitioned primary.");
-
- var primary = shard0ReplTest.getPrimary();
- var secondaries = shard0ReplTest.getSecondaries();
-
- // Partition the primary in the first shard.
- secondaries[0].disconnect(primary);
- secondaries[1].disconnect(primary);
-
- jsTestLog("Current Replica Set Topology of First Shard: [Secondary-Secondary] [Primary]");
-
- // Execute a linearizable read targeting the partitioned primary in first shard, and good
- // primary in the second shard. This should time out due to partitioned primary.
- var result = testDB.runReadCommand({
- find: collName,
- filter: dualShardQueryFilter,
- readConcern: {level: "linearizable"},
- maxTimeMS: 3000
- });
- assert.commandFailedWithCode(result, ErrorCodes.MaxTimeMSExpired);
-
- st.stop();
+"use strict";
+
+// Skip db hash check and shard replication since this test leaves a replica set shard
+// partitioned.
+TestData.skipCheckDBHashes = true;
+TestData.skipAwaitingReplicationOnShardsBeforeCheckingUUIDs = true;
+
+var testName = "linearizable_read_concern";
+
+var st = new ShardingTest({
+ name: testName,
+ shards: 2,
+ other: {rs0: {nodes: 3}, rs1: {nodes: 3}, useBridge: true},
+ mongos: 1,
+ config: 1,
+ enableBalancer: false
+});
+
+jsTestLog("Setting up sharded cluster.");
+
+// Set up the sharded cluster.
+var dbName = testName;
+var collName = "test";
+var collNamespace = dbName + "." + collName;
+var shard0ReplTest = st.rs0;
+var shard1ReplTest = st.rs1;
+var testDB = st.s.getDB(dbName);
+
+// Set high election timeout so that primary doesn't step down during linearizable read test.
+var cfg = shard0ReplTest.getReplSetConfigFromNode(0);
+cfg.settings.electionTimeoutMillis = shard0ReplTest.kDefaultTimeoutMS;
+reconfig(shard0ReplTest, cfg, true);
+
+// Set up sharded collection. Put 5 documents on each shard, with keys {x: 0...9}.
+var numDocs = 10;
+shardCollectionWithChunks(st, testDB[collName], numDocs);
+
+// Make sure the 'shardIdentity' document on each shard is replicated to all secondary nodes
+// before issuing reads against them.
+shard0ReplTest.awaitReplication();
+shard1ReplTest.awaitReplication();
+
+// Print current sharding stats for debugging.
+st.printShardingStatus(5);
+
+// Filter to target one document in each shard.
+var shard0DocKey = 2;
+var shard1DocKey = 7;
+var dualShardQueryFilter = {$or: [{x: shard0DocKey}, {x: shard1DocKey}]};
+
+jsTestLog("Testing linearizable read from secondaries");
+
+// Execute a linearizable read from secondaries (targeting both shards) which should fail.
+st.s.setReadPref("secondary");
+var res = assert.commandFailed(testDB.runReadCommand({
+ find: collName,
+ filter: dualShardQueryFilter,
+ readConcern: {level: "linearizable"},
+ maxTimeMS: shard0ReplTest.kDefaultTimeoutMS
+}));
+assert.eq(res.code, ErrorCodes.NotMaster);
+
+jsTestLog("Testing linearizable read from primaries.");
+
+// Execute a linearizable read from primaries (targeting both shards) which should succeed.
+st.s.setReadPref("primary");
+var res = assert.writeOK(testDB.runReadCommand({
+ find: collName,
+ sort: {x: 1},
+ filter: dualShardQueryFilter,
+ readConcern: {level: "linearizable"},
+ maxTimeMS: shard0ReplTest.kDefaultTimeoutMS
+}));
+
+// Make sure data was returned from both shards correctly.
+assert.eq(res.cursor.firstBatch[0].x, shard0DocKey);
+assert.eq(res.cursor.firstBatch[1].x, shard1DocKey);
+
+jsTestLog("Testing linearizable read targeting partitioned primary.");
+
+var primary = shard0ReplTest.getPrimary();
+var secondaries = shard0ReplTest.getSecondaries();
+
+// Partition the primary in the first shard.
+secondaries[0].disconnect(primary);
+secondaries[1].disconnect(primary);
+
+jsTestLog("Current Replica Set Topology of First Shard: [Secondary-Secondary] [Primary]");
+
+// Execute a linearizable read targeting the partitioned primary in the first shard and the
+// healthy primary in the second shard. This should time out because of the partitioned primary.
+var result = testDB.runReadCommand({
+ find: collName,
+ filter: dualShardQueryFilter,
+ readConcern: {level: "linearizable"},
+ maxTimeMS: 3000
+});
+assert.commandFailedWithCode(result, ErrorCodes.MaxTimeMSExpired);
+
+st.stop();
})();
diff --git a/jstests/sharding/listDatabases.js b/jstests/sharding/listDatabases.js
index f5b046c26b7..ce13ea5871d 100644
--- a/jstests/sharding/listDatabases.js
+++ b/jstests/sharding/listDatabases.js
@@ -1,93 +1,93 @@
(function() {
- 'use strict';
- var test = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1}});
-
- var mongos = test.s0;
- var mongod = test.shard0;
-
- var res;
- var dbArray;
-
- // grab the config db instance by name
- var getDBSection = function(dbsArray, dbToFind) {
- for (var pos in dbsArray) {
- if (dbsArray[pos].name && dbsArray[pos].name === dbToFind)
- return dbsArray[pos];
- }
- return null;
- };
-
- // Function to verify information for a database entry in listDatabases.
- var dbEntryCheck = function(dbEntry, onConfig) {
- assert.neq(null, dbEntry);
- assert.neq(null, dbEntry.sizeOnDisk);
- assert.eq(false, dbEntry.empty);
-
- // Check against shards
- var shards = dbEntry.shards;
- assert(shards);
- assert((shards["config"] && onConfig) || (!shards["config"] && !onConfig));
- };
-
- // Non-config-server db checks.
- {
- assert.writeOK(mongos.getDB("blah").foo.insert({_id: 1}));
- assert.writeOK(mongos.getDB("foo").foo.insert({_id: 1}));
- assert.writeOK(mongos.getDB("raw").foo.insert({_id: 1}));
-
- res = mongos.adminCommand("listDatabases");
- dbArray = res.databases;
-
- dbEntryCheck(getDBSection(dbArray, "blah"), false);
- dbEntryCheck(getDBSection(dbArray, "foo"), false);
- dbEntryCheck(getDBSection(dbArray, "raw"), false);
- }
-
- // Local db is never returned.
- {
- res = mongos.adminCommand("listDatabases");
- dbArray = res.databases;
-
- assert(!getDBSection(dbArray, 'local'));
- }
+'use strict';
+var test = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1}});
- // Admin and config are always reported on the config shard.
- {
- assert.writeOK(mongos.getDB("admin").test.insert({_id: 1}));
- assert.writeOK(mongos.getDB("config").test.insert({_id: 1}));
+var mongos = test.s0;
+var mongod = test.shard0;
- res = mongos.adminCommand("listDatabases");
- dbArray = res.databases;
+var res;
+var dbArray;
- dbEntryCheck(getDBSection(dbArray, "config"), true);
- dbEntryCheck(getDBSection(dbArray, "admin"), true);
+// grab the config db instance by name
+var getDBSection = function(dbsArray, dbToFind) {
+ for (var pos in dbsArray) {
+ if (dbsArray[pos].name && dbsArray[pos].name === dbToFind)
+ return dbsArray[pos];
}
-
- // Config db can be present on config shard and on other shards.
- {
- mongod.getDB("config").foo.insert({_id: 1});
-
- res = mongos.adminCommand("listDatabases");
- dbArray = res.databases;
-
- var entry = getDBSection(dbArray, "config");
- dbEntryCheck(entry, true);
- assert(entry["shards"]);
- assert.eq(Object.keys(entry["shards"]).length, 2);
- }
-
- // Admin db is only reported on the config shard, never on other shards.
- {
- mongod.getDB("admin").foo.insert({_id: 1});
-
- res = mongos.adminCommand("listDatabases");
- dbArray = res.databases;
-
- var entry = getDBSection(dbArray, "admin");
- dbEntryCheck(entry, true);
- assert(entry["shards"]);
- assert.eq(Object.keys(entry["shards"]).length, 1);
- }
-
- test.stop();
+ return null;
+};
+
+// Function to verify information for a database entry in listDatabases.
+var dbEntryCheck = function(dbEntry, onConfig) {
+ assert.neq(null, dbEntry);
+ assert.neq(null, dbEntry.sizeOnDisk);
+ assert.eq(false, dbEntry.empty);
+
+ // Check against shards
+ var shards = dbEntry.shards;
+ assert(shards);
+ assert((shards["config"] && onConfig) || (!shards["config"] && !onConfig));
+};
+
+// Non-config-server db checks.
+{
+ assert.writeOK(mongos.getDB("blah").foo.insert({_id: 1}));
+ assert.writeOK(mongos.getDB("foo").foo.insert({_id: 1}));
+ assert.writeOK(mongos.getDB("raw").foo.insert({_id: 1}));
+
+ res = mongos.adminCommand("listDatabases");
+ dbArray = res.databases;
+
+ dbEntryCheck(getDBSection(dbArray, "blah"), false);
+ dbEntryCheck(getDBSection(dbArray, "foo"), false);
+ dbEntryCheck(getDBSection(dbArray, "raw"), false);
+}
+
+// Local db is never returned.
+{
+ res = mongos.adminCommand("listDatabases");
+ dbArray = res.databases;
+
+ assert(!getDBSection(dbArray, 'local'));
+}
+
+// Admin and config are always reported on the config shard.
+{
+ assert.writeOK(mongos.getDB("admin").test.insert({_id: 1}));
+ assert.writeOK(mongos.getDB("config").test.insert({_id: 1}));
+
+ res = mongos.adminCommand("listDatabases");
+ dbArray = res.databases;
+
+ dbEntryCheck(getDBSection(dbArray, "config"), true);
+ dbEntryCheck(getDBSection(dbArray, "admin"), true);
+}
+
+// Config db can be present on config shard and on other shards.
+{
+ mongod.getDB("config").foo.insert({_id: 1});
+
+ res = mongos.adminCommand("listDatabases");
+ dbArray = res.databases;
+
+ var entry = getDBSection(dbArray, "config");
+ dbEntryCheck(entry, true);
+ assert(entry["shards"]);
+ assert.eq(Object.keys(entry["shards"]).length, 2);
+}
+
+// Admin db is only reported on the config shard, never on other shards.
+{
+ mongod.getDB("admin").foo.insert({_id: 1});
+
+ res = mongos.adminCommand("listDatabases");
+ dbArray = res.databases;
+
+ var entry = getDBSection(dbArray, "admin");
+ dbEntryCheck(entry, true);
+ assert(entry["shards"]);
+ assert.eq(Object.keys(entry["shards"]).length, 1);
+}
+
+test.stop();
})();
diff --git a/jstests/sharding/listshards.js b/jstests/sharding/listshards.js
index d4261cadb03..e008ffb6689 100644
--- a/jstests/sharding/listshards.js
+++ b/jstests/sharding/listshards.js
@@ -2,69 +2,67 @@
// Test the listShards command by adding stand-alone and replica-set shards to a cluster
//
(function() {
- 'use strict';
+'use strict';
- function checkShardName(shardName, shardsArray) {
- var found = false;
- shardsArray.forEach(function(shardObj) {
- if (shardObj._id === shardName) {
- found = true;
- return;
- }
- });
- return found;
- }
+function checkShardName(shardName, shardsArray) {
+ var found = false;
+ shardsArray.forEach(function(shardObj) {
+ if (shardObj._id === shardName) {
+ found = true;
+ return;
+ }
+ });
+ return found;
+}
- var shardTest = new ShardingTest(
- {name: 'listShardsTest', shards: 1, mongos: 1, other: {useHostname: true}});
+var shardTest =
+ new ShardingTest({name: 'listShardsTest', shards: 1, mongos: 1, other: {useHostname: true}});
- var mongos = shardTest.s0;
- var res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- var shardsArray = res.shards;
- assert.eq(shardsArray.length, 1);
+var mongos = shardTest.s0;
+var res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+var shardsArray = res.shards;
+assert.eq(shardsArray.length, 1);
- // add standalone mongod
- var standaloneShard = MongoRunner.runMongod({useHostName: true, shardsvr: ""});
- res = shardTest.admin.runCommand({addShard: standaloneShard.host, name: 'standalone'});
- assert.commandWorked(res, 'addShard command failed');
- res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- shardsArray = res.shards;
- assert.eq(shardsArray.length, 2);
- assert(checkShardName('standalone', shardsArray),
- 'listShards command didn\'t return standalone shard: ' + tojson(shardsArray));
+// add standalone mongod
+var standaloneShard = MongoRunner.runMongod({useHostName: true, shardsvr: ""});
+res = shardTest.admin.runCommand({addShard: standaloneShard.host, name: 'standalone'});
+assert.commandWorked(res, 'addShard command failed');
+res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+shardsArray = res.shards;
+assert.eq(shardsArray.length, 2);
+assert(checkShardName('standalone', shardsArray),
+ 'listShards command didn\'t return standalone shard: ' + tojson(shardsArray));
- // add replica set named 'repl'
- var rs1 =
- new ReplSetTest({name: 'repl', nodes: 1, useHostName: true, nodeOptions: {shardsvr: ""}});
- rs1.startSet();
- rs1.initiate();
- res = shardTest.admin.runCommand({addShard: rs1.getURL()});
- assert.commandWorked(res, 'addShard command failed');
- res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- shardsArray = res.shards;
- assert.eq(shardsArray.length, 3);
- assert(checkShardName('repl', shardsArray),
- 'listShards command didn\'t return replica set shard: ' + tojson(shardsArray));
+// add replica set named 'repl'
+var rs1 = new ReplSetTest({name: 'repl', nodes: 1, useHostName: true, nodeOptions: {shardsvr: ""}});
+rs1.startSet();
+rs1.initiate();
+res = shardTest.admin.runCommand({addShard: rs1.getURL()});
+assert.commandWorked(res, 'addShard command failed');
+res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+shardsArray = res.shards;
+assert.eq(shardsArray.length, 3);
+assert(checkShardName('repl', shardsArray),
+ 'listShards command didn\'t return replica set shard: ' + tojson(shardsArray));
- // remove 'repl' shard
- assert.soon(function() {
- var res = shardTest.admin.runCommand({removeShard: 'repl'});
- assert.commandWorked(res, 'removeShard command failed');
- return res.state === 'completed';
- }, 'failed to remove the replica set shard');
+// remove 'repl' shard
+assert.soon(function() {
+ var res = shardTest.admin.runCommand({removeShard: 'repl'});
+ assert.commandWorked(res, 'removeShard command failed');
+ return res.state === 'completed';
+}, 'failed to remove the replica set shard');
- res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- shardsArray = res.shards;
- assert.eq(shardsArray.length, 2);
- assert(!checkShardName('repl', shardsArray),
- 'listShards command returned removed replica set shard: ' + tojson(shardsArray));
-
- rs1.stopSet();
- shardTest.stop();
- MongoRunner.stopMongod(standaloneShard);
+res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+shardsArray = res.shards;
+assert.eq(shardsArray.length, 2);
+assert(!checkShardName('repl', shardsArray),
+ 'listShards command returned removed replica set shard: ' + tojson(shardsArray));
+rs1.stopSet();
+shardTest.stop();
+MongoRunner.stopMongod(standaloneShard);
})();
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index 996fb949175..55b7548d6db 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -9,142 +9,67 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
-
- var replSetName = "replsets_server-6591";
- var keyfile = "jstests/libs/key1";
- var numShards = 2;
- var username = "foo";
- var password = "bar";
-
- var createUser = function(mongo) {
- print("============ adding a user.");
- mongo.getDB("admin").createUser(
- {user: username, pwd: password, roles: jsTest.adminUserRoles});
- };
-
- var addUsersToEachShard = function(st) {
- for (var i = 0; i < numShards; i++) {
- print("============ adding a user to shard " + i);
- var d = st["shard" + i];
- d.getDB("admin").createUser(
- {user: username, pwd: password, roles: jsTest.adminUserRoles});
+'use strict';
+
+var replSetName = "replsets_server-6591";
+var keyfile = "jstests/libs/key1";
+var numShards = 2;
+var username = "foo";
+var password = "bar";
+
+var createUser = function(mongo) {
+ print("============ adding a user.");
+ mongo.getDB("admin").createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
+};
+
+var addUsersToEachShard = function(st) {
+ for (var i = 0; i < numShards; i++) {
+ print("============ adding a user to shard " + i);
+ var d = st["shard" + i];
+ d.getDB("admin").createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
+ }
+};
+
+var addShard = function(st, shouldPass) {
+ var m = MongoRunner.runMongod({auth: "", keyFile: keyfile, useHostname: false, 'shardsvr': ''});
+ var res = st.getDB("admin").runCommand({addShard: m.host});
+ if (shouldPass) {
+ assert.commandWorked(res, "Add shard");
+ } else {
+ assert.commandFailed(res, "Add shard");
+ }
+ return m;
+};
+
+var findEmptyShard = function(st, ns) {
+ var counts = st.chunkCounts("foo");
+
+ for (var shard in counts) {
+ if (counts[shard] == 0) {
+ return shard;
}
- };
-
- var addShard = function(st, shouldPass) {
- var m =
- MongoRunner.runMongod({auth: "", keyFile: keyfile, useHostname: false, 'shardsvr': ''});
- var res = st.getDB("admin").runCommand({addShard: m.host});
- if (shouldPass) {
- assert.commandWorked(res, "Add shard");
- } else {
- assert.commandFailed(res, "Add shard");
- }
- return m;
- };
+ }
- var findEmptyShard = function(st, ns) {
- var counts = st.chunkCounts("foo");
+ return null;
+};
- for (var shard in counts) {
- if (counts[shard] == 0) {
- return shard;
- }
- }
+var assertCannotRunCommands = function(mongo, st) {
+ print("============ ensuring that commands cannot be run.");
- return null;
- };
-
- var assertCannotRunCommands = function(mongo, st) {
- print("============ ensuring that commands cannot be run.");
-
- // CRUD
- var test = mongo.getDB("test");
- assert.throws(function() {
- test.system.users.findOne();
- });
- assert.writeError(test.foo.save({_id: 0}));
- assert.throws(function() {
- test.foo.findOne({_id: 0});
- });
- assert.writeError(test.foo.update({_id: 0}, {$set: {x: 20}}));
- assert.writeError(test.foo.remove({_id: 0}));
-
- // Multi-shard
- assert.throws(function() {
- test.foo.mapReduce(
- function() {
- emit(1, 1);
- },
- function(id, count) {
- return Array.sum(count);
- },
- {out: "other"});
- });
-
- // Config
- assert.throws(function() {
- mongo.getDB("config").shards.findOne();
- });
-
- var authorizeErrorCode = 13;
- var res = mongo.getDB("admin").runCommand({
- moveChunk: "test.foo",
- find: {_id: 1},
- to: st.shard0.shardName // Arbitrary shard.
- });
- assert.commandFailedWithCode(res, authorizeErrorCode, "moveChunk");
- // Create collection
- assert.commandFailedWithCode(
- mongo.getDB("test").createCollection("log", {capped: true, size: 5242880, max: 5000}),
- authorizeErrorCode,
- "createCollection");
- // Set/Get system parameters
- var params = [
- {param: "journalCommitInterval", val: 200},
- {param: "logLevel", val: 2},
- {param: "logUserIds", val: 1},
- {param: "notablescan", val: 1},
- {param: "quiet", val: 1},
- {param: "replApplyBatchSize", val: 10},
- {param: "replIndexPrefetch", val: "none"},
- {param: "syncdelay", val: 30},
- {param: "traceExceptions", val: true},
- {param: "sslMode", val: "preferSSL"},
- {param: "clusterAuthMode", val: "sendX509"},
- {param: "userCacheInvalidationIntervalSecs", val: 300}
- ];
- params.forEach(function(p) {
- var cmd = {setParameter: 1};
- cmd[p.param] = p.val;
- assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd),
- authorizeErrorCode,
- "setParameter: " + p.param);
- });
- params.forEach(function(p) {
- var cmd = {getParameter: 1};
- cmd[p.param] = 1;
- assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd),
- authorizeErrorCode,
- "getParameter: " + p.param);
- });
- };
-
- var assertCanRunCommands = function(mongo, st) {
- print("============ ensuring that commands can be run.");
-
- // CRUD
- var test = mongo.getDB("test");
-
- // this will throw if it fails
+ // CRUD
+ var test = mongo.getDB("test");
+ assert.throws(function() {
test.system.users.findOne();
+ });
+ assert.writeError(test.foo.save({_id: 0}));
+ assert.throws(function() {
+ test.foo.findOne({_id: 0});
+ });
+ assert.writeError(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.writeError(test.foo.remove({_id: 0}));
- assert.writeOK(test.foo.save({_id: 0}));
- assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}}));
- assert.writeOK(test.foo.remove({_id: 0}));
-
- // Multi-shard
+ // Multi-shard
+ assert.throws(function() {
test.foo.mapReduce(
function() {
emit(1, 1);
@@ -153,122 +78,192 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
return Array.sum(count);
},
{out: "other"});
+ });
- // Config
- // this will throw if it fails
+ // Config
+ assert.throws(function() {
mongo.getDB("config").shards.findOne();
+ });
- var to = findEmptyShard(st, "test.foo");
- var res = mongo.getDB("admin").runCommand({moveChunk: "test.foo", find: {_id: 1}, to: to});
- assert.commandWorked(res);
- };
-
- var authenticate = function(mongo) {
- print("============ authenticating user.");
- mongo.getDB("admin").auth(username, password);
- };
+ var authorizeErrorCode = 13;
+ var res = mongo.getDB("admin").runCommand({
+ moveChunk: "test.foo",
+ find: {_id: 1},
+ to: st.shard0.shardName // Arbitrary shard.
+ });
+ assert.commandFailedWithCode(res, authorizeErrorCode, "moveChunk");
+ // Create collection
+ assert.commandFailedWithCode(
+ mongo.getDB("test").createCollection("log", {capped: true, size: 5242880, max: 5000}),
+ authorizeErrorCode,
+ "createCollection");
+ // Set/Get system parameters
+ var params = [
+ {param: "journalCommitInterval", val: 200},
+ {param: "logLevel", val: 2},
+ {param: "logUserIds", val: 1},
+ {param: "notablescan", val: 1},
+ {param: "quiet", val: 1},
+ {param: "replApplyBatchSize", val: 10},
+ {param: "replIndexPrefetch", val: "none"},
+ {param: "syncdelay", val: 30},
+ {param: "traceExceptions", val: true},
+ {param: "sslMode", val: "preferSSL"},
+ {param: "clusterAuthMode", val: "sendX509"},
+ {param: "userCacheInvalidationIntervalSecs", val: 300}
+ ];
+ params.forEach(function(p) {
+ var cmd = {setParameter: 1};
+ cmd[p.param] = p.val;
+ assert.commandFailedWithCode(
+ mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "setParameter: " + p.param);
+ });
+ params.forEach(function(p) {
+ var cmd = {getParameter: 1};
+ cmd[p.param] = 1;
+ assert.commandFailedWithCode(
+ mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "getParameter: " + p.param);
+ });
+};
+
+var assertCanRunCommands = function(mongo, st) {
+ print("============ ensuring that commands can be run.");
+
+ // CRUD
+ var test = mongo.getDB("test");
+
+ // this will throw if it fails
+ test.system.users.findOne();
+
+ assert.writeOK(test.foo.save({_id: 0}));
+ assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.writeOK(test.foo.remove({_id: 0}));
+
+ // Multi-shard
+ test.foo.mapReduce(
+ function() {
+ emit(1, 1);
+ },
+ function(id, count) {
+ return Array.sum(count);
+ },
+ {out: "other"});
+
+ // Config
+ // this will throw if it fails
+ mongo.getDB("config").shards.findOne();
+
+ var to = findEmptyShard(st, "test.foo");
+ var res = mongo.getDB("admin").runCommand({moveChunk: "test.foo", find: {_id: 1}, to: to});
+ assert.commandWorked(res);
+};
+
+var authenticate = function(mongo) {
+ print("============ authenticating user.");
+ mongo.getDB("admin").auth(username, password);
+};
+
+var setupSharding = function(shardingTest) {
+ var mongo = shardingTest.s;
+
+ print("============ enabling sharding on test.foo.");
+ mongo.getDB("admin").runCommand({enableSharding: "test"});
+ shardingTest.ensurePrimaryShard('test', st.shard1.shardName);
+ mongo.getDB("admin").runCommand({shardCollection: "test.foo", key: {_id: 1}});
+
+ var test = mongo.getDB("test");
+ for (var i = 1; i < 20; i++) {
+ test.foo.insert({_id: i});
+ }
+};
+
+var start = function() {
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ return new ShardingTest({
+ auth: "",
+ shards: numShards,
+ other: {
+ keyFile: keyfile,
+ chunkSize: 1,
+ useHostname:
+ false, // Must use localhost to take advantage of the localhost auth bypass
+ shardAsReplicaSet: false
+ }
+ });
+};
- var setupSharding = function(shardingTest) {
- var mongo = shardingTest.s;
+var shutdown = function(st) {
+ print("============ shutting down.");
- print("============ enabling sharding on test.foo.");
- mongo.getDB("admin").runCommand({enableSharding: "test"});
- shardingTest.ensurePrimaryShard('test', st.shard1.shardName);
- mongo.getDB("admin").runCommand({shardCollection: "test.foo", key: {_id: 1}});
+ // SERVER-8445
+ // Unlike MongoRunner.stopMongod and ReplSetTest.stopSet,
+ // ShardingTest.stop does not have a way to provide auth
+ // information. Therefore, we'll do this manually for now.
- var test = mongo.getDB("test");
- for (var i = 1; i < 20; i++) {
- test.foo.insert({_id: i});
- }
- };
-
- var start = function() {
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- return new ShardingTest({
- auth: "",
- shards: numShards,
- other: {
- keyFile: keyfile,
- chunkSize: 1,
- useHostname:
- false, // Must use localhost to take advantage of the localhost auth bypass
- shardAsReplicaSet: false
- }
- });
- };
-
- var shutdown = function(st) {
- print("============ shutting down.");
-
- // SERVER-8445
- // Unlike MongoRunner.stopMongod and ReplSetTest.stopSet,
- // ShardingTest.stop does not have a way to provide auth
- // information. Therefore, we'll do this manually for now.
-
- for (var i = 0; i < st._mongos.length; i++) {
- var conn = st["s" + i];
- MongoRunner.stopMongos(conn,
- /*signal*/ false,
- {auth: {user: username, pwd: password}});
- }
+ for (var i = 0; i < st._mongos.length; i++) {
+ var conn = st["s" + i];
+ MongoRunner.stopMongos(conn,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
+ }
- for (var i = 0; i < st._connections.length; i++) {
- var conn = st["shard" + i];
- MongoRunner.stopMongod(conn,
- /*signal*/ false,
- {auth: {user: username, pwd: password}});
- }
+ for (var i = 0; i < st._connections.length; i++) {
+ var conn = st["shard" + i];
+ MongoRunner.stopMongod(conn,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
+ }
- for (var i = 0; i < st._configServers.length; i++) {
- var conn = st["config" + i];
- MongoRunner.stopMongod(conn,
- /*signal*/ false,
- {auth: {user: username, pwd: password}});
- }
+ for (var i = 0; i < st._configServers.length; i++) {
+ var conn = st["config" + i];
+ MongoRunner.stopMongod(conn,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
+ }
- st.stop();
- };
+ st.stop();
+};
- print("=====================");
- print("starting shards");
- print("=====================");
- var st = start();
- var host = st.s.host;
- var extraShards = [];
+print("=====================");
+print("starting shards");
+print("=====================");
+var st = start();
+var host = st.s.host;
+var extraShards = [];
- var mongo = new Mongo(host);
+var mongo = new Mongo(host);
- assertCannotRunCommands(mongo, st);
+assertCannotRunCommands(mongo, st);
- extraShards.push(addShard(st, 1));
- createUser(mongo);
+extraShards.push(addShard(st, 1));
+createUser(mongo);
- authenticate(mongo);
- authenticate(st.s);
- setupSharding(st);
+authenticate(mongo);
+authenticate(st.s);
+setupSharding(st);
- addUsersToEachShard(st);
- st.printShardingStatus();
+addUsersToEachShard(st);
+st.printShardingStatus();
- assertCanRunCommands(mongo, st);
+assertCanRunCommands(mongo, st);
- print("===============================");
- print("reconnecting with a new client.");
- print("===============================");
+print("===============================");
+print("reconnecting with a new client.");
+print("===============================");
- mongo = new Mongo(host);
+mongo = new Mongo(host);
- assertCannotRunCommands(mongo, st);
- extraShards.push(addShard(mongo, 0));
+assertCannotRunCommands(mongo, st);
+extraShards.push(addShard(mongo, 0));
- authenticate(mongo);
+authenticate(mongo);
- assertCanRunCommands(mongo, st);
- extraShards.push(addShard(mongo, 1));
- st.printShardingStatus();
+assertCanRunCommands(mongo, st);
+extraShards.push(addShard(mongo, 1));
+st.printShardingStatus();
- shutdown(st);
- extraShards.forEach(function(sh) {
- MongoRunner.stopMongod(sh);
- });
+shutdown(st);
+extraShards.forEach(function(sh) {
+ MongoRunner.stopMongod(sh);
+});
})();
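A readability note on the hunk above: the hard-coded authorizeErrorCode = 13 is the numeric value of the Unauthorized error code. Assuming the shell's generated ErrorCodes map (already used elsewhere in these tests, e.g. ErrorCodes.NoSuchTransaction), an equivalent, more self-describing spelling would be:

// Hypothetical alternative to the magic number used in assertCannotRunCommands.
var authorizeErrorCode = ErrorCodes.Unauthorized;  // numerically 13
assert.eq(13, authorizeErrorCode);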
diff --git a/jstests/sharding/logical_time_api.js b/jstests/sharding/logical_time_api.js
index 918c47b9864..3fc1a484d29 100644
--- a/jstests/sharding/logical_time_api.js
+++ b/jstests/sharding/logical_time_api.js
@@ -8,93 +8,92 @@
* Expects logicalTime to come in the command body from both a mongos and a mongod.
*/
(function() {
- "use strict";
-
- // Returns true if the given object contains a logicalTime BSON object in the following format:
- // $clusterTime: {
- // clusterTime: <Timestamp>
- // signature: {
- // hash: <BinData>
- // keyId: <NumberLong>
- // }
- // }
- function containsValidLogicalTimeBson(obj) {
- if (!obj) {
- return false;
- }
-
- var logicalTime = obj.$clusterTime;
- return logicalTime && isType(logicalTime, "BSON") &&
- isType(logicalTime.clusterTime, "Timestamp") && isType(logicalTime.signature, "BSON") &&
- isType(logicalTime.signature.hash, "BinData") &&
- isType(logicalTime.signature.keyId, "NumberLong");
+"use strict";
+
+// Returns true if the given object contains a logicalTime BSON object in the following format:
+// $clusterTime: {
+// clusterTime: <Timestamp>
+// signature: {
+// hash: <BinData>
+// keyId: <NumberLong>
+// }
+// }
+function containsValidLogicalTimeBson(obj) {
+ if (!obj) {
+ return false;
}
- function isType(val, typeString) {
- assert.eq(Object.prototype.toString.call(val),
- "[object " + typeString + "]",
- "expected: " + val + ", to be of type: " + typeString);
- return true;
- }
-
- // A mongos that talks to a non-sharded collection on a sharded replica set returns a
- // logicalTime BSON object that matches the expected format.
- var st = new ShardingTest({name: "logical_time_api", shards: {rs0: {nodes: 1}}});
-
- var testDB = st.s.getDB("test");
- var res =
- assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 1}]}));
- assert(containsValidLogicalTimeBson(res),
- "Expected command body from a mongos talking to a non-sharded collection on a sharded " +
- "replica set to contain logicalTime, received: " + tojson(res));
-
- // A mongos that talks to a sharded collection on a sharded replica set returns a
- // logicalTime BSON object that matches the expected format.
- assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
- assert.commandWorked(st.s.adminCommand({shardCollection: "test.bar", key: {x: 1}}));
-
- res = assert.commandWorked(testDB.runCommand("insert", {insert: "bar", documents: [{x: 2}]}));
- assert(containsValidLogicalTimeBson(res),
- "Expected command body from a mongos talking to a sharded collection on a sharded " +
- "replica set to contain logicalTime, received: " + tojson(res));
-
- // Verify mongos can accept requests with $clusterTime in the command body.
- assert.commandWorked(testDB.runCommand({isMaster: 1, $clusterTime: res.$clusterTime}));
-
- // A mongod in a sharded replica set returns a logicalTime bson that matches the expected
- // format.
- testDB = st.rs0.getPrimary().getDB("test");
- res = assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 3}]}));
- assert(containsValidLogicalTimeBson(res),
- "Expected command body from a mongod in a sharded replica set to contain " +
- "logicalTime, received: " + tojson(res));
-
- // Verify mongod can accept requests with $clusterTime in the command body.
- res = assert.commandWorked(testDB.runCommand({isMaster: 1, $clusterTime: res.$clusterTime}));
-
- st.stop();
-
- // A mongod from a non-sharded replica set does not return logicalTime.
- var replTest = new ReplSetTest({name: "logical_time_api_non_sharded_replset", nodes: 1});
- replTest.startSet();
- replTest.initiate();
-
- testDB = replTest.getPrimary().getDB("test");
- res = assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 4}]}));
- assert(containsValidLogicalTimeBson(res),
- "Expected command body from a mongod in a non-sharded replica set to " +
- "contain logicalTime, received: " + tojson(res));
-
- replTest.stopSet();
-
- // A standalone mongod does not return logicalTime.
- var standalone = MongoRunner.runMongod();
-
- testDB = standalone.getDB("test");
- res = assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 5}]}));
- assert(!containsValidLogicalTimeBson(res),
- "Expected command body from a standalone mongod to not contain logicalTime, " +
- "received: " + tojson(res));
-
- MongoRunner.stopMongod(standalone);
+ var logicalTime = obj.$clusterTime;
+ return logicalTime && isType(logicalTime, "BSON") &&
+ isType(logicalTime.clusterTime, "Timestamp") && isType(logicalTime.signature, "BSON") &&
+ isType(logicalTime.signature.hash, "BinData") &&
+ isType(logicalTime.signature.keyId, "NumberLong");
+}
+
+function isType(val, typeString) {
+ assert.eq(Object.prototype.toString.call(val),
+ "[object " + typeString + "]",
+ "expected: " + val + ", to be of type: " + typeString);
+ return true;
+}
+
+// A mongos that talks to a non-sharded collection on a sharded replica set returns a
+// logicalTime BSON object that matches the expected format.
+var st = new ShardingTest({name: "logical_time_api", shards: {rs0: {nodes: 1}}});
+
+var testDB = st.s.getDB("test");
+var res = assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 1}]}));
+assert(containsValidLogicalTimeBson(res),
+ "Expected command body from a mongos talking to a non-sharded collection on a sharded " +
+ "replica set to contain logicalTime, received: " + tojson(res));
+
+// A mongos that talks to a sharded collection on a sharded replica set returns a
+// logicalTime BSON object that matches the expected format.
+assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
+assert.commandWorked(st.s.adminCommand({shardCollection: "test.bar", key: {x: 1}}));
+
+res = assert.commandWorked(testDB.runCommand("insert", {insert: "bar", documents: [{x: 2}]}));
+assert(containsValidLogicalTimeBson(res),
+ "Expected command body from a mongos talking to a sharded collection on a sharded " +
+ "replica set to contain logicalTime, received: " + tojson(res));
+
+// Verify mongos can accept requests with $clusterTime in the command body.
+assert.commandWorked(testDB.runCommand({isMaster: 1, $clusterTime: res.$clusterTime}));
+
+// A mongod in a sharded replica set returns a logicalTime BSON object that matches the
+// expected format.
+testDB = st.rs0.getPrimary().getDB("test");
+res = assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 3}]}));
+assert(containsValidLogicalTimeBson(res),
+ "Expected command body from a mongod in a sharded replica set to contain " +
+ "logicalTime, received: " + tojson(res));
+
+// Verify mongod can accept requests with $clusterTime in the command body.
+res = assert.commandWorked(testDB.runCommand({isMaster: 1, $clusterTime: res.$clusterTime}));
+
+st.stop();
+
+// A mongod from a non-sharded replica set also returns logicalTime.
+var replTest = new ReplSetTest({name: "logical_time_api_non_sharded_replset", nodes: 1});
+replTest.startSet();
+replTest.initiate();
+
+testDB = replTest.getPrimary().getDB("test");
+res = assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 4}]}));
+assert(containsValidLogicalTimeBson(res),
+ "Expected command body from a mongod in a non-sharded replica set to " +
+ "contain logicalTime, received: " + tojson(res));
+
+replTest.stopSet();
+
+// A standalone mongod does not return logicalTime.
+var standalone = MongoRunner.runMongod();
+
+testDB = standalone.getDB("test");
+res = assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 5}]}));
+assert(!containsValidLogicalTimeBson(res),
+ "Expected command body from a standalone mongod to not contain logicalTime, " +
+ "received: " + tojson(res));
+
+MongoRunner.stopMongod(standalone);
})();
diff --git a/jstests/sharding/logical_time_metadata.js b/jstests/sharding/logical_time_metadata.js
index e8afac2f5b3..838b3d07816 100644
--- a/jstests/sharding/logical_time_metadata.js
+++ b/jstests/sharding/logical_time_metadata.js
@@ -4,58 +4,57 @@
* where the cluster time metadata can be propagated, making it inherently racy.
*/
(function() {
- "use strict";
+"use strict";
- function assertHasClusterTimeAndOperationTime(res) {
- assert.hasFields(res, ['$clusterTime']);
- assert.hasFields(res.$clusterTime, ['clusterTime', 'signature']);
- }
+function assertHasClusterTimeAndOperationTime(res) {
+ assert.hasFields(res, ['$clusterTime']);
+ assert.hasFields(res.$clusterTime, ['clusterTime', 'signature']);
+}
- var st = new ShardingTest({shards: {rs0: {nodes: 3}}});
- st.s.adminCommand({enableSharding: 'test'});
+var st = new ShardingTest({shards: {rs0: {nodes: 3}}});
+st.s.adminCommand({enableSharding: 'test'});
- var db = st.s.getDB('test');
+var db = st.s.getDB('test');
- var res = db.runCommand({insert: 'user', documents: [{x: 10}]});
- assert.commandWorked(res);
- assertHasClusterTimeAndOperationTime(res);
+var res = db.runCommand({insert: 'user', documents: [{x: 10}]});
+assert.commandWorked(res);
+assertHasClusterTimeAndOperationTime(res);
- res = db.runCommand({blah: 'blah'});
- assert.commandFailed(res);
- assertHasClusterTimeAndOperationTime(res);
+res = db.runCommand({blah: 'blah'});
+assert.commandFailed(res);
+assertHasClusterTimeAndOperationTime(res);
- res = db.runCommand({insert: "user", documents: [{x: 10}], writeConcern: {blah: "blah"}});
- assert.commandFailed(res);
- assertHasClusterTimeAndOperationTime(res);
+res = db.runCommand({insert: "user", documents: [{x: 10}], writeConcern: {blah: "blah"}});
+assert.commandFailed(res);
+assertHasClusterTimeAndOperationTime(res);
- res = st.rs0.getPrimary().adminCommand({replSetGetStatus: 1});
+res = st.rs0.getPrimary().adminCommand({replSetGetStatus: 1});
- // Cluster time may advance after replSetGetStatus finishes executing and before its logical
- // time metadata is computed, in which case the response's $clusterTime will be greater than the
- // appliedOpTime timestamp in its body. Assert the timestamp is <= $clusterTime to account for
- // this.
- var appliedTime = res.optimes.appliedOpTime.ts;
- var logicalTimeMetadata = res.$clusterTime;
- assert.lte(0,
- timestampCmp(appliedTime, logicalTimeMetadata.clusterTime),
- 'appliedTime: ' + tojson(appliedTime) + ' not less than or equal to clusterTime: ' +
- tojson(logicalTimeMetadata.clusterTime));
+// Cluster time may advance after replSetGetStatus finishes executing and before its logical
+// time metadata is computed, in which case the response's $clusterTime will be greater than the
+// appliedOpTime timestamp in its body. Assert the timestamp is <= $clusterTime to account for
+// this.
+var appliedTime = res.optimes.appliedOpTime.ts;
+var logicalTimeMetadata = res.$clusterTime;
+assert.lte(0,
+ timestampCmp(appliedTime, logicalTimeMetadata.clusterTime),
+ 'appliedTime: ' + tojson(appliedTime) + ' not less than or equal to clusterTime: ' +
+ tojson(logicalTimeMetadata.clusterTime));
- assert.commandWorked(db.runCommand({ping: 1, '$clusterTime': logicalTimeMetadata}));
+assert.commandWorked(db.runCommand({ping: 1, '$clusterTime': logicalTimeMetadata}));
- db = st.rs0.getPrimary().getDB('testRS');
- res = db.runCommand({insert: 'user', documents: [{x: 10}]});
- assert.commandWorked(res);
- assertHasClusterTimeAndOperationTime(res);
+db = st.rs0.getPrimary().getDB('testRS');
+res = db.runCommand({insert: 'user', documents: [{x: 10}]});
+assert.commandWorked(res);
+assertHasClusterTimeAndOperationTime(res);
- res = db.runCommand({blah: 'blah'});
- assert.commandFailed(res);
- assertHasClusterTimeAndOperationTime(res);
+res = db.runCommand({blah: 'blah'});
+assert.commandFailed(res);
+assertHasClusterTimeAndOperationTime(res);
- res = db.runCommand({insert: "user", documents: [{x: 10}], writeConcern: {blah: "blah"}});
- assert.commandFailed(res);
- assertHasClusterTimeAndOperationTime(res);
-
- st.stop();
+res = db.runCommand({insert: "user", documents: [{x: 10}], writeConcern: {blah: "blah"}});
+assert.commandFailed(res);
+assertHasClusterTimeAndOperationTime(res);
+st.stop();
})();
diff --git a/jstests/sharding/lookup.js b/jstests/sharding/lookup.js
index 2d988912944..3c0364bd6a4 100644
--- a/jstests/sharding/lookup.js
+++ b/jstests/sharding/lookup.js
@@ -1,254 +1,244 @@
// Basic $lookup regression tests.
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- load("jstests/libs/fixture_helpers.js"); // For isSharded.
- load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
- load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
-
- const st = new ShardingTest({shards: 2, config: 1, mongos: 1});
- const testName = "lookup_sharded";
-
- const nodeList = DiscoverTopology.findNonConfigNodes(st.s);
- setParameterOnAllHosts(nodeList, "internalQueryAllowShardedLookup", true);
-
- const mongosDB = st.s0.getDB(testName);
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Used by testPipeline to sort result documents. All _ids must be primitives.
- function compareId(a, b) {
- if (a._id < b._id) {
- return -1;
- }
- if (a._id > b._id) {
- return 1;
- }
- return 0;
- }
-
- // Helper for testing that pipeline returns correct set of results.
- function testPipeline(pipeline, expectedResult, collection) {
- assert.eq(collection.aggregate(pipeline).toArray().sort(compareId),
- expectedResult.sort(compareId));
- }
-
- function runTest(coll, from, thirdColl, fourthColl) {
- let db = null; // Using the db variable is banned in this function.
-
- assert.commandWorked(coll.remove({}));
- assert.commandWorked(from.remove({}));
- assert.commandWorked(thirdColl.remove({}));
- assert.commandWorked(fourthColl.remove({}));
+"use strict";
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: null}));
- assert.writeOK(coll.insert({_id: 2}));
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/libs/fixture_helpers.js"); // For isSharded.
+load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
+load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
- assert.writeOK(from.insert({_id: 0, b: 1}));
- assert.writeOK(from.insert({_id: 1, b: null}));
- assert.writeOK(from.insert({_id: 2}));
+const st = new ShardingTest({shards: 2, config: 1, mongos: 1});
+const testName = "lookup_sharded";
- //
- // Basic functionality.
- //
+const nodeList = DiscoverTopology.findNonConfigNodes(st.s);
+setParameterOnAllHosts(nodeList, "internalQueryAllowShardedLookup", true);
- // "from" document added to "as" field if a == b, where nonexistent fields are treated as
- // null.
- let expectedResults = [
- {_id: 0, a: 1, "same": [{_id: 0, b: 1}]},
- {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
- ];
- testPipeline([{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}],
- expectedResults,
- coll);
-
- // If localField is nonexistent, it is treated as if it is null.
- expectedResults = [
- {_id: 0, a: 1, "same": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
- ];
- testPipeline(
- [{$lookup: {localField: "nonexistent", foreignField: "b", from: "from", as: "same"}}],
- expectedResults,
- coll);
+const mongosDB = st.s0.getDB(testName);
+assert.commandWorked(mongosDB.dropDatabase());
- // If foreignField is nonexistent, it is treated as if it is null.
- expectedResults = [
- {_id: 0, a: 1, "same": []},
- {_id: 1, a: null, "same": [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
- {_id: 2, "same": [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]}
- ];
- testPipeline(
- [{$lookup: {localField: "a", foreignField: "nonexistent", from: "from", as: "same"}}],
- expectedResults,
- coll);
-
- // If there are no matches or the from coll doesn't exist, the result is an empty array.
- expectedResults =
- [{_id: 0, a: 1, "same": []}, {_id: 1, a: null, "same": []}, {_id: 2, "same": []}];
- testPipeline(
- [{$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}}],
- expectedResults,
- coll);
- testPipeline(
- [{$lookup: {localField: "a", foreignField: "b", from: "nonexistent", as: "same"}}],
- expectedResults,
- coll);
+// Used by testPipeline to sort result documents. All _ids must be primitives.
+function compareId(a, b) {
+ if (a._id < b._id) {
+ return -1;
+ }
+ if (a._id > b._id) {
+ return 1;
+ }
+ return 0;
+}
- // If field name specified by "as" already exists, it is overwritten.
- expectedResults = [
- {_id: 0, "a": [{_id: 0, b: 1}]},
- {_id: 1, "a": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 2, "a": [{_id: 1, b: null}, {_id: 2}]}
- ];
- testPipeline([{$lookup: {localField: "a", foreignField: "b", from: "from", as: "a"}}],
- expectedResults,
- coll);
+// Helper for testing that pipeline returns correct set of results.
+function testPipeline(pipeline, expectedResult, collection) {
+ assert.eq(collection.aggregate(pipeline).toArray().sort(compareId),
+ expectedResult.sort(compareId));
+}
- // Running multiple $lookups in the same pipeline is allowed.
- expectedResults = [
- {_id: 0, a: 1, "c": [{_id: 0, b: 1}], "d": [{_id: 0, b: 1}]},
- {
- _id: 1,
- a: null, "c": [{_id: 1, b: null}, {_id: 2}], "d": [{_id: 1, b: null}, {_id: 2}]
- },
- {_id: 2, "c": [{_id: 1, b: null}, {_id: 2}], "d": [{_id: 1, b: null}, {_id: 2}]}
- ];
- testPipeline(
- [
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "c"}},
- {$project: {"a": 1, "c": 1}},
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "d"}}
- ],
- expectedResults,
- coll);
+function runTest(coll, from, thirdColl, fourthColl) {
+ let db = null; // Using the db variable is banned in this function.
- //
- // Coalescing with $unwind.
- //
-
- // A normal $unwind with on the "as" field.
- expectedResults = [
- {_id: 0, a: 1, same: {_id: 0, b: 1}},
- {_id: 1, a: null, same: {_id: 1, b: null}},
- {_id: 1, a: null, same: {_id: 2}},
- {_id: 2, same: {_id: 1, b: null}},
- {_id: 2, same: {_id: 2}}
- ];
- testPipeline(
- [
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
- {$unwind: {path: "$same"}}
- ],
- expectedResults,
- coll);
+ assert.commandWorked(coll.remove({}));
+ assert.commandWorked(from.remove({}));
+ assert.commandWorked(thirdColl.remove({}));
+ assert.commandWorked(fourthColl.remove({}));
- // An $unwind on the "as" field, with includeArrayIndex.
- expectedResults = [
- {_id: 0, a: 1, same: {_id: 0, b: 1}, index: NumberLong(0)},
- {_id: 1, a: null, same: {_id: 1, b: null}, index: NumberLong(0)},
- {_id: 1, a: null, same: {_id: 2}, index: NumberLong(1)},
- {_id: 2, same: {_id: 1, b: null}, index: NumberLong(0)},
- {_id: 2, same: {_id: 2}, index: NumberLong(1)},
- ];
- testPipeline(
- [
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
- {$unwind: {path: "$same", includeArrayIndex: "index"}}
- ],
- expectedResults,
- coll);
+ assert.writeOK(coll.insert({_id: 0, a: 1}));
+ assert.writeOK(coll.insert({_id: 1, a: null}));
+ assert.writeOK(coll.insert({_id: 2}));
- // Normal $unwind with no matching documents.
- expectedResults = [];
- testPipeline(
- [
- {$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}},
- {$unwind: {path: "$same"}}
- ],
- expectedResults,
- coll);
+ assert.writeOK(from.insert({_id: 0, b: 1}));
+ assert.writeOK(from.insert({_id: 1, b: null}));
+ assert.writeOK(from.insert({_id: 2}));
- // $unwind with preserveNullAndEmptyArray with no matching documents.
- expectedResults = [
- {_id: 0, a: 1},
- {_id: 1, a: null},
- {_id: 2},
- ];
- testPipeline(
- [
- {$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}},
- {$unwind: {path: "$same", preserveNullAndEmptyArrays: true}}
- ],
- expectedResults,
- coll);
+ //
+ // Basic functionality.
+ //
- // $unwind with preserveNullAndEmptyArray, some with matching documents, some without.
- expectedResults = [
- {_id: 0, a: 1},
- {_id: 1, a: null, same: {_id: 0, b: 1}},
- {_id: 2},
- ];
- testPipeline(
- [
- {$lookup: {localField: "_id", foreignField: "b", from: "from", as: "same"}},
- {$unwind: {path: "$same", preserveNullAndEmptyArrays: true}}
- ],
- expectedResults,
- coll);
+ // "from" document added to "as" field if a == b, where nonexistent fields are treated as
+ // null.
+ let expectedResults = [
+ {_id: 0, a: 1, "same": [{_id: 0, b: 1}]},
+ {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
+ ];
+ testPipeline([{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}],
+ expectedResults,
+ coll);
+
+ // If localField is nonexistent, it is treated as if it is null.
+ expectedResults = [
+ {_id: 0, a: 1, "same": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
+ ];
+ testPipeline(
+ [{$lookup: {localField: "nonexistent", foreignField: "b", from: "from", as: "same"}}],
+ expectedResults,
+ coll);
+
+ // If foreignField is nonexistent, it is treated as if it is null.
+ expectedResults = [
+ {_id: 0, a: 1, "same": []},
+ {_id: 1, a: null, "same": [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "same": [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]}
+ ];
+ testPipeline(
+ [{$lookup: {localField: "a", foreignField: "nonexistent", from: "from", as: "same"}}],
+ expectedResults,
+ coll);
+
+ // If there are no matches or the from coll doesn't exist, the result is an empty array.
+ expectedResults =
+ [{_id: 0, a: 1, "same": []}, {_id: 1, a: null, "same": []}, {_id: 2, "same": []}];
+ testPipeline(
+ [{$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}}],
+ expectedResults,
+ coll);
+ testPipeline([{$lookup: {localField: "a", foreignField: "b", from: "nonexistent", as: "same"}}],
+ expectedResults,
+ coll);
+
+ // If field name specified by "as" already exists, it is overwritten.
+ expectedResults = [
+ {_id: 0, "a": [{_id: 0, b: 1}]},
+ {_id: 1, "a": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "a": [{_id: 1, b: null}, {_id: 2}]}
+ ];
+ testPipeline([{$lookup: {localField: "a", foreignField: "b", from: "from", as: "a"}}],
+ expectedResults,
+ coll);
+
+ // Running multiple $lookups in the same pipeline is allowed.
+ expectedResults = [
+ {_id: 0, a: 1, "c": [{_id: 0, b: 1}], "d": [{_id: 0, b: 1}]},
+ {_id: 1, a: null, "c": [{_id: 1, b: null}, {_id: 2}], "d": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "c": [{_id: 1, b: null}, {_id: 2}], "d": [{_id: 1, b: null}, {_id: 2}]}
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "c"}},
+ {$project: {"a": 1, "c": 1}},
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "d"}}
+ ],
+ expectedResults,
+ coll);
- // $unwind with preserveNullAndEmptyArray and includeArrayIndex, some with matching
- // documents, some without.
- expectedResults = [
- {_id: 0, a: 1, index: null},
- {_id: 1, a: null, same: {_id: 0, b: 1}, index: NumberLong(0)},
- {_id: 2, index: null},
- ];
- testPipeline(
- [
- {$lookup: {localField: "_id", foreignField: "b", from: "from", as: "same"}},
- {
- $unwind:
- {path: "$same", preserveNullAndEmptyArrays: true, includeArrayIndex: "index"}
- }
- ],
- expectedResults,
- coll);
+ //
+ // Coalescing with $unwind.
+ //
- //
- // Dependencies.
- //
+    // A normal $unwind on the "as" field.
+ expectedResults = [
+ {_id: 0, a: 1, same: {_id: 0, b: 1}},
+ {_id: 1, a: null, same: {_id: 1, b: null}},
+ {_id: 1, a: null, same: {_id: 2}},
+ {_id: 2, same: {_id: 1, b: null}},
+ {_id: 2, same: {_id: 2}}
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
+ {$unwind: {path: "$same"}}
+ ],
+ expectedResults,
+ coll);
+
+ // An $unwind on the "as" field, with includeArrayIndex.
+ expectedResults = [
+ {_id: 0, a: 1, same: {_id: 0, b: 1}, index: NumberLong(0)},
+ {_id: 1, a: null, same: {_id: 1, b: null}, index: NumberLong(0)},
+ {_id: 1, a: null, same: {_id: 2}, index: NumberLong(1)},
+ {_id: 2, same: {_id: 1, b: null}, index: NumberLong(0)},
+ {_id: 2, same: {_id: 2}, index: NumberLong(1)},
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
+ {$unwind: {path: "$same", includeArrayIndex: "index"}}
+ ],
+ expectedResults,
+ coll);
+
+ // Normal $unwind with no matching documents.
+ expectedResults = [];
+ testPipeline(
+ [
+ {$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}},
+ {$unwind: {path: "$same"}}
+ ],
+ expectedResults,
+ coll);
+
+ // $unwind with preserveNullAndEmptyArrays and no matching documents.
+ expectedResults = [
+ {_id: 0, a: 1},
+ {_id: 1, a: null},
+ {_id: 2},
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}},
+ {$unwind: {path: "$same", preserveNullAndEmptyArrays: true}}
+ ],
+ expectedResults,
+ coll);
+
+ // $unwind with preserveNullAndEmptyArrays; some documents have matches, some do not.
+ expectedResults = [
+ {_id: 0, a: 1},
+ {_id: 1, a: null, same: {_id: 0, b: 1}},
+ {_id: 2},
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "_id", foreignField: "b", from: "from", as: "same"}},
+ {$unwind: {path: "$same", preserveNullAndEmptyArrays: true}}
+ ],
+ expectedResults,
+ coll);
+
+ // $unwind with preserveNullAndEmptyArrays and includeArrayIndex; some documents have
+ // matches, some do not.
+ expectedResults = [
+ {_id: 0, a: 1, index: null},
+ {_id: 1, a: null, same: {_id: 0, b: 1}, index: NumberLong(0)},
+ {_id: 2, index: null},
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "_id", foreignField: "b", from: "from", as: "same"}},
+ {$unwind: {path: "$same", preserveNullAndEmptyArrays: true, includeArrayIndex: "index"}}
+ ],
+ expectedResults,
+ coll);
- // If $lookup didn't add "localField" to its dependencies, this test would fail as the
- // value of the "a" field would be lost and treated as null.
- expectedResults = [
- {_id: 0, "same": [{_id: 0, b: 1}]},
- {_id: 1, "same": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
- ];
- testPipeline(
- [
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
- {$project: {"same": 1}}
- ],
- expectedResults,
- coll);
+ //
+ // Dependencies.
+ //
- // If $lookup didn't add fields referenced by "let" variables to its dependencies, this test
- // would fail as the value of the "a" field would be lost and treated as null.
- expectedResults = [
- {"_id": 0, "same": [{"_id": 0, "x": 1}, {"_id": 1, "x": 1}, {"_id": 2, "x": 1}]},
- {
- "_id": 1,
- "same": [{"_id": 0, "x": null}, {"_id": 1, "x": null}, {"_id": 2, "x": null}]
- },
- {"_id": 2, "same": [{"_id": 0}, {"_id": 1}, {"_id": 2}]}
- ];
- testPipeline(
+ // If $lookup didn't add "localField" to its dependencies, this test would fail as the
+ // value of the "a" field would be lost and treated as null.
+ expectedResults = [
+ {_id: 0, "same": [{_id: 0, b: 1}]},
+ {_id: 1, "same": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
+ {$project: {"same": 1}}
+ ],
+ expectedResults,
+ coll);
+
+ // If $lookup didn't add fields referenced by "let" variables to its dependencies, this test
+ // would fail as the value of the "a" field would be lost and treated as null.
+ expectedResults = [
+ {"_id": 0, "same": [{"_id": 0, "x": 1}, {"_id": 1, "x": 1}, {"_id": 2, "x": 1}]},
+ {"_id": 1, "same": [{"_id": 0, "x": null}, {"_id": 1, "x": null}, {"_id": 2, "x": null}]},
+ {"_id": 2, "same": [{"_id": 0}, {"_id": 1}, {"_id": 2}]}
+ ];
+ testPipeline(
[
{
$lookup: {
@@ -263,53 +253,53 @@
expectedResults,
coll);
- //
- // Dotted field paths.
- //
-
- assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: null}));
- assert.writeOK(coll.insert({_id: 2}));
- assert.writeOK(coll.insert({_id: 3, a: {c: 1}}));
-
- assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, b: 1}));
- assert.writeOK(from.insert({_id: 1, b: null}));
- assert.writeOK(from.insert({_id: 2}));
- assert.writeOK(from.insert({_id: 3, b: {c: 1}}));
- assert.writeOK(from.insert({_id: 4, b: {c: 2}}));
-
- // Once without a dotted field.
- let pipeline = [{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}];
- expectedResults = [
- {_id: 0, a: 1, "same": [{_id: 0, b: 1}]},
- {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 3, a: {c: 1}, "same": [{_id: 3, b: {c: 1}}]}
- ];
- testPipeline(pipeline, expectedResults, coll);
-
- // Look up a dotted field.
- pipeline = [{$lookup: {localField: "a.c", foreignField: "b.c", from: "from", as: "same"}}];
- // All but the last document in 'coll' have a nullish value for 'a.c'.
- expectedResults = [
- {_id: 0, a: 1, same: [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
- {_id: 1, a: null, same: [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
- {_id: 2, same: [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
- {_id: 3, a: {c: 1}, same: [{_id: 3, b: {c: 1}}]}
- ];
- testPipeline(pipeline, expectedResults, coll);
-
- // With an $unwind stage.
- assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: {b: 1}}));
- assert.writeOK(coll.insert({_id: 1}));
-
- assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, target: 1}));
+ //
+ // Dotted field paths.
+ //
- pipeline = [
+ assert.commandWorked(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: 1}));
+ assert.writeOK(coll.insert({_id: 1, a: null}));
+ assert.writeOK(coll.insert({_id: 2}));
+ assert.writeOK(coll.insert({_id: 3, a: {c: 1}}));
+
+ assert.commandWorked(from.remove({}));
+ assert.writeOK(from.insert({_id: 0, b: 1}));
+ assert.writeOK(from.insert({_id: 1, b: null}));
+ assert.writeOK(from.insert({_id: 2}));
+ assert.writeOK(from.insert({_id: 3, b: {c: 1}}));
+ assert.writeOK(from.insert({_id: 4, b: {c: 2}}));
+
+ // First, test a $lookup without a dotted field.
+ let pipeline = [{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}];
+ expectedResults = [
+ {_id: 0, a: 1, "same": [{_id: 0, b: 1}]},
+ {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 3, a: {c: 1}, "same": [{_id: 3, b: {c: 1}}]}
+ ];
+ testPipeline(pipeline, expectedResults, coll);
+
+ // Look up a dotted field.
+ pipeline = [{$lookup: {localField: "a.c", foreignField: "b.c", from: "from", as: "same"}}];
+ // All but the last document in 'coll' have a nullish value for 'a.c'.
+ expectedResults = [
+ {_id: 0, a: 1, same: [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
+ {_id: 1, a: null, same: [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, same: [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
+ {_id: 3, a: {c: 1}, same: [{_id: 3, b: {c: 1}}]}
+ ];
+ testPipeline(pipeline, expectedResults, coll);
+
+ // With an $unwind stage.
+ assert.commandWorked(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: {b: 1}}));
+ assert.writeOK(coll.insert({_id: 1}));
+
+ assert.commandWorked(from.remove({}));
+ assert.writeOK(from.insert({_id: 0, target: 1}));
+
+ pipeline = [
{
$lookup: {
localField: "a.b",
@@ -329,25 +319,25 @@
}
}
];
- expectedResults = [
- {_id: 0, a: {b: 1}, same: {documents: {_id: 0, target: 1}}, c: {d: {e: NumberLong(0)}}},
- {_id: 1, same: {}, c: {d: {e: null}}},
- ];
- testPipeline(pipeline, expectedResults, coll);
+ expectedResults = [
+ {_id: 0, a: {b: 1}, same: {documents: {_id: 0, target: 1}}, c: {d: {e: NumberLong(0)}}},
+ {_id: 1, same: {}, c: {d: {e: null}}},
+ ];
+ testPipeline(pipeline, expectedResults, coll);
- //
- // Query-like local fields (SERVER-21287)
- //
+ //
+ // Query-like local fields (SERVER-21287)
+ //
- // This must only do an equality match rather than treating the value as a regex.
- assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: /a regex/}));
+ // This must only do an equality match rather than treating the value as a regex.
+ assert.commandWorked(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: /a regex/}));
- assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, b: /a regex/}));
- assert.writeOK(from.insert({_id: 1, b: "string that matches /a regex/"}));
+ assert.commandWorked(from.remove({}));
+ assert.writeOK(from.insert({_id: 0, b: /a regex/}));
+ assert.writeOK(from.insert({_id: 1, b: "string that matches /a regex/"}));
- pipeline = [
+ pipeline = [
{
$lookup: {
localField: "a",
@@ -357,22 +347,22 @@
}
},
];
- expectedResults = [{_id: 0, a: /a regex/, b: [{_id: 0, b: /a regex/}]}];
- testPipeline(pipeline, expectedResults, coll);
+ expectedResults = [{_id: 0, a: /a regex/, b: [{_id: 0, b: /a regex/}]}];
+ testPipeline(pipeline, expectedResults, coll);
- //
- // A local value of an array.
- //
+ //
+ // A local value of an array.
+ //
- // Basic array corresponding to multiple documents.
- assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [0, 1, 2]}));
+ // Basic array corresponding to multiple documents.
+ assert.commandWorked(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: [0, 1, 2]}));
- assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0}));
- assert.writeOK(from.insert({_id: 1}));
+ assert.commandWorked(from.remove({}));
+ assert.writeOK(from.insert({_id: 0}));
+ assert.writeOK(from.insert({_id: 1}));
- pipeline = [
+ pipeline = [
{
$lookup: {
localField: "a",
@@ -382,18 +372,18 @@
}
},
];
- expectedResults = [{_id: 0, a: [0, 1, 2], b: [{_id: 0}, {_id: 1}]}];
- testPipeline(pipeline, expectedResults, coll);
+ expectedResults = [{_id: 0, a: [0, 1, 2], b: [{_id: 0}, {_id: 1}]}];
+ testPipeline(pipeline, expectedResults, coll);
- // Basic array corresponding to a single document.
- assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [1]}));
+ // Basic array corresponding to a single document.
+ assert.commandWorked(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: [1]}));
- assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0}));
- assert.writeOK(from.insert({_id: 1}));
+ assert.commandWorked(from.remove({}));
+ assert.writeOK(from.insert({_id: 0}));
+ assert.writeOK(from.insert({_id: 1}));
- pipeline = [
+ pipeline = [
{
$lookup: {
localField: "a",
@@ -403,21 +393,21 @@
}
},
];
- expectedResults = [{_id: 0, a: [1], b: [{_id: 1}]}];
- testPipeline(pipeline, expectedResults, coll);
+ expectedResults = [{_id: 0, a: [1], b: [{_id: 1}]}];
+ testPipeline(pipeline, expectedResults, coll);
- // Array containing regular expressions.
- assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [/a regex/, /^x/]}));
- assert.writeOK(coll.insert({_id: 1, a: [/^x/]}));
+ // Array containing regular expressions.
+ assert.commandWorked(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: [/a regex/, /^x/]}));
+ assert.writeOK(coll.insert({_id: 1, a: [/^x/]}));
- assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, b: "should not match a regex"}));
- assert.writeOK(from.insert({_id: 1, b: "xxxx"}));
- assert.writeOK(from.insert({_id: 2, b: /a regex/}));
- assert.writeOK(from.insert({_id: 3, b: /^x/}));
+ assert.commandWorked(from.remove({}));
+ assert.writeOK(from.insert({_id: 0, b: "should not match a regex"}));
+ assert.writeOK(from.insert({_id: 1, b: "xxxx"}));
+ assert.writeOK(from.insert({_id: 2, b: /a regex/}));
+ assert.writeOK(from.insert({_id: 3, b: /^x/}));
- pipeline = [
+ pipeline = [
{
$lookup: {
localField: "a",
@@ -427,23 +417,23 @@
}
},
];
- expectedResults = [
- {_id: 0, a: [/a regex/, /^x/], b: [{_id: 2, b: /a regex/}, {_id: 3, b: /^x/}]},
- {_id: 1, a: [/^x/], b: [{_id: 3, b: /^x/}]}
- ];
- testPipeline(pipeline, expectedResults, coll);
-
- // 'localField' references a field within an array of sub-objects.
- assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [{b: 1}, {b: 2}]}));
-
- assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0}));
- assert.writeOK(from.insert({_id: 1}));
- assert.writeOK(from.insert({_id: 2}));
- assert.writeOK(from.insert({_id: 3}));
-
- pipeline = [
+ expectedResults = [
+ {_id: 0, a: [/a regex/, /^x/], b: [{_id: 2, b: /a regex/}, {_id: 3, b: /^x/}]},
+ {_id: 1, a: [/^x/], b: [{_id: 3, b: /^x/}]}
+ ];
+ testPipeline(pipeline, expectedResults, coll);
+
+ // 'localField' references a field within an array of sub-objects.
+ assert.commandWorked(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: [{b: 1}, {b: 2}]}));
+
+ assert.commandWorked(from.remove({}));
+ assert.writeOK(from.insert({_id: 0}));
+ assert.writeOK(from.insert({_id: 1}));
+ assert.writeOK(from.insert({_id: 2}));
+ assert.writeOK(from.insert({_id: 3}));
+
+ pipeline = [
{
$lookup: {
localField: "a.b",
@@ -454,17 +444,17 @@
},
];
- expectedResults = [{"_id": 0, "a": [{"b": 1}, {"b": 2}], "c": [{"_id": 1}, {"_id": 2}]}];
- testPipeline(pipeline, expectedResults, coll);
+ expectedResults = [{"_id": 0, "a": [{"b": 1}, {"b": 2}], "c": [{"_id": 1}, {"_id": 2}]}];
+ testPipeline(pipeline, expectedResults, coll);
- //
- // Test $lookup when the foreign collection is a view.
- //
- // TODO SERVER-32548: Allow this test to run when the foreign collection is sharded.
- if (!FixtureHelpers.isSharded(from)) {
- assert.commandWorked(
- coll.getDB().runCommand({create: "fromView", viewOn: "from", pipeline: []}));
- pipeline = [
+ //
+ // Test $lookup when the foreign collection is a view.
+ //
+ // TODO SERVER-32548: Allow this test to run when the foreign collection is sharded.
+ if (!FixtureHelpers.isSharded(from)) {
+ assert.commandWorked(
+ coll.getDB().runCommand({create: "fromView", viewOn: "from", pipeline: []}));
+ pipeline = [
{
$lookup: {
localField: "a.b",
@@ -475,181 +465,167 @@
},
];
- expectedResults =
- [{"_id": 0, "a": [{"b": 1}, {"b": 2}], "c": [{"_id": 1}, {"_id": 2}]}];
- testPipeline(pipeline, expectedResults, coll);
- }
-
- //
- // Error cases.
- //
-
- // 'from', 'as', 'localField' and 'foreignField' must all be specified when run with
- // localField/foreignField syntax.
- assertErrorCode(coll,
- [{$lookup: {foreignField: "b", from: "from", as: "same"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {localField: "a", from: "from", as: "same"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {localField: "a", foreignField: "b", as: "same"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {localField: "a", foreignField: "b", from: "from"}}],
- ErrorCodes.FailedToParse);
-
- // localField/foreignField and pipeline/let syntax must not be mixed.
- assertErrorCode(coll,
- [{$lookup: {pipeline: [], foreignField: "b", from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {pipeline: [], localField: "b", from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(
- coll,
- [{$lookup: {pipeline: [], localField: "b", foreignField: "b", from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {let : {a: "$b"}, foreignField: "b", from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {let : {a: "$b"}, localField: "b", from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(
- coll,
- [{
- $lookup:
- {let : {a: "$b"}, localField: "b", foreignField: "b", from: "from", as: "as"}
- }],
- ErrorCodes.FailedToParse);
-
- // 'from', 'as', 'localField' and 'foreignField' must all be of type string.
- assertErrorCode(coll,
- [{$lookup: {localField: 1, foreignField: "b", from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {localField: "a", foreignField: 1, from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {localField: "a", foreignField: "b", from: 1, as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {localField: "a", foreignField: "b", from: "from", as: 1}}],
- ErrorCodes.FailedToParse);
-
- // The foreign collection must be a valid namespace.
- assertErrorCode(coll,
- [{$lookup: {localField: "a", foreignField: "b", from: "", as: "as"}}],
- ErrorCodes.InvalidNamespace);
- // $lookup's field must be an object.
- assertErrorCode(coll, [{$lookup: "string"}], ErrorCodes.FailedToParse);
+ expectedResults = [{"_id": 0, "a": [{"b": 1}, {"b": 2}], "c": [{"_id": 1}, {"_id": 2}]}];
+ testPipeline(pipeline, expectedResults, coll);
}
//
- // Test unsharded local collection and unsharded foreign collection.
+ // Error cases.
//
- mongosDB.lookUp.drop();
- mongosDB.from.drop();
- mongosDB.thirdColl.drop();
- mongosDB.fourthColl.drop();
-
- runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
-
- // Verify that the command is sent only to the primary shard when both the local and foreign
- // collections are unsharded.
- assert(!assert
- .commandWorked(mongosDB.lookup.explain().aggregate([{
- $lookup: {
- from: mongosDB.from.getName(),
- localField: "a",
- foreignField: "b",
- as: "results"
- }
- }]))
- .hasOwnProperty("shards"));
- // Enable sharding on the test DB and ensure its primary is shard0000.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
- //
- // Test unsharded local collection and sharded foreign collection.
- //
-
- // Shard the foreign collection on _id.
- st.shardColl(mongosDB.from, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
- runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
-
- //
- // Test sharded local collection and unsharded foreign collection.
- //
- assert(mongosDB.from.drop());
-
- // Shard the local collection on _id.
- st.shardColl(mongosDB.lookup, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
- runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
-
- //
- // Test sharded local and foreign collections.
- //
-
- // Shard the foreign collection on _id.
- st.shardColl(mongosDB.from, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
- runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
-
- // Test that a $lookup from an unsharded collection followed by a $merge to a sharded collection
- // is allowed.
- const sourceColl = st.getDB(testName).lookUp;
- assert(sourceColl.drop());
- assert(st.adminCommand({shardCollection: sourceColl.getFullName(), key: {_id: "hashed"}}));
- assert.commandWorked(sourceColl.insert({_id: 0, a: 0}));
-
- const outColl = st.getDB(testName).out;
- assert(outColl.drop());
- assert(st.adminCommand({shardCollection: outColl.getFullName(), key: {_id: "hashed"}}));
-
- const fromColl = st.getDB(testName).from;
- assert(fromColl.drop());
- assert.commandWorked(fromColl.insert({_id: 0, b: 0}));
-
- sourceColl.aggregate([
- {$lookup: {localField: "a", foreignField: "b", from: fromColl.getName(), as: "same"}},
- {$merge: {into: outColl.getName()}}
- ]);
-
- assert.eq([{a: 0, same: [{_id: 0, b: 0}]}], outColl.find({}, {_id: 0}).toArray());
-
- // Disable the server parameter. Be sure that an attempt to run a $lookup on a sharded
- // collection fails.
- setParameterOnAllHosts(nodeList, "internalQueryAllowShardedLookup", false);
-
- // Re shard the foreign collection on _id.
- st.shardColl(mongosDB.from, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
-
- let err = assert.throws(() => sourceColl
- .aggregate([{
- $lookup: {
- localField: "a",
- foreignField: "b",
- from: fromColl.getName(),
- as: "same"
- }
- }])
- .itcount());
- assert.eq(err.code, 28769);
- err = assert.throws(() => sourceColl
- .aggregate([{
- $lookup: {
- localField: "a",
- foreignField: "b",
- from: fromColl.getName(),
- as: "same"
- }
- }],
- {allowDiskUse: true})
- .itcount());
- assert.eq(err.code, 28769);
- err = assert.throws(() => sourceColl
+ // 'from', 'as', 'localField' and 'foreignField' must all be specified when run with
+ // localField/foreignField syntax.
+ assertErrorCode(
+ coll, [{$lookup: {foreignField: "b", from: "from", as: "same"}}], ErrorCodes.FailedToParse);
+ assertErrorCode(
+ coll, [{$lookup: {localField: "a", from: "from", as: "same"}}], ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {localField: "a", foreignField: "b", as: "same"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {localField: "a", foreignField: "b", from: "from"}}],
+ ErrorCodes.FailedToParse);
+
+ // localField/foreignField and pipeline/let syntax must not be mixed.
+ assertErrorCode(coll,
+ [{$lookup: {pipeline: [], foreignField: "b", from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {pipeline: [], localField: "b", from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(
+ coll,
+ [{$lookup: {pipeline: [], localField: "b", foreignField: "b", from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {let : {a: "$b"}, foreignField: "b", from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {let : {a: "$b"}, localField: "b", from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(
+ coll,
+ [{$lookup: {let : {a: "$b"}, localField: "b", foreignField: "b", from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+
+ // 'from', 'as', 'localField' and 'foreignField' must all be of type string.
+ assertErrorCode(coll,
+ [{$lookup: {localField: 1, foreignField: "b", from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {localField: "a", foreignField: 1, from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {localField: "a", foreignField: "b", from: 1, as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {localField: "a", foreignField: "b", from: "from", as: 1}}],
+ ErrorCodes.FailedToParse);
+
+ // The foreign collection must be a valid namespace.
+ assertErrorCode(coll,
+ [{$lookup: {localField: "a", foreignField: "b", from: "", as: "as"}}],
+ ErrorCodes.InvalidNamespace);
+ // $lookup's field must be an object.
+ assertErrorCode(coll, [{$lookup: "string"}], ErrorCodes.FailedToParse);
+}
+
+//
+// Test unsharded local collection and unsharded foreign collection.
+//
+mongosDB.lookUp.drop();
+mongosDB.from.drop();
+mongosDB.thirdColl.drop();
+mongosDB.fourthColl.drop();
+
+runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
+
+// Verify that the command is sent only to the primary shard when both the local and foreign
+// collections are unsharded.
+assert(
+ !assert
+ .commandWorked(mongosDB.lookup.explain().aggregate([{
+ $lookup:
+ {from: mongosDB.from.getName(), localField: "a", foreignField: "b", as: "results"}
+ }]))
+ .hasOwnProperty("shards"));
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+//
+// Test unsharded local collection and sharded foreign collection.
+//
+
+// Shard the foreign collection on _id.
+st.shardColl(mongosDB.from, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
+runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
+
+//
+// Test sharded local collection and unsharded foreign collection.
+//
+assert(mongosDB.from.drop());
+
+// Shard the local collection on _id.
+st.shardColl(mongosDB.lookup, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
+runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
+
+//
+// Test sharded local and foreign collections.
+//
+
+// Shard the foreign collection on _id.
+st.shardColl(mongosDB.from, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
+runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
+
+// Test that a $lookup whose foreign collection is unsharded, followed by a $merge into a sharded
+// collection, is allowed.
+const sourceColl = st.getDB(testName).lookUp;
+assert(sourceColl.drop());
+assert(st.adminCommand({shardCollection: sourceColl.getFullName(), key: {_id: "hashed"}}));
+assert.commandWorked(sourceColl.insert({_id: 0, a: 0}));
+
+const outColl = st.getDB(testName).out;
+assert(outColl.drop());
+assert(st.adminCommand({shardCollection: outColl.getFullName(), key: {_id: "hashed"}}));
+
+const fromColl = st.getDB(testName).from;
+assert(fromColl.drop());
+assert.commandWorked(fromColl.insert({_id: 0, b: 0}));
+
+sourceColl.aggregate([
+ {$lookup: {localField: "a", foreignField: "b", from: fromColl.getName(), as: "same"}},
+ {$merge: {into: outColl.getName()}}
+]);
+
+assert.eq([{a: 0, same: [{_id: 0, b: 0}]}], outColl.find({}, {_id: 0}).toArray());
+
+// Disable the server parameter and verify that an attempt to run a $lookup on a sharded
+// collection fails.
+setParameterOnAllHosts(nodeList, "internalQueryAllowShardedLookup", false);
+
+// Re-shard the foreign collection on _id.
+st.shardColl(mongosDB.from, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
+
+let err = assert.throws(
+ () =>
+ sourceColl
+ .aggregate([{
+ $lookup: {localField: "a", foreignField: "b", from: fromColl.getName(), as: "same"}
+ }])
+ .itcount());
+assert.eq(err.code, 28769);
+err = assert.throws(
+ () => sourceColl
+ .aggregate(
+ [{
+ $lookup:
+ {localField: "a", foreignField: "b", from: fromColl.getName(), as: "same"}
+ }],
+ {allowDiskUse: true})
+ .itcount());
+assert.eq(err.code, 28769);
+err = assert.throws(() => sourceColl
.aggregate(
[
{$_internalSplitPipeline: {mergeType: "anyShard"}},
@@ -664,7 +640,7 @@
],
{allowDiskUse: true})
.itcount());
- assert.eq(err.code, 28769);
+assert.eq(err.code, 28769);
- st.stop();
+st.stop();
}());
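
The reflowed test above exercises $lookup's equality-match semantics for null and missing values. As a quick reference, that behaviour can be reproduced with a minimal shell sketch; the collection names "local" and "foreign" below are illustrative assumptions, not identifiers taken from the patch:

    // Minimal sketch, assuming a mongo shell with `db` pointing at a running server.
    // $lookup treats a missing or null localField as null, so it matches foreign
    // documents whose foreignField is null or absent.
    db.local.drop();
    db.foreign.drop();
    assert.writeOK(db.local.insert({_id: 0}));             // "a" is absent, treated as null.
    assert.writeOK(db.foreign.insert({_id: 0, b: null}));  // Explicit null.
    assert.writeOK(db.foreign.insert({_id: 1}));           // "b" is absent.
    const res =
        db.local
            .aggregate(
                [{$lookup: {from: "foreign", localField: "a", foreignField: "b", as: "out"}}])
            .toArray();
    assert.eq(res[0].out.length, 2);  // Both foreign documents match the missing "a".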
diff --git a/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
index a1bce25ad81..9d71a70e135 100644
--- a/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
+++ b/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
@@ -2,114 +2,114 @@
// sharded with a compound shard key.
// @tags: [uses_change_streams]
(function() {
- "use strict";
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- const st = new ShardingTest({
- shards: 2,
- rs: {
- nodes: 1,
- enableMajorityReadConcern: '',
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
- }
- });
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB['coll'];
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Shard the test collection with a compound shard key: a, b, c. Then split it into two chunks,
- // and put one chunk on each shard.
- assert.commandWorked(mongosDB.adminCommand(
- {shardCollection: mongosColl.getFullName(), key: {a: 1, b: 1, c: 1}}));
-
- // Split the collection into 2 chunks:
- // [{a: MinKey, b: MinKey, c: MinKey}, {a: 1, b: MinKey, c: MinKey})
- // and
- // [{a: 1, b: MinKey, c: MinKey}, {a: MaxKey, b: MaxKey, c: MaxKey}).
- assert.commandWorked(mongosDB.adminCommand(
- {split: mongosColl.getFullName(), middle: {a: 1, b: MinKey, c: MinKey}}));
-
- // Move the upper chunk to shard 1.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {a: 1, b: MinKey, c: MinKey},
- to: st.rs1.getURL()
- }));
-
- const changeStreamSingleColl = mongosColl.watch([], {fullDocument: "updateLookup"});
- const changeStreamWholeDb = mongosDB.watch([], {fullDocument: "updateLookup"});
-
- const nDocs = 6;
- const bValues = ["one", "two", "three", "four", "five", "six"];
-
- // This shard key function results in 1/3rd of documents on shard0 and 2/3rds on shard1.
- function shardKeyFromId(id) {
- return {a: id % 3, b: bValues[id], c: id % 2};
+"use strict";
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ shards: 2,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
}
-
- // Do some writes.
+});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB['coll'];
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Shard the test collection with a compound shard key: a, b, c. Then split it into two chunks,
+// and put one chunk on each shard.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {a: 1, b: 1, c: 1}}));
+
+// Split the collection into 2 chunks:
+// [{a: MinKey, b: MinKey, c: MinKey}, {a: 1, b: MinKey, c: MinKey})
+// and
+// [{a: 1, b: MinKey, c: MinKey}, {a: MaxKey, b: MaxKey, c: MaxKey}).
+assert.commandWorked(
+ mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {a: 1, b: MinKey, c: MinKey}}));
+
+// Move the upper chunk to shard 1.
+assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {a: 1, b: MinKey, c: MinKey},
+ to: st.rs1.getURL()
+}));
+
+const changeStreamSingleColl = mongosColl.watch([], {fullDocument: "updateLookup"});
+const changeStreamWholeDb = mongosDB.watch([], {fullDocument: "updateLookup"});
+
+const nDocs = 6;
+const bValues = ["one", "two", "three", "four", "five", "six"];
+
+// This shard key function results in 1/3rd of documents on shard0 and 2/3rds on shard1.
+function shardKeyFromId(id) {
+ return {a: id % 3, b: bValues[id], c: id % 2};
+}
+
+// Do some writes.
+for (let id = 0; id < nDocs; ++id) {
+ const documentKey = Object.merge({_id: id}, shardKeyFromId(id));
+ assert.writeOK(mongosColl.insert(documentKey));
+ assert.writeOK(mongosColl.update(documentKey, {$set: {updatedCount: 1}}));
+}
+
+[changeStreamSingleColl, changeStreamWholeDb].forEach(function(changeStream) {
+ jsTestLog(`Testing updateLookup on namespace ${changeStream._ns}`);
for (let id = 0; id < nDocs; ++id) {
- const documentKey = Object.merge({_id: id}, shardKeyFromId(id));
- assert.writeOK(mongosColl.insert(documentKey));
- assert.writeOK(mongosColl.update(documentKey, {$set: {updatedCount: 1}}));
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
+
+ assert.soon(() => changeStream.hasNext());
+ next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
+ assert.docEq(next.fullDocument,
+ Object.merge(shardKeyFromId(id), {_id: id, updatedCount: 1}));
}
-
- [changeStreamSingleColl, changeStreamWholeDb].forEach(function(changeStream) {
- jsTestLog(`Testing updateLookup on namespace ${changeStream._ns}`);
- for (let id = 0; id < nDocs; ++id) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
- assert.docEq(next.fullDocument,
- Object.merge(shardKeyFromId(id), {_id: id, updatedCount: 1}));
- }
- });
-
- // Test that the change stream can still see the updated post image, even if a chunk is
- // migrated.
+});
+
+// Test that the change stream can still see the updated post image, even if a chunk is
+// migrated.
+for (let id = 0; id < nDocs; ++id) {
+ const documentKey = Object.merge({_id: id}, shardKeyFromId(id));
+ assert.writeOK(mongosColl.update(documentKey, {$set: {updatedCount: 2}}));
+}
+
+// Move the upper chunk back to shard 0.
+assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {a: 1, b: MinKey, c: MinKey},
+ to: st.rs0.getURL()
+}));
+
+[changeStreamSingleColl, changeStreamWholeDb].forEach(function(changeStream) {
+ jsTestLog(`Testing updateLookup after moveChunk on namespace ${changeStream._ns}`);
for (let id = 0; id < nDocs; ++id) {
- const documentKey = Object.merge({_id: id}, shardKeyFromId(id));
- assert.writeOK(mongosColl.update(documentKey, {$set: {updatedCount: 2}}));
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
+ assert.docEq(next.fullDocument,
+ Object.merge(shardKeyFromId(id), {_id: id, updatedCount: 2}));
}
+});
- // Move the upper chunk back to shard 0.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {a: 1, b: MinKey, c: MinKey},
- to: st.rs0.getURL()
- }));
-
- [changeStreamSingleColl, changeStreamWholeDb].forEach(function(changeStream) {
- jsTestLog(`Testing updateLookup after moveChunk on namespace ${changeStream._ns}`);
- for (let id = 0; id < nDocs; ++id) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
- assert.docEq(next.fullDocument,
- Object.merge(shardKeyFromId(id), {_id: id, updatedCount: 2}));
- }
- });
-
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
index f1e9e6da502..058a92c6832 100644
--- a/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
+++ b/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
@@ -2,78 +2,75 @@
// sharded with a hashed shard key.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- const st = new ShardingTest({
- shards: 2,
- enableBalancer: false,
- rs: {
- nodes: 1,
- enableMajorityReadConcern: '',
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
- }
- });
+const st = new ShardingTest({
+ shards: 2,
+ enableBalancer: false,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
+ }
+});
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB['coll'];
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB['coll'];
- assert.commandWorked(mongosDB.dropDatabase());
+assert.commandWorked(mongosDB.dropDatabase());
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- // Shard the test collection on the field "shardKey", and split it into two chunks.
- assert.commandWorked(mongosDB.adminCommand({
- shardCollection: mongosColl.getFullName(),
- numInitialChunks: 2,
- key: {shardKey: "hashed"}
- }));
+// Shard the test collection on the field "shardKey", and split it into two chunks.
+assert.commandWorked(mongosDB.adminCommand(
+ {shardCollection: mongosColl.getFullName(), numInitialChunks: 2, key: {shardKey: "hashed"}}));
- // Make sure the negative chunk is on shard 0.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosColl.getFullName(),
- bounds: [{shardKey: MinKey}, {shardKey: NumberLong("0")}],
- to: st.rs0.getURL()
- }));
+// Make sure the negative chunk is on shard 0.
+assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ bounds: [{shardKey: MinKey}, {shardKey: NumberLong("0")}],
+ to: st.rs0.getURL()
+}));
- // Make sure the positive chunk is on shard 1.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosColl.getFullName(),
- bounds: [{shardKey: NumberLong("0")}, {shardKey: MaxKey}],
- to: st.rs1.getURL()
- }));
+// Make sure the positive chunk is on shard 1.
+assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ bounds: [{shardKey: NumberLong("0")}, {shardKey: MaxKey}],
+ to: st.rs1.getURL()
+}));
- const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
+const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
- // Write enough documents that we likely have some on each shard.
- const nDocs = 1000;
- for (let id = 0; id < nDocs; ++id) {
- assert.writeOK(mongosColl.insert({_id: id, shardKey: id}));
- assert.writeOK(mongosColl.update({shardKey: id}, {$set: {updatedCount: 1}}));
- }
+// Write enough documents that we likely have some on each shard.
+const nDocs = 1000;
+for (let id = 0; id < nDocs; ++id) {
+ assert.writeOK(mongosColl.insert({_id: id, shardKey: id}));
+ assert.writeOK(mongosColl.update({shardKey: id}, {$set: {updatedCount: 1}}));
+}
- for (let id = 0; id < nDocs; ++id) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.documentKey, {shardKey: id, _id: id});
+for (let id = 0; id < nDocs; ++id) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.documentKey, {shardKey: id, _id: id});
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, {shardKey: id, _id: id});
- assert.docEq(next.fullDocument, {_id: id, shardKey: id, updatedCount: 1});
- }
+ assert.soon(() => changeStream.hasNext());
+ next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, {shardKey: id, _id: id});
+ assert.docEq(next.fullDocument, {_id: id, shardKey: id, updatedCount: 1});
+}
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
index 843dda1c524..f6235d1082c 100644
--- a/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
+++ b/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
@@ -2,94 +2,92 @@
// sharded with a key which is just the "_id" field.
// @tags: [uses_change_streams]
(function() {
- "use strict";
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
+"use strict";
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ shards: 2,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
}
-
- const st = new ShardingTest({
- shards: 2,
- rs: {
- nodes: 1,
- enableMajorityReadConcern: '',
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
- }
- });
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB['coll'];
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
-
- // Move the [0, MaxKey) chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
-
- // Write a document to each chunk.
- assert.writeOK(mongosColl.insert({_id: -1}));
- assert.writeOK(mongosColl.insert({_id: 1}));
-
- const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
-
- // Do some writes.
- assert.writeOK(mongosColl.insert({_id: 1000}));
- assert.writeOK(mongosColl.insert({_id: -1000}));
- assert.writeOK(mongosColl.update({_id: 1000}, {$set: {updatedCount: 1}}));
- assert.writeOK(mongosColl.update({_id: -1000}, {$set: {updatedCount: 1}}));
-
- for (let nextId of[1000, -1000]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.documentKey, {_id: nextId});
- }
-
- for (let nextId of[1000, -1000]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "update");
- // Only the "_id" field is present in next.documentKey because the shard key is the _id.
- assert.eq(next.documentKey, {_id: nextId});
- assert.docEq(next.fullDocument, {_id: nextId, updatedCount: 1});
- }
-
- // Test that the change stream can still see the updated post image, even if a chunk is
- // migrated.
- assert.writeOK(mongosColl.update({_id: 1000}, {$set: {updatedCount: 2}}));
- assert.writeOK(mongosColl.update({_id: -1000}, {$set: {updatedCount: 2}}));
-
- // Split the [0, MaxKey) chunk into 2: [0, 500), [500, MaxKey).
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 500}}));
- // Move the [500, MaxKey) chunk back to st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1000}, to: st.rs0.getURL()}));
-
- for (let nextId of[1000, -1000]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, {_id: nextId});
- assert.docEq(next.fullDocument, {_id: nextId, updatedCount: 2});
- }
-
- st.stop();
+});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB['coll'];
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Shard the test collection on _id.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+
+// Move the [0, MaxKey) chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+
+// Write a document to each chunk.
+assert.writeOK(mongosColl.insert({_id: -1}));
+assert.writeOK(mongosColl.insert({_id: 1}));
+
+const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
+
+// Do some writes.
+assert.writeOK(mongosColl.insert({_id: 1000}));
+assert.writeOK(mongosColl.insert({_id: -1000}));
+assert.writeOK(mongosColl.update({_id: 1000}, {$set: {updatedCount: 1}}));
+assert.writeOK(mongosColl.update({_id: -1000}, {$set: {updatedCount: 1}}));
+
+for (let nextId of [1000, -1000]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.documentKey, {_id: nextId});
+}
+
+for (let nextId of [1000, -1000]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ // Only the "_id" field is present in next.documentKey because the shard key is the _id.
+ assert.eq(next.documentKey, {_id: nextId});
+ assert.docEq(next.fullDocument, {_id: nextId, updatedCount: 1});
+}
+
+// Test that the change stream can still see the updated post image, even if a chunk is
+// migrated.
+assert.writeOK(mongosColl.update({_id: 1000}, {$set: {updatedCount: 2}}));
+assert.writeOK(mongosColl.update({_id: -1000}, {$set: {updatedCount: 2}}));
+
+// Split the [0, MaxKey) chunk into 2: [0, 500), [500, MaxKey).
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 500}}));
+// Move the [500, MaxKey) chunk back to st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1000}, to: st.rs0.getURL()}));
+
+for (let nextId of [1000, -1000]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, {_id: nextId});
+ assert.docEq(next.fullDocument, {_id: nextId, updatedCount: 2});
+}
+
+st.stop();
})();
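
The change-stream tests above all rely on the same {fullDocument: "updateLookup"} post-image lookup. A minimal sketch of that behaviour, assuming a shell connected to a replica set (or a sharded cluster that supports change streams) and an otherwise-unused collection named "coll":

    // Minimal sketch: with fullDocument: "updateLookup", update events carry the current
    // post-image of the document, and documentKey is {_id} for an unsharded (or
    // _id-sharded) collection.
    const cs = db.coll.watch([], {fullDocument: "updateLookup"});
    assert.writeOK(db.coll.insert({_id: 1}));
    assert.writeOK(db.coll.update({_id: 1}, {$set: {updatedCount: 1}}));

    assert.soon(() => cs.hasNext());
    assert.eq(cs.next().operationType, "insert");

    assert.soon(() => cs.hasNext());
    const updateEvent = cs.next();
    assert.eq(updateEvent.operationType, "update");
    assert.eq(updateEvent.documentKey, {_id: 1});
    assert.docEq(updateEvent.fullDocument, {_id: 1, updatedCount: 1});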
diff --git a/jstests/sharding/lookup_mongod_unaware.js b/jstests/sharding/lookup_mongod_unaware.js
index 2a363eb1ce2..2750425205e 100644
--- a/jstests/sharding/lookup_mongod_unaware.js
+++ b/jstests/sharding/lookup_mongod_unaware.js
@@ -6,182 +6,179 @@
// expect it to still have all the previous data.
// @tags: [requires_persistence]
(function() {
- "use strict";
-
- load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
- load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
-
- // Restarts the primary shard and ensures that it believes both collections are unsharded.
- function restartPrimaryShard(rs, localColl, foreignColl) {
- // Returns true if the shard is aware that the collection is sharded.
- function hasRoutingInfoForNs(shardConn, coll) {
- const res = shardConn.adminCommand({getShardVersion: coll, fullMetadata: true});
- assert.commandWorked(res);
- return res.metadata.collVersion != undefined;
- }
-
- rs.restart(0);
- rs.awaitSecondaryNodes();
- assert(!hasRoutingInfoForNs(rs.getPrimary(), localColl.getFullName()));
- assert(!hasRoutingInfoForNs(rs.getPrimary(), foreignColl.getFullName()));
-
- // Reset the server parameter allowing sharded $lookup on each node.
- setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(rs.getPrimary()),
- "internalQueryAllowShardedLookup",
- true);
+"use strict";
+
+load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
+load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
+
+// Restarts the primary shard and ensures that it believes both collections are unsharded.
+function restartPrimaryShard(rs, localColl, foreignColl) {
+ // Returns true if the shard is aware that the collection is sharded.
+ function hasRoutingInfoForNs(shardConn, coll) {
+ const res = shardConn.adminCommand({getShardVersion: coll, fullMetadata: true});
+ assert.commandWorked(res);
+ return res.metadata.collVersion != undefined;
}
- const testName = "lookup_stale_mongod";
- const st = new ShardingTest({
- shards: 2,
- mongos: 2,
- rs: {nodes: 1},
- });
+ rs.restart(0);
+ rs.awaitSecondaryNodes();
+ assert(!hasRoutingInfoForNs(rs.getPrimary(), localColl.getFullName()));
+ assert(!hasRoutingInfoForNs(rs.getPrimary(), foreignColl.getFullName()));
- // Set the parameter allowing sharded $lookup on all nodes.
- setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(st.s0).concat([st.s1.host]),
+ // Reset the server parameter allowing sharded $lookup on each node.
+ setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(rs.getPrimary()),
"internalQueryAllowShardedLookup",
true);
+}
+
+const testName = "lookup_stale_mongod";
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ rs: {nodes: 1},
+});
+
+// Set the parameter allowing sharded $lookup on all nodes.
+setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(st.s0).concat([st.s1.host]),
+ "internalQueryAllowShardedLookup",
+ true);
+
+const mongos0DB = st.s0.getDB(testName);
+const mongos0LocalColl = mongos0DB[testName + "_local"];
+const mongos0ForeignColl = mongos0DB[testName + "_foreign"];
+
+const mongos1DB = st.s1.getDB(testName);
+const mongos1LocalColl = mongos1DB[testName + "_local"];
+const mongos1ForeignColl = mongos1DB[testName + "_foreign"];
+
+const pipeline = [
+ {$lookup: {localField: "a", foreignField: "b", from: mongos0ForeignColl.getName(), as: "same"}},
+ // Unwind the results of the $lookup, so we can sort by them to get a consistent ordering
+ // for the query results.
+ {$unwind: "$same"},
+ {$sort: {_id: 1, "same._id": 1}}
+];
+
+// The results are expected to be correct if the $lookup stage is executed on the mongos which
+// is aware that the collection is sharded.
+const expectedResults = [
+ {_id: 0, a: 1, "same": {_id: 0, b: 1}},
+ {_id: 1, a: null, "same": {_id: 1, b: null}},
+ {_id: 1, a: null, "same": {_id: 2}},
+ {_id: 2, "same": {_id: 1, b: null}},
+ {_id: 2, "same": {_id: 2}}
+];
+
+// Ensure that shard0 is the primary shard.
+assert.commandWorked(mongos0DB.adminCommand({enableSharding: mongos0DB.getName()}));
+st.ensurePrimaryShard(mongos0DB.getName(), st.shard0.shardName);
+
+assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
+assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
+
+assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
+
+// Send writes through mongos1 such that it's aware of the collections and believes they are
+// unsharded.
+assert.writeOK(mongos1LocalColl.insert({_id: 2}));
+assert.writeOK(mongos1ForeignColl.insert({_id: 2}));
- const mongos0DB = st.s0.getDB(testName);
- const mongos0LocalColl = mongos0DB[testName + "_local"];
- const mongos0ForeignColl = mongos0DB[testName + "_foreign"];
-
- const mongos1DB = st.s1.getDB(testName);
- const mongos1LocalColl = mongos1DB[testName + "_local"];
- const mongos1ForeignColl = mongos1DB[testName + "_foreign"];
-
- const pipeline = [
- {
- $lookup:
- {localField: "a", foreignField: "b", from: mongos0ForeignColl.getName(), as: "same"}
- },
- // Unwind the results of the $lookup, so we can sort by them to get a consistent ordering
- // for the query results.
- {$unwind: "$same"},
- {$sort: {_id: 1, "same._id": 1}}
- ];
-
- // The results are expected to be correct if the $lookup stage is executed on the mongos which
- // is aware that the collection is sharded.
- const expectedResults = [
- {_id: 0, a: 1, "same": {_id: 0, b: 1}},
- {_id: 1, a: null, "same": {_id: 1, b: null}},
- {_id: 1, a: null, "same": {_id: 2}},
- {_id: 2, "same": {_id: 1, b: null}},
- {_id: 2, "same": {_id: 2}}
- ];
-
- // Ensure that shard0 is the primary shard.
- assert.commandWorked(mongos0DB.adminCommand({enableSharding: mongos0DB.getName()}));
- st.ensurePrimaryShard(mongos0DB.getName(), st.shard0.shardName);
-
- assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
- assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
-
- assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
- assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
-
- // Send writes through mongos1 such that it's aware of the collections and believes they are
- // unsharded.
- assert.writeOK(mongos1LocalColl.insert({_id: 2}));
- assert.writeOK(mongos1ForeignColl.insert({_id: 2}));
-
- //
- // Test unsharded local and sharded foreign collections, with the primary shard unaware that
- // the foreign collection is sharded.
- //
-
- // Shard the foreign collection.
- assert.commandWorked(
- mongos0DB.adminCommand({shardCollection: mongos0ForeignColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
- assert.commandWorked(
- mongos0DB.adminCommand({split: mongos0ForeignColl.getFullName(), middle: {_id: 1}}));
-
- // Move the [minKey, 1) chunk to shard1.
- assert.commandWorked(mongos0DB.adminCommand({
- moveChunk: mongos0ForeignColl.getFullName(),
- find: {_id: 0},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Verify $lookup results through the fresh mongos.
- restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
- assert.eq(mongos0LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- // Verify $lookup results through mongos1, which is not aware that the foreign collection is
- // sharded. In this case the results will be correct since the entire pipeline will be run on a
- // shard, which will do a refresh before executing the foreign pipeline.
- restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
- assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- //
- // Test sharded local and sharded foreign collections, with the primary shard unaware that
- // either collection is sharded.
- //
-
- // Shard the local collection.
- assert.commandWorked(
- mongos0DB.adminCommand({shardCollection: mongos0LocalColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
- assert.commandWorked(
- mongos0DB.adminCommand({split: mongos0LocalColl.getFullName(), middle: {_id: 1}}));
-
- // Move the [minKey, 1) chunk to shard1.
- assert.commandWorked(mongos0DB.adminCommand({
- moveChunk: mongos0LocalColl.getFullName(),
- find: {_id: 0},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Verify $lookup results through the fresh mongos.
- restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
- assert.eq(mongos0LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- // Verify $lookup results through mongos1, which is not aware that the local
- // collection is sharded. The results are expected to be incorrect when both the mongos and
- // primary shard incorrectly believe that a collection is unsharded.
- // TODO: This should be fixed by SERVER-32629, likewise for the other aggregates in this file
- // sent to the stale mongos.
- restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
- assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), [
- {_id: 1, a: null, "same": {_id: 1, b: null}},
- {_id: 1, a: null, "same": {_id: 2}},
-
- {_id: 2, "same": {_id: 1, b: null}},
- {_id: 2, "same": {_id: 2}}
- ]);
-
- //
- // Test sharded local and unsharded foreign collections, with the primary shard unaware that
- // the local collection is sharded.
- //
-
- // Recreate the foreign collection as unsharded.
- mongos0ForeignColl.drop();
- assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
- assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
- assert.writeOK(mongos0ForeignColl.insert({_id: 2}));
-
- // Verify $lookup results through the fresh mongos.
- restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
- assert.eq(mongos0LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- // Verify $lookup results through mongos1, which is not aware that the local
- // collection is sharded. The results are expected to be incorrect when both the mongos and
- // primary shard incorrectly believe that a collection is unsharded.
- restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
- assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), [
- {_id: 1, a: null, "same": {_id: 1, b: null}},
- {_id: 1, a: null, "same": {_id: 2}},
- {_id: 2, "same": {_id: 1, b: null}},
- {_id: 2, "same": {_id: 2}}
- ]);
-
- st.stop();
+//
+// Test unsharded local and sharded foreign collections, with the primary shard unaware that
+// the foreign collection is sharded.
+//
+
+// Shard the foreign collection.
+assert.commandWorked(
+ mongos0DB.adminCommand({shardCollection: mongos0ForeignColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
+assert.commandWorked(
+ mongos0DB.adminCommand({split: mongos0ForeignColl.getFullName(), middle: {_id: 1}}));
+
+// Move the [minKey, 1) chunk to shard1.
+assert.commandWorked(mongos0DB.adminCommand({
+ moveChunk: mongos0ForeignColl.getFullName(),
+ find: {_id: 0},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Verify $lookup results through the fresh mongos.
+restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
+assert.eq(mongos0LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+// Verify $lookup results through mongos1, which is not aware that the foreign collection is
+// sharded. In this case the results will be correct since the entire pipeline will be run on a
+// shard, which will do a refresh before executing the foreign pipeline.
+restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
+assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+//
+// Test sharded local and sharded foreign collections, with the primary shard unaware that
+// either collection is sharded.
+//
+
+// Shard the local collection.
+assert.commandWorked(
+ mongos0DB.adminCommand({shardCollection: mongos0LocalColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
+assert.commandWorked(
+ mongos0DB.adminCommand({split: mongos0LocalColl.getFullName(), middle: {_id: 1}}));
+
+// Move the [minKey, 1) chunk to shard1.
+assert.commandWorked(mongos0DB.adminCommand({
+ moveChunk: mongos0LocalColl.getFullName(),
+ find: {_id: 0},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Verify $lookup results through the fresh mongos.
+restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
+assert.eq(mongos0LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+// Verify $lookup results through mongos1, which is not aware that the local
+// collection is sharded. The results are expected to be incorrect when both the mongos and
+// primary shard incorrectly believe that a collection is unsharded.
+// TODO: This should be fixed by SERVER-32629, likewise for the other aggregates in this file
+// sent to the stale mongos.
+restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
+assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), [
+ {_id: 1, a: null, "same": {_id: 1, b: null}},
+ {_id: 1, a: null, "same": {_id: 2}},
+
+ {_id: 2, "same": {_id: 1, b: null}},
+ {_id: 2, "same": {_id: 2}}
+]);
+
+//
+// Test sharded local and unsharded foreign collections, with the primary shard unaware that
+// the local collection is sharded.
+//
+
+// Recreate the foreign collection as unsharded.
+mongos0ForeignColl.drop();
+assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
+assert.writeOK(mongos0ForeignColl.insert({_id: 2}));
+
+// Verify $lookup results through the fresh mongos.
+restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
+assert.eq(mongos0LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+// Verify $lookup results through mongos1, which is not aware that the local
+// collection is sharded. The results are expected to be incorrect when both the mongos and
+// primary shard incorrectly believe that a collection is unsharded.
+restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
+assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), [
+ {_id: 1, a: null, "same": {_id: 1, b: null}},
+ {_id: 1, a: null, "same": {_id: 2}},
+ {_id: 2, "same": {_id: 1, b: null}},
+ {_id: 2, "same": {_id: 2}}
+]);
+
+st.stop();
})();
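The shard-split-move sequence above is repeated verbatim for the foreign and then the local collection. Purely as an illustrative sketch (not part of this patch), the same three admin commands could be wrapped in a hypothetical helper; here routerDB, coll, and toShardName stand in for a mongos database handle, a collection, and a destination shard name as used in the test.

// Hypothetical helper (illustration only): shard coll on {_id: 1}, split it into
// [MinKey, 1) and [1, MaxKey), then move the low chunk to another shard.
function shardAndMoveLowChunk(routerDB, coll, toShardName) {
    assert.commandWorked(
        routerDB.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
    assert.commandWorked(routerDB.adminCommand({split: coll.getFullName(), middle: {_id: 1}}));
    assert.commandWorked(routerDB.adminCommand({
        moveChunk: coll.getFullName(),
        find: {_id: 0},       // targets the [MinKey, 1) chunk
        to: toShardName,
        _waitForDelete: true  // wait for the range deleter before continuing
    }));
}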
diff --git a/jstests/sharding/lookup_on_shard.js b/jstests/sharding/lookup_on_shard.js
index cf6104dcaf1..2dc96378fab 100644
--- a/jstests/sharding/lookup_on_shard.js
+++ b/jstests/sharding/lookup_on_shard.js
@@ -1,39 +1,39 @@
// Test that a pipeline with a $lookup stage on a sharded foreign collection may be run on a mongod.
(function() {
- load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
- load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
+load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
+load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
- const sharded = new ShardingTest({mongos: 1, shards: 2});
+const sharded = new ShardingTest({mongos: 1, shards: 2});
- setParameterOnAllHosts(
- DiscoverTopology.findNonConfigNodes(sharded.s), "internalQueryAllowShardedLookup", true);
+setParameterOnAllHosts(
+ DiscoverTopology.findNonConfigNodes(sharded.s), "internalQueryAllowShardedLookup", true);
- assert.commandWorked(sharded.s.adminCommand({enableSharding: "test"}));
- sharded.ensurePrimaryShard('test', sharded.shard0.shardName);
+assert.commandWorked(sharded.s.adminCommand({enableSharding: "test"}));
+sharded.ensurePrimaryShard('test', sharded.shard0.shardName);
- const coll = sharded.s.getDB('test').mainColl;
- const foreignColl = sharded.s.getDB('test').foreignColl;
- const smallColl = sharded.s.getDB("test").smallColl;
+const coll = sharded.s.getDB('test').mainColl;
+const foreignColl = sharded.s.getDB('test').foreignColl;
+const smallColl = sharded.s.getDB("test").smallColl;
- const nDocsMainColl = 10;
- const nDocsForeignColl = 2 * nDocsMainColl;
+const nDocsMainColl = 10;
+const nDocsForeignColl = 2 * nDocsMainColl;
- for (let i = 0; i < nDocsMainColl; i++) {
- assert.commandWorked(coll.insert({_id: i, collName: "mainColl", foreignId: i}));
+for (let i = 0; i < nDocsMainColl; i++) {
+ assert.commandWorked(coll.insert({_id: i, collName: "mainColl", foreignId: i}));
- assert.commandWorked(
- foreignColl.insert({_id: 2 * i, key: i, collName: "foreignColl", data: "hello-0"}));
- assert.commandWorked(
- foreignColl.insert({_id: 2 * i + 1, key: i, collName: "foreignColl", data: "hello-1"}));
- }
- assert.commandWorked(smallColl.insert({_id: 0, collName: "smallColl"}));
+ assert.commandWorked(
+ foreignColl.insert({_id: 2 * i, key: i, collName: "foreignColl", data: "hello-0"}));
+ assert.commandWorked(
+ foreignColl.insert({_id: 2 * i + 1, key: i, collName: "foreignColl", data: "hello-1"}));
+}
+assert.commandWorked(smallColl.insert({_id: 0, collName: "smallColl"}));
- const runTest = function() {
- (function testSingleLookupFromShard() {
- // Run a pipeline which must be merged on a shard. This should force the $lookup (on
- // the sharded collection) to be run on a mongod.
- pipeline = [
+const runTest = function() {
+ (function testSingleLookupFromShard() {
+ // Run a pipeline which must be merged on a shard. This should force the $lookup (on
+ // the sharded collection) to be run on a mongod.
+ pipeline = [
{
$lookup: {
localField: "foreignId",
@@ -45,16 +45,16 @@
{$_internalSplitPipeline: {mergeType: "anyShard"}}
];
- const results = coll.aggregate(pipeline).toArray();
- assert.eq(results.length, nDocsMainColl);
- for (let i = 0; i < results.length; i++) {
- assert.eq(results[i].foreignDoc.length, 2, results[i]);
- }
- })();
+ const results = coll.aggregate(pipeline).toArray();
+ assert.eq(results.length, nDocsMainColl);
+ for (let i = 0; i < results.length; i++) {
+ assert.eq(results[i].foreignDoc.length, 2, results[i]);
+ }
+ })();
- (function testMultipleLookupsFromShard() {
- // Run two lookups in a row (both on mongod).
- pipeline = [
+ (function testMultipleLookupsFromShard() {
+ // Run two lookups in a row (both on mongod).
+ pipeline = [
{
$lookup: {
localField: "foreignId",
@@ -72,17 +72,17 @@
},
{$_internalSplitPipeline: {mergeType: "anyShard"}}
];
- const results = coll.aggregate(pipeline).toArray();
- assert.eq(results.length, nDocsMainColl);
- for (let i = 0; i < results.length; i++) {
- assert.eq(results[i].foreignDoc.length, 2, results[i]);
- assert.eq(results[i].smallCollDocs.length, 1, results[i]);
- }
- })();
-
- (function testUnshardedLookupWithinShardedLookup() {
- // Pipeline with unsharded $lookup inside a sharded $lookup.
- pipeline = [
+ const results = coll.aggregate(pipeline).toArray();
+ assert.eq(results.length, nDocsMainColl);
+ for (let i = 0; i < results.length; i++) {
+ assert.eq(results[i].foreignDoc.length, 2, results[i]);
+ assert.eq(results[i].smallCollDocs.length, 1, results[i]);
+ }
+ })();
+
+ (function testUnshardedLookupWithinShardedLookup() {
+ // Pipeline with unsharded $lookup inside a sharded $lookup.
+ pipeline = [
{
$lookup: {
from: "foreignColl",
@@ -94,55 +94,57 @@
},
{$_internalSplitPipeline: {mergeType: "anyShard"}}
];
- const results = coll.aggregate(pipeline).toArray();
-
- assert.eq(results.length, nDocsMainColl);
- for (let i = 0; i < results.length; i++) {
- assert.eq(results[i].foreignDoc.length, nDocsForeignColl);
- for (let j = 0; j < nDocsForeignColl; j++) {
- // Each document pulled from the foreign collection should have one document
- // from "smallColl."
- assert.eq(results[i].foreignDoc[j].collName, "foreignColl");
-
- // TODO SERVER-39016: Once a mongod is able to target the primary shard when
- // reading from a non-sharded collection this should always work. Until then,
- // the results of the query depend on which shard is chosen as the merging
- // shard. If the primary shard is chosen, we'll get the correct results (and
- // correctly find a document in "smallColl"). Otherwise if the merging shard is
- // not the primary shard, the merging shard will attempt to do a local read (on
- // an empty/non-existent collection), which will return nothing.
- if (results[i].foreignDoc[j].doc.length === 1) {
- assert.eq(results[i].foreignDoc[j].doc[0].collName, "smallColl");
- } else {
- assert.eq(results[i].foreignDoc[j].doc.length, 0);
- }
+ const results = coll.aggregate(pipeline).toArray();
+
+ assert.eq(results.length, nDocsMainColl);
+ for (let i = 0; i < results.length; i++) {
+ assert.eq(results[i].foreignDoc.length, nDocsForeignColl);
+ for (let j = 0; j < nDocsForeignColl; j++) {
+ // Each document pulled from the foreign collection should have one document
+ // from "smallColl."
+ assert.eq(results[i].foreignDoc[j].collName, "foreignColl");
+
+ // TODO SERVER-39016: Once a mongod is able to target the primary shard when
+ // reading from a non-sharded collection this should always work. Until then,
+ // the results of the query depend on which shard is chosen as the merging
+ // shard. If the primary shard is chosen, we'll get the correct results (and
+ // correctly find a document in "smallColl"). Otherwise if the merging shard is
+ // not the primary shard, the merging shard will attempt to do a local read (on
+ // an empty/non-existent collection), which will return nothing.
+ if (results[i].foreignDoc[j].doc.length === 1) {
+ assert.eq(results[i].foreignDoc[j].doc[0].collName, "smallColl");
+ } else {
+ assert.eq(results[i].foreignDoc[j].doc.length, 0);
}
}
- })();
- };
-
- jsTestLog("Running test with neither collection sharded");
- runTest();
-
- jsTestLog("Running test with foreign collection sharded");
- sharded.shardColl("foreignColl",
- {_id: 1}, // shard key
- {_id: 5}, // split
- {_id: 5}, // move
- "test", // dbName
- true // waitForDelete
- );
- runTest();
-
- jsTestLog("Running test with main and foreign collection sharded");
- sharded.shardColl("mainColl",
- {_id: 1}, // shard key
- {_id: 5}, // split
- {_id: 5}, // move
- "test", // dbName
- true // waitForDelete
- );
- runTest();
-
- sharded.stop();
+ }
+ })();
+};
+
+jsTestLog("Running test with neither collection sharded");
+runTest();
+
+jsTestLog("Running test with foreign collection sharded");
+sharded.shardColl(
+ "foreignColl",
+ {_id: 1}, // shard key
+ {_id: 5}, // split
+ {_id: 5}, // move
+ "test", // dbName
+ true // waitForDelete
+);
+runTest();
+
+jsTestLog("Running test with main and foreign collection sharded");
+sharded.shardColl(
+ "mainColl",
+ {_id: 1}, // shard key
+ {_id: 5}, // split
+ {_id: 5}, // move
+ "test", // dbName
+ true // waitForDelete
+);
+runTest();
+
+sharded.stop();
})();
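The hunks above truncate the bodies of the $lookup stages, so only the surrounding assertions are visible. As a hedged reconstruction (field names inferred from the inserted documents and from the two-matches-per-result assertion, not copied from the file), the first pipeline has roughly this shape; each of the nDocsMainColl results is then expected to carry exactly two entries in "foreignDoc".

// Approximate shape of the first pipeline in this test (reconstruction, not the literal source).
const sketchPipeline = [
    {
        $lookup: {
            from: "foreignColl",
            localField: "foreignId",
            foreignField: "key",  // assumed: two foreign documents share each key value
            as: "foreignDoc"
        }
    },
    // Force the merging half of the split pipeline onto a shard so that the $lookup
    // against the sharded foreign collection runs on a mongod rather than on mongos.
    {$_internalSplitPipeline: {mergeType: "anyShard"}}
];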
diff --git a/jstests/sharding/lookup_stale_mongos.js b/jstests/sharding/lookup_stale_mongos.js
index 2b346c8c0c0..f1e71280a18 100644
--- a/jstests/sharding/lookup_stale_mongos.js
+++ b/jstests/sharding/lookup_stale_mongos.js
@@ -3,134 +3,131 @@
// when it's not, and likewise when mongos thinks the collection is unsharded but is actually
// sharded.
(function() {
- "use strict";
-
- load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
- load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
-
- const testName = "lookup_stale_mongos";
- const st = new ShardingTest({
- shards: 2,
- mongos: 2,
- });
- setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(st.s0).concat([st.s1.host]),
- "internalQueryAllowShardedLookup",
- true);
-
- const mongos0DB = st.s0.getDB(testName);
- assert.commandWorked(mongos0DB.dropDatabase());
- const mongos0LocalColl = mongos0DB[testName + "_local"];
- const mongos0ForeignColl = mongos0DB[testName + "_foreign"];
-
- const mongos1DB = st.s1.getDB(testName);
- const mongos1LocalColl = mongos1DB[testName + "_local"];
- const mongos1ForeignColl = mongos1DB[testName + "_foreign"];
-
- const pipeline = [
- {
- $lookup:
- {localField: "a", foreignField: "b", from: mongos1ForeignColl.getName(), as: "same"}
- },
- {$sort: {_id: 1}}
- ];
- const expectedResults = [
- {_id: 0, a: 1, "same": [{_id: 0, b: 1}]},
- {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
- ];
-
- // Ensure that shard0 is the primary shard.
- assert.commandWorked(mongos0DB.adminCommand({enableSharding: mongos0DB.getName()}));
- st.ensurePrimaryShard(mongos0DB.getName(), st.shard0.shardName);
-
- assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
- assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
-
- assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
- assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
-
- // Send writes through mongos1 such that it's aware of the collections and believes they are
- // unsharded.
- assert.writeOK(mongos1LocalColl.insert({_id: 2}));
- assert.writeOK(mongos1ForeignColl.insert({_id: 2}));
-
- //
- // Test unsharded local and sharded foreign collections, with mongos unaware that the foreign
- // collection is sharded.
- //
-
- // Shard the foreign collection through mongos0.
- assert.commandWorked(
- mongos0DB.adminCommand({shardCollection: mongos0ForeignColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
- assert.commandWorked(
- mongos0DB.adminCommand({split: mongos0ForeignColl.getFullName(), middle: {_id: 1}}));
-
- // Move the [minKey, 1) chunk to shard1.
- assert.commandWorked(mongos0DB.adminCommand({
- moveChunk: mongos0ForeignColl.getFullName(),
- find: {_id: 0},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Issue a $lookup through mongos1, which is unaware that the foreign collection is sharded.
- assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- //
- // Test sharded local and sharded foreign collections, with mongos unaware that the local
- // collection is sharded.
- //
-
- // Shard the local collection through mongos0.
- assert.commandWorked(
- mongos0DB.adminCommand({shardCollection: mongos0LocalColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
- assert.commandWorked(
- mongos0DB.adminCommand({split: mongos0LocalColl.getFullName(), middle: {_id: 1}}));
-
- // Move the [minKey, 1) chunk to shard1.
- assert.commandWorked(mongos0DB.adminCommand({
- moveChunk: mongos0LocalColl.getFullName(),
- find: {_id: 0},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Issue a $lookup through mongos1, which is unaware that the local collection is sharded.
- assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- //
- // Test sharded local and unsharded foreign collections, with mongos unaware that the foreign
- // collection is unsharded.
- //
-
- // Recreate the foreign collection as unsharded through mongos0.
- mongos0ForeignColl.drop();
- assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
- assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
- assert.writeOK(mongos0ForeignColl.insert({_id: 2}));
-
- // Issue a $lookup through mongos1, which is unaware that the foreign collection is now
- // unsharded.
- assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- //
- // Test unsharded local and foreign collections, with mongos unaware that the local
- // collection is unsharded.
- //
-
- // Recreate the local collection as unsharded through mongos0.
- mongos0LocalColl.drop();
- assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
- assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
- assert.writeOK(mongos0LocalColl.insert({_id: 2}));
-
- // Issue a $lookup through mongos1, which is unaware that the local collection is now
- // unsharded.
- assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- st.stop();
+"use strict";
+
+load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
+load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
+
+const testName = "lookup_stale_mongos";
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+});
+setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(st.s0).concat([st.s1.host]),
+ "internalQueryAllowShardedLookup",
+ true);
+
+const mongos0DB = st.s0.getDB(testName);
+assert.commandWorked(mongos0DB.dropDatabase());
+const mongos0LocalColl = mongos0DB[testName + "_local"];
+const mongos0ForeignColl = mongos0DB[testName + "_foreign"];
+
+const mongos1DB = st.s1.getDB(testName);
+const mongos1LocalColl = mongos1DB[testName + "_local"];
+const mongos1ForeignColl = mongos1DB[testName + "_foreign"];
+
+const pipeline = [
+ {$lookup: {localField: "a", foreignField: "b", from: mongos1ForeignColl.getName(), as: "same"}},
+ {$sort: {_id: 1}}
+];
+const expectedResults = [
+ {_id: 0, a: 1, "same": [{_id: 0, b: 1}]},
+ {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
+];
+
+// Ensure that shard0 is the primary shard.
+assert.commandWorked(mongos0DB.adminCommand({enableSharding: mongos0DB.getName()}));
+st.ensurePrimaryShard(mongos0DB.getName(), st.shard0.shardName);
+
+assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
+assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
+
+assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
+
+// Send writes through mongos1 such that it's aware of the collections and believes they are
+// unsharded.
+assert.writeOK(mongos1LocalColl.insert({_id: 2}));
+assert.writeOK(mongos1ForeignColl.insert({_id: 2}));
+
+//
+// Test unsharded local and sharded foreign collections, with mongos unaware that the foreign
+// collection is sharded.
+//
+
+// Shard the foreign collection through mongos0.
+assert.commandWorked(
+ mongos0DB.adminCommand({shardCollection: mongos0ForeignColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
+assert.commandWorked(
+ mongos0DB.adminCommand({split: mongos0ForeignColl.getFullName(), middle: {_id: 1}}));
+
+// Move the [minKey, 1) chunk to shard1.
+assert.commandWorked(mongos0DB.adminCommand({
+ moveChunk: mongos0ForeignColl.getFullName(),
+ find: {_id: 0},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Issue a $lookup through mongos1, which is unaware that the foreign collection is sharded.
+assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+//
+// Test sharded local and sharded foreign collections, with mongos unaware that the local
+// collection is sharded.
+//
+
+// Shard the local collection through mongos0.
+assert.commandWorked(
+ mongos0DB.adminCommand({shardCollection: mongos0LocalColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
+assert.commandWorked(
+ mongos0DB.adminCommand({split: mongos0LocalColl.getFullName(), middle: {_id: 1}}));
+
+// Move the [minKey, 1) chunk to shard1.
+assert.commandWorked(mongos0DB.adminCommand({
+ moveChunk: mongos0LocalColl.getFullName(),
+ find: {_id: 0},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Issue a $lookup through mongos1, which is unaware that the local collection is sharded.
+assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+//
+// Test sharded local and unsharded foreign collections, with mongos unaware that the foreign
+// collection is unsharded.
+//
+
+// Recreate the foreign collection as unsharded through mongos0.
+mongos0ForeignColl.drop();
+assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
+assert.writeOK(mongos0ForeignColl.insert({_id: 2}));
+
+// Issue a $lookup through mongos1, which is unaware that the foreign collection is now
+// unsharded.
+assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+//
+// Test unsharded local and foreign collections, with mongos unaware that the local
+// collection is unsharded.
+//
+
+// Recreate the local collection as unsharded through mongos0.
+mongos0LocalColl.drop();
+assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
+assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
+assert.writeOK(mongos0LocalColl.insert({_id: 2}));
+
+// Issue a $lookup through mongos1, which is unaware that the local collection is now
+// unsharded.
+assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+st.stop();
})();
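The expectedResults above rely on $lookup's equality-match semantics: a null value of the local field matches foreign documents whose "b" is null as well as documents where "b" is missing entirely. A minimal standalone illustration (the demo database and collection names are invented, not part of the test):

// Null and missing foreign values both match a null local value in $lookup.
const demoDB = db.getSiblingDB("lookupNullDemo");
demoDB.dropDatabase();
assert.commandWorked(demoDB.localDemo.insert({_id: 1, a: null}));
assert.commandWorked(demoDB.foreignDemo.insert({_id: 1, b: null}));
assert.commandWorked(demoDB.foreignDemo.insert({_id: 2}));  // 'b' absent entirely

const demoRes =
    demoDB.localDemo
        .aggregate([
            {$lookup: {from: "foreignDemo", localField: "a", foreignField: "b", as: "same"}}
        ])
        .toArray();
assert.eq(demoRes[0].same.length, 2, tojson(demoRes));  // matches both foreign documents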
diff --git a/jstests/sharding/major_version_check.js b/jstests/sharding/major_version_check.js
index 1b4e1906379..eb6eccc1f1e 100644
--- a/jstests/sharding/major_version_check.js
+++ b/jstests/sharding/major_version_check.js
@@ -2,52 +2,51 @@
// Tests that only a correct major-version is needed to connect to a shard via mongos
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 1, mongos: 2});
+var st = new ShardingTest({shards: 1, mongos: 2});
- var mongos = st.s0;
- var staleMongos = st.s1;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var coll = mongos.getCollection("foo.bar");
+var mongos = st.s0;
+var staleMongos = st.s1;
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var coll = mongos.getCollection("foo.bar");
- // Shard collection
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+// Shard collection
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- // Make sure our stale mongos is up-to-date with no splits
- staleMongos.getCollection(coll + "").findOne();
+// Make sure our stale mongos is up-to-date with no splits
+staleMongos.getCollection(coll + "").findOne();
- // Run one split
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+// Run one split
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
- // Make sure our stale mongos is not up-to-date with the split
- printjson(admin.runCommand({getShardVersion: coll + ""}));
- printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+// Make sure our stale mongos is not up-to-date with the split
+printjson(admin.runCommand({getShardVersion: coll + ""}));
+printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
- // Compare strings b/c timestamp comparison is a bit weird
- assert.eq(Timestamp(1, 2), admin.runCommand({getShardVersion: coll + ""}).version);
- assert.eq(Timestamp(1, 0),
- staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+// Compare via assert.eq, which falls back to tojson, b/c raw Timestamp comparison is weird
+assert.eq(Timestamp(1, 2), admin.runCommand({getShardVersion: coll + ""}).version);
+assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
- // See if our stale mongos is required to catch up to run a findOne on an existing connection
- staleMongos.getCollection(coll + "").findOne();
+// See if our stale mongos is required to catch up to run a findOne on an existing connection
+staleMongos.getCollection(coll + "").findOne();
- printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
- assert.eq(Timestamp(1, 0),
- staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
- // See if our stale mongos is required to catch up to run a findOne on a new connection
- staleMongos = new Mongo(staleMongos.host);
- staleMongos.getCollection(coll + "").findOne();
+// See if our stale mongos is required to catch up to run a findOne on a new connection
+staleMongos = new Mongo(staleMongos.host);
+staleMongos.getCollection(coll + "").findOne();
- printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
- assert.eq(Timestamp(1, 0),
- staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
-
- st.stop();
+assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+st.stop();
})();
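The version assertions above amount to reading the chunk version each mongos caches for the namespace and comparing the Timestamp it reports: (1, 2) on the mongos that performed the split, (1, 0) on the stale one until it refreshes. A small sketch of that read (the "version" field name is taken from the assertions; the helper itself is illustrative only):

// Illustration: read the chunk version a given mongos currently caches for a namespace.
function cachedShardVersion(mongosConn, ns) {
    const res = assert.commandWorked(mongosConn.getDB("admin").runCommand({getShardVersion: ns}));
    return res.version;  // Timestamp(major, minor); only the major part must match to route
}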
diff --git a/jstests/sharding/mapReduce_inSharded.js b/jstests/sharding/mapReduce_inSharded.js
index 6737d5fec0c..b51b0111a1e 100644
--- a/jstests/sharding/mapReduce_inSharded.js
+++ b/jstests/sharding/mapReduce_inSharded.js
@@ -1,86 +1,87 @@
(function() {
- "use strict";
-
- var verifyOutput = function(out) {
- printjson(out);
- assert.commandWorked(out);
- assert.eq(out.counts.input, 51200, "input count is wrong");
- assert.eq(out.counts.emit, 51200, "emit count is wrong");
- assert.gt(out.counts.reduce, 99, "reduce count is wrong");
- assert.eq(out.counts.output, 512, "output count is wrong");
- };
-
- var st = new ShardingTest(
- {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-
- st.adminCommand({enablesharding: "mrShard"});
- st.ensurePrimaryShard('mrShard', st.shard1.shardName);
- st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
-
- var db = st.getDB("mrShard");
-
- var bulk = db.srcSharded.initializeUnorderedBulkOp();
- for (var j = 0; j < 100; j++) {
- for (var i = 0; i < 512; i++) {
- bulk.insert({j: j, i: i});
- }
- }
- assert.writeOK(bulk.execute());
+"use strict";
- function map() {
- emit(this.i, 1);
- }
- function reduce(key, values) {
- return Array.sum(values);
- }
-
- // sharded src
- var suffix = "InSharded";
-
- var out = db.srcSharded.mapReduce(map, reduce, "mrBasic" + suffix);
- verifyOutput(out);
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix}});
- verifyOutput(out);
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix}});
- verifyOutput(out);
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix}});
- verifyOutput(out);
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}});
- verifyOutput(out);
- assert(out.results != 'undefined', "no results for inline");
-
- // Ensure that mapReduce with a sharded input collection can accept the collation option.
- out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}, collation: {locale: "en_US"}});
- verifyOutput(out);
- assert(out.results != 'undefined', "no results for inline with collation");
-
- out = db.srcSharded.mapReduce(
- map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB"}});
- verifyOutput(out);
-
- out = db.runCommand({
- mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
- map: map,
- reduce: reduce,
- out: "mrBasic" + "srcSharded",
- });
- verifyOutput(out);
-
- // Ensure that the collation option is propagated to the shards. This uses a case-insensitive
- // collation, and the query seeding the mapReduce should only match the document if the
- // collation is passed along to the shards.
- assert.writeOK(db.srcSharded.remove({}));
- assert.eq(db.srcSharded.find().itcount(), 0);
- assert.writeOK(db.srcSharded.insert({i: 0, j: 0, str: "FOO"}));
- out = db.srcSharded.mapReduce(
- map,
- reduce,
- {out: {inline: 1}, query: {str: "foo"}, collation: {locale: "en_US", strength: 2}});
+var verifyOutput = function(out) {
+ printjson(out);
assert.commandWorked(out);
- assert.eq(out.counts.input, 1);
- st.stop();
+ assert.eq(out.counts.input, 51200, "input count is wrong");
+ assert.eq(out.counts.emit, 51200, "emit count is wrong");
+ assert.gt(out.counts.reduce, 99, "reduce count is wrong");
+ assert.eq(out.counts.output, 512, "output count is wrong");
+};
+
+var st = new ShardingTest(
+ {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
+
+st.adminCommand({enablesharding: "mrShard"});
+st.ensurePrimaryShard('mrShard', st.shard1.shardName);
+st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
+
+var db = st.getDB("mrShard");
+
+var bulk = db.srcSharded.initializeUnorderedBulkOp();
+for (var j = 0; j < 100; j++) {
+ for (var i = 0; i < 512; i++) {
+ bulk.insert({j: j, i: i});
+ }
+}
+assert.writeOK(bulk.execute());
+
+function map() {
+ emit(this.i, 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
+
+// sharded src
+var suffix = "InSharded";
+
+var out = db.srcSharded.mapReduce(map, reduce, "mrBasic" + suffix);
+verifyOutput(out);
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix}});
+verifyOutput(out);
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix}});
+verifyOutput(out);
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix}});
+verifyOutput(out);
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}});
+verifyOutput(out);
+assert(out.results !== undefined, "no results for inline");
+
+// Ensure that mapReduce with a sharded input collection can accept the collation option.
+out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}, collation: {locale: "en_US"}});
+verifyOutput(out);
+assert(out.results !== undefined, "no results for inline with collation");
+
+out = db.srcSharded.mapReduce(
+ map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB"}});
+verifyOutput(out);
+
+out = db.runCommand({
+ mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
+ map: map,
+ reduce: reduce,
+ out: "mrBasic" +
+ "srcSharded",
+});
+verifyOutput(out);
+
+// Ensure that the collation option is propagated to the shards. This uses a case-insensitive
+// collation, and the query seeding the mapReduce should only match the document if the
+// collation is passed along to the shards.
+assert.writeOK(db.srcSharded.remove({}));
+assert.eq(db.srcSharded.find().itcount(), 0);
+assert.writeOK(db.srcSharded.insert({i: 0, j: 0, str: "FOO"}));
+out = db.srcSharded.mapReduce(
+ map,
+ reduce,
+ {out: {inline: 1}, query: {str: "foo"}, collation: {locale: "en_US", strength: 2}});
+assert.commandWorked(out);
+assert.eq(out.counts.input, 1);
+st.stop();
})();
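The final collation check above hinges on strength-2 (case-insensitive) matching: the seed query {str: "foo"} can only select the single {str: "FOO"} document if the shards apply the collation. The same comparison can be observed outside mapReduce; a minimal sketch (the demo collection name is invented):

// Strength-2 "en_US" collation ignores case differences in equality matches.
const collationDemo = db.getSiblingDB("collationDemo").docs;
collationDemo.drop();
assert.commandWorked(collationDemo.insert({_id: 0, str: "FOO"}));

assert.eq(collationDemo.find({str: "foo"}).itcount(), 0);  // byte-wise comparison: no match
assert.eq(collationDemo.find({str: "foo"}).collation({locale: "en_US", strength: 2}).itcount(),
          1);  // case-insensitive comparison: matches "FOO"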
diff --git a/jstests/sharding/mapReduce_inSharded_outSharded.js b/jstests/sharding/mapReduce_inSharded_outSharded.js
index 92dae92f5f0..7a8730d2c4d 100644
--- a/jstests/sharding/mapReduce_inSharded_outSharded.js
+++ b/jstests/sharding/mapReduce_inSharded_outSharded.js
@@ -1,70 +1,69 @@
(function() {
- "use strict";
+"use strict";
- var verifyOutput = function(out) {
- printjson(out);
- assert.eq(out.counts.input, 51200, "input count is wrong");
- assert.eq(out.counts.emit, 51200, "emit count is wrong");
- assert.gt(out.counts.reduce, 99, "reduce count is wrong");
- assert.eq(out.counts.output, 512, "output count is wrong");
- };
+var verifyOutput = function(out) {
+ printjson(out);
+ assert.eq(out.counts.input, 51200, "input count is wrong");
+ assert.eq(out.counts.emit, 51200, "emit count is wrong");
+ assert.gt(out.counts.reduce, 99, "reduce count is wrong");
+ assert.eq(out.counts.output, 512, "output count is wrong");
+};
- var st = new ShardingTest(
- {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
+var st = new ShardingTest(
+ {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
- var admin = st.s0.getDB('admin');
+var admin = st.s0.getDB('admin');
- assert.commandWorked(admin.runCommand({enablesharding: "mrShard"}));
- st.ensurePrimaryShard('mrShard', st.shard1.shardName);
- assert.commandWorked(
- admin.runCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}}));
+assert.commandWorked(admin.runCommand({enablesharding: "mrShard"}));
+st.ensurePrimaryShard('mrShard', st.shard1.shardName);
+assert.commandWorked(admin.runCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}}));
- var db = st.s0.getDB("mrShard");
+var db = st.s0.getDB("mrShard");
- var bulk = db.srcSharded.initializeUnorderedBulkOp();
- for (var j = 0; j < 100; j++) {
- for (var i = 0; i < 512; i++) {
- bulk.insert({j: j, i: i});
- }
+var bulk = db.srcSharded.initializeUnorderedBulkOp();
+for (var j = 0; j < 100; j++) {
+ for (var i = 0; i < 512; i++) {
+ bulk.insert({j: j, i: i});
}
- assert.writeOK(bulk.execute());
-
- function map() {
- emit(this.i, 1);
- }
- function reduce(key, values) {
- return Array.sum(values);
- }
-
- // sharded src sharded dst
- var suffix = "InShardedOutSharded";
-
- var out =
- db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix, sharded: true}});
- verifyOutput(out);
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix, sharded: true}});
- verifyOutput(out);
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix, sharded: true}});
- verifyOutput(out);
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}});
- verifyOutput(out);
- assert(out.results != 'undefined', "no results for inline");
-
- out = db.srcSharded.mapReduce(
- map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB", sharded: true}});
- verifyOutput(out);
-
- out = db.runCommand({
- mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
- map: map,
- reduce: reduce,
- out: "mrBasic" + "srcSharded",
- });
- verifyOutput(out);
-
- st.stop();
-
+}
+assert.writeOK(bulk.execute());
+
+function map() {
+ emit(this.i, 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
+
+// sharded src sharded dst
+var suffix = "InShardedOutSharded";
+
+var out =
+ db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix, sharded: true}});
+verifyOutput(out);
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix, sharded: true}});
+verifyOutput(out);
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix, sharded: true}});
+verifyOutput(out);
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}});
+verifyOutput(out);
+assert(out.results !== undefined, "no results for inline");
+
+out = db.srcSharded.mapReduce(
+ map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB", sharded: true}});
+verifyOutput(out);
+
+out = db.runCommand({
+ mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
+ map: map,
+ reduce: reduce,
+ out: "mrBasic" +
+ "srcSharded",
+});
+verifyOutput(out);
+
+st.stop();
})();
diff --git a/jstests/sharding/mapReduce_nonSharded.js b/jstests/sharding/mapReduce_nonSharded.js
index d194623c3e7..07da267d132 100644
--- a/jstests/sharding/mapReduce_nonSharded.js
+++ b/jstests/sharding/mapReduce_nonSharded.js
@@ -57,7 +57,8 @@ out = db.runCommand({
mapReduce: "srcNonSharded", // use new name mapReduce rather than mapreduce
map: map,
reduce: reduce,
- out: "mrBasic" + "srcNonSharded",
+ out: "mrBasic" +
+ "srcNonSharded",
});
verifyOutput(out);
st.stop();
diff --git a/jstests/sharding/mapReduce_outSharded.js b/jstests/sharding/mapReduce_outSharded.js
index 75a5fcfca33..eeb88371a7e 100644
--- a/jstests/sharding/mapReduce_outSharded.js
+++ b/jstests/sharding/mapReduce_outSharded.js
@@ -55,7 +55,8 @@ out = db.runCommand({
mapReduce: "srcNonSharded", // use new name mapReduce rather than mapreduce
map: map,
reduce: reduce,
- out: "mrBasic" + "srcNonSharded",
+ out: "mrBasic" +
+ "srcNonSharded",
});
verifyOutput(out);
st.stop();
diff --git a/jstests/sharding/mapReduce_outSharded_checkUUID.js b/jstests/sharding/mapReduce_outSharded_checkUUID.js
index 9faa35cb836..25a499c4bed 100644
--- a/jstests/sharding/mapReduce_outSharded_checkUUID.js
+++ b/jstests/sharding/mapReduce_outSharded_checkUUID.js
@@ -1,151 +1,149 @@
(function() {
- "use strict";
- load("jstests/libs/uuid_util.js");
-
- var verifyOutput = function(out, output) {
- printjson(out);
- assert.eq(out.counts.input, 51200, "input count is wrong");
- assert.eq(out.counts.emit, 51200, "emit count is wrong");
- assert.gt(out.counts.reduce, 99, "reduce count is wrong");
- assert.eq(out.counts.output, output, "output count is wrong");
- };
-
- var assertCollectionNotOnShard = function(db, coll) {
- var listCollsRes = db.runCommand({listCollections: 1, filter: {name: coll}});
- assert.commandWorked(listCollsRes);
- assert.neq(undefined, listCollsRes.cursor);
- assert.neq(undefined, listCollsRes.cursor.firstBatch);
- assert.eq(0, listCollsRes.cursor.firstBatch.length);
- };
-
- var st = new ShardingTest({shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1}});
-
- var admin = st.s0.getDB('admin');
-
- assert.commandWorked(admin.runCommand({enablesharding: "mrShard"}));
- st.ensurePrimaryShard('mrShard', st.shard1.shardName);
- assert.commandWorked(
- admin.runCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}}));
-
- var db = st.s0.getDB("mrShard");
-
- var bulk = db.srcSharded.initializeUnorderedBulkOp();
- for (var j = 0; j < 100; j++) {
- for (var i = 0; i < 512; i++) {
- bulk.insert({j: j, i: i});
- }
+"use strict";
+load("jstests/libs/uuid_util.js");
+
+var verifyOutput = function(out, output) {
+ printjson(out);
+ assert.eq(out.counts.input, 51200, "input count is wrong");
+ assert.eq(out.counts.emit, 51200, "emit count is wrong");
+ assert.gt(out.counts.reduce, 99, "reduce count is wrong");
+ assert.eq(out.counts.output, output, "output count is wrong");
+};
+
+var assertCollectionNotOnShard = function(db, coll) {
+ var listCollsRes = db.runCommand({listCollections: 1, filter: {name: coll}});
+ assert.commandWorked(listCollsRes);
+ assert.neq(undefined, listCollsRes.cursor);
+ assert.neq(undefined, listCollsRes.cursor.firstBatch);
+ assert.eq(0, listCollsRes.cursor.firstBatch.length);
+};
+
+var st = new ShardingTest({shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1}});
+
+var admin = st.s0.getDB('admin');
+
+assert.commandWorked(admin.runCommand({enablesharding: "mrShard"}));
+st.ensurePrimaryShard('mrShard', st.shard1.shardName);
+assert.commandWorked(admin.runCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}}));
+
+var db = st.s0.getDB("mrShard");
+
+var bulk = db.srcSharded.initializeUnorderedBulkOp();
+for (var j = 0; j < 100; j++) {
+ for (var i = 0; i < 512; i++) {
+ bulk.insert({j: j, i: i});
}
- assert.writeOK(bulk.execute());
-
- function map() {
- emit(this.i, 1);
- }
- function reduce(key, values) {
- return Array.sum(values);
- }
-
- // sharded src sharded dst
- var suffix = "InShardedOutSharded";
-
- // Check that merge to an existing empty sharded collection works and creates a new UUID after
- // M/R
- st.adminCommand({shardcollection: "mrShard.outSharded", key: {"_id": 1}});
- var origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
- var out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "outSharded", sharded: true}});
- verifyOutput(out, 512);
- var newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
- assert.neq(origUUID, newUUID);
-
- // Shard1 is the primary shard and only one chunk should have been written, so the chunk with
- // the new UUID should have been written to it.
- assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
-
- // Shard0 should not have any chunks from the output collection because all shards should have
- // returned an empty split point list in the first phase of the mapReduce, since the reduced
- // data size is far less than the chunk size setting of 1MB.
- assertCollectionNotOnShard(st.shard0.getDB("mrShard"), "outSharded");
-
- // Check that merge to an existing sharded collection that has data on all shards works and that
- // the collection uses the same UUID after M/R
- assert.commandWorked(admin.runCommand({split: "mrShard.outSharded", middle: {"_id": 2000}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: "mrShard.outSharded", find: {"_id": 2000}, to: st.shard0.shardName}));
- assert.writeOK(st.s.getCollection("mrShard.outSharded").insert({_id: 1000}));
- assert.writeOK(st.s.getCollection("mrShard.outSharded").insert({_id: 2001}));
- origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "outSharded", sharded: true}});
- verifyOutput(out, 514);
-
- newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
- assert.eq(origUUID, newUUID);
- assert.eq(newUUID, getUUIDFromListCollections(st.shard0.getDB("mrShard"), "outSharded"));
- assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
-
- // Check that replace to an existing sharded collection has data on all shards works and that
- // the collection creates a new UUID after M/R.
- origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
- out = db.srcSharded.mapReduce(map, reduce, {out: {replace: "outSharded", sharded: true}});
- verifyOutput(out, 512);
-
- newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
- assert.neq(origUUID, newUUID);
-
- // Shard1 is the primary shard and only one chunk should have been written, so the chunk with
- // the new UUID should have been written to it.
- assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
-
- // Shard0 should not have any chunks from the output collection because all shards should have
- // returned an empty split point list in the first phase of the mapReduce, since the reduced
- // data size is far less than the chunk size setting of 1MB.
- assertCollectionNotOnShard(st.shard0.getDB("mrShard"), "outSharded");
-
- // Check that reduce to an existing unsharded collection fails when `sharded: true`.
- assert.commandWorked(db.runCommand({create: "reduceUnsharded"}));
- assert.commandFailed(db.runCommand({
- mapReduce: "srcSharded",
- map: map,
- reduce: reduce,
- out: {reduce: "reduceUnsharded", sharded: true}
- }));
-
- assert.commandWorked(db.reduceUnsharded.insert({x: 1}));
- assert.commandFailed(db.runCommand({
- mapReduce: "srcSharded",
- map: map,
- reduce: reduce,
- out: {reduce: "reduceUnsharded", sharded: true}
- }));
-
- // Check that replace to an existing unsharded collection works when `sharded: true`.
- assert.commandWorked(db.runCommand({create: "replaceUnsharded"}));
- origUUID = getUUIDFromListCollections(st.s.getDB("mrShard"), "replaceUnsharded");
-
- assert.commandWorked(db.runCommand({
- mapReduce: "srcSharded",
- map: map,
- reduce: reduce,
- out: {replace: "replaceUnsharded", sharded: true}
- }));
-
- newUUID = getUUIDFromConfigCollections(st.s, "mrShard.replaceUnsharded");
- assert.neq(origUUID, newUUID);
- assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "replaceUnsharded"));
-
- assert.commandWorked(db.replaceUnsharded.insert({x: 1}));
- origUUID = getUUIDFromListCollections(st.s.getDB("mrShard"), "replaceUnsharded");
-
- assert.commandWorked(db.runCommand({
- mapReduce: "srcSharded",
- map: map,
- reduce: reduce,
- out: {replace: "replaceUnsharded", sharded: true}
- }));
-
- newUUID = getUUIDFromConfigCollections(st.s, "mrShard.replaceUnsharded");
- assert.neq(origUUID, newUUID);
- assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "replaceUnsharded"));
-
- st.stop();
-
+}
+assert.writeOK(bulk.execute());
+
+function map() {
+ emit(this.i, 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
+
+// sharded src sharded dst
+var suffix = "InShardedOutSharded";
+
+// Check that merge to an existing empty sharded collection works and creates a new UUID after
+// M/R
+st.adminCommand({shardcollection: "mrShard.outSharded", key: {"_id": 1}});
+var origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
+var out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "outSharded", sharded: true}});
+verifyOutput(out, 512);
+var newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
+assert.neq(origUUID, newUUID);
+
+// Shard1 is the primary shard and only one chunk should have been written, so the chunk with
+// the new UUID should have been written to it.
+assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
+
+// Shard0 should not have any chunks from the output collection because all shards should have
+// returned an empty split point list in the first phase of the mapReduce, since the reduced
+// data size is far less than the chunk size setting of 1MB.
+assertCollectionNotOnShard(st.shard0.getDB("mrShard"), "outSharded");
+
+// Check that merge to an existing sharded collection that has data on all shards works and that
+// the collection uses the same UUID after M/R
+assert.commandWorked(admin.runCommand({split: "mrShard.outSharded", middle: {"_id": 2000}}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: "mrShard.outSharded", find: {"_id": 2000}, to: st.shard0.shardName}));
+assert.writeOK(st.s.getCollection("mrShard.outSharded").insert({_id: 1000}));
+assert.writeOK(st.s.getCollection("mrShard.outSharded").insert({_id: 2001}));
+origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "outSharded", sharded: true}});
+verifyOutput(out, 514);
+
+newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
+assert.eq(origUUID, newUUID);
+assert.eq(newUUID, getUUIDFromListCollections(st.shard0.getDB("mrShard"), "outSharded"));
+assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
+
+// Check that replace to an existing sharded collection that has data on all shards works and
+// that the collection creates a new UUID after M/R.
+origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
+out = db.srcSharded.mapReduce(map, reduce, {out: {replace: "outSharded", sharded: true}});
+verifyOutput(out, 512);
+
+newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
+assert.neq(origUUID, newUUID);
+
+// Shard1 is the primary shard and only one chunk should have been written, so the chunk with
+// the new UUID should have been written to it.
+assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
+
+// Shard0 should not have any chunks from the output collection because all shards should have
+// returned an empty split point list in the first phase of the mapReduce, since the reduced
+// data size is far less than the chunk size setting of 1MB.
+assertCollectionNotOnShard(st.shard0.getDB("mrShard"), "outSharded");
+
+// Check that reduce to an existing unsharded collection fails when `sharded: true`.
+assert.commandWorked(db.runCommand({create: "reduceUnsharded"}));
+assert.commandFailed(db.runCommand({
+ mapReduce: "srcSharded",
+ map: map,
+ reduce: reduce,
+ out: {reduce: "reduceUnsharded", sharded: true}
+}));
+
+assert.commandWorked(db.reduceUnsharded.insert({x: 1}));
+assert.commandFailed(db.runCommand({
+ mapReduce: "srcSharded",
+ map: map,
+ reduce: reduce,
+ out: {reduce: "reduceUnsharded", sharded: true}
+}));
+
+// Check that replace to an existing unsharded collection works when `sharded: true`.
+assert.commandWorked(db.runCommand({create: "replaceUnsharded"}));
+origUUID = getUUIDFromListCollections(st.s.getDB("mrShard"), "replaceUnsharded");
+
+assert.commandWorked(db.runCommand({
+ mapReduce: "srcSharded",
+ map: map,
+ reduce: reduce,
+ out: {replace: "replaceUnsharded", sharded: true}
+}));
+
+newUUID = getUUIDFromConfigCollections(st.s, "mrShard.replaceUnsharded");
+assert.neq(origUUID, newUUID);
+assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "replaceUnsharded"));
+
+assert.commandWorked(db.replaceUnsharded.insert({x: 1}));
+origUUID = getUUIDFromListCollections(st.s.getDB("mrShard"), "replaceUnsharded");
+
+assert.commandWorked(db.runCommand({
+ mapReduce: "srcSharded",
+ map: map,
+ reduce: reduce,
+ out: {replace: "replaceUnsharded", sharded: true}
+}));
+
+newUUID = getUUIDFromConfigCollections(st.s, "mrShard.replaceUnsharded");
+assert.neq(origUUID, newUUID);
+assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "replaceUnsharded"));
+
+st.stop();
})();
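The UUID assertions above use getUUIDFromConfigCollections and getUUIDFromListCollections from uuid_util.js, whose implementations are not shown in this diff. Conceptually they compare the UUID the sharding catalog records in config.collections with the UUID a shard reports through listCollections; a rough sketch of those two reads (the field layout is an assumption about that catalog schema, not copied from the helpers):

// Rough sketch of the two UUID sources being compared (assumed catalog/listCollections layout).
function uuidFromConfig(mongosConn, ns) {
    // config.collections keys its documents by full namespace and records the collection UUID.
    return mongosConn.getDB("config").collections.findOne({_id: ns}).uuid;
}

function uuidFromListCollections(nodeDB, collName) {
    // A mongod reports the UUID under the "info" block of its listCollections entry.
    const reply = assert.commandWorked(
        nodeDB.runCommand({listCollections: 1, filter: {name: collName}}));
    return reply.cursor.firstBatch[0].info.uuid;
}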
diff --git a/jstests/sharding/max_time_ms_sharded.js b/jstests/sharding/max_time_ms_sharded.js
index d46ba9af74a..16c56658a5d 100644
--- a/jstests/sharding/max_time_ms_sharded.js
+++ b/jstests/sharding/max_time_ms_sharded.js
@@ -5,267 +5,266 @@
// Note that mongos does not time out commands or query ops (which remains responsibility of mongod,
// pending development of an interrupt framework for mongos).
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2});
- var mongos = st.s0;
- var shards = [st.shard0, st.shard1];
- var coll = mongos.getCollection("foo.bar");
- var admin = mongos.getDB("admin");
- var cursor;
- var res;
+var mongos = st.s0;
+var shards = [st.shard0, st.shard1];
+var coll = mongos.getCollection("foo.bar");
+var admin = mongos.getDB("admin");
+var cursor;
+var res;
- // Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which forces mongod
- // to throw if it receives an operation with a max time. See fail point declaration for complete
- // description.
- var configureMaxTimeAlwaysTimeOut = function(mode) {
- assert.commandWorked(shards[0].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
- assert.commandWorked(shards[1].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
- };
+// Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which forces mongod
+// to throw if it receives an operation with a max time. See fail point declaration for complete
+// description.
+var configureMaxTimeAlwaysTimeOut = function(mode) {
+ assert.commandWorked(shards[0].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
+ assert.commandWorked(shards[1].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
+};
- // Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which prohibits
- // mongod from enforcing time limits. See fail point declaration for complete description.
- var configureMaxTimeNeverTimeOut = function(mode) {
- assert.commandWorked(shards[0].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
- assert.commandWorked(shards[1].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
- };
+// Helper function to configure "maxTimeNeverTimeOut" fail point on shards, which prohibits
+// mongod from enforcing time limits. See fail point declaration for complete description.
+var configureMaxTimeNeverTimeOut = function(mode) {
+ assert.commandWorked(shards[0].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
+ assert.commandWorked(shards[1].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
+};
- //
- // Pre-split collection: shard 0 takes {_id: {$lt: 0}}, shard 1 takes {_id: {$gte: 0}}.
- //
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
- st.ensurePrimaryShard(coll.getDB().toString(), st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(
- admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
+//
+// Pre-split collection: shard 0 takes {_id: {$lt: 0}}, shard 1 takes {_id: {$gte: 0}}.
+//
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
+st.ensurePrimaryShard(coll.getDB().toString(), st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
- //
- // Insert 1000 documents into sharded collection, such that each shard owns 500.
- //
- const nDocsPerShard = 500;
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = -nDocsPerShard; i < nDocsPerShard; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
- assert.eq(nDocsPerShard, shards[0].getCollection(coll.getFullName()).count());
- assert.eq(nDocsPerShard, shards[1].getCollection(coll.getFullName()).count());
+//
+// Insert 1000 documents into sharded collection, such that each shard owns 500.
+//
+const nDocsPerShard = 500;
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = -nDocsPerShard; i < nDocsPerShard; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
+assert.eq(nDocsPerShard, shards[0].getCollection(coll.getFullName()).count());
+assert.eq(nDocsPerShard, shards[1].getCollection(coll.getFullName()).count());
- //
- // Test that mongos correctly forwards max time to shards for sharded queries. Uses
- // maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
- //
+//
+// Test that mongos correctly forwards max time to shards for sharded queries. Uses
+// maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
+//
- // Positive test.
- configureMaxTimeAlwaysTimeOut("alwaysOn");
- cursor = coll.find();
- cursor.maxTimeMS(60 * 1000);
- assert.throws(function() {
- cursor.next();
- }, [], "expected query to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+// Positive test.
+configureMaxTimeAlwaysTimeOut("alwaysOn");
+cursor = coll.find();
+cursor.maxTimeMS(60 * 1000);
+assert.throws(function() {
+ cursor.next();
+}, [], "expected query to fail in mongod due to maxTimeAlwaysTimeOut fail point");
- // Negative test.
- configureMaxTimeAlwaysTimeOut("off");
- cursor = coll.find();
- cursor.maxTimeMS(60 * 1000);
- assert.doesNotThrow(function() {
- cursor.next();
- }, [], "expected query to not hit time limit in mongod");
+// Negative test.
+configureMaxTimeAlwaysTimeOut("off");
+cursor = coll.find();
+cursor.maxTimeMS(60 * 1000);
+assert.doesNotThrow(function() {
+ cursor.next();
+}, [], "expected query to not hit time limit in mongod");
- //
- // Test that mongos correctly times out max time sharded getmore operations. Uses
- // maxTimeNeverTimeOut to ensure mongod doesn't enforce a time limit.
- //
+//
+// Test that mongos correctly times out max time sharded getmore operations. Uses
+// maxTimeNeverTimeOut to ensure mongod doesn't enforce a time limit.
+//
- configureMaxTimeNeverTimeOut("alwaysOn");
+configureMaxTimeNeverTimeOut("alwaysOn");
- // Positive test. ~10s operation, 2s limit. The operation takes ~10s because each shard
- // processes 250 batches of ~40ms each, and the shards are processing getMores in parallel.
- cursor = coll.find({
- $where: function() {
- sleep(20);
- return true;
- }
- });
- cursor.batchSize(2);
- cursor.maxTimeMS(2 * 1000);
- assert.doesNotThrow(
- () => cursor.next(), [], "did not expect mongos to time out first batch of query");
- assert.throws(() => cursor.itcount(), [], "expected mongos to abort getmore due to time limit");
+// Positive test. ~10s operation, 2s limit. The operation takes ~10s because each shard
+// processes 250 batches of ~40ms each, and the shards are processing getMores in parallel.
+cursor = coll.find({
+ $where: function() {
+ sleep(20);
+ return true;
+ }
+});
+cursor.batchSize(2);
+cursor.maxTimeMS(2 * 1000);
+assert.doesNotThrow(
+ () => cursor.next(), [], "did not expect mongos to time out first batch of query");
+assert.throws(() => cursor.itcount(), [], "expected mongos to abort getmore due to time limit");
- // Negative test. ~5s operation, with a high (1-day) limit.
- cursor = coll.find({
- $where: function() {
- sleep(10);
- return true;
- }
- });
- cursor.batchSize(2);
- cursor.maxTimeMS(1000 * 60 * 60 * 24);
- assert.doesNotThrow(function() {
- cursor.next();
- }, [], "did not expect mongos to time out first batch of query");
- assert.doesNotThrow(function() {
- cursor.itcount();
- }, [], "did not expect getmore ops to hit the time limit");
+// Negative test. ~5s operation, with a high (1-day) limit.
+cursor = coll.find({
+ $where: function() {
+ sleep(10);
+ return true;
+ }
+});
+cursor.batchSize(2);
+cursor.maxTimeMS(1000 * 60 * 60 * 24);
+assert.doesNotThrow(function() {
+ cursor.next();
+}, [], "did not expect mongos to time out first batch of query");
+assert.doesNotThrow(function() {
+ cursor.itcount();
+}, [], "did not expect getmore ops to hit the time limit");
- configureMaxTimeNeverTimeOut("off");
+configureMaxTimeNeverTimeOut("off");
- //
- // Test that mongos correctly forwards max time to shards for sharded commands. Uses
- // maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
- //
+//
+// Test that mongos correctly forwards max time to shards for sharded commands. Uses
+// maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
+//
- // Positive test for "validate".
- configureMaxTimeAlwaysTimeOut("alwaysOn");
- assert.commandFailedWithCode(
- coll.runCommand("validate", {maxTimeMS: 60 * 1000}),
- ErrorCodes.MaxTimeMSExpired,
- "expected vailidate to fail with code " + ErrorCodes.MaxTimeMSExpired +
- " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
+// Positive test for "validate".
+configureMaxTimeAlwaysTimeOut("alwaysOn");
+assert.commandFailedWithCode(
+ coll.runCommand("validate", {maxTimeMS: 60 * 1000}),
+ ErrorCodes.MaxTimeMSExpired,
+ "expected vailidate to fail with code " + ErrorCodes.MaxTimeMSExpired +
+ " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
- // Negative test for "validate".
- configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(coll.runCommand("validate", {maxTimeMS: 60 * 1000}),
- "expected validate to not hit time limit in mongod");
+// Negative test for "validate".
+configureMaxTimeAlwaysTimeOut("off");
+assert.commandWorked(coll.runCommand("validate", {maxTimeMS: 60 * 1000}),
+ "expected validate to not hit time limit in mongod");
- // Positive test for "count".
- configureMaxTimeAlwaysTimeOut("alwaysOn");
- assert.commandFailedWithCode(coll.runCommand("count", {maxTimeMS: 60 * 1000}),
- ErrorCodes.MaxTimeMSExpired,
- "expected count to fail with code " + ErrorCodes.MaxTimeMSExpired +
- " due to maxTimeAlwaysTimeOut fail point, but instead got: " +
- tojson(res));
+// Positive test for "count".
+configureMaxTimeAlwaysTimeOut("alwaysOn");
+assert.commandFailedWithCode(
+ coll.runCommand("count", {maxTimeMS: 60 * 1000}),
+ ErrorCodes.MaxTimeMSExpired,
+ "expected count to fail with code " + ErrorCodes.MaxTimeMSExpired +
+ " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
- // Negative test for "count".
- configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(coll.runCommand("count", {maxTimeMS: 60 * 1000}),
- "expected count to not hit time limit in mongod");
+// Negative test for "count".
+configureMaxTimeAlwaysTimeOut("off");
+assert.commandWorked(coll.runCommand("count", {maxTimeMS: 60 * 1000}),
+ "expected count to not hit time limit in mongod");
- // Positive test for "collStats".
- configureMaxTimeAlwaysTimeOut("alwaysOn");
- assert.commandFailedWithCode(
- coll.runCommand("collStats", {maxTimeMS: 60 * 1000}),
- ErrorCodes.MaxTimeMSExpired,
- "expected collStats to fail with code " + ErrorCodes.MaxTimeMSExpired +
- " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
+// Positive test for "collStats".
+configureMaxTimeAlwaysTimeOut("alwaysOn");
+assert.commandFailedWithCode(
+ coll.runCommand("collStats", {maxTimeMS: 60 * 1000}),
+ ErrorCodes.MaxTimeMSExpired,
+ "expected collStats to fail with code " + ErrorCodes.MaxTimeMSExpired +
+ " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
- // Negative test for "collStats".
- configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(coll.runCommand("collStats", {maxTimeMS: 60 * 1000}),
- "expected collStats to not hit time limit in mongod");
+// Negative test for "collStats".
+configureMaxTimeAlwaysTimeOut("off");
+assert.commandWorked(coll.runCommand("collStats", {maxTimeMS: 60 * 1000}),
+ "expected collStats to not hit time limit in mongod");
- // Positive test for "mapReduce".
- configureMaxTimeAlwaysTimeOut("alwaysOn");
- res = coll.runCommand("mapReduce", {
- map: function() {
- emit(0, 0);
- },
- reduce: function(key, values) {
- return 0;
- },
- out: {inline: 1},
- maxTimeMS: 60 * 1000
- });
- assert.commandFailedWithCode(
- res,
- ErrorCodes.MaxTimeMSExpired,
- "expected mapReduce to fail with code " + ErrorCodes.MaxTimeMSExpired +
- " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
+// Positive test for "mapReduce".
+configureMaxTimeAlwaysTimeOut("alwaysOn");
+res = coll.runCommand("mapReduce", {
+ map: function() {
+ emit(0, 0);
+ },
+ reduce: function(key, values) {
+ return 0;
+ },
+ out: {inline: 1},
+ maxTimeMS: 60 * 1000
+});
+assert.commandFailedWithCode(
+ res,
+ ErrorCodes.MaxTimeMSExpired,
+ "expected mapReduce to fail with code " + ErrorCodes.MaxTimeMSExpired +
+ " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
- // Negative test for "mapReduce".
- configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(coll.runCommand("mapReduce", {
- map: function() {
- emit(0, 0);
- },
- reduce: function(key, values) {
- return 0;
- },
- out: {inline: 1},
- maxTimeMS: 60 * 1000
- }),
- "expected mapReduce to not hit time limit in mongod");
+// Negative test for "mapReduce".
+configureMaxTimeAlwaysTimeOut("off");
+assert.commandWorked(coll.runCommand("mapReduce", {
+ map: function() {
+ emit(0, 0);
+ },
+ reduce: function(key, values) {
+ return 0;
+ },
+ out: {inline: 1},
+ maxTimeMS: 60 * 1000
+}),
+ "expected mapReduce to not hit time limit in mongod");
- // Positive test for "aggregate".
- configureMaxTimeAlwaysTimeOut("alwaysOn");
- assert.commandFailedWithCode(
- coll.runCommand("aggregate", {pipeline: [], cursor: {}, maxTimeMS: 60 * 1000}),
- ErrorCodes.MaxTimeMSExpired,
- "expected aggregate to fail with code " + ErrorCodes.MaxTimeMSExpired +
- " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
+// Positive test for "aggregate".
+configureMaxTimeAlwaysTimeOut("alwaysOn");
+assert.commandFailedWithCode(
+ coll.runCommand("aggregate", {pipeline: [], cursor: {}, maxTimeMS: 60 * 1000}),
+ ErrorCodes.MaxTimeMSExpired,
+ "expected aggregate to fail with code " + ErrorCodes.MaxTimeMSExpired +
+ " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
- // Negative test for "aggregate".
- configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(
- coll.runCommand("aggregate", {pipeline: [], cursor: {}, maxTimeMS: 60 * 1000}),
- "expected aggregate to not hit time limit in mongod");
+// Negative test for "aggregate".
+configureMaxTimeAlwaysTimeOut("off");
+assert.commandWorked(coll.runCommand("aggregate", {pipeline: [], cursor: {}, maxTimeMS: 60 * 1000}),
+ "expected aggregate to not hit time limit in mongod");
- // Test that the maxTimeMS is still enforced on the shards even if we do not spend much time in
- // mongos blocking.
+// Test that the maxTimeMS is still enforced on the shards even if we do not spend much time in
+// mongos blocking.
- // Manually run a find here so we can be sure cursor establishment happens with batch size 0.
- res = assert.commandWorked(coll.runCommand({
- find: coll.getName(),
- filter: {
- $where: function() {
- if (this._id < 0) {
- // Slow down the query only on one of the shards. Each shard has 500 documents
- // so we expect this shard to take ~10 seconds to return a batch of 500.
- sleep(20);
- }
- return true;
+// Manually run a find here so we can be sure cursor establishment happens with batch size 0.
+res = assert.commandWorked(coll.runCommand({
+ find: coll.getName(),
+ filter: {
+ $where: function() {
+ if (this._id < 0) {
+ // Slow down the query only on one of the shards. Each shard has 500 documents
+ // so we expect this shard to take ~10 seconds to return a batch of 500.
+ sleep(20);
}
- },
- maxTimeMS: 2000,
- batchSize: 0
- }));
- // Use a batch size of 500 to allow returning results from the fast shard as soon as they're
- // ready, as opposed to waiting to return one 16MB batch at a time.
- const kBatchSize = nDocsPerShard;
- cursor = new DBCommandCursor(coll.getDB(), res, kBatchSize);
- // The fast shard should return relatively quickly.
- for (let i = 0; i < nDocsPerShard; ++i) {
- let next = assert.doesNotThrow(
- () => cursor.next(), [], "did not expect mongos to time out first batch of query");
- assert.gte(next._id, 0);
- }
- // Sleep on the client-side so mongos's time budget is not being used.
- sleep(3 * 1000);
- // Even though mongos has not been blocking this whole time, the shard has been busy computing
- // the next batch and should have timed out.
- assert.throws(() => cursor.next(), [], "expected mongos to abort getMore due to time limit");
+ return true;
+ }
+ },
+ maxTimeMS: 2000,
+ batchSize: 0
+}));
+// Use a batch size of 500 to allow returning results from the fast shard as soon as they're
+// ready, as opposed to waiting to return one 16MB batch at a time.
+const kBatchSize = nDocsPerShard;
+cursor = new DBCommandCursor(coll.getDB(), res, kBatchSize);
+// The fast shard should return relatively quickly.
+for (let i = 0; i < nDocsPerShard; ++i) {
+ let next = assert.doesNotThrow(
+ () => cursor.next(), [], "did not expect mongos to time out first batch of query");
+ assert.gte(next._id, 0);
+}
+// Sleep on the client-side so mongos's time budget is not being used.
+sleep(3 * 1000);
+// Even though mongos has not been blocking this whole time, the shard has been busy computing
+// the next batch and should have timed out.
+assert.throws(() => cursor.next(), [], "expected mongos to abort getMore due to time limit");
- // The moveChunk tests are disabled due to SERVER-30179
- //
- // // Positive test for "moveChunk".
- // configureMaxTimeAlwaysTimeOut("alwaysOn");
- // res = admin.runCommand({
- // moveChunk: coll.getFullName(),
- // find: {_id: 0},
- // to: st.shard0.shardName,
- // maxTimeMS: 1000 * 60 * 60 * 24
- // });
- // assert.commandFailed(
- // res,
- // "expected moveChunk to fail due to maxTimeAlwaysTimeOut fail point, but instead got: " +
- // tojson(res));
+// The moveChunk tests are disabled due to SERVER-30179
+//
+// // Positive test for "moveChunk".
+// configureMaxTimeAlwaysTimeOut("alwaysOn");
+// res = admin.runCommand({
+// moveChunk: coll.getFullName(),
+// find: {_id: 0},
+// to: st.shard0.shardName,
+// maxTimeMS: 1000 * 60 * 60 * 24
+// });
+// assert.commandFailed(
+// res,
+// "expected moveChunk to fail due to maxTimeAlwaysTimeOut fail point, but instead got: " +
+// tojson(res));
- // // Negative test for "moveChunk".
- // configureMaxTimeAlwaysTimeOut("off");
- // assert.commandWorked(admin.runCommand({
- // moveChunk: coll.getFullName(),
- // find: {_id: 0},
- // to: st.shard0.shardName,
- // maxTimeMS: 1000 * 60 * 60 * 24
- // }),
- // "expected moveChunk to not hit time limit in mongod");
+// // Negative test for "moveChunk".
+// configureMaxTimeAlwaysTimeOut("off");
+// assert.commandWorked(admin.runCommand({
+// moveChunk: coll.getFullName(),
+// find: {_id: 0},
+// to: st.shard0.shardName,
+// maxTimeMS: 1000 * 60 * 60 * 24
+// }),
+// "expected moveChunk to not hit time limit in mongod");
- st.stop();
+st.stop();
})();
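For quick reference, a minimal sketch of the fail-point pattern these maxTimeMS tests rely on. It assumes a shell connected directly to a shard's mongod; the collection name "testColl" is hypothetical and the snippet is illustrative only.

// Force every operation carrying a maxTimeMS to fail, verify the error code, then reset.
assert.commandWorked(
    db.adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}));
const res = db.runCommand({count: "testColl", maxTimeMS: 60 * 1000});
assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
assert.commandWorked(
    db.adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}));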
diff --git a/jstests/sharding/max_time_ms_sharded_new_commands.js b/jstests/sharding/max_time_ms_sharded_new_commands.js
index c8072359ce1..b611199954e 100644
--- a/jstests/sharding/max_time_ms_sharded_new_commands.js
+++ b/jstests/sharding/max_time_ms_sharded_new_commands.js
@@ -1,45 +1,44 @@
// Make sure the setFeatureCompatibilityVersion command respects maxTimeMs.
(function() {
- 'use strict';
- load("./jstests/libs/feature_compatibility_version.js");
- var st = new ShardingTest({shards: 2});
+'use strict';
+load("./jstests/libs/feature_compatibility_version.js");
+var st = new ShardingTest({shards: 2});
- var mongos = st.s0;
- var shards = [st.shard0, st.shard1];
- var coll = mongos.getCollection("foo.bar");
- var admin = mongos.getDB("admin");
- var cursor;
- var res;
+var mongos = st.s0;
+var shards = [st.shard0, st.shard1];
+var coll = mongos.getCollection("foo.bar");
+var admin = mongos.getDB("admin");
+var cursor;
+var res;
- // Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which forces mongod
- // to throw if it receives an operation with a max time. See fail point declaration for
- // complete description.
- var configureMaxTimeAlwaysTimeOut = function(mode) {
- assert.commandWorked(shards[0].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
- assert.commandWorked(shards[1].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
- };
+// Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which forces mongod
+// to throw if it receives an operation with a max time. See fail point declaration for
+// complete description.
+var configureMaxTimeAlwaysTimeOut = function(mode) {
+ assert.commandWorked(shards[0].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
+ assert.commandWorked(shards[1].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
+};
- // Positive test for "setFeatureCompatibilityVersion"
- configureMaxTimeAlwaysTimeOut("alwaysOn");
- assert.commandFailedWithCode(
- admin.runCommand(
- {setFeatureCompatibilityVersion: lastStableFCV, maxTimeMS: 1000 * 60 * 60 * 24}),
- ErrorCodes.MaxTimeMSExpired,
- "expected setFeatureCompatibilityVersion to fail due to maxTimeAlwaysTimeOut fail point");
+// Positive test for "setFeatureCompatibilityVersion"
+configureMaxTimeAlwaysTimeOut("alwaysOn");
+assert.commandFailedWithCode(
+ admin.runCommand(
+ {setFeatureCompatibilityVersion: lastStableFCV, maxTimeMS: 1000 * 60 * 60 * 24}),
+ ErrorCodes.MaxTimeMSExpired,
+ "expected setFeatureCompatibilityVersion to fail due to maxTimeAlwaysTimeOut fail point");
- // Negative test for "setFeatureCompatibilityVersion"
- configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(
- admin.runCommand(
- {setFeatureCompatibilityVersion: lastStableFCV, maxTimeMS: 1000 * 60 * 60 * 24}),
- "expected setFeatureCompatibilityVersion to not hit time limit in mongod");
+// Negative test for "setFeatureCompatibilityVersion"
+configureMaxTimeAlwaysTimeOut("off");
+assert.commandWorked(
+ admin.runCommand(
+ {setFeatureCompatibilityVersion: lastStableFCV, maxTimeMS: 1000 * 60 * 60 * 24}),
+ "expected setFeatureCompatibilityVersion to not hit time limit in mongod");
- assert.commandWorked(
- admin.runCommand(
- {setFeatureCompatibilityVersion: latestFCV, maxTimeMS: 1000 * 60 * 60 * 24}),
- "expected setFeatureCompatibilityVersion to not hit time limit in mongod");
+assert.commandWorked(
+ admin.runCommand({setFeatureCompatibilityVersion: latestFCV, maxTimeMS: 1000 * 60 * 60 * 24}),
+ "expected setFeatureCompatibilityVersion to not hit time limit in mongod");
- st.stop();
+st.stop();
})();
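The same two-shard toggle appears in several of these tests; a slightly more general helper is sketched below. It assumes a 'shards' array of direct shard connections, as set up in the tests above, and is a convenience sketch rather than code from this patch.

// Toggle an arbitrary fail point on every shard; 'mode' is typically "alwaysOn", "off",
// or {times: N}.
var configureFailPointOnAllShards = function(failPointName, mode) {
    shards.forEach(function(shard) {
        assert.commandWorked(shard.getDB("admin").runCommand(
            {configureFailPoint: failPointName, mode: mode}));
    });
};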
diff --git a/jstests/sharding/merge_chunks_compound_shard_key.js b/jstests/sharding/merge_chunks_compound_shard_key.js
index 9d6fc3aac14..3472073f4c5 100644
--- a/jstests/sharding/merge_chunks_compound_shard_key.js
+++ b/jstests/sharding/merge_chunks_compound_shard_key.js
@@ -4,92 +4,89 @@
//
(function() {
- 'use strict';
-
- var getShardVersion = function() {
- var res = st.shard0.adminCommand({getShardVersion: coll + ""});
- assert.commandWorked(res);
- var version = res.global;
- assert(version);
- return version;
- };
-
- // Merge two neighboring chunks and check post conditions.
- var checkMergeWorked = function(lowerBound, upperBound) {
- var oldVersion = getShardVersion();
- var numChunksBefore = chunks.find().itcount();
-
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [lowerBound, upperBound]}));
-
- assert.eq(numChunksBefore - 1, chunks.find().itcount());
- assert.eq(1, chunks.find({min: lowerBound, max: upperBound}).itcount());
-
- var newVersion = getShardVersion();
- assert.eq(newVersion.t, oldVersion.t);
- assert.gt(newVersion.i, oldVersion.i);
- };
-
- var st = new ShardingTest({shards: 2, mongos: 1});
-
- var mongos = st.s;
- var admin = mongos.getDB("admin");
- var shards = mongos.getCollection("config.shards").find().toArray();
- var chunks = mongos.getCollection("config.chunks");
- var coll = mongos.getCollection("foo.bar");
-
- jsTest.log("Create a sharded collection with a compound shard key.");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {x: 1, y: 1}}));
-
- // Chunks after splits:
- // (MinKey, { x: 0, y: 1 })
- // ({ x: 0, y: 1 }, { x: 1, y: 0 })
- // ({ x: 1, y: 0 }, { x: 2, y: 0 })
- // ({ x: 2, y: 0 }, { x: 2, y: 1 })
- // ({ x: 2, y: 1 }, MaxKey)
- jsTest.log("Create chunks.");
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 0, y: 1}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 1, y: 0}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 2, y: 0}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 2, y: 1}}));
-
- jsTest.log("Insert some data into each of the chunk ranges.");
- assert.writeOK(coll.insert({x: -1, y: 2}));
- assert.writeOK(coll.insert({x: 0, y: 2}));
- assert.writeOK(coll.insert({x: 1, y: 2}));
- assert.writeOK(coll.insert({x: 2, y: 1}));
- assert.writeOK(coll.insert({x: 2, y: 3}));
-
- // Chunks after merge:
- // (MinKey, { x: 0, y: 1 })
- // ({ x: 0, y: 1 }, { x: 2, y: 0 })
- // ({ x: 2, y: 0 }, { x: 2, y: 1 })
- // ({ x: 2, y: 1 }, MaxKey)
- jsTest.log("Merge chunks whose upper and lower bounds are compound shard keys.");
- checkMergeWorked({x: 0, y: 1}, {x: 2, y: 0});
-
- // Chunks after merge:
- // (MinKey, { x: 2, y: 0 })
- // ({ x: 2, y: 0 }, { x: 2, y: 1 })
- // ({ x: 2, y: 1 }, MaxKey)
- jsTest.log(
- "Merge chunks whose upper bound contains a compound shard key, lower bound is MinKey");
- checkMergeWorked({x: MinKey, y: MinKey}, {x: 2, y: 0});
-
- // Chunks after merge:
- // (MinKey, { x: 2, y: 0 })
- // ({ x: 2, y: 0 }, MaxKey)
- jsTest.log(
- "Merge chunks whose lower bound contains a compound shard key, upper bound is MaxKey");
- checkMergeWorked({x: 2, y: 0}, {x: MaxKey, y: MaxKey});
-
- // Chunks after merge:
- // (MinKey, MaxKey)
- jsTest.log("Merge chunks whos bounds are MinKey/MaxKey, but which have a compound shard key");
- checkMergeWorked({x: MinKey, y: MinKey}, {x: MaxKey, y: MaxKey});
-
- st.stop();
-
+'use strict';
+
+var getShardVersion = function() {
+ var res = st.shard0.adminCommand({getShardVersion: coll + ""});
+ assert.commandWorked(res);
+ var version = res.global;
+ assert(version);
+ return version;
+};
+
+// Merge two neighboring chunks and check post conditions.
+var checkMergeWorked = function(lowerBound, upperBound) {
+ var oldVersion = getShardVersion();
+ var numChunksBefore = chunks.find().itcount();
+
+ assert.commandWorked(
+ admin.runCommand({mergeChunks: coll + "", bounds: [lowerBound, upperBound]}));
+
+ assert.eq(numChunksBefore - 1, chunks.find().itcount());
+ assert.eq(1, chunks.find({min: lowerBound, max: upperBound}).itcount());
+
+ var newVersion = getShardVersion();
+ assert.eq(newVersion.t, oldVersion.t);
+ assert.gt(newVersion.i, oldVersion.i);
+};
+
+var st = new ShardingTest({shards: 2, mongos: 1});
+
+var mongos = st.s;
+var admin = mongos.getDB("admin");
+var shards = mongos.getCollection("config.shards").find().toArray();
+var chunks = mongos.getCollection("config.chunks");
+var coll = mongos.getCollection("foo.bar");
+
+jsTest.log("Create a sharded collection with a compound shard key.");
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {x: 1, y: 1}}));
+
+// Chunks after splits:
+// (MinKey, { x: 0, y: 1 })
+// ({ x: 0, y: 1 }, { x: 1, y: 0 })
+// ({ x: 1, y: 0 }, { x: 2, y: 0 })
+// ({ x: 2, y: 0 }, { x: 2, y: 1 })
+// ({ x: 2, y: 1 }, MaxKey)
+jsTest.log("Create chunks.");
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 0, y: 1}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 1, y: 0}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 2, y: 0}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 2, y: 1}}));
+
+jsTest.log("Insert some data into each of the chunk ranges.");
+assert.writeOK(coll.insert({x: -1, y: 2}));
+assert.writeOK(coll.insert({x: 0, y: 2}));
+assert.writeOK(coll.insert({x: 1, y: 2}));
+assert.writeOK(coll.insert({x: 2, y: 1}));
+assert.writeOK(coll.insert({x: 2, y: 3}));
+
+// Chunks after merge:
+// (MinKey, { x: 0, y: 1 })
+// ({ x: 0, y: 1 }, { x: 2, y: 0 })
+// ({ x: 2, y: 0 }, { x: 2, y: 1 })
+// ({ x: 2, y: 1 }, MaxKey)
+jsTest.log("Merge chunks whose upper and lower bounds are compound shard keys.");
+checkMergeWorked({x: 0, y: 1}, {x: 2, y: 0});
+
+// Chunks after merge:
+// (MinKey, { x: 2, y: 0 })
+// ({ x: 2, y: 0 }, { x: 2, y: 1 })
+// ({ x: 2, y: 1 }, MaxKey)
+jsTest.log("Merge chunks whose upper bound contains a compound shard key, lower bound is MinKey");
+checkMergeWorked({x: MinKey, y: MinKey}, {x: 2, y: 0});
+
+// Chunks after merge:
+// (MinKey, { x: 2, y: 0 })
+// ({ x: 2, y: 0 }, MaxKey)
+jsTest.log("Merge chunks whose lower bound contains a compound shard key, upper bound is MaxKey");
+checkMergeWorked({x: 2, y: 0}, {x: MaxKey, y: MaxKey});
+
+// Chunks after merge:
+// (MinKey, MaxKey)
+jsTest.log("Merge chunks whos bounds are MinKey/MaxKey, but which have a compound shard key");
+checkMergeWorked({x: MinKey, y: MinKey}, {x: MaxKey, y: MaxKey});
+
+st.stop();
})();
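As a reminder of the mergeChunks contract exercised above: the bounds must be complete shard-key documents (every field of the compound key present) and must line up exactly with existing chunk boundaries owned by a single shard. A hedged one-liner, reusing the 'admin' handle and the foo.bar namespace from the test above:

// Merge the two chunks between {x: MinKey, y: MinKey} and {x: 2, y: 0} into one.
assert.commandWorked(admin.runCommand(
    {mergeChunks: "foo.bar", bounds: [{x: MinKey, y: MinKey}, {x: 2, y: 0}]}));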
diff --git a/jstests/sharding/merge_chunks_test.js b/jstests/sharding/merge_chunks_test.js
index 5d3bfbbb97b..3166f47113e 100644
--- a/jstests/sharding/merge_chunks_test.js
+++ b/jstests/sharding/merge_chunks_test.js
@@ -2,145 +2,133 @@
// Tests that merging chunks via mongos works/doesn't work with different chunk configurations
//
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 2, mongos: 2});
-
- var mongos = st.s0;
- var staleMongos = st.s1;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard('foo', st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-
- // Create ranges MIN->0,0->10,(hole),20->40,40->50,50->90,(hole),100->110,110->MAX on first
- // shard
- jsTest.log("Creating ranges...");
-
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 10}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 20}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 40}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 50}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 90}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 100}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 110}}));
-
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 10}, to: st.shard1.shardName}));
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 90}, to: st.shard1.shardName}));
-
- st.printShardingStatus();
-
- // Insert some data into each of the consolidated ranges
- assert.writeOK(coll.insert({_id: 0}));
- assert.writeOK(coll.insert({_id: 10}));
- assert.writeOK(coll.insert({_id: 40}));
- assert.writeOK(coll.insert({_id: 110}));
-
- var staleCollection = staleMongos.getCollection(coll + "");
-
- jsTest.log("Trying merges that should fail...");
-
- // S0: min->0, 0->10, 20->40, 40->50, 50->90, 100->110, 110->max
- // S1: 10->20, 90->100
-
- // Make sure merging non-exact chunks is invalid
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 5}]}));
- assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 5}, {_id: 10}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 15}, {_id: 50}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 55}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 105}, {_id: MaxKey}]}));
-
- // Make sure merging single chunks is invalid
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 0}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 40}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 110}, {_id: MaxKey}]}));
-
- // Make sure merging over holes is invalid
- assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 0}, {_id: 40}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 40}, {_id: 110}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 50}, {_id: 110}]}));
-
- // Make sure merging between shards is invalid
- assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 0}, {_id: 20}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 10}, {_id: 40}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 40}, {_id: 100}]}));
- assert.eq(4, staleCollection.find().itcount());
-
- jsTest.log("Trying merges that should succeed...");
-
- // Make sure merge including the MinKey works
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 10}]}));
- assert.eq(4, staleCollection.find().itcount());
- // S0: min->10, 20->40, 40->50, 50->90, 100->110, 110->max
- // S1: 10->20, 90->100
-
- // Make sure merging three chunks in the middle works
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 90}]}));
- assert.eq(4, staleCollection.find().itcount());
- // S0: min->10, 20->90, 100->110, 110->max
- // S1: 10->20, 90->100
-
- // Make sure merge including the MaxKey works
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 100}, {_id: MaxKey}]}));
- assert.eq(4, staleCollection.find().itcount());
- // S0: min->10, 20->90, 100->max
- // S1: 10->20, 90->100
-
- // Make sure merging chunks after a chunk has been moved out of a shard succeeds
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 110}, to: st.shard1.shardName}));
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 10}, to: st.shard0.shardName}));
- assert.eq(4, staleCollection.find().itcount());
- // S0: min->10, 10->20, 20->90
- // S1: 90->100, 100->max
-
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 90}, {_id: MaxKey}]}));
- assert.eq(4, staleCollection.find().itcount());
- // S0: min->10, 10->20, 20->90
- // S1: 90->max
-
- // Make sure merge on the other shard after a chunk has been merged succeeds
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 90}]}));
- // S0: min->90
- // S1: 90->max
-
- st.printShardingStatus(true);
-
- assert.eq(2, st.s0.getDB('config').chunks.find({'ns': 'foo.bar'}).itcount());
- assert.eq(
- 1,
- st.s0.getDB('config')
- .chunks
- .find({'ns': 'foo.bar', 'min._id': MinKey, 'max._id': 90, shard: st.shard0.shardName})
- .itcount());
- assert.eq(
- 1,
- st.s0.getDB('config')
- .chunks
- .find({'ns': 'foo.bar', 'min._id': 90, 'max._id': MaxKey, shard: st.shard1.shardName})
- .itcount());
-
- st.stop();
+'use strict';
+
+var st = new ShardingTest({shards: 2, mongos: 2});
+
+var mongos = st.s0;
+var staleMongos = st.s1;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard('foo', st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+
+// Create ranges MIN->0,0->10,(hole),20->40,40->50,50->90,(hole),100->110,110->MAX on first
+// shard
+jsTest.log("Creating ranges...");
+
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 10}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 20}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 40}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 50}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 90}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 100}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 110}}));
+
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 10}, to: st.shard1.shardName}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 90}, to: st.shard1.shardName}));
+
+st.printShardingStatus();
+
+// Insert some data into each of the consolidated ranges
+assert.writeOK(coll.insert({_id: 0}));
+assert.writeOK(coll.insert({_id: 10}));
+assert.writeOK(coll.insert({_id: 40}));
+assert.writeOK(coll.insert({_id: 110}));
+
+var staleCollection = staleMongos.getCollection(coll + "");
+
+jsTest.log("Trying merges that should fail...");
+
+// S0: min->0, 0->10, 20->40, 40->50, 50->90, 100->110, 110->max
+// S1: 10->20, 90->100
+
+// Make sure merging non-exact chunks is invalid
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 5}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 5}, {_id: 10}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 15}, {_id: 50}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 55}]}));
+assert.commandFailed(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 105}, {_id: MaxKey}]}));
+
+// Make sure merging single chunks is invalid
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 0}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 40}]}));
+assert.commandFailed(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 110}, {_id: MaxKey}]}));
+
+// Make sure merging over holes is invalid
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 0}, {_id: 40}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 40}, {_id: 110}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 50}, {_id: 110}]}));
+
+// Make sure merging between shards is invalid
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 0}, {_id: 20}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 10}, {_id: 40}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 40}, {_id: 100}]}));
+assert.eq(4, staleCollection.find().itcount());
+
+jsTest.log("Trying merges that should succeed...");
+
+// Make sure merge including the MinKey works
+assert.commandWorked(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 10}]}));
+assert.eq(4, staleCollection.find().itcount());
+// S0: min->10, 20->40, 40->50, 50->90, 100->110, 110->max
+// S1: 10->20, 90->100
+
+// Make sure merging three chunks in the middle works
+assert.commandWorked(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 90}]}));
+assert.eq(4, staleCollection.find().itcount());
+// S0: min->10, 20->90, 100->110, 110->max
+// S1: 10->20, 90->100
+
+// Make sure merge including the MaxKey works
+assert.commandWorked(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 100}, {_id: MaxKey}]}));
+assert.eq(4, staleCollection.find().itcount());
+// S0: min->10, 20->90, 100->max
+// S1: 10->20, 90->100
+
+// Make sure merging chunks after a chunk has been moved out of a shard succeeds
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 110}, to: st.shard1.shardName}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 10}, to: st.shard0.shardName}));
+assert.eq(4, staleCollection.find().itcount());
+// S0: min->10, 10->20, 20->90
+// S1: 90->100, 100->max
+
+assert.commandWorked(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 90}, {_id: MaxKey}]}));
+assert.eq(4, staleCollection.find().itcount());
+// S0: min->10, 10->20, 20->90
+// S1: 90->max
+
+// Make sure merge on the other shard after a chunk has been merged succeeds
+assert.commandWorked(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 90}]}));
+// S0: min->90
+// S1: 90->max
+
+st.printShardingStatus(true);
+
+assert.eq(2, st.s0.getDB('config').chunks.find({'ns': 'foo.bar'}).itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks
+ .find({'ns': 'foo.bar', 'min._id': MinKey, 'max._id': 90, shard: st.shard0.shardName})
+ .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks
+ .find({'ns': 'foo.bar', 'min._id': 90, 'max._id': MaxKey, shard: st.shard1.shardName})
+ .itcount());
+
+st.stop();
})();
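The final assertions above read the chunk layout straight from config.chunks. A small sketch of that inspection pattern, assuming 'mongos' is a connection to a mongos (as in the test) and that chunk documents are still keyed by namespace on this branch:

// Print the remaining chunk ranges and their owning shards for foo.bar.
mongos.getDB("config")
    .chunks.find({ns: "foo.bar"}, {_id: 0, min: 1, max: 1, shard: 1})
    .forEach(printjson);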
diff --git a/jstests/sharding/merge_chunks_test_with_md_ops.js b/jstests/sharding/merge_chunks_test_with_md_ops.js
index 63b2504521f..9f99cd584c4 100644
--- a/jstests/sharding/merge_chunks_test_with_md_ops.js
+++ b/jstests/sharding/merge_chunks_test_with_md_ops.js
@@ -1,54 +1,53 @@
// Tests that merging chunks does not prevent cluster from doing other metadata ops
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2});
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- st.printShardingStatus();
+st.printShardingStatus();
- // Split and merge the first chunk repeatedly
- jsTest.log("Splitting and merging repeatedly...");
-
- for (var i = 0; i < 5; i++) {
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
- printjson(mongos.getDB("config").chunks.find().toArray());
- }
-
- // Move the first chunk to the other shard
- jsTest.log("Moving to another shard...");
+// Split and merge the first chunk repeatedly
+jsTest.log("Splitting and merging repeatedly...");
+for (var i = 0; i < 5; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
+ printjson(mongos.getDB("config").chunks.find().toArray());
+}
- // Split and merge the chunk repeatedly
- jsTest.log("Splitting and merging repeatedly (again)...");
+// Move the first chunk to the other shard
+jsTest.log("Moving to another shard...");
- for (var i = 0; i < 5; i++) {
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
- printjson(mongos.getDB("config").chunks.find().toArray());
- }
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
- // Move the chunk back to the original shard
- jsTest.log("Moving to original shard...");
+// Split and merge the chunk repeatedly
+jsTest.log("Splitting and merging repeatedly (again)...");
+for (var i = 0; i < 5; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard0.shardName}));
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
+ printjson(mongos.getDB("config").chunks.find().toArray());
+}
+
+// Move the chunk back to the original shard
+jsTest.log("Moving to original shard...");
- st.printShardingStatus();
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard0.shardName}));
- st.stop();
+st.printShardingStatus();
+st.stop();
})();
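The split/merge round trip repeated in the loops above can be reduced to two commands; the sketch below reuses the 'admin' handle from the test, and the split point 42 is arbitrary.

// Split at one point, then merge the whole range back into a single chunk.
assert.commandWorked(admin.runCommand({split: "foo.bar", middle: {_id: 42}}));
assert.commandWorked(
    admin.runCommand({mergeChunks: "foo.bar", bounds: [{_id: MinKey}, {_id: MaxKey}]}));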
diff --git a/jstests/sharding/merge_command_options.js b/jstests/sharding/merge_command_options.js
index e82f71695f0..7d0edc56754 100644
--- a/jstests/sharding/merge_command_options.js
+++ b/jstests/sharding/merge_command_options.js
@@ -1,182 +1,183 @@
// Tests that aggregations with a $merge stage respect the options set on the command.
(function() {
- 'use strict';
-
- load("jstests/libs/profiler.js"); // For profilerHasNumMatchingEntriesOrThrow.
-
- const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
+'use strict';
+
+load("jstests/libs/profiler.js"); // For profilerHasNumMatchingEntriesOrThrow.
+
+const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
+
+const mongosDB = st.s0.getDB("test");
+const source = mongosDB.getCollection("source");
+const target = mongosDB.getCollection("target");
+const primaryDB = st.rs0.getPrimary().getDB("test");
+const nonPrimaryDB = st.rs1.getPrimary().getDB("test");
+const maxTimeMS = 5 * 60 * 1000;
+
+// Enable profiling on the test DB.
+assert.commandWorked(primaryDB.setProfilingLevel(2));
+assert.commandWorked(nonPrimaryDB.setProfilingLevel(2));
+
+// Enable sharding on the test DB and ensure that shard0 is the primary.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Shard the target collection, and set the unique flag to ensure that there's a unique
+// index on the shard key.
+const shardKey = {
+ sk: 1
+};
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: target.getFullName(), key: shardKey, unique: true}));
+assert.commandWorked(mongosDB.adminCommand({split: target.getFullName(), middle: {sk: 1}}));
+assert.commandWorked(
+ mongosDB.adminCommand({moveChunk: target.getFullName(), find: {sk: 1}, to: st.rs1.getURL()}));
+
+assert.commandWorked(source.insert({sk: "dummy"}));
+
+// The shardCollection command will send a listIndexes on the target collection.
+profilerHasNumMatchingEntriesOrThrow({
+ profileDB: primaryDB,
+ filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
+ numExpectedMatches: 1
+});
+
+// Test that the maxTimeMS value is used for both the listIndexes command for uniqueKey
+// validation as well as the $merge aggregation itself.
+(function testMaxTimeMS() {
+ assert.commandWorked(source.runCommand("aggregate", {
+ pipeline: [{
+ $merge: {
+ into: target.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: Object.keys(shardKey)
+ }
+ }],
+ cursor: {},
+ maxTimeMS: maxTimeMS
+ }));
+
+ // Verify the profile entry for the aggregate on the source collection.
+ profilerHasNumMatchingEntriesOrThrow({
+ profileDB: primaryDB,
+ filter: {
+ ns: source.getFullName(),
+ "command.aggregate": source.getName(),
+ "command.maxTimeMS": maxTimeMS
+ },
+ numExpectedMatches: 1
+ });
- const mongosDB = st.s0.getDB("test");
- const source = mongosDB.getCollection("source");
- const target = mongosDB.getCollection("target");
- const primaryDB = st.rs0.getPrimary().getDB("test");
- const nonPrimaryDB = st.rs1.getPrimary().getDB("test");
- const maxTimeMS = 5 * 60 * 1000;
+ // The listIndexes command should be sent to the primary shard only. Note that the
+ // maxTimeMS will *not* show up in the profiler since the parameter is used as a timeout for
+ // the remote command vs. part of the command itself.
+ profilerHasNumMatchingEntriesOrThrow({
+ profileDB: primaryDB,
+ filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
+ numExpectedMatches: 2
+ });
+})();
- // Enable profiling on the test DB.
- assert.commandWorked(primaryDB.setProfilingLevel(2));
- assert.commandWorked(nonPrimaryDB.setProfilingLevel(2));
+(function testTimeout() {
+ // Configure the "maxTimeAlwaysTimeOut" fail point on the primary shard, which forces
+ // mongod to throw if it receives an operation with a max time.
+ assert.commandWorked(primaryDB.getSiblingDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}));
+
+ // Test that the $merge correctly fails when the maxTimeMS is exceeded.
+ const res = source.runCommand("aggregate", {
+ pipeline: [{
+ $merge: {
+ into: target.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: Object.keys(shardKey)
+ }
+ }],
+ cursor: {},
+ maxTimeMS: maxTimeMS
+ });
+ assert.commandFailedWithCode(
+ res,
+ ErrorCodes.MaxTimeMSExpired,
+ "expected aggregate to fail with code " + ErrorCodes.MaxTimeMSExpired +
+ " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
+
+ // The actual aggregate should not be in the profiler since the initial listIndexes should
+ // have timed out.
+ profilerHasNumMatchingEntriesOrThrow({
+ profileDB: primaryDB,
+ filter: {
+ ns: source.getFullName(),
+ "command.aggregate": source.getName(),
+ "command.maxTimeMS": maxTimeMS
+ },
+ numExpectedMatches: 1
+ });
- // Enable sharding on the test DB and ensure that shard0 is the primary.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+ // Verify that there is an additional listIndexes profiler entry on the primary shard.
+ profilerHasNumMatchingEntriesOrThrow({
+ profileDB: primaryDB,
+ filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
+ numExpectedMatches: 3
+ });
- // Shard the target collection, and set the unique flag to ensure that there's a unique
- // index on the shard key.
- const shardKey = {sk: 1};
- assert.commandWorked(mongosDB.adminCommand(
- {shardCollection: target.getFullName(), key: shardKey, unique: true}));
- assert.commandWorked(mongosDB.adminCommand({split: target.getFullName(), middle: {sk: 1}}));
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: target.getFullName(), find: {sk: 1}, to: st.rs1.getURL()}));
+ assert.commandWorked(primaryDB.getSiblingDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}));
+})();
- assert.commandWorked(source.insert({sk: "dummy"}));
+// Test that setting a read preference on the $merge also applies to the listIndexes
+// command.
+(function testReadPreference() {
+ const secondaryDB = st.rs0.getSecondary().getDB("test");
+ assert.commandWorked(secondaryDB.setProfilingLevel(2));
+
+ assert.commandWorked(source.runCommand("aggregate", {
+ pipeline: [{
+ $merge: {
+ into: target.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: Object.keys(shardKey)
+ }
+ }],
+ cursor: {},
+ $readPreference: {mode: "secondary"}
+ }));
+
+ // Verify that the profiler on the secondary includes an entry for the listIndexes.
+ profilerHasNumMatchingEntriesOrThrow({
+ profileDB: secondaryDB,
+ filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
+ numExpectedMatches: 1
+ });
- // The shardCollection command will send a listIndexes on the target collection.
+ // Verify that the primary shard does *not* have an additional listIndexes profiler entry.
profilerHasNumMatchingEntriesOrThrow({
profileDB: primaryDB,
filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
+ numExpectedMatches: 3
+ });
+
+ profilerHasNumMatchingEntriesOrThrow({
+ profileDB: secondaryDB,
+ filter: {
+ ns: source.getFullName(),
+ "command.aggregate": source.getName(),
+ "command.$readPreference": {mode: "secondary"},
+ },
numExpectedMatches: 1
});
- // Test that the maxTimeMS value is used for both the listIndexes command for uniqueKey
- // validation as well as the $merge aggregation itself.
- (function testMaxTimeMS() {
- assert.commandWorked(source.runCommand("aggregate", {
- pipeline: [{
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: Object.keys(shardKey)
- }
- }],
- cursor: {},
- maxTimeMS: maxTimeMS
- }));
-
- // Verify the profile entry for the aggregate on the source collection.
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: primaryDB,
- filter: {
- ns: source.getFullName(),
- "command.aggregate": source.getName(),
- "command.maxTimeMS": maxTimeMS
- },
- numExpectedMatches: 1
- });
-
- // The listIndexes command should be sent to the primary shard only. Note that the
- // maxTimeMS will *not* show up in the profiler since the parameter is used as a timeout for
- // the remote command vs. part of the command itself.
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: primaryDB,
- filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
- numExpectedMatches: 2
- });
- })();
-
- (function testTimeout() {
- // Configure the "maxTimeAlwaysTimeOut" fail point on the primary shard, which forces
- // mongod to throw if it receives an operation with a max time.
- assert.commandWorked(primaryDB.getSiblingDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}));
-
- // Test that the $merge correctly fails when the maxTimeMS is exceeded.
- const res = source.runCommand("aggregate", {
- pipeline: [{
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: Object.keys(shardKey)
- }
- }],
- cursor: {},
- maxTimeMS: maxTimeMS
- });
- assert.commandFailedWithCode(
- res,
- ErrorCodes.MaxTimeMSExpired,
- "expected aggregate to fail with code " + ErrorCodes.MaxTimeMSExpired +
- " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
-
- // The actual aggregate should not be in the profiler since the initial listIndexes should
- // have timed out.
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: primaryDB,
- filter: {
- ns: source.getFullName(),
- "command.aggregate": source.getName(),
- "command.maxTimeMS": maxTimeMS
- },
- numExpectedMatches: 1
- });
-
- // Verify that there is an additional listIndexes profiler entry on the primary shard.
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: primaryDB,
- filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
- numExpectedMatches: 3
- });
-
- assert.commandWorked(primaryDB.getSiblingDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}));
- })();
-
- // Test that setting a read preference on the $merge also applies to the listIndexes
- // command.
- (function testReadPreference() {
- const secondaryDB = st.rs0.getSecondary().getDB("test");
- assert.commandWorked(secondaryDB.setProfilingLevel(2));
-
- assert.commandWorked(source.runCommand("aggregate", {
- pipeline: [{
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: Object.keys(shardKey)
- }
- }],
- cursor: {},
- $readPreference: {mode: "secondary"}
- }));
-
- // Verify that the profiler on the secondary includes an entry for the listIndexes.
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: secondaryDB,
- filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
- numExpectedMatches: 1
- });
-
- // Verify that the primary shard does *not* have an additional listIndexes profiler entry.
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: primaryDB,
- filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
- numExpectedMatches: 3
- });
-
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: secondaryDB,
- filter: {
- ns: source.getFullName(),
- "command.aggregate": source.getName(),
- "command.$readPreference": {mode: "secondary"},
- },
- numExpectedMatches: 1
- });
-
- // Test that $out cannot be run against a secondary since it writes directly to a local temp
- // collection.
- assert.commandFailedWithCode(source.runCommand("aggregate", {
- pipeline: [{$out: "non_existent"}],
- cursor: {},
- $readPreference: {mode: "secondary"}
- }),
- 16994,
- "Expected $out to fail to create the temp collection.");
- })();
-
- st.stop();
+ // Test that $out cannot be run against a secondary since it writes directly to a local temp
+ // collection.
+ assert.commandFailedWithCode(
+ source.runCommand(
+ "aggregate",
+ {pipeline: [{$out: "non_existent"}], cursor: {}, $readPreference: {mode: "secondary"}}),
+ 16994,
+ "Expected $out to fail to create the temp collection.");
+})();
+
+st.stop();
})();
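The profiler assertions in this test go through profilerHasNumMatchingEntriesOrThrow from jstests/libs/profiler.js; the same check can be made by hand against system.profile. A minimal sketch, assuming 'primaryDB' is a direct connection to the primary shard's test database with profiling already at level 2:

// Count listIndexes entries against the target collection recorded by the profiler.
const nMatches =
    primaryDB.system.profile.find({ns: "test.target", "command.listIndexes": "target"}).itcount();
assert.gte(nMatches, 1, "expected at least one listIndexes profile entry on the primary shard");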
diff --git a/jstests/sharding/merge_does_not_force_pipeline_split.js b/jstests/sharding/merge_does_not_force_pipeline_split.js
index 383452469aa..ce919537951 100644
--- a/jstests/sharding/merge_does_not_force_pipeline_split.js
+++ b/jstests/sharding/merge_does_not_force_pipeline_split.js
@@ -1,108 +1,108 @@
// Tests that a $merge stage does not force a pipeline to split into a "shards part" and a "merging
// part" if no other stage in the pipeline would force such a split.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
- const mongosDB = st.s.getDB("test_db");
+const mongosDB = st.s.getDB("test_db");
- const inColl = mongosDB["inColl"];
- // Two different output collections will be sharded by different keys.
- const outCollById = mongosDB["outCollById"];
- const outCollBySK = mongosDB["outCollBySK"];
- st.shardColl(outCollById, {_id: 1}, {_id: 500}, {_id: 500}, mongosDB.getName());
- st.shardColl(outCollBySK, {sk: 1}, {sk: 500}, {sk: 500}, mongosDB.getName());
- const numDocs = 1000;
+const inColl = mongosDB["inColl"];
+// Two different output collections will be sharded by different keys.
+const outCollById = mongosDB["outCollById"];
+const outCollBySK = mongosDB["outCollBySK"];
+st.shardColl(outCollById, {_id: 1}, {_id: 500}, {_id: 500}, mongosDB.getName());
+st.shardColl(outCollBySK, {sk: 1}, {sk: 500}, {sk: 500}, mongosDB.getName());
+const numDocs = 1000;
- function insertData(coll) {
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, sk: numDocs - i});
- }
- assert.commandWorked(bulk.execute());
+function insertData(coll) {
+ const bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, sk: numDocs - i});
}
+ assert.commandWorked(bulk.execute());
+}
- // Shard the input collection.
- st.shardColl(inColl, {_id: 1}, {_id: 500}, {_id: 500}, mongosDB.getName());
+// Shard the input collection.
+st.shardColl(inColl, {_id: 1}, {_id: 500}, {_id: 500}, mongosDB.getName());
- // Insert some data to the input collection.
- insertData(inColl);
+// Insert some data to the input collection.
+insertData(inColl);
- function assertMergeRunsOnShards(explain) {
- assert(explain.hasOwnProperty("splitPipeline"), tojson(explain));
- assert(explain.splitPipeline.hasOwnProperty("shardsPart"), tojson(explain));
- assert.eq(
- explain.splitPipeline.shardsPart.filter(stage => stage.hasOwnProperty("$merge")).length,
- 1,
- tojson(explain));
- assert(explain.splitPipeline.hasOwnProperty("mergerPart"), tojson(explain));
- assert.eq([], explain.splitPipeline.mergerPart, tojson(explain));
- }
+function assertMergeRunsOnShards(explain) {
+ assert(explain.hasOwnProperty("splitPipeline"), tojson(explain));
+ assert(explain.splitPipeline.hasOwnProperty("shardsPart"), tojson(explain));
+ assert.eq(
+ explain.splitPipeline.shardsPart.filter(stage => stage.hasOwnProperty("$merge")).length,
+ 1,
+ tojson(explain));
+ assert(explain.splitPipeline.hasOwnProperty("mergerPart"), tojson(explain));
+ assert.eq([], explain.splitPipeline.mergerPart, tojson(explain));
+}
- // Test that a simple $merge can run in parallel. Note that we still expect a 'splitPipeline' in
- // the explain output, but the merging half should be empty to indicate that the entire thing is
- // executing in parallel on the shards.
+// Test that a simple $merge can run in parallel. Note that we still expect a 'splitPipeline' in
+// the explain output, but the merging half should be empty to indicate that the entire thing is
+// executing in parallel on the shards.
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- assert.commandWorked(outCollById.remove({}));
- assert.commandWorked(outCollBySK.remove({}));
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ assert.commandWorked(outCollById.remove({}));
+ assert.commandWorked(outCollBySK.remove({}));
- let explain = inColl.explain().aggregate([{
- $merge: {
- into: outCollById.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]);
- assertMergeRunsOnShards(explain);
- assert.eq(outCollById.find().itcount(), 0);
- // We expect the test to succeed for all $merge modes. However, the 'whenNotMatched: fail'
- // mode will cause the test to fail if the source collection has a document without a match
- // in the target collection. Similarly 'whenNotMatched: discard' will fail the assertion
- // below for the expected number of document in target collection. So we populate the target
- // collection with the same documents as in the source.
- if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
- insertData(outCollById);
+ let explain = inColl.explain().aggregate([{
+ $merge: {
+ into: outCollById.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
}
+ }]);
+ assertMergeRunsOnShards(explain);
+ assert.eq(outCollById.find().itcount(), 0);
+    // We expect the test to succeed for all $merge modes. However, the 'whenNotMatched: fail'
+    // mode will cause the test to fail if the source collection has a document without a match
+    // in the target collection. Similarly, 'whenNotMatched: discard' will fail the assertion
+    // below on the expected number of documents in the target collection. So we populate the
+    // target collection with the same documents as in the source.
+ if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
+ insertData(outCollById);
+ }
- // Actually execute the pipeline and make sure it works as expected.
- assert.doesNotThrow(() => inColl.aggregate([{
- $merge: {
- into: outCollById.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]));
- assert.eq(outCollById.find().itcount(), numDocs);
+ // Actually execute the pipeline and make sure it works as expected.
+ assert.doesNotThrow(() => inColl.aggregate([{
+ $merge: {
+ into: outCollById.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]));
+ assert.eq(outCollById.find().itcount(), numDocs);
- // Test the same thing but in a pipeline where the output collection's shard key differs
- // from the input collection's.
- explain = inColl.explain().aggregate([{
- $merge: {
- into: outCollBySK.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]);
- assertMergeRunsOnShards(explain);
- // Again, test that execution works as expected.
- assert.eq(outCollBySK.find().itcount(), 0);
+ // Test the same thing but in a pipeline where the output collection's shard key differs
+ // from the input collection's.
+ explain = inColl.explain().aggregate([{
+ $merge: {
+ into: outCollBySK.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]);
+ assertMergeRunsOnShards(explain);
+ // Again, test that execution works as expected.
+ assert.eq(outCollBySK.find().itcount(), 0);
- if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
- insertData(outCollBySK);
+ if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
+ insertData(outCollBySK);
+ }
+ assert.doesNotThrow(() => inColl.aggregate([{
+ $merge: {
+ into: outCollBySK.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
}
- assert.doesNotThrow(() => inColl.aggregate([{
- $merge: {
- into: outCollBySK.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]));
- assert.eq(outCollBySK.find().itcount(), numDocs);
- });
+ }]));
+ assert.eq(outCollBySK.find().itcount(), numDocs);
+});
- st.stop();
+st.stop();
}());
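// A minimal illustrative sketch, not part of the patch above: an approximation of the
// withEachMergeMode() helper that these tests load from jstests/aggregation/extras/merge_helpers.js.
// The real helper may also enumerate pipeline-style 'whenMatched' modes or filter combinations;
// this only shows the callback shape {whenMatchedMode, whenNotMatchedMode} the tests rely on.
function withEachMergeModeSketch(callback) {
    const whenMatchedModes = ["replace", "keepExisting", "merge", "fail"];
    const whenNotMatchedModes = ["insert", "discard", "fail"];
    whenMatchedModes.forEach(whenMatchedMode => {
        whenNotMatchedModes.forEach(whenNotMatchedMode => {
            callback({whenMatchedMode: whenMatchedMode, whenNotMatchedMode: whenNotMatchedMode});
        });
    });
}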
diff --git a/jstests/sharding/merge_from_stale_mongos.js b/jstests/sharding/merge_from_stale_mongos.js
index d91d92dcb62..e7b7e42d548 100644
--- a/jstests/sharding/merge_from_stale_mongos.js
+++ b/jstests/sharding/merge_from_stale_mongos.js
@@ -1,247 +1,245 @@
// Tests for $merge against a stale mongos with combinations of sharded/unsharded source and target
// collections.
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
-
- const st = new ShardingTest({
- shards: 2,
- mongos: 4,
- });
-
- const freshMongos = st.s0.getDB(jsTestName());
- const staleMongosSource = st.s1.getDB(jsTestName());
- const staleMongosTarget = st.s2.getDB(jsTestName());
- const staleMongosBoth = st.s3.getDB(jsTestName());
-
- const sourceColl = freshMongos.getCollection("source");
- const targetColl = freshMongos.getCollection("target");
-
- // Enable sharding on the test DB and ensure its primary is shard 0.
+"use strict";
+
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 4,
+});
+
+const freshMongos = st.s0.getDB(jsTestName());
+const staleMongosSource = st.s1.getDB(jsTestName());
+const staleMongosTarget = st.s2.getDB(jsTestName());
+const staleMongosBoth = st.s3.getDB(jsTestName());
+
+const sourceColl = freshMongos.getCollection("source");
+const targetColl = freshMongos.getCollection("target");
+
+// Enable sharding on the test DB and ensure its primary is shard 0.
+assert.commandWorked(staleMongosSource.adminCommand({enableSharding: staleMongosSource.getName()}));
+st.ensurePrimaryShard(staleMongosSource.getName(), st.rs0.getURL());
+
+// Shards the collection 'coll' through 'mongos'.
+function shardCollWithMongos(mongos, coll) {
+ coll.drop();
+ // Shard the given collection on _id, split the collection into 2 chunks: [MinKey, 0) and
+ // [0, MaxKey), then move the [0, MaxKey) chunk to shard 1.
+ assert.commandWorked(mongos.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+ assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
assert.commandWorked(
- staleMongosSource.adminCommand({enableSharding: staleMongosSource.getName()}));
- st.ensurePrimaryShard(staleMongosSource.getName(), st.rs0.getURL());
-
- // Shards the collection 'coll' through 'mongos'.
- function shardCollWithMongos(mongos, coll) {
- coll.drop();
- // Shard the given collection on _id, split the collection into 2 chunks: [MinKey, 0) and
- // [0, MaxKey), then move the [0, MaxKey) chunk to shard 1.
- assert.commandWorked(
- mongos.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
- assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(mongos.adminCommand(
- {moveChunk: coll.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
- }
-
- // Configures the two mongos, staleMongosSource and staleMongosTarget, to be stale on the source
- // and target collections, respectively. For instance, if 'shardedSource' is true then
- // staleMongosSource will believe that the source collection is unsharded.
- function setupStaleMongos({shardedSource, shardedTarget}) {
- // Initialize both mongos to believe the collections are unsharded.
+ mongos.adminCommand({moveChunk: coll.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+}
+
+// Configures the two mongos, staleMongosSource and staleMongosTarget, to be stale on the source
+// and target collections, respectively. For instance, if 'shardedSource' is true then
+// staleMongosSource will believe that the source collection is unsharded.
+function setupStaleMongos({shardedSource, shardedTarget}) {
+ // Initialize both mongos to believe the collections are unsharded.
+ sourceColl.drop();
+ targetColl.drop();
+ assert.commandWorked(
+ staleMongosSource[sourceColl.getName()].insert({_id: "insert when unsharded (source)"}));
+ assert.commandWorked(
+ staleMongosSource[targetColl.getName()].insert({_id: "insert when unsharded (source)"}));
+ assert.commandWorked(
+ staleMongosTarget[sourceColl.getName()].insert({_id: "insert when unsharded (target)"}));
+ assert.commandWorked(
+ staleMongosTarget[targetColl.getName()].insert({_id: "insert when unsharded (target)"}));
+
+ if (shardedSource) {
+ // Shard the source collection through the staleMongosTarget mongos, keeping the
+ // staleMongosSource unaware.
+ shardCollWithMongos(staleMongosTarget, sourceColl);
+ } else {
+ // Shard the collection through staleMongosSource.
+ shardCollWithMongos(staleMongosSource, sourceColl);
+
+ // Then drop the collection, but do not recreate it yet as that will happen on the next
+ // insert later in the test.
sourceColl.drop();
- targetColl.drop();
- assert.commandWorked(staleMongosSource[sourceColl.getName()].insert(
- {_id: "insert when unsharded (source)"}));
- assert.commandWorked(staleMongosSource[targetColl.getName()].insert(
- {_id: "insert when unsharded (source)"}));
- assert.commandWorked(staleMongosTarget[sourceColl.getName()].insert(
- {_id: "insert when unsharded (target)"}));
- assert.commandWorked(staleMongosTarget[targetColl.getName()].insert(
- {_id: "insert when unsharded (target)"}));
-
- if (shardedSource) {
- // Shard the source collection through the staleMongosTarget mongos, keeping the
- // staleMongosSource unaware.
- shardCollWithMongos(staleMongosTarget, sourceColl);
- } else {
- // Shard the collection through staleMongosSource.
- shardCollWithMongos(staleMongosSource, sourceColl);
-
- // Then drop the collection, but do not recreate it yet as that will happen on the next
- // insert later in the test.
- sourceColl.drop();
- }
-
- if (shardedTarget) {
- // Shard the target collection through the staleMongosSource mongos, keeping the
- // staleMongosTarget unaware.
- shardCollWithMongos(staleMongosSource, targetColl);
- } else {
- // Shard the collection through staleMongosTarget.
- shardCollWithMongos(staleMongosTarget, targetColl);
-
- // Then drop the collection, but do not recreate it yet as that will happen on the next
- // insert later in the test.
- targetColl.drop();
- }
}
- // Runs a $merge with the given modes against each mongos in 'mongosList'. This method will wrap
- // 'mongosList' into a list if it is not an array.
- function runMergeTest(whenMatchedMode, whenNotMatchedMode, mongosList) {
- if (!(mongosList instanceof Array)) {
- mongosList = [mongosList];
- }
-
- mongosList.forEach(mongos => {
- targetColl.remove({});
- sourceColl.remove({});
- // Insert several documents into the source and target collection without any conflicts.
- // Note that the chunk split point is at {_id: 0}.
- assert.commandWorked(sourceColl.insert([{_id: -1}, {_id: 0}, {_id: 1}]));
- assert.commandWorked(targetColl.insert([{_id: -2}, {_id: 2}, {_id: 3}]));
-
- mongos[sourceColl.getName()].aggregate([{
- $merge: {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]);
-
- // If whenNotMatchedMode is "discard", then the documents in the source collection will
- // not get written to the target since none of them match.
- assert.eq(whenNotMatchedMode == "discard" ? 3 : 6, targetColl.find().itcount());
- });
- }
+ if (shardedTarget) {
+ // Shard the target collection through the staleMongosSource mongos, keeping the
+ // staleMongosTarget unaware.
+ shardCollWithMongos(staleMongosSource, targetColl);
+ } else {
+ // Shard the collection through staleMongosTarget.
+ shardCollWithMongos(staleMongosTarget, targetColl);
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- // Skip the combination of merge modes which will fail depending on the contents of the
- // source and target collection, as this will cause the assertion below to trip.
- if (whenNotMatchedMode == "fail")
- return;
-
- // For each mode, test the following scenarios:
- // * Both the source and target collections are sharded.
- // * Both the source and target collections are unsharded.
- // * Source collection is sharded and the target collection is unsharded.
- // * Source collection is unsharded and the target collection is sharded.
- setupStaleMongos({shardedSource: false, shardedTarget: false});
- runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
-
- setupStaleMongos({shardedSource: true, shardedTarget: true});
- runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
-
- setupStaleMongos({shardedSource: true, shardedTarget: false});
- runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
-
- setupStaleMongos({shardedSource: false, shardedTarget: true});
- runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
-
- //
- // The remaining tests run against a mongos which is stale with respect to BOTH the source
- // and target collections.
- //
- const sourceCollStale = staleMongosBoth.getCollection(sourceColl.getName());
- const targetCollStale = staleMongosBoth.getCollection(targetColl.getName());
-
- //
- // 1. Both source and target collections are sharded.
- //
- sourceCollStale.drop();
- targetCollStale.drop();
-
- // Insert into both collections through the stale mongos such that it believes the
- // collections exist and are unsharded.
- assert.commandWorked(sourceCollStale.insert({_id: 0}));
- assert.commandWorked(targetCollStale.insert({_id: 0}));
-
- shardCollWithMongos(freshMongos, sourceColl);
- shardCollWithMongos(freshMongos, targetColl);
-
- // Test against the stale mongos, which believes both collections are unsharded.
- runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
-
- //
- // 2. Both source and target collections are unsharded.
- //
- sourceColl.drop();
+ // Then drop the collection, but do not recreate it yet as that will happen on the next
+ // insert later in the test.
targetColl.drop();
+ }
+}
- // The collections were both dropped through a different mongos, so the stale mongos still
- // believes that they're sharded.
- runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
-
- //
- // 3. Source collection is sharded and target collection is unsharded.
- //
- sourceCollStale.drop();
-
- // Insert into the source collection through the stale mongos such that it believes the
- // collection exists and is unsharded.
- assert.commandWorked(sourceCollStale.insert({_id: 0}));
-
- // Shard the source collection through the fresh mongos.
- shardCollWithMongos(freshMongos, sourceColl);
-
- // Shard the target through the stale mongos, but then drop and recreate it as unsharded
- // through a different mongos.
- shardCollWithMongos(staleMongosBoth, targetColl);
- targetColl.drop();
+// Runs a $merge with the given modes against each mongos in 'mongosList'. This method will wrap
+// 'mongosList' in an array if it is not already one.
+function runMergeTest(whenMatchedMode, whenNotMatchedMode, mongosList) {
+ if (!(mongosList instanceof Array)) {
+ mongosList = [mongosList];
+ }
- // At this point, the stale mongos believes the source collection is unsharded and the
- // target collection is sharded when in fact the reverse is true.
- runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
+ mongosList.forEach(mongos => {
+ targetColl.remove({});
+ sourceColl.remove({});
+ // Insert several documents into the source and target collection without any conflicts.
+ // Note that the chunk split point is at {_id: 0}.
+ assert.commandWorked(sourceColl.insert([{_id: -1}, {_id: 0}, {_id: 1}]));
+ assert.commandWorked(targetColl.insert([{_id: -2}, {_id: 2}, {_id: 3}]));
+
+ mongos[sourceColl.getName()].aggregate([{
+ $merge: {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]);
+
+ // If whenNotMatchedMode is "discard", then the documents in the source collection will
+ // not get written to the target since none of them match.
+ assert.eq(whenNotMatchedMode == "discard" ? 3 : 6, targetColl.find().itcount());
+ });
+}
+
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+    // Skip any combination involving 'whenNotMatched: fail', since its outcome depends on the
+    // contents of the source and target collections and would trip the assertion below.
+ if (whenNotMatchedMode == "fail")
+ return;
+
+ // For each mode, test the following scenarios:
+ // * Both the source and target collections are sharded.
+ // * Both the source and target collections are unsharded.
+ // * Source collection is sharded and the target collection is unsharded.
+ // * Source collection is unsharded and the target collection is sharded.
+ setupStaleMongos({shardedSource: false, shardedTarget: false});
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
- //
- // 4. Source collection is unsharded and target collection is sharded.
- //
- sourceCollStale.drop();
- targetCollStale.drop();
+ setupStaleMongos({shardedSource: true, shardedTarget: true});
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
- // Insert into the target collection through the stale mongos such that it believes the
- // collection exists and is unsharded.
- assert.commandWorked(targetCollStale.insert({_id: 0}));
+ setupStaleMongos({shardedSource: true, shardedTarget: false});
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
- shardCollWithMongos(freshMongos, targetColl);
+ setupStaleMongos({shardedSource: false, shardedTarget: true});
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
+
+ //
+ // The remaining tests run against a mongos which is stale with respect to BOTH the source
+ // and target collections.
+ //
+ const sourceCollStale = staleMongosBoth.getCollection(sourceColl.getName());
+ const targetCollStale = staleMongosBoth.getCollection(targetColl.getName());
+
+ //
+ // 1. Both source and target collections are sharded.
+ //
+ sourceCollStale.drop();
+ targetCollStale.drop();
+
+ // Insert into both collections through the stale mongos such that it believes the
+ // collections exist and are unsharded.
+ assert.commandWorked(sourceCollStale.insert({_id: 0}));
+ assert.commandWorked(targetCollStale.insert({_id: 0}));
+
+ shardCollWithMongos(freshMongos, sourceColl);
+ shardCollWithMongos(freshMongos, targetColl);
+
+ // Test against the stale mongos, which believes both collections are unsharded.
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
+
+ //
+ // 2. Both source and target collections are unsharded.
+ //
+ sourceColl.drop();
+ targetColl.drop();
+
+ // The collections were both dropped through a different mongos, so the stale mongos still
+ // believes that they're sharded.
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
+
+ //
+ // 3. Source collection is sharded and target collection is unsharded.
+ //
+ sourceCollStale.drop();
+
+ // Insert into the source collection through the stale mongos such that it believes the
+ // collection exists and is unsharded.
+ assert.commandWorked(sourceCollStale.insert({_id: 0}));
+
+ // Shard the source collection through the fresh mongos.
+ shardCollWithMongos(freshMongos, sourceColl);
+
+ // Shard the target through the stale mongos, but then drop and recreate it as unsharded
+ // through a different mongos.
+ shardCollWithMongos(staleMongosBoth, targetColl);
+ targetColl.drop();
+
+ // At this point, the stale mongos believes the source collection is unsharded and the
+ // target collection is sharded when in fact the reverse is true.
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
+
+ //
+ // 4. Source collection is unsharded and target collection is sharded.
+ //
+ sourceCollStale.drop();
+ targetCollStale.drop();
+
+ // Insert into the target collection through the stale mongos such that it believes the
+ // collection exists and is unsharded.
+ assert.commandWorked(targetCollStale.insert({_id: 0}));
+
+ shardCollWithMongos(freshMongos, targetColl);
+
+ // Shard the source through the stale mongos, but then drop and recreate it as unsharded
+ // through a different mongos.
+ shardCollWithMongos(staleMongosBoth, sourceColl);
+ sourceColl.drop();
+
+ // At this point, the stale mongos believes the source collection is sharded and the target
+ // collection is unsharded when in fact the reverse is true.
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
+});
+
+// Runs a legacy $out against each mongos in 'mongosList'. This method will wrap 'mongosList'
+// in an array if it is not already one.
+function runOutTest(mongosList) {
+ if (!(mongosList instanceof Array)) {
+ mongosList = [mongosList];
+ }
- // Shard the source through the stale mongos, but then drop and recreate it as unsharded
- // through a different mongos.
- shardCollWithMongos(staleMongosBoth, sourceColl);
- sourceColl.drop();
+ mongosList.forEach(mongos => {
+ targetColl.remove({});
+ sourceColl.remove({});
+ // Insert several documents into the source and target collection without any conflicts.
+ // Note that the chunk split point is at {_id: 0}.
+ assert.commandWorked(sourceColl.insert([{_id: -1}, {_id: 0}, {_id: 1}]));
+ assert.commandWorked(targetColl.insert([{_id: -2}, {_id: 2}, {_id: 3}]));
- // At this point, the stale mongos believes the source collection is sharded and the target
- // collection is unsharded when in fact the reverse is true.
- runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
+ mongos[sourceColl.getName()].aggregate([{$out: targetColl.getName()}]);
+ assert.eq(3, targetColl.find().itcount());
});
+}
- // Runs a legacy $out against each mongos in 'mongosList'. This method will wrap 'mongosList'
- // into a list if it is not an array.
- function runOutTest(mongosList) {
- if (!(mongosList instanceof Array)) {
- mongosList = [mongosList];
- }
-
- mongosList.forEach(mongos => {
- targetColl.remove({});
- sourceColl.remove({});
- // Insert several documents into the source and target collection without any conflicts.
- // Note that the chunk split point is at {_id: 0}.
- assert.commandWorked(sourceColl.insert([{_id: -1}, {_id: 0}, {_id: 1}]));
- assert.commandWorked(targetColl.insert([{_id: -2}, {_id: 2}, {_id: 3}]));
-
- mongos[sourceColl.getName()].aggregate([{$out: targetColl.getName()}]);
- assert.eq(3, targetColl.find().itcount());
- });
- }
-
- // Legacy $out will fail if the target collection is sharded.
- setupStaleMongos({shardedSource: false, shardedTarget: false});
- runOutTest([staleMongosSource, staleMongosTarget]);
+// Legacy $out will fail if the target collection is sharded.
+setupStaleMongos({shardedSource: false, shardedTarget: false});
+runOutTest([staleMongosSource, staleMongosTarget]);
- setupStaleMongos({shardedSource: true, shardedTarget: true});
- assert.eq(assert.throws(() => runOutTest(staleMongosSource)).code, 28769);
- assert.eq(assert.throws(() => runOutTest(staleMongosTarget)).code, 17017);
+setupStaleMongos({shardedSource: true, shardedTarget: true});
+assert.eq(assert.throws(() => runOutTest(staleMongosSource)).code, 28769);
+assert.eq(assert.throws(() => runOutTest(staleMongosTarget)).code, 17017);
- setupStaleMongos({shardedSource: true, shardedTarget: false});
- runOutTest([staleMongosSource, staleMongosTarget]);
+setupStaleMongos({shardedSource: true, shardedTarget: false});
+runOutTest([staleMongosSource, staleMongosTarget]);
- setupStaleMongos({shardedSource: false, shardedTarget: true});
- assert.eq(assert.throws(() => runOutTest(staleMongosSource)).code, 28769);
- assert.eq(assert.throws(() => runOutTest(staleMongosTarget)).code, 17017);
+setupStaleMongos({shardedSource: false, shardedTarget: true});
+assert.eq(assert.throws(() => runOutTest(staleMongosSource)).code, 28769);
+assert.eq(assert.throws(() => runOutTest(staleMongosTarget)).code, 17017);
- st.stop();
+st.stop();
}());
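// A minimal illustrative sketch, not part of the patch above: how the staleness this test relies
// on arises. Assumes a two-mongos ShardingTest 'st' and a database name 'dbName' (both
// hypothetical here). Each mongos caches routing metadata independently, so DDL performed through
// one mongos leaves the other with a stale view until it refreshes.
const freshDB = st.s0.getDB(dbName);
const staleDB = st.s1.getDB(dbName);

// Both mongos learn about the collection while it is unsharded.
assert.commandWorked(freshDB.coll.insert({_id: 0}));
assert.eq(1, staleDB.coll.find().itcount());

// Shard the collection through st.s0 only. st.s1 keeps serving from its cached (unsharded) view
// until a shard returns a stale-config error and forces it to refresh.
assert.commandWorked(st.s0.adminCommand({enableSharding: dbName}));
assert.commandWorked(
    st.s0.adminCommand({shardCollection: freshDB.coll.getFullName(), key: {_id: 1}}));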
diff --git a/jstests/sharding/merge_hashed_shard_key.js b/jstests/sharding/merge_hashed_shard_key.js
index bd9e1e11475..86661c9c1b0 100644
--- a/jstests/sharding/merge_hashed_shard_key.js
+++ b/jstests/sharding/merge_hashed_shard_key.js
@@ -2,89 +2,88 @@
// when the "on" field is not explicitly specified and also when there is a unique, non-hashed index
// that matches the "on" field(s).
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode,
- // assertMergeFailsWithoutUniqueIndex,
- // assertMergeSucceedsWithExpectedUniqueIndex.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode,
+ // assertMergeFailsWithoutUniqueIndex,
+                                                      // assertMergeSucceedsWithExpectedUniqueIndex.
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
- const mongosDB = st.s0.getDB("merge_hashed_shard_key");
- const foreignDB = st.s0.getDB("merge_hashed_shard_key_foreign");
- const source = mongosDB.source;
- const target = mongosDB.target;
- source.drop();
- target.drop();
+const mongosDB = st.s0.getDB("merge_hashed_shard_key");
+const foreignDB = st.s0.getDB("merge_hashed_shard_key_foreign");
+const source = mongosDB.source;
+const target = mongosDB.target;
+source.drop();
+target.drop();
- assert.commandWorked(source.insert({placeholderDoc: 1}));
+assert.commandWorked(source.insert({placeholderDoc: 1}));
- function testHashedShardKey(shardKey, spec, prefixPipeline = []) {
- target.drop();
- st.shardColl(target, shardKey, spec);
+function testHashedShardKey(shardKey, spec, prefixPipeline = []) {
+ target.drop();
+ st.shardColl(target, shardKey, spec);
- // Test that $merge passes without specifying an "on" field.
- assertMergeSucceedsWithExpectedUniqueIndex(
- {source: source, target: target, prevStages: prefixPipeline});
+ // Test that $merge passes without specifying an "on" field.
+ assertMergeSucceedsWithExpectedUniqueIndex(
+ {source: source, target: target, prevStages: prefixPipeline});
- // Test that $merge fails even if the "on" fields matches the shardKey, since it isn't
- // unique.
- assertMergeFailsWithoutUniqueIndex({
- source: source,
- target: target,
- onFields: Object.keys(shardKey),
- prevStages: prefixPipeline
- });
+ // Test that $merge fails even if the "on" fields matches the shardKey, since it isn't
+ // unique.
+ assertMergeFailsWithoutUniqueIndex({
+ source: source,
+ target: target,
+ onFields: Object.keys(shardKey),
+ prevStages: prefixPipeline
+ });
- // Test that the $merge passes if there exists a unique index prefixed on the hashed shard
- // key.
- const prefixedUniqueKey = Object.merge(shardKey, {extraField: 1});
- prefixPipeline = prefixPipeline.concat([{$addFields: {extraField: 1}}]);
- assert.commandWorked(target.createIndex(prefixedUniqueKey, {unique: true}));
- assertMergeSucceedsWithExpectedUniqueIndex(
- {source: source, target: target, prevStages: prefixPipeline});
- assertMergeSucceedsWithExpectedUniqueIndex({
- source: source,
- target: target,
- onFields: Object.keys(prefixedUniqueKey),
- prevStages: prefixPipeline
- });
- }
+ // Test that the $merge passes if there exists a unique index prefixed on the hashed shard
+ // key.
+ const prefixedUniqueKey = Object.merge(shardKey, {extraField: 1});
+ prefixPipeline = prefixPipeline.concat([{$addFields: {extraField: 1}}]);
+ assert.commandWorked(target.createIndex(prefixedUniqueKey, {unique: true}));
+ assertMergeSucceedsWithExpectedUniqueIndex(
+ {source: source, target: target, prevStages: prefixPipeline});
+ assertMergeSucceedsWithExpectedUniqueIndex({
+ source: source,
+ target: target,
+ onFields: Object.keys(prefixedUniqueKey),
+ prevStages: prefixPipeline
+ });
+}
- //
- // Tests for a hashed non-id shard key.
- //
- let prevStage = [{$addFields: {hashedKey: 1}}];
- testHashedShardKey({hashedKey: 1}, {hashedKey: "hashed"}, prevStage);
+//
+// Tests for a hashed non-id shard key.
+//
+let prevStage = [{$addFields: {hashedKey: 1}}];
+testHashedShardKey({hashedKey: 1}, {hashedKey: "hashed"}, prevStage);
- //
- // Tests for a hashed non-id dotted path shard key.
- //
- prevStage = [{$addFields: {dotted: {path: 1}}}];
- testHashedShardKey({"dotted.path": 1}, {"dotted.path": "hashed"}, prevStage);
+//
+// Tests for a hashed non-id dotted path shard key.
+//
+prevStage = [{$addFields: {dotted: {path: 1}}}];
+testHashedShardKey({"dotted.path": 1}, {"dotted.path": "hashed"}, prevStage);
- //
- // Tests for a compound hashed shard key.
- //
- prevStage = [{$addFields: {hashedKey: {subField: 1}, nonHashedKey: 1}}];
- testHashedShardKey({"hashedKey.subField": 1, nonHashedKey: 1},
- {"hashedKey.subField": "hashed", nonHashedKey: 1},
- prevStage);
+//
+// Tests for a compound hashed shard key.
+//
+prevStage = [{$addFields: {hashedKey: {subField: 1}, nonHashedKey: 1}}];
+testHashedShardKey({"hashedKey.subField": 1, nonHashedKey: 1},
+ {"hashedKey.subField": "hashed", nonHashedKey: 1},
+ prevStage);
- //
- // Tests for a hashed _id shard key.
- //
- target.drop();
- st.shardColl(target, {_id: 1}, {_id: "hashed"});
+//
+// Tests for a hashed _id shard key.
+//
+target.drop();
+st.shardColl(target, {_id: 1}, {_id: "hashed"});
- // Test that $merge passes without specifying an "on" field.
- assertMergeSucceedsWithExpectedUniqueIndex({source: source, target: target});
+// Test that $merge passes without specifying an "on" field.
+assertMergeSucceedsWithExpectedUniqueIndex({source: source, target: target});
- // Test that $merge passes when the uniqueKey matches the shard key. Note that the _id index is
- // always create with {unique: true} regardless of whether the shard key was marked as unique
- // when the collection was sharded.
- assertMergeSucceedsWithExpectedUniqueIndex(
- {source: source, target: target, uniqueKey: {_id: 1}});
+// Test that $merge passes when the uniqueKey matches the shard key. Note that the _id index is
+// always created with {unique: true} regardless of whether the shard key was marked as unique
+// when the collection was sharded.
+assertMergeSucceedsWithExpectedUniqueIndex({source: source, target: target, uniqueKey: {_id: 1}});
- st.stop();
+st.stop();
})();
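// A minimal illustrative sketch, not part of the patch above, summarizing the constraint
// merge_hashed_shard_key.js exercises. A hashed index cannot be unique, so the hashed shard key
// index itself never satisfies $merge's unique-index requirement on the "on" fields; a separate
// non-hashed unique index prefixed by the shard key field(s) does. Assumes a 'target' collection
// already sharded on {hashedKey: "hashed"}, as in the test above.
assert.commandFailed(
    target.createIndex({hashedKey: "hashed"}, {unique: true}));  // hashed indexes cannot be unique
assert.commandWorked(
    target.createIndex({hashedKey: 1, extraField: 1}, {unique: true}));  // usable as "on" fields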
diff --git a/jstests/sharding/merge_on_fields.js b/jstests/sharding/merge_on_fields.js
index c3437603343..91345835ae7 100644
--- a/jstests/sharding/merge_on_fields.js
+++ b/jstests/sharding/merge_on_fields.js
@@ -1,87 +1,87 @@
// Tests that the "on" fields are correctly automatically generated when the user does not specify
// it in the $merge stage.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStage'.
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStage'.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
- const mongosDB = st.s0.getDB("merge_on_fields");
- const firstColl = mongosDB.first;
- const secondColl = mongosDB.second;
- const sourceCollection = mongosDB.source;
- assert.commandWorked(sourceCollection.insert([{a: 1, b: 1, c: 1, d: 1}, {a: 2, b: 2, c: 2}]));
+const mongosDB = st.s0.getDB("merge_on_fields");
+const firstColl = mongosDB.first;
+const secondColl = mongosDB.second;
+const sourceCollection = mongosDB.source;
+assert.commandWorked(sourceCollection.insert([{a: 1, b: 1, c: 1, d: 1}, {a: 2, b: 2, c: 2}]));
- // Test that the unique key will be defaulted to the document key for a sharded collection.
- st.shardColl(firstColl.getName(),
- {a: 1, b: 1, c: 1},
- {a: 1, b: 1, c: 1},
- {a: 1, b: MinKey, c: MinKey},
- mongosDB.getName());
+// Test that the unique key will be defaulted to the document key for a sharded collection.
+st.shardColl(firstColl.getName(),
+ {a: 1, b: 1, c: 1},
+ {a: 1, b: 1, c: 1},
+ {a: 1, b: MinKey, c: MinKey},
+ mongosDB.getName());
- // Write a document to each chunk.
- assert.commandWorked(firstColl.insert({_id: 1, a: -3, b: -5, c: -6}));
- assert.commandWorked(firstColl.insert({_id: 2, a: 5, b: 3, c: 2}));
+// Write a document to each chunk.
+assert.commandWorked(firstColl.insert({_id: 1, a: -3, b: -5, c: -6}));
+assert.commandWorked(firstColl.insert({_id: 2, a: 5, b: 3, c: 2}));
- // Testing operations on the same sharded collection.
- let explainResult = sourceCollection.explain().aggregate(
- [{$merge: {into: firstColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}]);
- assert.setEq(new Set(["_id", "a", "b", "c"]),
- new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
+// Testing operations on the same sharded collection.
+let explainResult = sourceCollection.explain().aggregate(
+ [{$merge: {into: firstColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}]);
+assert.setEq(new Set(["_id", "a", "b", "c"]),
+ new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
- explainResult = sourceCollection.explain().aggregate(
- [{$merge: {into: firstColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}]);
- assert.setEq(new Set(["_id", "a", "b", "c"]),
- new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
+explainResult = sourceCollection.explain().aggregate(
+ [{$merge: {into: firstColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}]);
+assert.setEq(new Set(["_id", "a", "b", "c"]),
+ new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
- // Test it with a different collection and shard key pattern.
- st.shardColl(
- secondColl.getName(), {a: 1, b: 1}, {a: 1, b: 1}, {a: 1, b: MinKey}, mongosDB.getName());
+// Test it with a different collection and shard key pattern.
+st.shardColl(
+ secondColl.getName(), {a: 1, b: 1}, {a: 1, b: 1}, {a: 1, b: MinKey}, mongosDB.getName());
- // Write a document to each chunk.
- assert.commandWorked(secondColl.insert({_id: 3, a: -1, b: -3, c: 5}));
- assert.commandWorked(secondColl.insert({_id: 4, a: 4, b: 5, c: 6}));
+// Write a document to each chunk.
+assert.commandWorked(secondColl.insert({_id: 3, a: -1, b: -3, c: 5}));
+assert.commandWorked(secondColl.insert({_id: 4, a: 4, b: 5, c: 6}));
- explainResult = sourceCollection.explain().aggregate(
- [{$merge: {into: secondColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}]);
- assert.setEq(new Set(["_id", "a", "b"]),
- new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
+explainResult = sourceCollection.explain().aggregate(
+ [{$merge: {into: secondColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}]);
+assert.setEq(new Set(["_id", "a", "b"]),
+ new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
- explainResult = sourceCollection.explain().aggregate(
- [{$merge: {into: firstColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}]);
- assert.setEq(new Set(["_id", "a", "b", "c"]),
- new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
+explainResult = sourceCollection.explain().aggregate(
+ [{$merge: {into: firstColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}]);
+assert.setEq(new Set(["_id", "a", "b", "c"]),
+ new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
- // Test that the "on" field is defaulted to _id for a collection which does not exist.
- const doesNotExist = mongosDB.doesNotExist;
- doesNotExist.drop();
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- explainResult = sourceCollection.explain().aggregate([{
- $merge: {
- into: doesNotExist.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]);
- assert.eq(["_id"], getAggPlanStage(explainResult, "$merge").$merge.on);
- });
+// Test that the "on" field is defaulted to _id for a collection which does not exist.
+const doesNotExist = mongosDB.doesNotExist;
+doesNotExist.drop();
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ explainResult = sourceCollection.explain().aggregate([{
+ $merge: {
+ into: doesNotExist.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]);
+ assert.eq(["_id"], getAggPlanStage(explainResult, "$merge").$merge.on);
+});
- // Test that the "on" field is defaulted to _id for an unsharded collection.
- const unsharded = mongosDB.unsharded;
- unsharded.drop();
- assert.commandWorked(unsharded.insert({x: 1}));
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- explainResult = sourceCollection.explain().aggregate([{
- $merge: {
- into: unsharded.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]);
- assert.eq(["_id"], getAggPlanStage(explainResult, "$merge").$merge.on);
- });
+// Test that the "on" field is defaulted to _id for an unsharded collection.
+const unsharded = mongosDB.unsharded;
+unsharded.drop();
+assert.commandWorked(unsharded.insert({x: 1}));
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ explainResult = sourceCollection.explain().aggregate([{
+ $merge: {
+ into: unsharded.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]);
+ assert.eq(["_id"], getAggPlanStage(explainResult, "$merge").$merge.on);
+});
- st.stop();
+st.stop();
})();
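// A minimal illustrative sketch, not part of the patch above: inspecting the defaulted "on"
// fields directly, assuming the collections created in merge_on_fields.js. For a sharded target
// the default is the document key (shard key fields plus _id); for unsharded or non-existent
// targets it is just ["_id"].
const explainOut = sourceCollection.explain().aggregate(
    [{$merge: {into: secondColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}]);
const mergeStage = getAggPlanStage(explainOut, "$merge");  // from jstests/libs/analyze_plan.js
jsTestLog("Defaulted $merge 'on' fields: " + tojson(mergeStage.$merge.on));  // ["_id", "a", "b"]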
diff --git a/jstests/sharding/merge_requires_unique_index.js b/jstests/sharding/merge_requires_unique_index.js
index 78ee6c7f9eb..e42a49e5dce 100644
--- a/jstests/sharding/merge_requires_unique_index.js
+++ b/jstests/sharding/merge_requires_unique_index.js
@@ -3,204 +3,235 @@
// collator-compatible index in the index catalog. This is meant to test sharding-related
// configurations that are not covered by the aggregation passthrough suites.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode,
- // assertMergeFailsWithoutUniqueIndex,
- // assertMergeSucceedsWithExpectedUniqueIndex.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode,
+ // assertMergeFailsWithoutUniqueIndex,
+                                                      // assertMergeSucceedsWithExpectedUniqueIndex.
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
- const mongosDB = st.s0.getDB("merge_requires_unique_index");
- const foreignDB = st.s0.getDB("merge_requires_unique_index_foreign");
- const sourceColl = mongosDB.source;
- let targetColl = mongosDB.target;
- sourceColl.drop();
+const mongosDB = st.s0.getDB("merge_requires_unique_index");
+const foreignDB = st.s0.getDB("merge_requires_unique_index_foreign");
+const sourceColl = mongosDB.source;
+let targetColl = mongosDB.target;
+sourceColl.drop();
- // Enable sharding on the test DB and ensure that shard0 is the primary.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+// Enable sharding on the test DB and ensure that shard0 is the primary.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- // Enable sharding on the foreign DB, except ensure that shard1 is the primary shard.
- assert.commandWorked(foreignDB.adminCommand({enableSharding: foreignDB.getName()}));
- st.ensurePrimaryShard(foreignDB.getName(), st.rs1.getURL());
+// Enable sharding on the foreign DB, except ensure that shard1 is the primary shard.
+assert.commandWorked(foreignDB.adminCommand({enableSharding: foreignDB.getName()}));
+st.ensurePrimaryShard(foreignDB.getName(), st.rs1.getURL());
- // Increase the log verbosity for sharding, in the hope of getting a clearer picture of the
- // cluster writer as part of BF-11106. This should be removed once BF-11106 is fixed.
- st.shard0.getDB("admin").setLogLevel(4, 'sharding');
- st.shard1.getDB("admin").setLogLevel(4, 'sharding');
+// Increase the log verbosity for sharding, in the hope of getting a clearer picture of the
+// cluster writer as part of BF-11106. This should be removed once BF-11106 is fixed.
+st.shard0.getDB("admin").setLogLevel(4, 'sharding');
+st.shard1.getDB("admin").setLogLevel(4, 'sharding');
- function resetTargetColl(shardKey, split) {
- targetColl.drop();
- // Shard the target collection, and set the unique flag to ensure that there's a unique
- // index on the shard key.
- assert.commandWorked(mongosDB.adminCommand(
- {shardCollection: targetColl.getFullName(), key: shardKey, unique: true}));
- assert.commandWorked(
- mongosDB.adminCommand({split: targetColl.getFullName(), middle: split}));
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: targetColl.getFullName(), find: split, to: st.rs1.getURL()}));
- }
+function resetTargetColl(shardKey, split) {
+ targetColl.drop();
+ // Shard the target collection, and set the unique flag to ensure that there's a unique
+ // index on the shard key.
+ assert.commandWorked(mongosDB.adminCommand(
+ {shardCollection: targetColl.getFullName(), key: shardKey, unique: true}));
+ assert.commandWorked(mongosDB.adminCommand({split: targetColl.getFullName(), middle: split}));
+ assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: targetColl.getFullName(), find: split, to: st.rs1.getURL()}));
+}
- function runOnFieldsTests(targetShardKey, targetSplit) {
- jsTestLog("Running unique key tests for target shard key " + tojson(targetShardKey));
- resetTargetColl(targetShardKey, targetSplit);
+function runOnFieldsTests(targetShardKey, targetSplit) {
+ jsTestLog("Running unique key tests for target shard key " + tojson(targetShardKey));
+ resetTargetColl(targetShardKey, targetSplit);
- // Not specifying "on" fields should always pass.
- assertMergeSucceedsWithExpectedUniqueIndex({source: sourceColl, target: targetColl});
+ // Not specifying "on" fields should always pass.
+ assertMergeSucceedsWithExpectedUniqueIndex({source: sourceColl, target: targetColl});
- // Since the target collection is sharded with a unique shard key, specifying "on" fields
- // that is equal to the shard key should be valid.
- assertMergeSucceedsWithExpectedUniqueIndex(
- {source: sourceColl, target: targetColl, onFields: Object.keys(targetShardKey)});
+ // Since the target collection is sharded with a unique shard key, specifying "on" fields
+    // that are equal to the shard key should be valid.
+ assertMergeSucceedsWithExpectedUniqueIndex(
+ {source: sourceColl, target: targetColl, onFields: Object.keys(targetShardKey)});
- // Create a compound "on" fields consisting of the shard key and one additional field.
- let prefixPipeline = [{$addFields: {newField: 1}}];
- const indexSpec = Object.merge(targetShardKey, {newField: 1});
+    // Create compound "on" fields consisting of the shard key and one additional field.
+ let prefixPipeline = [{$addFields: {newField: 1}}];
+ const indexSpec = Object.merge(targetShardKey, {newField: 1});
- // Expect the $merge to fail since we haven't created a unique index on the compound
- // "on" fields.
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- prevStages: prefixPipeline
- });
+ // Expect the $merge to fail since we haven't created a unique index on the compound
+ // "on" fields.
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ prevStages: prefixPipeline
+ });
- // Create the unique index and verify that the "on" fields is now valid.
- assert.commandWorked(targetColl.createIndex(indexSpec, {unique: true}));
- assertMergeSucceedsWithExpectedUniqueIndex({
- source: sourceColl,
- target: targetColl,
- onFields: Object.keys(indexSpec),
- prevStages: prefixPipeline
- });
+    // Create the unique index and verify that the "on" fields are now valid.
+ assert.commandWorked(targetColl.createIndex(indexSpec, {unique: true}));
+ assertMergeSucceedsWithExpectedUniqueIndex({
+ source: sourceColl,
+ target: targetColl,
+ onFields: Object.keys(indexSpec),
+ prevStages: prefixPipeline
+ });
- // Create a non-unique index and make sure that doesn't work.
- assert.commandWorked(targetColl.dropIndex(indexSpec));
- assert.commandWorked(targetColl.createIndex(indexSpec));
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- prevStages: prefixPipeline
- });
+ // Create a non-unique index and make sure that doesn't work.
+ assert.commandWorked(targetColl.dropIndex(indexSpec));
+ assert.commandWorked(targetColl.createIndex(indexSpec));
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ prevStages: prefixPipeline
+ });
- // Test that a unique, partial index on the "on" fields cannot be used to satisfy the
- // requirement.
- resetTargetColl(targetShardKey, targetSplit);
- assert.commandWorked(targetColl.createIndex(
- indexSpec, {unique: true, partialFilterExpression: {a: {$gte: 2}}}));
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- prevStages: prefixPipeline
- });
+ // Test that a unique, partial index on the "on" fields cannot be used to satisfy the
+ // requirement.
+ resetTargetColl(targetShardKey, targetSplit);
+ assert.commandWorked(
+ targetColl.createIndex(indexSpec, {unique: true, partialFilterExpression: {a: {$gte: 2}}}));
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ prevStages: prefixPipeline
+ });
- // Test that a unique index on the "on" fields cannot be used to satisfy the requirement if
- // it has a different collation.
- resetTargetColl(targetShardKey, targetSplit);
- assert.commandWorked(
- targetColl.createIndex(indexSpec, {unique: true, collation: {locale: "en_US"}}));
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- prevStages: prefixPipeline
- });
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- options: {collation: {locale: "en"}},
- prevStages: prefixPipeline
- });
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- options: {collation: {locale: "simple"}},
- prevStages: prefixPipeline
- });
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- options: {collation: {locale: "en_US", strength: 1}},
- prevStages: prefixPipeline
- });
- assertMergeSucceedsWithExpectedUniqueIndex({
- source: sourceColl,
- target: targetColl,
- onFields: Object.keys(indexSpec),
- options: {collation: {locale: "en_US"}},
- prevStages: prefixPipeline
- });
+ // Test that a unique index on the "on" fields cannot be used to satisfy the requirement if
+ // it has a different collation.
+ resetTargetColl(targetShardKey, targetSplit);
+ assert.commandWorked(
+ targetColl.createIndex(indexSpec, {unique: true, collation: {locale: "en_US"}}));
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ prevStages: prefixPipeline
+ });
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ options: {collation: {locale: "en"}},
+ prevStages: prefixPipeline
+ });
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ options: {collation: {locale: "simple"}},
+ prevStages: prefixPipeline
+ });
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ options: {collation: {locale: "en_US", strength: 1}},
+ prevStages: prefixPipeline
+ });
+ assertMergeSucceedsWithExpectedUniqueIndex({
+ source: sourceColl,
+ target: targetColl,
+ onFields: Object.keys(indexSpec),
+ options: {collation: {locale: "en_US"}},
+ prevStages: prefixPipeline
+ });
- // Test that a unique index with dotted field names can be used.
- resetTargetColl(targetShardKey, targetSplit);
- const dottedPathIndexSpec = Object.merge(targetShardKey, {"newField.subField": 1});
- assert.commandWorked(targetColl.createIndex(dottedPathIndexSpec, {unique: true}));
+ // Test that a unique index with dotted field names can be used.
+ resetTargetColl(targetShardKey, targetSplit);
+ const dottedPathIndexSpec = Object.merge(targetShardKey, {"newField.subField": 1});
+ assert.commandWorked(targetColl.createIndex(dottedPathIndexSpec, {unique: true}));
- // No longer a supporting index on the original compound "on" fields.
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- prevStages: prefixPipeline
- });
+    // There is no longer a supporting index on the original compound "on" fields.
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ prevStages: prefixPipeline
+ });
- // Test that an embedded object matching the "on" fields is valid.
- prefixPipeline = [{$addFields: {"newField.subField": 5}}];
- assertMergeSucceedsWithExpectedUniqueIndex({
- source: sourceColl,
- target: targetColl,
- onFields: Object.keys(dottedPathIndexSpec),
- prevStages: prefixPipeline
- });
+ // Test that an embedded object matching the "on" fields is valid.
+ prefixPipeline = [{$addFields: {"newField.subField": 5}}];
+ assertMergeSucceedsWithExpectedUniqueIndex({
+ source: sourceColl,
+ target: targetColl,
+ onFields: Object.keys(dottedPathIndexSpec),
+ prevStages: prefixPipeline
+ });
- // Test that we cannot use arrays with a dotted path within a $merge.
- resetTargetColl(targetShardKey, targetSplit);
- assert.commandWorked(targetColl.createIndex(dottedPathIndexSpec, {unique: true}));
- withEachMergeMode(
- ({whenMatchedMode, whenNotMatchedMode}) => {
- assertErrorCode(
- sourceColl,
- [
- {
- $replaceRoot:
- {newRoot: {$mergeObjects: ["$$ROOT", {newField: [{subField: 1}]}]}}
- },
- {
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
+ // Test that we cannot use arrays with a dotted path within a $merge.
+ resetTargetColl(targetShardKey, targetSplit);
+ assert.commandWorked(targetColl.createIndex(dottedPathIndexSpec, {unique: true}));
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ assertErrorCode(sourceColl,
+ [
+ {
+ $replaceRoot: {
+ newRoot:
+ {$mergeObjects: ["$$ROOT", {newField: [{subField: 1}]}]}
+ }
},
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode,
- on: Object.keys(dottedPathIndexSpec)
- }
- }
- ],
- 51132);
- });
+ {
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode,
+ on: Object.keys(dottedPathIndexSpec)
+ }
+ }
+ ],
+ 51132);
+ });
- // Test that a unique index that is multikey can still be used.
- resetTargetColl(targetShardKey, targetSplit);
- assert.commandWorked(targetColl.createIndex(dottedPathIndexSpec, {unique: true}));
- assert.commandWorked(targetColl.insert(
- Object.merge(targetShardKey, {newField: [{subField: "hi"}, {subField: "hello"}]})));
- assert.commandWorked(sourceColl.update(
- {}, {$set: {newField: {subField: "hi"}, proofOfUpdate: "PROOF"}}, {multi: true}));
+ // Test that a unique index that is multikey can still be used.
+ resetTargetColl(targetShardKey, targetSplit);
+ assert.commandWorked(targetColl.createIndex(dottedPathIndexSpec, {unique: true}));
+ assert.commandWorked(targetColl.insert(
+ Object.merge(targetShardKey, {newField: [{subField: "hi"}, {subField: "hello"}]})));
+ assert.commandWorked(sourceColl.update(
+ {}, {$set: {newField: {subField: "hi"}, proofOfUpdate: "PROOF"}}, {multi: true}));
- // If whenMatched is "replace" and whenNotMatched is "insert", expect the command to
- // fail if the "on" fields does not contain _id, since a replacement-style update will fail
- // if attempting to modify _id.
- if (dottedPathIndexSpec.hasOwnProperty("_id")) {
- assert.doesNotThrow(() => sourceColl.aggregate([{
+ // If whenMatched is "replace" and whenNotMatched is "insert", expect the command to
+    // fail if the "on" fields do not contain _id, since a replacement-style update will fail
+ // if attempting to modify _id.
+ if (dottedPathIndexSpec.hasOwnProperty("_id")) {
+ assert.doesNotThrow(() => sourceColl.aggregate([{
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: Object.keys(dottedPathIndexSpec)
+ }
+ }]));
+ assert.docEq(targetColl.findOne({"newField.subField": "hi", proofOfUpdate: "PROOF"},
+ {"newField.subField": 1, proofOfUpdate: 1, _id: 0}),
+ {newField: {subField: "hi"}, proofOfUpdate: "PROOF"});
+ } else {
+ assertErrMsgContains(sourceColl,
+ [{
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: Object.keys(dottedPathIndexSpec)
+ }
+ }],
+ ErrorCodes.ImmutableField,
+ "did you attempt to modify the _id or the shard key?");
+
+ assert.doesNotThrow(() => sourceColl.aggregate([
+ {$project: {_id: 0}},
+ {
$merge: {
into: {
db: targetColl.getDB().getName(),
@@ -210,85 +241,53 @@
whenNotMatched: "insert",
on: Object.keys(dottedPathIndexSpec)
}
- }]));
- assert.docEq(targetColl.findOne({"newField.subField": "hi", proofOfUpdate: "PROOF"},
- {"newField.subField": 1, proofOfUpdate: 1, _id: 0}),
- {newField: {subField: "hi"}, proofOfUpdate: "PROOF"});
- } else {
- assertErrMsgContains(sourceColl,
- [{
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: Object.keys(dottedPathIndexSpec)
- }
- }],
- ErrorCodes.ImmutableField,
- "did you attempt to modify the _id or the shard key?");
-
- assert.doesNotThrow(() => sourceColl.aggregate([
- {$project: {_id: 0}},
- {
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: Object.keys(dottedPathIndexSpec)
- }
- }
- ]));
- assert.docEq(targetColl.findOne({"newField.subField": "hi", proofOfUpdate: "PROOF"},
- {"newField.subField": 1, proofOfUpdate: 1, _id: 0}),
- {newField: {subField: "hi"}, proofOfUpdate: "PROOF"});
- }
+ }
+ ]));
+ assert.docEq(targetColl.findOne({"newField.subField": "hi", proofOfUpdate: "PROOF"},
+ {"newField.subField": 1, proofOfUpdate: 1, _id: 0}),
+ {newField: {subField: "hi"}, proofOfUpdate: "PROOF"});
}
+}
- function testAgainstDB(targetDB) {
- targetColl = targetDB["target"];
- targetColl.drop();
+function testAgainstDB(targetDB) {
+ targetColl = targetDB["target"];
+ targetColl.drop();
- //
- // Test unsharded source and sharded target collections.
- //
- let targetShardKey = {_id: 1, a: 1, b: 1};
- let splitPoint = {_id: 0, a: 0, b: 0};
- sourceColl.drop();
- assert.commandWorked(sourceColl.insert([{a: 0, b: 0}, {a: 1, b: 1}]));
- runOnFieldsTests(targetShardKey, splitPoint);
+ //
+ // Test unsharded source and sharded target collections.
+ //
+ let targetShardKey = {_id: 1, a: 1, b: 1};
+ let splitPoint = {_id: 0, a: 0, b: 0};
+ sourceColl.drop();
+ assert.commandWorked(sourceColl.insert([{a: 0, b: 0}, {a: 1, b: 1}]));
+ runOnFieldsTests(targetShardKey, splitPoint);
- // Test with a shard key that does *not* include _id.
- targetShardKey = {a: 1, b: 1};
- splitPoint = {a: 0, b: 0};
- runOnFieldsTests(targetShardKey, splitPoint);
+ // Test with a shard key that does *not* include _id.
+ targetShardKey = {a: 1, b: 1};
+ splitPoint = {a: 0, b: 0};
+ runOnFieldsTests(targetShardKey, splitPoint);
- //
- // Test both source and target collections as sharded.
- //
- targetShardKey = {_id: 1, a: 1, b: 1};
- splitPoint = {_id: 0, a: 0, b: 0};
- sourceColl.drop();
- st.shardColl(sourceColl.getName(), {a: 1}, {a: 0}, {a: 1}, mongosDB.getName());
- assert.commandWorked(sourceColl.insert([{a: 0, b: 0}, {a: 1, b: 1}]));
- runOnFieldsTests(targetShardKey, splitPoint);
+ //
+ // Test both source and target collections as sharded.
+ //
+ targetShardKey = {_id: 1, a: 1, b: 1};
+ splitPoint = {_id: 0, a: 0, b: 0};
+ sourceColl.drop();
+ st.shardColl(sourceColl.getName(), {a: 1}, {a: 0}, {a: 1}, mongosDB.getName());
+ assert.commandWorked(sourceColl.insert([{a: 0, b: 0}, {a: 1, b: 1}]));
+ runOnFieldsTests(targetShardKey, splitPoint);
- // Re-run the test with a shard key that does *not* include _id.
- targetShardKey = {a: 1, b: 1};
- splitPoint = {a: 0, b: 0};
- runOnFieldsTests(targetShardKey, splitPoint);
- }
+ // Re-run the test with a shard key that does *not* include _id.
+ targetShardKey = {a: 1, b: 1};
+ splitPoint = {a: 0, b: 0};
+ runOnFieldsTests(targetShardKey, splitPoint);
+}
- // First test $merge to the same database as the source.
- testAgainstDB(mongosDB);
+// First test $merge to the same database as the source.
+testAgainstDB(mongosDB);
- // Then test against a foreign database, with the same expected behavior.
- testAgainstDB(foreignDB);
+// Then test against a foreign database, with the same expected behavior.
+testAgainstDB(foreignDB);
- st.stop();
+st.stop();
})();
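// A minimal illustrative sketch, not part of the patch above, condensing the index requirements
// that merge_requires_unique_index.js checks. Assumes a target collection 'coll' sharded on
// {a: 1}; the field names are hypothetical. The index backing the $merge "on" fields must be
// unique, must not be partial, and must match the collation the aggregate runs with.
assert.commandWorked(coll.createIndex({a: 1, newField: 1}, {unique: true}));  // usable
assert.commandWorked(coll.createIndex(
    {a: 1, other: 1},
    {unique: true, partialFilterExpression: {a: {$gte: 0}}}));  // not usable by $merge: partial
assert.commandWorked(coll.createIndex(
    {a: 1, loc: 1},
    {unique: true, collation: {locale: "en_US"}}));  // usable only with a matching collation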
diff --git a/jstests/sharding/merge_stale_on_fields.js b/jstests/sharding/merge_stale_on_fields.js
index 685840c527c..87a48a0482c 100644
--- a/jstests/sharding/merge_stale_on_fields.js
+++ b/jstests/sharding/merge_stale_on_fields.js
@@ -1,93 +1,93 @@
// Tests that an $merge stage is able to default the "on" fields to the correct value - even if one
// or more of the involved nodes has a stale cache of the routing information.
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode,
-
- const st = new ShardingTest({shards: 2, mongos: 2});
-
- const dbName = "merge_stale_unique_key";
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-
- const source = st.s0.getDB(dbName).source;
- const target = st.s0.getDB(dbName).target;
-
- // Test that an $merge through a stale mongos can still use the correct "on" fields and succeed.
- (function testDefaultOnFieldsIsRecent() {
- const freshMongos = st.s0;
- const staleMongos = st.s1;
-
- // Set up two collections for an aggregate with an $merge: The source collection will be
- // unsharded and the target collection will be sharded amongst the two shards.
- const staleMongosDB = staleMongos.getDB(dbName);
- st.shardColl(source, {_id: 1}, {_id: 0}, {_id: 1});
-
- (function setupStaleMongos() {
- // Shard the collection through 'staleMongos', setting up 'staleMongos' to believe the
- // collection is sharded by {sk: 1, _id: 1}.
- assert.commandWorked(staleMongosDB.adminCommand(
- {shardCollection: target.getFullName(), key: {sk: 1, _id: 1}}));
- // Perform a query through that mongos to ensure the cache is populated.
- assert.eq(0, staleMongosDB[target.getName()].find().itcount());
-
- // Drop the collection from the other mongos - it is no longer sharded but the stale
- // mongos doesn't know that yet.
- target.drop();
- }());
-
- // At this point 'staleMongos' will believe that the target collection is sharded. This
- // should not prevent it from running an $merge without "on" fields specified.
- // Specifically, the mongos should force a refresh of its cache before defaulting the "on"
- // fields.
- assert.commandWorked(source.insert({_id: 'seed'}));
-
- // If we had used the stale "on" fields, this aggregation would fail since the documents do
- // not have an 'sk' field.
- assert.doesNotThrow(() => staleMongosDB[source.getName()].aggregate([
- {$merge: {into: target.getName(), whenMatched: 'fail', whenNotMatched: 'insert'}}
- ]));
- assert.eq(target.find().toArray(), [{_id: 'seed'}]);
+"use strict";
+
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode,
+
+const st = new ShardingTest({shards: 2, mongos: 2});
+
+const dbName = "merge_stale_unique_key";
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+
+const source = st.s0.getDB(dbName).source;
+const target = st.s0.getDB(dbName).target;
+
+// Test that a $merge through a stale mongos can still use the correct "on" fields and succeed.
+(function testDefaultOnFieldsIsRecent() {
+ const freshMongos = st.s0;
+ const staleMongos = st.s1;
+
+    // Set up two collections for an aggregate with a $merge: the source collection will be
+ // unsharded and the target collection will be sharded amongst the two shards.
+ const staleMongosDB = staleMongos.getDB(dbName);
+ st.shardColl(source, {_id: 1}, {_id: 0}, {_id: 1});
+
+ (function setupStaleMongos() {
+ // Shard the collection through 'staleMongos', setting up 'staleMongos' to believe the
+ // collection is sharded by {sk: 1, _id: 1}.
+ assert.commandWorked(staleMongosDB.adminCommand(
+ {shardCollection: target.getFullName(), key: {sk: 1, _id: 1}}));
+ // Perform a query through that mongos to ensure the cache is populated.
+ assert.eq(0, staleMongosDB[target.getName()].find().itcount());
+
+ // Drop the collection from the other mongos - it is no longer sharded but the stale
+ // mongos doesn't know that yet.
target.drop();
}());
- // Test that if the collection is dropped and re-sharded during the course of the aggregation
- // that the operation will fail rather than proceed with the old shard key.
- function testEpochChangeDuringAgg({mergeSpec, failpoint, failpointData}) {
- // Converts a single string or an array of strings into it's object spec form. For instance,
- // for input ["a", "b"] the returned object would be {a: 1, b: 1}.
- function indexSpecFromOnFields(onFields) {
- let spec = {};
- if (typeof(onFields) == "string") {
- spec[onFields] = 1;
- } else {
- onFields.forEach((field) => {
- spec[field] = 1;
- });
- }
- return spec;
- }
+ // At this point 'staleMongos' will believe that the target collection is sharded. This
+ // should not prevent it from running an $merge without "on" fields specified.
+ // Specifically, the mongos should force a refresh of its cache before defaulting the "on"
+ // fields.
+ assert.commandWorked(source.insert({_id: 'seed'}));
+
+ // If we had used the stale "on" fields, this aggregation would fail since the documents do
+ // not have an 'sk' field.
+ assert.doesNotThrow(
+ () => staleMongosDB[source.getName()].aggregate(
+ [{$merge: {into: target.getName(), whenMatched: 'fail', whenNotMatched: 'insert'}}]));
+ assert.eq(target.find().toArray(), [{_id: 'seed'}]);
+ target.drop();
+}());
- target.drop();
- if (mergeSpec.hasOwnProperty('on')) {
- assert.commandWorked(
- target.createIndex(indexSpecFromOnFields(mergeSpec.on), {unique: true}));
- assert.commandWorked(st.s.adminCommand(
- {shardCollection: target.getFullName(), key: indexSpecFromOnFields(mergeSpec.on)}));
+// Test that if the collection is dropped and re-sharded during the course of the aggregation,
+// the operation will fail rather than proceed with the old shard key.
+function testEpochChangeDuringAgg({mergeSpec, failpoint, failpointData}) {
+    // Converts a single string or an array of strings into its object spec form. For instance,
+ // for input ["a", "b"] the returned object would be {a: 1, b: 1}.
+ function indexSpecFromOnFields(onFields) {
+ let spec = {};
+ if (typeof (onFields) == "string") {
+ spec[onFields] = 1;
} else {
- assert.commandWorked(
- st.s.adminCommand({shardCollection: target.getFullName(), key: {sk: 1, _id: 1}}));
+ onFields.forEach((field) => {
+ spec[field] = 1;
+ });
}
+ return spec;
+ }
- // Use a failpoint to make the query feeding into the aggregate hang while we drop the
- // collection.
- [st.rs0.getPrimary(), st.rs1.getPrimary()].forEach((mongod) => {
- assert.commandWorked(mongod.adminCommand(
- {configureFailPoint: failpoint, mode: "alwaysOn", data: failpointData || {}}));
- });
- let parallelShellJoiner;
- try {
- let parallelCode = `
+ target.drop();
+ if (mergeSpec.hasOwnProperty('on')) {
+ assert.commandWorked(
+ target.createIndex(indexSpecFromOnFields(mergeSpec.on), {unique: true}));
+ assert.commandWorked(st.s.adminCommand(
+ {shardCollection: target.getFullName(), key: indexSpecFromOnFields(mergeSpec.on)}));
+ } else {
+ assert.commandWorked(
+ st.s.adminCommand({shardCollection: target.getFullName(), key: {sk: 1, _id: 1}}));
+ }
+
+ // Use a failpoint to make the query feeding into the aggregate hang while we drop the
+ // collection.
+ [st.rs0.getPrimary(), st.rs1.getPrimary()].forEach((mongod) => {
+ assert.commandWorked(mongod.adminCommand(
+ {configureFailPoint: failpoint, mode: "alwaysOn", data: failpointData || {}}));
+ });
+ let parallelShellJoiner;
+ try {
+ let parallelCode = `
const source = db.getSiblingDB("${dbName}").${source.getName()};
const error = assert.throws(() => source.aggregate([
{$addFields: {sk: "$_id"}},
@@ -96,103 +96,101 @@
assert.eq(error.code, ErrorCodes.StaleEpoch);
`;
- if (mergeSpec.hasOwnProperty("on")) {
- // If a user specifies their own "on" fields, we don't need to fail an aggregation
- // if the collection is dropped and recreated or the epoch otherwise changes. We are
- // allowed to fail such an operation should we choose to in the future, but for now
- // we don't expect to because we do not do anything special on mongos to ensure the
- // catalog cache is up to date, so do not want to attach mongos's believed epoch to
- // the command for the shards.
- parallelCode = `
+ if (mergeSpec.hasOwnProperty("on")) {
+ // If a user specifies their own "on" fields, we don't need to fail an aggregation
+ // if the collection is dropped and recreated or the epoch otherwise changes. We are
+ // allowed to fail such an operation should we choose to in the future, but for now
+ // we don't expect to because we do not do anything special on mongos to ensure the
+            // catalog cache is up to date, so we don't want to attach mongos's believed epoch to
+ // the command for the shards.
+ parallelCode = `
const source = db.getSiblingDB("${dbName}").${source.getName()};
assert.doesNotThrow(() => source.aggregate([
{$addFields: {sk: "$_id"}},
{$merge: ${tojsononeline(mergeSpec)}}
]));
`;
- }
-
- parallelShellJoiner = startParallelShell(parallelCode, st.s.port);
-
- // Wait for the merging $merge to appear in the currentOp output from the shards. We
- // should see that the $merge stage has an 'epoch' field serialized from the mongos.
- const getAggOps = function() {
- return st.s.getDB("admin")
- .aggregate([
- {$currentOp: {}},
- {$match: {"cursor.originatingCommand.pipeline": {$exists: true}}}
- ])
- .toArray();
- };
- const hasMergeRunning = function() {
- return getAggOps()
- .filter((op) => {
- const pipeline = op.cursor.originatingCommand.pipeline;
- return pipeline.length > 0 &&
- pipeline[pipeline.length - 1].hasOwnProperty("$merge");
- })
- .length >= 1;
- };
- assert.soon(hasMergeRunning, () => tojson(getAggOps()));
-
- // Drop the collection so that the epoch changes.
- target.drop();
- } finally {
- [st.rs0.getPrimary(), st.rs1.getPrimary()].forEach((mongod) => {
- assert.commandWorked(
- mongod.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- });
}
- parallelShellJoiner();
- }
- // Insert enough documents to force a yield.
- const bulk = source.initializeUnorderedBulkOp();
- for (let i = 0; i < 1000; ++i) {
- bulk.insert({_id: i});
- }
- assert.commandWorked(bulk.execute());
-
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- // Skip the combination of merge modes which will fail depending on the contents of the
- // source and target collection, as this will cause a different assertion error from the one
- // expected.
- if (whenNotMatchedMode == "fail")
- return;
-
- testEpochChangeDuringAgg({
- mergeSpec: {
- into: target.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- },
- failpoint: "setYieldAllLocksHang",
- failpointData: {namespace: source.getFullName()}
- });
- testEpochChangeDuringAgg({
- mergeSpec: {
- into: target.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode,
- on: "sk"
- },
- failpoint: "setYieldAllLocksHang",
- failpointData: {namespace: source.getFullName()}
+ parallelShellJoiner = startParallelShell(parallelCode, st.s.port);
+
+ // Wait for the merging $merge to appear in the currentOp output from the shards. We
+ // should see that the $merge stage has an 'epoch' field serialized from the mongos.
+ const getAggOps = function() {
+ return st.s.getDB("admin")
+ .aggregate([
+ {$currentOp: {}},
+ {$match: {"cursor.originatingCommand.pipeline": {$exists: true}}}
+ ])
+ .toArray();
+ };
+ const hasMergeRunning = function() {
+ return getAggOps()
+ .filter((op) => {
+ const pipeline = op.cursor.originatingCommand.pipeline;
+ return pipeline.length > 0 &&
+ pipeline[pipeline.length - 1].hasOwnProperty("$merge");
+ })
+ .length >= 1;
+ };
+ assert.soon(hasMergeRunning, () => tojson(getAggOps()));
+
+ // Drop the collection so that the epoch changes.
+ target.drop();
+ } finally {
+ [st.rs0.getPrimary(), st.rs1.getPrimary()].forEach((mongod) => {
+ assert.commandWorked(mongod.adminCommand({configureFailPoint: failpoint, mode: "off"}));
});
+ }
+ parallelShellJoiner();
+}
+
+// Insert enough documents to force a yield.
+const bulk = source.initializeUnorderedBulkOp();
+for (let i = 0; i < 1000; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute());
+
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ // Skip the combination of merge modes which will fail depending on the contents of the
+ // source and target collection, as this will cause a different assertion error from the one
+ // expected.
+ if (whenNotMatchedMode == "fail")
+ return;
- });
- // Test with some different failpoints to prove we will detect an epoch change in the middle
- // of the inserts or updates.
testEpochChangeDuringAgg({
- mergeSpec: {into: target.getName(), whenMatched: "fail", whenNotMatched: "insert"},
- failpoint: "hangDuringBatchInsert",
- failpointData: {nss: target.getFullName()}
+ mergeSpec: {
+ into: target.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ },
+ failpoint: "setYieldAllLocksHang",
+ failpointData: {namespace: source.getFullName()}
});
testEpochChangeDuringAgg({
- mergeSpec: {into: target.getName(), whenMatched: "replace", whenNotMatched: "insert"},
- failpoint: "hangDuringBatchUpdate",
- failpointData: {nss: target.getFullName()}
+ mergeSpec: {
+ into: target.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode,
+ on: "sk"
+ },
+ failpoint: "setYieldAllLocksHang",
+ failpointData: {namespace: source.getFullName()}
});
-
- st.stop();
+});
+// Test with some different failpoints to prove we will detect an epoch change in the middle
+// of the inserts or updates.
+testEpochChangeDuringAgg({
+ mergeSpec: {into: target.getName(), whenMatched: "fail", whenNotMatched: "insert"},
+ failpoint: "hangDuringBatchInsert",
+ failpointData: {nss: target.getFullName()}
+});
+testEpochChangeDuringAgg({
+ mergeSpec: {into: target.getName(), whenMatched: "replace", whenNotMatched: "insert"},
+ failpoint: "hangDuringBatchUpdate",
+ failpointData: {nss: target.getFullName()}
+});
+
+st.stop();
}());
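The behavior exercised by merge_stale_on_fields.js above comes down to how $merge chooses its "on"
fields. As a rough sketch outside the patch itself, assuming 'db' is a mongos connection and a
"target" collection sharded by {sk: 1, _id: 1}:

// Rough sketch (not from the patch): defaulted vs. explicit "on" fields for $merge.
// The tests above expect mongos to refresh its routing cache before defaulting the "on" fields
// from the target's current shard key, so each output document must carry those fields.
db.source.aggregate(
    [{$merge: {into: "target", whenMatched: "fail", whenNotMatched: "insert"}}]);

// Explicit "on": the listed fields are used as given and must be backed by a unique index on the
// target; per the comments above, no epoch check is attached for the shards in this case.
db.source.aggregate([
    {$addFields: {sk: "$_id"}},
    {$merge: {into: "target", on: ["sk", "_id"], whenMatched: "replace", whenNotMatched: "insert"}}
]);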
diff --git a/jstests/sharding/merge_to_existing.js b/jstests/sharding/merge_to_existing.js
index ff653186fcb..3e9038f9cc4 100644
--- a/jstests/sharding/merge_to_existing.js
+++ b/jstests/sharding/merge_to_existing.js
@@ -1,150 +1,150 @@
// Tests for $merge with an existing target collection.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
- const mongosDB = st.s0.getDB("source_db");
- const sourceColl = mongosDB["source_coll"];
- const outputCollSameDb = mongosDB[jsTestName() + "_merge"];
+const mongosDB = st.s0.getDB("source_db");
+const sourceColl = mongosDB["source_coll"];
+const outputCollSameDb = mongosDB[jsTestName() + "_merge"];
- function testMerge(sourceColl, targetColl, shardedSource, shardedTarget) {
- jsTestLog(`Testing $merge from ${sourceColl.getFullName()} ` +
- `(${shardedSource ? "sharded" : "unsharded"}) to ${targetColl.getFullName()} ` +
- `(${shardedTarget ? "sharded" : "unsharded"})`);
- sourceColl.drop();
- targetColl.drop();
- assert.commandWorked(targetColl.runCommand("create"));
+function testMerge(sourceColl, targetColl, shardedSource, shardedTarget) {
+ jsTestLog(`Testing $merge from ${sourceColl.getFullName()} ` +
+ `(${shardedSource ? "sharded" : "unsharded"}) to ${targetColl.getFullName()} ` +
+ `(${shardedTarget ? "sharded" : "unsharded"})`);
+ sourceColl.drop();
+ targetColl.drop();
+ assert.commandWorked(targetColl.runCommand("create"));
- if (shardedSource) {
- st.shardColl(sourceColl, {_id: 1}, {_id: 0}, {_id: 1}, sourceColl.getDB().getName());
+ if (shardedSource) {
+ st.shardColl(sourceColl, {_id: 1}, {_id: 0}, {_id: 1}, sourceColl.getDB().getName());
+ }
+
+ if (shardedTarget) {
+ st.shardColl(targetColl, {_id: 1}, {_id: 0}, {_id: 1}, targetColl.getDB().getName());
+ }
+
+ for (let i = -5; i < 5; i++) {
+ assert.commandWorked(sourceColl.insert({_id: i}));
+ }
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ // Test without documents in target collection.
+ assert.commandWorked(targetColl.remove({}));
+ if (whenNotMatchedMode == "fail") {
+ // Test whenNotMatchedMode: "fail" to an existing collection.
+ assertErrorCode(sourceColl,
+ [{
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }],
+ 13113);
+ } else {
+ assert.doesNotThrow(() => sourceColl.aggregate([{
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]));
+ assert.eq(whenNotMatchedMode == "discard" ? 0 : 10, targetColl.find().itcount());
}
- if (shardedTarget) {
- st.shardColl(targetColl, {_id: 1}, {_id: 0}, {_id: 1}, targetColl.getDB().getName());
+ // Test with documents in target collection. Every document in the source collection is
+        // present in the target, plus some additional documents that don't match.
+ assert.commandWorked(targetColl.remove({}));
+ for (let i = -10; i < 5; i++) {
+ assert.commandWorked(targetColl.insert({_id: i}));
}
- for (let i = -5; i < 5; i++) {
- assert.commandWorked(sourceColl.insert({_id: i}));
+ if (whenMatchedMode == "fail") {
+ // Test whenMatched: "fail" to an existing collection with unique key conflicts.
+ assertErrorCode(sourceColl,
+ [{
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }],
+ ErrorCodes.DuplicateKey);
+ } else {
+ assert.doesNotThrow(() => sourceColl.aggregate([{
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]));
}
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- // Test without documents in target collection.
- assert.commandWorked(targetColl.remove({}));
- if (whenNotMatchedMode == "fail") {
- // Test whenNotMatchedMode: "fail" to an existing collection.
- assertErrorCode(sourceColl,
- [{
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }],
- 13113);
- } else {
- assert.doesNotThrow(() => sourceColl.aggregate([{
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]));
- assert.eq(whenNotMatchedMode == "discard" ? 0 : 10, targetColl.find().itcount());
- }
-
- // Test with documents in target collection. Every document in the source collection is
- // present in the target, plus some additional documents that doesn't match.
- assert.commandWorked(targetColl.remove({}));
- for (let i = -10; i < 5; i++) {
- assert.commandWorked(targetColl.insert({_id: i}));
- }
-
- if (whenMatchedMode == "fail") {
- // Test whenMatched: "fail" to an existing collection with unique key conflicts.
- assertErrorCode(sourceColl,
- [{
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }],
- ErrorCodes.DuplicateKey);
- } else {
- assert.doesNotThrow(() => sourceColl.aggregate([{
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]));
- }
- assert.eq(15, targetColl.find().itcount());
- });
-
- // Legacy $out is only supported to the same database.
- if (sourceColl.getDB() === targetColl.getDB()) {
- if (shardedTarget) {
- // Test that legacy $out fails if the target collection is sharded.
- assertErrorCode(sourceColl, [{$out: targetColl.getName()}], 28769);
- } else {
- // Test that legacy $out will drop the target collection and replace with the
- // contents of the source collection.
- sourceColl.aggregate([{$out: targetColl.getName()}]);
- assert.eq(10, targetColl.find().itcount());
- }
+ assert.eq(15, targetColl.find().itcount());
+ });
+
+ // Legacy $out is only supported to the same database.
+ if (sourceColl.getDB() === targetColl.getDB()) {
+ if (shardedTarget) {
+ // Test that legacy $out fails if the target collection is sharded.
+ assertErrorCode(sourceColl, [{$out: targetColl.getName()}], 28769);
+ } else {
+            // Test that legacy $out will drop the target collection and replace it with the
+ // contents of the source collection.
+ sourceColl.aggregate([{$out: targetColl.getName()}]);
+ assert.eq(10, targetColl.find().itcount());
}
}
+}
- //
- // Tests for $merge where the output collection is in the same database as the source
- // collection.
- //
+//
+// Tests for $merge where the output collection is in the same database as the source
+// collection.
+//
- // Test with unsharded source and sharded target collection.
- testMerge(sourceColl, outputCollSameDb, false, true);
+// Test with unsharded source and sharded target collection.
+testMerge(sourceColl, outputCollSameDb, false, true);
- // Test with sharded source and sharded target collection.
- testMerge(sourceColl, outputCollSameDb, true, true);
+// Test with sharded source and sharded target collection.
+testMerge(sourceColl, outputCollSameDb, true, true);
- // Test with sharded source and unsharded target collection.
- testMerge(sourceColl, outputCollSameDb, true, false);
+// Test with sharded source and unsharded target collection.
+testMerge(sourceColl, outputCollSameDb, true, false);
- // Test with unsharded source and unsharded target collection.
- testMerge(sourceColl, outputCollSameDb, false, false);
+// Test with unsharded source and unsharded target collection.
+testMerge(sourceColl, outputCollSameDb, false, false);
- //
- // Tests for $merge to a database that differs from the source collection's database.
- //
- const foreignDb = st.s0.getDB("foreign_db");
- const outputCollDiffDb = foreignDb["output_coll"];
+//
+// Tests for $merge to a database that differs from the source collection's database.
+//
+const foreignDb = st.s0.getDB("foreign_db");
+const outputCollDiffDb = foreignDb["output_coll"];
- // Test with sharded source and sharded target collection.
- testMerge(sourceColl, outputCollDiffDb, true, true);
+// Test with sharded source and sharded target collection.
+testMerge(sourceColl, outputCollDiffDb, true, true);
- // Test with unsharded source and unsharded target collection.
- testMerge(sourceColl, outputCollDiffDb, false, false);
+// Test with unsharded source and unsharded target collection.
+testMerge(sourceColl, outputCollDiffDb, false, false);
- // Test with unsharded source and sharded target collection.
- testMerge(sourceColl, outputCollDiffDb, false, true);
+// Test with unsharded source and sharded target collection.
+testMerge(sourceColl, outputCollDiffDb, false, true);
- // Test with sharded source and unsharded target collection.
- testMerge(sourceColl, outputCollDiffDb, true, false);
+// Test with sharded source and unsharded target collection.
+testMerge(sourceColl, outputCollDiffDb, true, false);
- st.stop();
+st.stop();
}());
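Both merge_to_existing.js above and the files below drive their cases through withEachMergeMode
from merge_helpers.js, whose body is not part of this diff. As an assumed sketch of what such a
helper does (the pipeline form of whenMatched is left out), it enumerates the $merge mode
combinations and invokes a callback for each:

// Assumed sketch only; the real helper lives in jstests/aggregation/extras/merge_helpers.js and
// may differ in details.
function withEachMergeModeSketch(callback) {
    const whenMatchedModes = ["replace", "keepExisting", "merge", "fail"];
    const whenNotMatchedModes = ["insert", "discard", "fail"];
    whenMatchedModes.forEach((whenMatchedMode) => {
        whenNotMatchedModes.forEach((whenNotMatchedMode) => {
            callback({whenMatchedMode, whenNotMatchedMode});
        });
    });
}

// Usage mirrors the tests above: skip combinations whose expected error would differ.
withEachMergeModeSketch(({whenMatchedMode, whenNotMatchedMode}) => {
    if (whenNotMatchedMode == "fail")
        return;
    // ... run a $merge with {whenMatched: whenMatchedMode, whenNotMatched: whenNotMatchedMode} ...
});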
diff --git a/jstests/sharding/merge_to_non_existing.js b/jstests/sharding/merge_to_non_existing.js
index e4be4a1618c..5f6af78a86a 100644
--- a/jstests/sharding/merge_to_non_existing.js
+++ b/jstests/sharding/merge_to_non_existing.js
@@ -1,108 +1,107 @@
// Tests for $merge with a non-existing target collection.
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
-
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
- const sourceDB = st.s0.getDB("source_db");
-
- /**
- * Run an aggregation on 'sourceColl' that writes documents to 'targetColl' with $merge.
- */
- function testMerge(sourceColl, targetColl, shardedSource) {
- sourceColl.drop();
-
- if (shardedSource) {
- st.shardColl(sourceColl, {_id: 1}, {_id: 0}, {_id: 1}, sourceDB.getName());
- }
-
- for (let i = 0; i < 10; i++) {
- assert.commandWorked(sourceColl.insert({_id: i}));
- }
-
- // Test the behavior for each of the $merge modes. Since the target collection does not
- // exist, the behavior should be identical.
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- // Skip the combination of merge modes which will fail depending on the contents of the
- // source and target collection, as this will cause the assertion below to trip.
- if (whenMatchedMode == "fail" || whenNotMatchedMode == "fail")
- return;
-
- targetColl.drop();
- sourceColl.aggregate([{
+"use strict";
+
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
+const sourceDB = st.s0.getDB("source_db");
+
+/**
+ * Run an aggregation on 'sourceColl' that writes documents to 'targetColl' with $merge.
+ */
+function testMerge(sourceColl, targetColl, shardedSource) {
+ sourceColl.drop();
+
+ if (shardedSource) {
+ st.shardColl(sourceColl, {_id: 1}, {_id: 0}, {_id: 1}, sourceDB.getName());
+ }
+
+ for (let i = 0; i < 10; i++) {
+ assert.commandWorked(sourceColl.insert({_id: i}));
+ }
+
+ // Test the behavior for each of the $merge modes. Since the target collection does not
+    // exist, the behavior should be identical across all modes.
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ // Skip the combination of merge modes which will fail depending on the contents of the
+ // source and target collection, as this will cause the assertion below to trip.
+ if (whenMatchedMode == "fail" || whenNotMatchedMode == "fail")
+ return;
+
+ targetColl.drop();
+ sourceColl.aggregate([{
+ $merge: {
+ into: {db: targetColl.getDB().getName(), coll: targetColl.getName()},
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode,
+ on: "_id"
+ }
+ }]);
+ assert.eq(whenNotMatchedMode == "discard" ? 0 : 10, targetColl.find().itcount());
+ });
+
+ // Test that $merge fails if the "on" field is anything but "_id" when the target collection
+ // does not exist.
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ // Skip the combination of merge modes which will fail depending on the contents of the
+ // source and target collection, as this will cause the assertion below to trip.
+ if (whenMatchedMode == "fail" || whenNotMatchedMode == "fail")
+ return;
+
+ targetColl.drop();
+ assertErrorCode(
+ sourceColl,
+ [{
$merge: {
into: {db: targetColl.getDB().getName(), coll: targetColl.getName()},
whenMatched: whenMatchedMode,
whenNotMatched: whenNotMatchedMode,
- on: "_id"
+ on: "not_allowed"
}
- }]);
- assert.eq(whenNotMatchedMode == "discard" ? 0 : 10, targetColl.find().itcount());
- });
-
- // Test that $merge fails if the "on" field is anything but "_id" when the target collection
- // does not exist.
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- // Skip the combination of merge modes which will fail depending on the contents of the
- // source and target collection, as this will cause the assertion below to trip.
- if (whenMatchedMode == "fail" || whenNotMatchedMode == "fail")
- return;
-
- targetColl.drop();
- assertErrorCode(
- sourceColl,
- [{
- $merge: {
- into: {db: targetColl.getDB().getName(), coll: targetColl.getName()},
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode,
- on: "not_allowed"
- }
- }],
- 51190);
- });
-
- // If 'targetColl' is in the same database as 'sourceColl', test that the legacy $out works
- // correctly.
- if (targetColl.getDB() == sourceColl.getDB()) {
- jsTestLog(
- `Testing $out from ${sourceColl.getFullName()} ` +
- `(${shardedSource ? "sharded" : "unsharded"}) to ${targetColl.getFullName()} ` +
- `with legacy syntax`);
-
- targetColl.drop();
- sourceColl.aggregate([{$out: targetColl.getName()}]);
- assert.eq(10, targetColl.find().itcount());
- }
+ }],
+ 51190);
+ });
+
+ // If 'targetColl' is in the same database as 'sourceColl', test that the legacy $out works
+ // correctly.
+ if (targetColl.getDB() == sourceColl.getDB()) {
+ jsTestLog(`Testing $out from ${sourceColl.getFullName()} ` +
+ `(${shardedSource ? "sharded" : "unsharded"}) to ${targetColl.getFullName()} ` +
+ `with legacy syntax`);
+
+ targetColl.drop();
+ sourceColl.aggregate([{$out: targetColl.getName()}]);
+ assert.eq(10, targetColl.find().itcount());
}
-
- const sourceColl = sourceDB["source_coll"];
- const outputCollSameDb = sourceDB["output_coll"];
-
- // Test $merge from an unsharded source collection to a non-existent output collection in the
- // same database.
- testMerge(sourceColl, outputCollSameDb, false);
-
- // Like the last test case, but perform a $merge from a sharded source collection to a
- // non-existent output collection in the same database.
- testMerge(sourceColl, outputCollSameDb, true);
-
- // Test that $merge in a sharded cluster fails when the output is sent to a different database
- // that doesn't exist.
- const foreignDb = st.s0.getDB("foreign_db");
- const outputCollDiffDb = foreignDb["output_coll"];
- foreignDb.dropDatabase();
- assert.throws(() => testMerge(sourceColl, outputCollDiffDb, false));
- assert.throws(() => testMerge(sourceColl, outputCollDiffDb, true));
-
- // Test $merge from an unsharded source collection to an output collection in a different
- // database where the database exists but the collection does not.
- assert.commandWorked(foreignDb["test"].insert({_id: "forcing database creation"}));
- testMerge(sourceColl, outputCollDiffDb, false);
-
- // Like the last test, but with a sharded source collection.
- testMerge(sourceColl, outputCollDiffDb, true);
- st.stop();
+}
+
+const sourceColl = sourceDB["source_coll"];
+const outputCollSameDb = sourceDB["output_coll"];
+
+// Test $merge from an unsharded source collection to a non-existent output collection in the
+// same database.
+testMerge(sourceColl, outputCollSameDb, false);
+
+// Like the last test case, but perform a $merge from a sharded source collection to a
+// non-existent output collection in the same database.
+testMerge(sourceColl, outputCollSameDb, true);
+
+// Test that $merge in a sharded cluster fails when the output is sent to a different database
+// that doesn't exist.
+const foreignDb = st.s0.getDB("foreign_db");
+const outputCollDiffDb = foreignDb["output_coll"];
+foreignDb.dropDatabase();
+assert.throws(() => testMerge(sourceColl, outputCollDiffDb, false));
+assert.throws(() => testMerge(sourceColl, outputCollDiffDb, true));
+
+// Test $merge from an unsharded source collection to an output collection in a different
+// database where the database exists but the collection does not.
+assert.commandWorked(foreignDb["test"].insert({_id: "forcing database creation"}));
+testMerge(sourceColl, outputCollDiffDb, false);
+
+// Like the last test, but with a sharded source collection.
+testMerge(sourceColl, outputCollDiffDb, true);
+st.stop();
}());
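One detail of merge_to_non_existing.js worth calling out: in a sharded cluster, $merge into a
collection of another database only succeeds once that database exists, which is why the test
forces creation with a throwaway insert. A condensed sketch of that shape, assuming 'db' is a
mongos connection to the same kind of cluster:

// Condensed sketch (not from the patch) of the foreign-database cases above.
assert.commandWorked(db.source_coll.insert({_id: 1}));
const foreign = db.getSiblingDB("foreign_db");
foreign.dropDatabase();

// While "foreign_db" does not exist, the $merge is expected to throw.
assert.throws(() => db.source_coll.aggregate(
    [{$merge: {into: {db: "foreign_db", coll: "output_coll"}, on: "_id"}}]));

// Once the database exists, the same $merge can create the target collection implicitly.
assert.commandWorked(foreign.test.insert({_id: "forcing database creation"}));
assert.doesNotThrow(() => db.source_coll.aggregate(
    [{$merge: {into: {db: "foreign_db", coll: "output_coll"}, on: "_id"}}]));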
diff --git a/jstests/sharding/merge_with_chunk_migrations.js b/jstests/sharding/merge_with_chunk_migrations.js
index 2b9ba4256fa..461fe57cf8d 100644
--- a/jstests/sharding/merge_with_chunk_migrations.js
+++ b/jstests/sharding/merge_with_chunk_migrations.js
@@ -1,47 +1,47 @@
// Tests that the $merge aggregation stage is resilient to chunk migrations in both the source and
 // output collections during execution.
(function() {
- 'use strict';
+'use strict';
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
- const mongosDB = st.s.getDB(jsTestName());
- const sourceColl = mongosDB["source"];
- const targetColl = mongosDB["target"];
+const mongosDB = st.s.getDB(jsTestName());
+const sourceColl = mongosDB["source"];
+const targetColl = mongosDB["target"];
- function setAggHang(mode) {
- assert.commandWorked(st.shard0.adminCommand(
- {configureFailPoint: "hangBeforeDocumentSourceCursorLoadBatch", mode: mode}));
- assert.commandWorked(st.shard1.adminCommand(
- {configureFailPoint: "hangBeforeDocumentSourceCursorLoadBatch", mode: mode}));
+function setAggHang(mode) {
+ assert.commandWorked(st.shard0.adminCommand(
+ {configureFailPoint: "hangBeforeDocumentSourceCursorLoadBatch", mode: mode}));
+ assert.commandWorked(st.shard1.adminCommand(
+ {configureFailPoint: "hangBeforeDocumentSourceCursorLoadBatch", mode: mode}));
+}
+
+function runMergeWithMode(whenMatchedMode, whenNotMatchedMode, shardedColl) {
+ assert.commandWorked(targetColl.remove({}));
+
+ // For modes 'whenNotMatchedMode:fail/discard', the $merge will not insert the expected
+ // documents, causing the assertion below to fail. To avoid that, we match the documents in
+    // the target collection with the documents in the source.
+ if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
+ assert.commandWorked(targetColl.insert({_id: 0, shardKey: -1}));
+ assert.commandWorked(targetColl.insert({_id: 1, shardKey: 1}));
}
- function runMergeWithMode(whenMatchedMode, whenNotMatchedMode, shardedColl) {
- assert.commandWorked(targetColl.remove({}));
-
- // For modes 'whenNotMatchedMode:fail/discard', the $merge will not insert the expected
- // documents, causing the assertion below to fail. To avoid that, we match the documents in
- // target collection with the documents in source.
- if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
- assert.commandWorked(targetColl.insert({_id: 0, shardKey: -1}));
- assert.commandWorked(targetColl.insert({_id: 1, shardKey: 1}));
- }
-
- // Set the failpoint to hang in the first call to DocumentSourceCursor's getNext().
- setAggHang("alwaysOn");
-
- let comment = whenMatchedMode + "_" + whenNotMatchedMode + "_" + shardedColl.getName();
-
- const mergeSpec = {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- };
- // The $_internalInhibitOptimization stage is added to the pipeline to prevent the pipeline
- // from being optimized away after it's been split. Otherwise, we won't hit the failpoint.
- let outFn = `
+ // Set the failpoint to hang in the first call to DocumentSourceCursor's getNext().
+ setAggHang("alwaysOn");
+
+ let comment = whenMatchedMode + "_" + whenNotMatchedMode + "_" + shardedColl.getName();
+
+ const mergeSpec = {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ };
+ // The $_internalInhibitOptimization stage is added to the pipeline to prevent the pipeline
+ // from being optimized away after it's been split. Otherwise, we won't hit the failpoint.
+ let outFn = `
const sourceDB = db.getSiblingDB(jsTestName());
const sourceColl = sourceDB["${sourceColl.getName()}"];
sourceColl.aggregate([
@@ -51,43 +51,42 @@
{comment: "${comment}"});
`;
- // Start the $merge aggregation in a parallel shell.
- let mergeShell = startParallelShell(outFn, st.s.port);
-
- // Wait for the parallel shell to hit the failpoint.
- assert.soon(
- () =>
- mongosDB.currentOp({op: "command", "command.comment": comment}).inprog.length == 1,
- () => tojson(mongosDB.currentOp().inprog));
-
- // Migrate the chunk on shard1 to shard0.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: shardedColl.getFullName(), find: {shardKey: 1}, to: st.shard0.shardName}));
-
- // Unset the failpoint to unblock the $merge and join with the parallel shell.
- setAggHang("off");
- mergeShell();
-
- // Verify that the $merge succeeded.
- assert.eq(2, targetColl.find().itcount());
-
- // Now both chunks are on shard0. Run a similar test except migrate the chunks back to
- // shard1 in the middle of execution.
- assert.commandWorked(targetColl.remove({}));
-
- // For modes 'whenNotMatchedMode:fail/discard', the $merge will not insert the expected
- // documents, causing the assertion below to fail. To avoid that, we match the documents in
- // target collection with the documents in source.
- if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
- assert.commandWorked(targetColl.insert({_id: 0, shardKey: -1}));
- assert.commandWorked(targetColl.insert({_id: 1, shardKey: 1}));
- }
-
- setAggHang("alwaysOn");
- comment = comment + "_2";
- // The $_internalInhibitOptimization stage is added to the pipeline to prevent the pipeline
- // from being optimized away after it's been split. Otherwise, we won't hit the failpoint.
- outFn = `
+ // Start the $merge aggregation in a parallel shell.
+ let mergeShell = startParallelShell(outFn, st.s.port);
+
+ // Wait for the parallel shell to hit the failpoint.
+ assert.soon(
+ () => mongosDB.currentOp({op: "command", "command.comment": comment}).inprog.length == 1,
+ () => tojson(mongosDB.currentOp().inprog));
+
+ // Migrate the chunk on shard1 to shard0.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {shardKey: 1}, to: st.shard0.shardName}));
+
+ // Unset the failpoint to unblock the $merge and join with the parallel shell.
+ setAggHang("off");
+ mergeShell();
+
+ // Verify that the $merge succeeded.
+ assert.eq(2, targetColl.find().itcount());
+
+ // Now both chunks are on shard0. Run a similar test except migrate the chunks back to
+ // shard1 in the middle of execution.
+ assert.commandWorked(targetColl.remove({}));
+
+ // For modes 'whenNotMatchedMode:fail/discard', the $merge will not insert the expected
+ // documents, causing the assertion below to fail. To avoid that, we match the documents in
+    // the target collection with the documents in the source.
+ if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
+ assert.commandWorked(targetColl.insert({_id: 0, shardKey: -1}));
+ assert.commandWorked(targetColl.insert({_id: 1, shardKey: 1}));
+ }
+
+ setAggHang("alwaysOn");
+ comment = comment + "_2";
+ // The $_internalInhibitOptimization stage is added to the pipeline to prevent the pipeline
+ // from being optimized away after it's been split. Otherwise, we won't hit the failpoint.
+ outFn = `
const sourceDB = db.getSiblingDB(jsTestName());
const sourceColl = sourceDB["${sourceColl.getName()}"];
sourceColl.aggregate([
@@ -96,56 +95,55 @@
],
{comment: "${comment}"});
`;
- mergeShell = startParallelShell(outFn, st.s.port);
+ mergeShell = startParallelShell(outFn, st.s.port);
- // Wait for the parallel shell to hit the failpoint.
- assert.soon(
- () =>
- mongosDB.currentOp({op: "command", "command.comment": comment}).inprog.length == 1,
- () => tojson(mongosDB.currentOp().inprog));
+ // Wait for the parallel shell to hit the failpoint.
+ assert.soon(
+ () => mongosDB.currentOp({op: "command", "command.comment": comment}).inprog.length == 1,
+ () => tojson(mongosDB.currentOp().inprog));
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: shardedColl.getFullName(), find: {shardKey: -1}, to: st.shard1.shardName}));
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: shardedColl.getFullName(), find: {shardKey: 1}, to: st.shard1.shardName}));
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {shardKey: -1}, to: st.shard1.shardName}));
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {shardKey: 1}, to: st.shard1.shardName}));
- // Unset the failpoint to unblock the $merge and join with the parallel shell.
- setAggHang("off");
- mergeShell();
+ // Unset the failpoint to unblock the $merge and join with the parallel shell.
+ setAggHang("off");
+ mergeShell();
- // Verify that the $merge succeeded.
- assert.eq(2, targetColl.find().itcount());
+ // Verify that the $merge succeeded.
+ assert.eq(2, targetColl.find().itcount());
- // Reset the chunk distribution.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: shardedColl.getFullName(), find: {shardKey: -1}, to: st.shard0.shardName}));
- }
+ // Reset the chunk distribution.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {shardKey: -1}, to: st.shard0.shardName}));
+}
- // Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
- st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+// Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
+st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
- // Write a document to each chunk of the source collection.
- assert.commandWorked(sourceColl.insert({_id: 0, shardKey: -1}));
- assert.commandWorked(sourceColl.insert({_id: 1, shardKey: 1}));
+// Write a document to each chunk of the source collection.
+assert.commandWorked(sourceColl.insert({_id: 0, shardKey: -1}));
+assert.commandWorked(sourceColl.insert({_id: 1, shardKey: 1}));
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- runMergeWithMode(whenMatchedMode, whenNotMatchedMode, sourceColl);
- });
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ runMergeWithMode(whenMatchedMode, whenNotMatchedMode, sourceColl);
+});
- // Run a similar test with chunk migrations on the output collection instead.
- sourceColl.drop();
- assert.commandWorked(targetColl.remove({}));
- // Shard the output collection with shard key {shardKey: 1} and split into 2 chunks.
- st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+// Run a similar test with chunk migrations on the output collection instead.
+sourceColl.drop();
+assert.commandWorked(targetColl.remove({}));
+// Shard the output collection with shard key {shardKey: 1} and split into 2 chunks.
+st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
- // Write two documents in the source collection that should target the two chunks in the target
- // collection.
- assert.commandWorked(sourceColl.insert({_id: 0, shardKey: -1}));
- assert.commandWorked(sourceColl.insert({_id: 1, shardKey: 1}));
+// Write two documents in the source collection that should target the two chunks in the target
+// collection.
+assert.commandWorked(sourceColl.insert({_id: 0, shardKey: -1}));
+assert.commandWorked(sourceColl.insert({_id: 1, shardKey: 1}));
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- runMergeWithMode(whenMatchedMode, whenNotMatchedMode, targetColl);
- });
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ runMergeWithMode(whenMatchedMode, whenNotMatchedMode, targetColl);
+});
- st.stop();
+st.stop();
})();
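merge_with_chunk_migrations.js above, together with the drop-shard and move-primary tests below,
reuses one coordination pattern: hang the aggregation on a shard-side failpoint, change the
cluster underneath it, then release the failpoint and check that the $merge still completes. A
condensed sketch of that pattern, with illustrative names for the pieces each test supplies:

// Condensed sketch (not from the patch) of the hang-and-reshuffle pattern used by these tests.
// 'aggCode' is the stringified shell code to run, tagged with 'comment' for currentOp matching;
// 'changeTopology' is whatever cluster change the test makes (moveChunk, removeShard, ...).
function withAggHungOnShards(st, mongosDB, failPointName, comment, aggCode, changeTopology) {
    // 1. Enable the failpoint on every shard so the aggregation pauses early.
    [st.shard0, st.shard1].forEach((shard) => {
        assert.commandWorked(
            shard.adminCommand({configureFailPoint: failPointName, mode: "alwaysOn"}));
    });

    // 2. Kick off the aggregation in a parallel shell.
    const joinShell = startParallelShell(aggCode, st.s.port);

    // 3. Wait until the tagged operation shows up in currentOp, i.e. the shell hit the failpoint.
    assert.soon(
        () => mongosDB.currentOp({op: "command", "command.comment": comment}).inprog.length >= 1,
        () => tojson(mongosDB.currentOp().inprog));

    // 4. Change the cluster underneath the hung aggregation.
    changeTopology();

    // 5. Release the failpoint and join the shell; the tests then assert on the target contents.
    [st.shard0, st.shard1].forEach((shard) => {
        assert.commandWorked(shard.adminCommand({configureFailPoint: failPointName, mode: "off"}));
    });
    joinShell();
}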
diff --git a/jstests/sharding/merge_with_drop_shard.js b/jstests/sharding/merge_with_drop_shard.js
index 442f4c89a4c..cc03ea31c42 100644
--- a/jstests/sharding/merge_with_drop_shard.js
+++ b/jstests/sharding/merge_with_drop_shard.js
@@ -1,61 +1,60 @@
 // Tests that the $merge aggregation stage is resilient to a shard being removed from the cluster
 // while both the source and output collections are in use during execution.
(function() {
- 'use strict';
-
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
-
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
-
- const mongosDB = st.s.getDB(jsTestName());
- const sourceColl = mongosDB["source"];
- const targetColl = mongosDB["target"];
-
- assert.commandWorked(st.s.getDB("admin").runCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard1.name);
-
- function setAggHang(mode) {
- assert.commandWorked(st.shard0.adminCommand(
- {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
- assert.commandWorked(st.shard1.adminCommand(
- {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
- }
-
- function removeShard(shard) {
- // We need the balancer to drain all the chunks out of the shard that is being removed.
- assert.commandWorked(st.startBalancer());
- st.waitForBalancer(true, 60000);
- var res = st.s.adminCommand({removeShard: shard.shardName});
+'use strict';
+
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+
+const mongosDB = st.s.getDB(jsTestName());
+const sourceColl = mongosDB["source"];
+const targetColl = mongosDB["target"];
+
+assert.commandWorked(st.s.getDB("admin").runCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.shard1.name);
+
+function setAggHang(mode) {
+ assert.commandWorked(st.shard0.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
+ assert.commandWorked(st.shard1.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
+}
+
+function removeShard(shard) {
+ // We need the balancer to drain all the chunks out of the shard that is being removed.
+ assert.commandWorked(st.startBalancer());
+ st.waitForBalancer(true, 60000);
+ var res = st.s.adminCommand({removeShard: shard.shardName});
+ assert.commandWorked(res);
+ assert.eq('started', res.state);
+ assert.soon(function() {
+ res = st.s.adminCommand({removeShard: shard.shardName});
assert.commandWorked(res);
- assert.eq('started', res.state);
- assert.soon(function() {
- res = st.s.adminCommand({removeShard: shard.shardName});
- assert.commandWorked(res);
- return ('completed' === res.state);
- }, "removeShard never completed for shard " + shard.shardName);
-
- // Drop the test database on the removed shard so it does not interfere with addShard later.
- assert.commandWorked(shard.getDB(mongosDB.getName()).dropDatabase());
-
- st.configRS.awaitLastOpCommitted();
- assert.commandWorked(st.s.adminCommand({flushRouterConfig: 1}));
- assert.commandWorked(st.stopBalancer());
- st.waitForBalancer(false, 60000);
- }
-
- function addShard(shard) {
- assert.commandWorked(st.s.adminCommand({addShard: shard}));
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: sourceColl.getFullName(), find: {shardKey: 0}, to: shard}));
- }
- function runMergeWithMode(
- whenMatchedMode, whenNotMatchedMode, shardedColl, dropShard, expectFailCode) {
- // Set the failpoint to hang in the first call to DocumentSourceCursor's getNext().
- setAggHang("alwaysOn");
-
- let comment =
- whenMatchedMode + "_" + whenNotMatchedMode + "_" + shardedColl.getName() + "_1";
- let outFn = `
+ return ('completed' === res.state);
+ }, "removeShard never completed for shard " + shard.shardName);
+
+ // Drop the test database on the removed shard so it does not interfere with addShard later.
+ assert.commandWorked(shard.getDB(mongosDB.getName()).dropDatabase());
+
+ st.configRS.awaitLastOpCommitted();
+ assert.commandWorked(st.s.adminCommand({flushRouterConfig: 1}));
+ assert.commandWorked(st.stopBalancer());
+ st.waitForBalancer(false, 60000);
+}
+
+function addShard(shard) {
+ assert.commandWorked(st.s.adminCommand({addShard: shard}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: sourceColl.getFullName(), find: {shardKey: 0}, to: shard}));
+}
+function runMergeWithMode(
+ whenMatchedMode, whenNotMatchedMode, shardedColl, dropShard, expectFailCode) {
+ // Set the failpoint to hang in the first call to DocumentSourceCursor's getNext().
+ setAggHang("alwaysOn");
+
+ let comment = whenMatchedMode + "_" + whenNotMatchedMode + "_" + shardedColl.getName() + "_1";
+ let outFn = `
const sourceDB = db.getSiblingDB(jsTestName());
const sourceColl = sourceDB["${sourceColl.getName()}"];
let cmdRes = sourceDB.runCommand({
@@ -76,61 +75,60 @@
}
`;
- // Start the $merge aggregation in a parallel shell.
- let mergeShell = startParallelShell(outFn, st.s.port);
-
- // Wait for the parallel shell to hit the failpoint.
- assert.soon(
- () => mongosDB
- .currentOp({
- $or: [
- {op: "command", "command.comment": comment},
- {op: "getmore", "cursor.originatingCommand.comment": comment}
- ]
- })
- .inprog.length >= 1,
- () => tojson(mongosDB.currentOp().inprog));
-
- if (dropShard) {
- removeShard(st.shard0);
- } else {
- addShard(st.rs0.getURL());
- }
- // Unset the failpoint to unblock the $merge and join with the parallel shell.
- setAggHang("off");
- mergeShell();
-
- assert.eq(2, targetColl.find().itcount());
+ // Start the $merge aggregation in a parallel shell.
+ let mergeShell = startParallelShell(outFn, st.s.port);
+
+ // Wait for the parallel shell to hit the failpoint.
+ assert.soon(() => mongosDB
+ .currentOp({
+ $or: [
+ {op: "command", "command.comment": comment},
+ {op: "getmore", "cursor.originatingCommand.comment": comment}
+ ]
+ })
+ .inprog.length >= 1,
+ () => tojson(mongosDB.currentOp().inprog));
+
+ if (dropShard) {
+ removeShard(st.shard0);
+ } else {
+ addShard(st.rs0.getURL());
}
+ // Unset the failpoint to unblock the $merge and join with the parallel shell.
+ setAggHang("off");
+ mergeShell();
+
+ assert.eq(2, targetColl.find().itcount());
+}
- // Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
- st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+// Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
+st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
- // Shard the output collection with shard key {shardKey: 1} and split into 2 chunks.
- st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+// Shard the output collection with shard key {shardKey: 1} and split into 2 chunks.
+st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
- // Write two documents in the source collection that should target the two chunks in the target
- // collection.
- assert.commandWorked(sourceColl.insert({shardKey: -1, _id: 0}));
- assert.commandWorked(sourceColl.insert({shardKey: 1, _id: 1}));
+// Write two documents in the source collection that should target the two chunks in the target
+// collection.
+assert.commandWorked(sourceColl.insert({shardKey: -1, _id: 0}));
+assert.commandWorked(sourceColl.insert({shardKey: 1, _id: 1}));
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- assert.commandWorked(targetColl.remove({}));
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ assert.commandWorked(targetColl.remove({}));
- // Match the data from source into target so that we don't fail the assertion for
- // 'whenNotMatchedMode:fail/discard'.
- if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
- assert.commandWorked(targetColl.insert({shardKey: -1, _id: 0}));
- assert.commandWorked(targetColl.insert({shardKey: 1, _id: 1}));
- }
+ // Match the data from source into target so that we don't fail the assertion for
+ // 'whenNotMatchedMode:fail/discard'.
+ if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
+ assert.commandWorked(targetColl.insert({shardKey: -1, _id: 0}));
+ assert.commandWorked(targetColl.insert({shardKey: 1, _id: 1}));
+ }
- runMergeWithMode(whenMatchedMode, whenNotMatchedMode, targetColl, true, undefined);
- runMergeWithMode(whenMatchedMode,
- whenNotMatchedMode,
- targetColl,
- false,
- whenMatchedMode == "fail" ? ErrorCodes.DuplicateKey : undefined);
- });
+ runMergeWithMode(whenMatchedMode, whenNotMatchedMode, targetColl, true, undefined);
+ runMergeWithMode(whenMatchedMode,
+ whenNotMatchedMode,
+ targetColl,
+ false,
+ whenMatchedMode == "fail" ? ErrorCodes.DuplicateKey : undefined);
+});
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/merge_with_move_primary.js b/jstests/sharding/merge_with_move_primary.js
index 5de910b29fa..94d00de22eb 100644
--- a/jstests/sharding/merge_with_move_primary.js
+++ b/jstests/sharding/merge_with_move_primary.js
@@ -1,37 +1,37 @@
 // Tests that the $merge aggregation stage is resilient to the primary shard being moved while
 // both the source and output collections are in use during execution.
(function() {
- 'use strict';
+'use strict';
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
- const mongosDB = st.s.getDB(jsTestName());
- const sourceColl = mongosDB["source"];
- const targetColl = mongosDB["target"];
+const mongosDB = st.s.getDB(jsTestName());
+const sourceColl = mongosDB["source"];
+const targetColl = mongosDB["target"];
- function setAggHang(mode) {
- assert.commandWorked(st.shard0.adminCommand(
- {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
- assert.commandWorked(st.shard1.adminCommand(
- {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
+function setAggHang(mode) {
+ assert.commandWorked(st.shard0.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
+ assert.commandWorked(st.shard1.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
- assert.commandWorked(st.shard0.adminCommand(
- {configureFailPoint: "hangWhileBuildingDocumentSourceOutBatch", mode: mode}));
- assert.commandWorked(st.shard1.adminCommand(
- {configureFailPoint: "hangWhileBuildingDocumentSourceOutBatch", mode: mode}));
- }
+ assert.commandWorked(st.shard0.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceOutBatch", mode: mode}));
+ assert.commandWorked(st.shard1.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceOutBatch", mode: mode}));
+}
- function runPipelineWithStage({stage, shardedColl, expectedfailCode, expectedNumDocs}) {
- // Set the failpoint to hang in the first call to DocumentSourceCursor's getNext().
- setAggHang("alwaysOn");
+function runPipelineWithStage({stage, shardedColl, expectedfailCode, expectedNumDocs}) {
+ // Set the failpoint to hang in the first call to DocumentSourceCursor's getNext().
+ setAggHang("alwaysOn");
- // Set the primary shard.
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+ // Set the primary shard.
+ st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
- let comment = jsTestName() + "_comment";
- let outFn = `
+ let comment = jsTestName() + "_comment";
+ let outFn = `
const sourceDB = db.getSiblingDB(jsTestName());
const sourceColl = sourceDB["${sourceColl.getName()}"];
let cmdRes = sourceDB.runCommand({
@@ -47,151 +47,149 @@
}
`;
- // Start the $merge aggregation in a parallel shell.
- let outShell = startParallelShell(outFn, st.s.port);
-
- // Wait for the parallel shell to hit the failpoint.
- assert.soon(
- () => mongosDB
- .currentOp({
- $or: [
- {op: "command", "command.comment": comment},
- {op: "getmore", "cursor.originatingCommand.comment": comment}
- ]
- })
- .inprog.length == 1,
- () => tojson(mongosDB.currentOp().inprog));
-
- // Migrate the primary shard from shard0 to shard1.
- st.ensurePrimaryShard(mongosDB.getName(), st.shard1.shardName);
-
- // Unset the failpoint to unblock the $merge and join with the parallel shell.
- setAggHang("off");
- outShell();
-
- // Verify that the $merge succeeded.
- if (expectedfailCode === undefined) {
- assert.eq(expectedNumDocs, targetColl.find().itcount());
- }
-
- assert.commandWorked(targetColl.remove({}));
+ // Start the $merge aggregation in a parallel shell.
+ let outShell = startParallelShell(outFn, st.s.port);
+
+ // Wait for the parallel shell to hit the failpoint.
+ assert.soon(() => mongosDB
+ .currentOp({
+ $or: [
+ {op: "command", "command.comment": comment},
+ {op: "getmore", "cursor.originatingCommand.comment": comment}
+ ]
+ })
+ .inprog.length == 1,
+ () => tojson(mongosDB.currentOp().inprog));
+
+ // Migrate the primary shard from shard0 to shard1.
+ st.ensurePrimaryShard(mongosDB.getName(), st.shard1.shardName);
+
+ // Unset the failpoint to unblock the $merge and join with the parallel shell.
+ setAggHang("off");
+ outShell();
+
+ // Verify that the $merge succeeded.
+ if (expectedfailCode === undefined) {
+ assert.eq(expectedNumDocs, targetColl.find().itcount());
}
- // The source collection is unsharded.
- assert.commandWorked(sourceColl.insert({shardKey: -1}));
- assert.commandWorked(sourceColl.insert({shardKey: 1}));
+ assert.commandWorked(targetColl.remove({}));
+}
+
+// The source collection is unsharded.
+assert.commandWorked(sourceColl.insert({shardKey: -1}));
+assert.commandWorked(sourceColl.insert({shardKey: 1}));
- // Note that the actual error is NamespaceNotFound but it is wrapped in a generic error code by
- // mistake.
+// Note that the actual error is NamespaceNotFound but it is wrapped in a generic error code by
+// mistake.
+runPipelineWithStage({
+ stage: {$out: targetColl.getName()},
+ shardedColl: sourceColl,
+ expectedfailCode: ErrorCodes.CommandFailed
+});
+
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
runPipelineWithStage({
- stage: {$out: targetColl.getName()},
+ stage: {
+ $merge: {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ },
shardedColl: sourceColl,
- expectedfailCode: ErrorCodes.CommandFailed
+ expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
+ expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
});
+});
+sourceColl.drop();
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- runPipelineWithStage({
- stage: {
- $merge: {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- },
- shardedColl: sourceColl,
- expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
- expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
- });
+// Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
+st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
- });
- sourceColl.drop();
-
- // Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
- st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+// Write a document to each chunk of the source collection.
+assert.commandWorked(sourceColl.insert({shardKey: -1}));
+assert.commandWorked(sourceColl.insert({shardKey: 1}));
- // Write a document to each chunk of the source collection.
- assert.commandWorked(sourceColl.insert({shardKey: -1}));
- assert.commandWorked(sourceColl.insert({shardKey: 1}));
+runPipelineWithStage({
+ stage: {$out: targetColl.getName()},
+ shardedColl: sourceColl,
+ expectedfailCode: ErrorCodes.CommandFailed
+});
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
runPipelineWithStage({
- stage: {$out: targetColl.getName()},
+ stage: {
+ $merge: {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ },
shardedColl: sourceColl,
- expectedfailCode: ErrorCodes.CommandFailed
+ expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
+ expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
});
+});
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- runPipelineWithStage({
- stage: {
- $merge: {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- },
- shardedColl: sourceColl,
- expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
- expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
- });
- });
+sourceColl.drop();
- sourceColl.drop();
+// Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
+st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
- // Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
- st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+// Write two documents in the source collection that should target the two chunks in the target
+// collection.
+assert.commandWorked(sourceColl.insert({shardKey: -1}));
+assert.commandWorked(sourceColl.insert({shardKey: 1}));
- // Write two documents in the source collection that should target the two chunks in the target
- // collection.
- assert.commandWorked(sourceColl.insert({shardKey: -1}));
- assert.commandWorked(sourceColl.insert({shardKey: 1}));
+runPipelineWithStage({
+ stage: {$out: targetColl.getName()},
+ shardedColl: targetColl,
+ expectedfailCode: ErrorCodes.CommandFailed
+});
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
runPipelineWithStage({
- stage: {$out: targetColl.getName()},
+ stage: {
+ $merge: {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ },
shardedColl: targetColl,
- expectedfailCode: ErrorCodes.CommandFailed
+ expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
+ expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
});
+});
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- runPipelineWithStage({
- stage: {
- $merge: {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- },
- shardedColl: targetColl,
- expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
- expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
- });
- });
+sourceColl.drop();
+targetColl.drop();
+
+// Shard the collections with shard key {shardKey: 1} and split into 2 chunks.
+st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+
+// Write two documents in the source collection that should target the two chunks in the target
+// collection.
+assert.commandWorked(sourceColl.insert({shardKey: -1}));
+assert.commandWorked(sourceColl.insert({shardKey: 1}));
- sourceColl.drop();
- targetColl.drop();
-
- // Shard the collections with shard key {shardKey: 1} and split into 2 chunks.
- st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
- st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
-
- // Write two documents in the source collection that should target the two chunks in the target
- // collection.
- assert.commandWorked(sourceColl.insert({shardKey: -1}));
- assert.commandWorked(sourceColl.insert({shardKey: 1}));
-
- // Note that the legacy $out is not supported with an existing sharded output collection.
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- runPipelineWithStage({
- stage: {
- $merge: {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- },
- shardedColl: targetColl,
- expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
- expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
- });
+// Note that the legacy $out is not supported with an existing sharded output collection.
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ runPipelineWithStage({
+ stage: {
+ $merge: {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ },
+ shardedColl: targetColl,
+ expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
+ expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
});
+});
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/merge_write_concern.js b/jstests/sharding/merge_write_concern.js
index b49d2e381ae..a0e3c0a9fcb 100644
--- a/jstests/sharding/merge_write_concern.js
+++ b/jstests/sharding/merge_write_concern.js
@@ -1,101 +1,101 @@
// Tests that $merge respects the writeConcern set on the original aggregation command.
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
-
- const st = new ShardingTest({shards: 2, rs: {nodes: 3}, config: 1});
-
- const mongosDB = st.s0.getDB("merge_write_concern");
- const source = mongosDB["source"];
- const target = mongosDB["target"];
- const shard0 = st.rs0;
- const shard1 = st.rs1;
-
- // Enable sharding on the test DB and ensure its primary is shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
- function testWriteConcernError(rs) {
- // Make sure that there are only 2 nodes up so w:3 writes will always time out.
- const stoppedSecondary = rs.getSecondary();
- rs.stop(stoppedSecondary);
-
- // Test that $merge correctly returns a WC error.
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- const res = mongosDB.runCommand({
- aggregate: "source",
- pipeline: [{
- $merge: {
- into: "target",
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }],
- writeConcern: {w: 3, wtimeout: 100},
- cursor: {},
- });
-
- // $merge writeConcern errors are handled differently from normal writeConcern
- // errors. Rather than returing ok:1 and a WriteConcernError, the entire operation
- // fails.
- assert.commandFailedWithCode(res,
- whenNotMatchedMode == "fail"
- ? [13113, ErrorCodes.WriteConcernFailed]
- : ErrorCodes.WriteConcernFailed);
- assert.commandWorked(target.remove({}));
+"use strict";
+
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+
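+// Each shard is a 3-node replica set so that stopping a single secondary below makes {w: 3}
+// writes impossible to satisfy within the wtimeout.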
+const st = new ShardingTest({shards: 2, rs: {nodes: 3}, config: 1});
+
+const mongosDB = st.s0.getDB("merge_write_concern");
+const source = mongosDB["source"];
+const target = mongosDB["target"];
+const shard0 = st.rs0;
+const shard1 = st.rs1;
+
+// Enable sharding on the test DB and ensure its primary is shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+function testWriteConcernError(rs) {
+ // Make sure that there are only 2 nodes up so w:3 writes will always time out.
+ const stoppedSecondary = rs.getSecondary();
+ rs.stop(stoppedSecondary);
+
+ // Test that $merge correctly returns a WC error.
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ const res = mongosDB.runCommand({
+ aggregate: "source",
+ pipeline: [{
+ $merge: {
+ into: "target",
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }],
+ writeConcern: {w: 3, wtimeout: 100},
+ cursor: {},
});
- // Restart the stopped node and verify that the $merge's now pass.
- rs.restart(rs.getSecondary());
- rs.awaitReplication();
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- // Skip the combination of merge modes which will fail depending on the contents of the
- // source and target collection, as this will cause the assertion below to trip.
- if (whenNotMatchedMode == "fail")
- return;
-
- const res = mongosDB.runCommand({
- aggregate: "source",
- pipeline: [{
- $merge: {
- into: "target",
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }],
- writeConcern: {w: 3},
- cursor: {},
- });
-
- // Ensure that the write concern is satisfied within a reasonable amount of time. This
- // prevents the test from hanging if for some reason the write concern can't be
- // satisfied.
- assert.soon(() => assert.commandWorked(res), "writeConcern was not satisfied");
- assert.commandWorked(target.remove({}));
+ // $merge writeConcern errors are handled differently from normal writeConcern
+        // errors. Rather than returning ok:1 and a WriteConcernError, the entire operation
+ // fails.
+ assert.commandFailedWithCode(res,
+ whenNotMatchedMode == "fail"
+ ? [13113, ErrorCodes.WriteConcernFailed]
+ : ErrorCodes.WriteConcernFailed);
+ assert.commandWorked(target.remove({}));
+ });
+
+    // Restart the stopped node and verify that the $merge commands now pass.
+ rs.restart(rs.getSecondary());
+ rs.awaitReplication();
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ // Skip the combination of merge modes which will fail depending on the contents of the
+ // source and target collection, as this will cause the assertion below to trip.
+ if (whenNotMatchedMode == "fail")
+ return;
+
+ const res = mongosDB.runCommand({
+ aggregate: "source",
+ pipeline: [{
+ $merge: {
+ into: "target",
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }],
+ writeConcern: {w: 3},
+ cursor: {},
});
- }
- // Test that when both collections are unsharded, all writes are directed to the primary shard.
- assert.commandWorked(source.insert([{_id: -1}, {_id: 0}, {_id: 1}, {_id: 2}]));
- testWriteConcernError(shard0);
+ // Ensure that the write concern is satisfied within a reasonable amount of time. This
+ // prevents the test from hanging if for some reason the write concern can't be
+ // satisfied.
+ assert.soon(() => assert.commandWorked(res), "writeConcern was not satisfied");
+ assert.commandWorked(target.remove({}));
+ });
+}
- // Shard the source collection and continue to expect writes to the primary shard.
- st.shardColl(source, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
- testWriteConcernError(shard0);
+// Test that when both collections are unsharded, all writes are directed to the primary shard.
+assert.commandWorked(source.insert([{_id: -1}, {_id: 0}, {_id: 1}, {_id: 2}]));
+testWriteConcernError(shard0);
- // Shard the target collection, however make sure that all writes go to the primary shard by
- // splitting the collection at {_id: 10} and keeping all values in the same chunk.
- st.shardColl(target, {_id: 1}, {_id: 10}, {_id: 10}, mongosDB.getName());
- assert.eq(FixtureHelpers.isSharded(target), true);
- testWriteConcernError(shard0);
+// Shard the source collection and continue to expect writes to the primary shard.
+st.shardColl(source, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
+testWriteConcernError(shard0);
- // Write a few documents to the source collection which will be $merge-ed to the second shard.
- assert.commandWorked(source.insert([{_id: 11}, {_id: 12}, {_id: 13}]));
+// Shard the target collection, but make sure that all writes go to the primary shard by
+// splitting the collection at {_id: 10} and keeping all values in the same chunk.
+st.shardColl(target, {_id: 1}, {_id: 10}, {_id: 10}, mongosDB.getName());
+assert.eq(FixtureHelpers.isSharded(target), true);
+testWriteConcernError(shard0);
- // Verify that either shard can produce a WriteConcernError since writes are going to both.
- testWriteConcernError(shard0);
- testWriteConcernError(shard1);
+// Write a few documents to the source collection which will be $merge-ed to the second shard.
+assert.commandWorked(source.insert([{_id: 11}, {_id: 12}, {_id: 13}]));
- st.stop();
+// Verify that either shard can produce a WriteConcernError since writes are going to both.
+testWriteConcernError(shard0);
+testWriteConcernError(shard1);
+
+st.stop();
}());
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index 5cdc03d292e..e525a909fea 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -1,68 +1,67 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({name: "migrateBig", shards: 2, other: {chunkSize: 1}});
+var s = new ShardingTest({name: "migrateBig", shards: 2, other: {chunkSize: 1}});
- assert.writeOK(
- s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true));
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
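+// The _waitForDelete balancer setting makes each balancer-driven migration wait for the donor's
+// range deletion to finish before the next migration starts.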
+assert.writeOK(s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
- var db = s.getDB("test");
- var coll = db.foo;
+var db = s.getDB("test");
+var coll = db.foo;
- var big = "";
- while (big.length < 10000)
- big += "eliot";
+var big = "";
+while (big.length < 10000)
+ big += "eliot";
- var bulk = coll.initializeUnorderedBulkOp();
- for (var x = 0; x < 100; x++) {
- bulk.insert({x: x, big: big});
- }
- assert.writeOK(bulk.execute());
+var bulk = coll.initializeUnorderedBulkOp();
+for (var x = 0; x < 100; x++) {
+ bulk.insert({x: x, big: big});
+}
+assert.writeOK(bulk.execute());
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 30}}));
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 66}}));
- assert.commandWorked(s.s0.adminCommand(
- {movechunk: "test.foo", find: {x: 90}, to: s.getOther(s.getPrimaryShard("test")).name}));
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 30}}));
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 66}}));
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {x: 90}, to: s.getOther(s.getPrimaryShard("test")).name}));
- s.printShardingStatus();
+s.printShardingStatus();
- print("YO : " + s.getPrimaryShard("test").host);
- var direct = new Mongo(s.getPrimaryShard("test").host);
- print("direct : " + direct);
+print("YO : " + s.getPrimaryShard("test").host);
+var direct = new Mongo(s.getPrimaryShard("test").host);
+print("direct : " + direct);
- var directDB = direct.getDB("test");
+var directDB = direct.getDB("test");
- for (var done = 0; done < 2 * 1024 * 1024; done += big.length) {
- assert.writeOK(directDB.foo.insert({x: 50 + Math.random(), big: big}));
- }
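+// Bulk up the chunk containing {x: 50} with ~2MB of documents inserted directly on the primary
+// shard, so the chunk exceeds the 1MB chunkSize and cannot be migrated.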
+for (var done = 0; done < 2 * 1024 * 1024; done += big.length) {
+ assert.writeOK(directDB.foo.insert({x: 50 + Math.random(), big: big}));
+}
- s.printShardingStatus();
+s.printShardingStatus();
- // This is a large chunk, which should not be able to move
- assert.commandFailed(s.s0.adminCommand(
- {movechunk: "test.foo", find: {x: 50}, to: s.getOther(s.getPrimaryShard("test")).name}));
+// This is a large chunk, which should not be able to move
+assert.commandFailed(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {x: 50}, to: s.getOther(s.getPrimaryShard("test")).name}));
- for (var i = 0; i < 20; i += 2) {
- try {
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: i}}));
- } catch (e) {
- // We may have auto split on some of these, which is ok
- print(e);
- }
+for (var i = 0; i < 20; i += 2) {
+ try {
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: i}}));
+ } catch (e) {
+ // We may have auto split on some of these, which is ok
+ print(e);
}
+}
- s.printShardingStatus();
+s.printShardingStatus();
- s.startBalancer();
+s.startBalancer();
- assert.soon(function() {
- var x = s.chunkDiff("foo", "test");
- print("chunk diff: " + x);
- return x < 2;
- }, "no balance happened", 8 * 60 * 1000, 2000);
+assert.soon(function() {
+ var x = s.chunkDiff("foo", "test");
+ print("chunk diff: " + x);
+ return x < 2;
+}, "no balance happened", 8 * 60 * 1000, 2000);
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/migrateBig_balancer.js b/jstests/sharding/migrateBig_balancer.js
index 37cba54f498..13195b61b65 100644
--- a/jstests/sharding/migrateBig_balancer.js
+++ b/jstests/sharding/migrateBig_balancer.js
@@ -4,58 +4,55 @@
* @tags: [resource_intensive]
*/
(function() {
- "use strict";
-
- // TODO: SERVER-33830 remove shardAsReplicaSet: false
- var st = new ShardingTest({
- name: 'migrateBig_balancer',
- shards: 2,
- other: {enableBalancer: true, shardAsReplicaSet: false}
- });
- var mongos = st.s;
- var admin = mongos.getDB("admin");
- var db = mongos.getDB("test");
- var coll = db.getCollection("stuff");
-
- assert.commandWorked(admin.runCommand({enablesharding: coll.getDB().getName()}));
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
-
- var data = "x";
- var nsq = 16;
- var n = 255;
-
- for (var i = 0; i < nsq; i++)
- data += data;
-
- var dataObj = {};
- for (var i = 0; i < n; i++)
- dataObj["data-" + i] = data;
-
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 40; i++) {
- bulk.insert({data: dataObj});
- }
-
- assert.writeOK(bulk.execute());
- assert.eq(40, coll.count(), "prep1");
-
- assert.commandWorked(admin.runCommand({shardcollection: "" + coll, key: {_id: 1}}));
- st.printShardingStatus();
-
- assert.lt(
- 5, mongos.getDB("config").chunks.find({ns: "test.stuff"}).count(), "not enough chunks");
-
- assert.soon(() => {
- let res =
- mongos.getDB("config")
- .chunks
- .aggregate(
- [{$match: {ns: "test.stuff"}}, {$group: {_id: "$shard", nChunks: {$sum: 1}}}])
- .toArray();
- printjson(res);
- return res.length > 1 && Math.abs(res[0].nChunks - res[1].nChunks) <= 3;
-
- }, "never migrated", 10 * 60 * 1000, 1000);
-
- st.stop();
+"use strict";
+
+// TODO: SERVER-33830 remove shardAsReplicaSet: false
+var st = new ShardingTest({
+ name: 'migrateBig_balancer',
+ shards: 2,
+ other: {enableBalancer: true, shardAsReplicaSet: false}
+});
+var mongos = st.s;
+var admin = mongos.getDB("admin");
+var db = mongos.getDB("test");
+var coll = db.getCollection("stuff");
+
+assert.commandWorked(admin.runCommand({enablesharding: coll.getDB().getName()}));
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
+
+var data = "x";
+var nsq = 16;
+var n = 255;
+
+for (var i = 0; i < nsq; i++)
+ data += data;
+
+var dataObj = {};
+for (var i = 0; i < n; i++)
+ dataObj["data-" + i] = data;
+
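+// Each document is close to the 16MB BSON limit (255 fields of ~64KB each), so these 40 inserts
+// give the balancer several chunks' worth of data to migrate.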
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 40; i++) {
+ bulk.insert({data: dataObj});
+}
+
+assert.writeOK(bulk.execute());
+assert.eq(40, coll.count(), "prep1");
+
+assert.commandWorked(admin.runCommand({shardcollection: "" + coll, key: {_id: 1}}));
+st.printShardingStatus();
+
+assert.lt(5, mongos.getDB("config").chunks.find({ns: "test.stuff"}).count(), "not enough chunks");
+
+assert.soon(() => {
+ let res = mongos.getDB("config")
+ .chunks
+ .aggregate(
+ [{$match: {ns: "test.stuff"}}, {$group: {_id: "$shard", nChunks: {$sum: 1}}}])
+ .toArray();
+ printjson(res);
+ return res.length > 1 && Math.abs(res[0].nChunks - res[1].nChunks) <= 3;
+}, "never migrated", 10 * 60 * 1000, 1000);
+
+st.stop();
})();
diff --git a/jstests/sharding/migration_critical_section_concurrency.js b/jstests/sharding/migration_critical_section_concurrency.js
index e51d9d5d738..e98f1f05262 100644
--- a/jstests/sharding/migration_critical_section_concurrency.js
+++ b/jstests/sharding/migration_critical_section_concurrency.js
@@ -4,65 +4,65 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- 'use strict';
+'use strict';
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- var st = new ShardingTest({mongos: 1, shards: 2});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+var st = new ShardingTest({mongos: 1, shards: 2});
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- var testDB = st.s0.getDB('TestDB');
+var testDB = st.s0.getDB('TestDB');
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll0', key: {Key: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll0', middle: {Key: 0}}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll0', key: {Key: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll0', middle: {Key: 0}}));
- var coll0 = testDB.Coll0;
- assert.writeOK(coll0.insert({Key: -1, Value: '-1'}));
- assert.writeOK(coll0.insert({Key: 1, Value: '1'}));
+var coll0 = testDB.Coll0;
+assert.writeOK(coll0.insert({Key: -1, Value: '-1'}));
+assert.writeOK(coll0.insert({Key: 1, Value: '1'}));
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll1', key: {Key: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll1', middle: {Key: 0}}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll1', key: {Key: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll1', middle: {Key: 0}}));
- var coll1 = testDB.Coll1;
- assert.writeOK(coll1.insert({Key: -1, Value: '-1'}));
- assert.writeOK(coll1.insert({Key: 1, Value: '1'}));
+var coll1 = testDB.Coll1;
+assert.writeOK(coll1.insert({Key: -1, Value: '-1'}));
+assert.writeOK(coll1.insert({Key: 1, Value: '1'}));
- // Ensure that coll0 has chunks on both shards so we can test queries against both donor and
- // recipient for Coll1's migration below
- assert.commandWorked(
- st.s0.adminCommand({moveChunk: 'TestDB.Coll0', find: {Key: 1}, to: st.shard1.shardName}));
+// Ensure that coll0 has chunks on both shards so we can test queries against both donor and
+// recipient for Coll1's migration below
+assert.commandWorked(
+ st.s0.adminCommand({moveChunk: 'TestDB.Coll0', find: {Key: 1}, to: st.shard1.shardName}));
- // Pause the move chunk operation just before it leaves the critical section
- pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
+// Pause the move chunk operation just before it leaves the critical section
+pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {Key: 1}, null, 'TestDB.Coll1', st.shard1.shardName);
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {Key: 1}, null, 'TestDB.Coll1', st.shard1.shardName);
- waitForMoveChunkStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
+waitForMoveChunkStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
- // Ensure that all operations for 'Coll0', which is not being migrated are not stalled
- assert.eq(1, coll0.find({Key: {$lte: -1}}).itcount());
- assert.eq(1, coll0.find({Key: {$gte: 1}}).itcount());
- assert.writeOK(coll0.insert({Key: -2, Value: '-2'}));
- assert.writeOK(coll0.insert({Key: 2, Value: '2'}));
- assert.eq(2, coll0.find({Key: {$lte: -1}}).itcount());
- assert.eq(2, coll0.find({Key: {$gte: 1}}).itcount());
+// Ensure that all operations for 'Coll0', which is not being migrated, are not stalled
+assert.eq(1, coll0.find({Key: {$lte: -1}}).itcount());
+assert.eq(1, coll0.find({Key: {$gte: 1}}).itcount());
+assert.writeOK(coll0.insert({Key: -2, Value: '-2'}));
+assert.writeOK(coll0.insert({Key: 2, Value: '2'}));
+assert.eq(2, coll0.find({Key: {$lte: -1}}).itcount());
+assert.eq(2, coll0.find({Key: {$gte: 1}}).itcount());
- // Ensure that read operations for 'Coll1', which *is* being migration are not stalled
- assert.eq(1, coll1.find({Key: {$lte: -1}}).itcount());
- assert.eq(1, coll1.find({Key: {$gte: 1}}).itcount());
+// Ensure that read operations for 'Coll1', which *is* being migrated, are not stalled
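+// (Writes to the migrating chunk are expected to block while the donor holds the critical
+// section, which is why only reads are exercised on 'Coll1'.)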
+assert.eq(1, coll1.find({Key: {$lte: -1}}).itcount());
+assert.eq(1, coll1.find({Key: {$gte: 1}}).itcount());
- // Ensure that all operations for non-sharded collections are not stalled
- var collUnsharded = testDB.CollUnsharded;
- assert.eq(0, collUnsharded.find({}).itcount());
- assert.writeOK(collUnsharded.insert({TestKey: 0, Value: 'Zero'}));
- assert.eq(1, collUnsharded.find({}).itcount());
+// Ensure that all operations for non-sharded collections are not stalled
+var collUnsharded = testDB.CollUnsharded;
+assert.eq(0, collUnsharded.find({}).itcount());
+assert.writeOK(collUnsharded.insert({TestKey: 0, Value: 'Zero'}));
+assert.eq(1, collUnsharded.find({}).itcount());
- unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
+unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
- joinMoveChunk();
+joinMoveChunk();
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/migration_failure.js b/jstests/sharding/migration_failure.js
index 7e3ba438262..f731c0d3614 100644
--- a/jstests/sharding/migration_failure.js
+++ b/jstests/sharding/migration_failure.js
@@ -5,88 +5,84 @@
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 1});
+var st = new ShardingTest({shards: 2, mongos: 1});
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
- assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
- printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
- assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
- assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
+assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
- st.printShardingStatus();
+st.printShardingStatus();
- jsTest.log("Testing failed migrations...");
+jsTest.log("Testing failed migrations...");
- var oldVersion = null;
- var newVersion = null;
+var oldVersion = null;
+var newVersion = null;
- // failMigrationCommit -- this creates an error that aborts the migration before the commit
- // migration command is sent.
- assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'failMigrationCommit', mode: 'alwaysOn'}));
+// failMigrationCommit -- this creates an error that aborts the migration before the commit
+// migration command is sent.
+assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'failMigrationCommit', mode: 'alwaysOn'}));
- oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
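+// The 'global' version is a timestamp whose .t component is the shard version's major value and
+// .i its minor value; the assertions below compare these components across the failed migration.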
- assert.commandFailed(
- admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
+assert.commandFailed(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
- newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
- assert.eq(oldVersion.t,
- newVersion.t,
- "The shard version major value should not change after a failed migration");
- // Split does not cause a shard routing table refresh, but the moveChunk attempt will.
- assert.eq(2,
- newVersion.i,
- "The shard routing table should refresh on a failed migration and show the split");
+assert.eq(oldVersion.t,
+ newVersion.t,
+ "The shard version major value should not change after a failed migration");
+// Split does not cause a shard routing table refresh, but the moveChunk attempt will.
+assert.eq(2,
+ newVersion.i,
+ "The shard routing table should refresh on a failed migration and show the split");
- assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'failMigrationCommit', mode: 'off'}));
+assert.commandWorked(
+ st.shard0.getDB("admin").runCommand({configureFailPoint: 'failMigrationCommit', mode: 'off'}));
- // migrationCommitNetworkError -- mimic migration commit command returning a network error,
- // whereupon the config server is queried to determine that this commit was successful.
- assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'migrationCommitNetworkError', mode: 'alwaysOn'}));
+// migrationCommitNetworkError -- mimic migration commit command returning a network error,
+// whereupon the config server is queried to determine that this commit was successful.
+assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'migrationCommitNetworkError', mode: 'alwaysOn'}));
- // Run a migration where there will still be chunks in the collection remaining on the shard
- // afterwards. This will cause the collection's shardVersion to be bumped higher.
- oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+// Run a migration where there will still be chunks in the collection remaining on the shard
+// afterwards. This will cause the collection's shardVersion to be bumped higher.
+oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 1}, to: st.shard1.shardName}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 1}, to: st.shard1.shardName}));
- newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
- assert.lt(
- oldVersion.t, newVersion.t, "The major value in the shard version should have increased");
- assert.eq(1, newVersion.i, "The minor value in the shard version should be 1");
+assert.lt(oldVersion.t, newVersion.t, "The major value in the shard version should have increased");
+assert.eq(1, newVersion.i, "The minor value in the shard version should be 1");
- // Run a migration to move off the shard's last chunk in the collection. The collection's
- // shardVersion will be reset.
- oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+// Run a migration to move off the shard's last chunk in the collection. The collection's
+// shardVersion will be reset.
+oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName}));
- newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
- assert.gt(oldVersion.t,
- newVersion.t,
- "The version prior to the migration should be greater than the reset value");
+assert.gt(oldVersion.t,
+ newVersion.t,
+ "The version prior to the migration should be greater than the reset value");
- assert.eq(
- 0, newVersion.t, "The shard version should have reset, but the major value is not zero");
- assert.eq(
- 0, newVersion.i, "The shard version should have reset, but the minor value is not zero");
+assert.eq(0, newVersion.t, "The shard version should have reset, but the major value is not zero");
+assert.eq(0, newVersion.i, "The shard version should have reset, but the minor value is not zero");
- assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'migrationCommitNetworkError', mode: 'off'}));
-
- st.stop();
+assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'migrationCommitNetworkError', mode: 'off'}));
+st.stop();
})();
diff --git a/jstests/sharding/migration_id_index.js b/jstests/sharding/migration_id_index.js
index 72501a226cb..cb9fc45d7db 100644
--- a/jstests/sharding/migration_id_index.js
+++ b/jstests/sharding/migration_id_index.js
@@ -1,45 +1,45 @@
// This tests that when a chunk migration occurs, all replica set members of the destination shard
// get the correct _id index version for the collection.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/get_index_helpers.js");
- var st = new ShardingTest({shards: 2, rs: {nodes: 2}});
- var testDB = st.s.getDB("test");
- assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
+var st = new ShardingTest({shards: 2, rs: {nodes: 2}});
+var testDB = st.s.getDB("test");
+assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
- // Create a collection with a v:1 _id index.
- var coll = testDB.getCollection("migration_id_index");
- coll.drop();
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
- st.rs0.awaitReplication();
- var spec = GetIndexHelpers.findByName(
- st.rs0.getPrimary().getDB("test").migration_id_index.getIndexes(), "_id_");
- assert.neq(spec, null, "_id index spec not found");
- assert.eq(spec.v, 1, tojson(spec));
- spec = GetIndexHelpers.findByName(
- st.rs0.getSecondary().getDB("test").migration_id_index.getIndexes(), "_id_");
- assert.neq(spec, null, "_id index spec not found");
- assert.eq(spec.v, 1, tojson(spec));
+// Create a collection with a v:1 _id index.
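+// v:1 is not the default index version, so the checks below can tell whether the recipient
+// copied the donor's exact _id index spec rather than building a default one.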
+var coll = testDB.getCollection("migration_id_index");
+coll.drop();
+assert.commandWorked(
+ testDB.createCollection(coll.getName(), {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
+st.rs0.awaitReplication();
+var spec = GetIndexHelpers.findByName(
+ st.rs0.getPrimary().getDB("test").migration_id_index.getIndexes(), "_id_");
+assert.neq(spec, null, "_id index spec not found");
+assert.eq(spec.v, 1, tojson(spec));
+spec = GetIndexHelpers.findByName(
+ st.rs0.getSecondary().getDB("test").migration_id_index.getIndexes(), "_id_");
+assert.neq(spec, null, "_id index spec not found");
+assert.eq(spec.v, 1, tojson(spec));
- // Move a chunk to the non-primary shard.
- assert.commandWorked(testDB.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
- assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: 5}}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {a: 6}, to: st.shard1.shardName}));
+// Move a chunk to the non-primary shard.
+assert.commandWorked(testDB.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
+assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: 5}}));
+assert.commandWorked(
+ testDB.adminCommand({moveChunk: coll.getFullName(), find: {a: 6}, to: st.shard1.shardName}));
- // Check that the collection was created with a v:1 _id index on the non-primary shard.
- spec = GetIndexHelpers.findByName(
- st.rs1.getPrimary().getDB("test").migration_id_index.getIndexes(), "_id_");
- assert.neq(spec, null, "_id index spec not found");
- assert.eq(spec.v, 1, tojson(spec));
- spec = GetIndexHelpers.findByName(
- st.rs1.getSecondary().getDB("test").migration_id_index.getIndexes(), "_id_");
- assert.neq(spec, null, "_id index spec not found");
- assert.eq(spec.v, 1, tojson(spec));
+// Check that the collection was created with a v:1 _id index on the non-primary shard.
+spec = GetIndexHelpers.findByName(st.rs1.getPrimary().getDB("test").migration_id_index.getIndexes(),
+ "_id_");
+assert.neq(spec, null, "_id index spec not found");
+assert.eq(spec.v, 1, tojson(spec));
+spec = GetIndexHelpers.findByName(
+ st.rs1.getSecondary().getDB("test").migration_id_index.getIndexes(), "_id_");
+assert.neq(spec, null, "_id index spec not found");
+assert.eq(spec.v, 1, tojson(spec));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/migration_ignore_interrupts_1.js b/jstests/sharding/migration_ignore_interrupts_1.js
index 83a77f08445..0272a204661 100644
--- a/jstests/sharding/migration_ignore_interrupts_1.js
+++ b/jstests/sharding/migration_ignore_interrupts_1.js
@@ -6,72 +6,72 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- "use strict";
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
- var st = new ShardingTest({shards: 3});
-
- var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
- coll1 = mongos.getCollection(ns1), shard0 = st.shard0, shard1 = st.shard1,
- shard2 = st.shard2, shard0Coll1 = shard0.getCollection(ns1),
- shard1Coll1 = shard1.getCollection(ns1), shard2Coll1 = shard2.getCollection(ns1);
-
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
-
- assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
- assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 0}}));
- assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 10}}));
- assert.writeOK(coll1.insert({a: -10}));
- assert.writeOK(coll1.insert({a: 0}));
- assert.writeOK(coll1.insert({a: 10}));
- assert.eq(3, shard0Coll1.find().itcount());
- assert.eq(0, shard1Coll1.find().itcount());
- assert.eq(0, shard2Coll1.find().itcount());
- assert.eq(3, coll1.find().itcount());
-
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns1, find: {a: 10}, to: st.shard2.shardName, _waitForDelete: true}));
-
- // Shard0:
- // coll1: [-inf, 0) [0, 10)
- // Shard1:
- // Shard2:
- // coll1: [10, +inf)
-
- jsTest.log("Set up complete, now proceeding to test that migration interruptions fail.");
-
- // Start a migration between shard0 and shard1 on coll1 and then pause it
- pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
- waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
-
- assert.commandFailedWithCode(
- admin.runCommand({moveChunk: ns1, find: {a: -10}, to: st.shard2.shardName}),
- ErrorCodes.ConflictingOperationInProgress,
- "(1) A shard should not be able to be the donor for two ongoing migrations.");
-
- assert.commandFailedWithCode(
- admin.runCommand({moveChunk: ns1, find: {a: 10}, to: st.shard1.shardName}),
- ErrorCodes.ConflictingOperationInProgress,
- "(2) A shard should not be able to be the recipient of two ongoing migrations.");
-
- assert.commandFailedWithCode(
- admin.runCommand({moveChunk: ns1, find: {a: 10}, to: st.shard0.shardName}),
- ErrorCodes.ConflictingOperationInProgress,
- "(3) A shard should not be able to be both a donor and recipient of migrations.");
-
- // Finish migration
- unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
- assert.doesNotThrow(function() {
- joinMoveChunk();
- });
- assert.eq(1, shard0Coll1.find().itcount());
- assert.eq(1, shard1Coll1.find().itcount());
- assert.eq(1, shard2Coll1.find().itcount());
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+"use strict";
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+var st = new ShardingTest({shards: 3});
+
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
+ coll1 = mongos.getCollection(ns1), shard0 = st.shard0, shard1 = st.shard1, shard2 = st.shard2,
+ shard0Coll1 = shard0.getCollection(ns1), shard1Coll1 = shard1.getCollection(ns1),
+ shard2Coll1 = shard2.getCollection(ns1);
+
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
+assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 0}}));
+assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 10}}));
+assert.writeOK(coll1.insert({a: -10}));
+assert.writeOK(coll1.insert({a: 0}));
+assert.writeOK(coll1.insert({a: 10}));
+assert.eq(3, shard0Coll1.find().itcount());
+assert.eq(0, shard1Coll1.find().itcount());
+assert.eq(0, shard2Coll1.find().itcount());
+assert.eq(3, coll1.find().itcount());
+
+assert.commandWorked(admin.runCommand(
+ {moveChunk: ns1, find: {a: 10}, to: st.shard2.shardName, _waitForDelete: true}));
+
+// Shard0:
+// coll1: [-inf, 0) [0, 10)
+// Shard1:
+// Shard2:
+// coll1: [10, +inf)
+
+jsTest.log("Set up complete, now proceeding to test that migration interruptions fail.");
+
+// Start a migration between shard0 and shard1 on coll1 and then pause it
+pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
+waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
+
+assert.commandFailedWithCode(
+ admin.runCommand({moveChunk: ns1, find: {a: -10}, to: st.shard2.shardName}),
+ ErrorCodes.ConflictingOperationInProgress,
+ "(1) A shard should not be able to be the donor for two ongoing migrations.");
+
+assert.commandFailedWithCode(
+ admin.runCommand({moveChunk: ns1, find: {a: 10}, to: st.shard1.shardName}),
+ ErrorCodes.ConflictingOperationInProgress,
+ "(2) A shard should not be able to be the recipient of two ongoing migrations.");
+
+assert.commandFailedWithCode(
+ admin.runCommand({moveChunk: ns1, find: {a: 10}, to: st.shard0.shardName}),
+ ErrorCodes.ConflictingOperationInProgress,
+ "(3) A shard should not be able to be both a donor and recipient of migrations.");
+
+// Finish migration
+unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+assert.doesNotThrow(function() {
+ joinMoveChunk();
+});
+assert.eq(1, shard0Coll1.find().itcount());
+assert.eq(1, shard1Coll1.find().itcount());
+assert.eq(1, shard2Coll1.find().itcount());
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/migration_ignore_interrupts_2.js b/jstests/sharding/migration_ignore_interrupts_2.js
index 718c6a347ff..b60fa50ccf2 100644
--- a/jstests/sharding/migration_ignore_interrupts_2.js
+++ b/jstests/sharding/migration_ignore_interrupts_2.js
@@ -4,54 +4,54 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- "use strict";
+"use strict";
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2});
- var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
- coll1 = mongos.getCollection(ns1), shard0 = st.shard0, shard1 = st.shard1,
- shard0Coll1 = shard0.getCollection(ns1), shard1Coll1 = shard1.getCollection(ns1);
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
+ coll1 = mongos.getCollection(ns1), shard0 = st.shard0, shard1 = st.shard1,
+ shard0Coll1 = shard0.getCollection(ns1), shard1Coll1 = shard1.getCollection(ns1);
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
- assert.writeOK(coll1.insert({a: 0}));
- assert.eq(1, shard0Coll1.find().itcount());
- assert.eq(0, shard1Coll1.find().itcount());
- assert.eq(1, coll1.find().itcount());
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
+assert.writeOK(coll1.insert({a: 0}));
+assert.eq(1, shard0Coll1.find().itcount());
+assert.eq(0, shard1Coll1.find().itcount());
+assert.eq(1, coll1.find().itcount());
- // Shard0:
- // coll1: [-inf, +inf)
- // Shard1:
+// Shard0:
+// coll1: [-inf, +inf)
+// Shard1:
- jsTest.log("Set up complete, now proceeding to test that migration interruption fails.");
+jsTest.log("Set up complete, now proceeding to test that migration interruption fails.");
- // Start a migration between shard0 and shard1 on coll1, pause in steady state before commit.
- pauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
- waitForMoveChunkStep(shard0, moveChunkStepNames.reachedSteadyState);
+// Start a migration between shard0 and shard1 on coll1, pause in steady state before commit.
+pauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
+waitForMoveChunkStep(shard0, moveChunkStepNames.reachedSteadyState);
- jsTest.log('Sending false commit command....');
- assert.commandFailed(
- shard1.adminCommand({'_recvChunkCommit': 1, 'sessionId': "fake-migration-session-id"}));
+jsTest.log('Sending false commit command....');
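+// A commit request whose sessionId does not match the active migration must be rejected;
+// otherwise an arbitrary caller could prematurely commit the chunk migration.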
+assert.commandFailed(
+ shard1.adminCommand({'_recvChunkCommit': 1, 'sessionId': "fake-migration-session-id"}));
- jsTest.log("Checking migration recipient is still in steady state, waiting for commit....");
- var res = shard1.adminCommand('_recvChunkStatus');
- assert.commandWorked(res);
- assert.eq(true, res.state === "steady", "False commit command succeeded.");
+jsTest.log("Checking migration recipient is still in steady state, waiting for commit....");
+var res = shard1.adminCommand('_recvChunkStatus');
+assert.commandWorked(res);
+assert.eq(true, res.state === "steady", "False commit command succeeded.");
- // Finish migration.
- unpauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
- assert.doesNotThrow(function() {
- joinMoveChunk();
- });
+// Finish migration.
+unpauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
+assert.doesNotThrow(function() {
+ joinMoveChunk();
+});
- assert.eq(0, shard0Coll1.find().itcount());
- assert.eq(1, shard1Coll1.find().itcount());
+assert.eq(0, shard0Coll1.find().itcount());
+assert.eq(1, shard1Coll1.find().itcount());
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/migration_ignore_interrupts_3.js b/jstests/sharding/migration_ignore_interrupts_3.js
index 13c5b15f6f9..e48159b77b8 100644
--- a/jstests/sharding/migration_ignore_interrupts_3.js
+++ b/jstests/sharding/migration_ignore_interrupts_3.js
@@ -8,101 +8,99 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- "use strict";
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
- var st = new ShardingTest({shards: 3});
-
- var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
- ns2 = dbName + ".bar", coll1 = mongos.getCollection(ns1), coll2 = mongos.getCollection(ns2),
- shard0 = st.shard0, shard1 = st.shard1, shard2 = st.shard2,
- shard0Coll1 = shard0.getCollection(ns1), shard1Coll1 = shard1.getCollection(ns1),
- shard2Coll1 = shard2.getCollection(ns1), shard0Coll2 = shard0.getCollection(ns2),
- shard1Coll2 = shard1.getCollection(ns2), shard2Coll2 = shard2.getCollection(ns2);
-
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
-
- assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
- assert.writeOK(coll1.insert({a: 0}));
- assert.eq(1, shard0Coll1.find().itcount());
- assert.eq(0, shard1Coll1.find().itcount());
- assert.eq(0, shard2Coll1.find().itcount());
- assert.eq(1, coll1.find().itcount());
-
- assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
- assert.writeOK(coll2.insert({a: 0}));
- assert.eq(1, shard0Coll2.find().itcount());
- assert.eq(0, shard1Coll2.find().itcount());
- assert.eq(0, shard2Coll2.find().itcount());
- assert.eq(1, coll2.find().itcount());
-
- // Shard0:
- // coll1: [-inf, +inf)
- // coll2: [-inf, +inf)
- // Shard1:
- // Shard2:
-
- jsTest.log("Set up complete, now proceeding to test that migration interruption fails.");
-
- // Start coll1 migration to shard1: pause recipient after delete step, donor before interrupt
- // check.
- pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
- pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
- waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
-
- // Abort migration on donor side, recipient is unaware.
- let inProgressOps = admin.aggregate([{$currentOp: {'allUsers': true}}]);
- var abortedMigration = false;
- let inProgressStr = '';
- while (inProgressOps.hasNext()) {
- let op = inProgressOps.next();
- inProgressStr += tojson(op);
- if (op.command.moveChunk) {
- admin.killOp(op.opid);
- abortedMigration = true;
- }
+"use strict";
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+var st = new ShardingTest({shards: 3});
+
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
+ ns2 = dbName + ".bar", coll1 = mongos.getCollection(ns1), coll2 = mongos.getCollection(ns2),
+ shard0 = st.shard0, shard1 = st.shard1, shard2 = st.shard2,
+ shard0Coll1 = shard0.getCollection(ns1), shard1Coll1 = shard1.getCollection(ns1),
+ shard2Coll1 = shard2.getCollection(ns1), shard0Coll2 = shard0.getCollection(ns2),
+ shard1Coll2 = shard1.getCollection(ns2), shard2Coll2 = shard2.getCollection(ns2);
+
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
+assert.writeOK(coll1.insert({a: 0}));
+assert.eq(1, shard0Coll1.find().itcount());
+assert.eq(0, shard1Coll1.find().itcount());
+assert.eq(0, shard2Coll1.find().itcount());
+assert.eq(1, coll1.find().itcount());
+
+assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
+assert.writeOK(coll2.insert({a: 0}));
+assert.eq(1, shard0Coll2.find().itcount());
+assert.eq(0, shard1Coll2.find().itcount());
+assert.eq(0, shard2Coll2.find().itcount());
+assert.eq(1, coll2.find().itcount());
+
+// Shard0:
+// coll1: [-inf, +inf)
+// coll2: [-inf, +inf)
+// Shard1:
+// Shard2:
+
+jsTest.log("Set up complete, now proceeding to test that migration interruption fails.");
+
+// Start coll1 migration to shard1: pause recipient after delete step, donor before interrupt
+// check.
+pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
+waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
+
+// Abort migration on donor side, recipient is unaware.
+let inProgressOps = admin.aggregate([{$currentOp: {'allUsers': true}}]);
+var abortedMigration = false;
+let inProgressStr = '';
+while (inProgressOps.hasNext()) {
+ let op = inProgressOps.next();
+ inProgressStr += tojson(op);
+ if (op.command.moveChunk) {
+ admin.killOp(op.opid);
+ abortedMigration = true;
}
- assert.eq(
- true, abortedMigration, "Failed to abort migration, current running ops: " + inProgressStr);
- unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
- assert.throws(function() {
- joinMoveChunk();
- });
-
- // Start coll2 migration to shard2, pause recipient after delete step.
- pauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
- joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {a: 0}, null, coll2.getFullName(), st.shard2.shardName);
- waitForMigrateStep(shard2, migrateStepNames.deletedPriorDataInRange);
-
- jsTest.log('Releasing coll1 migration recipient, whose clone command should fail....');
- unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
- assert.soon(function() {
- // Wait for the destination shard to report that it is not in an active migration.
- var res = shard1.adminCommand({'_recvChunkStatus': 1});
- return (res.active == false);
- }, "coll1 migration recipient didn't abort migration in clone phase.", 2 * 60 * 1000);
- assert.eq(
- 1, shard0Coll1.find().itcount(), "donor shard0 completed a migration that it aborted.");
- assert.eq(0,
- shard1Coll1.find().itcount(),
- "shard1 cloned documents despite donor migration abortion.");
-
- jsTest.log('Finishing coll2 migration, which should succeed....');
- unpauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
- assert.doesNotThrow(function() {
- joinMoveChunk();
- });
-
- assert.eq(0,
- shard0Coll2.find().itcount(),
- "donor shard0 failed to complete a migration after aborting a prior migration.");
- assert.eq(1, shard2Coll2.find().itcount(), "shard2 failed to complete migration.");
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+}
+assert.eq(
+ true, abortedMigration, "Failed to abort migration, current running ops: " + inProgressStr);
+unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+assert.throws(function() {
+ joinMoveChunk();
+});
+
+// Start coll2 migration to shard2, pause recipient after delete step.
+pauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
+joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 0}, null, coll2.getFullName(), st.shard2.shardName);
+waitForMigrateStep(shard2, migrateStepNames.deletedPriorDataInRange);
+
+jsTest.log('Releasing coll1 migration recipient, whose clone command should fail....');
+unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+assert.soon(function() {
+ // Wait for the destination shard to report that it is not in an active migration.
+ var res = shard1.adminCommand({'_recvChunkStatus': 1});
+ return (res.active == false);
+}, "coll1 migration recipient didn't abort migration in clone phase.", 2 * 60 * 1000);
+assert.eq(1, shard0Coll1.find().itcount(), "donor shard0 completed a migration that it aborted.");
+assert.eq(
+    0, shard1Coll1.find().itcount(), "shard1 cloned documents despite the aborted migration.");
+
+jsTest.log('Finishing coll2 migration, which should succeed....');
+unpauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
+assert.doesNotThrow(function() {
+ joinMoveChunk();
+});
+
+assert.eq(0,
+ shard0Coll2.find().itcount(),
+ "donor shard0 failed to complete a migration after aborting a prior migration.");
+assert.eq(1, shard2Coll2.find().itcount(), "shard2 failed to complete migration.");
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/migration_ignore_interrupts_4.js b/jstests/sharding/migration_ignore_interrupts_4.js
index 64c7b89e25e..bc692a9897c 100644
--- a/jstests/sharding/migration_ignore_interrupts_4.js
+++ b/jstests/sharding/migration_ignore_interrupts_4.js
@@ -8,102 +8,101 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- "use strict";
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
- var st = new ShardingTest({shards: 3});
-
- var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
- ns2 = dbName + ".bar", coll1 = mongos.getCollection(ns1), coll2 = mongos.getCollection(ns2),
- shard0 = st.shard0, shard1 = st.shard1, shard2 = st.shard2,
- shard0Coll1 = shard0.getCollection(ns1), shard1Coll1 = shard1.getCollection(ns1),
- shard2Coll1 = shard2.getCollection(ns1), shard0Coll2 = shard0.getCollection(ns2),
- shard1Coll2 = shard1.getCollection(ns2), shard2Coll2 = shard2.getCollection(ns2);
-
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
-
- assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
- assert.writeOK(coll1.insert({a: 0}));
- assert.eq(1, shard0Coll1.find().itcount());
- assert.eq(0, shard1Coll1.find().itcount());
- assert.eq(0, shard2Coll1.find().itcount());
- assert.eq(1, coll1.find().itcount());
-
- assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
- assert.writeOK(coll2.insert({a: 0}));
- assert.eq(1, shard0Coll2.find().itcount());
- assert.eq(0, shard1Coll2.find().itcount());
- assert.eq(0, shard2Coll2.find().itcount());
- assert.eq(1, coll2.find().itcount());
-
- // Shard0:
- // coll1: [-inf, +inf)
- // coll2: [-inf, +inf)
- // Shard1:
- // Shard2:
-
- jsTest.log("Set up complete, now proceeding to test that migration interruption fails.");
-
- // Start coll1 migration to shard1: pause recipient after cloning, donor before interrupt check
- pauseMigrateAtStep(shard1, migrateStepNames.cloned);
- pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
- waitForMigrateStep(shard1, migrateStepNames.cloned);
-
- // Abort migration on donor side, recipient is unaware
- let inProgressOps = admin.aggregate([{$currentOp: {'allUsers': true}}]);
- var abortedMigration = false;
- let inProgressStr = '';
- while (inProgressOps.hasNext()) {
- let op = inProgressOps.next();
- inProgressStr += tojson(op);
- if (op.command.moveChunk) {
- admin.killOp(op.opid);
- abortedMigration = true;
- }
+"use strict";
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+var st = new ShardingTest({shards: 3});
+
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
+ ns2 = dbName + ".bar", coll1 = mongos.getCollection(ns1), coll2 = mongos.getCollection(ns2),
+ shard0 = st.shard0, shard1 = st.shard1, shard2 = st.shard2,
+ shard0Coll1 = shard0.getCollection(ns1), shard1Coll1 = shard1.getCollection(ns1),
+ shard2Coll1 = shard2.getCollection(ns1), shard0Coll2 = shard0.getCollection(ns2),
+ shard1Coll2 = shard1.getCollection(ns2), shard2Coll2 = shard2.getCollection(ns2);
+
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
+assert.writeOK(coll1.insert({a: 0}));
+assert.eq(1, shard0Coll1.find().itcount());
+assert.eq(0, shard1Coll1.find().itcount());
+assert.eq(0, shard2Coll1.find().itcount());
+assert.eq(1, coll1.find().itcount());
+
+assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
+assert.writeOK(coll2.insert({a: 0}));
+assert.eq(1, shard0Coll2.find().itcount());
+assert.eq(0, shard1Coll2.find().itcount());
+assert.eq(0, shard2Coll2.find().itcount());
+assert.eq(1, coll2.find().itcount());
+
+// Shard0:
+// coll1: [-inf, +inf)
+// coll2: [-inf, +inf)
+// Shard1:
+// Shard2:
+
+jsTest.log("Set up complete, now proceeding to test that migration interruption fails.");
+
+// Start coll1 migration to shard1: pause recipient after cloning, donor before interrupt check
+pauseMigrateAtStep(shard1, migrateStepNames.cloned);
+pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
+waitForMigrateStep(shard1, migrateStepNames.cloned);
+
+// Abort migration on donor side, recipient is unaware
+let inProgressOps = admin.aggregate([{$currentOp: {'allUsers': true}}]);
+var abortedMigration = false;
+let inProgressStr = '';
+while (inProgressOps.hasNext()) {
+ let op = inProgressOps.next();
+ inProgressStr += tojson(op);
+ if (op.command.moveChunk) {
+ admin.killOp(op.opid);
+ abortedMigration = true;
}
- assert.eq(
- true, abortedMigration, "Failed to abort migration, current running ops: " + inProgressStr);
- unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
- assert.throws(function() {
- joinMoveChunk();
- });
-
- // Start coll2 migration to shard2, pause recipient after cloning step.
- pauseMigrateAtStep(shard2, migrateStepNames.cloned);
- joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {a: 0}, null, coll2.getFullName(), st.shard2.shardName);
- waitForMigrateStep(shard2, migrateStepNames.cloned);
-
- // Populate donor (shard0) xfermods log.
- assert.writeOK(coll2.insert({a: 1}));
- assert.writeOK(coll2.insert({a: 2}));
- assert.eq(3, coll2.find().itcount(), "Failed to insert documents into coll2.");
- assert.eq(3, shard0Coll2.find().itcount());
-
- jsTest.log('Releasing coll1 migration recipient, whose transferMods command should fail....');
- unpauseMigrateAtStep(shard1, migrateStepNames.cloned);
- assert.soon(function() {
- // Wait for the destination shard to report that it is not in an active migration.
- var res = shard1.adminCommand({'_recvChunkStatus': 1});
- return (res.active == false);
- }, "coll1 migration recipient didn't abort migration in catchup phase.", 2 * 60 * 1000);
- assert.eq(
- 1, shard0Coll1.find().itcount(), "donor shard0 completed a migration that it aborted.");
-
- jsTest.log('Finishing coll2 migration, which should succeed....');
- unpauseMigrateAtStep(shard2, migrateStepNames.cloned);
- assert.doesNotThrow(function() {
- joinMoveChunk();
- });
- assert.eq(0,
- shard0Coll2.find().itcount(),
- "donor shard0 failed to complete a migration after aborting a prior migration.");
- assert.eq(3, shard2Coll2.find().itcount(), "shard2 failed to complete migration.");
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+}
+assert.eq(
+ true, abortedMigration, "Failed to abort migration, current running ops: " + inProgressStr);
+unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+assert.throws(function() {
+ joinMoveChunk();
+});
+
+// Start coll2 migration to shard2, pause recipient after cloning step.
+pauseMigrateAtStep(shard2, migrateStepNames.cloned);
+joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 0}, null, coll2.getFullName(), st.shard2.shardName);
+waitForMigrateStep(shard2, migrateStepNames.cloned);
+
+// Populate donor (shard0) xfermods log.
+assert.writeOK(coll2.insert({a: 1}));
+assert.writeOK(coll2.insert({a: 2}));
+assert.eq(3, coll2.find().itcount(), "Failed to insert documents into coll2.");
+assert.eq(3, shard0Coll2.find().itcount());
+
+jsTest.log('Releasing coll1 migration recipient, whose transferMods command should fail....');
+unpauseMigrateAtStep(shard1, migrateStepNames.cloned);
+assert.soon(function() {
+ // Wait for the destination shard to report that it is not in an active migration.
+ var res = shard1.adminCommand({'_recvChunkStatus': 1});
+ return (res.active == false);
+}, "coll1 migration recipient didn't abort migration in catchup phase.", 2 * 60 * 1000);
+assert.eq(1, shard0Coll1.find().itcount(), "donor shard0 completed a migration that it aborted.");
+
+jsTest.log('Finishing coll2 migration, which should succeed....');
+unpauseMigrateAtStep(shard2, migrateStepNames.cloned);
+assert.doesNotThrow(function() {
+ joinMoveChunk();
+});
+assert.eq(0,
+ shard0Coll2.find().itcount(),
+ "donor shard0 failed to complete a migration after aborting a prior migration.");
+assert.eq(3, shard2Coll2.find().itcount(), "shard2 failed to complete migration.");
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
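Both interrupt tests abort the donor side the same way: scan $currentOp for the in-flight moveChunk and kill it. A minimal sketch of that loop, assuming admin is a mongos admin DB with a migration in progress:

var ops = admin.aggregate([{$currentOp: {allUsers: true}}]);
var killed = false;
while (ops.hasNext()) {
    var op = ops.next();
    if (op.command && op.command.moveChunk) {
        // Killing the donor-side command does not notify the recipient shard.
        admin.killOp(op.opid);
        killed = true;
    }
}
assert(killed, "no moveChunk operation found to kill");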
diff --git a/jstests/sharding/migration_move_chunk_after_receive.js b/jstests/sharding/migration_move_chunk_after_receive.js
index 662dc1879e4..fe28af0d8c3 100644
--- a/jstests/sharding/migration_move_chunk_after_receive.js
+++ b/jstests/sharding/migration_move_chunk_after_receive.js
@@ -5,70 +5,70 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- 'use strict';
+'use strict';
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- var st = new ShardingTest({shards: 3});
+var st = new ShardingTest({shards: 3});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
- var testDB = st.s0.getDB('TestDB');
- var testColl = testDB.TestColl;
+var testDB = st.s0.getDB('TestDB');
+var testColl = testDB.TestColl;
- // Create 3 chunks with one document each and move them so that 0 is on shard0, 1 is on shard1,
- // etc.
- assert.writeOK(testColl.insert({Key: 0, Value: 'Value'}));
- assert.writeOK(testColl.insert({Key: 100, Value: 'Value'}));
- assert.writeOK(testColl.insert({Key: 101, Value: 'Value'}));
- assert.writeOK(testColl.insert({Key: 200, Value: 'Value'}));
+// Create 3 chunks with one document each and move them so that 0 is on shard0, 1 is on shard1,
+// etc.
+assert.writeOK(testColl.insert({Key: 0, Value: 'Value'}));
+assert.writeOK(testColl.insert({Key: 100, Value: 'Value'}));
+assert.writeOK(testColl.insert({Key: 101, Value: 'Value'}));
+assert.writeOK(testColl.insert({Key: 200, Value: 'Value'}));
- assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 100}}));
- assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 101}}));
- assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 200}}));
+assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 100}}));
+assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 101}}));
+assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 200}}));
- assert.commandWorked(st.s0.adminCommand({
- moveChunk: 'TestDB.TestColl',
- find: {Key: 100},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
- assert.commandWorked(st.s0.adminCommand({
- moveChunk: 'TestDB.TestColl',
- find: {Key: 101},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
- assert.commandWorked(st.s0.adminCommand({
- moveChunk: 'TestDB.TestColl',
- find: {Key: 200},
- to: st.shard2.shardName,
- _waitForDelete: true
- }));
+assert.commandWorked(st.s0.adminCommand({
+ moveChunk: 'TestDB.TestColl',
+ find: {Key: 100},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(st.s0.adminCommand({
+ moveChunk: 'TestDB.TestColl',
+ find: {Key: 101},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(st.s0.adminCommand({
+ moveChunk: 'TestDB.TestColl',
+ find: {Key: 200},
+ to: st.shard2.shardName,
+ _waitForDelete: true
+}));
- // Start moving chunk 0 from shard0 to shard1 and pause it just before the metadata is written
- // (but after the migration of the documents has been committed on the recipient)
- pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
- var joinMoveChunk0 = moveChunkParallel(
- staticMongod, st.s0.host, {Key: 0}, null, 'TestDB.TestColl', st.shard1.shardName);
- waitForMoveChunkStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
+// Start moving chunk 0 from shard0 to shard1 and pause it just before the metadata is written
+// (but after the migration of the documents has been committed on the recipient)
+pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
+var joinMoveChunk0 = moveChunkParallel(
+ staticMongod, st.s0.host, {Key: 0}, null, 'TestDB.TestColl', st.shard1.shardName);
+waitForMoveChunkStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
- pauseMoveChunkAtStep(st.shard1, moveChunkStepNames.chunkDataCommitted);
- var joinMoveChunk1 = moveChunkParallel(
- staticMongod, st.s0.host, {Key: 100}, null, 'TestDB.TestColl', st.shard2.shardName);
- waitForMoveChunkStep(st.shard1, moveChunkStepNames.chunkDataCommitted);
+pauseMoveChunkAtStep(st.shard1, moveChunkStepNames.chunkDataCommitted);
+var joinMoveChunk1 = moveChunkParallel(
+ staticMongod, st.s0.host, {Key: 100}, null, 'TestDB.TestColl', st.shard2.shardName);
+waitForMoveChunkStep(st.shard1, moveChunkStepNames.chunkDataCommitted);
- unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
- unpauseMoveChunkAtStep(st.shard1, moveChunkStepNames.chunkDataCommitted);
+unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
+unpauseMoveChunkAtStep(st.shard1, moveChunkStepNames.chunkDataCommitted);
- joinMoveChunk0();
- joinMoveChunk1();
+joinMoveChunk0();
+joinMoveChunk1();
- var foundDocs = testColl.find().toArray();
- assert.eq(4, foundDocs.length, 'Incorrect number of documents found ' + tojson(foundDocs));
+var foundDocs = testColl.find().toArray();
+assert.eq(4, foundDocs.length, 'Incorrect number of documents found ' + tojson(foundDocs));
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
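All of the migration tests in this patch drive moveChunk through the helpers in jstests/libs/chunk_manipulation_util.js. A minimal sketch of the pause / run-in-background / resume pattern they share, assuming a running ShardingTest st and a sharded namespace 'TestDB.TestColl':

load('./jstests/libs/chunk_manipulation_util.js');

var staticMongod = MongoRunner.runMongod({});  // Required by moveChunkParallel/startParallelOps.

// Suspend the donor at a known step, then kick off the migration in the background.
pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.startedMoveChunk);
var joinMoveChunk = moveChunkParallel(
    staticMongod, st.s0.host, {Key: 0}, null, 'TestDB.TestColl', st.shard1.shardName);
waitForMoveChunkStep(st.shard0, moveChunkStepNames.startedMoveChunk);

// ... assertions against the suspended migration go here ...

unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.startedMoveChunk);
joinMoveChunk();
MongoRunner.stopMongod(staticMongod);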
diff --git a/jstests/sharding/migration_server_status.js b/jstests/sharding/migration_server_status.js
index 63f4c828d1e..423b8353d89 100644
--- a/jstests/sharding/migration_server_status.js
+++ b/jstests/sharding/migration_server_status.js
@@ -6,72 +6,71 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- 'use strict';
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
- var st = new ShardingTest({shards: 2, mongos: 1});
-
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("db.coll");
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
-
- // Pause the migration once it starts on both shards -- somewhat arbitrary pause point.
- pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.startedMoveChunk);
-
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {_id: 1}, null, coll.getFullName(), st.shard1.shardName);
-
- var assertMigrationStatusOnServerStatus = function(serverStatusResult,
- sourceShard,
- destinationShard,
- isDonorShard,
- minKey,
- maxKey,
- collectionName) {
- var migrationResult = serverStatusResult.sharding.migrations;
- assert.eq(sourceShard, migrationResult.source);
- assert.eq(destinationShard, migrationResult.destination);
- assert.eq(isDonorShard, migrationResult.isDonorShard);
- assert.eq(minKey, migrationResult.chunk.min);
- assert.eq(maxKey, migrationResult.chunk.max);
- assert.eq(collectionName, migrationResult.collection);
- };
-
- waitForMoveChunkStep(st.shard0, moveChunkStepNames.startedMoveChunk);
-
- // Source shard should return a migration status.
- var shard0ServerStatus = st.shard0.getDB('admin').runCommand({serverStatus: 1});
- assert(shard0ServerStatus.sharding.migrations);
- assertMigrationStatusOnServerStatus(shard0ServerStatus,
- st.shard0.shardName,
- st.shard1.shardName,
- true,
- {"_id": 0},
- {"_id": {"$maxKey": 1}},
- coll + "");
-
- // Destination shard should not return any migration status.
- var shard1ServerStatus = st.shard1.getDB('admin').runCommand({serverStatus: 1});
- assert(!shard1ServerStatus.sharding.migrations);
-
- // Mongos should never return a migration status.
- var mongosServerStatus = st.s0.getDB('admin').runCommand({serverStatus: 1});
- assert(!mongosServerStatus.sharding.migrations);
-
- unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.startedMoveChunk);
- joinMoveChunk();
-
- // Migration is over, should no longer get a migration status.
- var shard0ServerStatus = st.shard0.getDB('admin').runCommand({serverStatus: 1});
- assert(!shard0ServerStatus.sharding.migrations);
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
-
+'use strict';
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+var st = new ShardingTest({shards: 2, mongos: 1});
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("db.coll");
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+
+// Pause the migration once it starts on both shards -- somewhat arbitrary pause point.
+pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.startedMoveChunk);
+
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {_id: 1}, null, coll.getFullName(), st.shard1.shardName);
+
+var assertMigrationStatusOnServerStatus = function(serverStatusResult,
+ sourceShard,
+ destinationShard,
+ isDonorShard,
+ minKey,
+ maxKey,
+ collectionName) {
+ var migrationResult = serverStatusResult.sharding.migrations;
+ assert.eq(sourceShard, migrationResult.source);
+ assert.eq(destinationShard, migrationResult.destination);
+ assert.eq(isDonorShard, migrationResult.isDonorShard);
+ assert.eq(minKey, migrationResult.chunk.min);
+ assert.eq(maxKey, migrationResult.chunk.max);
+ assert.eq(collectionName, migrationResult.collection);
+};
+
+waitForMoveChunkStep(st.shard0, moveChunkStepNames.startedMoveChunk);
+
+// Source shard should return a migration status.
+var shard0ServerStatus = st.shard0.getDB('admin').runCommand({serverStatus: 1});
+assert(shard0ServerStatus.sharding.migrations);
+assertMigrationStatusOnServerStatus(shard0ServerStatus,
+ st.shard0.shardName,
+ st.shard1.shardName,
+ true,
+ {"_id": 0},
+ {"_id": {"$maxKey": 1}},
+ coll + "");
+
+// Destination shard should not return any migration status.
+var shard1ServerStatus = st.shard1.getDB('admin').runCommand({serverStatus: 1});
+assert(!shard1ServerStatus.sharding.migrations);
+
+// Mongos should never return a migration status.
+var mongosServerStatus = st.s0.getDB('admin').runCommand({serverStatus: 1});
+assert(!mongosServerStatus.sharding.migrations);
+
+unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.startedMoveChunk);
+joinMoveChunk();
+
+// Migration is over, should no longer get a migration status.
+var shard0ServerStatus = st.shard0.getDB('admin').runCommand({serverStatus: 1});
+assert(!shard0ServerStatus.sharding.migrations);
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
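For reference, the serverStatus section this test inspects only exists on the donor while a migration is active; a minimal sketch of reading it, assuming st.shard0 is the donor:

var ss = st.shard0.getDB('admin').runCommand({serverStatus: 1});
if (ss.sharding && ss.sharding.migrations) {
    var m = ss.sharding.migrations;
    // Fields asserted on above: source, destination, isDonorShard, chunk.{min,max}, collection.
    print('Migrating ' + m.collection + ' [' + tojson(m.chunk.min) + ', ' + tojson(m.chunk.max) +
          ') from ' + m.source + ' to ' + m.destination);
}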
diff --git a/jstests/sharding/migration_sets_fromMigrate_flag.js b/jstests/sharding/migration_sets_fromMigrate_flag.js
index 7b0802286b1..73ee2dea163 100644
--- a/jstests/sharding/migration_sets_fromMigrate_flag.js
+++ b/jstests/sharding/migration_sets_fromMigrate_flag.js
@@ -17,178 +17,175 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- "use strict";
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
- var st = new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 1}});
-
- const dbName = "testDB";
- const ns = dbName + ".foo";
-
- let mongos = st.s0;
- let admin = mongos.getDB('admin');
- let coll = mongos.getCollection(ns);
-
- let donor = st.shard0;
- let recipient = st.shard1;
- let donorColl = donor.getCollection(ns);
- let recipientColl = recipient.getCollection(ns);
- let donorLocal = donor.getDB('local');
- let recipientLocal = recipient.getDB('local');
-
- // Two chunks
- // Donor: [0, 2) [2, 5)
- // Recipient:
- jsTest.log('Enable sharding of the collection and split into two chunks....');
-
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 2}}));
-
- // 6 documents,
- // donor: 2 in the first chunk, 3 in the second.
- // recipient: 1 document (shardkey overlaps with a doc in second chunk of donor shard)
- jsTest.log('Inserting 5 docs into donor shard, ensuring one orphan on the recipient shard....');
-
- // Insert just one document into the collection and fail a migration after the cloning step in
- // order to get an orphan onto the recipient shard with the correct UUID for the collection.
- assert.writeOK(coll.insert({_id: 2}));
- assert.eq(1, donorColl.count());
- assert.commandWorked(recipient.adminCommand(
- {configureFailPoint: "failMigrationLeaveOrphans", mode: "alwaysOn"}));
- assert.commandFailed(
- admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 2}, to: st.shard1.shardName}));
- assert.eq(1, recipientColl.count());
- assert.commandWorked(
- recipient.adminCommand({configureFailPoint: "failMigrationLeaveOrphans", mode: "off"}));
-
- // Insert the remaining documents into the collection.
- assert.writeOK(coll.insert({_id: 0}));
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.insert({_id: 3}));
- assert.writeOK(coll.insert({_id: 4}));
- assert.eq(5, donorColl.count());
-
- /**
- * Set failpoint: recipient will pause migration after cloning chunk data from donor,
- * before checking transfer mods log on donor.
- */
-
- jsTest.log('setting recipient failpoint cloned');
- pauseMigrateAtStep(recipient, migrateStepNames.cloned);
-
- /**
- * Start moving chunk [2, 5) from donor shard to recipient shard, run in the background.
- */
-
- // Donor: [0, 2)
- // Recipient: [2, 5)
- jsTest.log('Starting chunk migration, pause after cloning...');
-
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {_id: 2}, null, coll.getFullName(), st.shard1.shardName);
-
- /**
- * Wait for recipient to finish cloning.
- * THEN update 1 document {_id: 3} on donor within the currently migrating chunk.
- * AND delete 1 document {_id: 4} on donor within the currently migrating chunk.
- */
-
- waitForMigrateStep(recipient, migrateStepNames.cloned);
-
- jsTest.log('Update 1 doc and delete 1 doc on donor within the currently migrating chunk...');
-
- assert.writeOK(coll.update({_id: 3}, {_id: 3, a: "updated doc"}));
- assert.writeOK(coll.remove({_id: 4}));
-
- /**
- * Finish migration. Unpause recipient migration, wait for it to collect
- * the transfer mods log from donor and finish migration.
- */
-
- jsTest.log('Continuing and finishing migration...');
- unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
- joinMoveChunk();
-
- /**
- * Check documents are where they should be: 2 docs in donor chunk, 2 docs in recipient chunk
- * (because third doc in recipient shard's chunk got deleted on the donor shard during
- * migration).
- */
-
- jsTest.log('Checking that documents are on the shards they should be...');
-
- assert.eq(2, recipientColl.count(), "Recipient shard doesn't have exactly 2 documents!");
- assert.eq(2, donorColl.count(), "Donor shard doesn't have exactly 2 documents!");
- assert.eq(4, coll.count(), "Collection total is not 4!");
-
- /**
- * Check that the fromMigrate flag has been set correctly in donor and recipient oplogs,
- */
-
- jsTest.log('Checking donor and recipient oplogs for correct fromMigrate flags...');
-
- function assertEqAndDumpOpLog(expected, actual, msg) {
- if (expected === actual)
- return;
-
- print('Dumping oplog contents for', ns);
- print('On donor:');
- print(tojson(donorLocal.oplog.rs.find({ns: ns}).toArray()));
-
- print('On recipient:');
- print(tojson(recipientLocal.oplog.rs.find({ns: ns}).toArray()));
-
- assert.eq(expected, actual, msg);
- }
-
- var donorOplogRes = donorLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
- assertEqAndDumpOpLog(1,
- donorOplogRes,
- "fromMigrate flag wasn't set on the donor shard's oplog for " +
- "migrating delete op on {_id: 2}! Test #2 failed.");
-
- donorOplogRes =
- donorLocal.oplog.rs.find({op: 'd', fromMigrate: {$exists: false}, 'o._id': 4}).count();
- assertEqAndDumpOpLog(1,
- donorOplogRes,
- "Real delete of {_id: 4} on donor shard incorrectly set the " +
- "fromMigrate flag in the oplog! Test #5 failed.");
-
- // Expect to see two oplog entries for {_id: 2} with 'fromMigrate: true', because this doc was
- // cloned as part of the first failed migration as well as the second successful migration.
- var recipientOplogRes =
- recipientLocal.oplog.rs.find({op: 'i', fromMigrate: true, 'o._id': 2}).count();
- assertEqAndDumpOpLog(2,
- recipientOplogRes,
- "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for migrating insert op on {_id: 2}! Test #3 failed.");
-
- recipientOplogRes =
- recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
- assertEqAndDumpOpLog(1,
- recipientOplogRes,
- "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for delete op on the old {_id: 2} that overlapped " +
- "with the chunk about to be copied! Test #1 failed.");
-
- recipientOplogRes =
- recipientLocal.oplog.rs.find({op: 'u', fromMigrate: true, 'o._id': 3}).count();
- assertEqAndDumpOpLog(1,
- recipientOplogRes,
- "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for update op on {_id: 3}! Test #4 failed.");
-
- recipientOplogRes =
- recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 4}).count();
- assertEqAndDumpOpLog(1,
- recipientOplogRes,
- "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for delete op on {_id: 4} that occurred during " +
- "migration! Test #5 failed.");
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+"use strict";
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+var st = new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 1}});
+
+const dbName = "testDB";
+const ns = dbName + ".foo";
+
+let mongos = st.s0;
+let admin = mongos.getDB('admin');
+let coll = mongos.getCollection(ns);
+
+let donor = st.shard0;
+let recipient = st.shard1;
+let donorColl = donor.getCollection(ns);
+let recipientColl = recipient.getCollection(ns);
+let donorLocal = donor.getDB('local');
+let recipientLocal = recipient.getDB('local');
+
+// Two chunks
+// Donor: [0, 2) [2, 5)
+// Recipient:
+jsTest.log('Enable sharding of the collection and split into two chunks....');
+
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 2}}));
+
+// 6 documents,
+// donor: 2 in the first chunk, 3 in the second.
+// recipient: 1 document (shardkey overlaps with a doc in second chunk of donor shard)
+jsTest.log('Inserting 5 docs into donor shard, ensuring one orphan on the recipient shard....');
+
+// Insert just one document into the collection and fail a migration after the cloning step in
+// order to get an orphan onto the recipient shard with the correct UUID for the collection.
+assert.writeOK(coll.insert({_id: 2}));
+assert.eq(1, donorColl.count());
+assert.commandWorked(
+ recipient.adminCommand({configureFailPoint: "failMigrationLeaveOrphans", mode: "alwaysOn"}));
+assert.commandFailed(
+ admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 2}, to: st.shard1.shardName}));
+assert.eq(1, recipientColl.count());
+assert.commandWorked(
+ recipient.adminCommand({configureFailPoint: "failMigrationLeaveOrphans", mode: "off"}));
+
+// Insert the remaining documents into the collection.
+assert.writeOK(coll.insert({_id: 0}));
+assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(coll.insert({_id: 3}));
+assert.writeOK(coll.insert({_id: 4}));
+assert.eq(5, donorColl.count());
+
+/**
+ * Set failpoint: recipient will pause migration after cloning chunk data from donor,
+ * before checking transfer mods log on donor.
+ */
+
+jsTest.log('setting recipient failpoint cloned');
+pauseMigrateAtStep(recipient, migrateStepNames.cloned);
+
+/**
+ * Start moving chunk [2, 5) from donor shard to recipient shard, run in the background.
+ */
+
+// Donor: [0, 2)
+// Recipient: [2, 5)
+jsTest.log('Starting chunk migration, pause after cloning...');
+
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {_id: 2}, null, coll.getFullName(), st.shard1.shardName);
+
+/**
+ * Wait for recipient to finish cloning.
+ * THEN update 1 document {_id: 3} on donor within the currently migrating chunk.
+ * AND delete 1 document {_id: 4} on donor within the currently migrating chunk.
+ */
+
+waitForMigrateStep(recipient, migrateStepNames.cloned);
+
+jsTest.log('Update 1 doc and delete 1 doc on donor within the currently migrating chunk...');
+
+assert.writeOK(coll.update({_id: 3}, {_id: 3, a: "updated doc"}));
+assert.writeOK(coll.remove({_id: 4}));
+
+/**
+ * Finish migration. Unpause recipient migration, wait for it to collect
+ * the transfer mods log from donor and finish migration.
+ */
+
+jsTest.log('Continuing and finishing migration...');
+unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
+joinMoveChunk();
+
+/**
+ * Check documents are where they should be: 2 docs in donor chunk, 2 docs in recipient chunk
+ * (because third doc in recipient shard's chunk got deleted on the donor shard during
+ * migration).
+ */
+
+jsTest.log('Checking that documents are on the shards they should be...');
+
+assert.eq(2, recipientColl.count(), "Recipient shard doesn't have exactly 2 documents!");
+assert.eq(2, donorColl.count(), "Donor shard doesn't have exactly 2 documents!");
+assert.eq(4, coll.count(), "Collection total is not 4!");
+
+/**
+ * Check that the fromMigrate flag has been set correctly in donor and recipient oplogs.
+ */
+
+jsTest.log('Checking donor and recipient oplogs for correct fromMigrate flags...');
+
+function assertEqAndDumpOpLog(expected, actual, msg) {
+ if (expected === actual)
+ return;
+
+ print('Dumping oplog contents for', ns);
+ print('On donor:');
+ print(tojson(donorLocal.oplog.rs.find({ns: ns}).toArray()));
+
+ print('On recipient:');
+ print(tojson(recipientLocal.oplog.rs.find({ns: ns}).toArray()));
+
+ assert.eq(expected, actual, msg);
+}
+
+var donorOplogRes = donorLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
+assertEqAndDumpOpLog(1,
+ donorOplogRes,
+ "fromMigrate flag wasn't set on the donor shard's oplog for " +
+ "migrating delete op on {_id: 2}! Test #2 failed.");
+
+donorOplogRes =
+ donorLocal.oplog.rs.find({op: 'd', fromMigrate: {$exists: false}, 'o._id': 4}).count();
+assertEqAndDumpOpLog(1,
+ donorOplogRes,
+ "Real delete of {_id: 4} on donor shard incorrectly set the " +
+ "fromMigrate flag in the oplog! Test #5 failed.");
+
+// Expect to see two oplog entries for {_id: 2} with 'fromMigrate: true', because this doc was
+// cloned as part of the first failed migration as well as the second successful migration.
+var recipientOplogRes =
+ recipientLocal.oplog.rs.find({op: 'i', fromMigrate: true, 'o._id': 2}).count();
+assertEqAndDumpOpLog(2,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for migrating insert op on {_id: 2}! Test #3 failed.");
+
+recipientOplogRes = recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
+assertEqAndDumpOpLog(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for delete op on the old {_id: 2} that overlapped " +
+ "with the chunk about to be copied! Test #1 failed.");
+
+recipientOplogRes = recipientLocal.oplog.rs.find({op: 'u', fromMigrate: true, 'o._id': 3}).count();
+assertEqAndDumpOpLog(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for update op on {_id: 3}! Test #4 failed.");
+
+recipientOplogRes = recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 4}).count();
+assertEqAndDumpOpLog(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for delete op on {_id: 4} that occurred during " +
+ "migration! Test #5 failed.");
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
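The oplog assertions above all use the same query shape; a minimal sketch, assuming donorLocal, recipientLocal, and ns from the test above:

// Migration-generated writes carry fromMigrate: true; ordinary client writes omit the flag.
var migrationInserts =
    recipientLocal.oplog.rs.find({ns: ns, op: 'i', fromMigrate: true}).count();
var clientDeletes =
    donorLocal.oplog.rs.find({ns: ns, op: 'd', fromMigrate: {$exists: false}}).count();
print('migration inserts on recipient: ' + migrationInserts +
      ', client deletes on donor: ' + clientDeletes);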
diff --git a/jstests/sharding/migration_with_source_ops.js b/jstests/sharding/migration_with_source_ops.js
index 2fc2c467c0f..91c7a460196 100644
--- a/jstests/sharding/migration_with_source_ops.js
+++ b/jstests/sharding/migration_with_source_ops.js
@@ -18,128 +18,126 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- "use strict";
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
- /**
- * Start up new sharded cluster, stop balancer that would interfere in manual chunk management.
- */
-
- var st = new ShardingTest({shards: 2, mongos: 1});
- st.stopBalancer();
-
- var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns = dbName + ".foo",
- coll = mongos.getCollection(ns), donor = st.shard0, recipient = st.shard1,
- donorColl = donor.getCollection(ns), recipientColl = recipient.getCollection(ns);
-
- /**
- * Exable sharding, and split collection into two chunks.
- */
-
- // Two chunks
- // Donor: [0, 20) [20, 40)
- // Recipient:
- jsTest.log('Enabling sharding of the collection and pre-splitting into two chunks....');
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: ns, key: {a: 1}}));
- assert.commandWorked(admin.runCommand({split: ns, middle: {a: 20}}));
-
- /**
- * Insert data into collection
- */
-
- // 10 documents in each chunk on the donor
- jsTest.log('Inserting 20 docs into donor shard, 10 in each chunk....');
- for (var i = 0; i < 10; ++i)
- assert.writeOK(coll.insert({a: i}));
- for (var i = 20; i < 30; ++i)
- assert.writeOK(coll.insert({a: i}));
- assert.eq(20, coll.count());
-
- /**
- * Set failpoints. Recipient will crash if an out of chunk range data op is
- * received from donor. Recipient will pause migration after cloning chunk data from donor,
- * before checking transfer mods log on donor.
- */
-
- jsTest.log('Setting failpoint failMigrationReceivedOutOfRangeOperation');
- assert.commandWorked(recipient.getDB('admin').runCommand(
- {configureFailPoint: 'failMigrationReceivedOutOfRangeOperation', mode: 'alwaysOn'}));
-
- jsTest.log(
- 'Setting chunk migration recipient failpoint so that it pauses after bulk clone step');
- pauseMigrateAtStep(recipient, migrateStepNames.cloned);
-
- /**
- * Start a moveChunk in the background. Move chunk [20, 40), which has 10 docs in the
- * range, from shard 0 (donor) to shard 1 (recipient). Migration will pause after
- * cloning step (when it reaches the recipient failpoint).
- */
-
- // Donor: [0, 20)
- // Recipient: [20, 40)
- jsTest.log('Starting migration, pause after cloning...');
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {a: 20}, null, coll.getFullName(), st.shard1.shardName);
-
- /**
- * Wait for recipient to finish cloning step.
- * THEN delete 10 documents on the donor shard, 5 in the migrating chunk and 5 in the remaining
- *chunk.
- * AND insert 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining
- *chunk.
- * AND update 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining
- *chunk.
- *
- * This will populate the migration transfer mods log, which the recipient will collect when it
- * is unpaused.
- */
-
- waitForMigrateStep(recipient, migrateStepNames.cloned);
-
- jsTest.log('Deleting 5 docs from each chunk, migrating chunk and remaining chunk...');
- assert.writeOK(coll.remove({$and: [{a: {$gte: 5}}, {a: {$lt: 25}}]}));
-
- jsTest.log('Inserting 1 in the migrating chunk range and 1 in the remaining chunk range...');
- assert.writeOK(coll.insert({a: 10}));
- assert.writeOK(coll.insert({a: 30}));
-
- jsTest.log('Updating 1 in the migrating chunk range and 1 in the remaining chunk range...');
- assert.writeOK(coll.update({a: 0}, {a: 0, updatedData: "updated"}));
- assert.writeOK(coll.update({a: 25}, {a: 25, updatedData: "updated"}));
-
- /**
- * Finish migration. Unpause recipient migration, wait for it to collect
- * the new ops from the donor shard's migration transfer mods log, and finish.
- */
-
- jsTest.log('Continuing and finishing migration...');
- unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
- joinMoveChunk();
-
- /**
- * Check documents are where they should be: 6 docs in each shard's respective chunk.
- */
-
- jsTest.log('Checking that documents are on the shards they should be...');
- assert.eq(6, donorColl.count());
- assert.eq(6, recipientColl.count());
- assert.eq(12, coll.count());
-
- /**
- * Check that the updated documents are where they should be, one on each shard.
- */
-
- jsTest.log('Checking that documents were updated correctly...');
- var donorCollUpdatedNum = donorColl.find({updatedData: "updated"}).count();
- assert.eq(1, donorCollUpdatedNum, "Update failed on donor shard during migration!");
- var recipientCollUpdatedNum = recipientColl.find({updatedData: "updated"}).count();
- assert.eq(1, recipientCollUpdatedNum, "Update failed on recipient shard during migration!");
-
- jsTest.log('DONE!');
- MongoRunner.stopMongod(staticMongod);
- st.stop();
-
+"use strict";
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+/**
+ * Start up new sharded cluster, stop balancer that would interfere in manual chunk management.
+ */
+
+var st = new ShardingTest({shards: 2, mongos: 1});
+st.stopBalancer();
+
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns = dbName + ".foo",
+ coll = mongos.getCollection(ns), donor = st.shard0, recipient = st.shard1,
+ donorColl = donor.getCollection(ns), recipientColl = recipient.getCollection(ns);
+
+/**
+ * Enable sharding, and split the collection into two chunks.
+ */
+
+// Two chunks
+// Donor: [0, 20) [20, 40)
+// Recipient:
+jsTest.log('Enabling sharding of the collection and pre-splitting into two chunks....');
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: ns, key: {a: 1}}));
+assert.commandWorked(admin.runCommand({split: ns, middle: {a: 20}}));
+
+/**
+ * Insert data into collection
+ */
+
+// 10 documents in each chunk on the donor
+jsTest.log('Inserting 20 docs into donor shard, 10 in each chunk....');
+for (var i = 0; i < 10; ++i)
+ assert.writeOK(coll.insert({a: i}));
+for (var i = 20; i < 30; ++i)
+ assert.writeOK(coll.insert({a: i}));
+assert.eq(20, coll.count());
+
+/**
+ * Set failpoints. Recipient will crash if an out of chunk range data op is
+ * received from donor. Recipient will pause migration after cloning chunk data from donor,
+ * before checking transfer mods log on donor.
+ */
+
+jsTest.log('Setting failpoint failMigrationReceivedOutOfRangeOperation');
+assert.commandWorked(recipient.getDB('admin').runCommand(
+ {configureFailPoint: 'failMigrationReceivedOutOfRangeOperation', mode: 'alwaysOn'}));
+
+jsTest.log('Setting chunk migration recipient failpoint so that it pauses after bulk clone step');
+pauseMigrateAtStep(recipient, migrateStepNames.cloned);
+
+/**
+ * Start a moveChunk in the background. Move chunk [20, 40), which has 10 docs in the
+ * range, from shard 0 (donor) to shard 1 (recipient). Migration will pause after
+ * cloning step (when it reaches the recipient failpoint).
+ */
+
+// Donor: [0, 20)
+// Recipient: [20, 40)
+jsTest.log('Starting migration, pause after cloning...');
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 20}, null, coll.getFullName(), st.shard1.shardName);
+
+/**
+ * Wait for recipient to finish cloning step.
+ * THEN delete 10 documents on the donor shard, 5 in the migrating chunk and 5 in the remaining
+ * chunk.
+ * AND insert 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining
+ * chunk.
+ * AND update 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining
+ * chunk.
+ *
+ * This will populate the migration transfer mods log, which the recipient will collect when it
+ * is unpaused.
+ */
+
+waitForMigrateStep(recipient, migrateStepNames.cloned);
+
+jsTest.log('Deleting 5 docs from each chunk, migrating chunk and remaining chunk...');
+assert.writeOK(coll.remove({$and: [{a: {$gte: 5}}, {a: {$lt: 25}}]}));
+
+jsTest.log('Inserting 1 in the migrating chunk range and 1 in the remaining chunk range...');
+assert.writeOK(coll.insert({a: 10}));
+assert.writeOK(coll.insert({a: 30}));
+
+jsTest.log('Updating 1 in the migrating chunk range and 1 in the remaining chunk range...');
+assert.writeOK(coll.update({a: 0}, {a: 0, updatedData: "updated"}));
+assert.writeOK(coll.update({a: 25}, {a: 25, updatedData: "updated"}));
+
+/**
+ * Finish migration. Unpause recipient migration, wait for it to collect
+ * the new ops from the donor shard's migration transfer mods log, and finish.
+ */
+
+jsTest.log('Continuing and finishing migration...');
+unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
+joinMoveChunk();
+
+/**
+ * Check documents are where they should be: 6 docs in each shard's respective chunk.
+ */
+
+jsTest.log('Checking that documents are on the shards they should be...');
+assert.eq(6, donorColl.count());
+assert.eq(6, recipientColl.count());
+assert.eq(12, coll.count());
+
+/**
+ * Check that the updated documents are where they should be, one on each shard.
+ */
+
+jsTest.log('Checking that documents were updated correctly...');
+var donorCollUpdatedNum = donorColl.find({updatedData: "updated"}).count();
+assert.eq(1, donorCollUpdatedNum, "Update failed on donor shard during migration!");
+var recipientCollUpdatedNum = recipientColl.find({updatedData: "updated"}).count();
+assert.eq(1, recipientCollUpdatedNum, "Update failed on recipient shard during migration!");
+
+jsTest.log('DONE!');
+MongoRunner.stopMongod(staticMongod);
+st.stop();
})();
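The out-of-range guard in this test is the standard failpoint toggle used throughout these files; a minimal sketch of the on/off pattern, assuming recipient is a shard connection:

assert.commandWorked(recipient.adminCommand(
    {configureFailPoint: 'failMigrationReceivedOutOfRangeOperation', mode: 'alwaysOn'}));
// ... run the migration that must not receive out-of-range ops ...
assert.commandWorked(recipient.adminCommand(
    {configureFailPoint: 'failMigrationReceivedOutOfRangeOperation', mode: 'off'}));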
diff --git a/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js b/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
index ab125e12e21..b7cd5ba2876 100644
--- a/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
+++ b/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
@@ -3,41 +3,39 @@
* @tags: [requires_persistence]
*/
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- // Insert a recovery doc with non-zero minOpTimeUpdaters to simulate a migration
- // process that crashed in the middle of the critical section.
+// Insert a recovery doc with non-zero minOpTimeUpdaters to simulate a migration
+// process that crashed in the middle of the critical section.
- var recoveryDoc = {
- _id: 'minOpTimeRecovery',
- configsvrConnectionString: st.configRS.getURL(),
- shardName: st.shard0.shardName,
- minOpTime: {ts: Timestamp(0, 0), t: 0},
- minOpTimeUpdaters: 2
- };
+var recoveryDoc = {
+ _id: 'minOpTimeRecovery',
+ configsvrConnectionString: st.configRS.getURL(),
+ shardName: st.shard0.shardName,
+ minOpTime: {ts: Timestamp(0, 0), t: 0},
+ minOpTimeUpdaters: 2
+};
- assert.writeOK(st.shard0.getDB('admin').system.version.insert(recoveryDoc));
+assert.writeOK(st.shard0.getDB('admin').system.version.insert(recoveryDoc));
- // Make sure test is setup correctly.
- var minOpTimeRecoveryDoc =
- st.shard0.getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'});
+// Make sure the test is set up correctly.
+var minOpTimeRecoveryDoc =
+ st.shard0.getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'});
- assert.neq(null, minOpTimeRecoveryDoc);
- assert.eq(0, minOpTimeRecoveryDoc.minOpTime.ts.getTime());
- assert.eq(2, minOpTimeRecoveryDoc.minOpTimeUpdaters);
+assert.neq(null, minOpTimeRecoveryDoc);
+assert.eq(0, minOpTimeRecoveryDoc.minOpTime.ts.getTime());
+assert.eq(2, minOpTimeRecoveryDoc.minOpTimeUpdaters);
- st.restartShardRS(0);
+st.restartShardRS(0);
- // After the restart, the shard should have updated the opTime and reset minOpTimeUpdaters.
- minOpTimeRecoveryDoc =
- st.shard0.getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'});
+// After the restart, the shard should have updated the opTime and reset minOpTimeUpdaters.
+minOpTimeRecoveryDoc = st.shard0.getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'});
- assert.neq(null, minOpTimeRecoveryDoc);
- assert.gt(minOpTimeRecoveryDoc.minOpTime.ts.getTime(), 0);
- assert.eq(0, minOpTimeRecoveryDoc.minOpTimeUpdaters);
-
- st.stop();
+assert.neq(null, minOpTimeRecoveryDoc);
+assert.gt(minOpTimeRecoveryDoc.minOpTime.ts.getTime(), 0);
+assert.eq(0, minOpTimeRecoveryDoc.minOpTimeUpdaters);
+st.stop();
})();
diff --git a/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js b/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js
index c33eabeffd7..143b939d381 100644
--- a/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js
+++ b/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js
@@ -2,38 +2,37 @@
* Tests that the minOpTimeRecovery document will be created after a migration.
*/
(function() {
- "use strict";
-
- var st = new ShardingTest({shards: 2});
-
- var testDB = st.s.getDB('test');
- testDB.adminCommand({enableSharding: 'test'});
- st.ensurePrimaryShard('test', st.shard0.shardName);
- testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
-
- var priConn = st.configRS.getPrimary();
- var replStatus = priConn.getDB('admin').runCommand({replSetGetStatus: 1});
- replStatus.members.forEach(function(memberState) {
- if (memberState.state == 1) { // if primary
- assert.neq(null, memberState.optime);
- assert.neq(null, memberState.optime.ts);
- assert.neq(null, memberState.optime.t);
- }
- });
-
- testDB.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName});
-
- var shardAdmin = st.rs0.getPrimary().getDB('admin');
- var minOpTimeRecoveryDoc = shardAdmin.system.version.findOne({_id: 'minOpTimeRecovery'});
-
- assert.neq(null, minOpTimeRecoveryDoc);
- assert.eq('minOpTimeRecovery', minOpTimeRecoveryDoc._id);
- assert.eq(st.configRS.getURL(),
- minOpTimeRecoveryDoc.configsvrConnectionString); // TODO SERVER-34166: Remove.
- assert.eq(st.shard0.shardName, minOpTimeRecoveryDoc.shardName); // TODO SERVER-34166: Remove.
- assert.gt(minOpTimeRecoveryDoc.minOpTime.ts.getTime(), 0);
- assert.eq(0, minOpTimeRecoveryDoc.minOpTimeUpdaters);
-
- st.stop();
-
+"use strict";
+
+var st = new ShardingTest({shards: 2});
+
+var testDB = st.s.getDB('test');
+testDB.adminCommand({enableSharding: 'test'});
+st.ensurePrimaryShard('test', st.shard0.shardName);
+testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+
+var priConn = st.configRS.getPrimary();
+var replStatus = priConn.getDB('admin').runCommand({replSetGetStatus: 1});
+replStatus.members.forEach(function(memberState) {
+ if (memberState.state == 1) { // if primary
+ assert.neq(null, memberState.optime);
+ assert.neq(null, memberState.optime.ts);
+ assert.neq(null, memberState.optime.t);
+ }
+});
+
+testDB.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName});
+
+var shardAdmin = st.rs0.getPrimary().getDB('admin');
+var minOpTimeRecoveryDoc = shardAdmin.system.version.findOne({_id: 'minOpTimeRecovery'});
+
+assert.neq(null, minOpTimeRecoveryDoc);
+assert.eq('minOpTimeRecovery', minOpTimeRecoveryDoc._id);
+assert.eq(st.configRS.getURL(),
+ minOpTimeRecoveryDoc.configsvrConnectionString); // TODO SERVER-34166: Remove.
+assert.eq(st.shard0.shardName, minOpTimeRecoveryDoc.shardName); // TODO SERVER-34166: Remove.
+assert.gt(minOpTimeRecoveryDoc.minOpTime.ts.getTime(), 0);
+assert.eq(0, minOpTimeRecoveryDoc.minOpTimeUpdaters);
+
+st.stop();
})();
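Both minOpTimeRecovery tests read the same document; a minimal sketch of fetching it, assuming st.rs0 is the shard replica set:

var doc = st.rs0.getPrimary().getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'});
if (doc) {
    // A non-zero minOpTimeUpdaters is what the failed-commit test uses to simulate a
    // migration that crashed inside the critical section.
    print('minOpTime ts: ' + tojson(doc.minOpTime.ts) + ', updaters: ' + doc.minOpTimeUpdaters);
}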
diff --git a/jstests/sharding/missing_key.js b/jstests/sharding/missing_key.js
index 304d77fc839..14078cbff24 100644
--- a/jstests/sharding/missing_key.js
+++ b/jstests/sharding/missing_key.js
@@ -1,42 +1,42 @@
// Test that the shardCollection command fails when a preexisting document lacks a shard key field.
// SERVER-8772
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- var db = st.s.getDB('testDb');
- var coll = db.testColl;
+var db = st.s.getDB('testDb');
+var coll = db.testColl;
- assert.writeOK(coll.insert({x: 1, z: 1}));
- assert.writeOK(coll.insert({y: 1, z: 1}));
+assert.writeOK(coll.insert({x: 1, z: 1}));
+assert.writeOK(coll.insert({y: 1, z: 1}));
- assert.commandWorked(db.adminCommand({enableSharding: 'testDb'}));
+assert.commandWorked(db.adminCommand({enableSharding: 'testDb'}));
- /**
- * Assert that the shardCollection command fails, with a preexisting index on the provided
- * 'shardKey'.
- */
- function assertInvalidShardKey(shardKey) {
- // Manually create a shard key index.
- coll.dropIndexes();
- coll.ensureIndex(shardKey);
+/**
+ * Assert that the shardCollection command fails, with a preexisting index on the provided
+ * 'shardKey'.
+ */
+function assertInvalidShardKey(shardKey) {
+ // Manually create a shard key index.
+ coll.dropIndexes();
+ coll.ensureIndex(shardKey);
- // Ensure that the shard key index identifies 'x' as present in one document and absent in
- // the other.
- assert.eq(1, coll.find({x: 1}).hint(shardKey).itcount());
- assert.eq(1, coll.find({x: {$exists: false}}).hint(shardKey).itcount());
+ // Ensure that the shard key index identifies 'x' as present in one document and absent in
+ // the other.
+ assert.eq(1, coll.find({x: 1}).hint(shardKey).itcount());
+ assert.eq(1, coll.find({x: {$exists: false}}).hint(shardKey).itcount());
- // Assert that the shardCollection command fails with the provided 'shardKey'.
- assert.commandFailed(db.adminCommand({shardCollection: 'testDb.testColl', key: shardKey}),
- 'shardCollection should have failed on key ' + tojson(shardKey));
- }
+ // Assert that the shardCollection command fails with the provided 'shardKey'.
+ assert.commandFailed(db.adminCommand({shardCollection: 'testDb.testColl', key: shardKey}),
+ 'shardCollection should have failed on key ' + tojson(shardKey));
+}
- // Test single, compound, and hashed shard keys.
- assertInvalidShardKey({x: 1});
- assertInvalidShardKey({x: 1, y: 1});
- assertInvalidShardKey({y: 1, x: 1});
- assertInvalidShardKey({x: 'hashed'});
+// Test single, compound, and hashed shard keys.
+assertInvalidShardKey({x: 1});
+assertInvalidShardKey({x: 1, y: 1});
+assertInvalidShardKey({y: 1, x: 1});
+assertInvalidShardKey({x: 'hashed'});
- st.stop();
+st.stop();
})();
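shardCollection refuses the keys above because a preexisting document lacks the field; a quick way to spot such documents up front, sketched below assuming coll and the prospective key field 'x':

// Any document in which the shard key field does not exist will block shardCollection.
var missing = coll.find({x: {$exists: false}}).itcount();
print(missing + " document(s) lack the prospective shard key field 'x'");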
diff --git a/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js b/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
index 8cb8fe8f7fb..56c766e24ba 100644
--- a/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
+++ b/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
@@ -7,81 +7,89 @@
* @tags: [requires_persistence]
*/
(function() {
- "use strict";
-
- // This test uses authentication and runs commands without authenticating, which is not
- // compatible with implicit sessions.
- TestData.disableImplicitSessions = true;
-
- load("jstests/multiVersion/libs/multi_rs.js");
-
- // TODO SERVER-32672: remove this flag.
- TestData.skipGossipingClusterTime = true;
- const keyFile = 'jstests/libs/key1';
- const adminUser = {db: "admin", username: "foo", password: "bar"};
- const rUser = {db: "test", username: "r", password: "bar"};
-
- function assertContainsValidLogicalTime(res) {
- assert.hasFields(res, ["$clusterTime"]);
- assert.hasFields(res.$clusterTime, ["signature", "clusterTime"]);
- // clusterTime must be greater than the uninitialzed value.
- assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
- // The signature must have been signed by a key with a valid generation.
- assert(res.$clusterTime.signature.keyId > NumberLong(0));
-
- assert.hasFields(res, ["operationTime"]);
- assert(Object.prototype.toString.call(res.operationTime) === "[object Timestamp]",
- "operationTime must be a timestamp");
- }
-
- let st = new ShardingTest({shards: {rs0: {nodes: 2}}, other: {keyFile: keyFile}});
-
- jsTestLog("Started ShardingTest");
-
- var adminDB = st.s.getDB("admin");
- adminDB.createUser({user: adminUser.username, pwd: adminUser.password, roles: ["__system"]});
-
- adminDB.auth(adminUser.username, adminUser.password);
- assert(st.s.getDB("admin").system.keys.count() >= 2);
-
- let priRSConn = st.rs0.getPrimary().getDB("admin");
- priRSConn.createUser({user: rUser.username, pwd: rUser.password, roles: ["root"]});
-
- priRSConn.auth(rUser.username, rUser.password);
- const resWithKeys = priRSConn.runCommand({isMaster: 1});
- assertContainsValidLogicalTime(resWithKeys);
- priRSConn.logout();
-
- // Enable the failpoint, remove all keys, and restart the config servers with the failpoint
- // still enabled to guarantee there are no keys.
- for (let i = 0; i < st.configRS.nodes.length; i++) {
- assert.commandWorked(st.configRS.nodes[i].adminCommand(
- {"configureFailPoint": "disableKeyGeneration", "mode": "alwaysOn"}));
- }
-
- var priCSConn = st.configRS.getPrimary();
- authutil.asCluster(priCSConn, keyFile, function() {
- priCSConn.getDB("admin").system.keys.remove({purpose: "HMAC"});
- });
-
- assert(adminDB.system.keys.count() == 0, "expected there to be no keys on the config server");
- adminDB.logout();
-
- st.configRS.stopSet(null /* signal */, true /* forRestart */);
- st.configRS.startSet(
- {restart: true, setParameter: {"failpoint.disableKeyGeneration": "{'mode':'alwaysOn'}"}});
-
- // bounce rs0 to clean the key cache
- st.rs0.stopSet(null /* signal */, true /* forRestart */);
- st.rs0.startSet({restart: true});
-
- priRSConn = st.rs0.getPrimary().getDB("admin");
- priRSConn.auth(rUser.username, rUser.password);
- const resNoKeys = assert.commandWorked(priRSConn.runCommand({isMaster: 1}));
- priRSConn.logout();
-
- assert.eq(resNoKeys.hasOwnProperty("$clusterTime"), false);
- assert.eq(resNoKeys.hasOwnProperty("operationTime"), false);
-
- st.stop();
+"use strict";
+
+// This test uses authentication and runs commands without authenticating, which is not
+// compatible with implicit sessions.
+TestData.disableImplicitSessions = true;
+
+load("jstests/multiVersion/libs/multi_rs.js");
+
+// TODO SERVER-32672: remove this flag.
+TestData.skipGossipingClusterTime = true;
+const keyFile = 'jstests/libs/key1';
+const adminUser = {
+ db: "admin",
+ username: "foo",
+ password: "bar"
+};
+const rUser = {
+ db: "test",
+ username: "r",
+ password: "bar"
+};
+
+function assertContainsValidLogicalTime(res) {
+ assert.hasFields(res, ["$clusterTime"]);
+ assert.hasFields(res.$clusterTime, ["signature", "clusterTime"]);
+    // clusterTime must be greater than the uninitialized value.
+ assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
+ // The signature must have been signed by a key with a valid generation.
+ assert(res.$clusterTime.signature.keyId > NumberLong(0));
+
+ assert.hasFields(res, ["operationTime"]);
+ assert(Object.prototype.toString.call(res.operationTime) === "[object Timestamp]",
+ "operationTime must be a timestamp");
+}
+
+let st = new ShardingTest({shards: {rs0: {nodes: 2}}, other: {keyFile: keyFile}});
+
+jsTestLog("Started ShardingTest");
+
+var adminDB = st.s.getDB("admin");
+adminDB.createUser({user: adminUser.username, pwd: adminUser.password, roles: ["__system"]});
+
+adminDB.auth(adminUser.username, adminUser.password);
+assert(st.s.getDB("admin").system.keys.count() >= 2);
+
+let priRSConn = st.rs0.getPrimary().getDB("admin");
+priRSConn.createUser({user: rUser.username, pwd: rUser.password, roles: ["root"]});
+
+priRSConn.auth(rUser.username, rUser.password);
+const resWithKeys = priRSConn.runCommand({isMaster: 1});
+assertContainsValidLogicalTime(resWithKeys);
+priRSConn.logout();
+
+// Enable the failpoint, remove all keys, and restart the config servers with the failpoint
+// still enabled to guarantee there are no keys.
+for (let i = 0; i < st.configRS.nodes.length; i++) {
+ assert.commandWorked(st.configRS.nodes[i].adminCommand(
+ {"configureFailPoint": "disableKeyGeneration", "mode": "alwaysOn"}));
+}
+
+var priCSConn = st.configRS.getPrimary();
+authutil.asCluster(priCSConn, keyFile, function() {
+ priCSConn.getDB("admin").system.keys.remove({purpose: "HMAC"});
+});
+
+assert(adminDB.system.keys.count() == 0, "expected there to be no keys on the config server");
+adminDB.logout();
+
+st.configRS.stopSet(null /* signal */, true /* forRestart */);
+st.configRS.startSet(
+ {restart: true, setParameter: {"failpoint.disableKeyGeneration": "{'mode':'alwaysOn'}"}});
+
+// bounce rs0 to clean the key cache
+st.rs0.stopSet(null /* signal */, true /* forRestart */);
+st.rs0.startSet({restart: true});
+
+priRSConn = st.rs0.getPrimary().getDB("admin");
+priRSConn.auth(rUser.username, rUser.password);
+const resNoKeys = assert.commandWorked(priRSConn.runCommand({isMaster: 1}));
+priRSConn.logout();
+
+assert.eq(resNoKeys.hasOwnProperty("$clusterTime"), false);
+assert.eq(resNoKeys.hasOwnProperty("operationTime"), false);
+
+st.stop();
})();
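
For reference, a minimal sketch of a hypothetical shell helper that asserts a command response
gossips no logical time, mirroring the two hasOwnProperty checks the test above performs
(the helper name is an assumption, not taken from the patch):

    function assertContainsNoLogicalTime(res) {
        // With no signing keys available, neither field should be attached to the response.
        assert.eq(res.hasOwnProperty("$clusterTime"), false, tojson(res));
        assert.eq(res.hasOwnProperty("operationTime"), false, tojson(res));
    }

    // Usage, matching the check above:
    // assertContainsNoLogicalTime(assert.commandWorked(priRSConn.runCommand({isMaster: 1})));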
diff --git a/jstests/sharding/mongos_dataSize_test.js b/jstests/sharding/mongos_dataSize_test.js
index 389529e7d9d..9dc9ceac78f 100644
--- a/jstests/sharding/mongos_dataSize_test.js
+++ b/jstests/sharding/mongos_dataSize_test.js
@@ -1,16 +1,14 @@
// This tests the command dataSize on sharded clusters to ensure that they can use the command.
(function() {
- 'use strict';
+'use strict';
- let s = new ShardingTest({shards: 2, mongos: 1});
- let db = s.getDB("test");
- assert.commandWorked(s.s0.adminCommand({enableSharding: "test"}));
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
- assert.commandWorked(
- s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"}));
- assert.commandFailedWithCode(
- s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "foo"}),
- ErrorCodes.InvalidNamespace);
- s.stop();
+let s = new ShardingTest({shards: 2, mongos: 1});
+let db = s.getDB("test");
+assert.commandWorked(s.s0.adminCommand({enableSharding: "test"}));
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
+assert.commandWorked(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"}));
+assert.commandFailedWithCode(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "foo"}),
+ ErrorCodes.InvalidNamespace);
+s.stop();
})();
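
The test only exercises dataSize on a full namespace; a minimal sketch of the bounded form,
using the command's keyPattern/min/max options (assumed from the command's general shape, not
exercised by this test):

    let sizeRes = assert.commandWorked(s.getPrimaryShard("test").getDB("admin").runCommand(
        {dataSize: "test.foo", keyPattern: {num: 1}, min: {num: 0}, max: {num: 100}}));
    // The response reports the estimated byte size and object count for the key range.
    print("size: " + sizeRes.size + ", objects: " + sizeRes.numObjects);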
diff --git a/jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js b/jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js
index 84791008f4d..69a64cd808d 100644
--- a/jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js
+++ b/jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js
@@ -4,80 +4,80 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/multiVersion/libs/multi_rs.js");
- load("jstests/multiVersion/libs/multi_cluster.js"); // For restartMongoses.
+load("jstests/multiVersion/libs/multi_rs.js");
+load("jstests/multiVersion/libs/multi_cluster.js"); // For restartMongoses.
- function assertContainsValidLogicalTime(res, check) {
- assert.hasFields(res, ["$clusterTime"]);
- assert.hasFields(res.$clusterTime, ["signature", "clusterTime"]);
- // clusterTime must be greater than the uninitialzed value.
- // TODO: SERVER-31986 this check can be done only for authenticated connections that do not
- // have advance_cluster_time privilege.
- if (check) {
- assert.eq(bsonWoCompare(res.$clusterTime.clusterTime, Timestamp(0, 0)), 1);
- }
- assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
- // The signature must have been signed by a key with a valid generation.
- if (check) {
- assert(res.$clusterTime.signature.keyId > NumberLong(0));
- }
+function assertContainsValidLogicalTime(res, check) {
+ assert.hasFields(res, ["$clusterTime"]);
+ assert.hasFields(res.$clusterTime, ["signature", "clusterTime"]);
+    // clusterTime must be greater than the uninitialized value.
+ // TODO: SERVER-31986 this check can be done only for authenticated connections that do not
+ // have advance_cluster_time privilege.
+ if (check) {
+ assert.eq(bsonWoCompare(res.$clusterTime.clusterTime, Timestamp(0, 0)), 1);
}
+ assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
+ // The signature must have been signed by a key with a valid generation.
+ if (check) {
+ assert(res.$clusterTime.signature.keyId > NumberLong(0));
+ }
+}
- let st = new ShardingTest({shards: {rs0: {nodes: 2}}});
+let st = new ShardingTest({shards: {rs0: {nodes: 2}}});
- // Verify there are keys in the config server eventually, since mongos doesn't block for keys at
- // startup, and that once there are, mongos sends $clusterTime with a signature in responses.
- assert.soonNoExcept(function() {
- assert(st.s.getDB("admin").system.keys.count() >= 2);
+// Verify there are keys in the config server eventually, since mongos doesn't block for keys at
+// startup, and that once there are, mongos sends $clusterTime with a signature in responses.
+assert.soonNoExcept(function() {
+ assert(st.s.getDB("admin").system.keys.count() >= 2);
- let res = assert.commandWorked(st.s.getDB("test").runCommand({isMaster: 1}));
- assertContainsValidLogicalTime(res, false);
+ let res = assert.commandWorked(st.s.getDB("test").runCommand({isMaster: 1}));
+ assertContainsValidLogicalTime(res, false);
- return true;
- }, "expected keys to be created and for mongos to send signed cluster times");
+ return true;
+}, "expected keys to be created and for mongos to send signed cluster times");
- // Enable the failpoint, remove all keys, and restart the config servers with the failpoint
- // still enabled to guarantee there are no keys.
- for (let i = 0; i < st.configRS.nodes.length; i++) {
- assert.commandWorked(st.configRS.nodes[i].adminCommand(
- {"configureFailPoint": "disableKeyGeneration", "mode": "alwaysOn"}));
- }
- let res = st.configRS.getPrimary().getDB("admin").system.keys.remove({purpose: "HMAC"});
- assert(res.nRemoved >= 2);
- assert(st.s.getDB("admin").system.keys.count() == 0,
- "expected there to be no keys on the config server");
- st.configRS.stopSet(null /* signal */, true /* forRestart */);
- st.configRS.startSet(
- {restart: true, setParameter: {"failpoint.disableKeyGeneration": "{'mode':'alwaysOn'}"}});
+// Enable the failpoint, remove all keys, and restart the config servers with the failpoint
+// still enabled to guarantee there are no keys.
+for (let i = 0; i < st.configRS.nodes.length; i++) {
+ assert.commandWorked(st.configRS.nodes[i].adminCommand(
+ {"configureFailPoint": "disableKeyGeneration", "mode": "alwaysOn"}));
+}
+let res = st.configRS.getPrimary().getDB("admin").system.keys.remove({purpose: "HMAC"});
+assert(res.nRemoved >= 2);
+assert(st.s.getDB("admin").system.keys.count() == 0,
+ "expected there to be no keys on the config server");
+st.configRS.stopSet(null /* signal */, true /* forRestart */);
+st.configRS.startSet(
+ {restart: true, setParameter: {"failpoint.disableKeyGeneration": "{'mode':'alwaysOn'}"}});
- // Limit the max time between refreshes on the config server, so new keys are created quickly.
- st.configRS.getPrimary().adminCommand({
- "configureFailPoint": "maxKeyRefreshWaitTimeOverrideMS",
- "mode": "alwaysOn",
- "data": {"overrideMS": 1000}
- });
+// Limit the max time between refreshes on the config server, so new keys are created quickly.
+st.configRS.getPrimary().adminCommand({
+ "configureFailPoint": "maxKeyRefreshWaitTimeOverrideMS",
+ "mode": "alwaysOn",
+ "data": {"overrideMS": 1000}
+});
- // Disable the failpoint.
- for (let i = 0; i < st.configRS.nodes.length; i++) {
- assert.commandWorked(st.configRS.nodes[i].adminCommand(
- {"configureFailPoint": "disableKeyGeneration", "mode": "off"}));
- }
+// Disable the failpoint.
+for (let i = 0; i < st.configRS.nodes.length; i++) {
+ assert.commandWorked(st.configRS.nodes[i].adminCommand(
+ {"configureFailPoint": "disableKeyGeneration", "mode": "off"}));
+}
- // Mongos should restart with no problems.
- st.restartMongoses();
+// Mongos should restart with no problems.
+st.restartMongoses();
- // Eventually mongos will discover the new keys, and start signing cluster times.
- assert.soonNoExcept(function() {
- assertContainsValidLogicalTime(st.s.getDB("test").runCommand({isMaster: 1}), false);
- return true;
- }, "expected mongos to eventually start signing cluster times", 60 * 1000); // 60 seconds.
+// Eventually mongos will discover the new keys, and start signing cluster times.
+assert.soonNoExcept(function() {
+ assertContainsValidLogicalTime(st.s.getDB("test").runCommand({isMaster: 1}), false);
+ return true;
+}, "expected mongos to eventually start signing cluster times", 60 * 1000); // 60 seconds.
- // There may be a delay between the creation of the first and second keys, but mongos will start
- // signing after seeing the first key, so there is only guaranteed to be one at this point.
- assert(st.s.getDB("admin").system.keys.count() >= 1,
- "expected there to be at least one generation of keys on the config server");
+// There may be a delay between the creation of the first and second keys, but mongos will start
+// signing after seeing the first key, so there is only guaranteed to be one at this point.
+assert(st.s.getDB("admin").system.keys.count() >= 1,
+ "expected there to be at least one generation of keys on the config server");
- st.stop();
+st.stop();
})();
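
Since the test toggles the disableKeyGeneration failpoint on every config node twice, a small
sketch of a hypothetical helper that factors out that loop (the failpoint name and the
configureFailPoint syntax come from the test itself):

    function setKeyGenerationFailpoint(configRS, mode) {
        configRS.nodes.forEach(function(node) {
            assert.commandWorked(
                node.adminCommand({configureFailPoint: "disableKeyGeneration", mode: mode}));
        });
    }

    // setKeyGenerationFailpoint(st.configRS, "alwaysOn");  // pause key generation
    // setKeyGenerationFailpoint(st.configRS, "off");       // resume key generation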
diff --git a/jstests/sharding/mongos_local_explain.js b/jstests/sharding/mongos_local_explain.js
index bf9ab379e53..d21ee745306 100644
--- a/jstests/sharding/mongos_local_explain.js
+++ b/jstests/sharding/mongos_local_explain.js
@@ -3,30 +3,30 @@
* confirms that the pipeline ran entirely on mongoS.
*/
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({name: "mongos_comment_test", mongos: 1, shards: 1});
- const mongosConn = st.s;
+const st = new ShardingTest({name: "mongos_comment_test", mongos: 1, shards: 1});
+const mongosConn = st.s;
- const stageSpec = {
- "$listLocalSessions": {allUsers: false, users: [{user: "nobody", db: "nothing"}]}
- };
+const stageSpec = {
+ "$listLocalSessions": {allUsers: false, users: [{user: "nobody", db: "nothing"}]}
+};
- // Use the test stage to create a pipeline that runs exclusively on mongoS.
- const mongosOnlyPipeline = [stageSpec, {$match: {dummyField: 1}}];
+// Use the test stage to create a pipeline that runs exclusively on mongoS.
+const mongosOnlyPipeline = [stageSpec, {$match: {dummyField: 1}}];
- // We expect the explain output to reflect the stage's spec.
- const expectedExplainStages = [stageSpec, {$match: {dummyField: {$eq: 1}}}];
+// We expect the explain output to reflect the stage's spec.
+const expectedExplainStages = [stageSpec, {$match: {dummyField: {$eq: 1}}}];
- // Test that the mongoS-only pipeline is explainable.
- const explainPlan = assert.commandWorked(mongosConn.getDB("admin").runCommand(
- {aggregate: 1, pipeline: mongosOnlyPipeline, explain: true}));
+// Test that the mongoS-only pipeline is explainable.
+const explainPlan = assert.commandWorked(mongosConn.getDB("admin").runCommand(
+ {aggregate: 1, pipeline: mongosOnlyPipeline, explain: true}));
- // We expect the stages to appear under the 'mongos' heading, for 'splitPipeline' to be
- // null, and for the 'mongos.host' field to be the hostname:port of the mongoS itself.
- assert.docEq(explainPlan.mongos.stages, expectedExplainStages);
- assert.eq(explainPlan.mongos.host, mongosConn.name);
- assert.isnull(explainPlan.splitPipeline);
+// We expect the stages to appear under the 'mongos' heading, for 'splitPipeline' to be
+// null, and for the 'mongos.host' field to be the hostname:port of the mongoS itself.
+assert.docEq(explainPlan.mongos.stages, expectedExplainStages);
+assert.eq(explainPlan.mongos.host, mongosConn.name);
+assert.isnull(explainPlan.splitPipeline);
- st.stop();
+st.stop();
})();
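
A minimal sketch of a hypothetical helper wrapping the three explain assertions, so the same
check could be reused for other mongoS-only pipelines (the helper name is an assumption):

    function assertRanEntirelyOnMongoS(explainPlan, mongosConn, expectedStages) {
        // A pipeline that never leaves mongoS reports its stages under 'mongos', names the
        // mongoS host, and has no split pipeline.
        assert.docEq(explainPlan.mongos.stages, expectedStages);
        assert.eq(explainPlan.mongos.host, mongosConn.name);
        assert.isnull(explainPlan.splitPipeline);
    }

    // assertRanEntirelyOnMongoS(explainPlan, mongosConn, expectedExplainStages);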
diff --git a/jstests/sharding/mongos_no_detect_sharding.js b/jstests/sharding/mongos_no_detect_sharding.js
index 8fedcb09ce2..6dc458c2ae7 100644
--- a/jstests/sharding/mongos_no_detect_sharding.js
+++ b/jstests/sharding/mongos_no_detect_sharding.js
@@ -1,42 +1,41 @@
// Tests whether new sharding is detected on insert by mongos
(function() {
- var st = new ShardingTest({name: "mongos_no_detect_sharding", shards: 1, mongos: 2});
+var st = new ShardingTest({name: "mongos_no_detect_sharding", shards: 1, mongos: 2});
- var mongos = st.s;
- var config = mongos.getDB("config");
+var mongos = st.s;
+var config = mongos.getDB("config");
- print("Creating unsharded connection...");
+print("Creating unsharded connection...");
- var mongos2 = st._mongos[1];
+var mongos2 = st._mongos[1];
- var coll = mongos2.getCollection("test.foo");
- assert.writeOK(coll.insert({i: 0}));
+var coll = mongos2.getCollection("test.foo");
+assert.writeOK(coll.insert({i: 0}));
- print("Sharding collection...");
+print("Sharding collection...");
- var admin = mongos.getDB("admin");
+var admin = mongos.getDB("admin");
- assert.eq(coll.getShardVersion().ok, 0);
+assert.eq(coll.getShardVersion().ok, 0);
- admin.runCommand({enableSharding: "test"});
- admin.runCommand({shardCollection: "test.foo", key: {_id: 1}});
+admin.runCommand({enableSharding: "test"});
+admin.runCommand({shardCollection: "test.foo", key: {_id: 1}});
- print("Seeing if data gets inserted unsharded...");
- print("No splits occur here!");
+print("Seeing if data gets inserted unsharded...");
+print("No splits occur here!");
- // Insert a bunch of data which should trigger a split
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({i: i + 1});
- }
- assert.writeOK(bulk.execute());
+// Insert a bunch of data which should trigger a split
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({i: i + 1});
+}
+assert.writeOK(bulk.execute());
- st.printShardingStatus(true);
+st.printShardingStatus(true);
- assert.eq(coll.getShardVersion().ok, 1);
- assert.eq(101, coll.find().itcount());
-
- st.stop();
+assert.eq(coll.getShardVersion().ok, 1);
+assert.eq(101, coll.find().itcount());
+st.stop();
})();
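
The test relies on the insert path itself to detect the newly sharded collection; for contrast,
a sketch of forcing the second mongos to refresh its routing table explicitly with the
flushRouterConfig admin command (deliberately not what this test does):

    assert.commandWorked(mongos2.adminCommand({flushRouterConfig: 1}));
    // The next operation through this mongos reloads routing info from the config servers.
    assert.eq(101, coll.find().itcount());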
diff --git a/jstests/sharding/mongos_no_replica_set_refresh.js b/jstests/sharding/mongos_no_replica_set_refresh.js
index c6a8a1e05b0..b9080f950fc 100644
--- a/jstests/sharding/mongos_no_replica_set_refresh.js
+++ b/jstests/sharding/mongos_no_replica_set_refresh.js
@@ -2,136 +2,132 @@
load("jstests/replsets/rslib.js");
(function() {
- 'use strict';
-
- var five_minutes = 5 * 60 * 1000;
-
- var numRSHosts = function() {
- var result = assert.commandWorked(rsObj.nodes[0].adminCommand({ismaster: 1}));
- return result.hosts.length + result.passives.length;
- };
-
- var numMongosHosts = function() {
- var commandResult = assert.commandWorked(mongos.adminCommand("connPoolStats"));
- var result = commandResult.replicaSets[rsObj.name];
- return result.hosts.length;
- };
-
- var configServerURL = function() {
- var result = config.shards.find().toArray()[0];
- return result.host;
- };
-
- var checkNumHosts = function(expectedNumHosts) {
- jsTest.log("Waiting for the shard to discover that it now has " + expectedNumHosts +
- " hosts.");
- var numHostsSeenByShard;
-
- // Use a high timeout (5 minutes) because replica set refreshes are only done every 30
- // seconds.
- assert.soon(
- function() {
- numHostsSeenByShard = numRSHosts();
- return numHostsSeenByShard === expectedNumHosts;
- },
- function() {
- return ("Expected shard to see " + expectedNumHosts + " hosts but found " +
- numHostsSeenByShard);
- },
- five_minutes);
-
- jsTest.log("Waiting for the mongos to discover that the shard now has " + expectedNumHosts +
- " hosts.");
- var numHostsSeenByMongos;
-
- // Use a high timeout (5 minutes) because replica set refreshes are only done every 30
- // seconds.
- assert.soon(
- function() {
- numHostsSeenByMongos = numMongosHosts();
- return numHostsSeenByMongos === expectedNumHosts;
- },
- function() {
- return ("Expected mongos to see " + expectedNumHosts +
- " hosts on shard but found " + numHostsSeenByMongos);
- },
- five_minutes);
- };
-
- var st = new ShardingTest({
- name: 'mongos_no_replica_set_refresh',
- shards: 1,
- mongos: 1,
- other: {
- rs0: {
- nodes: [
- {},
- {rsConfig: {priority: 0}},
- {rsConfig: {priority: 0}},
- ],
- }
- }
- });
-
- var rsObj = st.rs0;
- assert.commandWorked(rsObj.nodes[0].adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.PRIMARY,
- timeoutMillis: 60 * 1000,
- }),
- 'node 0 ' + rsObj.nodes[0].host + ' failed to become primary');
+'use strict';
- var mongos = st.s;
- var config = mongos.getDB("config");
+var five_minutes = 5 * 60 * 1000;
- printjson(mongos.getCollection("foo.bar").findOne());
+var numRSHosts = function() {
+ var result = assert.commandWorked(rsObj.nodes[0].adminCommand({ismaster: 1}));
+ return result.hosts.length + result.passives.length;
+};
- jsTestLog("Removing a node from the shard's replica set.");
+var numMongosHosts = function() {
+ var commandResult = assert.commandWorked(mongos.adminCommand("connPoolStats"));
+ var result = commandResult.replicaSets[rsObj.name];
+ return result.hosts.length;
+};
- var rsConfig = rsObj.getReplSetConfigFromNode(0);
+var configServerURL = function() {
+ var result = config.shards.find().toArray()[0];
+ return result.host;
+};
- var removedNode = rsConfig.members.pop();
- rsConfig.version++;
- reconfig(rsObj, rsConfig);
+var checkNumHosts = function(expectedNumHosts) {
+ jsTest.log("Waiting for the shard to discover that it now has " + expectedNumHosts + " hosts.");
+ var numHostsSeenByShard;
- // Wait for the election round to complete
- rsObj.getPrimary();
+ // Use a high timeout (5 minutes) because replica set refreshes are only done every 30
+ // seconds.
+ assert.soon(
+ function() {
+ numHostsSeenByShard = numRSHosts();
+ return numHostsSeenByShard === expectedNumHosts;
+ },
+ function() {
+ return ("Expected shard to see " + expectedNumHosts + " hosts but found " +
+ numHostsSeenByShard);
+ },
+ five_minutes);
- checkNumHosts(rsConfig.members.length);
+ jsTest.log("Waiting for the mongos to discover that the shard now has " + expectedNumHosts +
+ " hosts.");
+ var numHostsSeenByMongos;
- jsTest.log("Waiting for config.shards to reflect that " + removedNode.host +
- " has been removed.");
+ // Use a high timeout (5 minutes) because replica set refreshes are only done every 30
+ // seconds.
assert.soon(
function() {
- return configServerURL().indexOf(removedNode.host) < 0;
+ numHostsSeenByMongos = numMongosHosts();
+ return numHostsSeenByMongos === expectedNumHosts;
},
function() {
- return (removedNode.host + " was removed from " + rsObj.name +
- ", but is still seen in config.shards");
- });
+ return ("Expected mongos to see " + expectedNumHosts + " hosts on shard but found " +
+ numHostsSeenByMongos);
+ },
+ five_minutes);
+};
+
+var st = new ShardingTest({
+ name: 'mongos_no_replica_set_refresh',
+ shards: 1,
+ mongos: 1,
+ other: {
+ rs0: {
+ nodes: [
+ {},
+ {rsConfig: {priority: 0}},
+ {rsConfig: {priority: 0}},
+ ],
+ }
+ }
+});
- jsTestLog("Adding the node back to the shard's replica set.");
+var rsObj = st.rs0;
+assert.commandWorked(rsObj.nodes[0].adminCommand({
+ replSetTest: 1,
+ waitForMemberState: ReplSetTest.State.PRIMARY,
+ timeoutMillis: 60 * 1000,
+}),
+ 'node 0 ' + rsObj.nodes[0].host + ' failed to become primary');
- config.shards.update({_id: rsObj.name}, {$set: {host: rsObj.name + "/" + rsObj.nodes[0].host}});
- printjson(config.shards.find().toArray());
+var mongos = st.s;
+var config = mongos.getDB("config");
- rsConfig.members.push(removedNode);
- rsConfig.version++;
- reconfig(rsObj, rsConfig);
+printjson(mongos.getCollection("foo.bar").findOne());
- checkNumHosts(rsConfig.members.length);
+jsTestLog("Removing a node from the shard's replica set.");
- jsTest.log("Waiting for config.shards to reflect that " + removedNode.host +
- " has been re-added.");
- assert.soon(
- function() {
- return configServerURL().indexOf(removedNode.host) >= 0;
- },
- function() {
- return (removedNode.host + " was re-added to " + rsObj.name +
- ", but is not seen in config.shards");
- });
+var rsConfig = rsObj.getReplSetConfigFromNode(0);
+
+var removedNode = rsConfig.members.pop();
+rsConfig.version++;
+reconfig(rsObj, rsConfig);
+
+// Wait for the election round to complete
+rsObj.getPrimary();
+
+checkNumHosts(rsConfig.members.length);
+
+jsTest.log("Waiting for config.shards to reflect that " + removedNode.host + " has been removed.");
+assert.soon(
+ function() {
+ return configServerURL().indexOf(removedNode.host) < 0;
+ },
+ function() {
+ return (removedNode.host + " was removed from " + rsObj.name +
+ ", but is still seen in config.shards");
+ });
+
+jsTestLog("Adding the node back to the shard's replica set.");
+
+config.shards.update({_id: rsObj.name}, {$set: {host: rsObj.name + "/" + rsObj.nodes[0].host}});
+printjson(config.shards.find().toArray());
- st.stop();
+rsConfig.members.push(removedNode);
+rsConfig.version++;
+reconfig(rsObj, rsConfig);
+
+checkNumHosts(rsConfig.members.length);
+
+jsTest.log("Waiting for config.shards to reflect that " + removedNode.host + " has been re-added.");
+assert.soon(
+ function() {
+ return configServerURL().indexOf(removedNode.host) >= 0;
+ },
+ function() {
+ return (removedNode.host + " was re-added to " + rsObj.name +
+ ", but is not seen in config.shards");
+ });
+st.stop();
}());
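
Both assert.soon blocks poll config.shards for the presence or absence of a host; a sketch of a
hypothetical helper capturing that pattern (the helper name is an assumption):

    function awaitConfigShardsMembership(configDB, shardName, host, shouldContain) {
        assert.soon(function() {
            var entry = configDB.shards.findOne({_id: shardName});
            return (entry.host.indexOf(host) >= 0) === shouldContain;
        }, host + (shouldContain ? " was not added to " : " was not removed from ") +
            "the config.shards entry for " + shardName);
    }

    // awaitConfigShardsMembership(config, rsObj.name, removedNode.host, false);  // after removal
    // awaitConfigShardsMembership(config, rsObj.name, removedNode.host, true);   // after re-add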
diff --git a/jstests/sharding/mongos_query_comment.js b/jstests/sharding/mongos_query_comment.js
index 963e43f6bf8..ccb10d16824 100644
--- a/jstests/sharding/mongos_query_comment.js
+++ b/jstests/sharding/mongos_query_comment.js
@@ -5,83 +5,80 @@
* comment to the find command fails.
*/
(function() {
- "use strict";
-
- // For profilerHasSingleMatchingEntryOrThrow.
- load("jstests/libs/profiler.js");
-
- const st = new ShardingTest({name: "mongos_comment_test", mongos: 1, shards: 1});
-
- const shard = st.shard0;
- const mongos = st.s;
-
- // Need references to the database via both mongos and mongod so that we can enable profiling &
- // test queries on the shard.
- const mongosDB = mongos.getDB("mongos_comment");
- const shardDB = shard.getDB("mongos_comment");
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- const mongosColl = mongosDB.test;
- const shardColl = shardDB.test;
-
- const collNS = mongosColl.getFullName();
-
- for (let i = 0; i < 5; ++i) {
- assert.writeOK(mongosColl.insert({_id: i, a: i}));
- }
-
- // The profiler will be used to verify that comments are present on the shard.
- assert.commandWorked(shardDB.setProfilingLevel(2));
- const profiler = shardDB.system.profile;
-
- //
- // Set legacy read mode for the mongos and shard connections.
- //
- mongosDB.getMongo().forceReadMode("legacy");
- shardDB.getMongo().forceReadMode("legacy");
-
- // TEST CASE: A legacy string $comment meta-operator is propagated to the shards via mongos.
- assert.eq(mongosColl.find({$query: {a: 1}, $comment: "TEST"}).itcount(), 1);
- profilerHasSingleMatchingEntryOrThrow(
- {profileDB: shardDB, filter: {op: "query", ns: collNS, "command.comment": "TEST"}});
-
- // TEST CASE: A legacy BSONObj $comment is converted to a string and propagated via mongos.
- assert.eq(mongosColl.find({$query: {a: 1}, $comment: {c: 2, d: {e: "TEST"}}}).itcount(), 1);
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardDB,
- filter: {op: "query", ns: collNS, "command.comment": "{ c: 2.0, d: { e: \"TEST\" } }"}
- });
-
- // TEST CASE: Legacy BSONObj $comment is NOT converted to a string when issued on the mongod.
- assert.eq(shardColl.find({$query: {a: 1}, $comment: {c: 3, d: {e: "TEST"}}}).itcount(), 1);
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardDB,
- filter: {op: "query", ns: collNS, "command.comment": {c: 3, d: {e: "TEST"}}}
- });
-
- //
- // Revert to "commands" read mode for the find command test cases below.
- //
- mongosDB.getMongo().forceReadMode("commands");
- shardDB.getMongo().forceReadMode("commands");
-
- // TEST CASE: Verify that string find.comment and non-string find.filter.$comment propagate.
- assert.eq(mongosColl.find({a: 1, $comment: {b: "TEST"}}).comment("TEST").itcount(), 1);
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardDB,
- filter: {
- op: "query",
- ns: collNS, "command.comment": "TEST", "command.filter.$comment": {b: "TEST"}
- }
- });
-
- // TEST CASE: Verify that find command with a non-string comment parameter is rejected.
- assert.commandFailedWithCode(
- mongosDB.runCommand(
- {"find": mongosColl.getName(), "filter": {a: 1}, "comment": {b: "TEST"}}),
- 9,
- "Non-string find command comment did not return an error.");
-
- st.stop();
+"use strict";
+
+// For profilerHasSingleMatchingEntryOrThrow.
+load("jstests/libs/profiler.js");
+
+const st = new ShardingTest({name: "mongos_comment_test", mongos: 1, shards: 1});
+
+const shard = st.shard0;
+const mongos = st.s;
+
+// Need references to the database via both mongos and mongod so that we can enable profiling &
+// test queries on the shard.
+const mongosDB = mongos.getDB("mongos_comment");
+const shardDB = shard.getDB("mongos_comment");
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+const mongosColl = mongosDB.test;
+const shardColl = shardDB.test;
+
+const collNS = mongosColl.getFullName();
+
+for (let i = 0; i < 5; ++i) {
+ assert.writeOK(mongosColl.insert({_id: i, a: i}));
+}
+
+// The profiler will be used to verify that comments are present on the shard.
+assert.commandWorked(shardDB.setProfilingLevel(2));
+const profiler = shardDB.system.profile;
+
+//
+// Set legacy read mode for the mongos and shard connections.
+//
+mongosDB.getMongo().forceReadMode("legacy");
+shardDB.getMongo().forceReadMode("legacy");
+
+// TEST CASE: A legacy string $comment meta-operator is propagated to the shards via mongos.
+assert.eq(mongosColl.find({$query: {a: 1}, $comment: "TEST"}).itcount(), 1);
+profilerHasSingleMatchingEntryOrThrow(
+ {profileDB: shardDB, filter: {op: "query", ns: collNS, "command.comment": "TEST"}});
+
+// TEST CASE: A legacy BSONObj $comment is converted to a string and propagated via mongos.
+assert.eq(mongosColl.find({$query: {a: 1}, $comment: {c: 2, d: {e: "TEST"}}}).itcount(), 1);
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardDB,
+ filter: {op: "query", ns: collNS, "command.comment": "{ c: 2.0, d: { e: \"TEST\" } }"}
+});
+
+// TEST CASE: Legacy BSONObj $comment is NOT converted to a string when issued on the mongod.
+assert.eq(shardColl.find({$query: {a: 1}, $comment: {c: 3, d: {e: "TEST"}}}).itcount(), 1);
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardDB,
+ filter: {op: "query", ns: collNS, "command.comment": {c: 3, d: {e: "TEST"}}}
+});
+
+//
+// Revert to "commands" read mode for the find command test cases below.
+//
+mongosDB.getMongo().forceReadMode("commands");
+shardDB.getMongo().forceReadMode("commands");
+
+// TEST CASE: Verify that string find.comment and non-string find.filter.$comment propagate.
+assert.eq(mongosColl.find({a: 1, $comment: {b: "TEST"}}).comment("TEST").itcount(), 1);
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardDB,
+ filter:
+ {op: "query", ns: collNS, "command.comment": "TEST", "command.filter.$comment": {b: "TEST"}}
+});
+
+// TEST CASE: Verify that find command with a non-string comment parameter is rejected.
+assert.commandFailedWithCode(
+ mongosDB.runCommand({"find": mongosColl.getName(), "filter": {a: 1}, "comment": {b: "TEST"}}),
+ 9,
+ "Non-string find command comment did not return an error.");
+
+st.stop();
})(); \ No newline at end of file
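
The profiler checks go through profilerHasSingleMatchingEntryOrThrow from jstests/libs/profiler.js;
a simplified sketch of checking the same condition directly against system.profile:

    var entries =
        shardDB.system.profile.find({op: "query", ns: collNS, "command.comment": "TEST"}).toArray();
    assert.eq(1, entries.length, tojson(entries));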
diff --git a/jstests/sharding/mongos_rs_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
index 7b43adcdd49..d41759de5db 100644
--- a/jstests/sharding/mongos_rs_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
@@ -16,417 +16,415 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 3, mongos: 1, other: {rs: true, rsOptions: {nodes: 2}}});
+var st = new ShardingTest({shards: 3, mongos: 1, other: {rs: true, rsOptions: {nodes: 2}}});
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
- assert.commandWorked(admin.runCommand({setParameter: 1, traceExceptions: true}));
+assert.commandWorked(admin.runCommand({setParameter: 1, traceExceptions: true}));
- var collSharded = mongos.getCollection("fooSharded.barSharded");
- var collUnsharded = mongos.getCollection("fooUnsharded.barUnsharded");
+var collSharded = mongos.getCollection("fooSharded.barSharded");
+var collUnsharded = mongos.getCollection("fooUnsharded.barUnsharded");
- // Create the unsharded database
- assert.writeOK(collUnsharded.insert({some: "doc"}));
- assert.writeOK(collUnsharded.remove({}));
- assert.commandWorked(
- admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));
+// Create the unsharded database
+assert.writeOK(collUnsharded.insert({some: "doc"}));
+assert.writeOK(collUnsharded.remove({}));
+assert.commandWorked(
+ admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));
- // Create the sharded database
- assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
- assert.commandWorked(
- admin.runCommand({movePrimary: collSharded.getDB().toString(), to: st.shard0.shardName}));
- assert.commandWorked(
- admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: collSharded.toString(), find: {_id: 0}, to: st.shard1.shardName}));
+// Create the sharded database
+assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
+assert.commandWorked(
+ admin.runCommand({movePrimary: collSharded.getDB().toString(), to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: collSharded.toString(), find: {_id: 0}, to: st.shard1.shardName}));
- // Secondaries do not refresh their in-memory routing table until a request with a higher
- // version is received, and refreshing requires communication with the primary to obtain the
- // newest version. Read from the secondaries once before taking down primaries to ensure they
- // have loaded the routing table into memory.
- // TODO SERVER-30148: replace this with calls to awaitReplication() on each shard owning data
- // for the sharded collection once secondaries refresh proactively.
- var mongosSetupConn = new Mongo(mongos.host);
- mongosSetupConn.setReadPref("secondary");
- assert(!mongosSetupConn.getCollection(collSharded.toString()).find({}).hasNext());
+// Secondaries do not refresh their in-memory routing table until a request with a higher
+// version is received, and refreshing requires communication with the primary to obtain the
+// newest version. Read from the secondaries once before taking down primaries to ensure they
+// have loaded the routing table into memory.
+// TODO SERVER-30148: replace this with calls to awaitReplication() on each shard owning data
+// for the sharded collection once secondaries refresh proactively.
+var mongosSetupConn = new Mongo(mongos.host);
+mongosSetupConn.setReadPref("secondary");
+assert(!mongosSetupConn.getCollection(collSharded.toString()).find({}).hasNext());
- gc(); // Clean up connections
+gc(); // Clean up connections
- st.printShardingStatus();
+st.printShardingStatus();
- //
- // Setup is complete
- //
+//
+// Setup is complete
+//
- jsTest.log("Inserting initial data...");
+jsTest.log("Inserting initial data...");
- var mongosConnActive = new Mongo(mongos.host);
- var mongosConnIdle = null;
- var mongosConnNew = null;
+var mongosConnActive = new Mongo(mongos.host);
+var mongosConnIdle = null;
+var mongosConnNew = null;
- var wc = {writeConcern: {w: 2, wtimeout: 60000}};
+var wc = {writeConcern: {w: 2, wtimeout: 60000}};
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
- assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
- jsTest.log("Stopping primary of third shard...");
+jsTest.log("Stopping primary of third shard...");
- mongosConnIdle = new Mongo(mongos.host);
+mongosConnIdle = new Mongo(mongos.host);
- st.rs2.stop(st.rs2.getPrimary());
+st.rs2.stop(st.rs2.getPrimary());
- jsTest.log("Testing active connection with third primary down...");
+jsTest.log("Testing active connection with third primary down...");
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
- assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
- jsTest.log("Testing idle connection with third primary down...");
+jsTest.log("Testing idle connection with third primary down...");
- assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
- assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
- assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
-
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- jsTest.log("Testing new connections with third primary down...");
-
- mongosConnNew = new Mongo(mongos.host);
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- mongosConnNew = new Mongo(mongos.host);
- assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
-
- gc(); // Clean up new connections
-
- jsTest.log("Stopping primary of second shard...");
-
- mongosConnIdle = new Mongo(mongos.host);
-
- // Need to save this node for later
- var rs1Secondary = st.rs1.getSecondary();
-
- st.rs1.stop(st.rs1.getPrimary());
-
- jsTest.log("Testing active connection with second primary down...");
-
- // Reads with read prefs
- mongosConnActive.setSlaveOk();
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnActive.setSlaveOk(false);
-
- mongosConnActive.setReadPref("primary");
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.throws(function() {
- mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1});
- });
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- // Ensure read prefs override slaveOK
- mongosConnActive.setSlaveOk();
- mongosConnActive.setReadPref("primary");
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.throws(function() {
- mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1});
- });
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnActive.setSlaveOk(false);
-
- mongosConnActive.setReadPref("secondary");
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- mongosConnActive.setReadPref("primaryPreferred");
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- mongosConnActive.setReadPref("secondaryPreferred");
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- mongosConnActive.setReadPref("nearest");
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- // Writes
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
- assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}, wc));
- assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
-
- jsTest.log("Testing idle connection with second primary down...");
-
- // Writes
- assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
- assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}, wc));
- assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
-
- // Reads with read prefs
- mongosConnIdle.setSlaveOk();
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnIdle.setSlaveOk(false);
-
- mongosConnIdle.setReadPref("primary");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.throws(function() {
- mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1});
- });
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- // Ensure read prefs override slaveOK
- mongosConnIdle.setSlaveOk();
- mongosConnIdle.setReadPref("primary");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.throws(function() {
- mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1});
- });
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnIdle.setSlaveOk(false);
-
- mongosConnIdle.setReadPref("secondary");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- mongosConnIdle.setReadPref("primaryPreferred");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- mongosConnIdle.setReadPref("secondaryPreferred");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- mongosConnIdle.setReadPref("nearest");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- jsTest.log("Testing new connections with second primary down...");
-
- // Reads with read prefs
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("primary");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("primary");
- assert.throws(function() {
- mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1});
- });
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("primary");
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
- // Ensure read prefs override slaveok
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- mongosConnNew.setReadPref("primary");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- mongosConnNew.setReadPref("primary");
- assert.throws(function() {
- mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1});
- });
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- mongosConnNew.setReadPref("primary");
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("secondary");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("secondary");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("secondary");
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("primaryPreferred");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("primaryPreferred");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("primaryPreferred");
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("secondaryPreferred");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("secondaryPreferred");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("secondaryPreferred");
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("nearest");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("nearest");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("nearest");
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
- // Writes
- mongosConnNew = new Mongo(mongos.host);
- assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}, wc));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
-
- gc(); // Clean up new connections
-
- jsTest.log("Stopping primary of first shard...");
-
- mongosConnIdle = new Mongo(mongos.host);
-
- st.rs0.stop(st.rs0.getPrimary());
-
- jsTest.log("Testing active connection with first primary down...");
-
- mongosConnActive.setSlaveOk();
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -8}));
- assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 8}));
- assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 8}));
-
- jsTest.log("Testing idle connection with first primary down...");
-
- assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -9}));
- assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 9}));
- assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 9}));
-
- mongosConnIdle.setSlaveOk();
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- jsTest.log("Testing new connections with first primary down...");
-
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
+assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -10}));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 10}));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 10}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- gc(); // Clean up new connections
+jsTest.log("Testing new connections with third primary down...");
- jsTest.log("Stopping second shard...");
+mongosConnNew = new Mongo(mongos.host);
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(mongos.host);
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+mongosConnNew = new Mongo(mongos.host);
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
+
+gc(); // Clean up new connections
+
+jsTest.log("Stopping primary of second shard...");
+
+mongosConnIdle = new Mongo(mongos.host);
+
+// Need to save this node for later
+var rs1Secondary = st.rs1.getSecondary();
+
+st.rs1.stop(st.rs1.getPrimary());
+
+jsTest.log("Testing active connection with second primary down...");
+
+// Reads with read prefs
+mongosConnActive.setSlaveOk();
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+mongosConnActive.setSlaveOk(false);
+
+mongosConnActive.setReadPref("primary");
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.throws(function() {
+ mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1});
+});
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+// Ensure read prefs override slaveOK
+mongosConnActive.setSlaveOk();
+mongosConnActive.setReadPref("primary");
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.throws(function() {
+ mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1});
+});
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+mongosConnActive.setSlaveOk(false);
+
+mongosConnActive.setReadPref("secondary");
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+mongosConnActive.setReadPref("primaryPreferred");
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+mongosConnActive.setReadPref("secondaryPreferred");
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+mongosConnActive.setReadPref("nearest");
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+// Writes
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}, wc));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
+
+jsTest.log("Testing idle connection with second primary down...");
+
+// Writes
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}, wc));
+assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
+
+// Reads with read prefs
+mongosConnIdle.setSlaveOk();
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+mongosConnIdle.setSlaveOk(false);
+
+mongosConnIdle.setReadPref("primary");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.throws(function() {
+ mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1});
+});
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+// Ensure read prefs override slaveOK
+mongosConnIdle.setSlaveOk();
+mongosConnIdle.setReadPref("primary");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.throws(function() {
+ mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1});
+});
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+mongosConnIdle.setSlaveOk(false);
+
+mongosConnIdle.setReadPref("secondary");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+mongosConnIdle.setReadPref("primaryPreferred");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+mongosConnIdle.setReadPref("secondaryPreferred");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+mongosConnIdle.setReadPref("nearest");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+jsTest.log("Testing new connections with second primary down...");
+
+// Reads with read prefs
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("primary");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("primary");
+assert.throws(function() {
+ mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1});
+});
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("primary");
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+// Ensure read prefs override slaveOK
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+mongosConnNew.setReadPref("primary");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+mongosConnNew.setReadPref("primary");
+assert.throws(function() {
+ mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1});
+});
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+mongosConnNew.setReadPref("primary");
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("secondary");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("secondary");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("secondary");
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("primaryPreferred");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("primaryPreferred");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("primaryPreferred");
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("secondaryPreferred");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("secondaryPreferred");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("secondaryPreferred");
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("nearest");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("nearest");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("nearest");
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+// Writes
+mongosConnNew = new Mongo(mongos.host);
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}, wc));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
+
+gc(); // Clean up new connections
+
+jsTest.log("Stopping primary of first shard...");
+
+mongosConnIdle = new Mongo(mongos.host);
+
+st.rs0.stop(st.rs0.getPrimary());
+
+jsTest.log("Testing active connection with first primary down...");
+
+mongosConnActive.setSlaveOk();
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -8}));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 8}));
+assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 8}));
+
+jsTest.log("Testing idle connection with first primary down...");
+
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -9}));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 9}));
+assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 9}));
+
+mongosConnIdle.setSlaveOk();
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+jsTest.log("Testing new connections with first primary down...");
+
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnIdle = new Mongo(mongos.host);
+mongosConnNew = new Mongo(mongos.host);
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -10}));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 10}));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 10}));
+
+gc(); // Clean up new connections
- st.rs1.stop(rs1Secondary);
+jsTest.log("Stopping second shard...");
- jsTest.log("Testing active connection with second shard down...");
+mongosConnIdle = new Mongo(mongos.host);
- mongosConnActive.setSlaveOk();
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+st.rs1.stop(rs1Secondary);
+
+jsTest.log("Testing active connection with second shard down...");
- assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -11}));
- assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 11}));
- assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 11}));
+mongosConnActive.setSlaveOk();
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- jsTest.log("Testing idle connection with second shard down...");
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -11}));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 11}));
+assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 11}));
- assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -12}));
- assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 12}));
- assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 12}));
+jsTest.log("Testing idle connection with second shard down...");
- mongosConnIdle.setSlaveOk();
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -12}));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 12}));
+assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 12}));
- jsTest.log("Testing new connections with second shard down...");
+mongosConnIdle.setSlaveOk();
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+jsTest.log("Testing new connections with second shard down...");
- mongosConnNew = new Mongo(mongos.host);
- assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -13}));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 13}));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 13}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- gc(); // Clean up new connections
+mongosConnNew = new Mongo(mongos.host);
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -13}));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 13}));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 13}));
- st.stop();
+gc(); // Clean up new connections
+st.stop();
})();
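
The hunk above exercises read preferences against replica-set shards: setSlaveOk() permits secondary reads, but an explicit read preference takes precedence when both are set. A minimal sketch of the pattern on a fresh router connection, assuming a mongos reachable at mongosHost (hypothetical name) and an already-sharded namespace such as fooSharded.barSharded (name assumed here):

    var conn = new Mongo(mongosHost);
    conn.setSlaveOk();                        // allow secondary reads
    conn.setReadPref("primaryPreferred");     // read pref still wins over slaveOk
    // Succeeds as long as the chunk owning {_id: -1} has a reachable node.
    assert.neq(null, conn.getCollection("fooSharded.barSharded").findOne({_id: -1}));
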
diff --git a/jstests/sharding/mongos_shard_failure_tolerance.js b/jstests/sharding/mongos_shard_failure_tolerance.js
index 5443450f3bc..7d4560b5ee6 100644
--- a/jstests/sharding/mongos_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_shard_failure_tolerance.js
@@ -14,128 +14,126 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 3, mongos: 1});
+var st = new ShardingTest({shards: 3, mongos: 1});
- var admin = st.s0.getDB("admin");
+var admin = st.s0.getDB("admin");
- var collSharded = st.s0.getCollection("fooSharded.barSharded");
- var collUnsharded = st.s0.getCollection("fooUnsharded.barUnsharded");
+var collSharded = st.s0.getCollection("fooSharded.barSharded");
+var collUnsharded = st.s0.getCollection("fooUnsharded.barUnsharded");
- assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
- st.ensurePrimaryShard(collSharded.getDB().toString(), st.shard0.shardName);
+assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
+st.ensurePrimaryShard(collSharded.getDB().toString(), st.shard0.shardName);
- assert.commandWorked(
- admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: collSharded.toString(), find: {_id: 0}, to: st.shard1.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: collSharded.toString(), find: {_id: 0}, to: st.shard1.shardName}));
- // Create the unsharded database
- assert.writeOK(collUnsharded.insert({some: "doc"}));
- assert.writeOK(collUnsharded.remove({}));
- st.ensurePrimaryShard(collUnsharded.getDB().toString(), st.shard0.shardName);
+// Create the unsharded database
+assert.writeOK(collUnsharded.insert({some: "doc"}));
+assert.writeOK(collUnsharded.remove({}));
+st.ensurePrimaryShard(collUnsharded.getDB().toString(), st.shard0.shardName);
- //
- // Setup is complete
- //
-
- jsTest.log("Inserting initial data...");
+//
+// Setup is complete
+//
- var mongosConnActive = new Mongo(st.s0.host);
- var mongosConnIdle = null;
- var mongosConnNew = null;
+jsTest.log("Inserting initial data...");
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}));
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}));
- assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}));
+var mongosConnActive = new Mongo(st.s0.host);
+var mongosConnIdle = null;
+var mongosConnNew = null;
- jsTest.log("Stopping third shard...");
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}));
- mongosConnIdle = new Mongo(st.s0.host);
+jsTest.log("Stopping third shard...");
- st.rs2.stopSet();
+mongosConnIdle = new Mongo(st.s0.host);
- jsTest.log("Testing active connection...");
+st.rs2.stopSet();
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+jsTest.log("Testing active connection...");
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}));
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}));
- assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- jsTest.log("Testing idle connection...");
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}));
- assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}));
- assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}));
- assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}));
+jsTest.log("Testing idle connection...");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}));
+assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}));
- jsTest.log("Testing new connections...");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+jsTest.log("Testing new connections...");
- mongosConnNew = new Mongo(st.s0.host);
- assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- gc(); // Clean up new connections
+mongosConnNew = new Mongo(st.s0.host);
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}));
- jsTest.log("Stopping second shard...");
+gc(); // Clean up new connections
- mongosConnIdle = new Mongo(st.s0.host);
+jsTest.log("Stopping second shard...");
- st.rs1.stopSet();
- jsTest.log("Testing active connection...");
+mongosConnIdle = new Mongo(st.s0.host);
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+st.rs1.stopSet();
+jsTest.log("Testing active connection...");
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}));
- assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}));
- jsTest.log("Testing idle connection...");
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}));
- assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}));
- assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}));
- assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}));
+jsTest.log("Testing idle connection...");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}));
+assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}));
- jsTest.log("Testing new connections...");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+jsTest.log("Testing new connections...");
- mongosConnNew = new Mongo(st.s0.host);
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}));
- st.stop();
+mongosConnNew = new Mongo(st.s0.host);
+assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}));
+st.stop();
})();
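
In the hunk above, once a shard is stopped only the operations that must target it fail; writes routed to live shards keep succeeding. A minimal sketch of that targeting behaviour, assuming st is the ShardingTest from the test, with the sharded collection split at {_id: 0} and the upper chunk living on the stopped shard:

    st.rs1.stopSet();
    var conn = new Mongo(st.s0.host);
    assert.writeOK(conn.getCollection("fooSharded.barSharded").insert({_id: -100}));     // live shard
    assert.writeError(conn.getCollection("fooSharded.barSharded").insert({_id: 100}));   // stopped shard
    assert.writeOK(conn.getCollection("fooUnsharded.barUnsharded").insert({_id: 100}));  // primary shard is up
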
diff --git a/jstests/sharding/mongos_validate_writes.js b/jstests/sharding/mongos_validate_writes.js
index d9114a6033f..66b71aa12c3 100644
--- a/jstests/sharding/mongos_validate_writes.js
+++ b/jstests/sharding/mongos_validate_writes.js
@@ -4,85 +4,85 @@
// Note that this is *unsafe* with broadcast removes and updates
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 3, other: {shardOptions: {verbose: 2}}});
+var st = new ShardingTest({shards: 2, mongos: 3, other: {shardOptions: {verbose: 2}}});
- var mongos = st.s0;
- var staleMongosA = st.s1;
- var staleMongosB = st.s2;
+var mongos = st.s0;
+var staleMongosA = st.s1;
+var staleMongosB = st.s2;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var coll = mongos.getCollection("foo.bar");
- var staleCollA = staleMongosA.getCollection(coll + "");
- var staleCollB = staleMongosB.getCollection(coll + "");
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var coll = mongos.getCollection("foo.bar");
+var staleCollA = staleMongosA.getCollection(coll + "");
+var staleCollB = staleMongosB.getCollection(coll + "");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
- coll.ensureIndex({a: 1});
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
+coll.ensureIndex({a: 1});
- // Shard the collection on {a: 1} and move one chunk to another shard. Updates need to be across
- // two shards to trigger an error, otherwise they are versioned and will succeed after raising
- // a StaleConfigException.
- st.shardColl(coll, {a: 1}, {a: 0}, {a: 1}, coll.getDB(), true);
+// Shard the collection on {a: 1} and move one chunk to another shard. Updates need to be across
+// two shards to trigger an error, otherwise they are versioned and will succeed after raising
+// a StaleConfigException.
+st.shardColl(coll, {a: 1}, {a: 0}, {a: 1}, coll.getDB(), true);
- // Let the stale mongos see the collection state
- staleCollA.findOne();
- staleCollB.findOne();
+// Let the stale mongos see the collection state
+staleCollA.findOne();
+staleCollB.findOne();
- // Change the collection sharding state
- coll.drop();
- coll.ensureIndex({b: 1});
- st.shardColl(coll, {b: 1}, {b: 0}, {b: 1}, coll.getDB(), true);
+// Change the collection sharding state
+coll.drop();
+coll.ensureIndex({b: 1});
+st.shardColl(coll, {b: 1}, {b: 0}, {b: 1}, coll.getDB(), true);
- // Make sure that we can successfully insert, even though we have stale state
- assert.writeOK(staleCollA.insert({b: "b"}));
+// Make sure that we can successfully insert, even though we have stale state
+assert.writeOK(staleCollA.insert({b: "b"}));
- // Make sure we unsuccessfully insert with old info
- assert.writeError(staleCollB.insert({a: "a"}));
+// Make sure we unsuccessfully insert with old info
+assert.writeError(staleCollB.insert({a: "a"}));
- // Change the collection sharding state
- coll.drop();
- coll.ensureIndex({c: 1});
- st.shardColl(coll, {c: 1}, {c: 0}, {c: 1}, coll.getDB(), true);
+// Change the collection sharding state
+coll.drop();
+coll.ensureIndex({c: 1});
+st.shardColl(coll, {c: 1}, {c: 0}, {c: 1}, coll.getDB(), true);
- // Make sure we can successfully upsert, even though we have stale state
- assert.writeOK(staleCollA.update({c: "c"}, {c: "c"}, true));
+// Make sure we can successfully upsert, even though we have stale state
+assert.writeOK(staleCollA.update({c: "c"}, {c: "c"}, true));
- // Make sure we unsuccessfully upsert with old info
- assert.writeError(staleCollB.update({b: "b"}, {b: "b"}, true));
+// Make sure we unsuccessfully upsert with old info
+assert.writeError(staleCollB.update({b: "b"}, {b: "b"}, true));
- // Change the collection sharding state
- coll.drop();
- coll.ensureIndex({d: 1});
- st.shardColl(coll, {d: 1}, {d: 0}, {d: 1}, coll.getDB(), true);
+// Change the collection sharding state
+coll.drop();
+coll.ensureIndex({d: 1});
+st.shardColl(coll, {d: 1}, {d: 0}, {d: 1}, coll.getDB(), true);
- // Make sure we can successfully update, even though we have stale state
- assert.writeOK(coll.insert({d: "d"}));
+// Make sure we can successfully update, even though we have stale state
+assert.writeOK(coll.insert({d: "d"}));
- assert.writeOK(staleCollA.update({d: "d"}, {$set: {x: "x"}}, false, false));
- assert.eq(staleCollA.findOne().x, "x");
+assert.writeOK(staleCollA.update({d: "d"}, {$set: {x: "x"}}, false, false));
+assert.eq(staleCollA.findOne().x, "x");
- // Make sure we unsuccessfully update with old info
- assert.writeError(staleCollB.update({c: "c"}, {$set: {x: "y"}}, false, false));
- assert.eq(staleCollB.findOne().x, "x");
+// Make sure we unsuccessfully update with old info
+assert.writeError(staleCollB.update({c: "c"}, {$set: {x: "y"}}, false, false));
+assert.eq(staleCollB.findOne().x, "x");
- // Change the collection sharding state
- coll.drop();
- coll.ensureIndex({e: 1});
- // Deletes need to be across two shards to trigger an error.
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
- st.shardColl(coll, {e: 1}, {e: 0}, {e: 1}, coll.getDB(), true);
+// Change the collection sharding state
+coll.drop();
+coll.ensureIndex({e: 1});
+// Deletes need to be across two shards to trigger an error.
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
+st.shardColl(coll, {e: 1}, {e: 0}, {e: 1}, coll.getDB(), true);
- // Make sure we can successfully remove, even though we have stale state
- assert.writeOK(coll.insert({e: "e"}));
+// Make sure we can successfully remove, even though we have stale state
+assert.writeOK(coll.insert({e: "e"}));
- assert.writeOK(staleCollA.remove({e: "e"}, true));
- assert.eq(null, staleCollA.findOne());
+assert.writeOK(staleCollA.remove({e: "e"}, true));
+assert.eq(null, staleCollA.findOne());
- // Make sure we unsuccessfully remove with old info
- assert.writeError(staleCollB.remove({d: "d"}, true));
+// Make sure we unsuccessfully remove with old info
+assert.writeError(staleCollB.remove({d: "d"}, true));
- st.stop();
+st.stop();
})();
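
The test above depends on routers holding stale routing metadata after the collection is dropped and re-sharded through a different mongos; versioned writes through a stale router retry after a StaleConfig error and still succeed. A rough sketch of that flow, assuming st is a ShardingTest with at least two mongos and reusing the foo.bar naming from the test:

    var freshColl = st.s0.getCollection("foo.bar");
    var staleColl = st.s1.getCollection("foo.bar");
    staleColl.findOne();                        // prime s1's routing table
    freshColl.drop();
    freshColl.ensureIndex({b: 1});
    st.shardColl(freshColl, {b: 1}, {b: 0}, {b: 1}, freshColl.getDB(), true);
    assert.writeOK(staleColl.insert({b: "b"}));  // versioned insert recovers from StaleConfig
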
diff --git a/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js b/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js
index ccc8012e9c8..5bece4f1c76 100644
--- a/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js
+++ b/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js
@@ -3,49 +3,48 @@
* causes the recipient to fail the migration.
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "inputColl";
- const ns = dbName + "." + collName;
+const dbName = "test";
+const collName = "inputColl";
+const ns = dbName + "." + collName;
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
- let donor = st.shard0;
- let recipient = st.shard1;
+let donor = st.shard0;
+let recipient = st.shard1;
- jsTest.log("Make " + donor.shardName + " the primary shard, and shard collection " + ns);
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, donor.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+jsTest.log("Make " + donor.shardName + " the primary shard, and shard collection " + ns);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, donor.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- jsTest.log("Insert a document with {_id: 0} into " + ns + " through mongos");
- assert.writeOK(st.s.getCollection(ns).insert({_id: 0}));
+jsTest.log("Insert a document with {_id: 0} into " + ns + " through mongos");
+assert.writeOK(st.s.getCollection(ns).insert({_id: 0}));
- jsTest.log("Insert a document with {_id: 1} into " + ns + " directly on the recipient");
- assert.writeOK(recipient.getCollection(ns).insert({_id: 1}));
+jsTest.log("Insert a document with {_id: 1} into " + ns + " directly on the recipient");
+assert.writeOK(recipient.getCollection(ns).insert({_id: 1}));
- jsTest.log("Check that the UUID on the recipient differs from the UUID on the donor");
- const recipientUUIDBefore =
- recipient.getDB(dbName).getCollectionInfos({name: collName})[0].info.uuid;
- const donorUUIDBefore = donor.getDB(dbName).getCollectionInfos({name: collName})[0].info.uuid;
- assert.neq(recipientUUIDBefore, donorUUIDBefore);
+jsTest.log("Check that the UUID on the recipient differs from the UUID on the donor");
+const recipientUUIDBefore =
+ recipient.getDB(dbName).getCollectionInfos({name: collName})[0].info.uuid;
+const donorUUIDBefore = donor.getDB(dbName).getCollectionInfos({name: collName})[0].info.uuid;
+assert.neq(recipientUUIDBefore, donorUUIDBefore);
- jsTest.log("Ensure that we fail to migrate data from the donor to the recipient");
- assert.commandFailed(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: recipient.shardName}));
+jsTest.log("Ensure that we fail to migrate data from the donor to the recipient");
+assert.commandFailed(st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: recipient.shardName}));
- jsTest.log("Ensure the recipient's collection UUID is unmodified after the migration attempt");
- const recipientUUIDAfter =
- recipient.getDB(dbName).getCollectionInfos({name: collName})[0].info.uuid;
- assert.eq(recipientUUIDBefore, recipientUUIDAfter);
+jsTest.log("Ensure the recipient's collection UUID is unmodified after the migration attempt");
+const recipientUUIDAfter =
+ recipient.getDB(dbName).getCollectionInfos({name: collName})[0].info.uuid;
+assert.eq(recipientUUIDBefore, recipientUUIDAfter);
- jsTest.log("Ensure the document that was on the recipient was not deleted");
- assert.neq(null, recipient.getCollection(ns).findOne({_id: 1}));
+jsTest.log("Ensure the document that was on the recipient was not deleted");
+assert.neq(null, recipient.getCollection(ns).findOne({_id: 1}));
- jsTest.log("Ensure dropCollection causes the collection to be dropped even on the recipient");
- assert.commandWorked(st.s.getDB(dbName).runCommand({drop: collName}));
- assert.eq(0, recipient.getDB(dbName).getCollectionInfos({name: collName}).length);
+jsTest.log("Ensure dropCollection causes the collection to be dropped even on the recipient");
+assert.commandWorked(st.s.getDB(dbName).runCommand({drop: collName}));
+assert.eq(0, recipient.getDB(dbName).getCollectionInfos({name: collName}).length);
- st.stop();
+st.stop();
})();
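
The UUID comparison above reads each shard's view of the collection through getCollectionInfos(). A minimal sketch of that lookup, assuming shardConn is a direct connection to one shard and reusing the test's dbName/collName:

    var infos = shardConn.getDB("test").getCollectionInfos({name: "inputColl"});
    assert.eq(1, infos.length);
    var uuid = infos[0].info.uuid;   // compare across shards with assert.eq / assert.neq
    print("inputColl UUID on this shard: " + uuid);
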
diff --git a/jstests/sharding/movePrimary1.js b/jstests/sharding/movePrimary1.js
index 118c70b1415..a2dc328da29 100644
--- a/jstests/sharding/movePrimary1.js
+++ b/jstests/sharding/movePrimary1.js
@@ -1,57 +1,56 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2});
+var s = new ShardingTest({shards: 2});
- assert.commandWorked(s.getDB('test1').runCommand({dropDatabase: 1}));
- var db = s.getDB('test1');
- var c = db.foo;
- c.save({a: 1});
- c.save({a: 2});
- c.save({a: 3});
- assert.eq(3, c.count());
+assert.commandWorked(s.getDB('test1').runCommand({dropDatabase: 1}));
+var db = s.getDB('test1');
+var c = db.foo;
+c.save({a: 1});
+c.save({a: 2});
+c.save({a: 3});
+assert.eq(3, c.count());
- assert.commandWorked(
- db.runCommand({create: "view", viewOn: "foo", pipeline: [{$match: {a: 3}}]}));
+assert.commandWorked(db.runCommand({create: "view", viewOn: "foo", pipeline: [{$match: {a: 3}}]}));
- var fromShard = s.getPrimaryShard('test1');
- var toShard = s.getOther(fromShard);
+var fromShard = s.getPrimaryShard('test1');
+var toShard = s.getOther(fromShard);
- assert.eq(3, fromShard.getDB("test1").foo.count(), "from doesn't have data before move");
- assert.eq(0, toShard.getDB("test1").foo.count(), "to has data before move");
- assert.eq(1, s.s.getDB("test1").view.count(), "count on view incorrect before move");
+assert.eq(3, fromShard.getDB("test1").foo.count(), "from doesn't have data before move");
+assert.eq(0, toShard.getDB("test1").foo.count(), "to has data before move");
+assert.eq(1, s.s.getDB("test1").view.count(), "count on view incorrect before move");
- s.printShardingStatus();
- assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
- s.normalize(fromShard.name),
- "not in db correctly to start");
+s.printShardingStatus();
+assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
+ s.normalize(fromShard.name),
+ "not in db correctly to start");
- var oldShardName = s.config.databases.findOne({_id: "test1"}).primary;
+var oldShardName = s.config.databases.findOne({_id: "test1"}).primary;
- assert.commandWorked(s.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
- s.printShardingStatus();
- assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
- s.normalize(toShard.name),
- "to in config db didn't change after first move");
+assert.commandWorked(s.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
+s.printShardingStatus();
+assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
+ s.normalize(toShard.name),
+ "to in config db didn't change after first move");
- assert.eq(0, fromShard.getDB("test1").foo.count(), "from still has data after move");
- assert.eq(3, toShard.getDB("test1").foo.count(), "to doesn't have data after move");
- assert.eq(1, s.s.getDB("test1").view.count(), "count on view incorrect after move");
+assert.eq(0, fromShard.getDB("test1").foo.count(), "from still has data after move");
+assert.eq(3, toShard.getDB("test1").foo.count(), "to doesn't have data after move");
+assert.eq(1, s.s.getDB("test1").view.count(), "count on view incorrect after move");
- // Move back, now using shard name instead of server address
- assert.commandWorked(s.s0.adminCommand({movePrimary: "test1", to: oldShardName}));
- s.printShardingStatus();
- assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
- oldShardName,
- "to in config db didn't change after second move");
+// Move back, now using shard name instead of server address
+assert.commandWorked(s.s0.adminCommand({movePrimary: "test1", to: oldShardName}));
+s.printShardingStatus();
+assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
+ oldShardName,
+ "to in config db didn't change after second move");
- assert.eq(3, fromShard.getDB("test1").foo.count(), "from doesn't have data after move back");
- assert.eq(0, toShard.getDB("test1").foo.count(), "to has data after move back");
- assert.eq(1, s.s.getDB("test1").view.count(), "count on view incorrect after move back");
+assert.eq(3, fromShard.getDB("test1").foo.count(), "from doesn't have data after move back");
+assert.eq(0, toShard.getDB("test1").foo.count(), "to has data after move back");
+assert.eq(1, s.s.getDB("test1").view.count(), "count on view incorrect after move back");
- assert.commandFailedWithCode(s.s0.adminCommand({movePrimary: 'test1', to: 'dontexist'}),
- ErrorCodes.ShardNotFound,
- 'attempting to use non-existent shard as primary should fail');
+assert.commandFailedWithCode(s.s0.adminCommand({movePrimary: 'test1', to: 'dontexist'}),
+ ErrorCodes.ShardNotFound,
+ 'attempting to use non-existent shard as primary should fail');
- s.stop();
+s.stop();
})();
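
movePrimary above accepts either a server address or a shard name and is verified against config.databases. A minimal sketch of one round trip, assuming s is the ShardingTest from the test and the database test1 already exists:

    var fromShard = s.getPrimaryShard('test1');
    var toShard = s.getOther(fromShard);
    assert.commandWorked(s.s0.adminCommand({movePrimary: 'test1', to: toShard.name}));
    assert.eq(s.normalize(s.config.databases.findOne({_id: 'test1'}).primary),
              s.normalize(toShard.name));
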
diff --git a/jstests/sharding/move_chunk_basic.js b/jstests/sharding/move_chunk_basic.js
index 988d741e00e..0aad2048861 100644
--- a/jstests/sharding/move_chunk_basic.js
+++ b/jstests/sharding/move_chunk_basic.js
@@ -3,82 +3,82 @@
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({mongos: 1, shards: 2});
- var kDbName = 'db';
+var st = new ShardingTest({mongos: 1, shards: 2});
+var kDbName = 'db';
- var mongos = st.s0;
- var shard0 = st.shard0.shardName;
- var shard1 = st.shard1.shardName;
+var mongos = st.s0;
+var shard0 = st.shard0.shardName;
+var shard1 = st.shard1.shardName;
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, shard0);
- // Fail if invalid namespace.
- assert.commandFailed(mongos.adminCommand({moveChunk: '', find: {_id: 1}, to: shard1}));
+// Fail if invalid namespace.
+assert.commandFailed(mongos.adminCommand({moveChunk: '', find: {_id: 1}, to: shard1}));
- // Fail if database does not exist.
- assert.commandFailed(mongos.adminCommand({moveChunk: 'a.b', find: {_id: 1}, to: shard1}));
+// Fail if database does not exist.
+assert.commandFailed(mongos.adminCommand({moveChunk: 'a.b', find: {_id: 1}, to: shard1}));
- // Fail if collection is unsharded.
- assert.commandFailed(
- mongos.adminCommand({moveChunk: kDbName + '.xxx', find: {_id: 1}, to: shard1}));
+// Fail if collection is unsharded.
+assert.commandFailed(
+ mongos.adminCommand({moveChunk: kDbName + '.xxx', find: {_id: 1}, to: shard1}));
- function testHashed() {
- var ns = kDbName + '.fooHashed';
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
+function testHashed() {
+ var ns = kDbName + '.fooHashed';
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
- var aChunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
- assert(aChunk);
+ var aChunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
+ assert(aChunk);
- // Error if either of the bounds is not a valid shard key (BSON object - 1 yields a NaN)
- assert.commandFailed(
- mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min - 1, aChunk.max], to: shard1}));
- assert.commandFailed(
- mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max - 1], to: shard1}));
+ // Error if either of the bounds is not a valid shard key (BSON object - 1 yields a NaN)
+ assert.commandFailed(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min - 1, aChunk.max], to: shard1}));
+ assert.commandFailed(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max - 1], to: shard1}));
- // Fail if find and bounds are both set.
- assert.commandFailed(mongos.adminCommand(
- {moveChunk: ns, find: {_id: 1}, bounds: [aChunk.min, aChunk.max], to: shard1}));
+ // Fail if find and bounds are both set.
+ assert.commandFailed(mongos.adminCommand(
+ {moveChunk: ns, find: {_id: 1}, bounds: [aChunk.min, aChunk.max], to: shard1}));
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard1}));
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard1}));
- assert.eq(0, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard0}));
- assert.eq(1, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard1}));
+ assert.eq(0, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard0}));
+ assert.eq(1, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard1}));
- mongos.getDB(kDbName).fooHashed.drop();
- }
+ mongos.getDB(kDbName).fooHashed.drop();
+}
- function testNotHashed(keyDoc) {
- var ns = kDbName + '.foo';
+function testNotHashed(keyDoc) {
+ var ns = kDbName + '.foo';
- // Fail if find is not a valid shard key.
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
+ // Fail if find is not a valid shard key.
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- var chunkId = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0})._id;
+ var chunkId = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0})._id;
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {xxx: 1}, to: shard1}));
- assert.eq(shard0, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
+ assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {xxx: 1}, to: shard1}));
+ assert.eq(shard0, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
- assert.commandWorked(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: shard1}));
- assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
+ assert.commandWorked(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: shard1}));
+ assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
- // Fail if to shard does not exists
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: 'WrongShard'}));
+    // Fail if the 'to' shard does not exist
+ assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: 'WrongShard'}));
- // Fail if chunk is already at shard
- assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
+ // Fail if chunk is already at shard
+ assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
- mongos.getDB(kDbName).foo.drop();
- }
+ mongos.getDB(kDbName).foo.drop();
+}
- testHashed();
+testHashed();
- testNotHashed({a: 1});
+testNotHashed({a: 1});
- testNotHashed({a: 1, b: 1});
+testNotHashed({a: 1, b: 1});
- st.stop();
+st.stop();
})();
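
moveChunk above is addressed either by a find document or by explicit bounds, never both. A minimal sketch of the bounds form, assuming mongos, ns, shard0 and shard1 are bound as in testHashed above and the namespace is hashed-sharded:

    var chunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
    assert.commandWorked(
        mongos.adminCommand({moveChunk: ns, bounds: [chunk.min, chunk.max], to: shard1}));
    assert.eq(1, mongos.getDB('config').chunks.count({_id: chunk._id, shard: shard1}));
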
diff --git a/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js b/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
index 06802d65c61..c7602b4f644 100644
--- a/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
+++ b/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
@@ -1,126 +1,125 @@
load("jstests/sharding/move_chunk_with_session_helper.js");
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- var checkFindAndModifyResult = function(expected, toCheck) {
- assert.eq(expected.ok, toCheck.ok);
- assert.eq(expected.value, toCheck.value);
- assert.eq(expected.lastErrorObject, toCheck.lastErrorObject);
- };
+var checkFindAndModifyResult = function(expected, toCheck) {
+ assert.eq(expected.ok, toCheck.ok);
+ assert.eq(expected.value, toCheck.value);
+ assert.eq(expected.lastErrorObject, toCheck.lastErrorObject);
+};
- var lsid = UUID();
- var tests = [
- {
- coll: 'findAndMod-upsert',
- cmd: {
- findAndModify: 'findAndMod-upsert',
- query: {x: 60},
- update: {$inc: {y: 1}},
- new: true,
- upsert: true,
- lsid: {id: lsid},
- txnNumber: NumberLong(37),
- },
- setup: function(coll) {},
- checkRetryResult: function(result, retryResult) {
- checkFindAndModifyResult(result, retryResult);
- },
- checkDocuments: function(coll) {
- assert.eq(1, coll.findOne({x: 60}).y);
- },
+var lsid = UUID();
+var tests = [
+ {
+ coll: 'findAndMod-upsert',
+ cmd: {
+ findAndModify: 'findAndMod-upsert',
+ query: {x: 60},
+ update: {$inc: {y: 1}},
+ new: true,
+ upsert: true,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(37),
},
- {
- coll: 'findAndMod-update-preImage',
- cmd: {
- findAndModify: 'findAndMod-update-preImage',
- query: {x: 60},
- update: {$inc: {y: 1}},
- new: false,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(38),
- },
- setup: function(coll) {
- coll.insert({x: 60});
- },
- checkRetryResult: function(result, retryResult) {
- checkFindAndModifyResult(result, retryResult);
- },
- checkDocuments: function(coll) {
- assert.eq(1, coll.findOne({x: 60}).y);
- },
+ setup: function(coll) {},
+ checkRetryResult: function(result, retryResult) {
+ checkFindAndModifyResult(result, retryResult);
},
- {
- coll: 'findAndMod-update-postImage',
- cmd: {
- findAndModify: 'findAndMod-update-postImage',
- query: {x: 60},
- update: {$inc: {y: 1}},
- new: true,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(39),
- },
- setup: function(coll) {
- coll.insert({x: 60});
- },
- checkRetryResult: function(result, retryResult) {
- checkFindAndModifyResult(result, retryResult);
- },
- checkDocuments: function(coll) {
- assert.eq(1, coll.findOne({x: 60}).y);
- },
+ checkDocuments: function(coll) {
+ assert.eq(1, coll.findOne({x: 60}).y);
},
- {
- coll: 'findAndMod-delete',
- cmd: {
- findAndModify: 'findAndMod-delete',
- query: {x: 10},
- remove: true,
- lsid: {id: lsid},
- txnNumber: NumberLong(40),
- },
- setup: function(coll) {
- var bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 10; i++) {
- bulk.insert({x: 10});
- }
- assert.writeOK(bulk.execute());
-
- },
- checkRetryResult: function(result, retryResult) {
- checkFindAndModifyResult(result, retryResult);
- },
- checkDocuments: function(coll) {
- assert.eq(9, coll.find({x: 10}).itcount());
- },
+ },
+ {
+ coll: 'findAndMod-update-preImage',
+ cmd: {
+ findAndModify: 'findAndMod-update-preImage',
+ query: {x: 60},
+ update: {$inc: {y: 1}},
+ new: false,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(38),
+ },
+ setup: function(coll) {
+ coll.insert({x: 60});
+ },
+ checkRetryResult: function(result, retryResult) {
+ checkFindAndModifyResult(result, retryResult);
+ },
+ checkDocuments: function(coll) {
+ assert.eq(1, coll.findOne({x: 60}).y);
+ },
+ },
+ {
+ coll: 'findAndMod-update-postImage',
+ cmd: {
+ findAndModify: 'findAndMod-update-postImage',
+ query: {x: 60},
+ update: {$inc: {y: 1}},
+ new: true,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(39),
+ },
+ setup: function(coll) {
+ coll.insert({x: 60});
},
- ];
+ checkRetryResult: function(result, retryResult) {
+ checkFindAndModifyResult(result, retryResult);
+ },
+ checkDocuments: function(coll) {
+ assert.eq(1, coll.findOne({x: 60}).y);
+ },
+ },
+ {
+ coll: 'findAndMod-delete',
+ cmd: {
+ findAndModify: 'findAndMod-delete',
+ query: {x: 10},
+ remove: true,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(40),
+ },
+ setup: function(coll) {
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < 10; i++) {
+ bulk.insert({x: 10});
+ }
+ assert.writeOK(bulk.execute());
+ },
+ checkRetryResult: function(result, retryResult) {
+ checkFindAndModifyResult(result, retryResult);
+ },
+ checkDocuments: function(coll) {
+ assert.eq(9, coll.find({x: 10}).itcount());
+ },
+ },
+];
- // Prevent unnecessary elections in the first shard replica set. Shard 'rs1' shard will need its
- // secondary to get elected, so we don't give it a zero priority.
- var st = new ShardingTest({
- mongos: 2,
- shards: {
- rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
- rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
- }
- });
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
+// Prevent unnecessary elections in the first shard replica set. Shard 'rs1' will need its
+// secondary to get elected, so we don't give it a zero priority.
+var st = new ShardingTest({
+ mongos: 2,
+ shards: {
+ rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
+ rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
+ }
+});
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
- tests.forEach(function(test) {
- testMoveChunkWithSession(
- st, test.coll, test.cmd, test.setup, test.checkRetryResult, test.checkDocuments);
- });
+tests.forEach(function(test) {
+ testMoveChunkWithSession(
+ st, test.coll, test.cmd, test.setup, test.checkRetryResult, test.checkDocuments);
+});
- st.stop();
+st.stop();
})();
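
Each command above is made retryable by attaching a logical session id and a transaction number; re-sending the identical command with the same lsid/txnNumber pair is expected to return the stored result instead of executing the write a second time (checkRetryResult and checkDocuments verify exactly this across a chunk migration). A minimal stand-alone sketch, assuming a connection conn to a mongos, the test database, and a storage engine that supports retryable writes:

    var cmd = {
        findAndModify: 'findAndMod-upsert',
        query: {x: 60},
        update: {$inc: {y: 1}},
        new: true,
        upsert: true,
        lsid: {id: UUID()},
        txnNumber: NumberLong(37),
    };
    var first = assert.commandWorked(conn.getDB('test').runCommand(cmd));
    var retry = assert.commandWorked(conn.getDB('test').runCommand(cmd));  // same lsid/txnNumber
    assert.eq(first.value, retry.value);
    assert.eq(1, conn.getDB('test').getCollection('findAndMod-upsert').findOne({x: 60}).y);
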
diff --git a/jstests/sharding/move_chunk_insert_with_write_retryability.js b/jstests/sharding/move_chunk_insert_with_write_retryability.js
index 0f755de41c4..c6a79000712 100644
--- a/jstests/sharding/move_chunk_insert_with_write_retryability.js
+++ b/jstests/sharding/move_chunk_insert_with_write_retryability.js
@@ -1,48 +1,48 @@
load("jstests/sharding/move_chunk_with_session_helper.js");
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- // Prevent unnecessary elections in the first shard replica set. Shard 'rs1' shard will need its
- // secondary to get elected, so we don't give it a zero priority.
- var st = new ShardingTest({
- mongos: 2,
- shards: {
- rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
- rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
- }
- });
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
+// Prevent unnecessary elections in the first shard replica set. Shard 'rs1' will need its
+// secondary to get elected, so we don't give it a zero priority.
+var st = new ShardingTest({
+ mongos: 2,
+ shards: {
+ rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
+ rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
+ }
+});
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
- var coll = 'insert';
- var cmd = {
- insert: coll,
- documents: [{x: 10}, {x: 30}],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(34),
- };
- var setup = function() {};
- var checkRetryResult = function(result, retryResult) {
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
- };
- var checkDocuments = function(coll) {
- assert.eq(1, coll.find({x: 10}).itcount());
- assert.eq(1, coll.find({x: 30}).itcount());
- };
+var coll = 'insert';
+var cmd = {
+ insert: coll,
+ documents: [{x: 10}, {x: 30}],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(34),
+};
+var setup = function() {};
+var checkRetryResult = function(result, retryResult) {
+ assert.eq(result.ok, retryResult.ok);
+ assert.eq(result.n, retryResult.n);
+ assert.eq(result.writeErrors, retryResult.writeErrors);
+ assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+};
+var checkDocuments = function(coll) {
+ assert.eq(1, coll.find({x: 10}).itcount());
+ assert.eq(1, coll.find({x: 30}).itcount());
+};
- testMoveChunkWithSession(st, coll, cmd, setup, checkRetryResult, checkDocuments);
+testMoveChunkWithSession(st, coll, cmd, setup, checkRetryResult, checkDocuments);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/move_chunk_open_cursors.js b/jstests/sharding/move_chunk_open_cursors.js
index fe0942f0558..1b15fb198cf 100644
--- a/jstests/sharding/move_chunk_open_cursors.js
+++ b/jstests/sharding/move_chunk_open_cursors.js
@@ -3,52 +3,50 @@
* migration.
*/
(function() {
- "use strict";
- const dbName = "test";
- const collName = jsTest.name();
- const testNs = dbName + "." + collName;
+"use strict";
+const dbName = "test";
+const collName = jsTest.name();
+const testNs = dbName + "." + collName;
- const nDocs = 1000 * 10;
- const st = new ShardingTest({shards: 2});
- const coll = st.s0.getDB(dbName)[collName];
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+const nDocs = 1000 * 10;
+const st = new ShardingTest({shards: 2});
+const coll = st.s0.getDB(dbName)[collName];
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < nDocs; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
- // Make sure we know which shard will host the data to begin.
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.admin.runCommand({enableSharding: dbName}));
- assert.commandWorked(st.admin.runCommand({shardCollection: testNs, key: {_id: 1}}));
+// Make sure we know which shard will host the data to begin.
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.admin.runCommand({enableSharding: dbName}));
+assert.commandWorked(st.admin.runCommand({shardCollection: testNs, key: {_id: 1}}));
- // Open some cursors before migrating data.
- // Ensure the cursor stage at the front of the pipeline does not buffer any data.
- assert.commandWorked(
- st.shard0.adminCommand({setParameter: 1, internalDocumentSourceCursorBatchSizeBytes: 1}));
- const getMoreBatchSize = 100;
- const aggResponse = assert.commandWorked(
- coll.runCommand({aggregate: collName, pipeline: [], cursor: {batchSize: 0}}));
- const aggCursor = new DBCommandCursor(coll.getDB(), aggResponse, getMoreBatchSize);
+// Open some cursors before migrating data.
+// Ensure the cursor stage at the front of the pipeline does not buffer any data.
+assert.commandWorked(
+ st.shard0.adminCommand({setParameter: 1, internalDocumentSourceCursorBatchSizeBytes: 1}));
+const getMoreBatchSize = 100;
+const aggResponse = assert.commandWorked(
+ coll.runCommand({aggregate: collName, pipeline: [], cursor: {batchSize: 0}}));
+const aggCursor = new DBCommandCursor(coll.getDB(), aggResponse, getMoreBatchSize);
- assert(st.adminCommand({split: testNs, middle: {_id: nDocs / 2}}));
- assert(st.adminCommand({moveChunk: testNs, find: {_id: nDocs - 1}, to: st.shard1.shardName}));
+assert(st.adminCommand({split: testNs, middle: {_id: nDocs / 2}}));
+assert(st.adminCommand({moveChunk: testNs, find: {_id: nDocs - 1}, to: st.shard1.shardName}));
- assert.eq(
- aggCursor.itcount(),
- nDocs,
- "expected agg cursor to return all matching documents, even though some have migrated");
+assert.eq(aggCursor.itcount(),
+ nDocs,
+ "expected agg cursor to return all matching documents, even though some have migrated");
- // Test the same behavior with the find command.
- const findResponse = assert.commandWorked(
- coll.runCommand({find: collName, filter: {}, batchSize: getMoreBatchSize}));
- const findCursor = new DBCommandCursor(coll.getDB(), findResponse, getMoreBatchSize);
- assert(st.adminCommand({split: testNs, middle: {_id: nDocs / 4}}));
- assert(st.adminCommand({moveChunk: testNs, find: {_id: 0}, to: st.shard1.shardName}));
+// Test the same behavior with the find command.
+const findResponse = assert.commandWorked(
+ coll.runCommand({find: collName, filter: {}, batchSize: getMoreBatchSize}));
+const findCursor = new DBCommandCursor(coll.getDB(), findResponse, getMoreBatchSize);
+assert(st.adminCommand({split: testNs, middle: {_id: nDocs / 4}}));
+assert(st.adminCommand({moveChunk: testNs, find: {_id: 0}, to: st.shard1.shardName}));
- assert.eq(
- findCursor.itcount(),
- nDocs,
- "expected find cursor to return all matching documents, even though some have migrated");
- st.stop();
+assert.eq(findCursor.itcount(),
+ nDocs,
+ "expected find cursor to return all matching documents, even though some have migrated");
+st.stop();
}());
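For readers following the cursor-versus-migration test above: a quick, illustrative way to confirm where each chunk ended up after the splits and moves is to read the config metadata through mongos. This is a hedged sketch only, not part of the patch, and it assumes a server version whose config.chunks documents are keyed by 'ns' (newer versions key them by collection UUID).

// Sketch: print the range and owning shard of every chunk in the test namespace.
const configDB = st.s.getDB("config");
configDB.chunks.find({ns: testNs}).sort({min: 1}).forEach(function(chunk) {
    print("chunk [" + tojson(chunk.min) + ", " + tojson(chunk.max) + ") -> " + chunk.shard);
});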
diff --git a/jstests/sharding/move_chunk_remove_with_write_retryability.js b/jstests/sharding/move_chunk_remove_with_write_retryability.js
index 78c20ceddc6..c417710f462 100644
--- a/jstests/sharding/move_chunk_remove_with_write_retryability.js
+++ b/jstests/sharding/move_chunk_remove_with_write_retryability.js
@@ -1,55 +1,55 @@
load("jstests/sharding/move_chunk_with_session_helper.js");
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- // Prevent unnecessary elections in the first shard replica set. Shard 'rs1' shard will need its
- // secondary to get elected, so we don't give it a zero priority.
- var st = new ShardingTest({
- mongos: 2,
- shards: {
- rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
- rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
- }
- });
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
+// Prevent unnecessary elections in the first shard replica set. Shard 'rs1' will need its
+// secondary to get elected, so we don't give it a zero priority.
+var st = new ShardingTest({
+ mongos: 2,
+ shards: {
+ rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
+ rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
+ }
+});
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
- var coll = 'delete';
- var cmd = {
- delete: coll,
- deletes: [{q: {x: 10}, limit: 1}, {q: {x: 20}, limit: 1}],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(36),
- };
- var setup = function(coll) {
- var bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 10; i++) {
- bulk.insert({x: 10});
- bulk.insert({x: 20});
- }
- assert.writeOK(bulk.execute());
- };
- var checkRetryResult = function(result, retryResult) {
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
- };
- var checkDocuments = function(coll) {
- assert.eq(9, coll.find({x: 10}).itcount());
- assert.eq(9, coll.find({x: 20}).itcount());
- };
+var coll = 'delete';
+var cmd = {
+ delete: coll,
+ deletes: [{q: {x: 10}, limit: 1}, {q: {x: 20}, limit: 1}],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(36),
+};
+var setup = function(coll) {
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < 10; i++) {
+ bulk.insert({x: 10});
+ bulk.insert({x: 20});
+ }
+ assert.writeOK(bulk.execute());
+};
+var checkRetryResult = function(result, retryResult) {
+ assert.eq(result.ok, retryResult.ok);
+ assert.eq(result.n, retryResult.n);
+ assert.eq(result.writeErrors, retryResult.writeErrors);
+ assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+};
+var checkDocuments = function(coll) {
+ assert.eq(9, coll.find({x: 10}).itcount());
+ assert.eq(9, coll.find({x: 20}).itcount());
+};
- testMoveChunkWithSession(st, coll, cmd, setup, checkRetryResult, checkDocuments);
+testMoveChunkWithSession(st, coll, cmd, setup, checkRetryResult, checkDocuments);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/move_chunk_update_shard_key_in_retryable_write.js b/jstests/sharding/move_chunk_update_shard_key_in_retryable_write.js
index 592ba020629..32a33e21a82 100644
--- a/jstests/sharding/move_chunk_update_shard_key_in_retryable_write.js
+++ b/jstests/sharding/move_chunk_update_shard_key_in_retryable_write.js
@@ -10,76 +10,141 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/retryable_writes_util.js");
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
- load('./jstests/libs/chunk_manipulation_util.js');
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- // For startParallelOps to write its state
- let staticMongod = MongoRunner.runMongod({});
-
- let st = new ShardingTest({shards: {rs0: {nodes: 2}, rs1: {nodes: 2}, rs2: {nodes: 2}}});
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
- let testDB = st.s.getDB(dbName);
- let testColl = testDB.foo;
-
- // Create a sharded collection with three chunks:
- // [-inf, -10), [-10, 10), [10, inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: -10}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 10}}));
-
- /**
- * Sets up a test by moving chunks to such that one chunk is on each
- * shard, with the following distribution:
- * shard0: [-inf, -10)
- * shard1: [-10, 10)
- * shard2: [10, inf)
- */
- function setUp() {
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {x: -100}, to: st.shard0.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {x: 0}, to: st.shard1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {x: 1000}, to: st.shard2.shardName}));
-
- flushRoutersAndRefreshShardMetadata(st, {ns});
- }
-
- /**
- * Tears down a test by dropping all documents from the test collection.
- */
- function tearDown() {
- assert.commandWorked(testColl.deleteMany({}));
- }
-
- /**
- * Generic function to run a test. 'description' is a description of the test for logging
- * purposes and 'testBody' is the test function.
- */
- function test(description, testBody) {
- jsTest.log(`Running Test Setup: ${description}`);
- setUp();
- jsTest.log(`Running Test Body: ${description}`);
- testBody();
- jsTest.log(`Running Test Tear-Down: ${description}`);
- tearDown();
- jsTest.log(`Finished Running Test: ${description}`);
- }
-
- test("Updating shard key in retryable write receives error on retry", () => {
+"use strict";
+
+load("jstests/libs/retryable_writes_util.js");
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+load('./jstests/libs/chunk_manipulation_util.js');
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+// For startParallelOps to write its state
+let staticMongod = MongoRunner.runMongod({});
+
+let st = new ShardingTest({shards: {rs0: {nodes: 2}, rs1: {nodes: 2}, rs2: {nodes: 2}}});
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+let testDB = st.s.getDB(dbName);
+let testColl = testDB.foo;
+
+// Create a sharded collection with three chunks:
+// [-inf, -10), [-10, 10), [10, inf)
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: -10}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 10}}));
+
+/**
+ * Sets up a test by moving chunks so that one chunk is on each
+ * shard, with the following distribution:
+ * shard0: [-inf, -10)
+ * shard1: [-10, 10)
+ * shard2: [10, inf)
+ */
+function setUp() {
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {x: -100}, to: st.shard0.shardName}));
+ assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {x: 0}, to: st.shard1.shardName}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {x: 1000}, to: st.shard2.shardName}));
+
+ flushRoutersAndRefreshShardMetadata(st, {ns});
+}
+
+/**
+ * Tears down a test by dropping all documents from the test collection.
+ */
+function tearDown() {
+ assert.commandWorked(testColl.deleteMany({}));
+}
+
+/**
+ * Generic function to run a test. 'description' is a description of the test for logging
+ * purposes and 'testBody' is the test function.
+ */
+function test(description, testBody) {
+ jsTest.log(`Running Test Setup: ${description}`);
+ setUp();
+ jsTest.log(`Running Test Body: ${description}`);
+ testBody();
+ jsTest.log(`Running Test Tear-Down: ${description}`);
+ tearDown();
+ jsTest.log(`Finished Running Test: ${description}`);
+}
+
+test("Updating shard key in retryable write receives error on retry", () => {
+ const shardKeyValueOnShard0 = -100;
+ const shardKeyValueOnShard1 = 0;
+
+ // Insert a single document on shard 0.
+ testColl.insert({x: shardKeyValueOnShard0});
+
+ const cmdObj = {
+ update: collName,
+ updates: [
+ {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(35),
+ };
+
+ // Update the document shard key. The document should now be on shard 1.
+ const result = assert.commandWorked(testDB.runCommand(cmdObj));
+ assert.eq(result.n, 1);
+ assert.eq(result.nModified, 1);
+ assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
+
+ // Retry the command. This should retry against shard 0, which should throw
+ // IncompleteTransactionHistory.
+ assert.commandFailedWithCode(testDB.runCommand(cmdObj),
+ ErrorCodes.IncompleteTransactionHistory);
+});
+
+test(
+ "Updating shard key in retryable write receives error on retry when the original chunk has been migrated to a new shard",
+ () => {
+ const shardKeyValueOnShard0 = -100;
+ const shardKeyValueOnShard1 = 0;
+
+ // Insert a single document on shard 0.
+ testColl.insert({x: shardKeyValueOnShard0});
+
+ const cmdObj = {
+ update: collName,
+ updates: [
+ {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(35),
+ };
+
+ // Update the document shard key. The document should now be on shard 1.
+ const result = assert.commandWorked(testDB.runCommand(cmdObj));
+ assert.eq(result.n, 1);
+ assert.eq(result.nModified, 1);
+ assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
+
+ // Move the chunk that contained the original document to shard 1.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard1.shardName}));
+
+ // Retry the command. This should retry against shard 1, which should throw
+ // IncompleteTransactionHistory.
+ assert.commandFailedWithCode(testDB.runCommand(cmdObj),
+ ErrorCodes.IncompleteTransactionHistory);
+ });
+
+test(
+ "Updating shard key in retryable write receives error on retry when the original chunk has been migrated to a new shard and then to a third shard",
+ () => {
const shardKeyValueOnShard0 = -100;
const shardKeyValueOnShard1 = 0;
@@ -102,295 +167,228 @@
assert.eq(result.nModified, 1);
assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
- // Retry the command. This should retry against shard 0, which should throw
+ // Move the chunk that contained the original document to shard 1.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard1.shardName}));
+
+ // Then move the same chunk that contained the original document to shard 2.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard2.shardName}));
+
+ // Retry the command. This should retry against shard 2, which should throw
+ // IncompleteTransactionHistory.
+ assert.commandFailedWithCode(testDB.runCommand(cmdObj),
+ ErrorCodes.IncompleteTransactionHistory);
+ });
+
+test(
+ "Updating shard key in retryable write receives error on retry when the original chunk has been migrated to a shard without knowledge of the transaction",
+ () => {
+ const shardKeyValueOnShard0 = -100;
+ const shardKeyValueOnShard1 = 0;
+
+ // Insert a single document on shard 0.
+ testColl.insert({x: shardKeyValueOnShard0});
+
+ const cmdObj = {
+ update: collName,
+ updates: [
+ {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(35),
+ };
+
+ // Update the document shard key. The document should now be on shard 1.
+ const result = assert.commandWorked(testDB.runCommand(cmdObj));
+ assert.eq(result.n, 1);
+ assert.eq(result.nModified, 1);
+ assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
+
+ // Move the chunk that contained the original document to shard 2,
+ // which does not know about the transaction.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard2.shardName}));
+
+ // Retry the command. This should retry against shard 2, which should throw
+ // IncompleteTransactionHistory.
+ assert.commandFailedWithCode(testDB.runCommand(cmdObj),
+ ErrorCodes.IncompleteTransactionHistory);
+ });
+
+test(
+ "config.transactions entries for single-shard transactions which commit during transferMods phase are successfully migrated as dead-end sentinels",
+ () => {
+ const shardKeyValueOnShard0 = -100;
+ const anotherShardKeyValueOnShard0 = -101;
+ const shardKeyValueOnShard1 = 0;
+ const lsid = {id: UUID()};
+ const txnNumber = 35;
+
+ // Insert a single document on shard 0.
+ assert.commandWorked(testColl.insert({x: shardKeyValueOnShard0}));
+
+ const cmdToRunInTransaction = {
+ update: collName,
+ updates: [
+ // Add a new field.
+ {q: {x: shardKeyValueOnShard0}, u: {$set: {a: 4}}},
+ ],
+ ordered: false,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+ };
+
+ const fakeRetryCmd = {
+ update: collName,
+ updates: [
+ // Add a new field.
+ {q: {x: shardKeyValueOnShard0}, u: {$set: {a: 4}}},
+ ],
+ ordered: false,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber)
+ };
+
+ pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+ let joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s.host, {x: shardKeyValueOnShard0}, null, ns, st.shard1.shardName);
+
+ waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+ // Update a document being migrated.
+ const result = assert.commandWorked(testDB.runCommand(cmdToRunInTransaction));
+ assert.eq(result.n, 1);
+ assert.eq(result.nModified, 1);
+
+ assert.commandWorked(testDB.adminCommand({
+ commitTransaction: 1,
+ writeConcern: {w: "majority"},
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+ }));
+
+ // Check that the update from the transaction succeeded.
+ const resultingDoc = testColl.findOne({x: shardKeyValueOnShard0});
+ assert.neq(resultingDoc, null);
+ assert.eq(resultingDoc["a"], 4);
+
+ unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+ // Wait for moveChunk to complete
+ joinMoveChunk();
+
+ st.printShardingStatus();
+ // Retry the command. This should retry against shard 1, which should throw
+ // IncompleteTransactionHistory.
+ assert.commandFailedWithCode(testDB.runCommand(fakeRetryCmd),
+ ErrorCodes.IncompleteTransactionHistory);
+ });
+
+test(
+ "Update to shard key in retryable write during transferMods phase of chunk migration is migrated successfully to a node not involved in the shard key update",
+ () => {
+ const shardKeyValueOnShard0 = -100;
+ const shardKeyValueOnShard1 = 0;
+ const docId = 0;
+
+ // Insert a single document on shard 0.
+ assert.commandWorked(testColl.insert({_id: docId, x: shardKeyValueOnShard0}));
+
+ const cmdObj = {
+ update: collName,
+ updates: [
+ {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(35),
+ };
+
+ pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+ // We're going to do a shard key update to move a document from shard 0 to shard 1, so
+ // here we move the chunk from shard 0 to shard 2, which won't be involved in the
+ // transaction created by the shard key update.
+ let joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s.host, {x: shardKeyValueOnShard0}, null, ns, st.shard2.shardName);
+
+ waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+ // Update the document shard key so that the document will move from shard 0 to shard 1.
+ const result = assert.commandWorked(testDB.runCommand(cmdObj));
+ assert.eq(result.n, 1);
+ assert.eq(result.nModified, 1);
+ assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
+
+ unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+ // Wait for moveChunk to complete
+ joinMoveChunk();
+
+ st.printShardingStatus();
+ // Retry the command. This should retry against shard 2, which should throw
// IncompleteTransactionHistory.
assert.commandFailedWithCode(testDB.runCommand(cmdObj),
ErrorCodes.IncompleteTransactionHistory);
});
- test(
- "Updating shard key in retryable write receives error on retry when the original chunk has been migrated to a new shard",
- () => {
- const shardKeyValueOnShard0 = -100;
- const shardKeyValueOnShard1 = 0;
-
- // Insert a single document on shard 0.
- testColl.insert({x: shardKeyValueOnShard0});
-
- const cmdObj = {
- update: collName,
- updates: [
- {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(35),
- };
-
- // Update the document shard key. The document should now be on shard 1.
- const result = assert.commandWorked(testDB.runCommand(cmdObj));
- assert.eq(result.n, 1);
- assert.eq(result.nModified, 1);
- assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
-
- // Move the chunk that contained the original document to shard 1.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard1.shardName}));
-
- // Retry the command. This should retry against shard 1, which should throw
- // IncompleteTransactionHistory.
- assert.commandFailedWithCode(testDB.runCommand(cmdObj),
- ErrorCodes.IncompleteTransactionHistory);
-
- });
-
- test(
- "Updating shard key in retryable write receives error on retry when the original chunk has been migrated to a new shard and then to a third shard",
- () => {
- const shardKeyValueOnShard0 = -100;
- const shardKeyValueOnShard1 = 0;
-
- // Insert a single document on shard 0.
- testColl.insert({x: shardKeyValueOnShard0});
-
- const cmdObj = {
- update: collName,
- updates: [
- {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(35),
- };
-
- // Update the document shard key. The document should now be on shard 1.
- const result = assert.commandWorked(testDB.runCommand(cmdObj));
- assert.eq(result.n, 1);
- assert.eq(result.nModified, 1);
- assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
-
- // Move the chunk that contained the original document to shard 1.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard1.shardName}));
-
- // Then move the same chunk that contained the original document to shard 2.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard2.shardName}));
-
- // Retry the command. This should retry against shard 1, which should throw
- // IncompleteTransactionHistory.
- assert.commandFailedWithCode(testDB.runCommand(cmdObj),
- ErrorCodes.IncompleteTransactionHistory);
- });
-
- test(
- "Updating shard key in retryable write receives error on retry when the original chunk has been migrated to a shard without knowledge of the transaction",
- () => {
- const shardKeyValueOnShard0 = -100;
- const shardKeyValueOnShard1 = 0;
-
- // Insert a single document on shard 0.
- testColl.insert({x: shardKeyValueOnShard0});
-
- const cmdObj = {
- update: collName,
- updates: [
- {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(35),
- };
-
- // Update the document shard key. The document should now be on shard 1.
- const result = assert.commandWorked(testDB.runCommand(cmdObj));
- assert.eq(result.n, 1);
- assert.eq(result.nModified, 1);
- assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
-
- // Move the chunk that contained the original document to shard 2,
- // which does not know about the tranasaction.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard2.shardName}));
-
- // Retry the command. This should retry against shard 2, which should throw
- // IncompleteTransactionHistory.
- assert.commandFailedWithCode(testDB.runCommand(cmdObj),
- ErrorCodes.IncompleteTransactionHistory);
- });
-
- test(
- "config.transactions entries for single-shard transactions which commit during transferMods phase are successfully migrated as dead-end sentinels",
- () => {
- const shardKeyValueOnShard0 = -100;
- const anotherShardKeyValueOnShard0 = -101;
- const shardKeyValueOnShard1 = 0;
- const lsid = {id: UUID()};
- const txnNumber = 35;
-
- // Insert a single document on shard 0.
- assert.commandWorked(testColl.insert({x: shardKeyValueOnShard0}));
-
- const cmdToRunInTransaction = {
- update: collName,
- updates: [
- // Add a new field.
- {q: {x: shardKeyValueOnShard0}, u: {$set: {a: 4}}},
- ],
- ordered: false,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- };
-
- const fakeRetryCmd = {
- update: collName,
- updates: [
- // Add a new field.
- {q: {x: shardKeyValueOnShard0}, u: {$set: {a: 4}}},
- ],
- ordered: false,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber)
- };
-
- pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- let joinMoveChunk = moveChunkParallel(
- staticMongod, st.s.host, {x: shardKeyValueOnShard0}, null, ns, st.shard1.shardName);
-
- waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // Update a document being migrated.
- const result = assert.commandWorked(testDB.runCommand(cmdToRunInTransaction));
- assert.eq(result.n, 1);
- assert.eq(result.nModified, 1);
-
- assert.commandWorked(testDB.adminCommand({
- commitTransaction: 1,
- writeConcern: {w: "majority"},
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
-
- // Check that the update from the transaction succeeded.
- const resultingDoc = testColl.findOne({x: shardKeyValueOnShard0});
- assert.neq(resultingDoc, null);
- assert.eq(resultingDoc["a"], 4);
-
- unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // Wait for moveChunk to complete
- joinMoveChunk();
-
- st.printShardingStatus();
- // Retry the command. This should retry against shard 1, which should throw
- // IncompleteTransactionHistory.
- assert.commandFailedWithCode(testDB.runCommand(fakeRetryCmd),
- ErrorCodes.IncompleteTransactionHistory);
- });
-
- test(
- "Update to shard key in retryable write during transferMods phase of chunk migration is migrated successfully to a node not involved in the shard key update",
- () => {
- const shardKeyValueOnShard0 = -100;
- const shardKeyValueOnShard1 = 0;
- const docId = 0;
-
- // Insert a single document on shard 0.
- assert.commandWorked(testColl.insert({_id: docId, x: shardKeyValueOnShard0}));
-
- const cmdObj = {
- update: collName,
- updates: [
- {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(35),
- };
-
- pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // We're going to do a shard key update to move a document from shard 0 to shard 1, so
- // here we move the chunk from shard 0 to shard 2, which won't be involved in the
- // transaction created by the shard key update.
- let joinMoveChunk = moveChunkParallel(
- staticMongod, st.s.host, {x: shardKeyValueOnShard0}, null, ns, st.shard2.shardName);
-
- waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // Update the document shard key so that the document will move from shard 0 to shard 1.
- const result = assert.commandWorked(testDB.runCommand(cmdObj));
- assert.eq(result.n, 1);
- assert.eq(result.nModified, 1);
- assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
-
- unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // Wait for moveChunk to complete
- joinMoveChunk();
-
- st.printShardingStatus();
- // Retry the command. This should retry against shard 2, which should throw
- // IncompleteTransactionHistory.
- assert.commandFailedWithCode(testDB.runCommand(cmdObj),
- ErrorCodes.IncompleteTransactionHistory);
- });
-
- // TODO (SERVER-40815) This test currently fails with DuplicateKeyError on _id.
- //
- // test(
- // "Update to shard key in retryable write during transfer mods phase of chunk migration is
- // migrated successfully ",
- // () => {
- // const shardKeyValueOnShard0 = -100;
- // const shardKeyValueOnShard1 = 0;
- // const docId = 0;
-
- // // Insert a single document on shard 0.
- // assert.commandWorked(testColl.insert({_id: docId, x: shardKeyValueOnShard0}));
-
- // const cmdObj = {
- // update: collName,
- // updates: [
- // {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
- // ],
- // ordered: false,
- // lsid: {id: UUID()},
- // txnNumber: NumberLong(35),
- // };
-
- // pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // let joinMoveChunk = moveChunkParallel(
- // staticMongod, st.s.host, {x: shardKeyValueOnShard0}, null, ns,
- // st.shard1.shardName);
-
- // waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // // Update the document shard key.
-
- // // THIS CURRENTLY FAILS WITH DuplicateKeyError on _id
- // const result = assert.commandWorked(testDB.runCommand(cmdObj));
- // assert.eq(result.n, 1);
- // assert.eq(result.nModified, 1);
- // assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
-
- // unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // // Wait for moveChunk to complete
- // joinMoveChunk();
-
- // st.printShardingStatus();
- // // Retry the command. This should retry against shard 1, which should throw
- // // IncompleteTransactionHistory.
- // assert.commandFailedWithCode(testDB.runCommand(cmdObj),
- // ErrorCodes.IncompleteTransactionHistory);
- // });
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+// TODO (SERVER-40815) This test currently fails with DuplicateKeyError on _id.
+//
+// test(
+// "Update to shard key in retryable write during transfer mods phase of chunk migration is
+// migrated successfully ",
+// () => {
+// const shardKeyValueOnShard0 = -100;
+// const shardKeyValueOnShard1 = 0;
+// const docId = 0;
+
+// // Insert a single document on shard 0.
+// assert.commandWorked(testColl.insert({_id: docId, x: shardKeyValueOnShard0}));
+
+// const cmdObj = {
+// update: collName,
+// updates: [
+// {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
+// ],
+// ordered: false,
+// lsid: {id: UUID()},
+// txnNumber: NumberLong(35),
+// };
+
+// pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+// let joinMoveChunk = moveChunkParallel(
+// staticMongod, st.s.host, {x: shardKeyValueOnShard0}, null, ns,
+// st.shard1.shardName);
+
+// waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+// // Update the document shard key.
+
+// // THIS CURRENTLY FAILS WITH DuplicateKeyError on _id
+// const result = assert.commandWorked(testDB.runCommand(cmdObj));
+// assert.eq(result.n, 1);
+// assert.eq(result.nModified, 1);
+// assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
+
+// unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+// // Wait for moveChunk to complete
+// joinMoveChunk();
+
+// st.printShardingStatus();
+// // Retry the command. This should retry against shard 1, which should throw
+// // IncompleteTransactionHistory.
+// assert.commandFailedWithCode(testDB.runCommand(cmdObj),
+// ErrorCodes.IncompleteTransactionHistory);
+// });
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
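All of the retryable-write tests above share one retry pattern: the same command document, carrying an explicit lsid and txnNumber, is replayed to simulate a client retry. A minimal hedged sketch of that pattern follows; the values are illustrative and not taken from the patch.

// Sketch: a retryable update replayed verbatim. A plain retryable write returns its cached
// result on the second attempt; once the statement has been converted into a transaction by a
// WouldChangeOwningShard update, the retry is expected to fail with
// IncompleteTransactionHistory instead.
const retryableCmd = {
    update: collName,
    updates: [{q: {x: -100}, u: {$set: {x: 0}}}],
    ordered: false,
    lsid: {id: UUID()},
    txnNumber: NumberLong(40),
};
assert.commandWorked(testDB.runCommand(retryableCmd));
const retryRes = testDB.runCommand(retryableCmd);  // identical lsid/txnNumber => treated as a retry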
diff --git a/jstests/sharding/move_chunk_update_with_write_retryability.js b/jstests/sharding/move_chunk_update_with_write_retryability.js
index 03748a56c20..b7d0ddae5d3 100644
--- a/jstests/sharding/move_chunk_update_with_write_retryability.js
+++ b/jstests/sharding/move_chunk_update_with_write_retryability.js
@@ -1,58 +1,58 @@
load("jstests/sharding/move_chunk_with_session_helper.js");
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- // Prevent unnecessary elections in the first shard replica set. Shard 'rs1' shard will need its
- // secondary to get elected, so we don't give it a zero priority.
- var st = new ShardingTest({
- mongos: 2,
- shards: {
- rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
- rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
- }
- });
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
+// Prevent unnecessary elections in the first shard replica set. Shard 'rs1' will need its
+// secondary to get elected, so we don't give it a zero priority.
+var st = new ShardingTest({
+ mongos: 2,
+ shards: {
+ rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
+ rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
+ }
+});
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
- var coll = 'update';
- var cmd = {
- update: 'update',
- updates: [
- {q: {x: 10}, u: {$inc: {a: 1}}}, // in place
- {q: {x: 20}, u: {$inc: {b: 1}}, upsert: true},
- {q: {x: 30}, u: {x: 30, z: 1}} // replacement
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(35),
- };
- var setup = function(coll) {
- coll.insert({x: 10});
- coll.insert({x: 30});
- };
- var checkRetryResult = function(result, retryResult) {
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.nModified, retryResult.nModified);
- assert.eq(result.upserted, retryResult.upserted);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
- };
- var checkDocuments = function(coll) {
- assert.eq(1, coll.findOne({x: 10}).a);
- assert.eq(1, coll.findOne({x: 20}).b);
- assert.eq(1, coll.findOne({x: 30}).z);
- };
+var coll = 'update';
+var cmd = {
+ update: 'update',
+ updates: [
+ {q: {x: 10}, u: {$inc: {a: 1}}}, // in place
+ {q: {x: 20}, u: {$inc: {b: 1}}, upsert: true},
+ {q: {x: 30}, u: {x: 30, z: 1}} // replacement
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(35),
+};
+var setup = function(coll) {
+ coll.insert({x: 10});
+ coll.insert({x: 30});
+};
+var checkRetryResult = function(result, retryResult) {
+ assert.eq(result.ok, retryResult.ok);
+ assert.eq(result.n, retryResult.n);
+ assert.eq(result.nModified, retryResult.nModified);
+ assert.eq(result.upserted, retryResult.upserted);
+ assert.eq(result.writeErrors, retryResult.writeErrors);
+ assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+};
+var checkDocuments = function(coll) {
+ assert.eq(1, coll.findOne({x: 10}).a);
+ assert.eq(1, coll.findOne({x: 20}).b);
+ assert.eq(1, coll.findOne({x: 30}).z);
+};
- testMoveChunkWithSession(st, coll, cmd, setup, checkRetryResult, checkDocuments);
+testMoveChunkWithSession(st, coll, cmd, setup, checkRetryResult, checkDocuments);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/move_chunk_wc.js b/jstests/sharding/move_chunk_wc.js
index 601b327b76e..7dc75204d19 100644
--- a/jstests/sharding/move_chunk_wc.js
+++ b/jstests/sharding/move_chunk_wc.js
@@ -10,92 +10,107 @@
load('jstests/libs/write_concern_util.js');
(function() {
- "use strict";
- var st = new ShardingTest({
- shards: {
- rs0: {nodes: 3, settings: {chainingAllowed: false}},
- rs1: {nodes: 5, settings: {chainingAllowed: false}}
- },
- mongos: 1,
- config: 1,
- configReplSetTestOptions: {settings: {chainingAllowed: false}}
- });
+"use strict";
+var st = new ShardingTest({
+ shards: {
+ rs0: {nodes: 3, settings: {chainingAllowed: false}},
+ rs1: {nodes: 5, settings: {chainingAllowed: false}}
+ },
+ mongos: 1,
+ config: 1,
+ configReplSetTestOptions: {settings: {chainingAllowed: false}}
+});
- var mongos = st.s;
- var dbName = "move-chunk-wc-test";
- var db = mongos.getDB(dbName);
- var collName = 'leaves';
- var coll = db[collName];
- var numberDoc = 20;
- var s0 = st.shard0.shardName;
- var s1 = st.shard1.shardName;
+var mongos = st.s;
+var dbName = "move-chunk-wc-test";
+var db = mongos.getDB(dbName);
+var collName = 'leaves';
+var coll = db[collName];
+var numberDoc = 20;
+var s0 = st.shard0.shardName;
+var s1 = st.shard1.shardName;
- coll.ensureIndex({x: 1}, {unique: true});
- st.ensurePrimaryShard(db.toString(), s0);
- st.shardColl(collName, {x: 1}, {x: numberDoc / 2}, {x: numberDoc / 2}, db.toString(), true);
+coll.ensureIndex({x: 1}, {unique: true});
+st.ensurePrimaryShard(db.toString(), s0);
+st.shardColl(collName, {x: 1}, {x: numberDoc / 2}, {x: numberDoc / 2}, db.toString(), true);
- for (var i = 0; i < numberDoc; i++) {
- coll.insert({x: i});
- }
- assert.eq(coll.count(), numberDoc);
+for (var i = 0; i < numberDoc; i++) {
+ coll.insert({x: i});
+}
+assert.eq(coll.count(), numberDoc);
- // Checks that each shard has the expected number of chunks.
- function checkChunkCount(s0Count, s1Count) {
- var chunkCounts = st.chunkCounts(collName, db.toString());
- assert.eq(chunkCounts[s0], s0Count);
- assert.eq(chunkCounts[s1], s1Count);
- }
- checkChunkCount(1, 1);
+// Checks that each shard has the expected number of chunks.
+function checkChunkCount(s0Count, s1Count) {
+ var chunkCounts = st.chunkCounts(collName, db.toString());
+ assert.eq(chunkCounts[s0], s0Count);
+ assert.eq(chunkCounts[s1], s1Count);
+}
+checkChunkCount(1, 1);
- var req = {
- moveChunk: coll.toString(),
- find: {x: numberDoc / 2},
- to: s0,
- _secondaryThrottle: true,
- _waitForDelete: true
- };
+var req = {
+ moveChunk: coll.toString(),
+ find: {x: numberDoc / 2},
+ to: s0,
+ _secondaryThrottle: true,
+ _waitForDelete: true
+};
- req.writeConcern = {w: 1, wtimeout: 30000};
- jsTest.log("Testing " + tojson(req));
- var res = db.adminCommand(req);
- assert.commandWorked(res);
- assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
- checkChunkCount(2, 0);
+req.writeConcern = {
+ w: 1,
+ wtimeout: 30000
+};
+jsTest.log("Testing " + tojson(req));
+var res = db.adminCommand(req);
+assert.commandWorked(res);
+assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
+checkChunkCount(2, 0);
- // This should pass because w: majority is always passed to config servers.
- req.writeConcern = {w: 2, wtimeout: 30000};
- jsTest.log("Testing " + tojson(req));
- req.to = s1;
- res = db.adminCommand(req);
- assert.commandWorked(res);
- assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
- checkChunkCount(1, 1);
+// This should pass because w: majority is always passed to config servers.
+req.writeConcern = {
+ w: 2,
+ wtimeout: 30000
+};
+jsTest.log("Testing " + tojson(req));
+req.to = s1;
+res = db.adminCommand(req);
+assert.commandWorked(res);
+assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
+checkChunkCount(1, 1);
- // This should fail because the writeConcern cannot be satisfied on the to shard.
- req.writeConcern = {w: 4, wtimeout: 3000};
- jsTest.log("Testing " + tojson(req));
- req.to = s0;
- res = db.adminCommand(req);
- assert.commandFailed(res);
- assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
- checkChunkCount(1, 1);
+// This should fail because the writeConcern cannot be satisfied on the to shard.
+req.writeConcern = {
+ w: 4,
+ wtimeout: 3000
+};
+jsTest.log("Testing " + tojson(req));
+req.to = s0;
+res = db.adminCommand(req);
+assert.commandFailed(res);
+assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
+checkChunkCount(1, 1);
- // This should fail because the writeConcern cannot be satisfied on the from shard.
- req.writeConcern = {w: 6, wtimeout: 3000};
- jsTest.log("Testing " + tojson(req));
- req.to = s0;
- res = db.adminCommand(req);
- assert.commandFailed(res);
- assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
- checkChunkCount(1, 1);
+// This should fail because the writeConcern cannot be satisfied on the from shard.
+req.writeConcern = {
+ w: 6,
+ wtimeout: 3000
+};
+jsTest.log("Testing " + tojson(req));
+req.to = s0;
+res = db.adminCommand(req);
+assert.commandFailed(res);
+assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
+checkChunkCount(1, 1);
- // This should fail because the writeConcern is invalid and cannot be satisfied anywhere.
- req.writeConcern = {w: "invalid", wtimeout: 3000};
- jsTest.log("Testing " + tojson(req));
- req.to = s0;
- res = db.adminCommand(req);
- assert.commandFailed(res);
- assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
- checkChunkCount(1, 1);
- st.stop();
+// This should fail because the writeConcern is invalid and cannot be satisfied anywhere.
+req.writeConcern = {
+ w: "invalid",
+ wtimeout: 3000
+};
+jsTest.log("Testing " + tojson(req));
+req.to = s0;
+res = db.adminCommand(req);
+assert.commandFailed(res);
+assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
+checkChunkCount(1, 1);
+st.stop();
})();
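As the assertions above depend on, an unsatisfiable write concern makes the moveChunk command itself fail rather than attaching a writeConcernError to a successful reply. A hedged sketch of distinguishing the two outcomes (variable names are illustrative):

// Sketch: issue moveChunk with a caller-supplied writeConcern and report which failure
// mode, if any, occurred.
var moveRes = db.adminCommand({
    moveChunk: coll.toString(),
    find: {x: 0},
    to: s1,
    writeConcern: {w: 3, wtimeout: 30000}
});
if (!moveRes.ok) {
    jsTest.log("moveChunk failed as a command: " + tojson(moveRes));
} else if (moveRes.writeConcernError) {
    jsTest.log("moveChunk succeeded but reported a writeConcernError: " + tojson(moveRes));
}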
diff --git a/jstests/sharding/move_primary_basic.js b/jstests/sharding/move_primary_basic.js
index 477d4732b20..dbfcb88d492 100644
--- a/jstests/sharding/move_primary_basic.js
+++ b/jstests/sharding/move_primary_basic.js
@@ -3,64 +3,63 @@
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({mongos: 1, shards: 2});
- var mongos = st.s0;
+var st = new ShardingTest({mongos: 1, shards: 2});
+var mongos = st.s0;
- var kDbName = 'db';
+var kDbName = 'db';
- var shard0 = st.shard0.shardName;
- var shard1 = st.shard1.shardName;
+var shard0 = st.shard0.shardName;
+var shard1 = st.shard1.shardName;
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
- assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, shard0);
+assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
- // Can run only against the admin database.
- assert.commandFailedWithCode(
- mongos.getDB('test').runCommand({movePrimary: kDbName, to: shard0}),
- ErrorCodes.Unauthorized);
+// Can run only against the admin database.
+assert.commandFailedWithCode(mongos.getDB('test').runCommand({movePrimary: kDbName, to: shard0}),
+ ErrorCodes.Unauthorized);
- // Can't movePrimary for 'config' database.
- assert.commandFailed(mongos.adminCommand({movePrimary: 'config', to: shard0}));
+// Can't movePrimary for 'config' database.
+assert.commandFailed(mongos.adminCommand({movePrimary: 'config', to: shard0}));
- // Can't movePrimary for 'local' database.
- assert.commandFailed(mongos.adminCommand({movePrimary: 'local', to: shard0}));
+// Can't movePrimary for 'local' database.
+assert.commandFailed(mongos.adminCommand({movePrimary: 'local', to: shard0}));
- // Can't movePrimary for 'admin' database.
- assert.commandFailed(mongos.adminCommand({movePrimary: 'admin', to: shard0}));
+// Can't movePrimary for 'admin' database.
+assert.commandFailed(mongos.adminCommand({movePrimary: 'admin', to: shard0}));
- // Can't movePrimary for invalid db name.
- assert.commandFailed(mongos.adminCommand({movePrimary: 'a.b', to: shard0}));
- assert.commandFailed(mongos.adminCommand({movePrimary: '', to: shard0}));
+// Can't movePrimary for invalid db name.
+assert.commandFailed(mongos.adminCommand({movePrimary: 'a.b', to: shard0}));
+assert.commandFailed(mongos.adminCommand({movePrimary: '', to: shard0}));
- // Fail if 'to' shard does not exist or empty.
- assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: 'Unknown'}));
- assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: ''}));
- assert.commandFailed(mongos.adminCommand({movePrimary: kDbName}));
+// Fail if the 'to' shard does not exist or is empty.
+assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: 'Unknown'}));
+assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: ''}));
+assert.commandFailed(mongos.adminCommand({movePrimary: kDbName}));
- let versionBeforeMovePrimary = mongos.getDB('config').databases.findOne({_id: kDbName}).version;
+let versionBeforeMovePrimary = mongos.getDB('config').databases.findOne({_id: kDbName}).version;
- // Succeed if 'to' shard exists and verify metadata changes.
- assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
- assert.commandWorked(mongos.adminCommand({movePrimary: kDbName, to: shard1}));
- assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+// Succeed if 'to' shard exists and verify metadata changes.
+assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+assert.commandWorked(mongos.adminCommand({movePrimary: kDbName, to: shard1}));
+assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
- assert.eq(versionBeforeMovePrimary.lastMod + 1,
- mongos.getDB('config').databases.findOne({_id: kDbName}).version.lastMod);
- assert.eq(versionBeforeMovePrimary.uuid,
- mongos.getDB('config').databases.findOne({_id: kDbName}).version.uuid);
+assert.eq(versionBeforeMovePrimary.lastMod + 1,
+ mongos.getDB('config').databases.findOne({_id: kDbName}).version.lastMod);
+assert.eq(versionBeforeMovePrimary.uuid,
+ mongos.getDB('config').databases.findOne({_id: kDbName}).version.uuid);
- // Succeed if 'to' shard is already the primary shard for the db.
- assert.commandWorked(mongos.adminCommand({movePrimary: kDbName, to: shard1}));
- assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+// Succeed if 'to' shard is already the primary shard for the db.
+assert.commandWorked(mongos.adminCommand({movePrimary: kDbName, to: shard1}));
+assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
- // Verify the version doesn't change if the 'to' shard is already the primary shard.
- assert.eq(versionBeforeMovePrimary.lastMod + 1,
- mongos.getDB('config').databases.findOne({_id: kDbName}).version.lastMod);
- assert.eq(versionBeforeMovePrimary.uuid,
- mongos.getDB('config').databases.findOne({_id: kDbName}).version.uuid);
+// Verify the version doesn't change if the 'to' shard is already the primary shard.
+assert.eq(versionBeforeMovePrimary.lastMod + 1,
+ mongos.getDB('config').databases.findOne({_id: kDbName}).version.lastMod);
+assert.eq(versionBeforeMovePrimary.uuid,
+ mongos.getDB('config').databases.findOne({_id: kDbName}).version.uuid);
- st.stop();
+st.stop();
})();
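The metadata assertions above read config.databases directly; a compact hedged helper for that lookup (illustrative only, not part of the patch) could look like this:

// Sketch: fetch the primary shard and version document for a database through mongos.
function getDbMetadata(mongosConn, name) {
    var entry = mongosConn.getDB('config').databases.findOne({_id: name});
    return entry ? {primary: entry.primary, version: entry.version} : null;
}
var meta = getDbMetadata(mongos, kDbName);
jsTest.log('db ' + kDbName + ': primary=' + meta.primary + ', version=' + tojson(meta.version));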
diff --git a/jstests/sharding/move_primary_clone_test.js b/jstests/sharding/move_primary_clone_test.js
index 6c30ff56b46..6baa283405a 100644
--- a/jstests/sharding/move_primary_clone_test.js
+++ b/jstests/sharding/move_primary_clone_test.js
@@ -1,218 +1,213 @@
(function() {
- 'use strict';
-
- function sortByName(a, b) {
- if (a.name < b.name)
- return -1;
- if (a.name > b.name)
- return 1;
- return 0;
- }
+'use strict';
- function checkCollectionsCopiedCorrectly(fromShard, toShard, sharded, barUUID, fooUUID) {
- var res = toShard.getDB("test1").runCommand({listCollections: 1});
- assert.commandWorked(res);
+function sortByName(a, b) {
+ if (a.name < b.name)
+ return -1;
+ if (a.name > b.name)
+ return 1;
+ return 0;
+}
- var collections = res.cursor.firstBatch;
-
- // Sort collections by name.
- collections.sort(sortByName);
- assert.eq(collections.length, 2);
-
- var c1, c2;
- [c1, c2] = collections;
-
- function checkName(c, expectedName) {
- assert.eq(c.name,
- expectedName,
- 'Expected collection to be ' + expectedName + ', got ' + c.name);
- }
-
- function checkOptions(c, expectedOptions) {
- assert.hasFields(c, ['options'], 'Missing options field for collection ' + c.name);
- assert.hasFields(
- c.options, expectedOptions, 'Missing expected option(s) for collection ' + c.name);
- }
-
- function checkUUIDsEqual(c, expectedUUID) {
- assert.hasFields(c, ['info'], 'Missing info field for collection ' + c.name);
- assert.hasFields(c.info, ['uuid'], 'Missing uuid field for collection ' + c.name);
- assert.eq(c.info.uuid, expectedUUID, 'Incorrect uuid for collection ' + c.name);
- }
-
- function checkUUIDsNotEqual(c, originalUUID) {
- assert.hasFields(c, ['info'], 'Missing info field for collection ' + c.name);
- assert.hasFields(c.info, ['uuid'], 'Missing uuid field for collection ' + c.name);
- assert.neq(c.info.uuid,
- originalUUID,
- 'UUID for ' + c.name +
- ' should be different than the original collection but is the same');
- }
-
- function checkIndexes(collName, expectedIndexes) {
- var res = toShard.getDB('test1').runCommand({listIndexes: collName});
- assert.commandWorked(res, 'Failed to get indexes for collection ' + collName);
- var indexes = res.cursor.firstBatch;
- indexes.sort(sortByName);
-
- assert.eq(indexes.length, 2);
-
- indexes.forEach((index, i) => {
- var expected;
- if (i == 0) {
- expected = {name: "_id_", key: {_id: 1}};
- } else {
- expected = expectedIndexes[i - 1];
- }
- Object.keys(expected).forEach(k => {
- assert.eq(index[k], expected[k]);
- });
- });
- }
-
- function checkCount(shard, collName, count) {
- var res = shard.getDB('test1').runCommand({count: collName});
- assert.commandWorked(res);
- assert.eq(res.n, count);
- }
-
- checkName(c1, 'bar');
- checkName(c2, 'foo');
- checkOptions(c1, Object.keys(barOptions));
- checkIndexes('bar', barIndexes);
- checkOptions(c2, Object.keys(fooOptions));
- checkIndexes('foo', fooIndexes);
-
- if (sharded) {
- checkCount(fromShard, 'foo', 3);
- checkCount(fromShard, 'bar', 3);
- checkCount(toShard, 'foo', 0);
- checkCount(toShard, 'bar', 0);
-
- // UUIDs should be the same as the original
- checkUUIDsEqual(c1, barUUID);
- checkUUIDsEqual(c2, fooUUID);
- } else {
- checkCount(toShard, 'foo', 3);
- checkCount(toShard, 'bar', 3);
- checkCount(fromShard, 'foo', 0);
- checkCount(fromShard, 'bar', 0);
-
- // UUIDs should not be the same as the original
- checkUUIDsNotEqual(c1, barUUID);
- checkUUIDsNotEqual(c2, fooUUID);
- }
- }
+function checkCollectionsCopiedCorrectly(fromShard, toShard, sharded, barUUID, fooUUID) {
+ var res = toShard.getDB("test1").runCommand({listCollections: 1});
+ assert.commandWorked(res);
- function createCollections(sharded) {
- assert.commandWorked(st.getDB('test1').runCommand({dropDatabase: 1}));
- var db = st.getDB('test1');
+ var collections = res.cursor.firstBatch;
- assert.commandWorked(db.createCollection('foo', fooOptions));
- assert.commandWorked(db.createCollection('bar', barOptions));
+ // Sort collections by name.
+ collections.sort(sortByName);
+ assert.eq(collections.length, 2);
- for (let i = 0; i < 3; i++) {
- assert.writeOK(db.foo.insert({a: i}));
- assert.writeOK(db.bar.insert({a: i}));
- }
- assert.eq(3, db.foo.count());
- assert.eq(3, db.bar.count());
+ var c1, c2;
+ [c1, c2] = collections;
- assert.commandWorked(db.runCommand({createIndexes: 'foo', indexes: fooIndexes}));
- assert.commandWorked(db.runCommand({createIndexes: 'bar', indexes: barIndexes}));
+ function checkName(c, expectedName) {
+ assert.eq(
+ c.name, expectedName, 'Expected collection to be ' + expectedName + ', got ' + c.name);
+ }
- if (sharded) {
- assert.commandWorked(db.adminCommand({enableSharding: 'test1'}));
- assert.commandWorked(db.adminCommand({shardCollection: 'test1.foo', key: {_id: 1}}));
- assert.commandWorked(db.adminCommand({shardCollection: 'test1.bar', key: {_id: 1}}));
- }
+ function checkOptions(c, expectedOptions) {
+ assert.hasFields(c, ['options'], 'Missing options field for collection ' + c.name);
+ assert.hasFields(
+ c.options, expectedOptions, 'Missing expected option(s) for collection ' + c.name);
}
- function movePrimaryWithFailpoint(sharded) {
- var db = st.getDB('test1');
- createCollections(sharded);
+ function checkUUIDsEqual(c, expectedUUID) {
+ assert.hasFields(c, ['info'], 'Missing info field for collection ' + c.name);
+ assert.hasFields(c.info, ['uuid'], 'Missing uuid field for collection ' + c.name);
+ assert.eq(c.info.uuid, expectedUUID, 'Incorrect uuid for collection ' + c.name);
+ }
- var fromShard = st.getPrimaryShard('test1');
- var toShard = st.getOther(fromShard);
+ function checkUUIDsNotEqual(c, originalUUID) {
+ assert.hasFields(c, ['info'], 'Missing info field for collection ' + c.name);
+ assert.hasFields(c.info, ['uuid'], 'Missing uuid field for collection ' + c.name);
+ assert.neq(c.info.uuid,
+ originalUUID,
+ 'UUID for ' + c.name +
+ ' should be different than the original collection but is the same');
+ }
- assert.eq(
- 3, fromShard.getDB("test1").foo.count(), "from shard doesn't have data before move");
- assert.eq(0, toShard.getDB("test1").foo.count(), "to shard has data before move");
- assert.eq(
- 3, fromShard.getDB("test1").bar.count(), "from shard doesn't have data before move");
- assert.eq(0, toShard.getDB("test1").bar.count(), "to shard has data before move");
+ function checkIndexes(collName, expectedIndexes) {
+ var res = toShard.getDB('test1').runCommand({listIndexes: collName});
+ assert.commandWorked(res, 'Failed to get indexes for collection ' + collName);
+ var indexes = res.cursor.firstBatch;
+ indexes.sort(sortByName);
+
+ assert.eq(indexes.length, 2);
+
+ indexes.forEach((index, i) => {
+ var expected;
+ if (i == 0) {
+ expected = {name: "_id_", key: {_id: 1}};
+ } else {
+ expected = expectedIndexes[i - 1];
+ }
+ Object.keys(expected).forEach(k => {
+ assert.eq(index[k], expected[k]);
+ });
+ });
+ }
+
+ function checkCount(shard, collName, count) {
+ var res = shard.getDB('test1').runCommand({count: collName});
+ assert.commandWorked(res);
+ assert.eq(res.n, count);
+ }
- var listCollsFrom = fromShard.getDB("test1").runCommand({listCollections: 1});
- var fromColls = listCollsFrom.cursor.firstBatch;
- fromColls.sort(sortByName);
- var baruuid = fromColls[0].info.uuid;
- var foouuid = fromColls[1].info.uuid;
+ checkName(c1, 'bar');
+ checkName(c2, 'foo');
+ checkOptions(c1, Object.keys(barOptions));
+ checkIndexes('bar', barIndexes);
+ checkOptions(c2, Object.keys(fooOptions));
+ checkIndexes('foo', fooIndexes);
+
+ if (sharded) {
+ checkCount(fromShard, 'foo', 3);
+ checkCount(fromShard, 'bar', 3);
+ checkCount(toShard, 'foo', 0);
+ checkCount(toShard, 'bar', 0);
+
+ // UUIDs should be the same as the original
+ checkUUIDsEqual(c1, barUUID);
+ checkUUIDsEqual(c2, fooUUID);
+ } else {
+ checkCount(toShard, 'foo', 3);
+ checkCount(toShard, 'bar', 3);
+ checkCount(fromShard, 'foo', 0);
+ checkCount(fromShard, 'bar', 0);
+
+ // UUIDs should not be the same as the original
+ checkUUIDsNotEqual(c1, barUUID);
+ checkUUIDsNotEqual(c2, fooUUID);
+ }
+}
- assert.commandWorked(toShard.getDB("admin").runCommand(
- {configureFailPoint: 'movePrimaryFailPoint', mode: 'alwaysOn'}));
+function createCollections(sharded) {
+ assert.commandWorked(st.getDB('test1').runCommand({dropDatabase: 1}));
+ var db = st.getDB('test1');
- // Failpoint will cause movePrimary to fail after the first collection has been copied over
- assert.commandFailed(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
+ assert.commandWorked(db.createCollection('foo', fooOptions));
+ assert.commandWorked(db.createCollection('bar', barOptions));
- assert.commandWorked(toShard.getDB("admin").runCommand(
- {configureFailPoint: 'movePrimaryFailPoint', mode: 'off'}));
-
- if (sharded) {
- // If the collections are sharded, the UUID of the collection on the donor should be
- // copied over and the options should be the same so retrying the move should succeed.
- assert.commandWorked(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
-
- checkCollectionsCopiedCorrectly(fromShard, toShard, sharded, baruuid, foouuid);
-
- // Now change an option on the toShard, and verify that calling clone again fails if
- // the options don't match.
- assert.commandWorked(
- toShard.getDB('test1').runCommand({collMod: 'bar', validationLevel: 'moderate'}));
- assert.commandFailed(st.s0.adminCommand({movePrimary: "test1", to: fromShard.name}));
- } else {
- // If the collections are unsharded, we should fail when any collections being copied
- // exist on the target shard.
- assert.commandFailed(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
- }
+ for (let i = 0; i < 3; i++) {
+ assert.writeOK(db.foo.insert({a: i}));
+ assert.writeOK(db.bar.insert({a: i}));
}
+ assert.eq(3, db.foo.count());
+ assert.eq(3, db.bar.count());
- function movePrimaryNoFailpoint(sharded) {
- var db = st.getDB('test1');
- createCollections(sharded);
+ assert.commandWorked(db.runCommand({createIndexes: 'foo', indexes: fooIndexes}));
+ assert.commandWorked(db.runCommand({createIndexes: 'bar', indexes: barIndexes}));
- var fromShard = st.getPrimaryShard('test1');
- var toShard = st.getOther(fromShard);
+ if (sharded) {
+ assert.commandWorked(db.adminCommand({enableSharding: 'test1'}));
+ assert.commandWorked(db.adminCommand({shardCollection: 'test1.foo', key: {_id: 1}}));
+ assert.commandWorked(db.adminCommand({shardCollection: 'test1.bar', key: {_id: 1}}));
+ }
+}
- assert.eq(
- 3, fromShard.getDB("test1").foo.count(), "from shard doesn't have data before move");
- assert.eq(0, toShard.getDB("test1").foo.count(), "to shard has data before move");
- assert.eq(
- 3, fromShard.getDB("test1").bar.count(), "from shard doesn't have data before move");
- assert.eq(0, toShard.getDB("test1").bar.count(), "to shard has data before move");
+function movePrimaryWithFailpoint(sharded) {
+ var db = st.getDB('test1');
+ createCollections(sharded);
+
+ var fromShard = st.getPrimaryShard('test1');
+ var toShard = st.getOther(fromShard);
+
+ assert.eq(3, fromShard.getDB("test1").foo.count(), "from shard doesn't have data before move");
+ assert.eq(0, toShard.getDB("test1").foo.count(), "to shard has data before move");
+ assert.eq(3, fromShard.getDB("test1").bar.count(), "from shard doesn't have data before move");
+ assert.eq(0, toShard.getDB("test1").bar.count(), "to shard has data before move");
+
+ var listCollsFrom = fromShard.getDB("test1").runCommand({listCollections: 1});
+ var fromColls = listCollsFrom.cursor.firstBatch;
+ fromColls.sort(sortByName);
+ var baruuid = fromColls[0].info.uuid;
+ var foouuid = fromColls[1].info.uuid;
+
+ assert.commandWorked(toShard.getDB("admin").runCommand(
+ {configureFailPoint: 'movePrimaryFailPoint', mode: 'alwaysOn'}));
+
+ // Failpoint will cause movePrimary to fail after the first collection has been copied over
+ assert.commandFailed(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
- var listCollsFrom = fromShard.getDB("test1").runCommand({listCollections: 1});
- var fromColls = listCollsFrom.cursor.firstBatch;
- fromColls.sort(sortByName);
- var baruuid = fromColls[0].info.uuid;
- var foouuid = fromColls[1].info.uuid;
+ assert.commandWorked(toShard.getDB("admin").runCommand(
+ {configureFailPoint: 'movePrimaryFailPoint', mode: 'off'}));
+ if (sharded) {
+ // If the collections are sharded, the UUID of the collection on the donor should be
+ // copied over and the options should be the same so retrying the move should succeed.
assert.commandWorked(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
checkCollectionsCopiedCorrectly(fromShard, toShard, sharded, baruuid, foouuid);
+
+ // Now change an option on the toShard, and verify that calling clone again fails if
+ // the options don't match.
+ assert.commandWorked(
+ toShard.getDB('test1').runCommand({collMod: 'bar', validationLevel: 'moderate'}));
+ assert.commandFailed(st.s0.adminCommand({movePrimary: "test1", to: fromShard.name}));
+ } else {
+ // If the collections are unsharded, we should fail when any collections being copied
+ // exist on the target shard.
+ assert.commandFailed(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
}
+}
+
+function movePrimaryNoFailpoint(sharded) {
+ var db = st.getDB('test1');
+ createCollections(sharded);
+
+ var fromShard = st.getPrimaryShard('test1');
+ var toShard = st.getOther(fromShard);
+
+ assert.eq(3, fromShard.getDB("test1").foo.count(), "from shard doesn't have data before move");
+ assert.eq(0, toShard.getDB("test1").foo.count(), "to shard has data before move");
+ assert.eq(3, fromShard.getDB("test1").bar.count(), "from shard doesn't have data before move");
+ assert.eq(0, toShard.getDB("test1").bar.count(), "to shard has data before move");
+
+ var listCollsFrom = fromShard.getDB("test1").runCommand({listCollections: 1});
+ var fromColls = listCollsFrom.cursor.firstBatch;
+ fromColls.sort(sortByName);
+ var baruuid = fromColls[0].info.uuid;
+ var foouuid = fromColls[1].info.uuid;
+
+ assert.commandWorked(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
+
+ checkCollectionsCopiedCorrectly(fromShard, toShard, sharded, baruuid, foouuid);
+}
- var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2});
- var fooOptions = {validationLevel: "off"};
- var barOptions = {validator: {$jsonSchema: {required: ['a']}}};
+var fooOptions = {validationLevel: "off"};
+var barOptions = {validator: {$jsonSchema: {required: ['a']}}};
- var fooIndexes = [{key: {a: 1}, name: 'index1', expireAfterSeconds: 5000}];
- var barIndexes = [{key: {a: -1}, name: 'index2'}];
+var fooIndexes = [{key: {a: 1}, name: 'index1', expireAfterSeconds: 5000}];
+var barIndexes = [{key: {a: -1}, name: 'index2'}];
- movePrimaryWithFailpoint(true);
- movePrimaryWithFailpoint(false);
- movePrimaryNoFailpoint(true);
- movePrimaryNoFailpoint(false);
+movePrimaryWithFailpoint(true);
+movePrimaryWithFailpoint(false);
+movePrimaryNoFailpoint(true);
+movePrimaryNoFailpoint(false);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/move_primary_fails_without_database_version.js b/jstests/sharding/move_primary_fails_without_database_version.js
index cf03d5e1cfe..27b447efdc4 100644
--- a/jstests/sharding/move_primary_fails_without_database_version.js
+++ b/jstests/sharding/move_primary_fails_without_database_version.js
@@ -1,19 +1,17 @@
// Tests that a movePrimary will fail if the database doesn't have a version in config.databases
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
+const dbName = "test";
- const st = new ShardingTest({shards: 2});
+const st = new ShardingTest({shards: 2});
- assert.commandWorked(st.s.getDB("config").getCollection("databases").insert({
- _id: dbName,
- partitioned: false,
- primary: st.shard0.shardName
- }));
+assert.commandWorked(st.s.getDB("config")
+ .getCollection("databases")
+ .insert({_id: dbName, partitioned: false, primary: st.shard0.shardName}));
- assert.commandFailedWithCode(st.s.adminCommand({movePrimary: dbName, to: st.shard1.shardName}),
- ErrorCodes.InternalError);
+assert.commandFailedWithCode(st.s.adminCommand({movePrimary: dbName, to: st.shard1.shardName}),
+ ErrorCodes.InternalError);
- st.stop();
+st.stop();
})();
\ No newline at end of file
diff --git a/jstests/sharding/movechunk_commit_changelog_stats.js b/jstests/sharding/movechunk_commit_changelog_stats.js
index cff5ae2a445..0ec7e9261c1 100644
--- a/jstests/sharding/movechunk_commit_changelog_stats.js
+++ b/jstests/sharding/movechunk_commit_changelog_stats.js
@@ -3,38 +3,38 @@
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({mongos: 1, shards: 2});
- var kDbName = 'db';
+var st = new ShardingTest({mongos: 1, shards: 2});
+var kDbName = 'db';
- var mongos = st.s0;
- var shard0 = st.shard0.shardName;
- var shard1 = st.shard1.shardName;
+var mongos = st.s0;
+var shard0 = st.shard0.shardName;
+var shard1 = st.shard1.shardName;
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, shard0);
- function assertCountsInChangelog() {
- let changeLog = st.s.getDB('config').changelog.find({what: 'moveChunk.commit'}).toArray();
- assert.gt(changeLog.length, 0);
- for (let i = 0; i < changeLog.length; i++) {
- assert(changeLog[i].details.hasOwnProperty('counts'));
- }
+function assertCountsInChangelog() {
+ let changeLog = st.s.getDB('config').changelog.find({what: 'moveChunk.commit'}).toArray();
+ assert.gt(changeLog.length, 0);
+ for (let i = 0; i < changeLog.length; i++) {
+ assert(changeLog[i].details.hasOwnProperty('counts'));
}
+}
- var ns = kDbName + '.fooHashed';
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
+var ns = kDbName + '.fooHashed';
+assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
- var aChunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
- assert(aChunk);
+var aChunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
+assert(aChunk);
- // Assert counts field exists in the changelog entry for moveChunk.commit
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard1}));
- assertCountsInChangelog();
+// Assert that the counts field exists in the changelog entry for moveChunk.commit
+assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard1}));
+assertCountsInChangelog();
- mongos.getDB(kDbName).fooHashed.drop();
+mongos.getDB(kDbName).fooHashed.drop();
- st.stop();
+st.stop();
})();
\ No newline at end of file
diff --git a/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js b/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
index 7158d12c719..3a03a485dc9 100644
--- a/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
+++ b/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
@@ -9,69 +9,69 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- 'use strict';
+'use strict';
- // Intentionally use a config server with 1 node so that the step down and promotion to primary
- // are guaranteed to happen on the same host
- var st = new ShardingTest({config: 1, shards: 2});
- var mongos = st.s0;
+// Intentionally use a config server with 1 node so that the step down and promotion to primary
+// are guaranteed to happen on the same host
+var st = new ShardingTest({config: 1, shards: 2});
+var mongos = st.s0;
- assert.commandWorked(mongos.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(mongos.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
+assert.commandWorked(mongos.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(mongos.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
- var coll = mongos.getDB('TestDB').TestColl;
+var coll = mongos.getDB('TestDB').TestColl;
- // We have one chunk initially
- assert.writeOK(coll.insert({Key: 0, Value: 'Test value'}));
+// We have one chunk initially
+assert.writeOK(coll.insert({Key: 0, Value: 'Test value'}));
- pauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+pauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
- // For startParallelOps to write its state
- var staticMongod = MongoRunner.runMongod({});
+// For startParallelOps to write its state
+var staticMongod = MongoRunner.runMongod({});
- var joinMoveChunk = moveChunkParallel(
- staticMongod, mongos.host, {Key: 0}, null, 'TestDB.TestColl', st.shard1.shardName);
- waitForMigrateStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, mongos.host, {Key: 0}, null, 'TestDB.TestColl', st.shard1.shardName);
+waitForMigrateStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
- // Stepdown the primary in order to force the balancer to stop. Use a timeout of 5 seconds for
- // both step down operations, because mongos will retry to find the CSRS primary for up to 20
- // seconds and we have two successive ones.
- assert.commandWorked(st.configRS.getPrimary().adminCommand({replSetStepDown: 5, force: true}));
+// Stepdown the primary in order to force the balancer to stop. Use a timeout of 5 seconds for
+// both step down operations, because mongos will retry to find the CSRS primary for up to 20
+// seconds and we have two successive ones.
+assert.commandWorked(st.configRS.getPrimary().adminCommand({replSetStepDown: 5, force: true}));
- // Ensure a new primary is found promptly
- st.configRS.getPrimary(30000);
+// Ensure a new primary is found promptly
+st.configRS.getPrimary(30000);
- assert.eq(1,
- mongos.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
- assert.eq(0,
- mongos.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
+assert.eq(1,
+ mongos.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+ .itcount());
+assert.eq(0,
+ mongos.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+ .itcount());
- // At this point, the balancer is in recovery mode. Ensure that stepdown can be done again and
- // the recovery mode interrupted.
- assert.commandWorked(st.configRS.getPrimary().adminCommand({replSetStepDown: 5, force: true}));
+// At this point, the balancer is in recovery mode. Ensure that stepdown can be done again and
+// the recovery mode interrupted.
+assert.commandWorked(st.configRS.getPrimary().adminCommand({replSetStepDown: 5, force: true}));
- // Ensure a new primary is found promptly
- st.configRS.getPrimary(30000);
+// Ensure a new primary is found promptly
+st.configRS.getPrimary(30000);
- unpauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+unpauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
- // Ensure that migration succeeded
- joinMoveChunk();
+// Ensure that migration succeeded
+joinMoveChunk();
- assert.eq(0,
- mongos.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
- assert.eq(1,
- mongos.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
+assert.eq(0,
+ mongos.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+ .itcount());
+assert.eq(1,
+ mongos.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+ .itcount());
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/movechunk_parallel.js b/jstests/sharding/movechunk_parallel.js
index 37fddce75f4..4c486e64f89 100644
--- a/jstests/sharding/movechunk_parallel.js
+++ b/jstests/sharding/movechunk_parallel.js
@@ -4,78 +4,78 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- 'use strict';
-
- // For startParallelOps to write its state
- var staticMongod = MongoRunner.runMongod({});
-
- var st = new ShardingTest({shards: 4});
-
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
-
- var coll = st.s0.getDB('TestDB').TestColl;
-
- // Create 4 chunks initially
- assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
- assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
- assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
- assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
-
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 10}));
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20}));
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 30}));
-
- // Move two of the chunks to st.shard1.shardName so we have option to do parallel balancing
- assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName));
- assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName));
-
- assert.eq(2,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
- assert.eq(2,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
-
- // Pause migrations at shards 2 and 3
- pauseMigrateAtStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
- pauseMigrateAtStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
-
- // Both move chunk operations should proceed
- var joinMoveChunk1 = moveChunkParallel(
- staticMongod, st.s0.host, {Key: 10}, null, 'TestDB.TestColl', st.shard2.shardName);
- var joinMoveChunk2 = moveChunkParallel(
- staticMongod, st.s0.host, {Key: 30}, null, 'TestDB.TestColl', st.shard3.shardName);
-
- waitForMigrateStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
- waitForMigrateStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
-
- unpauseMigrateAtStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
- unpauseMigrateAtStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
-
- joinMoveChunk1();
- joinMoveChunk2();
-
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard2.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard3.shardName})
- .itcount());
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+'use strict';
+
+// For startParallelOps to write its state
+var staticMongod = MongoRunner.runMongod({});
+
+var st = new ShardingTest({shards: 4});
+
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
+
+var coll = st.s0.getDB('TestDB').TestColl;
+
+// Create 4 chunks initially
+assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
+assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
+assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
+assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
+
+assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 10}));
+assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20}));
+assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 30}));
+
+// Move two of the chunks to st.shard1.shardName so we have the option to do parallel balancing
+assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName));
+assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName));
+
+assert.eq(2,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+ .itcount());
+assert.eq(2,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+ .itcount());
+
+// Pause migrations at shards 2 and 3
+pauseMigrateAtStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
+pauseMigrateAtStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
+
+// Both move chunk operations should proceed
+var joinMoveChunk1 = moveChunkParallel(
+ staticMongod, st.s0.host, {Key: 10}, null, 'TestDB.TestColl', st.shard2.shardName);
+var joinMoveChunk2 = moveChunkParallel(
+ staticMongod, st.s0.host, {Key: 30}, null, 'TestDB.TestColl', st.shard3.shardName);
+
+waitForMigrateStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
+waitForMigrateStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
+
+unpauseMigrateAtStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
+unpauseMigrateAtStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
+
+joinMoveChunk1();
+joinMoveChunk2();
+
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+ .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+ .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard2.shardName})
+ .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard3.shardName})
+ .itcount());
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/mrShardedOutputAuth.js b/jstests/sharding/mrShardedOutputAuth.js
index 0536c6a51b9..c860c16148e 100644
--- a/jstests/sharding/mrShardedOutputAuth.js
+++ b/jstests/sharding/mrShardedOutputAuth.js
@@ -6,90 +6,89 @@
(function() {
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- function doMapReduce(connection, outputDb) {
- // clean output db and run m/r
- outputDb.numbers_out.drop();
- printjson(connection.getDB('input').runCommand({
- mapreduce: "numbers",
- map: function() {
- emit(this.num, {count: 1});
- },
- reduce: function(k, values) {
- var result = {};
- values.forEach(function(value) {
- result.count = 1;
- });
- return result;
- },
- out: {merge: "numbers_out", sharded: true, db: "output"},
- verbose: true,
- query: {}
- }));
- }
-
- function assertSuccess(configDb, outputDb) {
- assert.eq(outputDb.numbers_out.count(), 50, "map/reduce failed");
- assert(!configDb.collections.findOne().dropped, "no sharded collections");
- }
-
- function assertFailure(configDb, outputDb) {
- assert.eq(outputDb.numbers_out.count(), 0, "map/reduce should not have succeeded");
- }
-
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest({
- name: "mrShardedOutputAuth",
- shards: 1,
- mongos: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
- });
-
- // Setup the users to the input, output and admin databases
- var mongos = st.s;
- var adminDb = mongos.getDB("admin");
- adminDb.createUser({user: "user", pwd: "pass", roles: jsTest.adminUserRoles});
-
- var authenticatedConn = new Mongo(mongos.host);
- authenticatedConn.getDB('admin').auth("user", "pass");
- adminDb = authenticatedConn.getDB("admin");
-
- var configDb = authenticatedConn.getDB("config");
-
- var inputDb = authenticatedConn.getDB("input");
- inputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
-
- var outputDb = authenticatedConn.getDB("output");
- outputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
-
- // Setup the input db
- inputDb.numbers.drop();
- for (var i = 0; i < 50; i++) {
- inputDb.numbers.insert({num: i});
- }
- assert.eq(inputDb.numbers.count(), 50);
-
- // Setup a connection authenticated to both input and output db
- var inputOutputAuthConn = new Mongo(mongos.host);
- inputOutputAuthConn.getDB('input').auth("user", "pass");
- inputOutputAuthConn.getDB('output').auth("user", "pass");
- doMapReduce(inputOutputAuthConn, outputDb);
- assertSuccess(configDb, outputDb);
-
- // setup a connection authenticated to only input db
- var inputAuthConn = new Mongo(mongos.host);
- inputAuthConn.getDB('input').auth("user", "pass");
- doMapReduce(inputAuthConn, outputDb);
- assertFailure(configDb, outputDb);
-
- // setup a connection authenticated to only output db
- var outputAuthConn = new Mongo(mongos.host);
- outputAuthConn.getDB('output').auth("user", "pass");
- doMapReduce(outputAuthConn, outputDb);
- assertFailure(configDb, outputDb);
-
- st.stop();
-
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
+
+function doMapReduce(connection, outputDb) {
+ // clean output db and run m/r
+ outputDb.numbers_out.drop();
+ printjson(connection.getDB('input').runCommand({
+ mapreduce: "numbers",
+ map: function() {
+ emit(this.num, {count: 1});
+ },
+ reduce: function(k, values) {
+ var result = {};
+ values.forEach(function(value) {
+ result.count = 1;
+ });
+ return result;
+ },
+ out: {merge: "numbers_out", sharded: true, db: "output"},
+ verbose: true,
+ query: {}
+ }));
+}
+
+function assertSuccess(configDb, outputDb) {
+ assert.eq(outputDb.numbers_out.count(), 50, "map/reduce failed");
+ assert(!configDb.collections.findOne().dropped, "no sharded collections");
+}
+
+function assertFailure(configDb, outputDb) {
+ assert.eq(outputDb.numbers_out.count(), 0, "map/reduce should not have succeeded");
+}
+
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest({
+ name: "mrShardedOutputAuth",
+ shards: 1,
+ mongos: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+});
+
+// Set up the users for the input, output, and admin databases
+var mongos = st.s;
+var adminDb = mongos.getDB("admin");
+adminDb.createUser({user: "user", pwd: "pass", roles: jsTest.adminUserRoles});
+
+var authenticatedConn = new Mongo(mongos.host);
+authenticatedConn.getDB('admin').auth("user", "pass");
+adminDb = authenticatedConn.getDB("admin");
+
+var configDb = authenticatedConn.getDB("config");
+
+var inputDb = authenticatedConn.getDB("input");
+inputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
+
+var outputDb = authenticatedConn.getDB("output");
+outputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
+
+// Set up the input db
+inputDb.numbers.drop();
+for (var i = 0; i < 50; i++) {
+ inputDb.numbers.insert({num: i});
+}
+assert.eq(inputDb.numbers.count(), 50);
+
+// Set up a connection authenticated to both the input and output dbs
+var inputOutputAuthConn = new Mongo(mongos.host);
+inputOutputAuthConn.getDB('input').auth("user", "pass");
+inputOutputAuthConn.getDB('output').auth("user", "pass");
+doMapReduce(inputOutputAuthConn, outputDb);
+assertSuccess(configDb, outputDb);
+
+// Set up a connection authenticated to only the input db
+var inputAuthConn = new Mongo(mongos.host);
+inputAuthConn.getDB('input').auth("user", "pass");
+doMapReduce(inputAuthConn, outputDb);
+assertFailure(configDb, outputDb);
+
+// Set up a connection authenticated to only the output db
+var outputAuthConn = new Mongo(mongos.host);
+outputAuthConn.getDB('output').auth("user", "pass");
+doMapReduce(outputAuthConn, outputDb);
+assertFailure(configDb, outputDb);
+
+st.stop();
})();
diff --git a/jstests/sharding/mr_and_agg_versioning.js b/jstests/sharding/mr_and_agg_versioning.js
index 765ba02ae16..bb129b2c6b7 100644
--- a/jstests/sharding/mr_and_agg_versioning.js
+++ b/jstests/sharding/mr_and_agg_versioning.js
@@ -1,65 +1,64 @@
// Test that map reduce and aggregate properly handle shard versioning.
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 2, mongos: 3});
+var st = new ShardingTest({shards: 2, mongos: 3});
- var dbName = jsTest.name();
- var collName = dbName + ".coll";
- var numDocs = 50000;
- var numKeys = 1000;
+var dbName = jsTest.name();
+var collName = dbName + ".coll";
+var numDocs = 50000;
+var numKeys = 1000;
- st.s.adminCommand({enableSharding: dbName});
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- st.s.adminCommand({shardCollection: collName, key: {key: 1}});
+st.s.adminCommand({enableSharding: dbName});
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+st.s.adminCommand({shardCollection: collName, key: {key: 1}});
- // Load chunk data to the stale mongoses before moving a chunk
- var staleMongos1 = st.s1;
- var staleMongos2 = st.s2;
- staleMongos1.getCollection(collName).find().itcount();
- staleMongos2.getCollection(collName).find().itcount();
+// Load chunk data into the stale mongoses before moving a chunk
+var staleMongos1 = st.s1;
+var staleMongos2 = st.s2;
+staleMongos1.getCollection(collName).find().itcount();
+staleMongos2.getCollection(collName).find().itcount();
- st.s.adminCommand({split: collName, middle: {key: numKeys / 2}});
- st.s.adminCommand({moveChunk: collName, find: {key: 0}, to: st.shard1.shardName});
+st.s.adminCommand({split: collName, middle: {key: numKeys / 2}});
+st.s.adminCommand({moveChunk: collName, find: {key: 0}, to: st.shard1.shardName});
- var bulk = st.s.getCollection(collName).initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, key: (i % numKeys), value: i % numKeys});
- }
- assert.writeOK(bulk.execute());
+var bulk = st.s.getCollection(collName).initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, key: (i % numKeys), value: i % numKeys});
+}
+assert.writeOK(bulk.execute());
- // Add orphaned documents directly to the shards to ensure they are properly filtered out.
- st.shard0.getCollection(collName).insert({_id: 0, key: 0, value: 0});
- st.shard1.getCollection(collName).insert({_id: numDocs, key: numKeys, value: numKeys});
+// Add orphaned documents directly to the shards to ensure they are properly filtered out.
+st.shard0.getCollection(collName).insert({_id: 0, key: 0, value: 0});
+st.shard1.getCollection(collName).insert({_id: numDocs, key: numKeys, value: numKeys});
- jsTest.log("Doing mapReduce");
+jsTest.log("Doing mapReduce");
- var map = function() {
- emit(this.key, this.value);
- };
- var reduce = function(k, values) {
- var total = 0;
- for (var i = 0; i < values.length; i++) {
- total += values[i];
- }
- return total;
- };
- function validateOutput(output) {
- assert.eq(output.length, numKeys, tojson(output));
- for (var i = 0; i < output.length; i++) {
- assert.eq(output[i]._id * (numDocs / numKeys), output[i].value, tojson(output));
- }
+var map = function() {
+ emit(this.key, this.value);
+};
+var reduce = function(k, values) {
+ var total = 0;
+ for (var i = 0; i < values.length; i++) {
+ total += values[i];
}
+ return total;
+};
+function validateOutput(output) {
+ assert.eq(output.length, numKeys, tojson(output));
+ for (var i = 0; i < output.length; i++) {
+ assert.eq(output[i]._id * (numDocs / numKeys), output[i].value, tojson(output));
+ }
+}
- var res = staleMongos1.getCollection(collName).mapReduce(map, reduce, {out: {inline: 1}});
- validateOutput(res.results);
-
- jsTest.log("Doing aggregation");
+var res = staleMongos1.getCollection(collName).mapReduce(map, reduce, {out: {inline: 1}});
+validateOutput(res.results);
- res = staleMongos2.getCollection(collName).aggregate(
- [{'$group': {_id: "$key", value: {"$sum": "$value"}}}, {'$sort': {_id: 1}}]);
- validateOutput(res.toArray());
+jsTest.log("Doing aggregation");
- st.stop();
+res = staleMongos2.getCollection(collName).aggregate(
+ [{'$group': {_id: "$key", value: {"$sum": "$value"}}}, {'$sort': {_id: 1}}]);
+validateOutput(res.toArray());
+st.stop();
})();
diff --git a/jstests/sharding/mr_output_sharded_validation.js b/jstests/sharding/mr_output_sharded_validation.js
index 1d33af3f83b..2643ec72f4c 100644
--- a/jstests/sharding/mr_output_sharded_validation.js
+++ b/jstests/sharding/mr_output_sharded_validation.js
@@ -3,42 +3,42 @@
// output namespace of the first phase of a mapReduce with sharded input before the final result
// collection is created. This test was designed to reproduce SERVER-36966.
(function() {
- "use strict";
-
- const st = new ShardingTest({shards: 2, config: 1, verbose: ''});
-
- const mongosDB = st.s.getDB("test");
- st.shardColl(mongosDB.foo, {_id: 1}, {_id: 0}, {_id: -1});
-
- assert.commandWorked(mongosDB.foo.insert([{_id: 1}, {_id: 2}]));
-
- assert.commandWorked(mongosDB.adminCommand(
- {shardCollection: mongosDB.output.getFullName(), key: {_id: "hashed"}}));
-
- assert.commandWorked(mongosDB.foo.mapReduce(
- function() {
- emit(this._id, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {replace: "output", sharded: true}}));
-
- // Test that using just a collection name without specifying a merge mode or the 'sharded: true'
- // information will fail if the named collection is sharded.
- const error = assert.throws(() => mongosDB.foo.mapReduce(
- function() {
- emit(this._id, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: "output"}));
- assert.eq(error.code, 15920);
-
- for (let name of mongosDB.getCollectionNames()) {
- assert.eq(-1, name.indexOf("tmp.mrs"), name);
- }
-
- st.stop();
+"use strict";
+
+const st = new ShardingTest({shards: 2, config: 1, verbose: ''});
+
+const mongosDB = st.s.getDB("test");
+st.shardColl(mongosDB.foo, {_id: 1}, {_id: 0}, {_id: -1});
+
+assert.commandWorked(mongosDB.foo.insert([{_id: 1}, {_id: 2}]));
+
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosDB.output.getFullName(), key: {_id: "hashed"}}));
+
+assert.commandWorked(mongosDB.foo.mapReduce(
+ function() {
+ emit(this._id, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {replace: "output", sharded: true}}));
+
+// Test that using just a collection name without specifying a merge mode or the 'sharded: true'
+// information will fail if the named collection is sharded.
+const error = assert.throws(() => mongosDB.foo.mapReduce(
+ function() {
+ emit(this._id, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: "output"}));
+assert.eq(error.code, 15920);
+
+for (let name of mongosDB.getCollectionNames()) {
+ assert.eq(-1, name.indexOf("tmp.mrs"), name);
+}
+
+st.stop();
}());
diff --git a/jstests/sharding/mr_shard_version.js b/jstests/sharding/mr_shard_version.js
index 225c9be324d..52622b4ce66 100644
--- a/jstests/sharding/mr_shard_version.js
+++ b/jstests/sharding/mr_shard_version.js
@@ -1,88 +1,87 @@
// Test for SERVER-4158 (version changes during mapreduce)
(function() {
- var st = new ShardingTest({shards: 2, mongos: 1});
+var st = new ShardingTest({shards: 2, mongos: 1});
- // Stop balancer, since it'll just get in the way of these
- st.stopBalancer();
+// Stop the balancer, since it'll just get in the way of these migrations
+st.stopBalancer();
- var coll = st.s.getCollection(jsTest.name() + ".coll");
+var coll = st.s.getCollection(jsTest.name() + ".coll");
- var numDocs = 50000;
- var numKeys = 1000;
- var numTests = 3;
+var numDocs = 50000;
+var numKeys = 1000;
+var numTests = 3;
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, key: "" + (i % numKeys), value: i % numKeys});
- }
- assert.writeOK(bulk.execute());
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, key: "" + (i % numKeys), value: i % numKeys});
+}
+assert.writeOK(bulk.execute());
- assert.eq(numDocs, coll.find().itcount());
+assert.eq(numDocs, coll.find().itcount());
- var halfId = coll.find().itcount() / 2;
+var halfId = coll.find().itcount() / 2;
- // Shard collection in half
- st.shardColl(coll, {_id: 1}, {_id: halfId});
+// Shard collection in half
+st.shardColl(coll, {_id: 1}, {_id: halfId});
- st.printShardingStatus();
+st.printShardingStatus();
- jsTest.log("Collection now initialized with keys and values...");
+jsTest.log("Collection now initialized with keys and values...");
- jsTest.log("Starting migrations...");
+jsTest.log("Starting migrations...");
- var ops = {};
- for (var i = 0; i < st._connections.length; i++) {
- for (var j = 0; j < 2; j++) {
- ops["" + (i * 2 + j)] = {
- op: "command",
- ns: "admin",
- command: {
- moveChunk: "" + coll,
- find: {_id: (j == 0 ? 0 : halfId)},
- to: st._connections[i].shardName
- },
- };
- }
+var ops = {};
+for (var i = 0; i < st._connections.length; i++) {
+ for (var j = 0; j < 2; j++) {
+ ops["" + (i * 2 + j)] = {
+ op: "command",
+ ns: "admin",
+ command: {
+ moveChunk: "" + coll,
+ find: {_id: (j == 0 ? 0 : halfId)},
+ to: st._connections[i].shardName
+ },
+ };
}
+}
- var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
-
- jsTest.log("Starting m/r...");
+var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
- var map = function() {
- emit(this.key, this.value);
- };
- var reduce = function(k, values) {
- var total = 0;
- for (var i = 0; i < values.length; i++)
- total += values[i];
- return total;
- };
+jsTest.log("Starting m/r...");
- var outputColl = st.s.getCollection(jsTest.name() + ".mrOutput");
+var map = function() {
+ emit(this.key, this.value);
+};
+var reduce = function(k, values) {
+ var total = 0;
+ for (var i = 0; i < values.length; i++)
+ total += values[i];
+ return total;
+};
- jsTest.log("Output coll : " + outputColl);
+var outputColl = st.s.getCollection(jsTest.name() + ".mrOutput");
- for (var t = 0; t < numTests; t++) {
- var results = coll.mapReduce(map, reduce, {out: {replace: outputColl.getName()}});
+jsTest.log("Output coll : " + outputColl);
- // Assert that the results are actually correct, all keys have values of (numDocs / numKeys)
- // x key
- var output = outputColl.find().sort({_id: 1}).toArray();
+for (var t = 0; t < numTests; t++) {
+ var results = coll.mapReduce(map, reduce, {out: {replace: outputColl.getName()}});
- // printjson( output )
+ // Assert that the results are actually correct, all keys have values of (numDocs / numKeys)
+ // x key
+ var output = outputColl.find().sort({_id: 1}).toArray();
- assert.eq(output.length, numKeys);
- printjson(output);
- for (var i = 0; i < output.length; i++)
- assert.eq(parseInt(output[i]._id) * (numDocs / numKeys), output[i].value);
- }
+ // printjson( output )
- jsTest.log("Finishing parallel migrations...");
+ assert.eq(output.length, numKeys);
+ printjson(output);
+ for (var i = 0; i < output.length; i++)
+ assert.eq(parseInt(output[i]._id) * (numDocs / numKeys), output[i].value);
+}
- printjson(benchFinish(bid));
+jsTest.log("Finishing parallel migrations...");
- st.stop();
+printjson(benchFinish(bid));
+st.stop();
})();
diff --git a/jstests/sharding/multi_coll_drop.js b/jstests/sharding/multi_coll_drop.js
index 076c39048f3..084577d8b27 100644
--- a/jstests/sharding/multi_coll_drop.js
+++ b/jstests/sharding/multi_coll_drop.js
@@ -1,44 +1,43 @@
// Tests the dropping and re-adding of a collection
(function() {
- var st = new ShardingTest({name: "multidrop", shards: 1, mongos: 2});
+var st = new ShardingTest({name: "multidrop", shards: 1, mongos: 2});
- var mA = st.s0;
- var mB = st.s1;
+var mA = st.s0;
+var mB = st.s1;
- var coll = mA.getCollection('multidrop.coll');
- var collB = mB.getCollection('multidrop.coll');
+var coll = mA.getCollection('multidrop.coll');
+var collB = mB.getCollection('multidrop.coll');
- jsTestLog("Shard and split collection...");
+jsTestLog("Shard and split collection...");
- var admin = mA.getDB("admin");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+var admin = mA.getDB("admin");
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- for (var i = -100; i < 100; i++) {
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
- }
+for (var i = -100; i < 100; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
+}
- jsTestLog("Create versioned connection for each mongos...");
+jsTestLog("Create versioned connection for each mongos...");
- assert.eq(0, coll.find().itcount());
- assert.eq(0, collB.find().itcount());
+assert.eq(0, coll.find().itcount());
+assert.eq(0, collB.find().itcount());
- jsTestLog("Dropping sharded collection...");
- assert(coll.drop());
+jsTestLog("Dropping sharded collection...");
+assert(coll.drop());
- jsTestLog("Recreating collection...");
+jsTestLog("Recreating collection...");
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- for (var i = -10; i < 10; i++) {
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
- }
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+for (var i = -10; i < 10; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
+}
- jsTestLog("Retrying connections...");
+jsTestLog("Retrying connections...");
- assert.eq(0, coll.find().itcount());
- assert.eq(0, collB.find().itcount());
-
- st.stop();
+assert.eq(0, coll.find().itcount());
+assert.eq(0, collB.find().itcount());
+st.stop();
})();
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
index 27d6dd447c7..29f350ae095 100644
--- a/jstests/sharding/multi_mongos2.js
+++ b/jstests/sharding/multi_mongos2.js
@@ -1,53 +1,53 @@
// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 2});
+var st = new ShardingTest({shards: 2, mongos: 2});
- assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
+assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
- // "test.foo" - sharded (by mongos 0)
- assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
+// "test.foo" - sharded (by mongos 0)
+assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
- // "test.existing" - unsharded
- assert.writeOK(st.s0.getDB('test').existing.insert({_id: 1}));
- assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
- assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
+// "test.existing" - unsharded
+assert.writeOK(st.s0.getDB('test').existing.insert({_id: 1}));
+assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
+assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
- // "test.existing" - unsharded to sharded (by mongos 1)
- assert.commandWorked(st.s1.adminCommand({shardcollection: "test.existing", key: {_id: 1}}));
- assert.commandWorked(st.s1.adminCommand({split: "test.existing", middle: {_id: 5}}));
- assert.commandWorked(
- st.s1.adminCommand({moveChunk: "test.existing", find: {_id: 1}, to: st.shard0.shardName}));
+// "test.existing" - unsharded to sharded (by mongos 1)
+assert.commandWorked(st.s1.adminCommand({shardcollection: "test.existing", key: {_id: 1}}));
+assert.commandWorked(st.s1.adminCommand({split: "test.existing", middle: {_id: 5}}));
+assert.commandWorked(
+ st.s1.adminCommand({moveChunk: "test.existing", find: {_id: 1}, to: st.shard0.shardName}));
- assert.eq(1, st.s0.getDB('test').existing.count({_id: 1})); // SERVER-2828
- assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
+assert.eq(1, st.s0.getDB('test').existing.count({_id: 1})); // SERVER-2828
+assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
- // Test stats
- assert.writeOK(st.s0.getDB('test').existing2.insert({_id: 1}));
- assert.eq(1, st.s0.getDB('test').existing2.count({_id: 1}));
- assert.eq(1, st.s1.getDB('test').existing2.count({_id: 1}));
+// Test stats
+assert.writeOK(st.s0.getDB('test').existing2.insert({_id: 1}));
+assert.eq(1, st.s0.getDB('test').existing2.count({_id: 1}));
+assert.eq(1, st.s1.getDB('test').existing2.count({_id: 1}));
- assert.commandWorked(st.s1.adminCommand({shardcollection: "test.existing2", key: {_id: 1}}));
- assert.eq(true, st.s1.getDB('test').existing2.stats().sharded);
+assert.commandWorked(st.s1.adminCommand({shardcollection: "test.existing2", key: {_id: 1}}));
+assert.eq(true, st.s1.getDB('test').existing2.stats().sharded);
- assert.commandWorked(st.s1.adminCommand({split: "test.existing2", middle: {_id: 5}}));
- {
- var res = st.s0.getDB('test').existing2.stats();
- printjson(res);
- assert.eq(true, res.sharded); // SERVER-2828
- }
+assert.commandWorked(st.s1.adminCommand({split: "test.existing2", middle: {_id: 5}}));
+{
+ var res = st.s0.getDB('test').existing2.stats();
+ printjson(res);
+ assert.eq(true, res.sharded); // SERVER-2828
+}
- // Test admin commands
- assert.writeOK(st.s0.getDB('test').existing3.insert({_id: 1}));
- assert.eq(1, st.s0.getDB('test').existing3.count({_id: 1}));
- assert.eq(1, st.s1.getDB('test').existing3.count({_id: 1}));
+// Test admin commands
+assert.writeOK(st.s0.getDB('test').existing3.insert({_id: 1}));
+assert.eq(1, st.s0.getDB('test').existing3.count({_id: 1}));
+assert.eq(1, st.s1.getDB('test').existing3.count({_id: 1}));
- assert.writeOK(st.s1.adminCommand({shardcollection: "test.existing3", key: {_id: 1}}));
- assert.commandWorked(st.s1.adminCommand({split: "test.existing3", middle: {_id: 5}}));
- assert.commandWorked(
- st.s0.adminCommand({moveChunk: "test.existing3", find: {_id: 1}, to: st.shard0.shardName}));
+assert.writeOK(st.s1.adminCommand({shardcollection: "test.existing3", key: {_id: 1}}));
+assert.commandWorked(st.s1.adminCommand({split: "test.existing3", middle: {_id: 5}}));
+assert.commandWorked(
+ st.s0.adminCommand({moveChunk: "test.existing3", find: {_id: 1}, to: st.shard0.shardName}));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/multi_mongos2a.js b/jstests/sharding/multi_mongos2a.js
index 451bdbd80f1..7e2dce7c8b0 100644
--- a/jstests/sharding/multi_mongos2a.js
+++ b/jstests/sharding/multi_mongos2a.js
@@ -1,29 +1,29 @@
// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 2});
+var st = new ShardingTest({shards: 2, mongos: 2});
- assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
+assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
- assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
+assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
- assert.writeOK(st.s0.getDB('test').existing.insert({_id: 1}));
- assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
- assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
+assert.writeOK(st.s0.getDB('test').existing.insert({_id: 1}));
+assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
+assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
- assert.commandWorked(st.s1.adminCommand({shardcollection: "test.existing", key: {_id: 1}}));
- assert.eq(true, st.s1.getDB('test').existing.stats().sharded);
+assert.commandWorked(st.s1.adminCommand({shardcollection: "test.existing", key: {_id: 1}}));
+assert.eq(true, st.s1.getDB('test').existing.stats().sharded);
- assert.commandWorked(st.s1.getDB("admin").runCommand({
- moveChunk: "test.existing",
- find: {_id: 1},
- to: st.getOther(st.getPrimaryShard("test")).name
- }));
+assert.commandWorked(st.s1.getDB("admin").runCommand({
+ moveChunk: "test.existing",
+ find: {_id: 1},
+ to: st.getOther(st.getPrimaryShard("test")).name
+}));
- assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
- assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
+assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
+assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/multi_shard_transaction_without_majority_reads.js b/jstests/sharding/multi_shard_transaction_without_majority_reads.js
index dafdf503f84..8ddb69a665d 100644
--- a/jstests/sharding/multi_shard_transaction_without_majority_reads.js
+++ b/jstests/sharding/multi_shard_transaction_without_majority_reads.js
@@ -6,34 +6,34 @@
*/
(function() {
- 'use strict';
+'use strict';
- const st = new ShardingTest({shards: 2, rs: {nodes: 1, enableMajorityReadConcern: 'false'}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1, enableMajorityReadConcern: 'false'}});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
- const coll = st.s0.getDB('TestDB').TestColl;
- assert.writeOK(coll.insert({_id: -1, x: 0}));
- assert.writeOK(coll.insert({_id: 1, x: 0}));
- assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {_id: 1}}));
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: 'TestDB.TestColl', find: {_id: 1}, to: st.shard1.shardName}));
+const coll = st.s0.getDB('TestDB').TestColl;
+assert.writeOK(coll.insert({_id: -1, x: 0}));
+assert.writeOK(coll.insert({_id: 1, x: 0}));
+assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {_id: 1}}));
+assert.commandWorked(
+ st.s0.adminCommand({moveChunk: 'TestDB.TestColl', find: {_id: 1}, to: st.shard1.shardName}));
- assert.writeOK(coll.update({_id: -1}, {$inc: {x: 1}}));
- assert.writeOK(coll.update({_id: 1}, {$inc: {x: 1}}));
+assert.writeOK(coll.update({_id: -1}, {$inc: {x: 1}}));
+assert.writeOK(coll.update({_id: 1}, {$inc: {x: 1}}));
- const session = st.s0.startSession();
- const sessionColl = session.getDatabase('TestDB').TestColl;
+const session = st.s0.startSession();
+const sessionColl = session.getDatabase('TestDB').TestColl;
- session.startTransaction();
+session.startTransaction();
- assert.writeOK(sessionColl.update({_id: -1}, {$inc: {x: 1}}));
- assert.writeOK(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
+assert.writeOK(sessionColl.update({_id: -1}, {$inc: {x: 1}}));
+assert.writeOK(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.ReadConcernMajorityNotEnabled);
+assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.ReadConcernMajorityNotEnabled);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/multi_write_target.js b/jstests/sharding/multi_write_target.js
index 9c4f37430da..90330f43cc0 100644
--- a/jstests/sharding/multi_write_target.js
+++ b/jstests/sharding/multi_write_target.js
@@ -2,72 +2,71 @@
// Tests that multi-writes (update/delete) target *all* shards, not just the shards that own the collection
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 3, mongos: 2});
+var st = new ShardingTest({shards: 3, mongos: 2});
- var admin = st.s0.getDB("admin");
- var coll = st.s0.getCollection("foo.bar");
+var admin = st.s0.getDB("admin");
+var coll = st.s0.getCollection("foo.bar");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {skey: 1}}));
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {skey: 1}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {skey: 0}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {skey: 100}}));
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {skey: 0}, to: st.shard1.shardName}));
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {skey: 100}, to: st.shard2.shardName}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {skey: 0}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {skey: 100}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {skey: 0}, to: st.shard1.shardName}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {skey: 100}, to: st.shard2.shardName}));
- jsTest.log("Testing multi-update...");
+jsTest.log("Testing multi-update...");
- // Put data on all shards
- assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
- assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
- assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: 100, x: 1}));
+// Put data on all shards
+assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
+assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
+assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: 100, x: 1}));
- // Non-multi-update doesn't work without shard key
- assert.writeError(coll.update({x: 1}, {$set: {updated: true}}, {multi: false}));
- assert.writeOK(coll.update({x: 1}, {$set: {updated: true}}, {multi: true}));
+// Non-multi-update doesn't work without the shard key
+assert.writeError(coll.update({x: 1}, {$set: {updated: true}}, {multi: false}));
+assert.writeOK(coll.update({x: 1}, {$set: {updated: true}}, {multi: true}));
- // Ensure update goes to *all* shards
- assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updated: true}));
- assert.neq(null, st.shard1.getCollection(coll.toString()).findOne({updated: true}));
- assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({updated: true}));
+// Ensure update goes to *all* shards
+assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updated: true}));
+assert.neq(null, st.shard1.getCollection(coll.toString()).findOne({updated: true}));
+assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({updated: true}));
- // _id update works, and goes to all shards even on the stale mongos
- var staleColl = st.s1.getCollection('foo.bar');
- assert.writeOK(staleColl.update({_id: 0}, {$set: {updatedById: true}}, {multi: false}));
+// _id update works, and goes to all shards even on the stale mongos
+var staleColl = st.s1.getCollection('foo.bar');
+assert.writeOK(staleColl.update({_id: 0}, {$set: {updatedById: true}}, {multi: false}));
- // Ensure _id update goes to *all* shards
- assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updatedById: true}));
- assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({updatedById: true}));
+// Ensure _id update goes to *all* shards
+assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updatedById: true}));
+assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({updatedById: true}));
- jsTest.log("Testing multi-delete...");
+jsTest.log("Testing multi-delete...");
- // non-multi-delete doesn't work without shard key
- assert.writeError(coll.remove({x: 1}, {justOne: true}));
+// Non-multi-delete doesn't work without the shard key
+assert.writeError(coll.remove({x: 1}, {justOne: true}));
- assert.writeOK(coll.remove({x: 1}, {justOne: false}));
+assert.writeOK(coll.remove({x: 1}, {justOne: false}));
- // Ensure delete goes to *all* shards
- assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
- assert.eq(null, st.shard1.getCollection(coll.toString()).findOne({x: 1}));
- assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({x: 1}));
+// Ensure delete goes to *all* shards
+assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
+assert.eq(null, st.shard1.getCollection(coll.toString()).findOne({x: 1}));
+assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({x: 1}));
- // Put more on all shards
- assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
- assert.writeOK(st.shard1.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
- // Data not in chunks
- assert.writeOK(st.shard2.getCollection(coll.toString()).insert({_id: 0, x: 1}));
+// Put more on all shards
+assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
+assert.writeOK(st.shard1.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
+// Data not in chunks
+assert.writeOK(st.shard2.getCollection(coll.toString()).insert({_id: 0, x: 1}));
- assert.writeOK(coll.remove({_id: 0}, {justOne: true}));
+assert.writeOK(coll.remove({_id: 0}, {justOne: true}));
- // Ensure _id delete goes to *all* shards
- assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
- assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({x: 1}));
-
- st.stop();
+// Ensure _id delete goes to *all* shards
+assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
+assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({x: 1}));
+st.stop();
})();
diff --git a/jstests/sharding/names.js b/jstests/sharding/names.js
index d7e7e884380..aa20b4ead0a 100644
--- a/jstests/sharding/names.js
+++ b/jstests/sharding/names.js
@@ -1,59 +1,56 @@
// Test that using replica set names that match the names of other shards works fine
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 0, mongos: 1});
-
- var rsA = new ReplSetTest({nodes: 2, name: "rsA", nodeOptions: {shardsvr: ""}});
- var rsB = new ReplSetTest({nodes: 2, name: "rsB", nodeOptions: {shardsvr: ""}});
-
- rsA.startSet();
- rsB.startSet();
- rsA.initiate();
- rsB.initiate();
- rsA.getPrimary();
- rsB.getPrimary();
-
- var mongos = st.s;
- var config = mongos.getDB("config");
- var admin = mongos.getDB("admin");
-
- assert.commandWorked(mongos.adminCommand({addShard: rsA.getURL(), name: rsB.name}));
- printjson(config.shards.find().toArray());
-
- assert.commandWorked(mongos.adminCommand({addShard: rsB.getURL(), name: rsA.name}));
- printjson(config.shards.find().toArray());
-
- assert.eq(2, config.shards.count(), "Error adding a shard");
- assert.eq(
- rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA");
- assert.eq(
- rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB");
-
- // Remove shard
- assert.commandWorked(mongos.adminCommand({removeshard: rsA.name}),
- "failed to start draining shard");
- var res = assert.commandWorked(mongos.adminCommand({removeshard: rsA.name}),
- "failed to remove shard");
-
- assert.eq(
- 1,
- config.shards.count(),
- "Shard was not removed: " + res + "; Shards: " + tojson(config.shards.find().toArray()));
- assert.eq(
- rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB 2");
-
- // Re-add shard
- assert.commandWorked(mongos.adminCommand({addShard: rsB.getURL(), name: rsA.name}));
- printjson(config.shards.find().toArray());
-
- assert.eq(2, config.shards.count(), "Error re-adding a shard");
- assert.eq(
- rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA 3");
- assert.eq(
- rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB 3");
-
- rsA.stopSet();
- rsB.stopSet();
- st.stop();
+'use strict';
+
+var st = new ShardingTest({shards: 0, mongos: 1});
+
+var rsA = new ReplSetTest({nodes: 2, name: "rsA", nodeOptions: {shardsvr: ""}});
+var rsB = new ReplSetTest({nodes: 2, name: "rsB", nodeOptions: {shardsvr: ""}});
+
+rsA.startSet();
+rsB.startSet();
+rsA.initiate();
+rsB.initiate();
+rsA.getPrimary();
+rsB.getPrimary();
+
+var mongos = st.s;
+var config = mongos.getDB("config");
+var admin = mongos.getDB("admin");
+
+assert.commandWorked(mongos.adminCommand({addShard: rsA.getURL(), name: rsB.name}));
+printjson(config.shards.find().toArray());
+
+assert.commandWorked(mongos.adminCommand({addShard: rsB.getURL(), name: rsA.name}));
+printjson(config.shards.find().toArray());
+
+assert.eq(2, config.shards.count(), "Error adding a shard");
+assert.eq(rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA");
+assert.eq(rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB");
+
+// Remove shard
+assert.commandWorked(mongos.adminCommand({removeshard: rsA.name}),
+ "failed to start draining shard");
+var res =
+ assert.commandWorked(mongos.adminCommand({removeshard: rsA.name}), "failed to remove shard");
+
+assert.eq(1,
+ config.shards.count(),
+ "Shard was not removed: " + res + "; Shards: " + tojson(config.shards.find().toArray()));
+assert.eq(
+ rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB 2");
+
+// Re-add shard
+assert.commandWorked(mongos.adminCommand({addShard: rsB.getURL(), name: rsA.name}));
+printjson(config.shards.find().toArray());
+
+assert.eq(2, config.shards.count(), "Error re-adding a shard");
+assert.eq(
+ rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA 3");
+assert.eq(
+ rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB 3");
+
+rsA.stopSet();
+rsB.stopSet();
+st.stop();
})();
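Note on the test above: ReplSetTest.getURL() yields a connection string of the form "<setName>/<host:port>,...", and the explicit `name` option to addShard overrides the set name embedded in that string, which is what lets rsA be registered as "rsB" and vice versa. A minimal, hedged sketch of that behaviour (the shard name "customName" is made up for illustration):

    // The shard is registered under the supplied name, not under the replica
    // set name contained in the URL; config.shards keeps the full seed list.
    var url = rsA.getURL();  // e.g. "rsA/host1:20010,host2:20011"
    assert.commandWorked(mongos.adminCommand({addShard: url, name: "customName"}));
    assert.eq(url, mongos.getDB("config").shards.findOne({_id: "customName"}).host);
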
diff --git a/jstests/sharding/nonreplicated_uuids_on_shardservers.js b/jstests/sharding/nonreplicated_uuids_on_shardservers.js
index 7c7be172ee1..64716774fb7 100644
--- a/jstests/sharding/nonreplicated_uuids_on_shardservers.js
+++ b/jstests/sharding/nonreplicated_uuids_on_shardservers.js
@@ -1,23 +1,23 @@
// SERVER-32255 This test ensures a node started with --shardsvr and added to a replica set receives
// UUIDs upon re-initiation.
(function() {
- "use strict";
- load("jstests/libs/check_uuids.js");
- let st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
- let mongos = st.s;
- let rs = st.rs0;
+"use strict";
+load("jstests/libs/check_uuids.js");
+let st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
+let mongos = st.s;
+let rs = st.rs0;
- // Create `test.coll`.
- mongos.getDB("test").coll.insert({_id: 1, x: 1});
+// Create `test.coll`.
+mongos.getDB("test").coll.insert({_id: 1, x: 1});
- // Add a node with --shardsvr to the replica set.
- let newNode = rs.add({'shardsvr': '', rsConfig: {priority: 0, votes: 0}});
- rs.reInitiate();
- rs.awaitSecondaryNodes();
+// Add a node with --shardsvr to the replica set.
+let newNode = rs.add({'shardsvr': '', rsConfig: {priority: 0, votes: 0}});
+rs.reInitiate();
+rs.awaitSecondaryNodes();
- let secondaryAdminDB = newNode.getDB("admin");
+let secondaryAdminDB = newNode.getDB("admin");
- // Ensure the new node has UUIDs for all its collections.
- checkCollectionUUIDs(secondaryAdminDB);
- st.stop();
+// Ensure the new node has UUIDs for all its collections.
+checkCollectionUUIDs(secondaryAdminDB);
+st.stop();
})();
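checkCollectionUUIDs() is loaded from jstests/libs/check_uuids.js and is not shown in this diff. As a rough sketch of what such a check could look like, using only the standard listDatabases/listCollections output (the real helper may differ):

    function assertAllCollectionsHaveUUIDs(adminDB) {
        // Walk every database on the node and require a UUID in the
        // listCollections metadata of each non-view collection.
        adminDB.runCommand({listDatabases: 1}).databases.forEach(function(d) {
            const dbHandle = adminDB.getSiblingDB(d.name);
            dbHandle.getCollectionInfos().forEach(function(collInfo) {
                if (collInfo.type === "collection") {
                    assert(collInfo.info && collInfo.info.uuid,
                           "missing UUID for " + d.name + "." + collInfo.name);
                }
            });
        });
    }
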
diff --git a/jstests/sharding/not_allowed_on_sharded_collection_cmd.js b/jstests/sharding/not_allowed_on_sharded_collection_cmd.js
index 2649994c300..7190e8b6bba 100644
--- a/jstests/sharding/not_allowed_on_sharded_collection_cmd.js
+++ b/jstests/sharding/not_allowed_on_sharded_collection_cmd.js
@@ -2,25 +2,24 @@
// collections.
(function() {
- const st = new ShardingTest({shards: 2, mongos: 2});
+const st = new ShardingTest({shards: 2, mongos: 2});
- const dbName = 'test';
- const coll = 'foo';
- const ns = dbName + '.' + coll;
+const dbName = 'test';
+const coll = 'foo';
+const ns = dbName + '.' + coll;
- const freshMongos = st.s0.getDB(dbName);
- const staleMongos = st.s1.getDB(dbName);
+const freshMongos = st.s0.getDB(dbName);
+const staleMongos = st.s1.getDB(dbName);
- assert.commandWorked(staleMongos.adminCommand({enableSharding: dbName}));
- assert.commandWorked(freshMongos.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(staleMongos.adminCommand({enableSharding: dbName}));
+assert.commandWorked(freshMongos.adminCommand({shardCollection: ns, key: {_id: 1}}));
- // Test that commands that should not be runnable on sharded collection do not work on sharded
- // collections, using both fresh mongos and stale mongos instances.
- assert.commandFailedWithCode(freshMongos.runCommand({convertToCapped: coll, size: 64 * 1024}),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(staleMongos.runCommand({convertToCapped: coll, size: 32 * 1024}),
- ErrorCodes.IllegalOperation);
-
- st.stop();
+// Test that commands that should not be runnable on sharded collection do not work on sharded
+// collections, using both fresh mongos and stale mongos instances.
+assert.commandFailedWithCode(freshMongos.runCommand({convertToCapped: coll, size: 64 * 1024}),
+ ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(staleMongos.runCommand({convertToCapped: coll, size: 32 * 1024}),
+ ErrorCodes.IllegalOperation);
+st.stop();
})();
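For contrast, an illustrative check, not part of this patch, that the same command stays legal on an unsharded collection in the same database:

    // convertToCapped is only rejected for sharded collections; an unsharded
    // collection in the sharded database can still be converted.
    assert.commandWorked(freshMongos.runCommand({create: 'unshardedColl'}));
    assert.commandWorked(
        freshMongos.runCommand({convertToCapped: 'unshardedColl', size: 64 * 1024}));
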
diff --git a/jstests/sharding/now_variable_replset.js b/jstests/sharding/now_variable_replset.js
index ad5104a0695..29089ff5e1a 100644
--- a/jstests/sharding/now_variable_replset.js
+++ b/jstests/sharding/now_variable_replset.js
@@ -3,131 +3,130 @@
*/
// @tags: [requires_find_command]
(function() {
- "use strict";
-
- var replTest = new ReplSetTest({name: "now_and_cluster_time", nodes: 1});
- replTest.startSet();
- replTest.initiate();
-
- var db = replTest.getPrimary().getDB("test");
-
- const coll = db[jsTest.name()];
- const otherColl = db[coll.getName() + "_other"];
- otherColl.drop();
- coll.drop();
- db["viewWithNow"].drop();
- db["viewWithClusterTime"].drop();
-
- // Insert simple documents into the main test collection. Aggregation and view pipelines will
- // augment these docs with time-based fields.
- const numdocs = 1000;
- let bulk = coll.initializeUnorderedBulkOp();
+"use strict";
+
+var replTest = new ReplSetTest({name: "now_and_cluster_time", nodes: 1});
+replTest.startSet();
+replTest.initiate();
+
+var db = replTest.getPrimary().getDB("test");
+
+const coll = db[jsTest.name()];
+const otherColl = db[coll.getName() + "_other"];
+otherColl.drop();
+coll.drop();
+db["viewWithNow"].drop();
+db["viewWithClusterTime"].drop();
+
+// Insert simple documents into the main test collection. Aggregation and view pipelines will
+// augment these docs with time-based fields.
+const numdocs = 1000;
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < numdocs; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute());
+
+// Insert into another collection with pre-made fields for testing the find() command.
+bulk = otherColl.initializeUnorderedBulkOp();
+const timeFieldValue = new Date();
+for (let i = 0; i < numdocs; ++i) {
+ bulk.insert({_id: i, timeField: timeFieldValue, clusterTimeField: new Timestamp(0, 1)});
+}
+assert.commandWorked(bulk.execute());
+
+assert.commandWorked(
+ db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$NOW"}}]));
+const viewWithNow = db["viewWithNow"];
+
+assert.commandWorked(db.createView(
+ "viewWithClusterTime", coll.getName(), [{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
+const viewWithClusterTime = db["viewWithClusterTime"];
+
+function toResultsArray(queryRes) {
+ return Array.isArray(queryRes) ? queryRes : queryRes.toArray();
+}
+
+function runTests(query) {
+ const results = toResultsArray(query());
+ assert.eq(results.length, numdocs);
+
+ // Make sure the values are the same for all documents
for (let i = 0; i < numdocs; ++i) {
- bulk.insert({_id: i});
- }
- assert.commandWorked(bulk.execute());
-
- // Insert into another collection with pre-made fields for testing the find() command.
- bulk = otherColl.initializeUnorderedBulkOp();
- const timeFieldValue = new Date();
- for (let i = 0; i < numdocs; ++i) {
- bulk.insert({_id: i, timeField: timeFieldValue, clusterTimeField: new Timestamp(0, 1)});
- }
- assert.commandWorked(bulk.execute());
-
- assert.commandWorked(
- db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$NOW"}}]));
- const viewWithNow = db["viewWithNow"];
-
- assert.commandWorked(db.createView(
- "viewWithClusterTime", coll.getName(), [{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
- const viewWithClusterTime = db["viewWithClusterTime"];
-
- function toResultsArray(queryRes) {
- return Array.isArray(queryRes) ? queryRes : queryRes.toArray();
- }
-
- function runTests(query) {
- const results = toResultsArray(query());
- assert.eq(results.length, numdocs);
-
- // Make sure the values are the same for all documents
- for (let i = 0; i < numdocs; ++i) {
- assert.eq(results[0].timeField, results[i].timeField);
- }
-
- // Sleep for a while and then rerun.
- sleep(3000);
-
- const resultsLater = toResultsArray(query());
- assert.eq(resultsLater.length, numdocs);
-
- // Later results should be later in time.
- assert.lte(results[0].timeField, resultsLater[0].timeField);
- }
-
- function baseCollectionNowFind() {
- return otherColl.find({$expr: {$lte: ["$timeField", "$$NOW"]}});
- }
-
- function baseCollectionClusterTimeFind() {
- // The test validator examines 'timeField', so we copy clusterTimeField into timeField here.
- const results =
- otherColl.find({$expr: {$lt: ["$clusterTimeField", "$$CLUSTER_TIME"]}}).toArray();
- results.forEach((val, idx) => {
- results[idx].timeField = results[idx].clusterTimeField;
- });
- return results;
- }
-
- function baseCollectionNowAgg() {
- return coll.aggregate([{$addFields: {timeField: "$$NOW"}}]);
- }
-
- function baseCollectionClusterTimeAgg() {
- return coll.aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]);
- }
-
- function fromViewWithNow() {
- return viewWithNow.find();
- }
-
- function fromViewWithClusterTime() {
- return viewWithClusterTime.find();
- }
-
- function withExprNow() {
- return viewWithNow.find({$expr: {$eq: ["$timeField", "$$NOW"]}});
- }
-
- function withExprClusterTime() {
- return viewWithClusterTime.find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}});
+ assert.eq(results[0].timeField, results[i].timeField);
}
- // $$NOW
- runTests(baseCollectionNowFind);
- runTests(baseCollectionNowAgg);
- runTests(fromViewWithNow);
- runTests(withExprNow);
-
- // Test that $$NOW can be used in explain for both find and aggregate.
- assert.commandWorked(coll.explain().find({$expr: {$lte: ["$timeField", "$$NOW"]}}).finish());
- assert.commandWorked(
- viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$NOW"]}}).finish());
- assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$NOW"}}]));
-
- // $$CLUSTER_TIME
- runTests(baseCollectionClusterTimeFind);
- runTests(baseCollectionClusterTimeAgg);
- runTests(fromViewWithClusterTime);
- runTests(withExprClusterTime);
-
- // Test that $$CLUSTER_TIME can be used in explain for both find and aggregate.
- assert.commandWorked(
- coll.explain().find({$expr: {$lte: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
- assert.commandWorked(
- viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
- assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
-
- replTest.stopSet();
+ // Sleep for a while and then rerun.
+ sleep(3000);
+
+ const resultsLater = toResultsArray(query());
+ assert.eq(resultsLater.length, numdocs);
+
+ // Later results should be later in time.
+ assert.lte(results[0].timeField, resultsLater[0].timeField);
+}
+
+function baseCollectionNowFind() {
+ return otherColl.find({$expr: {$lte: ["$timeField", "$$NOW"]}});
+}
+
+function baseCollectionClusterTimeFind() {
+ // The test validator examines 'timeField', so we copy clusterTimeField into timeField here.
+ const results =
+ otherColl.find({$expr: {$lt: ["$clusterTimeField", "$$CLUSTER_TIME"]}}).toArray();
+ results.forEach((val, idx) => {
+ results[idx].timeField = results[idx].clusterTimeField;
+ });
+ return results;
+}
+
+function baseCollectionNowAgg() {
+ return coll.aggregate([{$addFields: {timeField: "$$NOW"}}]);
+}
+
+function baseCollectionClusterTimeAgg() {
+ return coll.aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]);
+}
+
+function fromViewWithNow() {
+ return viewWithNow.find();
+}
+
+function fromViewWithClusterTime() {
+ return viewWithClusterTime.find();
+}
+
+function withExprNow() {
+ return viewWithNow.find({$expr: {$eq: ["$timeField", "$$NOW"]}});
+}
+
+function withExprClusterTime() {
+ return viewWithClusterTime.find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}});
+}
+
+// $$NOW
+runTests(baseCollectionNowFind);
+runTests(baseCollectionNowAgg);
+runTests(fromViewWithNow);
+runTests(withExprNow);
+
+// Test that $$NOW can be used in explain for both find and aggregate.
+assert.commandWorked(coll.explain().find({$expr: {$lte: ["$timeField", "$$NOW"]}}).finish());
+assert.commandWorked(viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$NOW"]}}).finish());
+assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$NOW"}}]));
+
+// $$CLUSTER_TIME
+runTests(baseCollectionClusterTimeFind);
+runTests(baseCollectionClusterTimeAgg);
+runTests(fromViewWithClusterTime);
+runTests(withExprClusterTime);
+
+// Test that $$CLUSTER_TIME can be used in explain for both find and aggregate.
+assert.commandWorked(
+ coll.explain().find({$expr: {$lte: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
+assert.commandWorked(
+ viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
+assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
+
+replTest.stopSet();
}());
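Since $$NOW binds to a single Date for the whole query and $$CLUSTER_TIME to a single Timestamp, they can also drive relative-time filters. A small usage sketch, separate from the patch, assuming a hypothetical collection with a createdAt date field:

    // Match documents created in the last hour; every document is compared
    // against the same cutoff because $$NOW is evaluated once per query.
    const oneHourMs = 60 * 60 * 1000;
    db.events.find({$expr: {$gte: ["$createdAt", {$subtract: ["$$NOW", oneHourMs]}]}});
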
diff --git a/jstests/sharding/now_variable_sharding.js b/jstests/sharding/now_variable_sharding.js
index 49e2833b46f..13f9b90e626 100644
--- a/jstests/sharding/now_variable_sharding.js
+++ b/jstests/sharding/now_variable_sharding.js
@@ -3,150 +3,149 @@
*/
// @tags: [requires_find_command]
(function() {
- "use strict";
-
- var st = new ShardingTest({mongos: 1, shards: 2});
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- st.adminCommand({enableSharding: "test"});
- st.ensurePrimaryShard("test", st.rs0.getURL());
-
- var db = st.s.getDB("test");
-
- const numdocs = 1000;
-
- const coll = db[jsTest.name()];
- const otherColl = db[coll.getName() + "_other"];
-
- for (let testColl of[coll, otherColl]) {
- testColl.createIndex({_id: 1}, {unique: true});
-
- st.adminCommand({shardcollection: testColl.getFullName(), key: {_id: 1}});
- st.adminCommand({split: testColl.getFullName(), middle: {_id: numdocs / 2}});
-
- st.adminCommand({
- moveChunk: testColl.getFullName(),
- find: {_id: 0},
- to: st.shard1.shardName,
- _waitForDelete: true
- });
- st.adminCommand({
- moveChunk: testColl.getFullName(),
- find: {_id: numdocs / 2},
- to: st.shard0.shardName,
- _waitForDelete: true
- });
- }
-
- // Insert simple documents into the main test collection. Aggregation and view pipelines will
- // augment these docs with time-based fields.
- let bulk = coll.initializeUnorderedBulkOp();
+"use strict";
+
+var st = new ShardingTest({mongos: 1, shards: 2});
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+st.adminCommand({enableSharding: "test"});
+st.ensurePrimaryShard("test", st.rs0.getURL());
+
+var db = st.s.getDB("test");
+
+const numdocs = 1000;
+
+const coll = db[jsTest.name()];
+const otherColl = db[coll.getName() + "_other"];
+
+for (let testColl of [coll, otherColl]) {
+ testColl.createIndex({_id: 1}, {unique: true});
+
+ st.adminCommand({shardcollection: testColl.getFullName(), key: {_id: 1}});
+ st.adminCommand({split: testColl.getFullName(), middle: {_id: numdocs / 2}});
+
+ st.adminCommand({
+ moveChunk: testColl.getFullName(),
+ find: {_id: 0},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+ });
+ st.adminCommand({
+ moveChunk: testColl.getFullName(),
+ find: {_id: numdocs / 2},
+ to: st.shard0.shardName,
+ _waitForDelete: true
+ });
+}
+
+// Insert simple documents into the main test collection. Aggregation and view pipelines will
+// augment these docs with time-based fields.
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < numdocs; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute());
+
+// Insert into another collection with pre-made fields for testing the find() command.
+bulk = otherColl.initializeUnorderedBulkOp();
+const timeFieldValue = new Date();
+for (let i = 0; i < numdocs; ++i) {
+ bulk.insert({_id: i, timeField: timeFieldValue, clusterTimeField: new Timestamp(0, 1)});
+}
+assert.commandWorked(bulk.execute());
+
+assert.commandWorked(
+ db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$NOW"}}]));
+const viewWithNow = db["viewWithNow"];
+
+assert.commandWorked(db.createView(
+ "viewWithClusterTime", coll.getName(), [{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
+const viewWithClusterTime = db["viewWithClusterTime"];
+
+function toResultsArray(queryRes) {
+ return Array.isArray(queryRes) ? queryRes : queryRes.toArray();
+}
+
+function runTests(query) {
+ const results = toResultsArray(query());
+ assert.eq(results.length, numdocs);
+
+ // Make sure the values are the same for all documents
for (let i = 0; i < numdocs; ++i) {
- bulk.insert({_id: i});
- }
- assert.commandWorked(bulk.execute());
-
- // Insert into another collection with pre-made fields for testing the find() command.
- bulk = otherColl.initializeUnorderedBulkOp();
- const timeFieldValue = new Date();
- for (let i = 0; i < numdocs; ++i) {
- bulk.insert({_id: i, timeField: timeFieldValue, clusterTimeField: new Timestamp(0, 1)});
- }
- assert.commandWorked(bulk.execute());
-
- assert.commandWorked(
- db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$NOW"}}]));
- const viewWithNow = db["viewWithNow"];
-
- assert.commandWorked(db.createView(
- "viewWithClusterTime", coll.getName(), [{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
- const viewWithClusterTime = db["viewWithClusterTime"];
-
- function toResultsArray(queryRes) {
- return Array.isArray(queryRes) ? queryRes : queryRes.toArray();
- }
-
- function runTests(query) {
- const results = toResultsArray(query());
- assert.eq(results.length, numdocs);
-
- // Make sure the values are the same for all documents
- for (let i = 0; i < numdocs; ++i) {
- assert.eq(results[0].timeField, results[i].timeField);
- }
-
- // Sleep for a while and then rerun.
- sleep(3000);
-
- const resultsLater = toResultsArray(query());
- assert.eq(resultsLater.length, numdocs);
-
- // Later results should be later in time.
- assert.lte(results[0].timeField, resultsLater[0].timeField);
- }
-
- function baseCollectionNowFind() {
- return otherColl.find({$expr: {$lte: ["$timeField", "$$NOW"]}});
- }
-
- function baseCollectionClusterTimeFind() {
- // The test validator examines 'timeField', so we copy clusterTimeField into timeField here.
- const results =
- otherColl.find({$expr: {$lt: ["$clusterTimeField", "$$CLUSTER_TIME"]}}).toArray();
- results.forEach((val, idx) => {
- results[idx].timeField = results[idx].clusterTimeField;
- });
- return results;
- }
-
- function baseCollectionNowAgg() {
- return coll.aggregate([{$addFields: {timeField: "$$NOW"}}]);
- }
-
- function baseCollectionClusterTimeAgg() {
- return coll.aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]);
- }
-
- function fromViewWithNow() {
- return viewWithNow.find();
- }
-
- function fromViewWithClusterTime() {
- return viewWithClusterTime.find();
- }
-
- function withExprNow() {
- return viewWithNow.find({$expr: {$eq: ["$timeField", "$$NOW"]}});
- }
-
- function withExprClusterTime() {
- return viewWithClusterTime.find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}});
+ assert.eq(results[0].timeField, results[i].timeField);
}
- // $$NOW
- runTests(baseCollectionNowFind);
- runTests(baseCollectionNowAgg);
- runTests(fromViewWithNow);
- runTests(withExprNow);
-
- // Test that $$NOW can be used in explain for both find and aggregate.
- assert.commandWorked(coll.explain().find({$expr: {$lte: ["$timeField", "$$NOW"]}}).finish());
- assert.commandWorked(
- viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$NOW"]}}).finish());
- assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$NOW"}}]));
-
- // $$CLUSTER_TIME
- runTests(baseCollectionClusterTimeFind);
- runTests(baseCollectionClusterTimeAgg);
- runTests(fromViewWithClusterTime);
- runTests(withExprClusterTime);
-
- // Test that $$CLUSTER_TIME can be used in explain for both find and aggregate.
- assert.commandWorked(
- coll.explain().find({$expr: {$lte: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
- assert.commandWorked(
- viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
- assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
-
- st.stop();
+ // Sleep for a while and then rerun.
+ sleep(3000);
+
+ const resultsLater = toResultsArray(query());
+ assert.eq(resultsLater.length, numdocs);
+
+ // Later results should be later in time.
+ assert.lte(results[0].timeField, resultsLater[0].timeField);
+}
+
+function baseCollectionNowFind() {
+ return otherColl.find({$expr: {$lte: ["$timeField", "$$NOW"]}});
+}
+
+function baseCollectionClusterTimeFind() {
+ // The test validator examines 'timeField', so we copy clusterTimeField into timeField here.
+ const results =
+ otherColl.find({$expr: {$lt: ["$clusterTimeField", "$$CLUSTER_TIME"]}}).toArray();
+ results.forEach((val, idx) => {
+ results[idx].timeField = results[idx].clusterTimeField;
+ });
+ return results;
+}
+
+function baseCollectionNowAgg() {
+ return coll.aggregate([{$addFields: {timeField: "$$NOW"}}]);
+}
+
+function baseCollectionClusterTimeAgg() {
+ return coll.aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]);
+}
+
+function fromViewWithNow() {
+ return viewWithNow.find();
+}
+
+function fromViewWithClusterTime() {
+ return viewWithClusterTime.find();
+}
+
+function withExprNow() {
+ return viewWithNow.find({$expr: {$eq: ["$timeField", "$$NOW"]}});
+}
+
+function withExprClusterTime() {
+ return viewWithClusterTime.find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}});
+}
+
+// $$NOW
+runTests(baseCollectionNowFind);
+runTests(baseCollectionNowAgg);
+runTests(fromViewWithNow);
+runTests(withExprNow);
+
+// Test that $$NOW can be used in explain for both find and aggregate.
+assert.commandWorked(coll.explain().find({$expr: {$lte: ["$timeField", "$$NOW"]}}).finish());
+assert.commandWorked(viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$NOW"]}}).finish());
+assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$NOW"}}]));
+
+// $$CLUSTER_TIME
+runTests(baseCollectionClusterTimeFind);
+runTests(baseCollectionClusterTimeAgg);
+runTests(fromViewWithClusterTime);
+runTests(withExprClusterTime);
+
+// Test that $$CLUSTER_TIME can be used in explain for both find and aggregate.
+assert.commandWorked(
+ coll.explain().find({$expr: {$lte: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
+assert.commandWorked(
+ viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
+assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
+
+st.stop();
}());
diff --git a/jstests/sharding/operation_time_api.js b/jstests/sharding/operation_time_api.js
index a64468f027b..cd694503590 100644
--- a/jstests/sharding/operation_time_api.js
+++ b/jstests/sharding/operation_time_api.js
@@ -6,66 +6,66 @@
* - standalone mongod
*/
(function() {
- "use strict";
+"use strict";
- function responseContainsTimestampOperationTime(res) {
- return res.operationTime !== undefined && isTimestamp(res.operationTime);
- }
+function responseContainsTimestampOperationTime(res) {
+ return res.operationTime !== undefined && isTimestamp(res.operationTime);
+}
- function isTimestamp(val) {
- return Object.prototype.toString.call(val) === "[object Timestamp]";
- }
+function isTimestamp(val) {
+ return Object.prototype.toString.call(val) === "[object Timestamp]";
+}
- // A mongos that talks to a non-sharded collection on a sharded replica set returns an
- // operationTime that is a Timestamp.
- var st = new ShardingTest({name: "operation_time_api", shards: {rs0: {nodes: 1}}});
+// A mongos that talks to a non-sharded collection on a sharded replica set returns an
+// operationTime that is a Timestamp.
+var st = new ShardingTest({name: "operation_time_api", shards: {rs0: {nodes: 1}}});
- var testDB = st.s.getDB("test");
- var res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 1}]}));
- assert(responseContainsTimestampOperationTime(res),
- "Expected response from a mongos talking to a non-sharded collection on a sharded " +
- "replica set to contain an operationTime, received: " + tojson(res));
+var testDB = st.s.getDB("test");
+var res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 1}]}));
+assert(responseContainsTimestampOperationTime(res),
+ "Expected response from a mongos talking to a non-sharded collection on a sharded " +
+ "replica set to contain an operationTime, received: " + tojson(res));
- // A mongos that talks to a sharded collection on a sharded replica set returns an operationTime
- // that is a Timestamp.
- assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
- assert.commandWorked(st.s.adminCommand({shardCollection: "test.bar", key: {x: 1}}));
+// A mongos that talks to a sharded collection on a sharded replica set returns an operationTime
+// that is a Timestamp.
+assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
+assert.commandWorked(st.s.adminCommand({shardCollection: "test.bar", key: {x: 1}}));
- res = assert.commandWorked(testDB.runCommand({insert: "bar", documents: [{x: 2}]}));
- assert(responseContainsTimestampOperationTime(res),
- "Expected response from a mongos inserting to a sharded collection on a sharded " +
- "replica set to contain an operationTime, received: " + tojson(res));
+res = assert.commandWorked(testDB.runCommand({insert: "bar", documents: [{x: 2}]}));
+assert(responseContainsTimestampOperationTime(res),
+ "Expected response from a mongos inserting to a sharded collection on a sharded " +
+ "replica set to contain an operationTime, received: " + tojson(res));
- // A mongod in a sharded replica set returns an operationTime that is a Timestamp.
- testDB = st.rs0.getPrimary().getDB("test");
- res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 3}]}));
- assert(responseContainsTimestampOperationTime(res),
- "Expected response from a mongod in a sharded replica set to contain an " +
- "operationTime, received: " + tojson(res));
+// A mongod in a sharded replica set returns an operationTime that is a Timestamp.
+testDB = st.rs0.getPrimary().getDB("test");
+res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 3}]}));
+assert(responseContainsTimestampOperationTime(res),
+ "Expected response from a mongod in a sharded replica set to contain an " +
+ "operationTime, received: " + tojson(res));
- st.stop();
+st.stop();
- // A mongod from a non-sharded replica set returns an operationTime that is a Timestamp.
- var replTest = new ReplSetTest({name: "operation_time_api_non_sharded_replset", nodes: 1});
- replTest.startSet();
- replTest.initiate();
+// A mongod from a non-sharded replica set returns an operationTime that is a Timestamp.
+var replTest = new ReplSetTest({name: "operation_time_api_non_sharded_replset", nodes: 1});
+replTest.startSet();
+replTest.initiate();
- testDB = replTest.getPrimary().getDB("test");
- res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 4}]}));
- assert(responseContainsTimestampOperationTime(res),
- "Expected response from a non-sharded replica set to contain an operationTime, " +
- "received: " + tojson(res));
+testDB = replTest.getPrimary().getDB("test");
+res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 4}]}));
+assert(responseContainsTimestampOperationTime(res),
+ "Expected response from a non-sharded replica set to contain an operationTime, " +
+ "received: " + tojson(res));
- replTest.stopSet();
+replTest.stopSet();
- // A standalone mongod does not return an operationTime.
- var standalone = MongoRunner.runMongod();
+// A standalone mongod does not return an operationTime.
+var standalone = MongoRunner.runMongod();
- testDB = standalone.getDB("test");
- res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 5}]}));
- assert(!responseContainsTimestampOperationTime(res),
- "Expected response from a standalone mongod to not contain an operationTime, " +
- "received: " + tojson(res));
+testDB = standalone.getDB("test");
+res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 5}]}));
+assert(!responseContainsTimestampOperationTime(res),
+ "Expected response from a standalone mongod to not contain an operationTime, " +
+ "received: " + tojson(res));
- MongoRunner.stopMongod(standalone);
+MongoRunner.stopMongod(standalone);
})();
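The operationTime returned here is the value a causally consistent client would send back as afterClusterTime on its next read. A brief illustrative sketch of that round trip (not part of this patch), against a replica-set or mongos connection like the ones above:

    // Read your own write by passing the write's operationTime as
    // afterClusterTime on the subsequent read.
    const writeRes = assert.commandWorked(
        testDB.runCommand({insert: "foo", documents: [{x: 6}]}));
    const readRes = assert.commandWorked(testDB.runCommand({
        find: "foo",
        filter: {x: 6},
        readConcern: {level: "local", afterClusterTime: writeRes.operationTime}
    }));
    assert.eq(1, readRes.cursor.firstBatch.length);
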
diff --git a/jstests/sharding/oplog_document_key.js b/jstests/sharding/oplog_document_key.js
index ba20575b031..d138457e4f0 100644
--- a/jstests/sharding/oplog_document_key.js
+++ b/jstests/sharding/oplog_document_key.js
@@ -6,149 +6,149 @@
*/
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({name: 'test', shards: {rs0: {nodes: 1}}});
- var db = st.s.getDB('test');
+var st = new ShardingTest({name: 'test', shards: {rs0: {nodes: 1}}});
+var db = st.s.getDB('test');
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- // 'test.un' is left unsharded.
- assert.commandWorked(db.adminCommand({shardcollection: 'test.byId', key: {_id: 1}}));
- assert.commandWorked(db.adminCommand({shardcollection: 'test.byX', key: {x: 1}}));
- assert.commandWorked(db.adminCommand({shardcollection: 'test.byXId', key: {x: 1, _id: 1}}));
- assert.commandWorked(db.adminCommand({shardcollection: 'test.byIdX', key: {_id: 1, x: 1}}));
+// 'test.un' is left unsharded.
+assert.commandWorked(db.adminCommand({shardcollection: 'test.byId', key: {_id: 1}}));
+assert.commandWorked(db.adminCommand({shardcollection: 'test.byX', key: {x: 1}}));
+assert.commandWorked(db.adminCommand({shardcollection: 'test.byXId', key: {x: 1, _id: 1}}));
+assert.commandWorked(db.adminCommand({shardcollection: 'test.byIdX', key: {_id: 1, x: 1}}));
- assert.writeOK(db.un.insert({_id: 10, x: 50, y: 60}));
- assert.writeOK(db.un.insert({_id: 30, x: 70, y: 80}));
+assert.writeOK(db.un.insert({_id: 10, x: 50, y: 60}));
+assert.writeOK(db.un.insert({_id: 30, x: 70, y: 80}));
- assert.writeOK(db.byId.insert({_id: 11, x: 51, y: 61}));
- assert.writeOK(db.byId.insert({_id: 31, x: 71, y: 81}));
+assert.writeOK(db.byId.insert({_id: 11, x: 51, y: 61}));
+assert.writeOK(db.byId.insert({_id: 31, x: 71, y: 81}));
- assert.writeOK(db.byX.insert({_id: 12, x: 52, y: 62}));
- assert.writeOK(db.byX.insert({_id: 32, x: 72, y: 82}));
+assert.writeOK(db.byX.insert({_id: 12, x: 52, y: 62}));
+assert.writeOK(db.byX.insert({_id: 32, x: 72, y: 82}));
- assert.writeOK(db.byXId.insert({_id: 13, x: 53, y: 63}));
- assert.writeOK(db.byXId.insert({_id: 33, x: 73, y: 83}));
+assert.writeOK(db.byXId.insert({_id: 13, x: 53, y: 63}));
+assert.writeOK(db.byXId.insert({_id: 33, x: 73, y: 83}));
- assert.writeOK(db.byIdX.insert({_id: 14, x: 54, y: 64}));
- assert.writeOK(db.byIdX.insert({_id: 34, x: 74, y: 84}));
+assert.writeOK(db.byIdX.insert({_id: 14, x: 54, y: 64}));
+assert.writeOK(db.byIdX.insert({_id: 34, x: 74, y: 84}));
- var oplog = st.rs0.getPrimary().getDB('local').oplog.rs;
+var oplog = st.rs0.getPrimary().getDB('local').oplog.rs;
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test update command on 'un'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test update command on 'un'");
- assert.writeOK(db.un.update({_id: 10, x: 50}, {$set: {y: 70}})); // in place
- assert.writeOK(db.un.update({_id: 30, x: 70}, {y: 75})); // replacement
+assert.writeOK(db.un.update({_id: 10, x: 50}, {$set: {y: 70}})); // in place
+assert.writeOK(db.un.update({_id: 30, x: 70}, {y: 75})); // replacement
- // unsharded, only _id appears in o2:
+// unsharded, only _id appears in o2:
- var a = oplog.findOne({ns: 'test.un', op: 'u', 'o2._id': 10});
- assert.eq(a.o2, {_id: 10});
+var a = oplog.findOne({ns: 'test.un', op: 'u', 'o2._id': 10});
+assert.eq(a.o2, {_id: 10});
- var b = oplog.findOne({ns: 'test.un', op: 'u', 'o2._id': 30});
- assert.eq(b.o2, {_id: 30});
+var b = oplog.findOne({ns: 'test.un', op: 'u', 'o2._id': 30});
+assert.eq(b.o2, {_id: 30});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test update command on 'byId'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test update command on 'byId'");
- assert.writeOK(db.byId.update({_id: 11}, {$set: {y: 71}})); // in place
- assert.writeOK(db.byId.update({_id: 31}, {x: 71, y: 76})); // replacement
+assert.writeOK(db.byId.update({_id: 11}, {$set: {y: 71}})); // in place
+assert.writeOK(db.byId.update({_id: 31}, {x: 71, y: 76})); // replacement
- // sharded by {_id: 1}: only _id appears in o2:
+// sharded by {_id: 1}: only _id appears in o2:
- a = oplog.findOne({ns: 'test.byId', op: 'u', 'o2._id': 11});
- assert.eq(a.o2, {_id: 11});
+a = oplog.findOne({ns: 'test.byId', op: 'u', 'o2._id': 11});
+assert.eq(a.o2, {_id: 11});
- b = oplog.findOne({ns: 'test.byId', op: 'u', 'o2._id': 31});
- assert.eq(b.o2, {_id: 31});
+b = oplog.findOne({ns: 'test.byId', op: 'u', 'o2._id': 31});
+assert.eq(b.o2, {_id: 31});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test update command on 'byX'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test update command on 'byX'");
- assert.writeOK(db.byX.update({x: 52}, {$set: {y: 72}})); // in place
- assert.writeOK(db.byX.update({x: 72}, {x: 72, y: 77})); // replacement
+assert.writeOK(db.byX.update({x: 52}, {$set: {y: 72}})); // in place
+assert.writeOK(db.byX.update({x: 72}, {x: 72, y: 77})); // replacement
- // sharded by {x: 1}: x appears in o2, followed by _id:
+// sharded by {x: 1}: x appears in o2, followed by _id:
- a = oplog.findOne({ns: 'test.byX', op: 'u', 'o2._id': 12});
- assert.eq(a.o2, {x: 52, _id: 12});
+a = oplog.findOne({ns: 'test.byX', op: 'u', 'o2._id': 12});
+assert.eq(a.o2, {x: 52, _id: 12});
- b = oplog.findOne({ns: 'test.byX', op: 'u', 'o2._id': 32});
- assert.eq(b.o2, {x: 72, _id: 32});
+b = oplog.findOne({ns: 'test.byX', op: 'u', 'o2._id': 32});
+assert.eq(b.o2, {x: 72, _id: 32});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test update command on 'byXId'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test update command on 'byXId'");
- assert.writeOK(db.byXId.update({_id: 13, x: 53}, {$set: {y: 73}})); // in place
- assert.writeOK(db.byXId.update({_id: 33, x: 73}, {x: 73, y: 78})); // replacement
+assert.writeOK(db.byXId.update({_id: 13, x: 53}, {$set: {y: 73}})); // in place
+assert.writeOK(db.byXId.update({_id: 33, x: 73}, {x: 73, y: 78})); // replacement
- // sharded by {x: 1, _id: 1}: x appears in o2, followed by _id:
+// sharded by {x: 1, _id: 1}: x appears in o2, followed by _id:
- a = oplog.findOne({ns: 'test.byXId', op: 'u', 'o2._id': 13});
- assert.eq(a.o2, {x: 53, _id: 13});
+a = oplog.findOne({ns: 'test.byXId', op: 'u', 'o2._id': 13});
+assert.eq(a.o2, {x: 53, _id: 13});
- b = oplog.findOne({ns: 'test.byXId', op: 'u', 'o2._id': 33});
- assert.eq(b.o2, {x: 73, _id: 33});
+b = oplog.findOne({ns: 'test.byXId', op: 'u', 'o2._id': 33});
+assert.eq(b.o2, {x: 73, _id: 33});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test update command on 'byIdX'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test update command on 'byIdX'");
- assert.writeOK(db.byIdX.update({_id: 14, x: 54}, {$set: {y: 74}})); // in place
- assert.writeOK(db.byIdX.update({_id: 34, x: 74}, {x: 74, y: 79})); // replacement
+assert.writeOK(db.byIdX.update({_id: 14, x: 54}, {$set: {y: 74}})); // in place
+assert.writeOK(db.byIdX.update({_id: 34, x: 74}, {x: 74, y: 79})); // replacement
- // sharded by {_id: 1, x: 1}: _id appears in o2, followed by x:
+// sharded by {_id: 1, x: 1}: _id appears in o2, followed by x:
- a = oplog.findOne({ns: 'test.byIdX', op: 'u', 'o2._id': 14});
- assert.eq(a.o2, {_id: 14, x: 54});
+a = oplog.findOne({ns: 'test.byIdX', op: 'u', 'o2._id': 14});
+assert.eq(a.o2, {_id: 14, x: 54});
- b = oplog.findOne({ns: 'test.byIdX', op: 'u', 'o2._id': 34});
- assert.eq(b.o2, {_id: 34, x: 74});
+b = oplog.findOne({ns: 'test.byIdX', op: 'u', 'o2._id': 34});
+assert.eq(b.o2, {_id: 34, x: 74});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test remove command: 'un'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test remove command: 'un'");
- assert.writeOK(db.un.remove({_id: 10}));
- assert.writeOK(db.un.remove({_id: 30}));
+assert.writeOK(db.un.remove({_id: 10}));
+assert.writeOK(db.un.remove({_id: 30}));
- a = oplog.findOne({ns: 'test.un', op: 'd', 'o._id': 10});
- assert.eq(a.o, {_id: 10});
- b = oplog.findOne({ns: 'test.un', op: 'd', 'o._id': 30});
- assert.eq(b.o, {_id: 30});
+a = oplog.findOne({ns: 'test.un', op: 'd', 'o._id': 10});
+assert.eq(a.o, {_id: 10});
+b = oplog.findOne({ns: 'test.un', op: 'd', 'o._id': 30});
+assert.eq(b.o, {_id: 30});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test remove command: 'byX'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test remove command: 'byX'");
- assert.writeOK(db.byX.remove({_id: 12}));
- assert.writeOK(db.byX.remove({_id: 32}));
+assert.writeOK(db.byX.remove({_id: 12}));
+assert.writeOK(db.byX.remove({_id: 32}));
- a = oplog.findOne({ns: 'test.byX', op: 'd', 'o._id': 12});
- assert.eq(a.o, {x: 52, _id: 12});
- b = oplog.findOne({ns: 'test.byX', op: 'd', 'o._id': 32});
- assert.eq(b.o, {x: 72, _id: 32});
+a = oplog.findOne({ns: 'test.byX', op: 'd', 'o._id': 12});
+assert.eq(a.o, {x: 52, _id: 12});
+b = oplog.findOne({ns: 'test.byX', op: 'd', 'o._id': 32});
+assert.eq(b.o, {x: 72, _id: 32});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test remove command: 'byXId'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test remove command: 'byXId'");
- assert.writeOK(db.byXId.remove({_id: 13}));
- assert.writeOK(db.byXId.remove({_id: 33}));
+assert.writeOK(db.byXId.remove({_id: 13}));
+assert.writeOK(db.byXId.remove({_id: 33}));
- a = oplog.findOne({ns: 'test.byXId', op: 'd', 'o._id': 13});
- assert.eq(a.o, {x: 53, _id: 13});
- b = oplog.findOne({ns: 'test.byXId', op: 'd', 'o._id': 33});
- assert.eq(b.o, {x: 73, _id: 33});
+a = oplog.findOne({ns: 'test.byXId', op: 'd', 'o._id': 13});
+assert.eq(a.o, {x: 53, _id: 13});
+b = oplog.findOne({ns: 'test.byXId', op: 'd', 'o._id': 33});
+assert.eq(b.o, {x: 73, _id: 33});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test remove command: 'byIdX'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test remove command: 'byIdX'");
- assert.writeOK(db.byIdX.remove({_id: 14}));
- assert.writeOK(db.byIdX.remove({_id: 34}));
+assert.writeOK(db.byIdX.remove({_id: 14}));
+assert.writeOK(db.byIdX.remove({_id: 34}));
- a = oplog.findOne({ns: 'test.byIdX', op: 'd', 'o._id': 14});
- assert.eq(a.o, {_id: 14, x: 54});
- b = oplog.findOne({ns: 'test.byIdX', op: 'd', 'o._id': 34});
- assert.eq(b.o, {_id: 34, x: 74});
+a = oplog.findOne({ns: 'test.byIdX', op: 'd', 'o._id': 14});
+assert.eq(a.o, {_id: 14, x: 54});
+b = oplog.findOne({ns: 'test.byIdX', op: 'd', 'o._id': 34});
+assert.eq(b.o, {_id: 34, x: 74});
- st.stop();
+st.stop();
})();
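The assertions above reduce to: for updates the document key (the shard-key fields in shard-key order, with _id appended when it is not already part of the key) is recorded in o2, and for deletes it is recorded in o. An illustrative helper, not part of the patch, that pulls the most recent update entry for a namespace:

    // Fetch the latest update oplog entry for a namespace and return the
    // document key recorded in its o2 field.
    function latestUpdateDocumentKey(primary, ns) {
        return primary.getDB('local')
            .oplog.rs.find({ns: ns, op: 'u'})
            .sort({$natural: -1})
            .limit(1)
            .toArray()[0]
            .o2;
    }
    // e.g. latestUpdateDocumentKey(st.rs0.getPrimary(), 'test.byX') -> {x: 72, _id: 32}
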
diff --git a/jstests/sharding/out_fails_to_replace_sharded_collection.js b/jstests/sharding/out_fails_to_replace_sharded_collection.js
index d96a5489ac5..ba72de459ee 100644
--- a/jstests/sharding/out_fails_to_replace_sharded_collection.js
+++ b/jstests/sharding/out_fails_to_replace_sharded_collection.js
@@ -1,46 +1,46 @@
// Tests that an aggregate with an $out cannot output to a sharded collection, even if the
// collection becomes sharded during the aggregation.
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For 'assertErrorCode'.
-
- const st = new ShardingTest({shards: 2});
-
- const mongosDB = st.s.getDB("test");
- const sourceColl = mongosDB.source;
- const targetColl = mongosDB.target;
-
- assert.commandWorked(sourceColl.insert(Array.from({length: 10}, (_, i) => ({_id: i}))));
-
- // First simply test that the $out fails if the target collection is definitely sharded, meaning
- // it starts as sharded and remains sharded for the duration of the $out.
- st.shardColl(targetColl, {_id: 1}, false);
- assertErrorCode(sourceColl, [{$out: targetColl.getName()}], 28769);
-
- // Test that the "legacy" mode will not succeed when outputting to a sharded collection, even
- // for explain.
- let error = assert.throws(() => sourceColl.explain().aggregate([{$out: targetColl.getName()}]));
- assert.eq(error.code, 28769);
-
- // Then test that the $out fails if the collection becomes sharded between establishing the
- // cursor and performing the $out.
- targetColl.drop();
- const cursorResponse = assert.commandWorked(mongosDB.runCommand({
- aggregate: sourceColl.getName(),
- pipeline: [{$out: targetColl.getName()}],
- cursor: {batchSize: 0}
- }));
- st.shardColl(targetColl, {_id: 1}, false);
- error = assert.throws(() => new DBCommandCursor(mongosDB, cursorResponse).itcount());
- // On master, we check whether the output collection is sharded at parse time so this error code
- // is simply 'CommandFailed' because it is a failed rename going through the DBDirectClient. The
- // message should indicate that the rename failed. In a mixed-version environment we can end up
- // with the code 17017 because a v4.0 shard will assert the collection is unsharded before
- // performing any writes but after parse time, instead of relying on the rename to fail. Because
- // this test is run in a mixed-version passthrough we have to allow both. Once 4.2 becomes the
- // last stable version, this assertion can be tightened up to only expect CommandFailed.
- assert.contains(error.code, [ErrorCodes.CommandFailed, 17017]);
-
- st.stop();
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For 'assertErrorCode'.
+
+const st = new ShardingTest({shards: 2});
+
+const mongosDB = st.s.getDB("test");
+const sourceColl = mongosDB.source;
+const targetColl = mongosDB.target;
+
+assert.commandWorked(sourceColl.insert(Array.from({length: 10}, (_, i) => ({_id: i}))));
+
+// First simply test that the $out fails if the target collection is definitely sharded, meaning
+// it starts as sharded and remains sharded for the duration of the $out.
+st.shardColl(targetColl, {_id: 1}, false);
+assertErrorCode(sourceColl, [{$out: targetColl.getName()}], 28769);
+
+// Test that the "legacy" mode will not succeed when outputting to a sharded collection, even
+// for explain.
+let error = assert.throws(() => sourceColl.explain().aggregate([{$out: targetColl.getName()}]));
+assert.eq(error.code, 28769);
+
+// Then test that the $out fails if the collection becomes sharded between establishing the
+// cursor and performing the $out.
+targetColl.drop();
+const cursorResponse = assert.commandWorked(mongosDB.runCommand({
+ aggregate: sourceColl.getName(),
+ pipeline: [{$out: targetColl.getName()}],
+ cursor: {batchSize: 0}
+}));
+st.shardColl(targetColl, {_id: 1}, false);
+error = assert.throws(() => new DBCommandCursor(mongosDB, cursorResponse).itcount());
+// On master, we check whether the output collection is sharded at parse time so this error code
+// is simply 'CommandFailed' because it is a failed rename going through the DBDirectClient. The
+// message should indicate that the rename failed. In a mixed-version environment we can end up
+// with the code 17017 because a v4.0 shard will assert the collection is unsharded before
+// performing any writes but after parse time, instead of relying on the rename to fail. Because
+// this test is run in a mixed-version passthrough we have to allow both. Once 4.2 becomes the
+// last stable version, this assertion can be tightened up to only expect CommandFailed.
+assert.contains(error.code, [ErrorCodes.CommandFailed, 17017]);
+
+st.stop();
}());
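If aggregation output really must land in a sharded collection, $merge (available from 4.2) is the stage that supports a sharded target; a hedged usage sketch with the same collections (the whenMatched/whenNotMatched choices are illustrative):

    // Unlike $out, $merge may target a sharded collection. With the target
    // sharded on {_id: 1}, the default "on: _id" satisfies the shard-key rule.
    sourceColl.aggregate([{
        $merge: {
            into: targetColl.getName(),
            whenMatched: "replace",
            whenNotMatched: "insert"
        }
    }]);
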
diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js
index 04217dcc390..c02e708fd53 100644
--- a/jstests/sharding/parallel.js
+++ b/jstests/sharding/parallel.js
@@ -1,58 +1,55 @@
// This test fails when run with authentication because benchRun with auth is broken: SERVER-6388
(function() {
- "use strict";
-
- var numShards = 3;
- var s = new ShardingTest({name: "parallel", shards: numShards, mongos: 2});
-
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-
- var db = s.getDB("test");
-
- var N = 10000;
- var shards = [s.shard0.shardName, s.shard1.shardName, s.shard2.shardName];
-
- for (var i = 0; i < N; i += (N / 10)) {
- s.adminCommand({split: "test.foo", middle: {_id: i}});
- s.s.getDB('admin').runCommand({
- moveChunk: "test.foo",
- find: {_id: i},
- to: shards[Math.floor(Math.random() * numShards)]
- });
- }
-
- s.startBalancer();
-
- var bulk = db.foo.initializeUnorderedBulkOp();
- for (i = 0; i < N; i++)
- bulk.insert({_id: i});
- assert.writeOK(bulk.execute());
-
- var doCommand = function(dbname, cmd) {
- x = benchRun({
- ops: [{op: "findOne", ns: dbname + ".$cmd", query: cmd}],
- host: db.getMongo().host,
- parallel: 2,
- seconds: 2
- });
- printjson(x);
- x = benchRun({
- ops: [{op: "findOne", ns: dbname + ".$cmd", query: cmd}],
- host: s._mongos[1].host,
- parallel: 2,
- seconds: 2
- });
- printjson(x);
- };
-
- doCommand("test", {dbstats: 1});
- doCommand("config", {dbstats: 1});
-
- var x = s.getDB("config").stats();
- assert(x.ok, tojson(x));
+"use strict";
+
+var numShards = 3;
+var s = new ShardingTest({name: "parallel", shards: numShards, mongos: 2});
+
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+
+var db = s.getDB("test");
+
+var N = 10000;
+var shards = [s.shard0.shardName, s.shard1.shardName, s.shard2.shardName];
+
+for (var i = 0; i < N; i += (N / 10)) {
+ s.adminCommand({split: "test.foo", middle: {_id: i}});
+ s.s.getDB('admin').runCommand(
+ {moveChunk: "test.foo", find: {_id: i}, to: shards[Math.floor(Math.random() * numShards)]});
+}
+
+s.startBalancer();
+
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (i = 0; i < N; i++)
+ bulk.insert({_id: i});
+assert.writeOK(bulk.execute());
+
+var doCommand = function(dbname, cmd) {
+ x = benchRun({
+ ops: [{op: "findOne", ns: dbname + ".$cmd", query: cmd}],
+ host: db.getMongo().host,
+ parallel: 2,
+ seconds: 2
+ });
+ printjson(x);
+ x = benchRun({
+ ops: [{op: "findOne", ns: dbname + ".$cmd", query: cmd}],
+ host: s._mongos[1].host,
+ parallel: 2,
+ seconds: 2
+ });
printjson(x);
+};
+
+doCommand("test", {dbstats: 1});
+doCommand("config", {dbstats: 1});
+
+var x = s.getDB("config").stats();
+assert(x.ok, tojson(x));
+printjson(x);
- s.stop();
+s.stop();
}());
diff --git a/jstests/sharding/pending_chunk.js b/jstests/sharding/pending_chunk.js
index fb8730b6ab4..06f9a2afec0 100644
--- a/jstests/sharding/pending_chunk.js
+++ b/jstests/sharding/pending_chunk.js
@@ -3,81 +3,80 @@
//
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 2, mongos: 2, other: {separateConfig: true}});
+var st = new ShardingTest({shards: 2, mongos: 2, other: {separateConfig: true}});
- var mongos = st.s0;
- var admin = mongos.getDB('admin');
- var coll = mongos.getCollection('foo.bar');
- var ns = coll.getFullName();
- var dbName = coll.getDB().getName();
+var mongos = st.s0;
+var admin = mongos.getDB('admin');
+var coll = mongos.getCollection('foo.bar');
+var ns = coll.getFullName();
+var dbName = coll.getDB().getName();
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName}));
- assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
- // Turn off best-effort recipient metadata refresh post-migration commit on both shards because
- // it would clean up the pending chunks on migration recipients.
- assert.commandWorked(st.shard0.getDB('admin').runCommand(
- {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
- assert.commandWorked(st.shard1.getDB('admin').runCommand(
- {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
+// Turn off best-effort recipient metadata refresh post-migration commit on both shards because
+// it would clean up the pending chunks on migration recipients.
+assert.commandWorked(st.shard0.getDB('admin').runCommand(
+ {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
+assert.commandWorked(st.shard1.getDB('admin').runCommand(
+ {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
- jsTest.log('Moving some chunks to shard1...');
+jsTest.log('Moving some chunks to shard1...');
- assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 1}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 1}}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));
- function getMetadata(shard) {
- var admin = shard.getDB('admin'),
- metadata = admin.runCommand({getShardVersion: ns, fullMetadata: true}).metadata;
+function getMetadata(shard) {
+ var admin = shard.getDB('admin'),
+ metadata = admin.runCommand({getShardVersion: ns, fullMetadata: true}).metadata;
- jsTest.log('Got metadata: ' + tojson(metadata));
- return metadata;
- }
+ jsTest.log('Got metadata: ' + tojson(metadata));
+ return metadata;
+}
- var metadata = getMetadata(st.shard1);
- assert.eq(metadata.pending[0][0]._id, 1);
- assert.eq(metadata.pending[0][1]._id, MaxKey);
+var metadata = getMetadata(st.shard1);
+assert.eq(metadata.pending[0][0]._id, 1);
+assert.eq(metadata.pending[0][1]._id, MaxKey);
- jsTest.log('Moving some chunks back to shard0 after empty...');
+jsTest.log('Moving some chunks back to shard0 after empty...');
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: -1}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: -1}, to: st.shard1.shardName, _waitForDelete: true}));
- metadata = getMetadata(st.shard0);
- assert.eq(metadata.shardVersion.t, 0);
- assert.neq(metadata.collVersion.t, 0);
- assert.eq(metadata.pending.length, 0);
+metadata = getMetadata(st.shard0);
+assert.eq(metadata.shardVersion.t, 0);
+assert.neq(metadata.collVersion.t, 0);
+assert.eq(metadata.pending.length, 0);
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
- metadata = getMetadata(st.shard0);
- assert.eq(metadata.shardVersion.t, 0);
- assert.neq(metadata.collVersion.t, 0);
- assert.eq(metadata.pending[0][0]._id, 1);
- assert.eq(metadata.pending[0][1]._id, MaxKey);
+metadata = getMetadata(st.shard0);
+assert.eq(metadata.shardVersion.t, 0);
+assert.neq(metadata.collVersion.t, 0);
+assert.eq(metadata.pending[0][0]._id, 1);
+assert.eq(metadata.pending[0][1]._id, MaxKey);
- // The pending chunk should be promoted to a real chunk when shard0 reloads
- // its config.
- jsTest.log('Checking that pending chunk is promoted on reload...');
+// The pending chunk should be promoted to a real chunk when shard0 reloads
+// its config.
+jsTest.log('Checking that pending chunk is promoted on reload...');
- assert.eq(null, coll.findOne({_id: 1}));
+assert.eq(null, coll.findOne({_id: 1}));
- metadata = getMetadata(st.shard0);
- assert.neq(metadata.shardVersion.t, 0);
- assert.neq(metadata.collVersion.t, 0);
- assert.eq(metadata.chunks[0][0]._id, 1);
- assert.eq(metadata.chunks[0][1]._id, MaxKey);
+metadata = getMetadata(st.shard0);
+assert.neq(metadata.shardVersion.t, 0);
+assert.neq(metadata.collVersion.t, 0);
+assert.eq(metadata.chunks[0][0]._id, 1);
+assert.eq(metadata.chunks[0][1]._id, MaxKey);
- st.printShardingStatus();
-
- st.stop();
+st.printShardingStatus();
+st.stop();
})();
diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index 6dd3b30344f..b1a730db297 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -7,194 +7,193 @@
// Insert docs with same val for 'skey' but different vals for 'extra'.
// Move chunks around and check that [min,max) chunk boundaries are properly obeyed.
(function() {
- 'use strict';
-
- // TODO: SERVER-33601 remove shardAsReplicaSet: false
- var s = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
-
- var db = s.getDB("test");
- var admin = s.getDB("admin");
- var config = s.getDB("config");
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- //******************Part 1********************
-
- var coll = db.foo;
-
- var longStr = 'a';
- while (longStr.length < 1024 * 128) {
- longStr += longStr;
+'use strict';
+
+// TODO: SERVER-33601 remove shardAsReplicaSet: false
+var s = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
+
+var db = s.getDB("test");
+var admin = s.getDB("admin");
+var config = s.getDB("config");
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+//******************Part 1********************
+
+var coll = db.foo;
+
+var longStr = 'a';
+while (longStr.length < 1024 * 128) {
+ longStr += longStr;
+}
+var bulk = coll.initializeUnorderedBulkOp();
+for (i = 0; i < 100; i++) {
+ bulk.insert({num: i, str: longStr});
+ bulk.insert({num: i + 100, x: i, str: longStr});
+}
+assert.writeOK(bulk.execute());
+
+// no usable index yet, should throw
+assert.throws(function() {
+ s.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}});
+});
+
+// create usable index
+assert.commandWorked(coll.ensureIndex({num: 1, x: 1}));
+
+// usable index, but doc with empty 'num' value, so still should throw
+assert.writeOK(coll.insert({x: -5}));
+assert.throws(function() {
+ s.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}});
+});
+
+// remove the bad doc. now should finally succeed
+assert.writeOK(coll.remove({x: -5}));
+assert.commandWorked(s.s0.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}}));
+
+// make sure extra index is not created
+assert.eq(2, coll.getIndexes().length);
+
+// make sure balancing happens
+s.awaitBalance(coll.getName(), db.getName());
+
+// Make sure our initial balance cleanup doesn't interfere with later migrations.
+assert.soon(function() {
+ print("Waiting for migration cleanup to occur...");
+ return coll.count() == coll.find().itcount();
+});
+
+s.stopBalancer();
+
+// test splitting
+assert.commandWorked(s.s0.adminCommand({split: coll.getFullName(), middle: {num: 50}}));
+
+// test moving
+assert.commandWorked(s.s0.adminCommand({
+ movechunk: coll.getFullName(),
+ find: {num: 20},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+}));
+
+//******************Part 2********************
+
+// Migrations and splits will still work on a sharded collection that only has multi key
+// index.
+db.user.ensureIndex({num: 1, x: 1});
+db.adminCommand({shardCollection: 'test.user', key: {num: 1}});
+
+var indexCount = db.user.getIndexes().length;
+assert.eq(2,
+ indexCount, // indexes for _id_ and num_1_x_1
+ 'index count not expected: ' + tojson(db.user.getIndexes()));
+
+var array = [];
+for (var item = 0; item < 50; item++) {
+ array.push(item);
+}
+
+for (var docs = 0; docs < 1000; docs++) {
+ db.user.insert({num: docs, x: array});
+}
+
+assert.eq(1000, db.user.find().itcount());
+
+assert.commandWorked(admin.runCommand({
+ movechunk: 'test.user',
+ find: {num: 70},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+}));
+
+var expectedShardCount = {shard0000: 0, shard0001: 0};
+config.chunks.find({ns: 'test.user'}).forEach(function(chunkDoc) {
+ var min = chunkDoc.min.num;
+ var max = chunkDoc.max.num;
+
+ if (min < 0 || min == MinKey) {
+ min = 0;
}
- var bulk = coll.initializeUnorderedBulkOp();
- for (i = 0; i < 100; i++) {
- bulk.insert({num: i, str: longStr});
- bulk.insert({num: i + 100, x: i, str: longStr});
- }
- assert.writeOK(bulk.execute());
-
- // no usable index yet, should throw
- assert.throws(function() {
- s.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}});
- });
-
- // create usable index
- assert.commandWorked(coll.ensureIndex({num: 1, x: 1}));
-
- // usable index, but doc with empty 'num' value, so still should throw
- assert.writeOK(coll.insert({x: -5}));
- assert.throws(function() {
- s.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}});
- });
-
- // remove the bad doc. now should finally succeed
- assert.writeOK(coll.remove({x: -5}));
- assert.commandWorked(s.s0.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}}));
-
- // make sure extra index is not created
- assert.eq(2, coll.getIndexes().length);
-
- // make sure balancing happens
- s.awaitBalance(coll.getName(), db.getName());
-
- // Make sure our initial balance cleanup doesn't interfere with later migrations.
- assert.soon(function() {
- print("Waiting for migration cleanup to occur...");
- return coll.count() == coll.find().itcount();
- });
-
- s.stopBalancer();
-
- // test splitting
- assert.commandWorked(s.s0.adminCommand({split: coll.getFullName(), middle: {num: 50}}));
-
- // test moving
- assert.commandWorked(s.s0.adminCommand({
- movechunk: coll.getFullName(),
- find: {num: 20},
- to: s.getOther(s.getPrimaryShard("test")).name,
- _waitForDelete: true
- }));
- //******************Part 2********************
-
- // Migrations and splits will still work on a sharded collection that only has multi key
- // index.
- db.user.ensureIndex({num: 1, x: 1});
- db.adminCommand({shardCollection: 'test.user', key: {num: 1}});
-
- var indexCount = db.user.getIndexes().length;
- assert.eq(2,
- indexCount, // indexes for _id_ and num_1_x_1
- 'index count not expected: ' + tojson(db.user.getIndexes()));
-
- var array = [];
- for (var item = 0; item < 50; item++) {
- array.push(item);
+ if (max > 1000 || max == MaxKey) {
+ max = 1000;
}
- for (var docs = 0; docs < 1000; docs++) {
- db.user.insert({num: docs, x: array});
+ if (max > 0) {
+ expectedShardCount[chunkDoc.shard] += (max - min);
}
+});
- assert.eq(1000, db.user.find().itcount());
-
- assert.commandWorked(admin.runCommand({
- movechunk: 'test.user',
- find: {num: 70},
- to: s.getOther(s.getPrimaryShard("test")).name,
- _waitForDelete: true
- }));
-
- var expectedShardCount = {shard0000: 0, shard0001: 0};
- config.chunks.find({ns: 'test.user'}).forEach(function(chunkDoc) {
- var min = chunkDoc.min.num;
- var max = chunkDoc.max.num;
-
- if (min < 0 || min == MinKey) {
- min = 0;
- }
-
- if (max > 1000 || max == MaxKey) {
- max = 1000;
- }
-
- if (max > 0) {
- expectedShardCount[chunkDoc.shard] += (max - min);
- }
- });
+assert.eq(expectedShardCount['shard0000'], s.shard0.getDB('test').user.find().count());
+assert.eq(expectedShardCount['shard0001'], s.shard1.getDB('test').user.find().count());
- assert.eq(expectedShardCount['shard0000'], s.shard0.getDB('test').user.find().count());
- assert.eq(expectedShardCount['shard0001'], s.shard1.getDB('test').user.find().count());
+assert.commandWorked(admin.runCommand({split: 'test.user', middle: {num: 70}}));
- assert.commandWorked(admin.runCommand({split: 'test.user', middle: {num: 70}}));
+assert.eq(expectedShardCount['shard0000'], s.shard0.getDB('test').user.find().count());
+assert.eq(expectedShardCount['shard0001'], s.shard1.getDB('test').user.find().count());
- assert.eq(expectedShardCount['shard0000'], s.shard0.getDB('test').user.find().count());
- assert.eq(expectedShardCount['shard0001'], s.shard1.getDB('test').user.find().count());
+//******************Part 3********************
- //******************Part 3********************
+// Check that chunk boundaries are obeyed when using a prefix shard key.
+// This test repeats with the shard key as the prefix of several different longer indexes.
- // Check chunk boundaries obeyed when using prefix shard key.
- // This test repeats with shard key as the prefix of different longer indices.
-
- for (i = 0; i < 3; i++) {
- // setup new collection on shard0
- var coll2 = db.foo2;
- coll2.drop();
- if (s.getPrimaryShardIdForDatabase(coll2.getDB()) != s.shard0.shardName) {
- var moveRes =
- admin.runCommand({movePrimary: coll2.getDB() + "", to: s.shard0.shardName});
- assert.eq(moveRes.ok, 1, "primary not moved correctly");
- }
+for (var i = 0; i < 3; i++) {
+ // setup new collection on shard0
+ var coll2 = db.foo2;
+ coll2.drop();
+ if (s.getPrimaryShardIdForDatabase(coll2.getDB()) != s.shard0.shardName) {
+ var moveRes = admin.runCommand({movePrimary: coll2.getDB() + "", to: s.shard0.shardName});
+ assert.eq(moveRes.ok, 1, "primary not moved correctly");
+ }
- // declare a longer index
- if (i == 0) {
- assert.commandWorked(coll2.ensureIndex({skey: 1, extra: 1}));
- } else if (i == 1) {
- assert.commandWorked(coll2.ensureIndex({skey: 1, extra: -1}));
- } else if (i == 2) {
- assert.commandWorked(coll2.ensureIndex({skey: 1, extra: 1, superfluous: -1}));
- }
+ // declare a longer index
+ if (i == 0) {
+ assert.commandWorked(coll2.ensureIndex({skey: 1, extra: 1}));
+ } else if (i == 1) {
+ assert.commandWorked(coll2.ensureIndex({skey: 1, extra: -1}));
+ } else if (i == 2) {
+ assert.commandWorked(coll2.ensureIndex({skey: 1, extra: 1, superfluous: -1}));
+ }
- // then shard collection on prefix
- var shardRes = admin.runCommand({shardCollection: coll2 + "", key: {skey: 1}});
- assert.eq(shardRes.ok, 1, "collection not sharded");
+ // then shard collection on prefix
+ var shardRes = admin.runCommand({shardCollection: coll2 + "", key: {skey: 1}});
+ assert.eq(shardRes.ok, 1, "collection not sharded");
- // insert docs with same value for skey
- bulk = coll2.initializeUnorderedBulkOp();
- for (var i = 0; i < 5; i++) {
- for (var j = 0; j < 5; j++) {
- bulk.insert({skey: 0, extra: i, superfluous: j});
- }
+ // insert docs with same value for skey
+ bulk = coll2.initializeUnorderedBulkOp();
+ for (var k = 0; k < 5; k++) {
+ for (var j = 0; j < 5; j++) {
+ bulk.insert({skey: 0, extra: k, superfluous: j});
}
- assert.writeOK(bulk.execute());
+ }
+ assert.writeOK(bulk.execute());
- // split on that key, and check it makes 2 chunks
- var splitRes = admin.runCommand({split: coll2 + "", middle: {skey: 0}});
- assert.eq(splitRes.ok, 1, "split didn't work");
- assert.eq(config.chunks.find({ns: coll2.getFullName()}).count(), 2);
+ // split on that key, and check it makes 2 chunks
+ var splitRes = admin.runCommand({split: coll2 + "", middle: {skey: 0}});
+ assert.eq(splitRes.ok, 1, "split didn't work");
+ assert.eq(config.chunks.find({ns: coll2.getFullName()}).count(), 2);
- // movechunk should move ALL docs since they have same value for skey
- moveRes = admin.runCommand(
- {moveChunk: coll2 + "", find: {skey: 0}, to: s.shard1.shardName, _waitForDelete: true});
- assert.eq(moveRes.ok, 1, "movechunk didn't work");
+ // movechunk should move ALL docs since they have same value for skey
+ moveRes = admin.runCommand(
+ {moveChunk: coll2 + "", find: {skey: 0}, to: s.shard1.shardName, _waitForDelete: true});
+ assert.eq(moveRes.ok, 1, "movechunk didn't work");
- // Make sure our migration eventually goes through before testing individual shards
- assert.soon(function() {
- print("Waiting for migration cleanup to occur...");
- return coll2.count() == coll2.find().itcount();
- });
+ // Make sure our migration eventually goes through before testing individual shards
+ assert.soon(function() {
+ print("Waiting for migration cleanup to occur...");
+ return coll2.count() == coll2.find().itcount();
+ });
- // check no orphaned docs on the shards
- assert.eq(0, s.shard0.getCollection(coll2 + "").find().itcount());
- assert.eq(25, s.shard1.getCollection(coll2 + "").find().itcount());
+ // check no orphaned docs on the shards
+ assert.eq(0, s.shard0.getCollection(coll2 + "").find().itcount());
+ assert.eq(25, s.shard1.getCollection(coll2 + "").find().itcount());
- // and check total
- assert.eq(25, coll2.find().itcount(), "bad total number of docs after move");
+ // and check total
+ assert.eq(25, coll2.find().itcount(), "bad total number of docs after move");
- s.printShardingStatus();
- }
+ s.printShardingStatus();
+}
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/prepare_transaction_then_migrate.js b/jstests/sharding/prepare_transaction_then_migrate.js
index 038ebfb8463..034259d02be 100644
--- a/jstests/sharding/prepare_transaction_then_migrate.js
+++ b/jstests/sharding/prepare_transaction_then_migrate.js
@@ -7,65 +7,67 @@
*/
(function() {
- "use strict";
- load('jstests/libs/chunk_manipulation_util.js');
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
+"use strict";
+load('jstests/libs/chunk_manipulation_util.js');
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
- const dbName = "test";
- const collName = "user";
+const dbName = "test";
+const collName = "user";
- const staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+const staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- const st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}});
- st.adminCommand({enableSharding: 'test'});
- st.ensurePrimaryShard('test', st.shard0.shardName);
- st.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
+const st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}});
+st.adminCommand({enableSharding: 'test'});
+st.ensurePrimaryShard('test', st.shard0.shardName);
+st.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
- const session = st.s.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = st.s.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- assert.commandWorked(sessionColl.insert({_id: 1}));
+assert.commandWorked(sessionColl.insert({_id: 1}));
- const lsid = {id: UUID()};
- const txnNumber = 0;
- const stmtId = 0;
+const lsid = {
+ id: UUID()
+};
+const txnNumber = 0;
+const stmtId = 0;
- assert.commandWorked(st.s0.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: 2}, {_id: 5}, {_id: 15}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- startTransaction: true,
- autocommit: false,
- }));
+assert.commandWorked(st.s0.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: 2}, {_id: 5}, {_id: 15}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ startTransaction: true,
+ autocommit: false,
+}));
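+// Prepare the transaction directly on the shard; the returned prepareTimestamp is reused as
+// the commitTimestamp when the transaction is committed below.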
- const res = assert.commandWorked(st.shard0.getDB(dbName).adminCommand({
- prepareTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }));
+const res = assert.commandWorked(st.shard0.getDB(dbName).adminCommand({
+ prepareTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+}));
- const joinMoveChunk = moveChunkParallel(
- staticMongod, st.s.host, {_id: 1}, null, 'test.user', st.shard1.shardName);
+const joinMoveChunk =
+ moveChunkParallel(staticMongod, st.s.host, {_id: 1}, null, 'test.user', st.shard1.shardName);
- // Wait for catchup to verify that the migration has exited the clone phase.
- waitForMigrateStep(st.shard1, migrateStepNames.catchup);
+// Wait for catchup to verify that the migration has exited the clone phase.
+waitForMigrateStep(st.shard1, migrateStepNames.catchup);
- assert.commandWorked(st.shard0.getDB(dbName).adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- commitTimestamp: res.prepareTimestamp,
- }));
+assert.commandWorked(st.shard0.getDB(dbName).adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ commitTimestamp: res.prepareTimestamp,
+}));
- joinMoveChunk();
+joinMoveChunk();
- assert.eq(sessionColl.find({_id: 2}).count(), 1);
+assert.eq(sessionColl.find({_id: 2}).count(), 1);
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js
index ec71924fc53..288d6abe694 100644
--- a/jstests/sharding/presplit.js
+++ b/jstests/sharding/presplit.js
@@ -1,40 +1,39 @@
(function() {
- var s = new ShardingTest({name: "presplit", shards: 2, mongos: 1, other: {chunkSize: 1}});
-
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- // Insert enough data in 'test.foo' to fill several chunks, if it was sharded.
- bigString = "";
- while (bigString.length < 10000) {
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
- }
-
- db = s.getDB("test");
- inserted = 0;
- num = 0;
- var bulk = db.foo.initializeUnorderedBulkOp();
- while (inserted < (20 * 1024 * 1024)) {
- bulk.insert({_id: num++, s: bigString});
- inserted += bigString.length;
- }
- assert.writeOK(bulk.execute());
-
- // Make sure that there's only one chunk holding all the data.
- s.printChunks();
- primary = s.getPrimaryShard("test").getDB("test");
- assert.eq(0, s.config.chunks.count({"ns": "test.foo"}), "single chunk assertion");
- assert.eq(num, primary.foo.count());
-
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-
- // Make sure the collection's original chunk got split
- s.printChunks();
- assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "many chunks assertion");
- assert.eq(num, primary.foo.count());
-
- s.printChangeLog();
- s.stop();
-
+var s = new ShardingTest({name: "presplit", shards: 2, mongos: 1, other: {chunkSize: 1}});
+
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+// Insert enough data in 'test.foo' to fill several chunks, if it was sharded.
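+// (That works out to roughly 2,000 documents of ~10 KB each, against the 1 MB chunk size
+// configured above.)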
+bigString = "";
+while (bigString.length < 10000) {
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+}
+
+db = s.getDB("test");
+inserted = 0;
+num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+while (inserted < (20 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+}
+assert.writeOK(bulk.execute());
+
+// Make sure that there's only one chunk holding all the data.
+s.printChunks();
+primary = s.getPrimaryShard("test").getDB("test");
+assert.eq(0, s.config.chunks.count({"ns": "test.foo"}), "single chunk assertion");
+assert.eq(num, primary.foo.count());
+
+s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+
+// Make sure the collection's original chunk got split
+s.printChunks();
+assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "many chunks assertion");
+assert.eq(num, primary.foo.count());
+
+s.printChangeLog();
+s.stop();
})();
diff --git a/jstests/sharding/primary_config_server_blackholed_from_mongos.js b/jstests/sharding/primary_config_server_blackholed_from_mongos.js
index a7cc266c1a2..674dc1f9235 100644
--- a/jstests/sharding/primary_config_server_blackholed_from_mongos.js
+++ b/jstests/sharding/primary_config_server_blackholed_from_mongos.js
@@ -1,75 +1,75 @@
// Ensures that if the primary config server is blackholed from the point of view of mongos, CRUD
// and read-only config operations continue to work.
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 1, useBridge: true});
+var st = new ShardingTest({shards: 2, mongos: 1, useBridge: true});
- var testDB = st.s.getDB('BlackHoleDB');
- var configDB = st.s.getDB('config');
+var testDB = st.s.getDB('BlackHoleDB');
+var configDB = st.s.getDB('config');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'BlackHoleDB'}));
- assert.commandWorked(
- testDB.adminCommand({shardCollection: testDB.ShardedColl.getFullName(), key: {_id: 1}}));
+assert.commandWorked(testDB.adminCommand({enableSharding: 'BlackHoleDB'}));
+assert.commandWorked(
+ testDB.adminCommand({shardCollection: testDB.ShardedColl.getFullName(), key: {_id: 1}}));
- var bulk = testDB.ShardedColl.initializeUnorderedBulkOp();
- for (var i = 0; i < 1000; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+var bulk = testDB.ShardedColl.initializeUnorderedBulkOp();
+for (var i = 0; i < 1000; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
- const configPrimary = st.configRS.getPrimary();
- const admin = configPrimary.getDB("admin");
+const configPrimary = st.configRS.getPrimary();
+const admin = configPrimary.getDB("admin");
- // Set the priority and votes to 0 for secondary config servers so that in the case
- // of an election, they cannot step up. If a different node were to step up, the
- // config server would no longer be blackholed from mongos.
- let conf = admin.runCommand({replSetGetConfig: 1}).config;
- for (let i = 0; i < conf.members.length; i++) {
- if (conf.members[i].host !== configPrimary.host) {
- conf.members[i].votes = 0;
- conf.members[i].priority = 0;
- }
+// Set the priority and votes to 0 for secondary config servers so that in the case
+// of an election, they cannot step up. If a different node were to step up, the
+// config server would no longer be blackholed from mongos.
+let conf = admin.runCommand({replSetGetConfig: 1}).config;
+for (let i = 0; i < conf.members.length; i++) {
+ if (conf.members[i].host !== configPrimary.host) {
+ conf.members[i].votes = 0;
+ conf.members[i].priority = 0;
}
- conf.version++;
- const response = admin.runCommand({replSetReconfig: conf});
- assert.commandWorked(response);
+}
+conf.version++;
+const response = admin.runCommand({replSetReconfig: conf});
+assert.commandWorked(response);
- jsTest.log('Partitioning the config server primary from the mongos');
- configPrimary.discardMessagesFrom(st.s, 1.0);
- st.s.discardMessagesFrom(configPrimary, 1.0);
+jsTest.log('Partitioning the config server primary from the mongos');
+configPrimary.discardMessagesFrom(st.s, 1.0);
+st.s.discardMessagesFrom(configPrimary, 1.0);
- assert.commandWorked(testDB.adminCommand({flushRouterConfig: 1}));
+assert.commandWorked(testDB.adminCommand({flushRouterConfig: 1}));
- // This should fail, because the primary is not available
- jsTest.log('Doing write operation on a new database and collection');
- assert.writeError(st.s.getDB('NonExistentDB')
- .TestColl.insert({_id: 0, value: 'This value will never be inserted'},
- {maxTimeMS: 15000}));
+// This should fail, because the primary is not available
+jsTest.log('Doing write operation on a new database and collection');
+assert.writeError(
+ st.s.getDB('NonExistentDB')
+ .TestColl.insert({_id: 0, value: 'This value will never be inserted'}, {maxTimeMS: 15000}));
- jsTest.log('Doing CRUD operations on the sharded collection');
- assert.eq(1000, testDB.ShardedColl.find().itcount());
- assert.writeOK(testDB.ShardedColl.insert({_id: 1000}));
- assert.eq(1001, testDB.ShardedColl.find().count());
+jsTest.log('Doing CRUD operations on the sharded collection');
+assert.eq(1000, testDB.ShardedColl.find().itcount());
+assert.writeOK(testDB.ShardedColl.insert({_id: 1000}));
+assert.eq(1001, testDB.ShardedColl.find().count());
- jsTest.log('Doing read operations on a config server collection');
+jsTest.log('Doing read operations on a config server collection');
- // Should fail due to primary read preference
- assert.throws(function() {
- configDB.chunks.find().itcount();
- });
- assert.throws(function() {
- configDB.chunks.find().count();
- });
- assert.throws(function() {
- configDB.chunks.aggregate().itcount();
- });
+// Should fail due to primary read preference
+assert.throws(function() {
+ configDB.chunks.find().itcount();
+});
+assert.throws(function() {
+ configDB.chunks.find().count();
+});
+assert.throws(function() {
+ configDB.chunks.aggregate().itcount();
+});
- // With secondary read pref config server reads should work
- st.s.setReadPref('secondary');
- assert.lt(0, configDB.chunks.find().itcount());
- assert.lt(0, configDB.chunks.find().count());
- assert.lt(0, configDB.chunks.aggregate().itcount());
+// With secondary read pref config server reads should work
+st.s.setReadPref('secondary');
+assert.lt(0, configDB.chunks.find().itcount());
+assert.lt(0, configDB.chunks.find().count());
+assert.lt(0, configDB.chunks.aggregate().itcount());
- st.stop();
+st.stop();
}());
diff --git a/jstests/sharding/printShardingStatus.js b/jstests/sharding/printShardingStatus.js
index 3b1548aabab..18bc8bdea6e 100644
--- a/jstests/sharding/printShardingStatus.js
+++ b/jstests/sharding/printShardingStatus.js
@@ -3,251 +3,248 @@
// headings and the names of sharded collections and their shard keys.
(function() {
- 'use strict';
+'use strict';
- const MONGOS_COUNT = 2;
+const MONGOS_COUNT = 2;
- var st = new ShardingTest({shards: 1, mongos: MONGOS_COUNT, config: 1});
+var st = new ShardingTest({shards: 1, mongos: MONGOS_COUNT, config: 1});
- var standalone = MongoRunner.runMongod();
+var standalone = MongoRunner.runMongod();
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
- // Wait for the background thread from the mongos to insert their entries before beginning
- // the tests.
- assert.soon(function() {
- return MONGOS_COUNT == mongos.getDB('config').mongos.count();
- });
-
- function grabStatusOutput(configdb, verbose) {
- var res = print.captureAllOutput(function() {
- return printShardingStatus(configdb, verbose);
- });
- var output = res.output.join("\n");
- jsTestLog(output);
- return output;
- }
+// Wait for the background thread of each mongos to insert its entry into config.mongos
+// before beginning the tests.
+assert.soon(function() {
+ return MONGOS_COUNT == mongos.getDB('config').mongos.count();
+});
- function assertPresentInOutput(output, content, what) {
- assert(output.includes(content),
- what + " \"" + content + "\" NOT present in output of " +
- "printShardingStatus() (but it should be)");
+function grabStatusOutput(configdb, verbose) {
+ var res = print.captureAllOutput(function() {
+ return printShardingStatus(configdb, verbose);
+ });
+ var output = res.output.join("\n");
+ jsTestLog(output);
+ return output;
+}
+
+function assertPresentInOutput(output, content, what) {
+ assert(output.includes(content),
+ what + " \"" + content + "\" NOT present in output of " +
+ "printShardingStatus() (but it should be)");
+}
+
+function assertNotPresentInOutput(output, content, what) {
+ assert(!output.includes(content),
+ what + " \"" + content + "\" IS present in output of " +
+ "printShardingStatus() (but it should not be)");
+}
+
+////////////////////////
+// Basic tests
+////////////////////////
+
+var dbName = "thisIsTheDatabase";
+var collName = "thisIsTheCollection";
+var shardKeyName = "thisIsTheShardKey";
+var nsName = dbName + "." + collName;
+
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+var key = {};
+key[shardKeyName] = 1;
+assert.commandWorked(admin.runCommand({shardCollection: nsName, key: key}));
+
+function testBasic(output) {
+ assertPresentInOutput(output, "shards:", "section header");
+ assertPresentInOutput(output, "databases:", "section header");
+ assertPresentInOutput(output, "balancer:", "section header");
+ assertPresentInOutput(output, "active mongoses:", "section header");
+ assertNotPresentInOutput(output, "most recently active mongoses:", "section header");
+
+ assertPresentInOutput(output, dbName, "database");
+ assertPresentInOutput(output, collName, "collection");
+ assertPresentInOutput(output, shardKeyName, "shard key");
+}
+
+function testBasicNormalOnly(output) {
+ assertPresentInOutput(output, tojson(version) + " : 2\n", "active mongos version");
+}
+
+function testBasicVerboseOnly(output) {
+ assertPresentInOutput(output, '"mongoVersion" : ' + tojson(version), "active mongos version");
+ assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "active mongos hostname");
+ assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "active mongos hostname");
+}
+
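+// Collect the mongos version and host strings referenced by the testBasic*Only() checks
+// above and by the inactive-mongos checks below.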
+var buildinfo = assert.commandWorked(mongos.adminCommand("buildinfo"));
+var serverStatus1 = assert.commandWorked(mongos.adminCommand("serverStatus"));
+var serverStatus2 = assert.commandWorked(st.s1.adminCommand("serverStatus"));
+var version = buildinfo.version;
+var s1Host = serverStatus1.host;
+var s2Host = serverStatus2.host;
+
+// Normal, active mongoses
+var outputNormal = grabStatusOutput(st.config, false);
+testBasic(outputNormal);
+testBasicNormalOnly(outputNormal);
+
+var outputVerbose = grabStatusOutput(st.config, true);
+testBasic(outputVerbose);
+testBasicVerboseOnly(outputVerbose);
+
+// Take a copy of the config db, in order to test the harder-to-setup cases below.
+// Copy into a standalone to also test running printShardingStatus() against a config dump.
+var config = mongos.getDB("config");
+var configCopy = standalone.getDB("configCopy");
+config.getCollectionInfos().forEach(function(c) {
+ // Create collection with options.
+ assert.commandWorked(configCopy.createCollection(c.name, c.options));
+ // Clone the docs.
+ config.getCollection(c.name).find().hint({_id: 1}).forEach(function(d) {
+ assert.writeOK(configCopy.getCollection(c.name).insert(d));
+ });
+ // Build the indexes.
+ config.getCollection(c.name).getIndexes().forEach(function(i) {
+ var key = i.key;
+ delete i.key;
+ delete i.ns;
+ delete i.v;
+ assert.commandWorked(configCopy.getCollection(c.name).ensureIndex(key, i));
+ });
+});
+
+// Inactive mongoses
+// Make the first ping be older than now by 1 second more than the threshold
+// Make the second ping be older still by the same amount again
+var pingAdjustMs = 60000 + 1000;
+var then = new Date();
+then.setTime(then.getTime() - pingAdjustMs);
+configCopy.mongos.update({_id: s1Host}, {$set: {ping: then}});
+then.setTime(then.getTime() - pingAdjustMs);
+configCopy.mongos.update({_id: s2Host}, {$set: {ping: then}});
+
+var output = grabStatusOutput(configCopy, false);
+assertPresentInOutput(output, "most recently active mongoses:", "section header");
+assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
+
+var output = grabStatusOutput(configCopy, true);
+assertPresentInOutput(output, "most recently active mongoses:", "section header");
+assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "recent mongos hostname");
+assertNotPresentInOutput(output, '"_id" : ' + tojson(s2Host), "old mongos hostname");
+
+// Older mongoses
+configCopy.mongos.remove({_id: s1Host});
+
+var output = grabStatusOutput(configCopy, false);
+assertPresentInOutput(output, "most recently active mongoses:", "section header");
+assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
+
+var output = grabStatusOutput(configCopy, true);
+assertPresentInOutput(output, "most recently active mongoses:", "section header");
+assertNotPresentInOutput(output, '"_id" : ' + tojson(s1Host), "removed mongos hostname");
+assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "recent mongos hostname");
+
+// No mongoses at all
+configCopy.mongos.remove({});
+
+var output = grabStatusOutput(configCopy, false);
+assertPresentInOutput(output, "most recently active mongoses:\n none", "no mongoses");
+
+var output = grabStatusOutput(configCopy, true);
+assertPresentInOutput(
+ output, "most recently active mongoses:\n none", "no mongoses (verbose)");
+
+assert(mongos.getDB(dbName).dropDatabase());
+
+////////////////////////
+// Extended tests
+////////////////////////
+
+var testCollDetailsNum = 0;
+function testCollDetails(args) {
+ if (args === undefined || typeof (args) != "object") {
+ args = {};
}
- function assertNotPresentInOutput(output, content, what) {
- assert(!output.includes(content),
- what + " \"" + content + "\" IS present in output of " +
- "printShardingStatus() (but it should not be)");
- }
+ var getCollName = function(x) {
+ return "test.test" + x.zeroPad(4);
+ };
+ var collName = getCollName(testCollDetailsNum);
- ////////////////////////
- // Basic tests
- ////////////////////////
-
- var dbName = "thisIsTheDatabase";
- var collName = "thisIsTheCollection";
- var shardKeyName = "thisIsTheShardKey";
- var nsName = dbName + "." + collName;
-
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- var key = {};
- key[shardKeyName] = 1;
- assert.commandWorked(admin.runCommand({shardCollection: nsName, key: key}));
-
- function testBasic(output) {
- assertPresentInOutput(output, "shards:", "section header");
- assertPresentInOutput(output, "databases:", "section header");
- assertPresentInOutput(output, "balancer:", "section header");
- assertPresentInOutput(output, "active mongoses:", "section header");
- assertNotPresentInOutput(output, "most recently active mongoses:", "section header");
-
- assertPresentInOutput(output, dbName, "database");
- assertPresentInOutput(output, collName, "collection");
- assertPresentInOutput(output, shardKeyName, "shard key");
+ var cmdObj = {shardCollection: collName, key: {_id: 1}};
+ if (args.unique) {
+ cmdObj.unique = true;
}
+ assert.commandWorked(admin.runCommand(cmdObj));
- function testBasicNormalOnly(output) {
- assertPresentInOutput(output, tojson(version) + " : 2\n", "active mongos version");
+ if (args.hasOwnProperty("unique")) {
+ assert.writeOK(mongos.getDB("config").collections.update({_id: collName},
+ {$set: {"unique": args.unique}}));
}
-
- function testBasicVerboseOnly(output) {
- assertPresentInOutput(
- output, '"mongoVersion" : ' + tojson(version), "active mongos version");
- assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "active mongos hostname");
- assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "active mongos hostname");
+ if (args.hasOwnProperty("noBalance")) {
+ assert.writeOK(mongos.getDB("config").collections.update(
+ {_id: collName}, {$set: {"noBalance": args.noBalance}}));
}
- var buildinfo = assert.commandWorked(mongos.adminCommand("buildinfo"));
- var serverStatus1 = assert.commandWorked(mongos.adminCommand("serverStatus"));
- var serverStatus2 = assert.commandWorked(st.s1.adminCommand("serverStatus"));
- var version = buildinfo.version;
- var s1Host = serverStatus1.host;
- var s2Host = serverStatus2.host;
-
- // Normal, active mongoses
- var outputNormal = grabStatusOutput(st.config, false);
- testBasic(outputNormal);
- testBasicNormalOnly(outputNormal);
-
- var outputVerbose = grabStatusOutput(st.config, true);
- testBasic(outputVerbose);
- testBasicVerboseOnly(outputVerbose);
-
- // Take a copy of the config db, in order to test the harder-to-setup cases below.
- // Copy into a standalone to also test running printShardingStatus() against a config dump.
- var config = mongos.getDB("config");
- var configCopy = standalone.getDB("configCopy");
- config.getCollectionInfos().forEach(function(c) {
- // Create collection with options.
- assert.commandWorked(configCopy.createCollection(c.name, c.options));
- // Clone the docs.
- config.getCollection(c.name).find().hint({_id: 1}).forEach(function(d) {
- assert.writeOK(configCopy.getCollection(c.name).insert(d));
- });
- // Build the indexes.
- config.getCollection(c.name).getIndexes().forEach(function(i) {
- var key = i.key;
- delete i.key;
- delete i.ns;
- delete i.v;
- assert.commandWorked(configCopy.getCollection(c.name).ensureIndex(key, i));
- });
- });
-
- // Inactive mongoses
- // Make the first ping be older than now by 1 second more than the threshold
- // Make the second ping be older still by the same amount again
- var pingAdjustMs = 60000 + 1000;
- var then = new Date();
- then.setTime(then.getTime() - pingAdjustMs);
- configCopy.mongos.update({_id: s1Host}, {$set: {ping: then}});
- then.setTime(then.getTime() - pingAdjustMs);
- configCopy.mongos.update({_id: s2Host}, {$set: {ping: then}});
-
- var output = grabStatusOutput(configCopy, false);
- assertPresentInOutput(output, "most recently active mongoses:", "section header");
- assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
-
- var output = grabStatusOutput(configCopy, true);
- assertPresentInOutput(output, "most recently active mongoses:", "section header");
- assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "recent mongos hostname");
- assertNotPresentInOutput(output, '"_id" : ' + tojson(s2Host), "old mongos hostname");
+ var output = grabStatusOutput(st.config);
- // Older mongoses
- configCopy.mongos.remove({_id: s1Host});
+ assertPresentInOutput(output, collName, "collection");
- var output = grabStatusOutput(configCopy, false);
- assertPresentInOutput(output, "most recently active mongoses:", "section header");
- assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
-
- var output = grabStatusOutput(configCopy, true);
- assertPresentInOutput(output, "most recently active mongoses:", "section header");
- assertNotPresentInOutput(output, '"_id" : ' + tojson(s1Host), "removed mongos hostname");
- assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "recent mongos hostname");
+ // If any of the previous collection names are present, then their optional indicators
+ // might also be present. This might taint the results when we go searching through
+ // the output.
+ // This also means that earlier collNames can't be a prefix of later collNames.
+ for (var i = 0; i < testCollDetailsNum; i++) {
+ assertNotPresentInOutput(output, getCollName(i), "previous collection");
+ }
- // No mongoses at all
- configCopy.mongos.remove({});
+ assertPresentInOutput(output, "unique: " + (!!args.unique), "unique shard key indicator");
- var output = grabStatusOutput(configCopy, false);
- assertPresentInOutput(output, "most recently active mongoses:\n none", "no mongoses");
+ if (args.hasOwnProperty("unique") && typeof (args.unique) != "boolean") {
+ // non-bool: actual value must be shown
+ assertPresentInOutput(output, tojson(args.unique), "unique shard key indicator (non bool)");
+ }
- var output = grabStatusOutput(configCopy, true);
assertPresentInOutput(
- output, "most recently active mongoses:\n none", "no mongoses (verbose)");
-
- assert(mongos.getDB(dbName).dropDatabase());
-
- ////////////////////////
- // Extended tests
- ////////////////////////
-
- var testCollDetailsNum = 0;
- function testCollDetails(args) {
- if (args === undefined || typeof(args) != "object") {
- args = {};
- }
-
- var getCollName = function(x) {
- return "test.test" + x.zeroPad(4);
- };
- var collName = getCollName(testCollDetailsNum);
-
- var cmdObj = {shardCollection: collName, key: {_id: 1}};
- if (args.unique) {
- cmdObj.unique = true;
- }
- assert.commandWorked(admin.runCommand(cmdObj));
-
- if (args.hasOwnProperty("unique")) {
- assert.writeOK(mongos.getDB("config").collections.update(
- {_id: collName}, {$set: {"unique": args.unique}}));
- }
- if (args.hasOwnProperty("noBalance")) {
- assert.writeOK(mongos.getDB("config").collections.update(
- {_id: collName}, {$set: {"noBalance": args.noBalance}}));
- }
-
- var output = grabStatusOutput(st.config);
-
- assertPresentInOutput(output, collName, "collection");
-
- // If any of the previous collection names are present, then their optional indicators
- // might also be present. This might taint the results when we go searching through
- // the output.
- // This also means that earlier collNames can't be a prefix of later collNames.
- for (var i = 0; i < testCollDetailsNum; i++) {
- assertNotPresentInOutput(output, getCollName(i), "previous collection");
- }
-
- assertPresentInOutput(output, "unique: " + (!!args.unique), "unique shard key indicator");
-
- if (args.hasOwnProperty("unique") && typeof(args.unique) != "boolean") {
- // non-bool: actual value must be shown
- assertPresentInOutput(
- output, tojson(args.unique), "unique shard key indicator (non bool)");
- }
-
- assertPresentInOutput(output,
- "balancing: " + (!args.noBalance),
- "balancing indicator (inverse of noBalance)");
- if (args.hasOwnProperty("noBalance") && typeof(args.noBalance) != "boolean") {
- // non-bool: actual value must be shown
- assertPresentInOutput(output, tojson(args.noBalance), "noBalance indicator (non bool)");
- }
-
- try {
- mongos.getCollection(collName).drop();
- } catch (e) {
- // Ignore drop errors because they are from the illegal values in the collection entry
- assert.writeOK(mongos.getDB("config").collections.remove({_id: collName}));
- }
-
- testCollDetailsNum++;
+ output, "balancing: " + (!args.noBalance), "balancing indicator (inverse of noBalance)");
+ if (args.hasOwnProperty("noBalance") && typeof (args.noBalance) != "boolean") {
+ // non-bool: actual value must be shown
+ assertPresentInOutput(output, tojson(args.noBalance), "noBalance indicator (non bool)");
+ }
+
+ try {
+ mongos.getCollection(collName).drop();
+ } catch (e) {
+ // Ignore drop errors because they are from the illegal values in the collection entry
+ assert.writeOK(mongos.getDB("config").collections.remove({_id: collName}));
}
- assert.commandWorked(admin.runCommand({enableSharding: "test"}));
+ testCollDetailsNum++;
+}
+
+assert.commandWorked(admin.runCommand({enableSharding: "test"}));
- // Defaults
- testCollDetails({});
+// Defaults
+testCollDetails({});
- // Expected values
- testCollDetails({unique: false, noBalance: false});
- testCollDetails({unique: true, noBalance: true});
+// Expected values
+testCollDetails({unique: false, noBalance: false});
+testCollDetails({unique: true, noBalance: true});
- // Unexpected truthy values
- testCollDetails({unique: "truthy unique value 1", noBalance: "truthy noBalance value 1"});
- testCollDetails({unique: 1, noBalance: 1});
- testCollDetails({unique: -1, noBalance: -1});
- testCollDetails({unique: {}, noBalance: {}});
+// Unexpected truthy values
+testCollDetails({unique: "truthy unique value 1", noBalance: "truthy noBalance value 1"});
+testCollDetails({unique: 1, noBalance: 1});
+testCollDetails({unique: -1, noBalance: -1});
+testCollDetails({unique: {}, noBalance: {}});
- // Unexpected falsy values
- testCollDetails({unique: "", noBalance: ""});
- testCollDetails({unique: 0, noBalance: 0});
+// Unexpected falsy values
+testCollDetails({unique: "", noBalance: ""});
+testCollDetails({unique: 0, noBalance: 0});
- assert(mongos.getDB("test").dropDatabase());
+assert(mongos.getDB("test").dropDatabase());
- MongoRunner.stopMongod(standalone);
+MongoRunner.stopMongod(standalone);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/query_after_multi_write.js b/jstests/sharding/query_after_multi_write.js
index 4cfff22be4a..3405d0e2b40 100644
--- a/jstests/sharding/query_after_multi_write.js
+++ b/jstests/sharding/query_after_multi_write.js
@@ -1,63 +1,62 @@
(function() {
- "use strict";
+"use strict";
- /**
- * Test that queries will be properly routed after executing a write that does not
- * perform any shard version checks.
- */
- var runTest = function(writeFunc) {
- var st = new ShardingTest({shards: 2, mongos: 2});
+/**
+ * Test that queries will be properly routed after executing a write that does not
+ * perform any shard version checks.
+ */
+var runTest = function(writeFunc) {
+ var st = new ShardingTest({shards: 2, mongos: 2});
- var testDB = st.s.getDB('test');
- testDB.dropDatabase();
+ var testDB = st.s.getDB('test');
+ testDB.dropDatabase();
- assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
+ assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+ assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- var testDB2 = st.s1.getDB('test');
- testDB2.user.insert({x: 123456});
+ var testDB2 = st.s1.getDB('test');
+ testDB2.user.insert({x: 123456});
- // Move chunk to bump version on a different mongos.
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+ // Move chunk to bump version on a different mongos.
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- // Issue a query and make sure it gets routed to the right shard.
- assert.neq(null, testDB2.user.findOne({x: 123456}));
+ // Issue a query and make sure it gets routed to the right shard.
+ assert.neq(null, testDB2.user.findOne({x: 123456}));
- // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
- // incremented to 3
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+ // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
+ // incremented to 3
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- // Issue a query and make sure it gets routed to the right shard again.
- assert.neq(null, testDB2.user.findOne({x: 123456}));
+ // Issue a query and make sure it gets routed to the right shard again.
+ assert.neq(null, testDB2.user.findOne({x: 123456}));
- // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
- // incremented to 4
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+ // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
+ // incremented to 4
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- // Ensure that write commands with multi version do not reset the connection shard version
- // to
- // ignored.
- writeFunc(testDB2);
+ // Ensure that multi-write commands do not reset the connection's shard version to 'ignored'.
+ writeFunc(testDB2);
- assert.neq(null, testDB2.user.findOne({x: 123456}));
+ assert.neq(null, testDB2.user.findOne({x: 123456}));
- st.stop();
- };
+ st.stop();
+};
- runTest(function(db) {
- db.user.update({}, {$inc: {y: 987654}}, false, true);
- });
-
- runTest(function(db) {
- db.user.remove({y: 'noMatch'}, false);
- });
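+// Run the test with both kinds of multi-writes: a multi-update and a multi-remove.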
+runTest(function(db) {
+ db.user.update({}, {$inc: {y: 987654}}, false, true);
+});
+runTest(function(db) {
+ db.user.remove({y: 'noMatch'}, false);
+});
})();
diff --git a/jstests/sharding/query_config.js b/jstests/sharding/query_config.js
index b760af12609..65739b2b9a5 100644
--- a/jstests/sharding/query_config.js
+++ b/jstests/sharding/query_config.js
@@ -1,357 +1,344 @@
// Tests user queries over the config servers.
(function() {
- 'use strict';
-
- var getListCollectionsCursor = function(database, options, subsequentBatchSize) {
- return new DBCommandCursor(
- database, database.runCommand("listCollections", options), subsequentBatchSize);
- };
-
- var getListIndexesCursor = function(coll, options, subsequentBatchSize) {
- return new DBCommandCursor(
- coll.getDB(), coll.runCommand("listIndexes", options), subsequentBatchSize);
- };
-
- var arrayGetNames = function(array) {
- return array.map(function(spec) {
- return spec.name;
- });
- };
-
- var cursorGetCollectionNames = function(cursor) {
- return arrayGetNames(cursor.toArray());
- };
-
- var sortArrayByName = function(array) {
- return array.sort(function(a, b) {
- return a.name > b.name;
- });
- };
-
- var cursorGetIndexNames = function(cursor) {
- return arrayGetNames(sortArrayByName(cursor.toArray()));
- };
-
- var sortArrayById = function(array) {
- return array.sort(function(a, b) {
- return a._id > b._id;
- });
- };
-
- var dropCollectionIfExists = function(coll) {
- try {
- coll.drop();
- } catch (err) {
- assert.eq(err.code, ErrorCodes.NamespaceNotFound);
- }
- };
-
- /**
- * Sets up the test database with with several sharded collections.
- *
- * @return The list of collection namespaces that were added to the test database.
- */
- var setupTestCollections = function(st) {
- // testKeys and testCollNames are parallel arrays, testKeys contains the shard key of the
- // corresponding collection whose name is in testCollNames.
- var testCollNames = ["4a1", "1a12", "3a1b1", "2a1b1c1", "b1", "b1c1", "d1"];
- var testKeys =
- [{a: 1}, {a: 1}, {a: 1, b: 1}, {a: 1, b: 1, c: 1}, {b: 1}, {b: 1, c: 1}, {d: 1}];
- var testDB = st.s.getDB("test");
-
- assert.commandWorked(st.s.adminCommand({enablesharding: testDB.getName()}));
- var testNamespaces = testCollNames.map(function(e) {
- return testDB.getName() + "." + e;
- });
- for (var i = 0; i < testKeys.length; i++) {
- assert.commandWorked(
- st.s.adminCommand({shardcollection: testNamespaces[i], key: testKeys[i]}));
- }
-
- return testNamespaces;
- };
-
- /**
- * Test that a list collections query works on the config database. This test cannot detect
- * whether list collections lists extra collections.
- */
- var testListConfigCollections = function(st) {
- // This test depends on all the collections in the configCollList being in the config
- // database.
- var configCollList = [
- "chunks",
- "collections",
- "databases",
- "lockpings",
- "locks",
- "shards",
- "tags",
- "version"
- ];
- var configDB = st.s.getDB("config");
- var userAddedColl = configDB.userAddedColl;
- var cursor;
- var cursorArray;
-
- dropCollectionIfExists(userAddedColl);
- configDB.createCollection(userAddedColl.getName());
- configCollList.push(userAddedColl.getName());
-
- cursor = getListCollectionsCursor(configDB);
- cursorArray = cursorGetCollectionNames(cursor);
- for (var i = 0; i < configCollList.length; i++) {
- assert(cursorArray.indexOf(configCollList[i]) > -1, "Missing " + configCollList[i]);
+'use strict';
+
+var getListCollectionsCursor = function(database, options, subsequentBatchSize) {
+ return new DBCommandCursor(
+ database, database.runCommand("listCollections", options), subsequentBatchSize);
+};
+
+var getListIndexesCursor = function(coll, options, subsequentBatchSize) {
+ return new DBCommandCursor(
+ coll.getDB(), coll.runCommand("listIndexes", options), subsequentBatchSize);
+};
+
+var arrayGetNames = function(array) {
+ return array.map(function(spec) {
+ return spec.name;
+ });
+};
+
+var cursorGetCollectionNames = function(cursor) {
+ return arrayGetNames(cursor.toArray());
+};
+
+var sortArrayByName = function(array) {
+ return array.sort(function(a, b) {
+ return a.name > b.name;
+ });
+};
+
+var cursorGetIndexNames = function(cursor) {
+ return arrayGetNames(sortArrayByName(cursor.toArray()));
+};
+
+var sortArrayById = function(array) {
+ return array.sort(function(a, b) {
+ return a._id > b._id;
+ });
+};
+
+var dropCollectionIfExists = function(coll) {
+ try {
+ coll.drop();
+ } catch (err) {
+ assert.eq(err.code, ErrorCodes.NamespaceNotFound);
+ }
+};
+
+/**
+ * Sets up the test database with several sharded collections.
+ *
+ * @return The list of collection namespaces that were added to the test database.
+ */
+var setupTestCollections = function(st) {
+ // testKeys and testCollNames are parallel arrays; testKeys[i] is the shard key of the
+ // collection whose name is testCollNames[i].
+ var testCollNames = ["4a1", "1a12", "3a1b1", "2a1b1c1", "b1", "b1c1", "d1"];
+ var testKeys = [{a: 1}, {a: 1}, {a: 1, b: 1}, {a: 1, b: 1, c: 1}, {b: 1}, {b: 1, c: 1}, {d: 1}];
+ var testDB = st.s.getDB("test");
+
+ assert.commandWorked(st.s.adminCommand({enablesharding: testDB.getName()}));
+ var testNamespaces = testCollNames.map(function(e) {
+ return testDB.getName() + "." + e;
+ });
+ for (var i = 0; i < testKeys.length; i++) {
+ assert.commandWorked(
+ st.s.adminCommand({shardcollection: testNamespaces[i], key: testKeys[i]}));
+ }
+
+ return testNamespaces;
+};
+
+/**
+ * Test that a list collections query works on the config database. This test cannot detect
+ * whether list collections lists extra collections.
+ */
+var testListConfigCollections = function(st) {
+ // This test depends on all the collections in the configCollList being in the config
+ // database.
+ var configCollList =
+ ["chunks", "collections", "databases", "lockpings", "locks", "shards", "tags", "version"];
+ var configDB = st.s.getDB("config");
+ var userAddedColl = configDB.userAddedColl;
+ var cursor;
+ var cursorArray;
+
+ dropCollectionIfExists(userAddedColl);
+ configDB.createCollection(userAddedColl.getName());
+ configCollList.push(userAddedColl.getName());
+
+ cursor = getListCollectionsCursor(configDB);
+ cursorArray = cursorGetCollectionNames(cursor);
+ for (var i = 0; i < configCollList.length; i++) {
+ assert(cursorArray.indexOf(configCollList[i]) > -1, "Missing " + configCollList[i]);
+ }
+
+ cursor = getListCollectionsCursor(configDB, {cursor: {batchSize: 1}}, 1);
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ assert(cursorArray.indexOf(cursor.next().name) > -1);
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ assert(cursorArray.indexOf(cursor.next().name) > -1);
+
+ assert(userAddedColl.drop());
+};
+
+/**
+ * Test that a list indexes query works on the chunks collection of the config database.
+ */
+var testListConfigChunksIndexes = function(st) {
+ // This test depends on configChunksIndexes listing exactly the indexes
+ // that exist on the config.chunks collection.
+ var configChunksIndexes = ["_id_", "ns_1_lastmod_1", "ns_1_min_1", "ns_1_shard_1_min_1"];
+ var configDB = st.s.getDB("config");
+ var cursor;
+ var cursorArray = [];
+
+ cursor = getListIndexesCursor(configDB.chunks);
+ assert.eq(cursorGetIndexNames(cursor), configChunksIndexes);
+
+ cursor = getListIndexesCursor(configDB.chunks, {cursor: {batchSize: 2}}, 2);
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ cursorArray.push(cursor.next());
+ cursorArray.push(cursor.next());
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ cursorArray.push(cursor.next());
+ cursorArray.push(cursor.next());
+ assert(!cursor.hasNext());
+ assert.eq(arrayGetNames(sortArrayByName(cursorArray)), configChunksIndexes);
+};
+
+/**
+ * Test queries over the collections collection of the config database.
+ */
+var queryConfigCollections = function(st, testNamespaces) {
+ var configDB = st.s.getDB("config");
+ var cursor;
+
+ // Find query.
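+ // The filter matches the four collections sharded on a key containing {a: 1}; sorted by _id
+ // (the namespace string) they come back as test.1a12, test.2a1b1c1, test.3a1b1, test.4a1.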
+ cursor = configDB.collections.find({"key.a": 1}, {dropped: 1, "key.a": 1, "key.c": 1})
+ .sort({"_id": 1})
+ .batchSize(2);
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ assert.eq(cursor.next(), {_id: testNamespaces[1], dropped: false, key: {a: 1}});
+ assert.eq(cursor.next(), {_id: testNamespaces[3], dropped: false, key: {a: 1, c: 1}});
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ assert.eq(cursor.next(), {_id: testNamespaces[2], dropped: false, key: {a: 1}});
+ assert.eq(cursor.next(), {_id: testNamespaces[0], dropped: false, key: {a: 1}});
+ assert(!cursor.hasNext());
+
+ // Aggregate query.
+ cursor = configDB.collections.aggregate(
+ [
+ {$match: {"key.b": 1}},
+ {$sort: {"_id": 1}},
+ {$project: {"keyb": "$key.b", "keyc": "$key.c"}}
+ ],
+ {cursor: {batchSize: 2}});
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ assert.eq(cursor.next(), {_id: testNamespaces[3], keyb: 1, keyc: 1});
+ assert.eq(cursor.next(), {_id: testNamespaces[2], keyb: 1});
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ assert.eq(cursor.next(), {_id: testNamespaces[4], keyb: 1});
+ assert.eq(cursor.next(), {_id: testNamespaces[5], keyb: 1, keyc: 1});
+ assert(!cursor.hasNext());
+};
+
+/**
+ * Test queries over the chunks collection of the config database.
+ */
+var queryConfigChunks = function(st) {
+ var configDB = st.s.getDB("config");
+ var testDB = st.s.getDB("test2");
+ var testColl = testDB.testColl;
+ var testCollData = [{e: 1}, {e: 3}, {e: 4}, {e: 5}, {e: 7}, {e: 9}, {e: 10}, {e: 12}];
+ var cursor;
+ var result;
+
+ // Get shard names.
+ cursor = configDB.shards.find().sort({_id: 1});
+ var shard1 = cursor.next()._id;
+ var shard2 = cursor.next()._id;
+ assert(!cursor.hasNext());
+ assert.commandWorked(st.s.adminCommand({enablesharding: testDB.getName()}));
+ st.ensurePrimaryShard(testDB.getName(), shard1);
+
+ // Setup.
+ assert.commandWorked(st.s.adminCommand({shardcollection: testColl.getFullName(), key: {e: 1}}));
+ for (var i = 0; i < testCollData.length; i++) {
+ assert.writeOK(testColl.insert(testCollData[i]));
+ }
+ assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 2}}));
+ assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 6}}));
+ assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 8}}));
+ assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 11}}));
+ assert.commandWorked(
+ st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 1}, to: shard2}));
+ assert.commandWorked(
+ st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 9}, to: shard2}));
+ assert.commandWorked(
+ st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 12}, to: shard2}));
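+ // After these splits and moves, shard1 owns the chunks [2, 6) and [6, 8), while shard2 owns
+ // (MinKey, 2), [8, 11) and [11, MaxKey), matching the assertions below.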
+
+ // Find query.
+ cursor = configDB.chunks.find({ns: testColl.getFullName()}, {_id: 0, min: 1, max: 1, shard: 1})
+ .sort({"min.e": 1});
+ assert.eq(cursor.next(), {min: {e: {"$minKey": 1}}, "max": {"e": 2}, shard: shard2});
+ assert.eq(cursor.next(), {min: {e: 2}, max: {e: 6}, shard: shard1});
+ assert.eq(cursor.next(), {min: {e: 6}, max: {e: 8}, shard: shard1});
+ assert.eq(cursor.next(), {min: {e: 8}, max: {e: 11}, shard: shard2});
+ assert.eq(cursor.next(), {min: {e: 11}, max: {e: {"$maxKey": 1}}, shard: shard2});
+ assert(!cursor.hasNext());
+
+ // Count query with filter.
+ assert.eq(configDB.chunks.count({ns: testColl.getFullName()}), 5);
+
+ // Distinct query.
+ assert.eq(configDB.chunks.distinct("shard").sort(), [shard1, shard2]);
+
+ // Map reduce query.
+ var mapFunction = function() {
+ if (this.ns == "test2.testColl") {
+ emit(this.shard, 1);
}
-
- cursor = getListCollectionsCursor(configDB, {cursor: {batchSize: 1}}, 1);
- assert.eq(cursor.objsLeftInBatch(), 1);
- assert(cursorArray.indexOf(cursor.next().name) > -1);
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 1);
- assert(cursorArray.indexOf(cursor.next().name) > -1);
-
- assert(userAddedColl.drop());
};
-
- /**
- * Test that a list indexes query works on the chunks collection of the config database.
- */
- var testListConfigChunksIndexes = function(st) {
- // This test depends on all the indexes in the configChunksIndexes being the exact indexes
- // in the config chunks collection.
- var configChunksIndexes = ["_id_", "ns_1_lastmod_1", "ns_1_min_1", "ns_1_shard_1_min_1"];
- var configDB = st.s.getDB("config");
- var cursor;
- var cursorArray = [];
-
- cursor = getListIndexesCursor(configDB.chunks);
- assert.eq(cursorGetIndexNames(cursor), configChunksIndexes);
-
- cursor = getListIndexesCursor(configDB.chunks, {cursor: {batchSize: 2}}, 2);
- assert.eq(cursor.objsLeftInBatch(), 2);
- cursorArray.push(cursor.next());
- cursorArray.push(cursor.next());
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 2);
- cursorArray.push(cursor.next());
- cursorArray.push(cursor.next());
- assert(!cursor.hasNext());
- assert.eq(arrayGetNames(sortArrayByName(cursorArray)), configChunksIndexes);
+ var reduceFunction = function(key, values) {
+ return {chunks: values.length};
};
-
- /**
- * Test queries over the collections collection of the config database.
- */
- var queryConfigCollections = function(st, testNamespaces) {
- var configDB = st.s.getDB("config");
- var cursor;
-
- // Find query.
- cursor = configDB.collections.find({"key.a": 1}, {dropped: 1, "key.a": 1, "key.c": 1})
- .sort({"_id": 1})
- .batchSize(2);
- assert.eq(cursor.objsLeftInBatch(), 2);
- assert.eq(cursor.next(), {_id: testNamespaces[1], dropped: false, key: {a: 1}});
- assert.eq(cursor.next(), {_id: testNamespaces[3], dropped: false, key: {a: 1, c: 1}});
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 2);
- assert.eq(cursor.next(), {_id: testNamespaces[2], dropped: false, key: {a: 1}});
- assert.eq(cursor.next(), {_id: testNamespaces[0], dropped: false, key: {a: 1}});
- assert(!cursor.hasNext());
-
- // Aggregate query.
- cursor = configDB.collections.aggregate(
- [
- {$match: {"key.b": 1}},
- {$sort: {"_id": 1}},
- {$project: {"keyb": "$key.b", "keyc": "$key.c"}}
- ],
- {cursor: {batchSize: 2}});
- assert.eq(cursor.objsLeftInBatch(), 2);
- assert.eq(cursor.next(), {_id: testNamespaces[3], keyb: 1, keyc: 1});
- assert.eq(cursor.next(), {_id: testNamespaces[2], keyb: 1});
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 2);
- assert.eq(cursor.next(), {_id: testNamespaces[4], keyb: 1});
- assert.eq(cursor.next(), {_id: testNamespaces[5], keyb: 1, keyc: 1});
- assert(!cursor.hasNext());
+ result = configDB.chunks.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
+ assert.eq(result.ok, 1);
+ assert.eq(sortArrayById(result.results),
+ [{_id: shard1, value: {chunks: 2}}, {_id: shard2, value: {chunks: 3}}]);
+};
+
+/**
+ * Test queries over a user created collection of an arbitrary database on the config servers.
+ */
+var queryUserCreated = function(database) {
+ var userColl = database.userColl;
+ var userCollData = [
+ {_id: 1, g: 1, c: 4, s: "c", u: [1, 2]},
+ {_id: 2, g: 1, c: 5, s: "b", u: [1]},
+ {_id: 3, g: 2, c: 16, s: "g", u: [3]},
+ {_id: 4, g: 2, c: 1, s: "a", u: [2, 4]},
+ {_id: 5, g: 2, c: 18, s: "d", u: [3]},
+ {_id: 6, g: 3, c: 11, s: "e", u: [2, 3]},
+ {_id: 7, g: 3, c: 2, s: "f", u: [1]}
+ ];
+ var userCollIndexes = ["_id_", "s_1"];
+ var cursor;
+ var cursorArray;
+ var result;
+
+ // Setup.
+ dropCollectionIfExists(userColl);
+ for (var i = 0; i < userCollData.length; i++) {
+ assert.writeOK(userColl.insert(userCollData[i]));
+ }
+ assert.commandWorked(userColl.createIndex({s: 1}));
+
+ // List indexes.
+ cursorArray = [];
+ cursor = getListIndexesCursor(userColl, {cursor: {batchSize: 1}}, 1);
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ cursorArray.push(cursor.next());
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ cursorArray.push(cursor.next());
+ assert(!cursor.hasNext());
+ assert.eq(arrayGetNames(sortArrayByName(cursorArray)), userCollIndexes);
+
+ // Find query.
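+ // With the {g: {$gte: 2}} filter and the sort on "s", the matching docs return in _id order 4, 5, 6, 7, 3.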
+ cursor = userColl.find({g: {$gte: 2}}, {_id: 0, c: 1}).sort({s: 1}).batchSize(2);
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ assert.eq(cursor.next(), {c: 1});
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ assert.eq(cursor.next(), {c: 18});
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ assert.eq(cursor.next(), {c: 11});
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ assert.eq(cursor.next(), {c: 2});
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ assert.eq(cursor.next(), {c: 16});
+ assert(!cursor.hasNext());
+
+ // Aggregate query.
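+ // Unwinds "u" and sums "c" per element; e.g. the group for u = 3 covers docs 3, 5 and 6, so 16 + 18 + 11 = 45.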
+ cursor = userColl.aggregate(
+ [
+ {$match: {c: {$gt: 1}}},
+ {$unwind: "$u"},
+ {$group: {_id: "$u", sum: {$sum: "$c"}}},
+ {$sort: {_id: 1}}
+ ],
+ {cursor: {batchSize: 2}});
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ assert.eq(cursor.next(), {_id: 1, sum: 11});
+ assert.eq(cursor.next(), {_id: 2, sum: 15});
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ assert.eq(cursor.next(), {_id: 3, sum: 45});
+ assert(!cursor.hasNext());
+
+ // Count query without filter.
+ assert.eq(userColl.count(), userCollData.length);
+
+ // Count query with filter.
+ assert.eq(userColl.count({g: 2}), 3);
+
+ // Distinct query.
+ assert.eq(userColl.distinct("g").sort(), [1, 2, 3]);
+
+ // Map reduce query.
+ var mapFunction = function() {
+ emit(this.g, 1);
};
-
- /**
- * Test queries over the chunks collection of the config database.
- */
- var queryConfigChunks = function(st) {
- var configDB = st.s.getDB("config");
- var testDB = st.s.getDB("test2");
- var testColl = testDB.testColl;
- var testCollData = [{e: 1}, {e: 3}, {e: 4}, {e: 5}, {e: 7}, {e: 9}, {e: 10}, {e: 12}];
- var cursor;
- var result;
-
- // Get shard names.
- cursor = configDB.shards.find().sort({_id: 1});
- var shard1 = cursor.next()._id;
- var shard2 = cursor.next()._id;
- assert(!cursor.hasNext());
- assert.commandWorked(st.s.adminCommand({enablesharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), shard1);
-
- // Setup.
- assert.commandWorked(
- st.s.adminCommand({shardcollection: testColl.getFullName(), key: {e: 1}}));
- for (var i = 0; i < testCollData.length; i++) {
- assert.writeOK(testColl.insert(testCollData[i]));
- }
- assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 2}}));
- assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 6}}));
- assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 8}}));
- assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 11}}));
- assert.commandWorked(
- st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 1}, to: shard2}));
- assert.commandWorked(
- st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 9}, to: shard2}));
- assert.commandWorked(
- st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 12}, to: shard2}));
-
- // Find query.
- cursor =
- configDB.chunks.find({ns: testColl.getFullName()}, {_id: 0, min: 1, max: 1, shard: 1})
- .sort({"min.e": 1});
- assert.eq(cursor.next(), {min: {e: {"$minKey": 1}}, "max": {"e": 2}, shard: shard2});
- assert.eq(cursor.next(), {min: {e: 2}, max: {e: 6}, shard: shard1});
- assert.eq(cursor.next(), {min: {e: 6}, max: {e: 8}, shard: shard1});
- assert.eq(cursor.next(), {min: {e: 8}, max: {e: 11}, shard: shard2});
- assert.eq(cursor.next(), {min: {e: 11}, max: {e: {"$maxKey": 1}}, shard: shard2});
- assert(!cursor.hasNext());
-
- // Count query with filter.
- assert.eq(configDB.chunks.count({ns: testColl.getFullName()}), 5);
-
- // Distinct query.
- assert.eq(configDB.chunks.distinct("shard").sort(), [shard1, shard2]);
-
- // Map reduce query.
- var mapFunction = function() {
- if (this.ns == "test2.testColl") {
- emit(this.shard, 1);
- }
- };
- var reduceFunction = function(key, values) {
- return {chunks: values.length};
- };
- result = configDB.chunks.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
- assert.eq(result.ok, 1);
- assert.eq(sortArrayById(result.results),
- [{_id: shard1, value: {chunks: 2}}, {_id: shard2, value: {chunks: 3}}]);
+ var reduceFunction = function(key, values) {
+ return {count: values.length};
};
-
- /**
- * Test queries over a user created collection of an arbitrary database on the config servers.
- */
- var queryUserCreated = function(database) {
- var userColl = database.userColl;
- var userCollData = [
- {_id: 1, g: 1, c: 4, s: "c", u: [1, 2]},
- {_id: 2, g: 1, c: 5, s: "b", u: [1]},
- {_id: 3, g: 2, c: 16, s: "g", u: [3]},
- {_id: 4, g: 2, c: 1, s: "a", u: [2, 4]},
- {_id: 5, g: 2, c: 18, s: "d", u: [3]},
- {_id: 6, g: 3, c: 11, s: "e", u: [2, 3]},
- {_id: 7, g: 3, c: 2, s: "f", u: [1]}
- ];
- var userCollIndexes = ["_id_", "s_1"];
- var cursor;
- var cursorArray;
- var result;
-
- // Setup.
- dropCollectionIfExists(userColl);
- for (var i = 0; i < userCollData.length; i++) {
- assert.writeOK(userColl.insert(userCollData[i]));
- }
- assert.commandWorked(userColl.createIndex({s: 1}));
-
- // List indexes.
- cursorArray = [];
- cursor = getListIndexesCursor(userColl, {cursor: {batchSize: 1}}, 1);
- assert.eq(cursor.objsLeftInBatch(), 1);
- cursorArray.push(cursor.next());
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 1);
- cursorArray.push(cursor.next());
- assert(!cursor.hasNext());
- assert.eq(arrayGetNames(sortArrayByName(cursorArray)), userCollIndexes);
-
- // Find query.
- cursor = userColl.find({g: {$gte: 2}}, {_id: 0, c: 1}).sort({s: 1}).batchSize(2);
- assert.eq(cursor.objsLeftInBatch(), 2);
- assert.eq(cursor.next(), {c: 1});
- assert.eq(cursor.objsLeftInBatch(), 1);
- assert.eq(cursor.next(), {c: 18});
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 2);
- assert.eq(cursor.next(), {c: 11});
- assert.eq(cursor.objsLeftInBatch(), 1);
- assert.eq(cursor.next(), {c: 2});
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 1);
- assert.eq(cursor.next(), {c: 16});
- assert(!cursor.hasNext());
-
- // Aggregate query.
- cursor = userColl.aggregate(
- [
- {$match: {c: {$gt: 1}}},
- {$unwind: "$u"},
- {$group: {_id: "$u", sum: {$sum: "$c"}}},
- {$sort: {_id: 1}}
- ],
- {cursor: {batchSize: 2}});
- assert.eq(cursor.objsLeftInBatch(), 2);
- assert.eq(cursor.next(), {_id: 1, sum: 11});
- assert.eq(cursor.next(), {_id: 2, sum: 15});
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 1);
- assert.eq(cursor.next(), {_id: 3, sum: 45});
- assert(!cursor.hasNext());
-
- // Count query without filter.
- assert.eq(userColl.count(), userCollData.length);
-
- // Count query with filter.
- assert.eq(userColl.count({g: 2}), 3);
-
- // Distinct query.
- assert.eq(userColl.distinct("g").sort(), [1, 2, 3]);
-
- // Map reduce query.
- var mapFunction = function() {
- emit(this.g, 1);
- };
- var reduceFunction = function(key, values) {
- return {count: values.length};
- };
- result = userColl.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
- assert.eq(result.ok, 1);
- assert.eq(sortArrayById(result.results), [
- {_id: 1, value: {count: 2}},
- {_id: 2, value: {count: 3}},
- {_id: 3, value: {count: 2}}
- ]);
-
- assert(userColl.drop());
- };
-
- var st = new ShardingTest({shards: 2, mongos: 1});
- var testNamespaces = setupTestCollections(st);
- var configDB = st.s.getDB("config");
- var adminDB = st.s.getDB("admin");
-
- testListConfigCollections(st);
- testListConfigChunksIndexes(st);
- queryConfigCollections(st, testNamespaces);
- queryConfigChunks(st);
- queryUserCreated(configDB);
- queryUserCreated(adminDB);
- st.stop();
+ result = userColl.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
+ assert.eq(result.ok, 1);
+ assert.eq(
+ sortArrayById(result.results),
+ [{_id: 1, value: {count: 2}}, {_id: 2, value: {count: 3}}, {_id: 3, value: {count: 2}}]);
+
+ assert(userColl.drop());
+};
+
+var st = new ShardingTest({shards: 2, mongos: 1});
+var testNamespaces = setupTestCollections(st);
+var configDB = st.s.getDB("config");
+var adminDB = st.s.getDB("admin");
+
+testListConfigCollections(st);
+testListConfigChunksIndexes(st);
+queryConfigCollections(st, testNamespaces);
+queryConfigChunks(st);
+queryUserCreated(configDB);
+queryUserCreated(adminDB);
+st.stop();
})();
diff --git a/jstests/sharding/range_deleter_does_not_block_stepdown_with_prepare_conflict.js b/jstests/sharding/range_deleter_does_not_block_stepdown_with_prepare_conflict.js
index 1e3f85f499b..60c654404e5 100644
--- a/jstests/sharding/range_deleter_does_not_block_stepdown_with_prepare_conflict.js
+++ b/jstests/sharding/range_deleter_does_not_block_stepdown_with_prepare_conflict.js
@@ -15,67 +15,66 @@
* @tags: [uses_transactions, uses_multi_shard_transaction]
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
- TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
- // Helper to add generic txn fields to a command.
- function addTxnFieldsToCmd(cmd, lsid, txnNumber) {
- return Object.extend(
- cmd, {lsid, txnNumber: NumberLong(txnNumber), stmtId: NumberInt(0), autocommit: false});
- }
+// Helper to add generic txn fields to a command.
+function addTxnFieldsToCmd(cmd, lsid, txnNumber) {
+ return Object.extend(
+ cmd, {lsid, txnNumber: NumberLong(txnNumber), stmtId: NumberInt(0), autocommit: false});
+}
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
- const st = new ShardingTest({shards: [{verbose: 1}, {verbose: 1}], config: 1});
+const st = new ShardingTest({shards: [{verbose: 1}, {verbose: 1}], config: 1});
- // Set up sharded collection with two chunks - [-inf, 0), [0, inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+// Set up sharded collection with two chunks - [-inf, 0), [0, inf)
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- st.rs0.getPrimary().adminCommand(
- {configureFailPoint: 'suspendRangeDeletion', mode: 'alwaysOn'});
- // Move a chunk away from Shard0 (the donor) so its range deleter will asynchronously delete the
- // chunk's range. Flush its metadata to avoid StaleConfig during the later transaction.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: st.shard1.shardName}));
- assert.commandWorked(st.rs0.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
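+// Pause the range deleter so the migrated chunk's documents are not removed until we allow it below.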
+st.rs0.getPrimary().adminCommand({configureFailPoint: 'suspendRangeDeletion', mode: 'alwaysOn'});
+// Move a chunk away from Shard0 (the donor) so its range deleter will asynchronously delete the
+// chunk's range. Flush its metadata to avoid StaleConfig during the later transaction.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: st.shard1.shardName}));
+assert.commandWorked(st.rs0.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
- // Insert a doc into the chunk still owned by the donor shard in a transaction then prepare the
- // transaction so readers of that doc will enter a prepare conflict retry loop.
- const lsid = {id: UUID()};
- const txnNumber = 0;
- assert.commandWorked(st.s.getDB(dbName).runCommand(addTxnFieldsToCmd(
- {insert: collName, documents: [{_id: -5}], startTransaction: true}, lsid, txnNumber)));
+// Insert a doc into the chunk still owned by the donor shard in a transaction, then prepare the
+// transaction so readers of that doc will enter a prepare conflict retry loop.
+const lsid = {
+ id: UUID()
+};
+const txnNumber = 0;
+assert.commandWorked(st.s.getDB(dbName).runCommand(addTxnFieldsToCmd(
+ {insert: collName, documents: [{_id: -5}], startTransaction: true}, lsid, txnNumber)));
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- addTxnFieldsToCmd({prepareTransaction: 1}, lsid, txnNumber)));
+assert.commandWorked(
+ st.rs0.getPrimary().adminCommand(addTxnFieldsToCmd({prepareTransaction: 1}, lsid, txnNumber)));
- // Set a failpoint to hang right after beginning the index scan for documents to delete.
- st.rs0.getPrimary().adminCommand(
- {configureFailPoint: 'hangBeforeDoingDeletion', mode: 'alwaysOn'});
+// Set a failpoint to hang right after beginning the index scan for documents to delete.
+st.rs0.getPrimary().adminCommand({configureFailPoint: 'hangBeforeDoingDeletion', mode: 'alwaysOn'});
- // Allow the range deleter to run. It should get stuck in a prepare conflict retry loop.
- st.rs0.getPrimary().adminCommand({configureFailPoint: 'suspendRangeDeletion', mode: 'off'});
+// Allow the range deleter to run. It should get stuck in a prepare conflict retry loop.
+st.rs0.getPrimary().adminCommand({configureFailPoint: 'suspendRangeDeletion', mode: 'off'});
- // Wait until we've started the index scan to delete documents.
- waitForFailpoint("Hit hangBeforeDoingDeletion failpoint", 1);
+// Wait until we've started the index scan to delete documents.
+waitForFailpoint("Hit hangBeforeDoingDeletion failpoint", 1);
- // Let the deletion continue.
- st.rs0.getPrimary().adminCommand({configureFailPoint: 'hangBeforeDoingDeletion', mode: 'off'});
+// Let the deletion continue.
+st.rs0.getPrimary().adminCommand({configureFailPoint: 'hangBeforeDoingDeletion', mode: 'off'});
- // Attempt to step down the primary.
- assert.commandWorked(st.rs0.getPrimary().adminCommand({replSetStepDown: 5, force: true}));
+// Attempt to step down the primary.
+assert.commandWorked(st.rs0.getPrimary().adminCommand({replSetStepDown: 5, force: true}));
- // Cleanup the transaction so the sharding test can shut down.
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- addTxnFieldsToCmd({abortTransaction: 1}, lsid, txnNumber)));
+// Clean up the transaction so the sharding test can shut down.
+assert.commandWorked(
+ st.rs0.getPrimary().adminCommand(addTxnFieldsToCmd({abortTransaction: 1}, lsid, txnNumber)));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/read_after_optime.js b/jstests/sharding/read_after_optime.js
index ce8c5201179..11be1022efd 100644
--- a/jstests/sharding/read_after_optime.js
+++ b/jstests/sharding/read_after_optime.js
@@ -1,49 +1,49 @@
// Test read after opTime functionality with maxTimeMS on config servers (CSRS only).
(function() {
- 'use strict';
+'use strict';
- var shardingTest = new ShardingTest({shards: 0});
- assert(shardingTest.configRS, 'this test requires config servers to run in CSRS mode');
+var shardingTest = new ShardingTest({shards: 0});
+assert(shardingTest.configRS, 'this test requires config servers to run in CSRS mode');
- var configReplSetTest = shardingTest.configRS;
- var primaryConn = configReplSetTest.getPrimary();
+var configReplSetTest = shardingTest.configRS;
+var primaryConn = configReplSetTest.getPrimary();
- var lastOp = configReplSetTest.awaitLastOpCommitted();
- assert(lastOp, 'invalid op returned from ReplSetTest.awaitLastOpCommitted()');
+var lastOp = configReplSetTest.awaitLastOpCommitted();
+assert(lastOp, 'invalid op returned from ReplSetTest.awaitLastOpCommitted()');
- var config = configReplSetTest.getReplSetConfigFromNode();
- var term = lastOp.t;
+var config = configReplSetTest.getReplSetConfigFromNode();
+var term = lastOp.t;
- var runFindCommand = function(ts) {
- return primaryConn.getDB('local').runCommand({
- find: 'oplog.rs',
- readConcern: {
- afterOpTime: {
- ts: ts,
- t: term,
- },
+var runFindCommand = function(ts) {
+ return primaryConn.getDB('local').runCommand({
+ find: 'oplog.rs',
+ readConcern: {
+ afterOpTime: {
+ ts: ts,
+ t: term,
},
- maxTimeMS: 5000,
- });
- };
-
- assert.commandWorked(runFindCommand(lastOp.ts));
-
- var pingIntervalSeconds = 10;
- assert.commandFailedWithCode(
- runFindCommand(new Timestamp(lastOp.ts.getTime() + pingIntervalSeconds * 5, 0)),
- ErrorCodes.MaxTimeMSExpired);
-
- var msg = 'Command on database local timed out waiting for read concern to be satisfied.';
- assert.soon(function() {
- var logMessages = assert.commandWorked(primaryConn.adminCommand({getLog: 'global'})).log;
- for (var i = 0; i < logMessages.length; i++) {
- if (logMessages[i].indexOf(msg) != -1) {
- return true;
- }
+ },
+ maxTimeMS: 5000,
+ });
+};
+
+assert.commandWorked(runFindCommand(lastOp.ts));
+
+var pingIntervalSeconds = 10;
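+// Request an opTime far enough in the future that it cannot be satisfied before maxTimeMS expires.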
+assert.commandFailedWithCode(
+ runFindCommand(new Timestamp(lastOp.ts.getTime() + pingIntervalSeconds * 5, 0)),
+ ErrorCodes.MaxTimeMSExpired);
+
+var msg = 'Command on database local timed out waiting for read concern to be satisfied.';
+assert.soon(function() {
+ var logMessages = assert.commandWorked(primaryConn.adminCommand({getLog: 'global'})).log;
+ for (var i = 0; i < logMessages.length; i++) {
+ if (logMessages[i].indexOf(msg) != -1) {
+ return true;
}
- return false;
- }, 'Did not see any log entries containing the following message: ' + msg, 60000, 300);
- shardingTest.stop();
+ }
+ return false;
+}, 'Did not see any log entries containing the following message: ' + msg, 60000, 300);
+shardingTest.stop();
})();
diff --git a/jstests/sharding/read_committed_lookup.js b/jstests/sharding/read_committed_lookup.js
index 4ecfd0c6e1f..72046bbf260 100644
--- a/jstests/sharding/read_committed_lookup.js
+++ b/jstests/sharding/read_committed_lookup.js
@@ -8,59 +8,58 @@ load("jstests/libs/read_committed_lib.js"); // For testReadCommittedLookup
(function() {
- // Manually create a shard.
- const replSetName = "lookup_read_majority";
- let rst = new ReplSetTest({
- nodes: 3,
- name: replSetName,
- nodeOptions: {
- enableMajorityReadConcern: "",
- shardsvr: "",
- }
- });
-
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- rst.stopSet();
- return;
+// Manually create a shard.
+const replSetName = "lookup_read_majority";
+let rst = new ReplSetTest({
+ nodes: 3,
+ name: replSetName,
+ nodeOptions: {
+ enableMajorityReadConcern: "",
+ shardsvr: "",
}
+});
- const nodes = rst.nodeList();
- const config = {
- _id: replSetName,
- members: [
- {_id: 0, host: nodes[0]},
- {_id: 1, host: nodes[1], priority: 0},
- {_id: 2, host: nodes[2], arbiterOnly: true},
- ]
- };
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ rst.stopSet();
+ return;
+}
- rst.initiate(config);
+const nodes = rst.nodeList();
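+// Node 1 has priority 0 and node 2 is an arbiter, so node 0 is the only electable member and stays primary.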
+const config = {
+ _id: replSetName,
+ members: [
+ {_id: 0, host: nodes[0]},
+ {_id: 1, host: nodes[1], priority: 0},
+ {_id: 2, host: nodes[2], arbiterOnly: true},
+ ]
+};
- let shardSecondary = rst._slaves[0];
+rst.initiate(config);
- // Confirm read committed works on a cluster with a database that is not sharding enabled.
- let st = new ShardingTest({
- manualAddShard: true,
- });
- assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
- testReadCommittedLookup(st.s.getDB("test"), shardSecondary, rst);
+let shardSecondary = rst._slaves[0];
- // Confirm read committed works on a cluster with:
- // - A sharding enabled database
- // - An unsharded local collection
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- testReadCommittedLookup(st.s.getDB("test"), shardSecondary, rst);
+// Confirm read committed works on a cluster with a database that is not sharding enabled.
+let st = new ShardingTest({
+ manualAddShard: true,
+});
+assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
+testReadCommittedLookup(st.s.getDB("test"), shardSecondary, rst);
- // Confirm read committed works on a cluster with:
- // - A sharding enabled database
- // - A sharded local collection.
- assert.commandWorked(st.s.getDB("test").runCommand(
- {createIndexes: 'local', indexes: [{name: "foreignKey_1", key: {foreignKey: 1}}]}));
- assert.commandWorked(st.s.adminCommand({shardCollection: 'test.local', key: {foreignKey: 1}}));
- testReadCommittedLookup(st.s.getDB("test"), shardSecondary, rst);
+// Confirm read committed works on a cluster with:
+// - A sharding enabled database
+// - An unsharded local collection
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+testReadCommittedLookup(st.s.getDB("test"), shardSecondary, rst);
- st.stop();
- rst.stopSet();
+// Confirm read committed works on a cluster with:
+// - A sharding enabled database
+// - A sharded local collection.
+assert.commandWorked(st.s.getDB("test").runCommand(
+ {createIndexes: 'local', indexes: [{name: "foreignKey_1", key: {foreignKey: 1}}]}));
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.local', key: {foreignKey: 1}}));
+testReadCommittedLookup(st.s.getDB("test"), shardSecondary, rst);
+st.stop();
+rst.stopSet();
})();
diff --git a/jstests/sharding/read_does_not_create_namespaces.js b/jstests/sharding/read_does_not_create_namespaces.js
index 8ee48576ba1..21e50372c6b 100644
--- a/jstests/sharding/read_does_not_create_namespaces.js
+++ b/jstests/sharding/read_does_not_create_namespaces.js
@@ -2,15 +2,14 @@
// cause entries to be created in the catalog.
(function() {
- var shardingTest = new ShardingTest({name: 'read_does_not_create_namespaces', shards: 1});
- var db = shardingTest.getDB('NonExistentDB');
+var shardingTest = new ShardingTest({name: 'read_does_not_create_namespaces', shards: 1});
+var db = shardingTest.getDB('NonExistentDB');
- assert.isnull(db.nonExistentColl.findOne({}));
+assert.isnull(db.nonExistentColl.findOne({}));
- // Neither the database nor the collection should have been created
- assert.isnull(shardingTest.getDB('config').databases.findOne({_id: 'NonExistentDB'}));
- assert.eq(-1, shardingTest.shard0.getDBNames().indexOf('NonExistentDB'));
-
- shardingTest.stop();
+// Neither the database nor the collection should have been created
+assert.isnull(shardingTest.getDB('config').databases.findOne({_id: 'NonExistentDB'}));
+assert.eq(-1, shardingTest.shard0.getDBNames().indexOf('NonExistentDB'));
+shardingTest.stop();
})();
diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js
index 99e662d57b1..454cc2fb6dd 100644
--- a/jstests/sharding/read_pref.js
+++ b/jstests/sharding/read_pref.js
@@ -7,204 +7,202 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- var PRI_TAG = {dc: 'ny'};
- var SEC_TAGS = [{dc: 'sf', s: "1"}, {dc: 'ma', s: "2"}, {dc: 'eu', s: "3"}, {dc: 'jp', s: "4"}];
- var NODES = SEC_TAGS.length + 1;
+var PRI_TAG = {dc: 'ny'};
+var SEC_TAGS = [{dc: 'sf', s: "1"}, {dc: 'ma', s: "2"}, {dc: 'eu', s: "3"}, {dc: 'jp', s: "4"}];
+var NODES = SEC_TAGS.length + 1;
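+// One primary tagged {dc: 'ny'} plus four secondaries, each tagged with its own dc and "s" value.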
- var doTest = function(useDollarQuerySyntax) {
- var st =
- new ShardingTest({shards: {rs0: {nodes: NODES, oplogSize: 10, useHostName: true}}});
- var replTest = st.rs0;
- var primaryNode = replTest.getPrimary();
+var doTest = function(useDollarQuerySyntax) {
+ var st = new ShardingTest({shards: {rs0: {nodes: NODES, oplogSize: 10, useHostName: true}}});
+ var replTest = st.rs0;
+ var primaryNode = replTest.getPrimary();
- // The $-prefixed query syntax is only legal for compatibility mode reads, not for the
- // find/getMore commands.
- if (useDollarQuerySyntax && st.s.getDB("test").getMongo().useReadCommands()) {
- st.stop();
- return;
- }
+ // The $-prefixed query syntax is only legal for compatibility mode reads, not for the
+ // find/getMore commands.
+ if (useDollarQuerySyntax && st.s.getDB("test").getMongo().useReadCommands()) {
+ st.stop();
+ return;
+ }
- var setupConf = function() {
- var replConf = primaryNode.getDB('local').system.replset.findOne();
- replConf.version = (replConf.version || 0) + 1;
+ var setupConf = function() {
+ var replConf = primaryNode.getDB('local').system.replset.findOne();
+ replConf.version = (replConf.version || 0) + 1;
- var secIdx = 0;
- for (var x = 0; x < NODES; x++) {
- var node = replConf.members[x];
+ var secIdx = 0;
+ for (var x = 0; x < NODES; x++) {
+ var node = replConf.members[x];
- if (node.host == primaryNode.name) {
- node.tags = PRI_TAG;
- } else {
- node.tags = SEC_TAGS[secIdx++];
- node.priority = 0;
- }
- }
-
- try {
- primaryNode.getDB('admin').runCommand({replSetReconfig: replConf});
- } catch (x) {
- jsTest.log('Exception expected because reconfiguring would close all conn, got ' +
- x);
+ if (node.host == primaryNode.name) {
+ node.tags = PRI_TAG;
+ } else {
+ node.tags = SEC_TAGS[secIdx++];
+ node.priority = 0;
}
+ }
- return replConf;
- };
+ try {
+ primaryNode.getDB('admin').runCommand({replSetReconfig: replConf});
+ } catch (x) {
+ jsTest.log('Exception expected because reconfiguring would close all connections, got ' + x);
+ }
- var checkTag = function(nodeToCheck, tag) {
- for (var idx = 0; idx < NODES; idx++) {
- var node = replConf.members[idx];
+ return replConf;
+ };
- if (node.host == nodeToCheck) {
- jsTest.log('node[' + node.host + '], Tag: ' + tojson(node['tags']));
- jsTest.log('tagToCheck: ' + tojson(tag));
+ var checkTag = function(nodeToCheck, tag) {
+ for (var idx = 0; idx < NODES; idx++) {
+ var node = replConf.members[idx];
- var nodeTag = node['tags'];
+ if (node.host == nodeToCheck) {
+ jsTest.log('node[' + node.host + '], Tag: ' + tojson(node['tags']));
+ jsTest.log('tagToCheck: ' + tojson(tag));
- for (var key in tag) {
- assert.eq(tag[key], nodeTag[key]);
- }
+ var nodeTag = node['tags'];
- return;
+ for (var key in tag) {
+ assert.eq(tag[key], nodeTag[key]);
}
- }
-
- assert(false, 'node ' + nodeToCheck + ' not part of config!');
- };
- var replConf = setupConf();
-
- var conn = st.s;
-
- // Wait until the ReplicaSetMonitor refreshes its view and see the tags
- var replConfig = replTest.getReplSetConfigFromNode();
- replConfig.members.forEach(function(node) {
- var nodeConn = new Mongo(node.host);
- awaitRSClientHosts(conn, nodeConn, {ok: true, tags: node.tags}, replTest);
- });
- replTest.awaitReplication();
+ return;
+ }
+ }
- jsTest.log('New rs config: ' + tojson(primaryNode.getDB('local').system.replset.findOne()));
- jsTest.log('connpool: ' + tojson(conn.getDB('admin').runCommand({connPoolStats: 1})));
+ assert(false, 'node ' + nodeToCheck + ' not part of config!');
+ };
- var coll = conn.getDB('test').user;
+ var replConf = setupConf();
- assert.soon(function() {
- var res = coll.insert({x: 1}, {writeConcern: {w: NODES}});
- if (!res.hasWriteError()) {
- return true;
- }
+ var conn = st.s;
- var err = res.getWriteError().errmsg;
- // Transient transport errors may be expected b/c of the replSetReconfig
- if (err.indexOf("transport error") == -1) {
- throw err;
- }
- return false;
- });
+ // Wait until the ReplicaSetMonitor refreshes its view and sees the tags
+ var replConfig = replTest.getReplSetConfigFromNode();
+ replConfig.members.forEach(function(node) {
+ var nodeConn = new Mongo(node.host);
+ awaitRSClientHosts(conn, nodeConn, {ok: true, tags: node.tags}, replTest);
+ });
+ replTest.awaitReplication();
- var getExplain = function(readPrefMode, readPrefTags) {
- if (useDollarQuerySyntax) {
- var readPrefObj = {mode: readPrefMode};
+ jsTest.log('New rs config: ' + tojson(primaryNode.getDB('local').system.replset.findOne()));
+ jsTest.log('connpool: ' + tojson(conn.getDB('admin').runCommand({connPoolStats: 1})));
- if (readPrefTags) {
- readPrefObj.tags = readPrefTags;
- }
+ var coll = conn.getDB('test').user;
- return coll.find({$query: {}, $readPreference: readPrefObj, $explain: true})
- .limit(-1)
- .next();
- } else {
- return coll.find().readPref(readPrefMode, readPrefTags).explain("executionStats");
- }
- };
-
- var getExplainServer = function(explain) {
- assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
- var serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
- return serverInfo.host + ":" + serverInfo.port.toString();
- };
-
- // Read pref should work without slaveOk
- var explain = getExplain("secondary");
- var explainServer = getExplainServer(explain);
- assert.neq(primaryNode.name, explainServer);
-
- conn.setSlaveOk();
-
- // It should also work with slaveOk
- explain = getExplain("secondary");
- explainServer = getExplainServer(explain);
- assert.neq(primaryNode.name, explainServer);
-
- // Check that $readPreference does not influence the actual query
- assert.eq(1, explain.executionStats.nReturned);
-
- explain = getExplain("secondaryPreferred", [{s: "2"}]);
- explainServer = getExplainServer(explain);
- checkTag(explainServer, {s: "2"});
- assert.eq(1, explain.executionStats.nReturned);
-
- // Cannot use tags with primaryOnly
- assert.throws(function() {
- getExplain("primary", [{s: "2"}]);
- });
-
- // Ok to use empty tags on primaryOnly
- explain = getExplain("primary", [{}]);
- explainServer = getExplainServer(explain);
- assert.eq(primaryNode.name, explainServer);
-
- explain = getExplain("primary", []);
- explainServer = getExplainServer(explain);
- assert.eq(primaryNode.name, explainServer);
-
- // Check that mongos will try the next tag if nothing matches the first
- explain = getExplain("secondary", [{z: "3"}, {dc: "jp"}]);
- explainServer = getExplainServer(explain);
- checkTag(explainServer, {dc: "jp"});
- assert.eq(1, explain.executionStats.nReturned);
-
- // Check that mongos will fallback to primary if none of tags given matches
- explain = getExplain("secondaryPreferred", [{z: "3"}, {dc: "ph"}]);
- explainServer = getExplainServer(explain);
- // Call getPrimary again since the primary could have changed after the restart.
- assert.eq(replTest.getPrimary().name, explainServer);
- assert.eq(1, explain.executionStats.nReturned);
-
- // Kill all members except one
- var stoppedNodes = [];
- for (var x = 0; x < NODES - 1; x++) {
- replTest.stop(x);
- stoppedNodes.push(replTest.nodes[x]);
+ assert.soon(function() {
+ var res = coll.insert({x: 1}, {writeConcern: {w: NODES}});
+ if (!res.hasWriteError()) {
+ return true;
}
- // Wait for ReplicaSetMonitor to realize nodes are down
- awaitRSClientHosts(conn, stoppedNodes, {ok: false}, replTest.name);
-
- // Wait for the last node to be in steady state -> secondary (not recovering)
- var lastNode = replTest.nodes[NODES - 1];
- awaitRSClientHosts(conn, lastNode, {ok: true, secondary: true}, replTest.name);
+ var err = res.getWriteError().errmsg;
+ // Transient transport errors may be expected because of the replSetReconfig
+ if (err.indexOf("transport error") == -1) {
+ throw err;
+ }
+ return false;
+ });
- jsTest.log('connpool: ' + tojson(conn.getDB('admin').runCommand({connPoolStats: 1})));
+ var getExplain = function(readPrefMode, readPrefTags) {
+ if (useDollarQuerySyntax) {
+ var readPrefObj = {mode: readPrefMode};
- // Test to make sure that connection is ok, in prep for priOnly test
- explain = getExplain("nearest");
- explainServer = getExplainServer(explain);
- assert.eq(explainServer, replTest.nodes[NODES - 1].name);
- assert.eq(1, explain.executionStats.nReturned);
+ if (readPrefTags) {
+ readPrefObj.tags = readPrefTags;
+ }
- // Should assert if request with priOnly but no primary
- assert.throws(function() {
- getExplain("primary");
- });
+ return coll.find({$query: {}, $readPreference: readPrefObj, $explain: true})
+ .limit(-1)
+ .next();
+ } else {
+ return coll.find().readPref(readPrefMode, readPrefTags).explain("executionStats");
+ }
+ };
- st.stop();
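+ // Returns the host:port of the shard server that executed the explained query.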
+ var getExplainServer = function(explain) {
+ assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
+ var serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
+ return serverInfo.host + ":" + serverInfo.port.toString();
};
- doTest(false);
- doTest(true);
+ // Read pref should work without slaveOk
+ var explain = getExplain("secondary");
+ var explainServer = getExplainServer(explain);
+ assert.neq(primaryNode.name, explainServer);
+
+ conn.setSlaveOk();
+
+ // It should also work with slaveOk
+ explain = getExplain("secondary");
+ explainServer = getExplainServer(explain);
+ assert.neq(primaryNode.name, explainServer);
+
+ // Check that $readPreference does not influence the actual query
+ assert.eq(1, explain.executionStats.nReturned);
+
+ explain = getExplain("secondaryPreferred", [{s: "2"}]);
+ explainServer = getExplainServer(explain);
+ checkTag(explainServer, {s: "2"});
+ assert.eq(1, explain.executionStats.nReturned);
+
+ // Cannot use tags with primaryOnly
+ assert.throws(function() {
+ getExplain("primary", [{s: "2"}]);
+ });
+
+ // Ok to use empty tags on primaryOnly
+ explain = getExplain("primary", [{}]);
+ explainServer = getExplainServer(explain);
+ assert.eq(primaryNode.name, explainServer);
+
+ explain = getExplain("primary", []);
+ explainServer = getExplainServer(explain);
+ assert.eq(primaryNode.name, explainServer);
+
+ // Check that mongos will try the next tag if nothing matches the first
+ explain = getExplain("secondary", [{z: "3"}, {dc: "jp"}]);
+ explainServer = getExplainServer(explain);
+ checkTag(explainServer, {dc: "jp"});
+ assert.eq(1, explain.executionStats.nReturned);
+
+ // Check that mongos will fallback to primary if none of tags given matches
+ // Check that mongos will fall back to the primary if none of the given tags matches
+ explainServer = getExplainServer(explain);
+ // Call getPrimary again since the primary could have changed after the restart.
+ assert.eq(replTest.getPrimary().name, explainServer);
+ assert.eq(1, explain.executionStats.nReturned);
+
+ // Kill all members except one
+ var stoppedNodes = [];
+ for (var x = 0; x < NODES - 1; x++) {
+ replTest.stop(x);
+ stoppedNodes.push(replTest.nodes[x]);
+ }
+
+ // Wait for ReplicaSetMonitor to realize nodes are down
+ awaitRSClientHosts(conn, stoppedNodes, {ok: false}, replTest.name);
+
+ // Wait for the last node to be in steady state -> secondary (not recovering)
+ var lastNode = replTest.nodes[NODES - 1];
+ awaitRSClientHosts(conn, lastNode, {ok: true, secondary: true}, replTest.name);
+
+ jsTest.log('connpool: ' + tojson(conn.getDB('admin').runCommand({connPoolStats: 1})));
+
+ // Test to make sure that connection is ok, in prep for priOnly test
+ explain = getExplain("nearest");
+ explainServer = getExplainServer(explain);
+ assert.eq(explainServer, replTest.nodes[NODES - 1].name);
+ assert.eq(1, explain.executionStats.nReturned);
+
+ // Should assert if request with priOnly but no primary
+ assert.throws(function() {
+ getExplain("primary");
+ });
+
+ st.stop();
+};
+
+doTest(false);
+doTest(true);
})();
diff --git a/jstests/sharding/read_pref_cmd.js b/jstests/sharding/read_pref_cmd.js
index a103338751f..e41a3c0b670 100644
--- a/jstests/sharding/read_pref_cmd.js
+++ b/jstests/sharding/read_pref_cmd.js
@@ -198,10 +198,7 @@ var testReadPreference = function(conn, hostList, isMongos, mode, tagSets, secEx
aggregate: 1,
pipeline: [
{$currentOp: {}},
- {
- $lookup:
- {from: "dummy", localField: "dummy", foreignField: "dummy", as: "dummy"}
- }
+ {$lookup: {from: "dummy", localField: "dummy", foreignField: "dummy", as: "dummy"}}
],
comment: curOpComment,
cursor: {}
@@ -251,7 +248,6 @@ var testBadMode = function(conn, hostList, isMongos, mode, tagSets) {
};
var testAllModes = function(conn, hostList, isMongos) {
-
// The primary is tagged with { tag: 'one' } and the secondary with
// { tag: 'two' } so we can test the interaction of modes and tags. Test
// a bunch of combinations.
diff --git a/jstests/sharding/read_pref_multi_mongos_stale_config.js b/jstests/sharding/read_pref_multi_mongos_stale_config.js
index c00d202cdc0..b451b976d39 100644
--- a/jstests/sharding/read_pref_multi_mongos_stale_config.js
+++ b/jstests/sharding/read_pref_multi_mongos_stale_config.js
@@ -1,41 +1,40 @@
// Tests that a mongos will correctly retry a stale shard version when read preference is used
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({
- shards: {rs0: {quiet: ''}, rs1: {quiet: ''}},
- mongos: 2,
- other: {mongosOptions: {verbose: 2}}
- });
+var st = new ShardingTest({
+ shards: {rs0: {quiet: ''}, rs1: {quiet: ''}},
+ mongos: 2,
+ other: {mongosOptions: {verbose: 2}}
+});
- var testDB1 = st.s0.getDB('test');
- var testDB2 = st.s1.getDB('test');
+var testDB1 = st.s0.getDB('test');
+var testDB2 = st.s1.getDB('test');
- // Trigger a query on mongos 1 so it will have a view of test.user as being unsharded.
- testDB1.user.findOne();
+// Trigger a query on mongos 1 so it will have a view of test.user as being unsharded.
+testDB1.user.findOne();
- assert.commandWorked(testDB2.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(testDB2.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(testDB2.adminCommand({split: 'test.user', middle: {x: 100}}));
+assert.commandWorked(testDB2.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(testDB2.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+assert.commandWorked(testDB2.adminCommand({split: 'test.user', middle: {x: 100}}));
- var configDB2 = st.s1.getDB('config');
- var chunkToMove = configDB2.chunks.find().sort({min: 1}).next();
- var toShard = configDB2.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
- assert.commandWorked(
- testDB2.adminCommand({moveChunk: 'test.user', to: toShard, find: {x: 50}}));
+var configDB2 = st.s1.getDB('config');
+var chunkToMove = configDB2.chunks.find().sort({min: 1}).next();
+var toShard = configDB2.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
+assert.commandWorked(testDB2.adminCommand({moveChunk: 'test.user', to: toShard, find: {x: 50}}));
- // Insert a document into each chunk
- assert.writeOK(testDB2.user.insert({x: 30}));
- assert.writeOK(testDB2.user.insert({x: 130}));
+// Insert a document into each chunk
+assert.writeOK(testDB2.user.insert({x: 30}));
+assert.writeOK(testDB2.user.insert({x: 130}));
- // The testDB1 mongos does not know the chunk has been moved, and will retry
- var cursor = testDB1.user.find({x: 30}).readPref('primary');
- assert(cursor.hasNext());
- assert.eq(30, cursor.next().x);
+// The testDB1 mongos does not know the chunk has been moved, and will retry
+var cursor = testDB1.user.find({x: 30}).readPref('primary');
+assert(cursor.hasNext());
+assert.eq(30, cursor.next().x);
- cursor = testDB1.user.find({x: 130}).readPref('primary');
- assert(cursor.hasNext());
- assert.eq(130, cursor.next().x);
+cursor = testDB1.user.find({x: 130}).readPref('primary');
+assert(cursor.hasNext());
+assert.eq(130, cursor.next().x);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js
index ba3fe454c2b..40326f50fec 100644
--- a/jstests/sharding/recovering_slaveok.js
+++ b/jstests/sharding/recovering_slaveok.js
@@ -8,128 +8,127 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
- load("jstests/replsets/rslib.js");
+'use strict';
+load("jstests/replsets/rslib.js");
- var shardTest =
- new ShardingTest({name: "recovering_slaveok", shards: 2, mongos: 2, other: {rs: true}});
+var shardTest =
+ new ShardingTest({name: "recovering_slaveok", shards: 2, mongos: 2, other: {rs: true}});
- var mongos = shardTest.s0;
- var mongosSOK = shardTest.s1;
- mongosSOK.setSlaveOk();
+var mongos = shardTest.s0;
+var mongosSOK = shardTest.s1;
+mongosSOK.setSlaveOk();
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
- const dbName = "test";
- var dbase = mongos.getDB(dbName);
- var coll = dbase.getCollection("foo");
- var dbaseSOk = mongosSOK.getDB("" + dbase);
- var collSOk = mongosSOK.getCollection("" + coll);
+const dbName = "test";
+var dbase = mongos.getDB(dbName);
+var coll = dbase.getCollection("foo");
+var dbaseSOk = mongosSOK.getDB("" + dbase);
+var collSOk = mongosSOK.getCollection("" + coll);
- var rsA = shardTest.rs0;
- var rsB = shardTest.rs1;
+var rsA = shardTest.rs0;
+var rsB = shardTest.rs1;
- assert.writeOK(rsA.getPrimary().getDB("test_a").dummy.insert({x: 1}));
- assert.writeOK(rsB.getPrimary().getDB("test_b").dummy.insert({x: 1}));
+assert.writeOK(rsA.getPrimary().getDB("test_a").dummy.insert({x: 1}));
+assert.writeOK(rsB.getPrimary().getDB("test_b").dummy.insert({x: 1}));
- rsA.awaitReplication();
- rsB.awaitReplication();
+rsA.awaitReplication();
+rsB.awaitReplication();
- print("1: initial insert");
+print("1: initial insert");
- assert.writeOK(coll.save({_id: -1, a: "a", date: new Date()}));
- assert.writeOK(coll.save({_id: 1, b: "b", date: new Date()}));
+assert.writeOK(coll.save({_id: -1, a: "a", date: new Date()}));
+assert.writeOK(coll.save({_id: 1, b: "b", date: new Date()}));
- print("2: shard collection");
+print("2: shard collection");
- shardTest.shardColl(coll,
- /* shardBy */ {_id: 1},
- /* splitAt */ {_id: 0},
- /* move chunk */ {_id: 0},
- /* dbname */ null,
- /* waitForDelete */ true);
+shardTest.shardColl(coll,
+ /* shardBy */ {_id: 1},
+ /* splitAt */ {_id: 0},
+ /* move chunk */ {_id: 0},
+ /* dbname */ null,
+ /* waitForDelete */ true);
- print("3: test normal and slaveOk queries");
+print("3: test normal and slaveOk queries");
- // Make shardA and rsA the same
- var shardA = shardTest.getShard(coll, {_id: -1});
- var shardAColl = shardA.getCollection("" + coll);
- var shardB = shardTest.getShard(coll, {_id: 1});
+// Make shardA and rsA the same
+var shardA = shardTest.getShard(coll, {_id: -1});
+var shardAColl = shardA.getCollection("" + coll);
+var shardB = shardTest.getShard(coll, {_id: 1});
- if (shardA.name == rsB.getURL()) {
- var swap = rsB;
- rsB = rsA;
- rsA = swap;
- }
+if (shardA.name == rsB.getURL()) {
+ var swap = rsB;
+ rsB = rsA;
+ rsA = swap;
+}
- rsA.awaitReplication();
- rsB.awaitReplication();
+rsA.awaitReplication();
+rsB.awaitReplication();
- // Because of async migration cleanup, we need to wait for this condition to be true
- assert.soon(function() {
- return coll.find().itcount() == collSOk.find().itcount();
- });
+// Because of async migration cleanup, we need to wait for this condition to be true
+assert.soon(function() {
+ return coll.find().itcount() == collSOk.find().itcount();
+});
- assert.eq(shardAColl.find().itcount(), 1);
- assert.eq(shardAColl.findOne()._id, -1);
+assert.eq(shardAColl.find().itcount(), 1);
+assert.eq(shardAColl.findOne()._id, -1);
- print("5: make one of the secondaries RECOVERING");
+print("5: make one of the secondaries RECOVERING");
- var secs = rsA.getSecondaries();
- var goodSec = secs[0];
- var badSec = secs[1];
+var secs = rsA.getSecondaries();
+var goodSec = secs[0];
+var badSec = secs[1];
- assert.commandWorked(badSec.adminCommand("replSetMaintenance"));
- rsA.waitForState(badSec, ReplSetTest.State.RECOVERING);
+assert.commandWorked(badSec.adminCommand("replSetMaintenance"));
+rsA.waitForState(badSec, ReplSetTest.State.RECOVERING);
- print("6: stop non-RECOVERING secondary");
+print("6: stop non-RECOVERING secondary");
- rsA.stop(goodSec);
+rsA.stop(goodSec);
- print("7: check our regular and slaveOk query");
+print("7: check our regular and slaveOk query");
- assert.eq(2, coll.find().itcount());
- assert.eq(2, collSOk.find().itcount());
+assert.eq(2, coll.find().itcount());
+assert.eq(2, collSOk.find().itcount());
- print("8: restart both our secondaries clean");
+print("8: restart both our secondaries clean");
- rsA.restart(rsA.getSecondaries(), {remember: true, startClean: true}, undefined, 5 * 60 * 1000);
+rsA.restart(rsA.getSecondaries(), {remember: true, startClean: true}, undefined, 5 * 60 * 1000);
- print("9: wait for recovery");
+print("9: wait for recovery");
- rsA.waitForState(rsA.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
+rsA.waitForState(rsA.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
- print("10: check our regular and slaveOk query");
+print("10: check our regular and slaveOk query");
- // We need to make sure our nodes are considered accessible from mongos - otherwise we fail
- // See SERVER-7274
- awaitRSClientHosts(coll.getMongo(), rsA.nodes, {ok: true});
- awaitRSClientHosts(coll.getMongo(), rsB.nodes, {ok: true});
+// We need to make sure our nodes are considered accessible from mongos - otherwise we fail
+// See SERVER-7274
+awaitRSClientHosts(coll.getMongo(), rsA.nodes, {ok: true});
+awaitRSClientHosts(coll.getMongo(), rsB.nodes, {ok: true});
- // We need to make sure at least one secondary is accessible from mongos - otherwise we fail
- // See SERVER-7699
- awaitRSClientHosts(collSOk.getMongo(), [rsA.getSecondaries()[0]], {secondary: true, ok: true});
- awaitRSClientHosts(collSOk.getMongo(), [rsB.getSecondaries()[0]], {secondary: true, ok: true});
+// We need to make sure at least one secondary is accessible from mongos - otherwise we fail
+// See SERVER-7699
+awaitRSClientHosts(collSOk.getMongo(), [rsA.getSecondaries()[0]], {secondary: true, ok: true});
+awaitRSClientHosts(collSOk.getMongo(), [rsB.getSecondaries()[0]], {secondary: true, ok: true});
- print("SlaveOK Query...");
- var sOKCount = collSOk.find().itcount();
+print("SlaveOK Query...");
+var sOKCount = collSOk.find().itcount();
- var collCount = null;
- try {
- print("Normal query...");
- collCount = coll.find().itcount();
- } catch (e) {
- printjson(e);
+var collCount = null;
+try {
+ print("Normal query...");
+ collCount = coll.find().itcount();
+} catch (e) {
+ printjson(e);
- // There may have been a stepdown caused by step 8, so we run this twice in a row. The first
- // time can error out.
- print("Error may have been caused by stepdown, try again.");
- collCount = coll.find().itcount();
- }
+ // There may have been a stepdown caused by step 8, so we run this twice in a row. The first
+ // time can error out.
+ print("Error may have been caused by stepdown, try again.");
+ collCount = coll.find().itcount();
+}
- assert.eq(collCount, sOKCount);
-
- shardTest.stop();
+assert.eq(collCount, sOKCount);
+shardTest.stop();
})();
diff --git a/jstests/sharding/refresh_sessions.js b/jstests/sharding/refresh_sessions.js
index ee4ee125db3..c6d229707ca 100644
--- a/jstests/sharding/refresh_sessions.js
+++ b/jstests/sharding/refresh_sessions.js
@@ -1,86 +1,86 @@
(function() {
- "use strict";
-
- // This test makes assumptions about the number of logical sessions.
- TestData.disableImplicitSessions = true;
-
- var sessionsDb = "config";
- var refresh = {refreshLogicalSessionCacheNow: 1};
- var startSession = {startSession: 1};
-
- // Create a cluster with 1 shard.
- var cluster = new ShardingTest({shards: 2});
-
- // Test that we can refresh without any sessions, as a sanity check.
- {
- assert.commandWorked(cluster.s.getDB(sessionsDb).runCommand(refresh));
- assert.commandWorked(cluster.shard0.getDB(sessionsDb).runCommand(refresh));
- assert.commandWorked(cluster.shard1.getDB(sessionsDb).runCommand(refresh));
- }
-
- // Test that refreshing on mongos flushes local records to the collection.
- {
- var mongos = cluster.s.getDB(sessionsDb);
- var sessionCount = mongos.system.sessions.count();
-
- // Start one session.
- assert.commandWorked(mongos.runCommand(startSession));
- assert.commandWorked(mongos.runCommand(refresh));
-
- // Test that it landed in the collection.
- assert.eq(mongos.system.sessions.count(),
- sessionCount + 1,
- "refresh on mongos did not flush session record");
- }
-
- // Test that refreshing on mongod flushes local records to the collection.
- {
- var mongos = cluster.s.getDB(sessionsDb);
- var shard = cluster.shard0.getDB(sessionsDb);
- var sessionCount = mongos.system.sessions.count();
-
- assert.commandWorked(shard.runCommand(startSession));
- assert.commandWorked(shard.runCommand(refresh));
-
- // Test that the new record landed in the collection.
- assert.eq(mongos.system.sessions.count(),
- sessionCount + 1,
- "refresh on mongod did not flush session record");
- }
-
- // Test that refreshing on all servers flushes all records.
- {
- var mongos = cluster.s.getDB(sessionsDb);
- var shard0 = cluster.shard0.getDB(sessionsDb);
- var shard1 = cluster.shard1.getDB(sessionsDb);
-
- var sessionCount = mongos.system.sessions.count();
-
- assert.commandWorked(mongos.runCommand(startSession));
- assert.commandWorked(shard0.runCommand(startSession));
- assert.commandWorked(shard1.runCommand(startSession));
-
- // All records should be in local caches only.
- assert.eq(mongos.system.sessions.count(),
- sessionCount,
- "startSession should not flush records to disk");
-
- // Refresh on each server, see that it ups the session count.
- assert.commandWorked(mongos.runCommand(refresh));
- assert.eq(mongos.system.sessions.count(),
- sessionCount + 1,
- "refresh on mongos did not flush session records to disk");
-
- assert.commandWorked(shard0.runCommand(refresh));
- assert.eq(mongos.system.sessions.count(),
- sessionCount + 2,
- "refresh on shard did not flush session records to disk");
-
- assert.commandWorked(shard1.runCommand(refresh));
- assert.eq(mongos.system.sessions.count(),
- sessionCount + 3,
- "refresh on shard did not flush session records to disk");
- }
-
- cluster.stop();
+"use strict";
+
+// This test makes assumptions about the number of logical sessions.
+TestData.disableImplicitSessions = true;
+
+var sessionsDb = "config";
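+// refreshLogicalSessionCacheNow flushes locally cached session records to config.system.sessions.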
+var refresh = {refreshLogicalSessionCacheNow: 1};
+var startSession = {startSession: 1};
+
+// Create a cluster with 2 shards.
+var cluster = new ShardingTest({shards: 2});
+
+// Test that we can refresh without any sessions, as a sanity check.
+{
+ assert.commandWorked(cluster.s.getDB(sessionsDb).runCommand(refresh));
+ assert.commandWorked(cluster.shard0.getDB(sessionsDb).runCommand(refresh));
+ assert.commandWorked(cluster.shard1.getDB(sessionsDb).runCommand(refresh));
+}
+
+// Test that refreshing on mongos flushes local records to the collection.
+{
+ var mongos = cluster.s.getDB(sessionsDb);
+ var sessionCount = mongos.system.sessions.count();
+
+ // Start one session.
+ assert.commandWorked(mongos.runCommand(startSession));
+ assert.commandWorked(mongos.runCommand(refresh));
+
+ // Test that it landed in the collection.
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 1,
+ "refresh on mongos did not flush session record");
+}
+
+// Test that refreshing on mongod flushes local records to the collection.
+{
+ var mongos = cluster.s.getDB(sessionsDb);
+ var shard = cluster.shard0.getDB(sessionsDb);
+ var sessionCount = mongos.system.sessions.count();
+
+ assert.commandWorked(shard.runCommand(startSession));
+ assert.commandWorked(shard.runCommand(refresh));
+
+ // Test that the new record landed in the collection.
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 1,
+ "refresh on mongod did not flush session record");
+}
+
+// Test that refreshing on all servers flushes all records.
+{
+ var mongos = cluster.s.getDB(sessionsDb);
+ var shard0 = cluster.shard0.getDB(sessionsDb);
+ var shard1 = cluster.shard1.getDB(sessionsDb);
+
+ var sessionCount = mongos.system.sessions.count();
+
+ assert.commandWorked(mongos.runCommand(startSession));
+ assert.commandWorked(shard0.runCommand(startSession));
+ assert.commandWorked(shard1.runCommand(startSession));
+
+ // All records should be in local caches only.
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount,
+ "startSession should not flush records to disk");
+
+ // Refresh on each server and check that it increments the session count.
+ assert.commandWorked(mongos.runCommand(refresh));
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 1,
+ "refresh on mongos did not flush session records to disk");
+
+ assert.commandWorked(shard0.runCommand(refresh));
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 2,
+ "refresh on shard did not flush session records to disk");
+
+ assert.commandWorked(shard1.runCommand(refresh));
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 3,
+ "refresh on shard did not flush session records to disk");
+}
+
+cluster.stop();
})();
diff --git a/jstests/sharding/regex_targeting.js b/jstests/sharding/regex_targeting.js
index e2300a3e896..df836cd8ef0 100644
--- a/jstests/sharding/regex_targeting.js
+++ b/jstests/sharding/regex_targeting.js
@@ -1,288 +1,283 @@
// This checks to make sure that sharded regex queries behave the same as unsharded regex queries
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 2});
-
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
-
- //
- // Set up multiple collections to target with regex shard keys on two shards
- //
-
- var coll = mongos.getCollection("foo.bar");
- var collSharded = mongos.getCollection("foo.barSharded");
- var collCompound = mongos.getCollection("foo.barCompound");
- var collNested = mongos.getCollection("foo.barNested");
- var collHashed = mongos.getCollection("foo.barHashed");
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
- st.ensurePrimaryShard(coll.getDB().toString(), st.shard0.shardName);
-
- //
- // Split the collection so that "abcde-0" and "abcde-1" go on different shards when possible
- //
-
- assert.commandWorked(admin.runCommand({shardCollection: collSharded.toString(), key: {a: 1}}));
- assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {a: "abcde-1"}}));
- assert.commandWorked(admin.runCommand({
- moveChunk: collSharded.toString(),
- find: {a: 0},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- assert.commandWorked(
- admin.runCommand({shardCollection: collCompound.toString(), key: {a: 1, b: 1}}));
- assert.commandWorked(
- admin.runCommand({split: collCompound.toString(), middle: {a: "abcde-1", b: 0}}));
- assert.commandWorked(admin.runCommand({
- moveChunk: collCompound.toString(),
- find: {a: 0, b: 0},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- assert.commandWorked(
- admin.runCommand({shardCollection: collNested.toString(), key: {'a.b': 1}}));
- assert.commandWorked(
- admin.runCommand({split: collNested.toString(), middle: {'a.b': "abcde-1"}}));
- assert.commandWorked(admin.runCommand({
- moveChunk: collNested.toString(),
- find: {a: {b: 0}},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- assert.commandWorked(
- admin.runCommand({shardCollection: collHashed.toString(), key: {hash: "hashed"}}));
-
- st.printShardingStatus();
-
- //
- //
- // Cannot insert regex _id
- assert.writeError(coll.insert({_id: /regex value/}));
- assert.writeError(collSharded.insert({_id: /regex value/, a: 0}));
- assert.writeError(collCompound.insert({_id: /regex value/, a: 0, b: 0}));
- assert.writeError(collNested.insert({_id: /regex value/, a: {b: 0}}));
- assert.writeError(collHashed.insert({_id: /regex value/, hash: 0}));
-
- //
- //
- // (For now) we can insert a regex shard key
- assert.writeOK(collSharded.insert({a: /regex value/}));
- assert.writeOK(collCompound.insert({a: /regex value/, b: "other value"}));
- assert.writeOK(collNested.insert({a: {b: /regex value/}}));
- assert.writeOK(collHashed.insert({hash: /regex value/}));
-
- //
- //
- // Query by regex should hit all matching keys, across all shards if applicable
- coll.remove({});
- assert.writeOK(coll.insert({a: "abcde-0"}));
- assert.writeOK(coll.insert({a: "abcde-1"}));
- assert.writeOK(coll.insert({a: /abcde.*/}));
- assert.eq(coll.find().itcount(), coll.find({a: /abcde.*/}).itcount());
-
- collSharded.remove({});
- assert.writeOK(collSharded.insert({a: "abcde-0"}));
- assert.writeOK(collSharded.insert({a: "abcde-1"}));
- assert.writeOK(collSharded.insert({a: /abcde.*/}));
- assert.eq(collSharded.find().itcount(), collSharded.find({a: /abcde.*/}).itcount());
-
- collCompound.remove({});
- assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
- assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
- assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
- assert.eq(collCompound.find().itcount(), collCompound.find({a: /abcde.*/}).itcount());
-
- collNested.remove({});
- assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
- assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
- assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
- assert.eq(collNested.find().itcount(), collNested.find({'a.b': /abcde.*/}).itcount());
-
- collHashed.remove({});
- while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
- st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
- }
- assert.writeOK(collHashed.insert({hash: /abcde.*/}));
- assert.eq(collHashed.find().itcount(), collHashed.find({hash: /abcde.*/}).itcount());
-
- //
- //
- // Update by regex should hit all matching keys, across all shards if applicable
- coll.remove({});
- assert.writeOK(coll.insert({a: "abcde-0"}));
- assert.writeOK(coll.insert({a: "abcde-1"}));
- assert.writeOK(coll.insert({a: /abcde.*/}));
- assert.writeOK(coll.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
- assert.eq(coll.find().itcount(), coll.find({updated: true}).itcount());
-
- collSharded.remove({});
- assert.writeOK(collSharded.insert({a: "abcde-0"}));
- assert.writeOK(collSharded.insert({a: "abcde-1"}));
- assert.writeOK(collSharded.insert({a: /abcde.*/}));
- assert.writeOK(collSharded.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
- assert.eq(collSharded.find().itcount(), collSharded.find({updated: true}).itcount());
-
- collCompound.remove({});
- assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
- assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
- assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
- assert.writeOK(collCompound.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
- assert.eq(collCompound.find().itcount(), collCompound.find({updated: true}).itcount());
-
- collNested.remove({});
- assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
- assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
- assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
- assert.writeOK(collNested.update({'a.b': /abcde.*/}, {$set: {updated: true}}, {multi: true}));
- assert.eq(collNested.find().itcount(), collNested.find({updated: true}).itcount());
-
- collHashed.remove({});
- while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
- st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
- }
- assert.writeOK(collHashed.insert({hash: /abcde.*/}));
- assert.writeOK(collHashed.update({hash: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
- assert.eq(collHashed.find().itcount(), collHashed.find({updated: true}).itcount());
-
- collSharded.remove({});
- collCompound.remove({});
- collNested.remove({});
-
- //
- //
- // Op-style updates with regex should fail on sharded collections.
- // Query clause is targeted, and regex in query clause is ambiguous.
- assert.commandFailedWithCode(
- collSharded.update({a: /abcde-1/}, {"$set": {b: 1}}, {upsert: false}),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(
- collSharded.update({a: /abcde-[1-2]/}, {"$set": {b: 1}}, {upsert: false}),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(
- collNested.update({a: {b: /abcde-1/}}, {"$set": {"a.b": /abcde-1/, b: 1}}, {upsert: false}),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(
- collNested.update({"a.b": /abcde.*/}, {"$set": {b: 1}}, {upsert: false}),
- ErrorCodes.InvalidOptions);
-
- //
- //
- // Replacement style updates with regex should work on sharded collections.
- // If query clause is ambiguous, we fallback to using update clause for targeting.
- assert.commandWorked(collSharded.update({a: /abcde.*/}, {a: /abcde.*/, b: 1}, {upsert: false}));
- assert.commandWorked(collSharded.update({a: /abcde-1/}, {a: /abcde-1/, b: 1}, {upsert: false}));
- assert.commandWorked(
- collNested.update({a: {b: /abcde.*/}}, {a: {b: /abcde.*/}}, {upsert: false}));
- assert.commandWorked(
- collNested.update({'a.b': /abcde-1/}, {a: {b: /abcde.*/}}, {upsert: false}));
-
- //
- //
- // Upsert with op-style regex should fail on sharded collections
- // Query clause is targeted, and regex in query clause is ambiguous
-
- // The queries will also be interpreted as regex based prefix search and cannot target a single
- // shard.
- assert.writeError(collSharded.update({a: /abcde.*/}, {$set: {a: /abcde.*/}}, {upsert: true}));
- assert.writeError(
- collCompound.update({a: /abcde-1/}, {$set: {a: /abcde.*/, b: 1}}, {upsert: true}));
- // Exact regex in query never equality
- assert.writeError(
- collNested.update({'a.b': /abcde.*/}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
- // Even nested regexes are not extracted in queries
- assert.writeError(
- collNested.update({a: {b: /abcde.*/}}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
- assert.writeError(collNested.update({c: 1}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
-
- //
- //
- // Upsert by replacement-style regex should fail on sharded collections
- // Query clause is targeted, and regex in query clause is ambiguous
- assert.commandFailedWithCode(collSharded.update({a: /abcde.*/}, {a: /abcde.*/}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- collCompound.update({a: /abcde.*/}, {a: /abcde.*/, b: 1}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- collNested.update({'a.b': /abcde-1/}, {a: {b: /abcde.*/}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- collNested.update({a: {b: /abcde.*/}}, {a: {b: /abcde.*/}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(collNested.update({c: 1}, {a: {b: /abcde.*/}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- //
- //
- // Remove by regex should hit all matching keys, across all shards if applicable
- coll.remove({});
- assert.writeOK(coll.insert({a: "abcde-0"}));
- assert.writeOK(coll.insert({a: "abcde-1"}));
- assert.writeOK(coll.insert({a: /abcde.*/}));
- assert.writeOK(coll.remove({a: /abcde.*/}));
- assert.eq(0, coll.find({}).itcount());
-
- collSharded.remove({});
- assert.writeOK(collSharded.insert({a: "abcde-0"}));
- assert.writeOK(collSharded.insert({a: "abcde-1"}));
- assert.writeOK(collSharded.insert({a: /abcde.*/}));
- assert.writeOK(collSharded.remove({a: /abcde.*/}));
- assert.eq(0, collSharded.find({}).itcount());
-
- collCompound.remove({});
- assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
- assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
- assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
- assert.writeOK(collCompound.remove({a: /abcde.*/}));
- assert.eq(0, collCompound.find({}).itcount());
-
- collNested.remove({});
- assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
- assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
- assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
- assert.writeOK(collNested.remove({'a.b': /abcde.*/}));
- assert.eq(0, collNested.find({}).itcount());
-
- collHashed.remove({});
- while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
- st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
- }
- assert.writeOK(collHashed.insert({hash: /abcde.*/}));
- assert.writeOK(collHashed.remove({hash: /abcde.*/}));
- assert.eq(0, collHashed.find({}).itcount());
-
- //
- //
- // Query/Update/Remove by nested regex is different depending on how the nested regex is
- // specified
- coll.remove({});
- assert.writeOK(coll.insert({a: {b: "abcde-0"}}));
- assert.writeOK(coll.insert({a: {b: "abcde-1"}}));
- assert.writeOK(coll.insert({a: {b: /abcde.*/}}));
- assert.eq(1, coll.find({a: {b: /abcde.*/}}).itcount());
- assert.writeOK(coll.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
- assert.eq(1, coll.find({updated: true}).itcount());
- assert.writeOK(coll.remove({a: {b: /abcde.*/}}));
- assert.eq(2, coll.find().itcount());
-
- collNested.remove({});
- assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
- assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
- assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
- assert.eq(1, collNested.find({a: {b: /abcde.*/}}).itcount());
- assert.writeOK(collNested.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
- assert.eq(1, collNested.find({updated: true}).itcount());
- assert.writeOK(collNested.remove({a: {b: /abcde.*/}}));
- assert.eq(2, collNested.find().itcount());
-
- st.stop();
+'use strict';
+
+var st = new ShardingTest({shards: 2});
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+
+//
+// Set up multiple collections to target with regex shard keys on two shards
+//
+
+var coll = mongos.getCollection("foo.bar");
+var collSharded = mongos.getCollection("foo.barSharded");
+var collCompound = mongos.getCollection("foo.barCompound");
+var collNested = mongos.getCollection("foo.barNested");
+var collHashed = mongos.getCollection("foo.barHashed");
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
+st.ensurePrimaryShard(coll.getDB().toString(), st.shard0.shardName);
+
+//
+// Split the collection so that "abcde-0" and "abcde-1" go on different shards when possible
+//
+
+assert.commandWorked(admin.runCommand({shardCollection: collSharded.toString(), key: {a: 1}}));
+assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {a: "abcde-1"}}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: collSharded.toString(),
+ find: {a: 0},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+assert.commandWorked(
+ admin.runCommand({shardCollection: collCompound.toString(), key: {a: 1, b: 1}}));
+assert.commandWorked(
+ admin.runCommand({split: collCompound.toString(), middle: {a: "abcde-1", b: 0}}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: collCompound.toString(),
+ find: {a: 0, b: 0},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+assert.commandWorked(admin.runCommand({shardCollection: collNested.toString(), key: {'a.b': 1}}));
+assert.commandWorked(admin.runCommand({split: collNested.toString(), middle: {'a.b': "abcde-1"}}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: collNested.toString(),
+ find: {a: {b: 0}},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+assert.commandWorked(
+ admin.runCommand({shardCollection: collHashed.toString(), key: {hash: "hashed"}}));
+
+st.printShardingStatus();
+
+//
+//
+// Cannot insert regex _id
+assert.writeError(coll.insert({_id: /regex value/}));
+assert.writeError(collSharded.insert({_id: /regex value/, a: 0}));
+assert.writeError(collCompound.insert({_id: /regex value/, a: 0, b: 0}));
+assert.writeError(collNested.insert({_id: /regex value/, a: {b: 0}}));
+assert.writeError(collHashed.insert({_id: /regex value/, hash: 0}));
+
+//
+//
+// (For now) we can insert a regex shard key
+assert.writeOK(collSharded.insert({a: /regex value/}));
+assert.writeOK(collCompound.insert({a: /regex value/, b: "other value"}));
+assert.writeOK(collNested.insert({a: {b: /regex value/}}));
+assert.writeOK(collHashed.insert({hash: /regex value/}));
+
+//
+//
+// Query by regex should hit all matching keys, across all shards if applicable
+coll.remove({});
+assert.writeOK(coll.insert({a: "abcde-0"}));
+assert.writeOK(coll.insert({a: "abcde-1"}));
+assert.writeOK(coll.insert({a: /abcde.*/}));
+assert.eq(coll.find().itcount(), coll.find({a: /abcde.*/}).itcount());
+
+collSharded.remove({});
+assert.writeOK(collSharded.insert({a: "abcde-0"}));
+assert.writeOK(collSharded.insert({a: "abcde-1"}));
+assert.writeOK(collSharded.insert({a: /abcde.*/}));
+assert.eq(collSharded.find().itcount(), collSharded.find({a: /abcde.*/}).itcount());
+
+collCompound.remove({});
+assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
+assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
+assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.eq(collCompound.find().itcount(), collCompound.find({a: /abcde.*/}).itcount());
+
+collNested.remove({});
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.eq(collNested.find().itcount(), collNested.find({'a.b': /abcde.*/}).itcount());
+
+collHashed.remove({});
+while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
+ st.shard1.getCollection(collHashed.toString()).count() == 0) {
+ assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
+}
+assert.writeOK(collHashed.insert({hash: /abcde.*/}));
+assert.eq(collHashed.find().itcount(), collHashed.find({hash: /abcde.*/}).itcount());
+
+//
+//
+// Update by regex should hit all matching keys, across all shards if applicable
+coll.remove({});
+assert.writeOK(coll.insert({a: "abcde-0"}));
+assert.writeOK(coll.insert({a: "abcde-1"}));
+assert.writeOK(coll.insert({a: /abcde.*/}));
+assert.writeOK(coll.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(coll.find().itcount(), coll.find({updated: true}).itcount());
+
+collSharded.remove({});
+assert.writeOK(collSharded.insert({a: "abcde-0"}));
+assert.writeOK(collSharded.insert({a: "abcde-1"}));
+assert.writeOK(collSharded.insert({a: /abcde.*/}));
+assert.writeOK(collSharded.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collSharded.find().itcount(), collSharded.find({updated: true}).itcount());
+
+collCompound.remove({});
+assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
+assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
+assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.writeOK(collCompound.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collCompound.find().itcount(), collCompound.find({updated: true}).itcount());
+
+collNested.remove({});
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.writeOK(collNested.update({'a.b': /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collNested.find().itcount(), collNested.find({updated: true}).itcount());
+
+collHashed.remove({});
+while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
+ st.shard1.getCollection(collHashed.toString()).count() == 0) {
+ assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
+}
+assert.writeOK(collHashed.insert({hash: /abcde.*/}));
+assert.writeOK(collHashed.update({hash: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collHashed.find().itcount(), collHashed.find({updated: true}).itcount());
+
+collSharded.remove({});
+collCompound.remove({});
+collNested.remove({});
+
+//
+//
+// Op-style updates with regex should fail on sharded collections.
+// Query clause is targeted, and regex in query clause is ambiguous.
+assert.commandFailedWithCode(collSharded.update({a: /abcde-1/}, {"$set": {b: 1}}, {upsert: false}),
+ ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(
+ collSharded.update({a: /abcde-[1-2]/}, {"$set": {b: 1}}, {upsert: false}),
+ ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(
+ collNested.update({a: {b: /abcde-1/}}, {"$set": {"a.b": /abcde-1/, b: 1}}, {upsert: false}),
+ ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(
+ collNested.update({"a.b": /abcde.*/}, {"$set": {b: 1}}, {upsert: false}),
+ ErrorCodes.InvalidOptions);
+
+//
+//
+// Replacement style updates with regex should work on sharded collections.
+// If query clause is ambiguous, we fallback to using update clause for targeting.
+assert.commandWorked(collSharded.update({a: /abcde.*/}, {a: /abcde.*/, b: 1}, {upsert: false}));
+assert.commandWorked(collSharded.update({a: /abcde-1/}, {a: /abcde-1/, b: 1}, {upsert: false}));
+assert.commandWorked(collNested.update({a: {b: /abcde.*/}}, {a: {b: /abcde.*/}}, {upsert: false}));
+assert.commandWorked(collNested.update({'a.b': /abcde-1/}, {a: {b: /abcde.*/}}, {upsert: false}));
+
+//
+//
+// Upsert with op-style regex should fail on sharded collections
+// Query clause is targeted, and regex in query clause is ambiguous
+
+// The queries will also be interpreted as a regex-based prefix search and so cannot target a
+// single shard.
+assert.writeError(collSharded.update({a: /abcde.*/}, {$set: {a: /abcde.*/}}, {upsert: true}));
+assert.writeError(
+ collCompound.update({a: /abcde-1/}, {$set: {a: /abcde.*/, b: 1}}, {upsert: true}));
+// An exact regex in the query is never treated as an equality on the shard key
+assert.writeError(
+ collNested.update({'a.b': /abcde.*/}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
+// Even nested regexes are not extracted in queries
+assert.writeError(
+ collNested.update({a: {b: /abcde.*/}}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
+assert.writeError(collNested.update({c: 1}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
+
+//
+//
+// Upsert by replacement-style regex should fail on sharded collections
+// Query clause is targeted, and regex in query clause is ambiguous
+assert.commandFailedWithCode(collSharded.update({a: /abcde.*/}, {a: /abcde.*/}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ collCompound.update({a: /abcde.*/}, {a: /abcde.*/, b: 1}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ collNested.update({'a.b': /abcde-1/}, {a: {b: /abcde.*/}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ collNested.update({a: {b: /abcde.*/}}, {a: {b: /abcde.*/}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(collNested.update({c: 1}, {a: {b: /abcde.*/}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+//
+//
+// Remove by regex should hit all matching keys, across all shards if applicable
+coll.remove({});
+assert.writeOK(coll.insert({a: "abcde-0"}));
+assert.writeOK(coll.insert({a: "abcde-1"}));
+assert.writeOK(coll.insert({a: /abcde.*/}));
+assert.writeOK(coll.remove({a: /abcde.*/}));
+assert.eq(0, coll.find({}).itcount());
+
+collSharded.remove({});
+assert.writeOK(collSharded.insert({a: "abcde-0"}));
+assert.writeOK(collSharded.insert({a: "abcde-1"}));
+assert.writeOK(collSharded.insert({a: /abcde.*/}));
+assert.writeOK(collSharded.remove({a: /abcde.*/}));
+assert.eq(0, collSharded.find({}).itcount());
+
+collCompound.remove({});
+assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
+assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
+assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.writeOK(collCompound.remove({a: /abcde.*/}));
+assert.eq(0, collCompound.find({}).itcount());
+
+collNested.remove({});
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.writeOK(collNested.remove({'a.b': /abcde.*/}));
+assert.eq(0, collNested.find({}).itcount());
+
+collHashed.remove({});
+while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
+ st.shard1.getCollection(collHashed.toString()).count() == 0) {
+ assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
+}
+assert.writeOK(collHashed.insert({hash: /abcde.*/}));
+assert.writeOK(collHashed.remove({hash: /abcde.*/}));
+assert.eq(0, collHashed.find({}).itcount());
+
+//
+//
+// Query/Update/Remove by nested regex is different depending on how the nested regex is
+// specified
+coll.remove({});
+assert.writeOK(coll.insert({a: {b: "abcde-0"}}));
+assert.writeOK(coll.insert({a: {b: "abcde-1"}}));
+assert.writeOK(coll.insert({a: {b: /abcde.*/}}));
+assert.eq(1, coll.find({a: {b: /abcde.*/}}).itcount());
+assert.writeOK(coll.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
+assert.eq(1, coll.find({updated: true}).itcount());
+assert.writeOK(coll.remove({a: {b: /abcde.*/}}));
+assert.eq(2, coll.find().itcount());
+
+collNested.remove({});
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.eq(1, collNested.find({a: {b: /abcde.*/}}).itcount());
+assert.writeOK(collNested.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
+assert.eq(1, collNested.find({updated: true}).itcount());
+assert.writeOK(collNested.remove({a: {b: /abcde.*/}}));
+assert.eq(2, collNested.find().itcount());
+
+st.stop();
})();
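The targeting rule this test exercises can be reduced to a short sketch; it assumes the same two-shard cluster and the foo.barSharded collection sharded on {a: 1} that the test sets up:

    var sharded = st.s0.getCollection("foo.barSharded");

    // Op-style update: mongos must target by the query alone, and a regex on the shard
    // key is an ambiguous (prefix/range) predicate, so the non-upsert update is rejected.
    assert.commandFailedWithCode(
        sharded.update({a: /abcde-1/}, {$set: {b: 1}}, {upsert: false}),
        ErrorCodes.InvalidOptions);

    // Replacement-style update: the replacement document itself carries the shard key,
    // so mongos falls back to it for targeting and the write succeeds.
    assert.commandWorked(sharded.update({a: /abcde-1/}, {a: /abcde-1/, b: 1}, {upsert: false}));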
diff --git a/jstests/sharding/remove1.js b/jstests/sharding/remove1.js
index 8ccf8dadcf2..3c8364382ce 100644
--- a/jstests/sharding/remove1.js
+++ b/jstests/sharding/remove1.js
@@ -1,48 +1,46 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2, other: {enableBalancer: true}});
- var config = s.s0.getDB('config');
+var s = new ShardingTest({shards: 2, other: {enableBalancer: true}});
+var config = s.s0.getDB('config');
- assert.commandWorked(s.s0.adminCommand({enableSharding: 'needToMove'}));
- s.ensurePrimaryShard('needToMove', s.shard0.shardName);
+assert.commandWorked(s.s0.adminCommand({enableSharding: 'needToMove'}));
+s.ensurePrimaryShard('needToMove', s.shard0.shardName);
- // Returns an error when trying to remove a shard that doesn't exist.
- assert.commandFailedWithCode(s.s0.adminCommand({removeshard: "shardz"}),
- ErrorCodes.ShardNotFound);
+// Returns an error when trying to remove a shard that doesn't exist.
+assert.commandFailedWithCode(s.s0.adminCommand({removeshard: "shardz"}), ErrorCodes.ShardNotFound);
- // First remove puts in draining mode, the second tells me a db needs to move, the third
- // actually removes
- assert.commandWorked(s.s0.adminCommand({removeshard: s.shard0.shardName}));
+// The first removeShard call puts the shard into draining mode, the second reports a database
+// that still needs to move, and the third actually removes the shard
+assert.commandWorked(s.s0.adminCommand({removeshard: s.shard0.shardName}));
- // Can't have more than one draining shard at a time
- assert.commandFailedWithCode(s.s0.adminCommand({removeshard: s.shard1.shardName}),
- ErrorCodes.ConflictingOperationInProgress);
- assert.eq(s.s0.adminCommand({removeshard: s.shard0.shardName}).dbsToMove,
- ['needToMove'],
- "didn't show db to move");
+// Can't have more than one draining shard at a time
+assert.commandFailedWithCode(s.s0.adminCommand({removeshard: s.shard1.shardName}),
+ ErrorCodes.ConflictingOperationInProgress);
+assert.eq(s.s0.adminCommand({removeshard: s.shard0.shardName}).dbsToMove,
+ ['needToMove'],
+ "didn't show db to move");
- s.s0.getDB('needToMove').dropDatabase();
+s.s0.getDB('needToMove').dropDatabase();
- // Ensure the balancer moves the config.system.sessions collection chunks out of the shard being
- // removed
- s.awaitBalancerRound();
+// Ensure the balancer moves the config.system.sessions collection chunks out of the shard being
+// removed
+s.awaitBalancerRound();
- var removeResult = assert.commandWorked(s.s0.adminCommand({removeshard: s.shard0.shardName}));
- assert.eq('completed', removeResult.state, 'Shard was not removed: ' + tojson(removeResult));
+var removeResult = assert.commandWorked(s.s0.adminCommand({removeshard: s.shard0.shardName}));
+assert.eq('completed', removeResult.state, 'Shard was not removed: ' + tojson(removeResult));
- var existingShards = config.shards.find({}).toArray();
- assert.eq(1,
- existingShards.length,
- "Removed server still appears in count: " + tojson(existingShards));
+var existingShards = config.shards.find({}).toArray();
+assert.eq(
+ 1, existingShards.length, "Removed server still appears in count: " + tojson(existingShards));
- assert.commandFailed(s.s0.adminCommand({removeshard: s.shard1.shardName}));
+assert.commandFailed(s.s0.adminCommand({removeshard: s.shard1.shardName}));
- // Should create a shard0002 shard
- var conn = MongoRunner.runMongod({shardsvr: ""});
- assert.commandWorked(s.s0.adminCommand({addshard: conn.host}));
- assert.eq(2, s.config.shards.count(), "new server does not appear in count");
+// Should create a shard0002 shard
+var conn = MongoRunner.runMongod({shardsvr: ""});
+assert.commandWorked(s.s0.adminCommand({addshard: conn.host}));
+assert.eq(2, s.config.shards.count(), "new server does not appear in count");
- MongoRunner.stopMongod(conn);
- s.stop();
+MongoRunner.stopMongod(conn);
+s.stop();
})();
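The drain-then-remove sequence above can be expressed as a small polling helper; this is a sketch only (the helper name is hypothetical) and it assumes the balancer is running so draining can complete:

    function drainAndRemoveShard(mongosConn, shardName) {
        // The first removeShard call only switches the shard into draining mode.
        assert.commandWorked(mongosConn.adminCommand({removeShard: shardName}));

        // Repeated calls report progress until all chunks and databases have moved off.
        assert.soon(function() {
            var res = assert.commandWorked(mongosConn.adminCommand({removeShard: shardName}));
            return res.state === 'completed';
        }, "shard " + shardName + " never finished draining");
    }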
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index 55258dec663..eb7418b76ed 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -12,195 +12,193 @@ load("jstests/replsets/rslib.js");
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
-
- function seedString(replTest) {
- var members = replTest.getReplSetConfig().members.map(function(elem) {
- return elem.host;
- });
- return replTest.name + '/' + members.join(',');
- }
-
- function removeShard(st, replTest) {
- jsTest.log("Removing shard with name: " + replTest.name);
- var res = st.s.adminCommand({removeShard: replTest.name});
+'use strict';
+
+function seedString(replTest) {
+ var members = replTest.getReplSetConfig().members.map(function(elem) {
+ return elem.host;
+ });
+ return replTest.name + '/' + members.join(',');
+}
+
+function removeShard(st, replTest) {
+ jsTest.log("Removing shard with name: " + replTest.name);
+ var res = st.s.adminCommand({removeShard: replTest.name});
+ assert.commandWorked(res);
+ assert.eq('started', res.state);
+ assert.soon(function() {
+ res = st.s.adminCommand({removeShard: replTest.name});
assert.commandWorked(res);
- assert.eq('started', res.state);
- assert.soon(function() {
- res = st.s.adminCommand({removeShard: replTest.name});
- assert.commandWorked(res);
- return ('completed' === res.state);
- }, "failed to remove shard: " + tojson(res));
-
- // Drop the database so the shard can be re-added.
- assert.commandWorked(replTest.getPrimary().getDB(coll.getDB().getName()).dropDatabase());
- }
-
- function addShard(st, replTest) {
- var seed = seedString(replTest);
- print("Adding shard with seed: " + seed);
- try {
- assert.eq(true, st.adminCommand({addshard: seed}));
- } catch (e) {
- print("First attempt to addShard failed, trying again");
- // transport error on first attempt is expected. Make sure second attempt goes through
- assert.eq(true, st.adminCommand({addshard: seed}));
- }
- awaitRSClientHosts(
- new Mongo(st.s.host), replTest.getSecondaries(), {ok: true, secondary: true});
-
- assert.soon(function() {
- var x = st.chunkDiff(coll.getName(), coll.getDB().getName());
- print("chunk diff: " + x);
- return x < 2;
- }, "no balance happened", 30 * 60 * 1000);
-
- try {
- assert.eq(300, coll.find().itcount());
- } catch (e) {
- // Expected. First query might get transport error and need to reconnect.
- printjson(e);
- assert.eq(300, coll.find().itcount());
- }
- print("Shard added successfully");
- }
-
- var st = new ShardingTest(
- {shards: {rs0: {nodes: 2}, rs1: {nodes: 2}}, other: {chunkSize: 1, enableBalancer: true}});
-
- // Pending resolution of SERVER-8598, we need to wait for deletion after chunk migrations to
- // avoid a pending delete re-creating a database after it was dropped.
- st.s.getDB("config").settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
-
- var conn = new Mongo(st.s.host);
- var coll = conn.getCollection("test.remove2");
- coll.drop();
-
- assert.commandWorked(st.s0.adminCommand({enableSharding: coll.getDB().getName()}));
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: coll.getFullName(), key: {i: 1}}));
-
- // Setup initial data
- var str = 'a';
- while (str.length < 1024 * 16) {
- str += str;
- }
-
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 300; i++) {
- bulk.insert({i: i % 10, str: str});
+ return ('completed' === res.state);
+ }, "failed to remove shard: " + tojson(res));
+
+ // Drop the database so the shard can be re-added.
+ assert.commandWorked(replTest.getPrimary().getDB(coll.getDB().getName()).dropDatabase());
+}
+
+function addShard(st, replTest) {
+ var seed = seedString(replTest);
+ print("Adding shard with seed: " + seed);
+ try {
+ assert.eq(true, st.adminCommand({addshard: seed}));
+ } catch (e) {
+ print("First attempt to addShard failed, trying again");
+        // Transport error on the first attempt is expected; make sure the second attempt goes through
+ assert.eq(true, st.adminCommand({addshard: seed}));
}
- assert.writeOK(bulk.execute());
-
- assert.eq(300, coll.find().itcount());
+ awaitRSClientHosts(
+ new Mongo(st.s.host), replTest.getSecondaries(), {ok: true, secondary: true});
assert.soon(function() {
- var x = st.chunkDiff('remove2', "test");
+ var x = st.chunkDiff(coll.getName(), coll.getDB().getName());
print("chunk diff: " + x);
return x < 2;
}, "no balance happened", 30 * 60 * 1000);
- assert.eq(300, coll.find().itcount());
-
- st.printShardingStatus();
-
- var rst1 = st.rs1;
- // Remove shard and add it back in, without shutting it down.
- jsTestLog("Attempting to remove shard and add it back in");
- removeShard(st, rst1);
- addShard(st, rst1);
-
- // Remove shard, restart set, then add it back in.
- jsTestLog("Attempting to remove shard, restart the set, and then add it back in");
- var originalSeed = seedString(rst1);
-
- removeShard(st, rst1);
- rst1.stopSet();
- print("Sleeping for 20 seconds to let the other shard's ReplicaSetMonitor time out");
- sleep(20000); // 1 failed check should take 10 seconds, sleep for 20 just to be safe
-
- rst1.startSet({restart: true});
- rst1.initiate();
- rst1.awaitReplication();
-
- assert.eq(
- originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before");
- addShard(st, rst1);
-
- // Shut down shard and wait for its ReplicaSetMonitor to be cleaned up, then start it back up
- // and use it.
- //
- // TODO: test this both with AND without waiting for the ReplicaSetMonitor to be cleaned up.
- //
- // This part doesn't pass, even without cleaning up the ReplicaSetMonitor - see SERVER-5900.
- /*
- printjson( conn.getDB('admin').runCommand({movePrimary : 'test2', to : rst1.name}) );
- printjson( conn.getDB('admin').runCommand({setParameter : 1, replMonitorMaxFailedChecks : 5}) );
- jsTestLog( "Shutting down set" )
- rst1.stopSet();
- jsTestLog( "sleeping for 20 seconds to make sure ReplicaSetMonitor gets cleaned up");
- sleep(20000); // 1 failed check should take 10 seconds, sleep for 20 just to be safe
-
- // Should fail since rst1 is the primary for test2
- assert.throws(function() {conn.getDB('test2').foo.find().itcount()});
- jsTestLog( "Bringing set back up" );
- rst1.startSet();
- rst1.initiate();
- rst1.awaitReplication();
-
- jsTestLog( "Checking that set is usable again" );
- //conn.getDB('admin').runCommand({flushRouterConfig:1}); // Uncommenting this makes test pass
- conn.getDB('test2').foo.insert({a:1});
- gle = conn.getDB('test2').runCommand('getLastError');
- if ( !gle.ok ) {
- // Expected. First write will fail and need to re-connect
- print( "write failed" );
- printjson( gle );
- conn.getDB('test2').foo.insert({a:1});
- assert( conn.getDB('test2').getLastErrorObj().ok );
+ try {
+ assert.eq(300, coll.find().itcount());
+ } catch (e) {
+ // Expected. First query might get transport error and need to reconnect.
+ printjson(e);
+ assert.eq(300, coll.find().itcount());
}
-
- assert.eq( 1, conn.getDB('test2').foo.find().itcount() );
- assert( conn.getDB('test2').dropDatabase().ok );
- */
-
- // Remove shard and add a new shard with the same replica set and shard name, but different
- // ports
- jsTestLog("Attempt removing shard and adding a new shard with the same Replica Set name");
- removeShard(st, rst1);
- rst1.stopSet();
- print("Sleeping for 60 seconds to let the other shards restart their ReplicaSetMonitors");
- sleep(60000);
-
- var rst2 = new ReplSetTest({name: rst1.name, nodes: 2, useHostName: true});
- rst2.startSet({shardsvr: ""});
- rst2.initiate();
- rst2.awaitReplication();
-
- addShard(st, rst2);
- printjson(st.admin.runCommand({movePrimary: 'test2', to: rst2.name}));
-
- assert.eq(300, coll.find().itcount());
- conn.getDB('test2').foo.insert({a: 1});
- assert.eq(1, conn.getDB('test2').foo.find().itcount());
-
- // Can't shut down with rst2 in the set or ShardingTest will fail trying to cleanup on shutdown.
- // Have to take out rst2 and put rst1 back into the set so that it can clean up.
- jsTestLog("Putting ShardingTest back to state it expects");
- printjson(st.admin.runCommand({movePrimary: 'test2', to: st.rs0.name}));
- removeShard(st, rst2);
- rst2.stopSet();
-
- print("Sleeping for 60 seconds to let the other shards restart their ReplicaSetMonitors");
- sleep(60000);
-
- rst1.startSet({restart: true});
- rst1.initiate();
- rst1.awaitReplication();
-
- assert.eq(
- originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before");
- addShard(st, rst1);
-
- st.stop();
+ print("Shard added successfully");
+}
+
+var st = new ShardingTest(
+ {shards: {rs0: {nodes: 2}, rs1: {nodes: 2}}, other: {chunkSize: 1, enableBalancer: true}});
+
+// Pending resolution of SERVER-8598, we need to wait for deletion after chunk migrations to
+// avoid a pending delete re-creating a database after it was dropped.
+st.s.getDB("config").settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
+
+var conn = new Mongo(st.s.host);
+var coll = conn.getCollection("test.remove2");
+coll.drop();
+
+assert.commandWorked(st.s0.adminCommand({enableSharding: coll.getDB().getName()}));
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: coll.getFullName(), key: {i: 1}}));
+
+// Set up initial data
+var str = 'a';
+while (str.length < 1024 * 16) {
+ str += str;
+}
+
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 300; i++) {
+ bulk.insert({i: i % 10, str: str});
+}
+assert.writeOK(bulk.execute());
+
+assert.eq(300, coll.find().itcount());
+
+assert.soon(function() {
+ var x = st.chunkDiff('remove2', "test");
+ print("chunk diff: " + x);
+ return x < 2;
+}, "no balance happened", 30 * 60 * 1000);
+
+assert.eq(300, coll.find().itcount());
+
+st.printShardingStatus();
+
+var rst1 = st.rs1;
+// Remove shard and add it back in, without shutting it down.
+jsTestLog("Attempting to remove shard and add it back in");
+removeShard(st, rst1);
+addShard(st, rst1);
+
+// Remove shard, restart set, then add it back in.
+jsTestLog("Attempting to remove shard, restart the set, and then add it back in");
+var originalSeed = seedString(rst1);
+
+removeShard(st, rst1);
+rst1.stopSet();
+print("Sleeping for 20 seconds to let the other shard's ReplicaSetMonitor time out");
+sleep(20000); // 1 failed check should take 10 seconds, sleep for 20 just to be safe
+
+rst1.startSet({restart: true});
+rst1.initiate();
+rst1.awaitReplication();
+
+assert.eq(originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before");
+addShard(st, rst1);
+
+// Shut down shard and wait for its ReplicaSetMonitor to be cleaned up, then start it back up
+// and use it.
+//
+// TODO: test this both with AND without waiting for the ReplicaSetMonitor to be cleaned up.
+//
+// This part doesn't pass, even without cleaning up the ReplicaSetMonitor - see SERVER-5900.
+/*
+printjson( conn.getDB('admin').runCommand({movePrimary : 'test2', to : rst1.name}) );
+printjson( conn.getDB('admin').runCommand({setParameter : 1, replMonitorMaxFailedChecks : 5}) );
+jsTestLog( "Shutting down set" )
+rst1.stopSet();
+jsTestLog( "sleeping for 20 seconds to make sure ReplicaSetMonitor gets cleaned up");
+sleep(20000); // 1 failed check should take 10 seconds, sleep for 20 just to be safe
+
+// Should fail since rst1 is the primary for test2
+assert.throws(function() {conn.getDB('test2').foo.find().itcount()});
+jsTestLog( "Bringing set back up" );
+rst1.startSet();
+rst1.initiate();
+rst1.awaitReplication();
+
+jsTestLog( "Checking that set is usable again" );
+//conn.getDB('admin').runCommand({flushRouterConfig:1}); // Uncommenting this makes test pass
+conn.getDB('test2').foo.insert({a:1});
+gle = conn.getDB('test2').runCommand('getLastError');
+if ( !gle.ok ) {
+ // Expected. First write will fail and need to re-connect
+ print( "write failed" );
+ printjson( gle );
+ conn.getDB('test2').foo.insert({a:1});
+ assert( conn.getDB('test2').getLastErrorObj().ok );
+}
+
+assert.eq( 1, conn.getDB('test2').foo.find().itcount() );
+assert( conn.getDB('test2').dropDatabase().ok );
+*/
+
+// Remove shard and add a new shard with the same replica set and shard name, but different
+// ports
+jsTestLog("Attempt removing shard and adding a new shard with the same Replica Set name");
+removeShard(st, rst1);
+rst1.stopSet();
+print("Sleeping for 60 seconds to let the other shards restart their ReplicaSetMonitors");
+sleep(60000);
+
+var rst2 = new ReplSetTest({name: rst1.name, nodes: 2, useHostName: true});
+rst2.startSet({shardsvr: ""});
+rst2.initiate();
+rst2.awaitReplication();
+
+addShard(st, rst2);
+printjson(st.admin.runCommand({movePrimary: 'test2', to: rst2.name}));
+
+assert.eq(300, coll.find().itcount());
+conn.getDB('test2').foo.insert({a: 1});
+assert.eq(1, conn.getDB('test2').foo.find().itcount());
+
+// Can't shut down with rst2 in the set or ShardingTest will fail trying to cleanup on shutdown.
+// Have to take out rst2 and put rst1 back into the set so that it can clean up.
+jsTestLog("Putting ShardingTest back to state it expects");
+printjson(st.admin.runCommand({movePrimary: 'test2', to: st.rs0.name}));
+removeShard(st, rst2);
+rst2.stopSet();
+
+print("Sleeping for 60 seconds to let the other shards restart their ReplicaSetMonitors");
+sleep(60000);
+
+rst1.startSet({restart: true});
+rst1.initiate();
+rst1.awaitReplication();
+
+assert.eq(originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before");
+addShard(st, rst1);
+
+st.stop();
})();
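For reference, the "setName/host1,host2" seed string that seedString() builds above, and that addShard accepts for a replica-set shard, looks roughly like the following sketch; `rst` stands in for a ReplSetTest such as st.rs1, and the hosts in the comment are hypothetical:

    var hosts = rst.getReplSetConfig().members.map(function(member) {
        return member.host;
    });
    var seed = rst.name + '/' + hosts.join(',');  // e.g. "rs1/hostA:27018,hostB:27019"
    assert.commandWorked(st.s.adminCommand({addShard: seed}));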
diff --git a/jstests/sharding/remove3.js b/jstests/sharding/remove3.js
index ab066f92f9d..b9d45baf789 100644
--- a/jstests/sharding/remove3.js
+++ b/jstests/sharding/remove3.js
@@ -1,44 +1,43 @@
// Validates the remove/drain shard functionality when there is data on the shard being removed
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({name: "remove_shard3", shards: 2, mongos: 2});
+var st = new ShardingTest({name: "remove_shard3", shards: 2, mongos: 2});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll', key: {_id: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll', middle: {_id: 0}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll', key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll', middle: {_id: 0}}));
- // Insert some documents and make sure there are docs on both shards
- st.s0.getDB('TestDB').Coll.insert({_id: -1, value: 'Negative value'});
- st.s0.getDB('TestDB').Coll.insert({_id: 1, value: 'Positive value'});
+// Insert some documents and make sure there are docs on both shards
+st.s0.getDB('TestDB').Coll.insert({_id: -1, value: 'Negative value'});
+st.s0.getDB('TestDB').Coll.insert({_id: 1, value: 'Positive value'});
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));
- // Make sure both mongos instances know of the latest metadata
- assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
- assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
+// Make sure both mongos instances know of the latest metadata
+assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
+assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
- // Remove st.shard1.shardName
- var removeRes;
- removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName}));
- assert.eq('started', removeRes.state);
- removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName}));
- assert.eq('ongoing', removeRes.state);
+// Remove st.shard1.shardName
+var removeRes;
+removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName}));
+assert.eq('started', removeRes.state);
+removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName}));
+assert.eq('ongoing', removeRes.state);
- // Move the one chunk off st.shard1.shardName
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
+// Move the one chunk off st.shard1.shardName
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
- // Remove shard must succeed now
- removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName}));
- assert.eq('completed', removeRes.state);
+// Remove shard must succeed now
+removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName}));
+assert.eq('completed', removeRes.state);
- // Make sure both mongos instance refresh their metadata and do not reference the missing shard
- assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
- assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
-
- st.stop();
+// Make sure both mongos instances refresh their metadata and do not reference the missing shard
+assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
+assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
+st.stop();
})();
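While a shard is draining, the removeShard response itself reports the outstanding work; a sketch of inspecting it against the same cluster, assuming the usual `remaining` document in the reply:

    var res = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName}));
    if (res.state === 'ongoing') {
        // remaining.chunks / remaining.dbs count what still has to be moved off the shard.
        print('remaining chunks: ' + res.remaining.chunks + ', remaining dbs: ' + res.remaining.dbs);
    }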
diff --git a/jstests/sharding/rename.js b/jstests/sharding/rename.js
index 92d1c46ba50..bcd37cdf570 100644
--- a/jstests/sharding/rename.js
+++ b/jstests/sharding/rename.js
@@ -2,84 +2,84 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- var s = new ShardingTest({shards: 2, mongos: 1, rs: {oplogSize: 10}});
+var s = new ShardingTest({shards: 2, mongos: 1, rs: {oplogSize: 10}});
- var db = s.getDB("test");
- var replTest = s.rs0;
+var db = s.getDB("test");
+var replTest = s.rs0;
- assert.writeOK(db.foo.insert({_id: 1}));
- db.foo.renameCollection('bar');
- assert.isnull(db.getLastError(), '1.0');
- assert.eq(db.bar.findOne(), {_id: 1}, '1.1');
- assert.eq(db.bar.count(), 1, '1.2');
- assert.eq(db.foo.count(), 0, '1.3');
+assert.writeOK(db.foo.insert({_id: 1}));
+db.foo.renameCollection('bar');
+assert.isnull(db.getLastError(), '1.0');
+assert.eq(db.bar.findOne(), {_id: 1}, '1.1');
+assert.eq(db.bar.count(), 1, '1.2');
+assert.eq(db.foo.count(), 0, '1.3');
- assert.writeOK(db.foo.insert({_id: 2}));
- db.foo.renameCollection('bar', true);
- assert.isnull(db.getLastError(), '2.0');
- assert.eq(db.bar.findOne(), {_id: 2}, '2.1');
- assert.eq(db.bar.count(), 1, '2.2');
- assert.eq(db.foo.count(), 0, '2.3');
+assert.writeOK(db.foo.insert({_id: 2}));
+db.foo.renameCollection('bar', true);
+assert.isnull(db.getLastError(), '2.0');
+assert.eq(db.bar.findOne(), {_id: 2}, '2.1');
+assert.eq(db.bar.count(), 1, '2.2');
+assert.eq(db.foo.count(), 0, '2.3');
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard0.shardName);
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard0.shardName);
- // Ensure renaming to or from a sharded collection fails.
- jsTest.log('Testing renaming sharded collections');
- assert.commandWorked(
- s.s0.adminCommand({shardCollection: 'test.shardedColl', key: {_id: 'hashed'}}));
+// Ensure renaming to or from a sharded collection fails.
+jsTest.log('Testing renaming sharded collections');
+assert.commandWorked(
+ s.s0.adminCommand({shardCollection: 'test.shardedColl', key: {_id: 'hashed'}}));
- // Renaming from a sharded collection
- assert.commandFailed(db.shardedColl.renameCollection('somethingElse'));
+// Renaming from a sharded collection
+assert.commandFailed(db.shardedColl.renameCollection('somethingElse'));
- // Renaming to a sharded collection
- assert.commandFailed(db.bar.renameCollection('shardedColl'));
+// Renaming to a sharded collection
+assert.commandFailed(db.bar.renameCollection('shardedColl'));
- const dropTarget = true;
- assert.commandFailed(db.bar.renameCollection('shardedColl', dropTarget));
+const dropTarget = true;
+assert.commandFailed(db.bar.renameCollection('shardedColl', dropTarget));
- jsTest.log('Testing renaming sharded collections, directly on the shard');
- var primary = replTest.getPrimary();
- assert.commandFailed(primary.getDB('test').shardedColl.renameCollection('somethingElse'));
- assert.commandFailed(primary.getDB('test').bar.renameCollection('shardedColl'));
- assert.commandFailed(primary.getDB('test').bar.renameCollection('shardedColl', dropTarget));
+jsTest.log('Testing renaming sharded collections, directly on the shard');
+var primary = replTest.getPrimary();
+assert.commandFailed(primary.getDB('test').shardedColl.renameCollection('somethingElse'));
+assert.commandFailed(primary.getDB('test').bar.renameCollection('shardedColl'));
+assert.commandFailed(primary.getDB('test').bar.renameCollection('shardedColl', dropTarget));
- jsTest.log("Testing write concern (1)");
+jsTest.log("Testing write concern (1)");
- assert.writeOK(db.foo.insert({_id: 3}));
- db.foo.renameCollection('bar', true);
+assert.writeOK(db.foo.insert({_id: 3}));
+db.foo.renameCollection('bar', true);
- var ans = db.runCommand({getLastError: 1, w: 3});
- printjson(ans);
- assert.isnull(ans.err, '3.0');
+var ans = db.runCommand({getLastError: 1, w: 3});
+printjson(ans);
+assert.isnull(ans.err, '3.0');
- assert.eq(db.bar.findOne(), {_id: 3}, '3.1');
- assert.eq(db.bar.count(), 1, '3.2');
- assert.eq(db.foo.count(), 0, '3.3');
+assert.eq(db.bar.findOne(), {_id: 3}, '3.1');
+assert.eq(db.bar.count(), 1, '3.2');
+assert.eq(db.foo.count(), 0, '3.3');
- // Ensure write concern works by shutting down 1 node in a replica set shard
- jsTest.log("Testing write concern (2)");
+// Ensure write concern works by shutting down 1 node in a replica set shard
+jsTest.log("Testing write concern (2)");
- // Kill any node. Don't care if it's a primary or secondary.
- replTest.stop(0);
+// Kill any node. Don't care if it's a primary or secondary.
+replTest.stop(0);
- // Call getPrimary() to populate replTest._slaves.
- replTest.getPrimary();
- let liveSlaves = replTest._slaves.filter(function(node) {
- return node.host !== replTest.nodes[0].host;
- });
- replTest.awaitSecondaryNodes(null, liveSlaves);
- awaitRSClientHosts(s.s, replTest.getPrimary(), {ok: true, ismaster: true}, replTest.name);
+// Call getPrimary() to populate replTest._slaves.
+replTest.getPrimary();
+let liveSlaves = replTest._slaves.filter(function(node) {
+ return node.host !== replTest.nodes[0].host;
+});
+replTest.awaitSecondaryNodes(null, liveSlaves);
+awaitRSClientHosts(s.s, replTest.getPrimary(), {ok: true, ismaster: true}, replTest.name);
- assert.writeOK(db.foo.insert({_id: 4}));
- assert.commandWorked(db.foo.renameCollection('bar', true));
+assert.writeOK(db.foo.insert({_id: 4}));
+assert.commandWorked(db.foo.renameCollection('bar', true));
- ans = db.runCommand({getLastError: 1, w: 3, wtimeout: 5000});
- assert.eq(ans.err, "timeout", 'gle: ' + tojson(ans));
+ans = db.runCommand({getLastError: 1, w: 3, wtimeout: 5000});
+assert.eq(ans.err, "timeout", 'gle: ' + tojson(ans));
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/rename_across_mongos.js b/jstests/sharding/rename_across_mongos.js
index e9c435ecff1..de2fa50bcea 100644
--- a/jstests/sharding/rename_across_mongos.js
+++ b/jstests/sharding/rename_across_mongos.js
@@ -1,29 +1,28 @@
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({name: 'rename_across_mongos', shards: 1, mongos: 2});
- var dbName = 'RenameDB';
+var st = new ShardingTest({name: 'rename_across_mongos', shards: 1, mongos: 2});
+var dbName = 'RenameDB';
- st.s0.getDB(dbName).dropDatabase();
- st.s1.getDB(dbName).dropDatabase();
+st.s0.getDB(dbName).dropDatabase();
+st.s1.getDB(dbName).dropDatabase();
- // Create collection on first mongos and insert a document
- assert.commandWorked(st.s0.getDB(dbName).runCommand({create: 'CollNameBeforeRename'}));
- assert.writeOK(st.s0.getDB(dbName).CollNameBeforeRename.insert({Key: 1, Value: 1}));
+// Create collection on first mongos and insert a document
+assert.commandWorked(st.s0.getDB(dbName).runCommand({create: 'CollNameBeforeRename'}));
+assert.writeOK(st.s0.getDB(dbName).CollNameBeforeRename.insert({Key: 1, Value: 1}));
- if (st.configRS) {
- // Ensure that the second mongos will see the newly created database metadata when
- // it tries to do the collection rename.
- st.configRS.awaitLastOpCommitted();
- }
+if (st.configRS) {
+ // Ensure that the second mongos will see the newly created database metadata when
+ // it tries to do the collection rename.
+ st.configRS.awaitLastOpCommitted();
+}
- // Rename collection on second mongos and ensure the document is found
- assert.commandWorked(
- st.s1.getDB(dbName).CollNameBeforeRename.renameCollection('CollNameAfterRename'));
- assert.eq([{Key: 1, Value: 1}],
- st.s1.getDB(dbName).CollNameAfterRename.find({}, {_id: false}).toArray());
-
- st.stop();
+// Rename collection on second mongos and ensure the document is found
+assert.commandWorked(
+ st.s1.getDB(dbName).CollNameBeforeRename.renameCollection('CollNameAfterRename'));
+assert.eq([{Key: 1, Value: 1}],
+ st.s1.getDB(dbName).CollNameAfterRename.find({}, {_id: false}).toArray());
+st.stop();
})();
diff --git a/jstests/sharding/repl_monitor_refresh.js b/jstests/sharding/repl_monitor_refresh.js
index b3d91d04065..20f1d930d98 100644
--- a/jstests/sharding/repl_monitor_refresh.js
+++ b/jstests/sharding/repl_monitor_refresh.js
@@ -5,78 +5,77 @@ load("jstests/replsets/rslib.js");
* become invalid when a replica set reconfig happens.
*/
(function() {
- "use strict";
-
- // Skip db hash check and shard replication since the removed node has wrong config and is still
- // alive.
- TestData.skipCheckDBHashes = true;
- TestData.skipAwaitingReplicationOnShardsBeforeCheckingUUIDs = true;
-
- var NODE_COUNT = 3;
- var st = new ShardingTest({shards: {rs0: {nodes: NODE_COUNT, oplogSize: 10}}});
- var replTest = st.rs0;
- var mongos = st.s;
-
- var shardDoc = mongos.getDB('config').shards.findOne();
- assert.eq(NODE_COUNT, shardDoc.host.split(',').length); // seed list should contain all nodes
-
- /* Make sure that the first node is not the primary (by making the second one primary).
- * We need to do this since the ReplicaSetMonitor iterates over the nodes one
- * by one and you can't remove a node that is currently the primary.
- */
- var connPoolStats = mongos.getDB('admin').runCommand({connPoolStats: 1});
- var targetHostName = connPoolStats['replicaSets'][replTest.name].hosts[1].addr;
-
- var priConn = replTest.getPrimary();
- var confDoc = priConn.getDB("local").system.replset.findOne();
-
- for (var idx = 0; idx < confDoc.members.length; idx++) {
- if (confDoc.members[idx].host == targetHostName) {
- confDoc.members[idx].priority = 100;
- } else {
- confDoc.members[idx].priority = 1;
- }
- }
-
- confDoc.version++;
-
- jsTest.log('Changing conf to ' + tojson(confDoc));
-
- reconfig(replTest, confDoc);
+"use strict";
- awaitRSClientHosts(mongos, {host: targetHostName}, {ok: true, ismaster: true});
+// Skip db hash check and shard replication since the removed node has wrong config and is still
+// alive.
+TestData.skipCheckDBHashes = true;
+TestData.skipAwaitingReplicationOnShardsBeforeCheckingUUIDs = true;
- // Remove first node from set
- confDoc.members.shift();
- confDoc.version++;
+var NODE_COUNT = 3;
+var st = new ShardingTest({shards: {rs0: {nodes: NODE_COUNT, oplogSize: 10}}});
+var replTest = st.rs0;
+var mongos = st.s;
- reconfig(replTest, confDoc);
+var shardDoc = mongos.getDB('config').shards.findOne();
+assert.eq(NODE_COUNT, shardDoc.host.split(',').length); // seed list should contain all nodes
- jsTest.log("Waiting for mongos to reflect change in shard replica set membership.");
- var replView;
- assert.soon(
- function() {
- var connPoolStats = mongos.getDB('admin').runCommand('connPoolStats');
- replView = connPoolStats.replicaSets[replTest.name].hosts;
- return replView.length == confDoc.members.length;
- },
- function() {
- return ("Expected to find " + confDoc.members.length + " nodes but found " +
- replView.length + " in " + tojson(replView));
- });
-
- jsTest.log("Waiting for config.shards to reflect change in shard replica set membership.");
- assert.soon(
- function() {
- shardDoc = mongos.getDB('config').shards.findOne();
- // seed list should contain one less node
- return shardDoc.host.split(',').length == confDoc.members.length;
- },
- function() {
- return ("Expected to find " + confDoc.members.length + " nodes but found " +
- shardDoc.host.split(',').length + " in " + shardDoc.host);
- });
+/* Make sure that the first node is not the primary (by making the second one primary).
+ * We need to do this since the ReplicaSetMonitor iterates over the nodes one
+ * by one and you can't remove a node that is currently the primary.
+ */
+var connPoolStats = mongos.getDB('admin').runCommand({connPoolStats: 1});
+var targetHostName = connPoolStats['replicaSets'][replTest.name].hosts[1].addr;
- st.stop();
+var priConn = replTest.getPrimary();
+var confDoc = priConn.getDB("local").system.replset.findOne();
+for (var idx = 0; idx < confDoc.members.length; idx++) {
+ if (confDoc.members[idx].host == targetHostName) {
+ confDoc.members[idx].priority = 100;
+ } else {
+ confDoc.members[idx].priority = 1;
+ }
+}
+
+confDoc.version++;
+
+jsTest.log('Changing conf to ' + tojson(confDoc));
+
+reconfig(replTest, confDoc);
+
+awaitRSClientHosts(mongos, {host: targetHostName}, {ok: true, ismaster: true});
+
+// Remove first node from set
+confDoc.members.shift();
+confDoc.version++;
+
+reconfig(replTest, confDoc);
+
+jsTest.log("Waiting for mongos to reflect change in shard replica set membership.");
+var replView;
+assert.soon(
+ function() {
+ var connPoolStats = mongos.getDB('admin').runCommand('connPoolStats');
+ replView = connPoolStats.replicaSets[replTest.name].hosts;
+ return replView.length == confDoc.members.length;
+ },
+ function() {
+ return ("Expected to find " + confDoc.members.length + " nodes but found " +
+ replView.length + " in " + tojson(replView));
+ });
+
+jsTest.log("Waiting for config.shards to reflect change in shard replica set membership.");
+assert.soon(
+ function() {
+ shardDoc = mongos.getDB('config').shards.findOne();
+ // seed list should contain one less node
+ return shardDoc.host.split(',').length == confDoc.members.length;
+ },
+ function() {
+ return ("Expected to find " + confDoc.members.length + " nodes but found " +
+ shardDoc.host.split(',').length + " in " + shardDoc.host);
+ });
+
+st.stop();
}());
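
The pattern above — reconfig the shard's replica set, then block until both the router's ReplicaSetMonitor and config.shards have caught up — is reusable; a minimal standalone sketch, assuming a running ShardingTest whose router is `mongos` and whose shard set is named `rsName` (the helper name is illustrative, not part of the patch):

    // Poll until mongos reports the expected member count for the shard replica set,
    // both in its connection-pool view and in the config.shards seed list.
    function awaitMongosMemberCount(mongos, rsName, expectedCount) {
        assert.soon(function() {
            var stats = mongos.getDB('admin').runCommand({connPoolStats: 1});
            return stats.replicaSets[rsName].hosts.length == expectedCount;
        }, 'mongos never reported ' + expectedCount + ' members for ' + rsName);

        assert.soon(function() {
            // config.shards stores the seed list as "<rsName>/host1,host2,...".
            var shardDoc = mongos.getDB('config').shards.findOne();
            return shardDoc.host.split(',').length == expectedCount;
        }, 'config.shards never reflected ' + expectedCount + ' members');
    }
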
diff --git a/jstests/sharding/replication_with_undefined_shard_key.js b/jstests/sharding/replication_with_undefined_shard_key.js
index 8e37c171735..2da48889a4c 100644
--- a/jstests/sharding/replication_with_undefined_shard_key.js
+++ b/jstests/sharding/replication_with_undefined_shard_key.js
@@ -1,30 +1,30 @@
// Test for SERVER-31953 where secondaries crash when replicating an oplog entry where the document
// identifier in the oplog entry contains a shard key value that contains an undefined value.
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({mongos: 1, config: 1, shard: 1, rs: {nodes: 2}});
- const mongosDB = st.s.getDB("test");
- const mongosColl = mongosDB.mycoll;
+const st = new ShardingTest({mongos: 1, config: 1, shard: 1, rs: {nodes: 2}});
+const mongosDB = st.s.getDB("test");
+const mongosColl = mongosDB.mycoll;
- // Shard the test collection on the "x" field.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- assert.commandWorked(mongosDB.adminCommand({
- shardCollection: mongosColl.getFullName(),
- key: {x: 1},
- }));
+// Shard the test collection on the "x" field.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+assert.commandWorked(mongosDB.adminCommand({
+ shardCollection: mongosColl.getFullName(),
+ key: {x: 1},
+}));
- // Insert a document with a literal undefined value.
- assert.writeOK(mongosColl.insert({x: undefined}));
+// Insert a document with a literal undefined value.
+assert.writeOK(mongosColl.insert({x: undefined}));
- jsTestLog("Doing writes that generate oplog entries including undefined document key");
+jsTestLog("Doing writes that generate oplog entries including undefined document key");
- assert.writeOK(mongosColl.update(
- {},
- {$set: {a: 1}},
- {multi: true, writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMs}}));
- assert.writeOK(
- mongosColl.remove({}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMs}}));
+assert.writeOK(mongosColl.update(
+ {},
+ {$set: {a: 1}},
+ {multi: true, writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMs}}));
+assert.writeOK(
+ mongosColl.remove({}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMs}}));
- st.stop();
+st.stop();
})();
\ No newline at end of file
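
When debugging a failure in the SERVER-31953 scenario above, it can help to inspect the oplog entries the secondary is asked to apply; an illustrative peek at the shard primary's oplog, assuming the same test.mycoll namespace and a ShardingTest named st (for update entries the document key, including the undefined-valued shard key field, is carried in the entry's o2 field):

    // Dump the update/delete oplog entries whose document keys the secondary must re-resolve.
    const primaryOplog = st.rs0.getPrimary().getDB('local').oplog.rs;
    primaryOplog.find({ns: 'test.mycoll', op: {$in: ['u', 'd']}}).forEach(function(entry) {
        jsTestLog('replicated entry: ' + tojson(entry));
    });
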
diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js
index f8b6bf74b21..453c4d980f1 100644
--- a/jstests/sharding/replmonitor_bad_seed.js
+++ b/jstests/sharding/replmonitor_bad_seed.js
@@ -19,31 +19,31 @@
* @tags: [requires_persistence]
*/
(function() {
- 'use strict';
- load("jstests/replsets/rslib.js");
+'use strict';
+load("jstests/replsets/rslib.js");
- var st = new ShardingTest({shards: 1, rs: {oplogSize: 10}});
- var replTest = st.rs0;
+var st = new ShardingTest({shards: 1, rs: {oplogSize: 10}});
+var replTest = st.rs0;
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- // The cluster now has the shard information. Then kill the replica set so when mongos restarts
- // and tries to create a ReplSetMonitor for that shard, it will not be able to connect to any of
- // the seed servers.
- // Don't clear the data directory so that the shardIdentity is not deleted.
- replTest.stopSet(undefined /* send default signal */, true /* don't clear data directory */);
+// The cluster now has the shard information. Then kill the replica set so when mongos restarts
+// and tries to create a ReplSetMonitor for that shard, it will not be able to connect to any of
+// the seed servers.
+// Don't clear the data directory so that the shardIdentity is not deleted.
+replTest.stopSet(undefined /* send default signal */, true /* don't clear data directory */);
- st.restartMongos(0);
+st.restartMongos(0);
- replTest.startSet({restart: true, noCleanData: true});
- replTest.awaitSecondaryNodes();
+replTest.startSet({restart: true, noCleanData: true});
+replTest.awaitSecondaryNodes();
- // Verify that the replSetMonitor can reach the restarted set
- awaitRSClientHosts(st.s0, replTest.nodes, {ok: true});
- replTest.awaitNodesAgreeOnPrimary();
+// Verify that the replSetMonitor can reach the restarted set
+awaitRSClientHosts(st.s0, replTest.nodes, {ok: true});
+replTest.awaitNodesAgreeOnPrimary();
- assert.writeOK(st.s0.getDB('test').user.insert({x: 1}));
+assert.writeOK(st.s0.getDB('test').user.insert({x: 1}));
- st.stop();
+st.stop();
})();
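
awaitRSClientHosts (loaded from jstests/replsets/rslib.js) is what verifies the recovery above; a hand-rolled approximation of the same wait, assuming the mongos exposes the shard's hosts through connPoolStats with the same ok flag the tests in this directory match against:

    // Wait until mongos reports every node of the restarted shard as reachable.
    function awaitAllHostsOk(mongos, rsName, nodeCount) {
        assert.soon(function() {
            var stats = mongos.getDB('admin').runCommand({connPoolStats: 1});
            var rs = stats.replicaSets[rsName];
            return rs && rs.hosts.length >= nodeCount && rs.hosts.every(function(h) {
                return h.ok;
            });
        }, 'mongos never saw all ' + nodeCount + ' hosts of ' + rsName + ' as ok');
    }
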
diff --git a/jstests/sharding/restart_transactions.js b/jstests/sharding/restart_transactions.js
index caa60e9bce1..d1f505b1d9c 100644
--- a/jstests/sharding/restart_transactions.js
+++ b/jstests/sharding/restart_transactions.js
@@ -5,167 +5,167 @@
* @tags: [requires_sharding, uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
-
- const collName = "restart_transactions";
-
- function runTest(routerDB, directDB) {
- // Set up the underlying collection.
- const routerColl = routerDB[collName];
- assert.commandWorked(
- routerDB.createCollection(routerColl.getName(), {writeConcern: {w: "majority"}}));
-
- //
- // Can restart a transaction that has been aborted.
- //
-
- let txnNumber = 0;
- assert.commandWorked(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true,
- }));
- assert.commandWorked(directDB.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
-
- assert.commandWorked(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }));
-
- //
- // Cannot restart a transaction that is in progress.
- //
-
- txnNumber++;
- assert.commandWorked(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }));
-
- assert.commandFailedWithCode(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }),
- 50911);
-
- //
- // Cannot restart a transaction that has completed a retryable write.
- //
-
- txnNumber++;
- assert.commandWorked(directDB.runCommand(
- {insert: collName, documents: [{x: txnNumber}], txnNumber: NumberLong(txnNumber)}));
-
- assert.commandFailedWithCode(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }),
- 50911);
-
- //
- // Cannot restart a transaction that has been committed.
- //
-
- txnNumber++;
- assert.commandWorked(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }));
- assert.commandWorked(directDB.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
-
- assert.commandFailedWithCode(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }),
- 50911);
-
- //
- // Cannot restart a transaction that has been prepared.
- //
-
- txnNumber++;
- assert.commandWorked(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }));
- assert.commandWorked(directDB.adminCommand(
- {prepareTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
-
- assert.commandFailedWithCode(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }),
- 50911);
-
- assert.commandWorked(directDB.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
-
- //
- // Cannot restart a transaction that has been aborted after being prepared.
- //
-
- txnNumber++;
- assert.commandWorked(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }));
- assert.commandWorked(directDB.adminCommand(
- {prepareTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
- assert.commandWorked(directDB.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
-
- assert.commandFailedWithCode(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }),
- 50911);
- }
-
- const st = new ShardingTest({shards: 1, mongos: 1, config: 1});
-
- // Directly connect to the shard primary to simulate internal retries by mongos.
- const shardDBName = "test";
- const shardSession = st.rs0.getPrimary().startSession({causalConsistency: false});
- const shardDB = shardSession.getDatabase(shardDBName);
-
- runTest(st.s.getDB(shardDBName), shardDB);
-
- // TODO SERVER-36632: Consider allowing commands in a transaction to run against the config or
- // admin databases, excluding special collections.
+"use strict";
+
+const collName = "restart_transactions";
+
+function runTest(routerDB, directDB) {
+ // Set up the underlying collection.
+ const routerColl = routerDB[collName];
+ assert.commandWorked(
+ routerDB.createCollection(routerColl.getName(), {writeConcern: {w: "majority"}}));
+
+ //
+ // Can restart a transaction that has been aborted.
+ //
+
+ let txnNumber = 0;
+ assert.commandWorked(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true,
+ }));
+ assert.commandWorked(directDB.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+
+ assert.commandWorked(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }));
+
+ //
+ // Cannot restart a transaction that is in progress.
+ //
+
+ txnNumber++;
+ assert.commandWorked(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }));
+
+ assert.commandFailedWithCode(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }),
+ 50911);
+
+ //
+ // Cannot restart a transaction that has completed a retryable write.
+ //
+
+ txnNumber++;
+ assert.commandWorked(directDB.runCommand(
+ {insert: collName, documents: [{x: txnNumber}], txnNumber: NumberLong(txnNumber)}));
+
+ assert.commandFailedWithCode(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }),
+ 50911);
+
+ //
+ // Cannot restart a transaction that has been committed.
+ //
+
+ txnNumber++;
+ assert.commandWorked(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }));
+ assert.commandWorked(directDB.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+ }));
+
+ assert.commandFailedWithCode(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }),
+ 50911);
+
+ //
+ // Cannot restart a transaction that has been prepared.
+ //
+
+ txnNumber++;
+ assert.commandWorked(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }));
+ assert.commandWorked(directDB.adminCommand(
+ {prepareTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+
+ assert.commandFailedWithCode(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }),
+ 50911);
+
+ assert.commandWorked(directDB.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+
//
- // Directly connect to the config sever primary to simulate internal retries by mongos.
- // const configDBName = "config";
- // const configSession = st.configRS.getPrimary().startSession({causalConsistency: false});
- // const configDB = configSession.getDatabase(configDBName);
+ // Cannot restart a transaction that has been aborted after being prepared.
//
- // runTest(st.s.getDB(configDBName), configDB);
- st.stop();
+ txnNumber++;
+ assert.commandWorked(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }));
+ assert.commandWorked(directDB.adminCommand(
+ {prepareTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+ assert.commandWorked(directDB.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+
+ assert.commandFailedWithCode(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }),
+ 50911);
+}
+
+const st = new ShardingTest({shards: 1, mongos: 1, config: 1});
+
+// Directly connect to the shard primary to simulate internal retries by mongos.
+const shardDBName = "test";
+const shardSession = st.rs0.getPrimary().startSession({causalConsistency: false});
+const shardDB = shardSession.getDatabase(shardDBName);
+
+runTest(st.s.getDB(shardDBName), shardDB);
+
+// TODO SERVER-36632: Consider allowing commands in a transaction to run against the config or
+// admin databases, excluding special collections.
+//
+// Directly connect to the config server primary to simulate internal retries by mongos.
+// const configDBName = "config";
+// const configSession = st.configRS.getPrimary().startSession({causalConsistency: false});
+// const configDB = configSession.getDatabase(configDBName);
+//
+// runTest(st.s.getDB(configDBName), configDB);
+
+st.stop();
})();
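
The rule the reformatted test enforces can be stated compactly: reissuing {startTransaction: true} under an already-used txnNumber is accepted only when that number's previous transaction was aborted without ever being prepared; every other prior state exercised above (in progress, committed, prepared, or a completed retryable write) is rejected with code 50911. A minimal sketch of the allowed path, assuming sessionDb is a session-backed database handle on the shard primary and 'c' is an existing collection (both names are illustrative):

    const txnNumber = NumberLong(1);
    // First attempt: start a transaction, then abort it before preparing.
    assert.commandWorked(sessionDb.runCommand(
        {find: 'c', txnNumber: txnNumber, autocommit: false, startTransaction: true}));
    assert.commandWorked(sessionDb.adminCommand(
        {abortTransaction: 1, txnNumber: txnNumber, autocommit: false}));
    // Restarting with the same txnNumber is allowed because the prior attempt only aborted.
    assert.commandWorked(sessionDb.runCommand(
        {find: 'c', txnNumber: txnNumber, autocommit: false, startTransaction: true}));
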
diff --git a/jstests/sharding/resume_change_stream.js b/jstests/sharding/resume_change_stream.js
index 9b5e33d0173..19c53012fda 100644
--- a/jstests/sharding/resume_change_stream.js
+++ b/jstests/sharding/resume_change_stream.js
@@ -2,211 +2,211 @@
// We need to use a readConcern in this test, which requires read commands.
// @tags: [requires_find_command, uses_change_streams]
(function() {
- "use strict";
+"use strict";
+
+load('jstests/replsets/rslib.js'); // For getLatestOp.
+load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
+
+// For supportsMajorityReadConcern.
+load('jstests/multiVersion/libs/causal_consistency_helpers.js');
+
+// This test only works on storage engines that support committed reads, skip it if the
+// configured engine doesn't support it.
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const oplogSize = 1; // size in MB
+const st = new ShardingTest({
+ shards: 2,
+ rs: {
+ nodes: 1,
+ oplogSize: oplogSize,
+ enableMajorityReadConcern: '',
+ // Use the noop writer with a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
+ }
+});
- load('jstests/replsets/rslib.js'); // For getLatestOp.
- load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
- // For supportsMajorityReadConcern.
- load('jstests/multiVersion/libs/causal_consistency_helpers.js');
+let cst = new ChangeStreamTest(mongosDB);
- // This test only works on storage engines that support committed reads, skip it if the
- // configured engine doesn't support it.
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+function testResume(mongosColl, collToWatch) {
+ mongosColl.drop();
- const oplogSize = 1; // size in MB
- const st = new ShardingTest({
- shards: 2,
- rs: {
- nodes: 1,
- oplogSize: oplogSize,
- enableMajorityReadConcern: '',
- // Use the noop writer with a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
- }
- });
+ // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+ assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+ st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
+ // Shard the test collection on _id.
+ assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
- let cst = new ChangeStreamTest(mongosDB);
+ // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
+ assert.commandWorked(
+ mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
- function testResume(mongosColl, collToWatch) {
- mongosColl.drop();
+ // Move the [0, MaxKey] chunk to st.shard1.shardName.
+ assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+ // Write a document to each chunk.
+ assert.writeOK(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+ let changeStream = cst.startWatchingChanges(
+ {pipeline: [{$changeStream: {}}], collection: collToWatch, includeToken: true});
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+ // We awaited the replication of the first writes, so the change stream shouldn't return
+ // them.
+ assert.writeOK(mongosColl.update({_id: -1}, {$set: {updated: true}}));
- // Move the [0, MaxKey] chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+ // Record current time to resume a change stream later in the test.
+ const resumeTimeFirstUpdate = mongosDB.runCommand({isMaster: 1}).$clusterTime.clusterTime;
- // Write a document to each chunk.
- assert.writeOK(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.update({_id: 1}, {$set: {updated: true}}));
- let changeStream = cst.startWatchingChanges(
- {pipeline: [{$changeStream: {}}], collection: collToWatch, includeToken: true});
+ // Test that we see the two writes, and remember their resume tokens.
+ let next = cst.getOneChange(changeStream);
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey._id, -1);
+ const resumeTokenFromFirstUpdateOnShard0 = next._id;
- // We awaited the replication of the first writes, so the change stream shouldn't return
- // them.
- assert.writeOK(mongosColl.update({_id: -1}, {$set: {updated: true}}));
+ next = cst.getOneChange(changeStream);
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey._id, 1);
+ const resumeTokenFromFirstUpdateOnShard1 = next._id;
- // Record current time to resume a change stream later in the test.
- const resumeTimeFirstUpdate = mongosDB.runCommand({isMaster: 1}).$clusterTime.clusterTime;
+ // Write some additional documents, then test that it's possible to resume after the first
+ // update.
+ assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.update({_id: 1}, {$set: {updated: true}}));
+ changeStream = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdateOnShard0}}],
+ collection: collToWatch
+ });
- // Test that we see the two writes, and remember their resume tokens.
- let next = cst.getOneChange(changeStream);
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey._id, -1);
- const resumeTokenFromFirstUpdateOnShard0 = next._id;
+ for (let nextExpectedId of [1, -2, 2]) {
+ assert.eq(cst.getOneChange(changeStream).documentKey._id, nextExpectedId);
+ }
- next = cst.getOneChange(changeStream);
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey._id, 1);
- const resumeTokenFromFirstUpdateOnShard1 = next._id;
+ // Test that the stream can't resume if the resume token is no longer present in the oplog.
+
+ // Roll over the entire oplog on the shard with the resume token for the first update.
+ const shardWithResumeToken = st.rs1.getPrimary(); // Resume from shard 1.
+ const mostRecentOplogEntry = getLatestOp(shardWithResumeToken);
+ assert.neq(mostRecentOplogEntry, null);
+ const largeStr = new Array(4 * 1024 * oplogSize).join('abcdefghi');
+ let i = 0;
+
+ function oplogIsRolledOver() {
+ // The oplog has rolled over if the op that used to be newest is now older than the
+ // oplog's current oldest entry. Said another way, the oplog is rolled over when
+ // everything in the oplog is newer than what used to be the newest entry.
+ return bsonWoCompare(
+ mostRecentOplogEntry.ts,
+ getLeastRecentOp({server: shardWithResumeToken, readConcern: "majority"}).ts) <
+ 0;
+ }
- // Write some additional documents, then test that it's possible to resume after the first
- // update.
- assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+ while (!oplogIsRolledOver()) {
+ let idVal = 100 + (i++);
+ assert.writeOK(
+ mongosColl.insert({_id: idVal, long_str: largeStr}, {writeConcern: {w: "majority"}}));
+ sleep(100);
+ }
- changeStream = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdateOnShard0}}],
- collection: collToWatch
- });
+ ChangeStreamTest.assertChangeStreamThrowsCode({
+ db: mongosDB,
+ collName: collToWatch,
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdateOnShard1}}],
+ expectedCode: 40576
+ });
- for (let nextExpectedId of[1, -2, 2]) {
- assert.eq(cst.getOneChange(changeStream).documentKey._id, nextExpectedId);
- }
-
- // Test that the stream can't resume if the resume token is no longer present in the oplog.
-
- // Roll over the entire oplog on the shard with the resume token for the first update.
- const shardWithResumeToken = st.rs1.getPrimary(); // Resume from shard 1.
- const mostRecentOplogEntry = getLatestOp(shardWithResumeToken);
- assert.neq(mostRecentOplogEntry, null);
- const largeStr = new Array(4 * 1024 * oplogSize).join('abcdefghi');
- let i = 0;
-
- function oplogIsRolledOver() {
- // The oplog has rolled over if the op that used to be newest is now older than the
- // oplog's current oldest entry. Said another way, the oplog is rolled over when
- // everything in the oplog is newer than what used to be the newest entry.
- return bsonWoCompare(mostRecentOplogEntry.ts, getLeastRecentOp({
- server: shardWithResumeToken,
- readConcern: "majority"
- }).ts) < 0;
- }
-
- while (!oplogIsRolledOver()) {
- let idVal = 100 + (i++);
- assert.writeOK(mongosColl.insert({_id: idVal, long_str: largeStr},
- {writeConcern: {w: "majority"}}));
- sleep(100);
- }
-
- ChangeStreamTest.assertChangeStreamThrowsCode({
- db: mongosDB,
- collName: collToWatch,
- pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdateOnShard1}}],
- expectedCode: 40576
- });
+ ChangeStreamTest.assertChangeStreamThrowsCode({
+ db: mongosDB,
+ collName: collToWatch,
+ pipeline: [{$changeStream: {startAtOperationTime: resumeTimeFirstUpdate}}],
+ expectedCode: 40576
+ });
- ChangeStreamTest.assertChangeStreamThrowsCode({
- db: mongosDB,
- collName: collToWatch,
- pipeline: [{$changeStream: {startAtOperationTime: resumeTimeFirstUpdate}}],
- expectedCode: 40576
- });
+ // Test that the change stream can't resume if the resume token *is* present in the oplog,
+ // but one of the shards has rolled over its oplog enough that it doesn't have a long enough
+ // history to resume. Since we just rolled over the oplog on shard 1, we know that
+ // 'resumeTokenFromFirstUpdateOnShard0' is still present on shard 0, but shard 1 doesn't
+ // have any changes earlier than that, so won't be able to resume.
+ ChangeStreamTest.assertChangeStreamThrowsCode({
+ db: mongosDB,
+ collName: collToWatch,
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdateOnShard0}}],
+ expectedCode: 40576
+ });
- // Test that the change stream can't resume if the resume token *is* present in the oplog,
- // but one of the shards has rolled over its oplog enough that it doesn't have a long enough
- // history to resume. Since we just rolled over the oplog on shard 1, we know that
- // 'resumeTokenFromFirstUpdateOnShard0' is still present on shard 0, but shard 1 doesn't
- // have any changes earlier than that, so won't be able to resume.
- ChangeStreamTest.assertChangeStreamThrowsCode({
- db: mongosDB,
- collName: collToWatch,
- pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdateOnShard0}}],
- expectedCode: 40576
- });
+ // Drop the collection.
+ assert(mongosColl.drop());
+
+ // Shard the test collection on shardKey.
+ assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {shardKey: 1}}));
+
+ // Split the collection into 2 chunks: [MinKey, 50), [50, MaxKey].
+ assert.commandWorked(
+ mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: 50}}));
+
+ // Move the [50, MaxKey] chunk to st.shard1.shardName.
+ assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {shardKey: 51}, to: st.rs1.getURL()}));
+
+ const numberOfDocs = 100;
+
+ // Insert test documents.
+ for (let counter = 0; counter < numberOfDocs / 5; ++counter) {
+ assert.writeOK(mongosColl.insert({_id: "abcd" + counter, shardKey: counter * 5 + 0},
+ {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert({_id: "Abcd" + counter, shardKey: counter * 5 + 1},
+ {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert({_id: "aBcd" + counter, shardKey: counter * 5 + 2},
+ {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert({_id: "abCd" + counter, shardKey: counter * 5 + 3},
+ {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert({_id: "abcD" + counter, shardKey: counter * 5 + 4},
+ {writeConcern: {w: "majority"}}));
+ }
- // Drop the collection.
- assert(mongosColl.drop());
-
- // Shard the test collection on shardKey.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {shardKey: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 50), [50, MaxKey].
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: 50}}));
-
- // Move the [50, MaxKey] chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {shardKey: 51}, to: st.rs1.getURL()}));
-
- const numberOfDocs = 100;
-
- // Insert test documents.
- for (let counter = 0; counter < numberOfDocs / 5; ++counter) {
- assert.writeOK(mongosColl.insert({_id: "abcd" + counter, shardKey: counter * 5 + 0},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "Abcd" + counter, shardKey: counter * 5 + 1},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "aBcd" + counter, shardKey: counter * 5 + 2},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "abCd" + counter, shardKey: counter * 5 + 3},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "abcD" + counter, shardKey: counter * 5 + 4},
- {writeConcern: {w: "majority"}}));
- }
-
- let allChangesCursor = cst.startWatchingChanges(
- {pipeline: [{$changeStream: {}}], collection: collToWatch, includeToken: true});
-
- // Perform the multi-update that will induce timestamp collisions
- assert.writeOK(mongosColl.update({}, {$set: {updated: true}}, {multi: true}));
-
- // Loop over documents and open inner change streams resuming from a specified position.
- // Note we skip the last document as it does not have the next document so we would
- // hang indefinitely.
- for (let counter = 0; counter < numberOfDocs - 1; ++counter) {
- let next = cst.getOneChange(allChangesCursor);
-
- const resumeToken = next._id;
- const caseInsensitive = {locale: "en_US", strength: 2};
- let resumedCaseInsensitiveCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- collection: collToWatch,
- aggregateOptions: {collation: caseInsensitive}
- });
- cst.getOneChange(resumedCaseInsensitiveCursor);
- }
+ let allChangesCursor = cst.startWatchingChanges(
+ {pipeline: [{$changeStream: {}}], collection: collToWatch, includeToken: true});
+
+ // Perform the multi-update that will induce timestamp collisions
+ assert.writeOK(mongosColl.update({}, {$set: {updated: true}}, {multi: true}));
+
+ // Loop over documents and open inner change streams resuming from a specified position.
+    // Note we skip the last document: there is no subsequent change for the resumed
+    // stream to return from that position, so getOneChange would hang indefinitely.
+ for (let counter = 0; counter < numberOfDocs - 1; ++counter) {
+ let next = cst.getOneChange(allChangesCursor);
+
+ const resumeToken = next._id;
+ const caseInsensitive = {locale: "en_US", strength: 2};
+ let resumedCaseInsensitiveCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
+ collection: collToWatch,
+ aggregateOptions: {collation: caseInsensitive}
+ });
+ cst.getOneChange(resumedCaseInsensitiveCursor);
}
+}
- // Test change stream on a single collection.
- testResume(mongosColl, mongosColl.getName());
+// Test change stream on a single collection.
+testResume(mongosColl, mongosColl.getName());
- // Test change stream on all collections.
- testResume(mongosColl, 1);
+// Test change stream on all collections.
+testResume(mongosColl, 1);
- cst.cleanUp();
+cst.cleanUp();
- st.stop();
+st.stop();
})();
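
The oplog-rollover loop is the most reusable piece of the test above; a standalone sketch under the same assumptions (getLatestOp and getLeastRecentOp come from jstests/replsets/rslib.js as loaded above, primary is the shard primary whose oplog should roll over, coll is a collection routed to that shard, and oplogSizeMB matches the size the shard was started with):

    // Keep inserting large filler documents until the entry that was newest when we started
    // has been truncated, i.e. it is now older than the oldest entry still in the oplog.
    function rollOverOplog(primary, coll, oplogSizeMB) {
        const startingNewestOp = getLatestOp(primary);
        assert.neq(startingNewestOp, null);
        const filler = new Array(4 * 1024 * oplogSizeMB).join('abcdefghi');
        let i = 0;
        assert.soon(function() {
            assert.writeOK(coll.insert({_id: 'filler' + (i++), pad: filler},
                                       {writeConcern: {w: 'majority'}}));
            const oldestOp = getLeastRecentOp({server: primary, readConcern: 'majority'});
            return bsonWoCompare(startingNewestOp.ts, oldestOp.ts) < 0;
        }, 'oplog never rolled over');
    }
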
diff --git a/jstests/sharding/resume_change_stream_from_stale_mongos.js b/jstests/sharding/resume_change_stream_from_stale_mongos.js
index 7b8dd9cf673..fbc8bd904bb 100644
--- a/jstests/sharding/resume_change_stream_from_stale_mongos.js
+++ b/jstests/sharding/resume_change_stream_from_stale_mongos.js
@@ -3,88 +3,88 @@
// a stale shard version.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- // Create a 2-shard cluster. Enable 'writePeriodicNoops' and set 'periodicNoopIntervalSecs' to 1
- // second so that each shard is continually advancing its optime, allowing the
- // AsyncResultsMerger to return sorted results even if some shards have not yet produced any
- // data.
- const st = new ShardingTest({
- shards: 2,
- mongos: 2,
- rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
- });
+// Create a 2-shard cluster. Enable 'writePeriodicNoops' and set 'periodicNoopIntervalSecs' to 1
+// second so that each shard is continually advancing its optime, allowing the
+// AsyncResultsMerger to return sorted results even if some shards have not yet produced any
+// data.
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
+});
- const firstMongosDB = st.s0.getDB(jsTestName());
- const firstMongosColl = firstMongosDB.test;
+const firstMongosDB = st.s0.getDB(jsTestName());
+const firstMongosColl = firstMongosDB.test;
- // Enable sharding on the test DB and ensure its primary is shard 0.
- assert.commandWorked(firstMongosDB.adminCommand({enableSharding: firstMongosDB.getName()}));
- st.ensurePrimaryShard(firstMongosDB.getName(), st.rs0.getURL());
+// Enable sharding on the test DB and ensure its primary is shard 0.
+assert.commandWorked(firstMongosDB.adminCommand({enableSharding: firstMongosDB.getName()}));
+st.ensurePrimaryShard(firstMongosDB.getName(), st.rs0.getURL());
- // Establish a change stream while it is unsharded, then shard the collection, move a chunk, and
- // record a resume token after the first chunk migration.
- let changeStream = firstMongosColl.aggregate([{$changeStream: {}}]);
+// Establish a change stream while it is unsharded, then shard the collection, move a chunk, and
+// record a resume token after the first chunk migration.
+let changeStream = firstMongosColl.aggregate([{$changeStream: {}}]);
- assert.writeOK(firstMongosColl.insert({_id: -1}));
- assert.writeOK(firstMongosColl.insert({_id: 1}));
+assert.writeOK(firstMongosColl.insert({_id: -1}));
+assert.writeOK(firstMongosColl.insert({_id: 1}));
- for (let nextId of[-1, 1]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.fullDocument, {_id: nextId});
- }
+for (let nextId of [-1, 1]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.fullDocument, {_id: nextId});
+}
- // Shard the test collection on _id, split the collection into 2 chunks: [MinKey, 0) and
- // [0, MaxKey), then move the [0, MaxKey) chunk to shard 1.
- assert.commandWorked(firstMongosDB.adminCommand(
- {shardCollection: firstMongosColl.getFullName(), key: {_id: 1}}));
- assert.commandWorked(
- firstMongosDB.adminCommand({split: firstMongosColl.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(firstMongosDB.adminCommand(
- {moveChunk: firstMongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+// Shard the test collection on _id, split the collection into 2 chunks: [MinKey, 0) and
+// [0, MaxKey), then move the [0, MaxKey) chunk to shard 1.
+assert.commandWorked(
+ firstMongosDB.adminCommand({shardCollection: firstMongosColl.getFullName(), key: {_id: 1}}));
+assert.commandWorked(
+ firstMongosDB.adminCommand({split: firstMongosColl.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(firstMongosDB.adminCommand(
+ {moveChunk: firstMongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
- // Then do one insert to each shard.
- assert.writeOK(firstMongosColl.insert({_id: -2}));
- assert.writeOK(firstMongosColl.insert({_id: 2}));
+// Then do one insert to each shard.
+assert.writeOK(firstMongosColl.insert({_id: -2}));
+assert.writeOK(firstMongosColl.insert({_id: 2}));
- // The change stream should see all the inserts after internally re-establishing cursors after
- // the chunk split.
- let resumeToken = null; // We'll fill this out to be the token of the last change.
- for (let nextId of[-2, 2]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.fullDocument, {_id: nextId});
- resumeToken = next._id;
- }
+// The change stream should see all the inserts after internally re-establishing cursors after
+// the chunk split.
+let resumeToken = null; // We'll fill this out to be the token of the last change.
+for (let nextId of [-2, 2]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.fullDocument, {_id: nextId});
+ resumeToken = next._id;
+}
- // Do some writes that occur on each shard after the resume token.
- assert.writeOK(firstMongosColl.insert({_id: -3}));
- assert.writeOK(firstMongosColl.insert({_id: 3}));
+// Do some writes that occur on each shard after the resume token.
+assert.writeOK(firstMongosColl.insert({_id: -3}));
+assert.writeOK(firstMongosColl.insert({_id: 3}));
- // Now try to resume the change stream using a stale mongos which believes the collection is
- // unsharded. The first mongos should use the shard versioning protocol to discover that the
- // collection is no longer unsharded, and re-target to all shards in the cluster.
- changeStream.close();
- const secondMongosColl = st.s1.getDB(jsTestName()).test;
- changeStream = secondMongosColl.aggregate([{$changeStream: {resumeAfter: resumeToken}}]);
- // Verify we can see both inserts that occurred after the resume point.
- for (let nextId of[-3, 3]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.fullDocument, {_id: nextId});
- }
+// Now try to resume the change stream using a stale mongos which believes the collection is
+// unsharded. The first mongos should use the shard versioning protocol to discover that the
+// collection is no longer unsharded, and re-target to all shards in the cluster.
+changeStream.close();
+const secondMongosColl = st.s1.getDB(jsTestName()).test;
+changeStream = secondMongosColl.aggregate([{$changeStream: {resumeAfter: resumeToken}}]);
+// Verify we can see both inserts that occurred after the resume point.
+for (let nextId of [-3, 3]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.fullDocument, {_id: nextId});
+}
- st.stop();
+st.stop();
}());
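
A related way to exercise the same re-targeting path without a second router is to invalidate the first mongos' cached routing table before resuming; a hedged variation (flushRouterConfig is a standard mongos administrative command, but this exact sequence is an illustration, not part of the patch), reusing firstMongosColl and resumeToken from the test above:

    // Drop the cached routing info so the resume has to rediscover the now-sharded
    // collection via the shard versioning protocol, mirroring the stale second mongos.
    assert.commandWorked(st.s0.adminCommand({flushRouterConfig: 1}));
    const resumedStream =
        firstMongosColl.aggregate([{$changeStream: {resumeAfter: resumeToken}}]);
    assert.soon(() => resumedStream.hasNext());
    assert.eq(resumedStream.next().operationType, "insert");
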
diff --git a/jstests/sharding/resume_change_stream_on_subset_of_shards.js b/jstests/sharding/resume_change_stream_on_subset_of_shards.js
index 3c51004ed3c..b914a310e82 100644
--- a/jstests/sharding/resume_change_stream_on_subset_of_shards.js
+++ b/jstests/sharding/resume_change_stream_on_subset_of_shards.js
@@ -2,74 +2,73 @@
// the collection.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- // Create a 3-shard cluster. Enable 'writePeriodicNoops' and set 'periodicNoopIntervalSecs' to 1
- // second so that each shard is continually advancing its optime, allowing the
- // AsyncResultsMerger to return sorted results even if some shards have not yet produced any
- // data.
- const st = new ShardingTest({
- shards: 3,
- rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
- });
+// Create a 3-shard cluster. Enable 'writePeriodicNoops' and set 'periodicNoopIntervalSecs' to 1
+// second so that each shard is continually advancing its optime, allowing the
+// AsyncResultsMerger to return sorted results even if some shards have not yet produced any
+// data.
+const st = new ShardingTest({
+ shards: 3,
+ rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
+});
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB.test;
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB.test;
- // Enable sharding on the test DB and ensure its primary is shard 0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+// Enable sharding on the test DB and ensure its primary is shard 0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- // Shard the test collection on _id, split the collection into 2 chunks: [MinKey, 0) and
- // [0, MaxKey), then move the [0, MaxKey) chunk to shard 1.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+// Shard the test collection on _id, split the collection into 2 chunks: [MinKey, 0) and
+// [0, MaxKey), then move the [0, MaxKey) chunk to shard 1.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
- // Establish a change stream...
- let changeStream = mongosColl.watch();
+// Establish a change stream...
+let changeStream = mongosColl.watch();
- // ... then do one write to produce a resume token...
- assert.writeOK(mongosColl.insert({_id: -2}));
- assert.soon(() => changeStream.hasNext());
- const resumeToken = changeStream.next()._id;
-
- // ... followed by one write to each chunk for testing purposes, i.e. shards 0 and 1.
- assert.writeOK(mongosColl.insert({_id: -1}));
- assert.writeOK(mongosColl.insert({_id: 1}));
-
- // The change stream should see all the inserts after establishing cursors on all shards.
- for (let nextId of[-1, 1]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.fullDocument, {_id: nextId});
- jsTestLog(`Saw insert for _id ${nextId}`);
- }
+// ... then do one write to produce a resume token...
+assert.writeOK(mongosColl.insert({_id: -2}));
+assert.soon(() => changeStream.hasNext());
+const resumeToken = changeStream.next()._id;
- // Insert another document after storing the resume token.
- assert.writeOK(mongosColl.insert({_id: 2}));
+// ... followed by one write to each chunk for testing purposes, i.e. shards 0 and 1.
+assert.writeOK(mongosColl.insert({_id: -1}));
+assert.writeOK(mongosColl.insert({_id: 1}));
- // Resume the change stream and verify that it correctly sees the next insert. This is meant
- // to test resuming a change stream when not all shards are aware that the collection exists,
- // since shard 2 has no data at this point.
- changeStream = mongosColl.watch([], {resumeAfter: resumeToken});
+// The change stream should see all the inserts after establishing cursors on all shards.
+for (let nextId of [-1, 1]) {
assert.soon(() => changeStream.hasNext());
let next = changeStream.next();
- assert.eq(next.documentKey, {_id: -1});
- assert.eq(next.fullDocument, {_id: -1});
assert.eq(next.operationType, "insert");
+ assert.eq(next.fullDocument, {_id: nextId});
+ jsTestLog(`Saw insert for _id ${nextId}`);
+}
+
+// Insert another document after storing the resume token.
+assert.writeOK(mongosColl.insert({_id: 2}));
+
+// Resume the change stream and verify that it correctly sees the next insert. This is meant
+// to test resuming a change stream when not all shards are aware that the collection exists,
+// since shard 2 has no data at this point.
+changeStream = mongosColl.watch([], {resumeAfter: resumeToken});
+assert.soon(() => changeStream.hasNext());
+let next = changeStream.next();
+assert.eq(next.documentKey, {_id: -1});
+assert.eq(next.fullDocument, {_id: -1});
+assert.eq(next.operationType, "insert");
- st.stop();
+st.stop();
}());
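
A quick way to confirm the precondition that test relies on — that the third shard owns no chunks of the collection and so has never been told about it — is to read the routing metadata directly; an illustrative check, assuming the config.chunks layout of this server era (chunks keyed by an ns field) and the st/mongosColl names from the test:

    // The data-less shard should own no chunks of the test collection.
    const chunksOnShard2 =
        st.s0.getDB('config')
            .chunks.find({ns: mongosColl.getFullName(), shard: st.shard2.shardName})
            .itcount();
    assert.eq(0, chunksOnShard2, 'expected no chunks on the shard without data');
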
diff --git a/jstests/sharding/retryable_writes.js b/jstests/sharding/retryable_writes.js
index cfcef0d0eed..d35172edf3a 100644
--- a/jstests/sharding/retryable_writes.js
+++ b/jstests/sharding/retryable_writes.js
@@ -3,531 +3,528 @@
* retry is as expected and it does not create additional oplog entries.
*/
(function() {
- "use strict";
-
- load("jstests/libs/retryable_writes_util.js");
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- function checkFindAndModifyResult(expected, toCheck) {
- assert.eq(expected.ok, toCheck.ok);
- assert.eq(expected.value, toCheck.value);
- assert.docEq(expected.lastErrorObject, toCheck.lastErrorObject);
- }
-
- function verifyServerStatusFields(serverStatusResponse) {
- assert(serverStatusResponse.hasOwnProperty("transactions"),
- "Expected the serverStatus response to have a 'transactions' field");
- assert.hasFields(
- serverStatusResponse.transactions,
- ["retriedCommandsCount", "retriedStatementsCount", "transactionsCollectionWriteCount"],
- "The 'transactions' field in serverStatus did not have all of the expected fields");
- }
-
- function verifyServerStatusChanges(
- initialStats, newStats, newCommands, newStatements, newCollectionWrites) {
- assert.eq(initialStats.retriedCommandsCount + newCommands,
- newStats.retriedCommandsCount,
- "expected retriedCommandsCount to increase by " + newCommands);
- assert.eq(initialStats.retriedStatementsCount + newStatements,
- newStats.retriedStatementsCount,
- "expected retriedStatementsCount to increase by " + newStatements);
- assert.eq(initialStats.transactionsCollectionWriteCount + newCollectionWrites,
- newStats.transactionsCollectionWriteCount,
- "expected retriedCommandsCount to increase by " + newCollectionWrites);
- }
-
- function runTests(mainConn, priConn) {
- var lsid = UUID();
-
- ////////////////////////////////////////////////////////////////////////
- // Test insert command
-
- let initialStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(initialStatus);
-
- var cmd = {
+"use strict";
+
+load("jstests/libs/retryable_writes_util.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+function checkFindAndModifyResult(expected, toCheck) {
+ assert.eq(expected.ok, toCheck.ok);
+ assert.eq(expected.value, toCheck.value);
+ assert.docEq(expected.lastErrorObject, toCheck.lastErrorObject);
+}
+
+function verifyServerStatusFields(serverStatusResponse) {
+ assert(serverStatusResponse.hasOwnProperty("transactions"),
+ "Expected the serverStatus response to have a 'transactions' field");
+ assert.hasFields(
+ serverStatusResponse.transactions,
+ ["retriedCommandsCount", "retriedStatementsCount", "transactionsCollectionWriteCount"],
+ "The 'transactions' field in serverStatus did not have all of the expected fields");
+}
+
+function verifyServerStatusChanges(
+ initialStats, newStats, newCommands, newStatements, newCollectionWrites) {
+ assert.eq(initialStats.retriedCommandsCount + newCommands,
+ newStats.retriedCommandsCount,
+ "expected retriedCommandsCount to increase by " + newCommands);
+ assert.eq(initialStats.retriedStatementsCount + newStatements,
+ newStats.retriedStatementsCount,
+ "expected retriedStatementsCount to increase by " + newStatements);
+ assert.eq(initialStats.transactionsCollectionWriteCount + newCollectionWrites,
+ newStats.transactionsCollectionWriteCount,
+ "expected retriedCommandsCount to increase by " + newCollectionWrites);
+}
+
+function runTests(mainConn, priConn) {
+ var lsid = UUID();
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test insert command
+
+ let initialStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(initialStatus);
+
+ var cmd = {
+ insert: 'user',
+ documents: [{_id: 10}, {_id: 30}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(34),
+ };
+
+ var testDBMain = mainConn.getDB('test');
+ var result = assert.commandWorked(testDBMain.runCommand(cmd));
+
+ var oplog = priConn.getDB('local').oplog.rs;
+ var insertOplogEntries = oplog.find({ns: 'test.user', op: 'i'}).itcount();
+
+ var testDBPri = priConn.getDB('test');
+ assert.eq(2, testDBPri.user.find().itcount());
+
+ var retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
+ assert.eq(result.ok, retryResult.ok);
+ assert.eq(result.n, retryResult.n);
+ assert.eq(result.writeErrors, retryResult.writeErrors);
+ assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+
+ assert.eq(2, testDBPri.user.find().itcount());
+ assert.eq(insertOplogEntries, oplog.find({ns: 'test.user', op: 'i'}).itcount());
+
+ let newStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(newStatus);
+ verifyServerStatusChanges(initialStatus.transactions,
+ newStatus.transactions,
+ 1 /* newCommands */,
+ 2 /* newStatements */,
+ 1 /* newCollectionWrites */);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test update command
+
+ initialStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(initialStatus);
+
+ cmd = {
+ update: 'user',
+ updates: [
+ {q: {_id: 10}, u: {$inc: {x: 1}}}, // in place
+ {q: {_id: 20}, u: {$inc: {y: 1}}, upsert: true},
+ {q: {_id: 30}, u: {z: 1}} // replacement
+ ],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(35),
+ };
+
+ result = assert.commandWorked(testDBMain.runCommand(cmd));
+
+ let updateOplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
+
+ // Upserts are stored as inserts in the oplog, so check inserts too.
+ insertOplogEntries = oplog.find({ns: 'test.user', op: 'i'}).itcount();
+
+ assert.eq(3, testDBPri.user.find().itcount());
+
+ retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
+ assert.eq(result.ok, retryResult.ok);
+ assert.eq(result.n, retryResult.n);
+ assert.eq(result.nModified, retryResult.nModified);
+ assert.eq(result.upserted, retryResult.upserted);
+ assert.eq(result.writeErrors, retryResult.writeErrors);
+ assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+
+ assert.eq(3, testDBPri.user.find().itcount());
+
+ assert.eq({_id: 10, x: 1}, testDBPri.user.findOne({_id: 10}));
+ assert.eq({_id: 20, y: 1}, testDBPri.user.findOne({_id: 20}));
+ assert.eq({_id: 30, z: 1}, testDBPri.user.findOne({_id: 30}));
+
+ assert.eq(updateOplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
+ assert.eq(insertOplogEntries, oplog.find({ns: 'test.user', op: 'i'}).itcount());
+
+ newStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(newStatus);
+ verifyServerStatusChanges(initialStatus.transactions,
+ newStatus.transactions,
+ 1 /* newCommands */,
+ 3 /* newStatements */,
+ 3 /* newCollectionWrites */);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test delete command
+
+ initialStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(initialStatus);
+
+ assert.writeOK(testDBMain.user.insert({_id: 40, x: 1}));
+ assert.writeOK(testDBMain.user.insert({_id: 50, y: 1}));
+
+ assert.eq(2, testDBPri.user.find({x: 1}).itcount());
+ assert.eq(2, testDBPri.user.find({y: 1}).itcount());
+
+ cmd = {
+ delete: 'user',
+ deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 1}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(36),
+ };
+
+ result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+
+ let deleteOplogEntries = oplog.find({ns: 'test.user', op: 'd'}).itcount();
+
+ assert.eq(1, testDBPri.user.find({x: 1}).itcount());
+ assert.eq(1, testDBPri.user.find({y: 1}).itcount());
+
+ retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
+ assert.eq(result.ok, retryResult.ok);
+ assert.eq(result.n, retryResult.n);
+ assert.eq(result.writeErrors, retryResult.writeErrors);
+ assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+
+ assert.eq(1, testDBPri.user.find({x: 1}).itcount());
+ assert.eq(1, testDBPri.user.find({y: 1}).itcount());
+
+ assert.eq(deleteOplogEntries, oplog.find({ns: 'test.user', op: 'd'}).itcount());
+
+ newStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(newStatus);
+ verifyServerStatusChanges(initialStatus.transactions,
+ newStatus.transactions,
+ 1 /* newCommands */,
+ 2 /* newStatements */,
+ 2 /* newCollectionWrites */);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (upsert)
+
+ initialStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(initialStatus);
+
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 60},
+ update: {$inc: {x: 1}},
+ new: true,
+ upsert: true,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(37),
+ };
+
+ result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+ insertOplogEntries = oplog.find({ns: 'test.user', op: 'i'}).itcount();
+ updateOplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
+ assert.eq({_id: 60, x: 1}, testDBPri.user.findOne({_id: 60}));
+
+ retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
+
+ assert.eq({_id: 60, x: 1}, testDBPri.user.findOne({_id: 60}));
+ assert.eq(insertOplogEntries, oplog.find({ns: 'test.user', op: 'i'}).itcount());
+ assert.eq(updateOplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
+
+ checkFindAndModifyResult(result, retryResult);
+
+ newStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(newStatus);
+ verifyServerStatusChanges(initialStatus.transactions,
+ newStatus.transactions,
+ 1 /* newCommands */,
+ 1 /* newStatements */,
+ 1 /* newCollectionWrites */);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (update, return pre-image)
+
+ initialStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(initialStatus);
+
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 60},
+ update: {$inc: {x: 1}},
+ new: false,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(38),
+ };
+
+ result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+ var oplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
+ assert.eq({_id: 60, x: 2}, testDBPri.user.findOne({_id: 60}));
+
+ retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
+
+ assert.eq({_id: 60, x: 2}, testDBPri.user.findOne({_id: 60}));
+ assert.eq(oplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
+
+ checkFindAndModifyResult(result, retryResult);
+
+ newStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(newStatus);
+ verifyServerStatusChanges(initialStatus.transactions,
+ newStatus.transactions,
+ 1 /* newCommands */,
+ 1 /* newStatements */,
+ 1 /* newCollectionWrites */);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (update, return post-image)
+
+ initialStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(initialStatus);
+
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 60},
+ update: {$inc: {x: 1}},
+ new: true,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(39),
+ };
+
+ result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+ oplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
+ assert.eq({_id: 60, x: 3}, testDBPri.user.findOne({_id: 60}));
+
+ retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
+
+ assert.eq({_id: 60, x: 3}, testDBPri.user.findOne({_id: 60}));
+ assert.eq(oplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
+
+ checkFindAndModifyResult(result, retryResult);
+
+ newStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(newStatus);
+ verifyServerStatusChanges(initialStatus.transactions,
+ newStatus.transactions,
+ 1 /* newCommands */,
+ 1 /* newStatements */,
+ 1 /* newCollectionWrites */);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (remove, return pre-image)
+
+ initialStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(initialStatus);
+
+ assert.writeOK(testDBMain.user.insert({_id: 70, f: 1}));
+ assert.writeOK(testDBMain.user.insert({_id: 80, f: 1}));
+
+ cmd = {
+ findAndModify: 'user',
+ query: {f: 1},
+ remove: true,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(40),
+ };
+
+ result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+ oplogEntries = oplog.find({ns: 'test.user', op: 'd'}).itcount();
+ var docCount = testDBPri.user.find().itcount();
+
+ retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
+
+ assert.eq(oplogEntries, oplog.find({ns: 'test.user', op: 'd'}).itcount());
+ assert.eq(docCount, testDBPri.user.find().itcount());
+
+ checkFindAndModifyResult(result, retryResult);
+
+ newStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(newStatus);
+ verifyServerStatusChanges(initialStatus.transactions,
+ newStatus.transactions,
+ 1 /* newCommands */,
+ 1 /* newStatements */,
+ 1 /* newCollectionWrites */);
+}
+
+function runFailpointTests(mainConn, priConn) {
+ // Test the 'onPrimaryTransactionalWrite' failpoint
+ var lsid = UUID();
+ var testDb = mainConn.getDB('TestDB');
+
+ // Test connection close (default behaviour). The connection will get closed, but the
+ // inserts must succeed
+ assert.commandWorked(priConn.adminCommand(
+ {configureFailPoint: 'onPrimaryTransactionalWrite', mode: 'alwaysOn'}));
+
+ try {
+ // Set skipRetryOnNetworkError so the shell doesn't automatically retry, since the
+ // command has a txnNumber.
+ TestData.skipRetryOnNetworkError = true;
+ var res = assert.commandWorked(testDb.runCommand({
insert: 'user',
- documents: [{_id: 10}, {_id: 30}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(34),
- };
-
- var testDBMain = mainConn.getDB('test');
- var result = assert.commandWorked(testDBMain.runCommand(cmd));
-
- var oplog = priConn.getDB('local').oplog.rs;
- var insertOplogEntries = oplog.find({ns: 'test.user', op: 'i'}).itcount();
-
- var testDBPri = priConn.getDB('test');
- assert.eq(2, testDBPri.user.find().itcount());
-
- var retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
-
- assert.eq(2, testDBPri.user.find().itcount());
- assert.eq(insertOplogEntries, oplog.find({ns: 'test.user', op: 'i'}).itcount());
-
- let newStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(newStatus);
- verifyServerStatusChanges(initialStatus.transactions,
- newStatus.transactions,
- 1 /* newCommands */,
- 2 /* newStatements */,
- 1 /* newCollectionWrites */);
-
- ////////////////////////////////////////////////////////////////////////
- // Test update command
-
- initialStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(initialStatus);
-
- cmd = {
- update: 'user',
- updates: [
- {q: {_id: 10}, u: {$inc: {x: 1}}}, // in place
- {q: {_id: 20}, u: {$inc: {y: 1}}, upsert: true},
- {q: {_id: 30}, u: {z: 1}} // replacement
- ],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(35),
- };
-
- result = assert.commandWorked(testDBMain.runCommand(cmd));
-
- let updateOplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
-
- // Upserts are stored as inserts in the oplog, so check inserts too.
- insertOplogEntries = oplog.find({ns: 'test.user', op: 'i'}).itcount();
-
- assert.eq(3, testDBPri.user.find().itcount());
-
- retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.nModified, retryResult.nModified);
- assert.eq(result.upserted, retryResult.upserted);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
-
- assert.eq(3, testDBPri.user.find().itcount());
-
- assert.eq({_id: 10, x: 1}, testDBPri.user.findOne({_id: 10}));
- assert.eq({_id: 20, y: 1}, testDBPri.user.findOne({_id: 20}));
- assert.eq({_id: 30, z: 1}, testDBPri.user.findOne({_id: 30}));
-
- assert.eq(updateOplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
- assert.eq(insertOplogEntries, oplog.find({ns: 'test.user', op: 'i'}).itcount());
-
- newStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(newStatus);
- verifyServerStatusChanges(initialStatus.transactions,
- newStatus.transactions,
- 1 /* newCommands */,
- 3 /* newStatements */,
- 3 /* newCollectionWrites */);
-
- ////////////////////////////////////////////////////////////////////////
- // Test delete command
-
- initialStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(initialStatus);
-
- assert.writeOK(testDBMain.user.insert({_id: 40, x: 1}));
- assert.writeOK(testDBMain.user.insert({_id: 50, y: 1}));
-
- assert.eq(2, testDBPri.user.find({x: 1}).itcount());
- assert.eq(2, testDBPri.user.find({y: 1}).itcount());
-
- cmd = {
- delete: 'user',
- deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 1}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(36),
- };
-
- result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- let deleteOplogEntries = oplog.find({ns: 'test.user', op: 'd'}).itcount();
-
- assert.eq(1, testDBPri.user.find({x: 1}).itcount());
- assert.eq(1, testDBPri.user.find({y: 1}).itcount());
-
- retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
-
- assert.eq(1, testDBPri.user.find({x: 1}).itcount());
- assert.eq(1, testDBPri.user.find({y: 1}).itcount());
-
- assert.eq(deleteOplogEntries, oplog.find({ns: 'test.user', op: 'd'}).itcount());
-
- newStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(newStatus);
- verifyServerStatusChanges(initialStatus.transactions,
- newStatus.transactions,
- 1 /* newCommands */,
- 2 /* newStatements */,
- 2 /* newCollectionWrites */);
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (upsert)
-
- initialStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(initialStatus);
-
- cmd = {
- findAndModify: 'user',
- query: {_id: 60},
- update: {$inc: {x: 1}},
- new: true,
- upsert: true,
- lsid: {id: lsid},
- txnNumber: NumberLong(37),
- };
-
- result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
- insertOplogEntries = oplog.find({ns: 'test.user', op: 'i'}).itcount();
- updateOplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
- assert.eq({_id: 60, x: 1}, testDBPri.user.findOne({_id: 60}));
-
- retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
-
- assert.eq({_id: 60, x: 1}, testDBPri.user.findOne({_id: 60}));
- assert.eq(insertOplogEntries, oplog.find({ns: 'test.user', op: 'i'}).itcount());
- assert.eq(updateOplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
-
- checkFindAndModifyResult(result, retryResult);
-
- newStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(newStatus);
- verifyServerStatusChanges(initialStatus.transactions,
- newStatus.transactions,
- 1 /* newCommands */,
- 1 /* newStatements */,
- 1 /* newCollectionWrites */);
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (update, return pre-image)
-
- initialStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(initialStatus);
-
- cmd = {
- findAndModify: 'user',
- query: {_id: 60},
- update: {$inc: {x: 1}},
- new: false,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(38),
- };
-
- result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
- var oplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
- assert.eq({_id: 60, x: 2}, testDBPri.user.findOne({_id: 60}));
-
- retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
-
- assert.eq({_id: 60, x: 2}, testDBPri.user.findOne({_id: 60}));
- assert.eq(oplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
-
- checkFindAndModifyResult(result, retryResult);
-
- newStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(newStatus);
- verifyServerStatusChanges(initialStatus.transactions,
- newStatus.transactions,
- 1 /* newCommands */,
- 1 /* newStatements */,
- 1 /* newCollectionWrites */);
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (update, return post-image)
-
- initialStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(initialStatus);
-
- cmd = {
- findAndModify: 'user',
- query: {_id: 60},
- update: {$inc: {x: 1}},
- new: true,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(39),
- };
-
- result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
- oplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
- assert.eq({_id: 60, x: 3}, testDBPri.user.findOne({_id: 60}));
-
- retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
-
- assert.eq({_id: 60, x: 3}, testDBPri.user.findOne({_id: 60}));
- assert.eq(oplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
-
- checkFindAndModifyResult(result, retryResult);
-
- newStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(newStatus);
- verifyServerStatusChanges(initialStatus.transactions,
- newStatus.transactions,
- 1 /* newCommands */,
- 1 /* newStatements */,
- 1 /* newCollectionWrites */);
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (remove, return pre-image)
-
- initialStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(initialStatus);
-
- assert.writeOK(testDBMain.user.insert({_id: 70, f: 1}));
- assert.writeOK(testDBMain.user.insert({_id: 80, f: 1}));
-
- cmd = {
- findAndModify: 'user',
- query: {f: 1},
- remove: true,
- lsid: {id: lsid},
- txnNumber: NumberLong(40),
- };
-
- result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
- oplogEntries = oplog.find({ns: 'test.user', op: 'd'}).itcount();
- var docCount = testDBPri.user.find().itcount();
-
- retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
-
- assert.eq(oplogEntries, oplog.find({ns: 'test.user', op: 'd'}).itcount());
- assert.eq(docCount, testDBPri.user.find().itcount());
-
- checkFindAndModifyResult(result, retryResult);
-
- newStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(newStatus);
- verifyServerStatusChanges(initialStatus.transactions,
- newStatus.transactions,
- 1 /* newCommands */,
- 1 /* newStatements */,
- 1 /* newCollectionWrites */);
- }
-
- function runFailpointTests(mainConn, priConn) {
- // Test the 'onPrimaryTransactionalWrite' failpoint
- var lsid = UUID();
- var testDb = mainConn.getDB('TestDB');
-
- // Test connection close (default behaviour). The connection will get closed, but the
- // inserts must succeed
- assert.commandWorked(priConn.adminCommand(
- {configureFailPoint: 'onPrimaryTransactionalWrite', mode: 'alwaysOn'}));
-
- try {
- // Set skipRetryOnNetworkError so the shell doesn't automatically retry, since the
- // command has a txnNumber.
- TestData.skipRetryOnNetworkError = true;
- var res = assert.commandWorked(testDb.runCommand({
- insert: 'user',
- documents: [{x: 0}, {x: 1}],
- ordered: true,
- lsid: {id: lsid},
- txnNumber: NumberLong(1)
- }));
- // Mongos will automatically retry on retryable errors if the request has a txnNumber,
- // and the retry path for already completed writes does not trigger the failpoint, so
- // the command will succeed when run through mongos.
- assert.eq(2, res.n);
- assert.eq(false, res.hasOwnProperty("writeErrors"));
- } catch (e) {
- var exceptionMsg = e.toString();
- assert(isNetworkError(e), 'Incorrect exception thrown: ' + exceptionMsg);
- } finally {
- TestData.skipRetryOnNetworkError = false;
- }
-
- let collCount = 0;
- assert.soon(() => {
- collCount = testDb.user.find({}).itcount();
- return collCount == 2;
- }, 'testDb.user returned ' + collCount + ' entries');
-
- // Test exception throw. One update must succeed and the other must fail.
- assert.commandWorked(priConn.adminCommand({
- configureFailPoint: 'onPrimaryTransactionalWrite',
- mode: {skip: 1},
- data: {
- closeConnection: false,
- failBeforeCommitExceptionCode: ErrorCodes.InternalError
- }
- }));
-
- var cmd = {
- update: 'user',
- updates: [{q: {x: 0}, u: {$inc: {y: 1}}}, {q: {x: 1}, u: {$inc: {y: 1}}}],
+ documents: [{x: 0}, {x: 1}],
ordered: true,
lsid: {id: lsid},
- txnNumber: NumberLong(2)
- };
-
- var writeResult = testDb.runCommand(cmd);
-
- assert.eq(1, writeResult.nModified);
- assert.eq(1, writeResult.writeErrors.length);
- assert.eq(1, writeResult.writeErrors[0].index);
- assert.eq(ErrorCodes.InternalError, writeResult.writeErrors[0].code);
-
- assert.commandWorked(
- priConn.adminCommand({configureFailPoint: 'onPrimaryTransactionalWrite', mode: 'off'}));
-
- var writeResult = testDb.runCommand(cmd);
- assert.eq(2, writeResult.nModified);
-
- var collContents = testDb.user.find({}).sort({x: 1}).toArray();
- assert.eq(2, collContents.length);
- assert.eq(0, collContents[0].x);
- assert.eq(1, collContents[0].y);
- assert.eq(1, collContents[1].x);
- assert.eq(1, collContents[1].y);
- }
-
- function runMultiTests(mainConn) {
- // Test the behavior of retryable writes with multi=true / limit=0
- var lsid = {id: UUID()};
- var testDb = mainConn.getDB('test_multi');
-
- // Only the update statements with multi=true in a batch fail.
- var cmd = {
- update: 'user',
- updates: [{q: {x: 1}, u: {y: 1}}, {q: {x: 2}, u: {z: 1}, multi: true}],
- ordered: true,
- lsid: lsid,
- txnNumber: NumberLong(1),
- };
- var res = assert.commandWorkedIgnoringWriteErrors(testDb.runCommand(cmd));
- assert.eq(1,
- res.writeErrors.length,
- 'expected only one write error, received: ' + tojson(res.writeErrors));
- assert.eq(1,
- res.writeErrors[0].index,
- 'expected the update at index 1 to fail, not the update at index: ' +
- res.writeErrors[0].index);
- assert.eq(ErrorCodes.InvalidOptions,
- res.writeErrors[0].code,
- 'expected to fail with code ' + ErrorCodes.InvalidOptions + ', received: ' +
- res.writeErrors[0].code);
-
- // Only the delete statements with limit=0 in a batch fail.
- cmd = {
- delete: 'user',
- deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 0}],
- ordered: false,
- lsid: lsid,
- txnNumber: NumberLong(1),
- };
- res = assert.commandWorkedIgnoringWriteErrors(testDb.runCommand(cmd));
- assert.eq(1,
- res.writeErrors.length,
- 'expected only one write error, received: ' + tojson(res.writeErrors));
- assert.eq(1,
- res.writeErrors[0].index,
- 'expected the delete at index 1 to fail, not the delete at index: ' +
- res.writeErrors[0].index);
- assert.eq(ErrorCodes.InvalidOptions,
- res.writeErrors[0].code,
- 'expected to fail with code ' + ErrorCodes.InvalidOptions + ', received: ' +
- res.writeErrors[0].code);
- }
-
- function runInvalidTests(mainConn) {
- var lsid = {id: UUID()};
- var localDB = mainConn.getDB('local');
-
- let cmd = {
- insert: 'user',
- documents: [{_id: 10}, {_id: 30}],
- ordered: false,
- lsid: lsid,
- txnNumber: NumberLong(10),
- };
-
- let res = assert.commandWorkedIgnoringWriteErrors(localDB.runCommand(cmd));
- assert.eq(2, res.writeErrors.length);
-
- localDB.user.insert({_id: 10, x: 1});
- localDB.user.insert({_id: 30, z: 2});
-
- cmd = {
- update: 'user',
- updates: [
- {q: {_id: 10}, u: {$inc: {x: 1}}}, // in place
- {q: {_id: 20}, u: {$inc: {y: 1}}, upsert: true},
- {q: {_id: 30}, u: {z: 1}} // replacement
- ],
- ordered: false,
- lsid: lsid,
- txnNumber: NumberLong(11),
- };
-
- res = assert.commandWorkedIgnoringWriteErrors(localDB.runCommand(cmd));
- assert.eq(3, res.writeErrors.length);
-
- cmd = {
- delete: 'user',
- deletes: [{q: {x: 1}, limit: 1}, {q: {z: 2}, limit: 1}],
- ordered: false,
- lsid: lsid,
- txnNumber: NumberLong(12),
- };
-
- res = assert.commandWorkedIgnoringWriteErrors(localDB.runCommand(cmd));
- assert.eq(2, res.writeErrors.length);
-
- cmd = {
- findAndModify: 'user',
- query: {_id: 60},
- update: {$inc: {x: 1}},
- new: true,
- upsert: true,
- lsid: {id: lsid},
- txnNumber: NumberLong(37),
- };
-
- assert.commandFailed(localDB.runCommand(cmd));
+ txnNumber: NumberLong(1)
+ }));
+ // Mongos will automatically retry on retryable errors if the request has a txnNumber,
+ // and the retry path for already completed writes does not trigger the failpoint, so
+ // the command will succeed when run through mongos.
+ assert.eq(2, res.n);
+ assert.eq(false, res.hasOwnProperty("writeErrors"));
+ } catch (e) {
+ var exceptionMsg = e.toString();
+ assert(isNetworkError(e), 'Incorrect exception thrown: ' + exceptionMsg);
+ } finally {
+ TestData.skipRetryOnNetworkError = false;
}
- // Tests for replica set
- var replTest = new ReplSetTest({nodes: 2});
- replTest.startSet({verbose: 5});
- replTest.initiate();
-
- var priConn = replTest.getPrimary();
-
- runTests(priConn, priConn);
- runFailpointTests(priConn, priConn);
- runMultiTests(priConn);
- runInvalidTests(priConn);
-
- replTest.stopSet();
-
- // Tests for sharded cluster
- var st = new ShardingTest({shards: {rs0: {nodes: 1, verbose: 5}}});
-
- runTests(st.s0, st.rs0.getPrimary());
- runFailpointTests(st.s0, st.rs0.getPrimary());
- runMultiTests(st.s0);
-
- st.stop();
+ let collCount = 0;
+ assert.soon(() => {
+ collCount = testDb.user.find({}).itcount();
+ return collCount == 2;
+ }, 'testDb.user returned ' + collCount + ' entries');
+
+ // Test exception throw. One update must succeed and the other must fail.
+ assert.commandWorked(priConn.adminCommand({
+ configureFailPoint: 'onPrimaryTransactionalWrite',
+ mode: {skip: 1},
+ data: {closeConnection: false, failBeforeCommitExceptionCode: ErrorCodes.InternalError}
+ }));
+
+ var cmd = {
+ update: 'user',
+ updates: [{q: {x: 0}, u: {$inc: {y: 1}}}, {q: {x: 1}, u: {$inc: {y: 1}}}],
+ ordered: true,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(2)
+ };
+
+ var writeResult = testDb.runCommand(cmd);
+
+ assert.eq(1, writeResult.nModified);
+ assert.eq(1, writeResult.writeErrors.length);
+ assert.eq(1, writeResult.writeErrors[0].index);
+ assert.eq(ErrorCodes.InternalError, writeResult.writeErrors[0].code);
+
+ assert.commandWorked(
+ priConn.adminCommand({configureFailPoint: 'onPrimaryTransactionalWrite', mode: 'off'}));
+
+ var writeResult = testDb.runCommand(cmd);
+ assert.eq(2, writeResult.nModified);
+
+ var collContents = testDb.user.find({}).sort({x: 1}).toArray();
+ assert.eq(2, collContents.length);
+ assert.eq(0, collContents[0].x);
+ assert.eq(1, collContents[0].y);
+ assert.eq(1, collContents[1].x);
+ assert.eq(1, collContents[1].y);
+}
+
+function runMultiTests(mainConn) {
+ // Test the behavior of retryable writes with multi=true / limit=0
+ var lsid = {id: UUID()};
+ var testDb = mainConn.getDB('test_multi');
+
+ // Only the update statements with multi=true in a batch fail.
+ var cmd = {
+ update: 'user',
+ updates: [{q: {x: 1}, u: {y: 1}}, {q: {x: 2}, u: {z: 1}, multi: true}],
+ ordered: true,
+ lsid: lsid,
+ txnNumber: NumberLong(1),
+ };
+ var res = assert.commandWorkedIgnoringWriteErrors(testDb.runCommand(cmd));
+ assert.eq(1,
+ res.writeErrors.length,
+ 'expected only one write error, received: ' + tojson(res.writeErrors));
+ assert.eq(1,
+ res.writeErrors[0].index,
+ 'expected the update at index 1 to fail, not the update at index: ' +
+ res.writeErrors[0].index);
+ assert.eq(ErrorCodes.InvalidOptions,
+ res.writeErrors[0].code,
+ 'expected to fail with code ' + ErrorCodes.InvalidOptions +
+ ', received: ' + res.writeErrors[0].code);
+
+ // Only the delete statements with limit=0 in a batch fail.
+ cmd = {
+ delete: 'user',
+ deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 0}],
+ ordered: false,
+ lsid: lsid,
+ txnNumber: NumberLong(1),
+ };
+ res = assert.commandWorkedIgnoringWriteErrors(testDb.runCommand(cmd));
+ assert.eq(1,
+ res.writeErrors.length,
+ 'expected only one write error, received: ' + tojson(res.writeErrors));
+ assert.eq(1,
+ res.writeErrors[0].index,
+ 'expected the delete at index 1 to fail, not the delete at index: ' +
+ res.writeErrors[0].index);
+ assert.eq(ErrorCodes.InvalidOptions,
+ res.writeErrors[0].code,
+ 'expected to fail with code ' + ErrorCodes.InvalidOptions +
+ ', received: ' + res.writeErrors[0].code);
+}
+
+function runInvalidTests(mainConn) {
+ var lsid = {id: UUID()};
+ var localDB = mainConn.getDB('local');
+
+ let cmd = {
+ insert: 'user',
+ documents: [{_id: 10}, {_id: 30}],
+ ordered: false,
+ lsid: lsid,
+ txnNumber: NumberLong(10),
+ };
+
+ let res = assert.commandWorkedIgnoringWriteErrors(localDB.runCommand(cmd));
+ assert.eq(2, res.writeErrors.length);
+
+ localDB.user.insert({_id: 10, x: 1});
+ localDB.user.insert({_id: 30, z: 2});
+
+ cmd = {
+ update: 'user',
+ updates: [
+ {q: {_id: 10}, u: {$inc: {x: 1}}}, // in place
+ {q: {_id: 20}, u: {$inc: {y: 1}}, upsert: true},
+ {q: {_id: 30}, u: {z: 1}} // replacement
+ ],
+ ordered: false,
+ lsid: lsid,
+ txnNumber: NumberLong(11),
+ };
+
+ res = assert.commandWorkedIgnoringWriteErrors(localDB.runCommand(cmd));
+ assert.eq(3, res.writeErrors.length);
+
+ cmd = {
+ delete: 'user',
+ deletes: [{q: {x: 1}, limit: 1}, {q: {z: 2}, limit: 1}],
+ ordered: false,
+ lsid: lsid,
+ txnNumber: NumberLong(12),
+ };
+
+ res = assert.commandWorkedIgnoringWriteErrors(localDB.runCommand(cmd));
+ assert.eq(2, res.writeErrors.length);
+
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 60},
+ update: {$inc: {x: 1}},
+ new: true,
+ upsert: true,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(37),
+ };
+
+ assert.commandFailed(localDB.runCommand(cmd));
+}
+
+// Tests for replica set
+var replTest = new ReplSetTest({nodes: 2});
+replTest.startSet({verbose: 5});
+replTest.initiate();
+
+var priConn = replTest.getPrimary();
+
+runTests(priConn, priConn);
+runFailpointTests(priConn, priConn);
+runMultiTests(priConn);
+runInvalidTests(priConn);
+
+replTest.stopSet();
+
+// Tests for sharded cluster
+var st = new ShardingTest({shards: {rs0: {nodes: 1, verbose: 5}}});
+
+runTests(st.s0, st.rs0.getPrimary());
+runFailpointTests(st.s0, st.rs0.getPrimary());
+runMultiTests(st.s0);
+
+st.stop();
})();
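The hunks above re-indent the retryable writes test; the invariant it checks throughout is that re-sending a write command with the same lsid and txnNumber returns the stored result without executing the write a second time. Below is a minimal illustrative sketch of that pattern, not part of the diffed test: it uses only standard shell helpers (ReplSetTest, UUID, NumberLong, assert.commandWorked) and a hypothetical 'user' collection like the test above.

(function() {
"use strict";

// Sketch only: a single-node replica set is enough to observe retryable-write replay.
var rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();

var testDB = rst.getPrimary().getDB('test');
var lsid = {id: UUID()};

var cmd = {
    insert: 'user',
    documents: [{_id: 1}],
    ordered: false,
    lsid: lsid,
    txnNumber: NumberLong(1),
};

// The first execution performs the insert.
var firstRes = assert.commandWorked(testDB.runCommand(cmd));

// Retrying the identical command (same lsid and txnNumber) replays the stored result
// instead of inserting again.
var retryRes = assert.commandWorked(testDB.runCommand(cmd));
assert.eq(firstRes.n, retryRes.n);
assert.eq(1, testDB.user.find().itcount());

rst.stopSet();
})();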
diff --git a/jstests/sharding/rs_stepdown_and_pooling.js b/jstests/sharding/rs_stepdown_and_pooling.js
index e298c61ff17..61c2a64ba9c 100644
--- a/jstests/sharding/rs_stepdown_and_pooling.js
+++ b/jstests/sharding/rs_stepdown_and_pooling.js
@@ -2,101 +2,101 @@
// Tests what happens when a replica set primary goes down with pooled connections.
//
(function() {
- "use strict";
- load("jstests/replsets/rslib.js");
+"use strict";
+load("jstests/replsets/rslib.js");
- var st = new ShardingTest({shards: {rs0: {nodes: 2}}, mongos: 1});
+var st = new ShardingTest({shards: {rs0: {nodes: 2}}, mongos: 1});
- // Stop balancer to eliminate weird conn stuff
- st.stopBalancer();
+// Stop the balancer so its background connection activity does not interfere with this test
+st.stopBalancer();
- var mongos = st.s0;
- var coll = mongos.getCollection("foo.bar");
- var db = coll.getDB();
+var mongos = st.s0;
+var coll = mongos.getCollection("foo.bar");
+var db = coll.getDB();
- // Test is not valid for Win32
- var is32Bits = (db.serverBuildInfo().bits == 32);
- if (is32Bits && _isWindows()) {
- // Win32 doesn't provide the polling interface we need to implement the check tested here
- jsTest.log("Test is not valid on Win32 platform.");
+// Test is not valid for Win32
+var is32Bits = (db.serverBuildInfo().bits == 32);
+if (is32Bits && _isWindows()) {
+ // Win32 doesn't provide the polling interface we need to implement the check tested here
+ jsTest.log("Test is not valid on Win32 platform.");
- } else {
- // Non-Win32 platform
+} else {
+ // Non-Win32 platform
- var primary = st.rs0.getPrimary();
- var secondary = st.rs0.getSecondary();
+ var primary = st.rs0.getPrimary();
+ var secondary = st.rs0.getSecondary();
- jsTest.log("Creating new connections...");
+ jsTest.log("Creating new connections...");
- // Create a bunch of connections to the primary node through mongos.
- // jstest ->(x10)-> mongos ->(x10)-> primary
- var conns = [];
- for (var i = 0; i < 50; i++) {
- conns.push(new Mongo(mongos.host));
- conns[i].getCollection(coll + "").findOne();
- }
+ // Create a bunch of connections to the primary node through mongos.
+    // jstest ->(x50)-> mongos ->(pooled connections)-> primary
+ var conns = [];
+ for (var i = 0; i < 50; i++) {
+ conns.push(new Mongo(mongos.host));
+ conns[i].getCollection(coll + "").findOne();
+ }
- jsTest.log("Returning the connections back to the pool.");
+ jsTest.log("Returning the connections back to the pool.");
- for (var i = 0; i < conns.length; i++) {
- conns[i] = null;
- }
- // Make sure we return connections back to the pool
- gc();
+ for (var i = 0; i < conns.length; i++) {
+ conns[i] = null;
+ }
+ // Make sure we return connections back to the pool
+ gc();
- // Don't make test fragile by linking to format of shardConnPoolStats, but this is useful if
- // something goes wrong.
- var connPoolStats = mongos.getDB("admin").runCommand({shardConnPoolStats: 1});
- printjson(connPoolStats);
+ // Don't make test fragile by linking to format of shardConnPoolStats, but this is useful if
+ // something goes wrong.
+ var connPoolStats = mongos.getDB("admin").runCommand({shardConnPoolStats: 1});
+ printjson(connPoolStats);
- jsTest.log("Stepdown primary and then step back up...");
+ jsTest.log("Stepdown primary and then step back up...");
- var stepDown = function(node, timeSecs) {
- assert.commandWorked(
- node.getDB("admin").runCommand({replSetStepDown: timeSecs, force: true}));
- };
+ var stepDown = function(node, timeSecs) {
+ assert.commandWorked(
+ node.getDB("admin").runCommand({replSetStepDown: timeSecs, force: true}));
+ };
- stepDown(primary, 0);
+ stepDown(primary, 0);
- jsTest.log("Waiting for mongos to acknowledge stepdown...");
+ jsTest.log("Waiting for mongos to acknowledge stepdown...");
- awaitRSClientHosts(mongos,
- secondary,
- {ismaster: true},
- st.rs0,
- 2 * 60 * 1000); // slow hosts can take longer to recognize sd
+ awaitRSClientHosts(mongos,
+ secondary,
+ {ismaster: true},
+ st.rs0,
+                       2 * 60 * 1000);  // slow hosts can take longer to recognize the stepdown
- jsTest.log("Stepping back up...");
+ jsTest.log("Stepping back up...");
- stepDown(secondary, 10000);
+ stepDown(secondary, 10000);
- jsTest.log("Waiting for mongos to acknowledge step up...");
+ jsTest.log("Waiting for mongos to acknowledge step up...");
- awaitRSClientHosts(mongos, primary, {ismaster: true}, st.rs0, 2 * 60 * 1000);
+ awaitRSClientHosts(mongos, primary, {ismaster: true}, st.rs0, 2 * 60 * 1000);
- jsTest.log("Waiting for socket timeout time...");
+ jsTest.log("Waiting for socket timeout time...");
- // Need to wait longer than the socket polling time.
- sleep(2 * 5000);
+ // Need to wait longer than the socket polling time.
+ sleep(2 * 5000);
- jsTest.log("Run queries using new connections.");
+ jsTest.log("Run queries using new connections.");
- var numErrors = 0;
- for (var i = 0; i < conns.length; i++) {
- var newConn = new Mongo(mongos.host);
- try {
- printjson(newConn.getCollection("foo.bar").findOne());
- } catch (e) {
- printjson(e);
- numErrors++;
- }
+ var numErrors = 0;
+ for (var i = 0; i < conns.length; i++) {
+ var newConn = new Mongo(mongos.host);
+ try {
+ printjson(newConn.getCollection("foo.bar").findOne());
+ } catch (e) {
+ printjson(e);
+ numErrors++;
}
+ }
- assert.eq(0, numErrors);
+ assert.eq(0, numErrors);
- } // End Win32 check
+} // End Win32 check
- jsTest.log("DONE!");
+jsTest.log("DONE!");
- st.stop();
+st.stop();
}());
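The stepdown test above exercises mongos connection pooling across a primary failover: step the primary down, wait for mongos to notice the new primary, then verify that new connections still serve queries without errors. A minimal sketch of that failover wait follows; it is not part of the diffed test, awaitRSClientHosts comes from jstests/replsets/rslib.js as in the test above, and 'foo.bar' is the same namespace the test uses.

(function() {
"use strict";
load("jstests/replsets/rslib.js");  // provides awaitRSClientHosts

// Sketch only.
var st = new ShardingTest({shards: {rs0: {nodes: 2}}, mongos: 1});
var mongos = st.s0;
var oldPrimary = st.rs0.getPrimary();
var secondary = st.rs0.getSecondary();

// Prime a connection through mongos so it has discovered the replica set.
assert.eq(null, mongos.getCollection("foo.bar").findOne());

// Force a stepdown, then wait until mongos sees the former secondary as the new primary.
assert.commandWorked(
    oldPrimary.getDB("admin").runCommand({replSetStepDown: 60, force: true}));
awaitRSClientHosts(mongos, secondary, {ismaster: true}, st.rs0);

// Queries issued through fresh connections to mongos keep working against the new primary.
assert.eq(null, new Mongo(mongos.host).getCollection("foo.bar").findOne());

st.stop();
})();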
diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js
index 814b79b6308..a507f876fce 100644
--- a/jstests/sharding/safe_secondary_reads_drop_recreate.js
+++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js
@@ -17,592 +17,588 @@
* "versioned". Determines what system profiler checks are performed.
*/
(function() {
- "use strict";
-
- load('jstests/libs/profiler.js');
- load('jstests/sharding/libs/last_stable_mongos_commands.js');
-
- let db = "test";
- let coll = "foo";
- let nss = db + "." + coll;
-
- // Check that a test case is well-formed.
- let validateTestCase = function(test) {
- assert(test.setUp && typeof(test.setUp) === "function");
- assert(test.command && typeof(test.command) === "object");
- assert(test.checkResults && typeof(test.checkResults) === "function");
- assert(test.behavior === "unshardedOnly" ||
- test.behavior === "targetsPrimaryUsesConnectionVersioning" ||
- test.behavior === "versioned");
- };
-
- let testCases = {
- _addShard: {skip: "primary only"},
- _cloneCatalogData: {skip: "primary only"},
- _configsvrAddShard: {skip: "primary only"},
- _configsvrAddShardToZone: {skip: "primary only"},
- _configsvrBalancerStart: {skip: "primary only"},
- _configsvrBalancerStatus: {skip: "primary only"},
- _configsvrBalancerStop: {skip: "primary only"},
- _configsvrCommitChunkMerge: {skip: "primary only"},
- _configsvrCommitChunkMigration: {skip: "primary only"},
- _configsvrCommitChunkSplit: {skip: "primary only"},
- _configsvrCommitMovePrimary: {skip: "primary only"},
- _configsvrDropCollection: {skip: "primary only"},
- _configsvrDropDatabase: {skip: "primary only"},
- _configsvrMoveChunk: {skip: "primary only"},
- _configsvrMovePrimary: {skip: "primary only"},
- _configsvrRemoveShardFromZone: {skip: "primary only"},
- _configsvrShardCollection: {skip: "primary only"},
- _configsvrUpdateZoneKeyRange: {skip: "primary only"},
- _flushRoutingTableCacheUpdates: {skip: "does not return user data"},
- _getUserCacheGeneration: {skip: "does not return user data"},
- _hashBSONElement: {skip: "does not return user data"},
- _isSelf: {skip: "does not return user data"},
- _mergeAuthzCollections: {skip: "primary only"},
- _migrateClone: {skip: "primary only"},
- _movePrimary: {skip: "primary only"},
- _recvChunkAbort: {skip: "primary only"},
- _recvChunkCommit: {skip: "primary only"},
- _recvChunkStart: {skip: "primary only"},
- _recvChunkStatus: {skip: "primary only"},
- _transferMods: {skip: "primary only"},
- abortTransaction: {skip: "primary only"},
- addShard: {skip: "primary only"},
- addShardToZone: {skip: "primary only"},
- aggregate: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(0, res.cursor.firstBatch.length, tojson(res));
- },
- behavior: "versioned"
+"use strict";
+
+load('jstests/libs/profiler.js');
+load('jstests/sharding/libs/last_stable_mongos_commands.js');
+
+let db = "test";
+let coll = "foo";
+let nss = db + "." + coll;
+
+// Check that a test case is well-formed.
+let validateTestCase = function(test) {
+ assert(test.setUp && typeof (test.setUp) === "function");
+ assert(test.command && typeof (test.command) === "object");
+ assert(test.checkResults && typeof (test.checkResults) === "function");
+ assert(test.behavior === "unshardedOnly" ||
+ test.behavior === "targetsPrimaryUsesConnectionVersioning" ||
+ test.behavior === "versioned");
+};
+
+let testCases = {
+ _addShard: {skip: "primary only"},
+ _cloneCatalogData: {skip: "primary only"},
+ _configsvrAddShard: {skip: "primary only"},
+ _configsvrAddShardToZone: {skip: "primary only"},
+ _configsvrBalancerStart: {skip: "primary only"},
+ _configsvrBalancerStatus: {skip: "primary only"},
+ _configsvrBalancerStop: {skip: "primary only"},
+ _configsvrCommitChunkMerge: {skip: "primary only"},
+ _configsvrCommitChunkMigration: {skip: "primary only"},
+ _configsvrCommitChunkSplit: {skip: "primary only"},
+ _configsvrCommitMovePrimary: {skip: "primary only"},
+ _configsvrDropCollection: {skip: "primary only"},
+ _configsvrDropDatabase: {skip: "primary only"},
+ _configsvrMoveChunk: {skip: "primary only"},
+ _configsvrMovePrimary: {skip: "primary only"},
+ _configsvrRemoveShardFromZone: {skip: "primary only"},
+ _configsvrShardCollection: {skip: "primary only"},
+ _configsvrUpdateZoneKeyRange: {skip: "primary only"},
+ _flushRoutingTableCacheUpdates: {skip: "does not return user data"},
+ _getUserCacheGeneration: {skip: "does not return user data"},
+ _hashBSONElement: {skip: "does not return user data"},
+ _isSelf: {skip: "does not return user data"},
+ _mergeAuthzCollections: {skip: "primary only"},
+ _migrateClone: {skip: "primary only"},
+ _movePrimary: {skip: "primary only"},
+ _recvChunkAbort: {skip: "primary only"},
+ _recvChunkCommit: {skip: "primary only"},
+ _recvChunkStart: {skip: "primary only"},
+ _recvChunkStatus: {skip: "primary only"},
+ _transferMods: {skip: "primary only"},
+ abortTransaction: {skip: "primary only"},
+ addShard: {skip: "primary only"},
+ addShardToZone: {skip: "primary only"},
+ aggregate: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
},
- appendOplogNote: {skip: "primary only"},
- applyOps: {skip: "primary only"},
- authSchemaUpgrade: {skip: "primary only"},
- authenticate: {skip: "does not return user data"},
- availableQueryOptions: {skip: "does not return user data"},
- balancerStart: {skip: "primary only"},
- balancerStatus: {skip: "primary only"},
- balancerStop: {skip: "primary only"},
- buildInfo: {skip: "does not return user data"},
- captrunc: {skip: "primary only"},
- checkShardingIndex: {skip: "primary only"},
- cleanupOrphaned: {skip: "primary only"},
- clearLog: {skip: "does not return user data"},
- clone: {skip: "primary only"},
- cloneCollection: {skip: "primary only"},
- cloneCollectionAsCapped: {skip: "primary only"},
- collMod: {skip: "primary only"},
- collStats: {skip: "does not return user data"},
- commitTransaction: {skip: "primary only"},
- compact: {skip: "does not return user data"},
- configureFailPoint: {skip: "does not return user data"},
- connPoolStats: {skip: "does not return user data"},
- connPoolSync: {skip: "does not return user data"},
- connectionStatus: {skip: "does not return user data"},
- convertToCapped: {skip: "primary only"},
- count: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {count: coll, query: {x: 1}},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(0, res.n, tojson(res));
- },
- behavior: "versioned"
+ command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(0, res.cursor.firstBatch.length, tojson(res));
},
- cpuload: {skip: "does not return user data"},
- create: {skip: "primary only"},
- createIndexes: {skip: "primary only"},
- createRole: {skip: "primary only"},
- createUser: {skip: "primary only"},
- currentOp: {skip: "does not return user data"},
- dataSize: {skip: "does not return user data"},
- dbHash: {skip: "does not return user data"},
- dbStats: {skip: "does not return user data"},
- delete: {skip: "primary only"},
- distinct: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {distinct: coll, key: "x"},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(0, res.values.length, tojson(res));
- },
- behavior: "versioned"
+ behavior: "versioned"
+ },
+ appendOplogNote: {skip: "primary only"},
+ applyOps: {skip: "primary only"},
+ authSchemaUpgrade: {skip: "primary only"},
+ authenticate: {skip: "does not return user data"},
+ availableQueryOptions: {skip: "does not return user data"},
+ balancerStart: {skip: "primary only"},
+ balancerStatus: {skip: "primary only"},
+ balancerStop: {skip: "primary only"},
+ buildInfo: {skip: "does not return user data"},
+ captrunc: {skip: "primary only"},
+ checkShardingIndex: {skip: "primary only"},
+ cleanupOrphaned: {skip: "primary only"},
+ clearLog: {skip: "does not return user data"},
+ clone: {skip: "primary only"},
+ cloneCollection: {skip: "primary only"},
+ cloneCollectionAsCapped: {skip: "primary only"},
+ collMod: {skip: "primary only"},
+ collStats: {skip: "does not return user data"},
+ commitTransaction: {skip: "primary only"},
+ compact: {skip: "does not return user data"},
+ configureFailPoint: {skip: "does not return user data"},
+ connPoolStats: {skip: "does not return user data"},
+ connPoolSync: {skip: "does not return user data"},
+ connectionStatus: {skip: "does not return user data"},
+ convertToCapped: {skip: "primary only"},
+ count: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
},
- driverOIDTest: {skip: "does not return user data"},
- drop: {skip: "primary only"},
- dropAllRolesFromDatabase: {skip: "primary only"},
- dropAllUsersFromDatabase: {skip: "primary only"},
- dropConnections: {skip: "does not return user data"},
- dropDatabase: {skip: "primary only"},
- dropIndexes: {skip: "primary only"},
- dropRole: {skip: "primary only"},
- dropUser: {skip: "primary only"},
- echo: {skip: "does not return user data"},
- emptycapped: {skip: "primary only"},
- enableSharding: {skip: "primary only"},
- endSessions: {skip: "does not return user data"},
- explain: {skip: "TODO SERVER-30068"},
- features: {skip: "does not return user data"},
- filemd5: {skip: "does not return user data"},
- find: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {find: coll, filter: {x: 1}},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(0, res.cursor.firstBatch.length, tojson(res));
- },
- behavior: "versioned"
+ command: {count: coll, query: {x: 1}},
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(0, res.n, tojson(res));
},
- findAndModify: {skip: "primary only"},
- flushRouterConfig: {skip: "does not return user data"},
- forceerror: {skip: "does not return user data"},
- fsync: {skip: "does not return user data"},
- fsyncUnlock: {skip: "does not return user data"},
- geoSearch: {skip: "not supported in mongos"},
- getCmdLineOpts: {skip: "does not return user data"},
- getDiagnosticData: {skip: "does not return user data"},
- getLastError: {skip: "primary only"},
- getLog: {skip: "does not return user data"},
- getMore: {skip: "shard version already established"},
- getParameter: {skip: "does not return user data"},
- getShardMap: {skip: "does not return user data"},
- getShardVersion: {skip: "primary only"},
- getnonce: {skip: "does not return user data"},
- godinsert: {skip: "for testing only"},
- grantPrivilegesToRole: {skip: "primary only"},
- grantRolesToRole: {skip: "primary only"},
- grantRolesToUser: {skip: "primary only"},
- handshake: {skip: "does not return user data"},
- hostInfo: {skip: "does not return user data"},
- insert: {skip: "primary only"},
- invalidateUserCache: {skip: "does not return user data"},
- isdbgrid: {skip: "does not return user data"},
- isMaster: {skip: "does not return user data"},
- killAllSessions: {skip: "does not return user data"},
- killAllSessionsByPattern: {skip: "does not return user data"},
- killCursors: {skip: "does not return user data"},
- killOp: {skip: "does not return user data"},
- killSessions: {skip: "does not return user data"},
- listCollections: {skip: "primary only"},
- listCommands: {skip: "does not return user data"},
- listDatabases: {skip: "primary only"},
- listIndexes: {skip: "primary only"},
- listShards: {skip: "does not return user data"},
- lockInfo: {skip: "primary only"},
- logApplicationMessage: {skip: "primary only"},
- logRotate: {skip: "does not return user data"},
- logout: {skip: "does not return user data"},
- makeSnapshot: {skip: "does not return user data"},
- mapReduce: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {
- mapReduce: coll,
- map: function() {
- emit(this.x, 1);
- },
- reduce: function(key, values) {
- return Array.sum(values);
- },
- out: {inline: 1}
- },
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(0, res.results.length, tojson(res));
- },
- behavior: "targetsPrimaryUsesConnectionVersioning"
+ behavior: "versioned"
+ },
+ cpuload: {skip: "does not return user data"},
+ create: {skip: "primary only"},
+ createIndexes: {skip: "primary only"},
+ createRole: {skip: "primary only"},
+ createUser: {skip: "primary only"},
+ currentOp: {skip: "does not return user data"},
+ dataSize: {skip: "does not return user data"},
+ dbHash: {skip: "does not return user data"},
+ dbStats: {skip: "does not return user data"},
+ delete: {skip: "primary only"},
+ distinct: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
},
- mergeChunks: {skip: "primary only"},
- moveChunk: {skip: "primary only"},
- movePrimary: {skip: "primary only"},
- multicast: {skip: "does not return user data"},
- netstat: {skip: "does not return user data"},
- ping: {skip: "does not return user data"},
- planCacheClear: {skip: "does not return user data"},
- planCacheClearFilters: {skip: "does not return user data"},
- planCacheListFilters: {skip: "does not return user data"},
- planCacheListPlans: {skip: "does not return user data"},
- planCacheListQueryShapes: {skip: "does not return user data"},
- planCacheSetFilter: {skip: "does not return user data"},
- profile: {skip: "primary only"},
- reapLogicalSessionCacheNow: {skip: "does not return user data"},
- refreshLogicalSessionCacheNow: {skip: "does not return user data"},
- refreshSessions: {skip: "does not return user data"},
- refreshSessionsInternal: {skip: "does not return user data"},
- removeShard: {skip: "primary only"},
- removeShardFromZone: {skip: "primary only"},
- renameCollection: {skip: "primary only"},
- repairCursor: {skip: "does not return user data"},
- replSetAbortPrimaryCatchUp: {skip: "does not return user data"},
- replSetFreeze: {skip: "does not return user data"},
- replSetGetConfig: {skip: "does not return user data"},
- replSetGetRBID: {skip: "does not return user data"},
- replSetGetStatus: {skip: "does not return user data"},
- replSetHeartbeat: {skip: "does not return user data"},
- replSetInitiate: {skip: "does not return user data"},
- replSetMaintenance: {skip: "does not return user data"},
- replSetReconfig: {skip: "does not return user data"},
- replSetRequestVotes: {skip: "does not return user data"},
- replSetStepDown: {skip: "does not return user data"},
- replSetStepUp: {skip: "does not return user data"},
- replSetSyncFrom: {skip: "does not return user data"},
- replSetTest: {skip: "does not return user data"},
- replSetUpdatePosition: {skip: "does not return user data"},
- replSetResizeOplog: {skip: "does not return user data"},
- resetError: {skip: "does not return user data"},
- restartCatalog: {skip: "internal-only command"},
- resync: {skip: "primary only"},
- revokePrivilegesFromRole: {skip: "primary only"},
- revokeRolesFromRole: {skip: "primary only"},
- revokeRolesFromUser: {skip: "primary only"},
- rolesInfo: {skip: "primary only"},
- saslContinue: {skip: "primary only"},
- saslStart: {skip: "primary only"},
- serverStatus: {skip: "does not return user data"},
- setCommittedSnapshot: {skip: "does not return user data"},
- setIndexCommitQuorum: {skip: "primary only"},
- setFeatureCompatibilityVersion: {skip: "primary only"},
- setFreeMonitoring: {skip: "primary only"},
- setParameter: {skip: "does not return user data"},
- setShardVersion: {skip: "does not return user data"},
- shardCollection: {skip: "primary only"},
- shardConnPoolStats: {skip: "does not return user data"},
- shardingState: {skip: "does not return user data"},
- shutdown: {skip: "does not return user data"},
- sleep: {skip: "does not return user data"},
- split: {skip: "primary only"},
- splitChunk: {skip: "primary only"},
- splitVector: {skip: "primary only"},
- stageDebug: {skip: "primary only"},
- startRecordingTraffic: {skip: "does not return user data"},
- startSession: {skip: "does not return user data"},
- stopRecordingTraffic: {skip: "does not return user data"},
- top: {skip: "does not return user data"},
- touch: {skip: "does not return user data"},
- unsetSharding: {skip: "does not return user data"},
- update: {skip: "primary only"},
- updateRole: {skip: "primary only"},
- updateUser: {skip: "primary only"},
- updateZoneKeyRange: {skip: "primary only"},
- usersInfo: {skip: "primary only"},
- validate: {skip: "does not return user data"},
- waitForOngoingChunkSplits: {skip: "does not return user data"},
- whatsmyuri: {skip: "does not return user data"}
- };
-
- commandsRemovedFromMongosIn42.forEach(function(cmd) {
- testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
- });
-
- let scenarios = {
- dropRecreateAsUnshardedOnSameShard: function(
- staleMongos, freshMongos, test, commandProfile) {
- let primaryShardPrimary = st.rs0.getPrimary();
- let primaryShardSecondary = st.rs0.getSecondary();
-
- // Drop and recreate the collection.
- assert.commandWorked(freshMongos.getDB(db).runCommand({drop: coll}));
- assert.commandWorked(freshMongos.getDB(db).runCommand({create: coll}));
-
- // Ensure the latest version changes have been persisted and propagate to the secondary
- // before we target it with versioned commands.
- assert.commandWorked(st.rs0.getPrimary().getDB('admin').runCommand(
- {_flushRoutingTableCacheUpdates: nss}));
- st.rs0.awaitReplication();
-
- let res = staleMongos.getDB(db).runCommand(Object.assign(
- {},
- test.command,
- {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
-
- test.checkResults(res);
-
- if (test.behavior === "unshardedOnly") {
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: primaryShardSecondary.getDB(db), filter: commandProfile});
- } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
- // Check that the primary shard primary received the request without a shardVersion
- // field and returned success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShardPrimary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": false},
- "command.$readPreference": {$exists: false},
- "command.readConcern": {"level": "local"},
- "errCode": {"$exists": false}
- },
- commandProfile)
- });
- } else if (test.behavior == "versioned") {
- // Check that the primary shard secondary returned stale shardVersion.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- },
- commandProfile)
- });
-
- // Check that the primary shard secondary received the request again and returned
- // success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": {"$ne": ErrorCodes.StaleConfig},
- },
- commandProfile)
- });
- }
+ command: {distinct: coll, key: "x"},
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(0, res.values.length, tojson(res));
},
- dropRecreateAsShardedOnSameShard: function(staleMongos, freshMongos, test, commandProfile) {
- let primaryShardPrimary = st.rs0.getPrimary();
- let primaryShardSecondary = st.rs0.getSecondary();
-
- // Drop and recreate the collection as sharded.
- assert.commandWorked(freshMongos.getDB(db).runCommand({drop: coll}));
- assert.commandWorked(freshMongos.getDB(db).runCommand({create: coll}));
- assert.commandWorked(freshMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
-
- // We do this because we expect staleMongos to see that the collection is sharded, which
- // it may not if the "nearest" config server it contacts has not replicated the
- // shardCollection writes (or has not heard that they have reached a majority).
- st.configRS.awaitReplication();
-
- // Ensure the latest version changes have been persisted and propagate to the secondary
- // before we target it with versioned commands.
- assert.commandWorked(st.rs0.getPrimary().getDB('admin').runCommand(
- {_flushRoutingTableCacheUpdates: nss}));
- st.rs0.awaitReplication();
-
- let res = staleMongos.getDB(db).runCommand(Object.assign(
- {},
- test.command,
- {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
-
- test.checkResults(res);
-
- if (test.behavior === "unshardedOnly") {
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: primaryShardSecondary.getDB(db), filter: commandProfile});
- } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
- // Check that the primary shard primary received the request without a shardVersion
- // field and returned success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShardPrimary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": false},
- "command.$readPreference": {$exists: false},
- "command.readConcern": {"level": "local"},
- "errCode": {"$exists": false},
- },
- commandProfile)
- });
- } else if (test.behavior == "versioned") {
- // Check that the primary shard secondary returned stale shardVersion.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- },
- commandProfile)
- });
-
- // Check that the primary shard secondary received the request again and returned
- // success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": {"$ne": ErrorCodes.StaleConfig},
- },
- commandProfile)
- });
- }
+ behavior: "versioned"
+ },
+ driverOIDTest: {skip: "does not return user data"},
+ drop: {skip: "primary only"},
+ dropAllRolesFromDatabase: {skip: "primary only"},
+ dropAllUsersFromDatabase: {skip: "primary only"},
+ dropConnections: {skip: "does not return user data"},
+ dropDatabase: {skip: "primary only"},
+ dropIndexes: {skip: "primary only"},
+ dropRole: {skip: "primary only"},
+ dropUser: {skip: "primary only"},
+ echo: {skip: "does not return user data"},
+ emptycapped: {skip: "primary only"},
+ enableSharding: {skip: "primary only"},
+ endSessions: {skip: "does not return user data"},
+ explain: {skip: "TODO SERVER-30068"},
+ features: {skip: "does not return user data"},
+ filemd5: {skip: "does not return user data"},
+ find: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
},
- dropRecreateAsUnshardedOnDifferentShard: function(
- staleMongos, freshMongos, test, commandProfile) {
- // There is no way to drop and recreate the collection as unsharded on a *different*
- // shard without calling movePrimary, and it is known that a stale mongos will not
- // refresh its notion of the primary shard after it loads it once.
+ command: {find: coll, filter: {x: 1}},
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(0, res.cursor.firstBatch.length, tojson(res));
+ },
+ behavior: "versioned"
+ },
+ findAndModify: {skip: "primary only"},
+ flushRouterConfig: {skip: "does not return user data"},
+ forceerror: {skip: "does not return user data"},
+ fsync: {skip: "does not return user data"},
+ fsyncUnlock: {skip: "does not return user data"},
+ geoSearch: {skip: "not supported in mongos"},
+ getCmdLineOpts: {skip: "does not return user data"},
+ getDiagnosticData: {skip: "does not return user data"},
+ getLastError: {skip: "primary only"},
+ getLog: {skip: "does not return user data"},
+ getMore: {skip: "shard version already established"},
+ getParameter: {skip: "does not return user data"},
+ getShardMap: {skip: "does not return user data"},
+ getShardVersion: {skip: "primary only"},
+ getnonce: {skip: "does not return user data"},
+ godinsert: {skip: "for testing only"},
+ grantPrivilegesToRole: {skip: "primary only"},
+ grantRolesToRole: {skip: "primary only"},
+ grantRolesToUser: {skip: "primary only"},
+ handshake: {skip: "does not return user data"},
+ hostInfo: {skip: "does not return user data"},
+ insert: {skip: "primary only"},
+ invalidateUserCache: {skip: "does not return user data"},
+ isdbgrid: {skip: "does not return user data"},
+ isMaster: {skip: "does not return user data"},
+ killAllSessions: {skip: "does not return user data"},
+ killAllSessionsByPattern: {skip: "does not return user data"},
+ killCursors: {skip: "does not return user data"},
+ killOp: {skip: "does not return user data"},
+ killSessions: {skip: "does not return user data"},
+ listCollections: {skip: "primary only"},
+ listCommands: {skip: "does not return user data"},
+ listDatabases: {skip: "primary only"},
+ listIndexes: {skip: "primary only"},
+ listShards: {skip: "does not return user data"},
+ lockInfo: {skip: "primary only"},
+ logApplicationMessage: {skip: "primary only"},
+ logRotate: {skip: "does not return user data"},
+ logout: {skip: "does not return user data"},
+ makeSnapshot: {skip: "does not return user data"},
+ mapReduce: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {
+ mapReduce: coll,
+ map: function() {
+ emit(this.x, 1);
+ },
+ reduce: function(key, values) {
+ return Array.sum(values);
+ },
+ out: {inline: 1}
},
- dropRecreateAsShardedOnDifferentShard: function(
- staleMongos, freshMongos, test, commandProfile) {
- let donorShardSecondary = st.rs0.getSecondary();
- let recipientShardPrimary = st.rs1.getPrimary();
- let recipientShardSecondary = st.rs1.getSecondary();
-
- // Drop and recreate the collection as sharded, and move the chunk to the other shard.
- assert.commandWorked(freshMongos.getDB(db).runCommand({drop: coll}));
- assert.commandWorked(freshMongos.getDB(db).runCommand({create: coll}));
- assert.commandWorked(freshMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
-
- // We do this because we expect staleMongos to see that the collection is sharded, which
- // it may not if the "nearest" config server it contacts has not replicated the
- // shardCollection writes (or has not heard that they have reached a majority).
- st.configRS.awaitReplication();
-
- // Use {w:2} (all) write concern in the moveChunk operation so the metadata change gets
- // persisted to the secondary before versioned commands are sent against the secondary.
- assert.commandWorked(freshMongos.adminCommand({
- moveChunk: nss,
- find: {x: 0},
- to: st.shard1.shardName,
- _secondaryThrottle: true,
- writeConcern: {w: 2},
- }));
-
- let res = staleMongos.getDB(db).runCommand(Object.assign(
- {},
- test.command,
- {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
-
- test.checkResults(res);
-
- if (test.behavior === "unshardedOnly") {
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: donorShardSecondary.getDB(db), filter: commandProfile});
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: recipientShardSecondary.getDB(db), filter: commandProfile});
- } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
- // Check that the recipient shard primary received the request without a
- // shardVersion field and returned success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardPrimary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": false},
- "command.$readPreference": {$exists: false},
- "command.readConcern": {"level": "local"},
- "errCode": {"$exists": false},
- },
- commandProfile)
- });
- } else if (test.behavior == "versioned") {
- // Check that the donor shard secondary returned stale shardVersion.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- },
- commandProfile)
- });
-
- // Check that the recipient shard secondary received the request and returned
- // success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": {"$ne": ErrorCodes.StaleConfig},
- },
- commandProfile)
- });
- }
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(0, res.results.length, tojson(res));
+ },
+ behavior: "targetsPrimaryUsesConnectionVersioning"
+ },
+ mergeChunks: {skip: "primary only"},
+ moveChunk: {skip: "primary only"},
+ movePrimary: {skip: "primary only"},
+ multicast: {skip: "does not return user data"},
+ netstat: {skip: "does not return user data"},
+ ping: {skip: "does not return user data"},
+ planCacheClear: {skip: "does not return user data"},
+ planCacheClearFilters: {skip: "does not return user data"},
+ planCacheListFilters: {skip: "does not return user data"},
+ planCacheListPlans: {skip: "does not return user data"},
+ planCacheListQueryShapes: {skip: "does not return user data"},
+ planCacheSetFilter: {skip: "does not return user data"},
+ profile: {skip: "primary only"},
+ reapLogicalSessionCacheNow: {skip: "does not return user data"},
+ refreshLogicalSessionCacheNow: {skip: "does not return user data"},
+ refreshSessions: {skip: "does not return user data"},
+ refreshSessionsInternal: {skip: "does not return user data"},
+ removeShard: {skip: "primary only"},
+ removeShardFromZone: {skip: "primary only"},
+ renameCollection: {skip: "primary only"},
+ repairCursor: {skip: "does not return user data"},
+ replSetAbortPrimaryCatchUp: {skip: "does not return user data"},
+ replSetFreeze: {skip: "does not return user data"},
+ replSetGetConfig: {skip: "does not return user data"},
+ replSetGetRBID: {skip: "does not return user data"},
+ replSetGetStatus: {skip: "does not return user data"},
+ replSetHeartbeat: {skip: "does not return user data"},
+ replSetInitiate: {skip: "does not return user data"},
+ replSetMaintenance: {skip: "does not return user data"},
+ replSetReconfig: {skip: "does not return user data"},
+ replSetRequestVotes: {skip: "does not return user data"},
+ replSetStepDown: {skip: "does not return user data"},
+ replSetStepUp: {skip: "does not return user data"},
+ replSetSyncFrom: {skip: "does not return user data"},
+ replSetTest: {skip: "does not return user data"},
+ replSetUpdatePosition: {skip: "does not return user data"},
+ replSetResizeOplog: {skip: "does not return user data"},
+ resetError: {skip: "does not return user data"},
+ restartCatalog: {skip: "internal-only command"},
+ resync: {skip: "primary only"},
+ revokePrivilegesFromRole: {skip: "primary only"},
+ revokeRolesFromRole: {skip: "primary only"},
+ revokeRolesFromUser: {skip: "primary only"},
+ rolesInfo: {skip: "primary only"},
+ saslContinue: {skip: "primary only"},
+ saslStart: {skip: "primary only"},
+ serverStatus: {skip: "does not return user data"},
+ setCommittedSnapshot: {skip: "does not return user data"},
+ setIndexCommitQuorum: {skip: "primary only"},
+ setFeatureCompatibilityVersion: {skip: "primary only"},
+ setFreeMonitoring: {skip: "primary only"},
+ setParameter: {skip: "does not return user data"},
+ setShardVersion: {skip: "does not return user data"},
+ shardCollection: {skip: "primary only"},
+ shardConnPoolStats: {skip: "does not return user data"},
+ shardingState: {skip: "does not return user data"},
+ shutdown: {skip: "does not return user data"},
+ sleep: {skip: "does not return user data"},
+ split: {skip: "primary only"},
+ splitChunk: {skip: "primary only"},
+ splitVector: {skip: "primary only"},
+ stageDebug: {skip: "primary only"},
+ startRecordingTraffic: {skip: "does not return user data"},
+ startSession: {skip: "does not return user data"},
+ stopRecordingTraffic: {skip: "does not return user data"},
+ top: {skip: "does not return user data"},
+ touch: {skip: "does not return user data"},
+ unsetSharding: {skip: "does not return user data"},
+ update: {skip: "primary only"},
+ updateRole: {skip: "primary only"},
+ updateUser: {skip: "primary only"},
+ updateZoneKeyRange: {skip: "primary only"},
+ usersInfo: {skip: "primary only"},
+ validate: {skip: "does not return user data"},
+ waitForOngoingChunkSplits: {skip: "does not return user data"},
+ whatsmyuri: {skip: "does not return user data"}
+};
+
+commandsRemovedFromMongosIn42.forEach(function(cmd) {
+ testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
+});
+
+let scenarios = {
+ dropRecreateAsUnshardedOnSameShard: function(staleMongos, freshMongos, test, commandProfile) {
+ let primaryShardPrimary = st.rs0.getPrimary();
+ let primaryShardSecondary = st.rs0.getSecondary();
+
+ // Drop and recreate the collection.
+ assert.commandWorked(freshMongos.getDB(db).runCommand({drop: coll}));
+ assert.commandWorked(freshMongos.getDB(db).runCommand({create: coll}));
+
+ // Ensure the latest version changes have been persisted and have propagated to the
+ // secondary before we target it with versioned commands.
+ assert.commandWorked(
+ st.rs0.getPrimary().getDB('admin').runCommand({_flushRoutingTableCacheUpdates: nss}));
+ st.rs0.awaitReplication();
+
+ let res = staleMongos.getDB(db).runCommand(
+ Object.assign({},
+ test.command,
+ {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
+
+ test.checkResults(res);
+
+ if (test.behavior === "unshardedOnly") {
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: primaryShardSecondary.getDB(db), filter: commandProfile});
+ } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
+ // Check that the primary shard primary received the request without a shardVersion
+ // field and returned success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryShardPrimary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": false},
+ "command.$readPreference": {$exists: false},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$exists": false}
+ },
+ commandProfile)
+ });
+ } else if (test.behavior == "versioned") {
+ // Check that the primary shard secondary returned stale shardVersion.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
+ },
+ commandProfile)
+ });
+
+ // Check that the primary shard secondary received the request again and returned
+ // success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$ne": ErrorCodes.StaleConfig},
+ },
+ commandProfile)
+ });
+ }
+ },
+ dropRecreateAsShardedOnSameShard: function(staleMongos, freshMongos, test, commandProfile) {
+ let primaryShardPrimary = st.rs0.getPrimary();
+ let primaryShardSecondary = st.rs0.getSecondary();
+
+ // Drop and recreate the collection as sharded.
+ assert.commandWorked(freshMongos.getDB(db).runCommand({drop: coll}));
+ assert.commandWorked(freshMongos.getDB(db).runCommand({create: coll}));
+ assert.commandWorked(freshMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
+
+ // We do this because we expect staleMongos to see that the collection is sharded, which
+ // it may not if the "nearest" config server it contacts has not replicated the
+ // shardCollection writes (or has not heard that they have reached a majority).
+ st.configRS.awaitReplication();
+
+ // Ensure the latest version changes have been persisted and have propagated to the
+ // secondary before we target it with versioned commands.
+ assert.commandWorked(
+ st.rs0.getPrimary().getDB('admin').runCommand({_flushRoutingTableCacheUpdates: nss}));
+ st.rs0.awaitReplication();
+
+ let res = staleMongos.getDB(db).runCommand(
+ Object.assign({},
+ test.command,
+ {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
+
+ test.checkResults(res);
+
+ if (test.behavior === "unshardedOnly") {
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: primaryShardSecondary.getDB(db), filter: commandProfile});
+ } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
+ // Check that the primary shard primary received the request without a shardVersion
+ // field and returned success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryShardPrimary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": false},
+ "command.$readPreference": {$exists: false},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$exists": false},
+ },
+ commandProfile)
+ });
+ } else if (test.behavior == "versioned") {
+ // Check that the primary shard secondary returned stale shardVersion.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
+ },
+ commandProfile)
+ });
+
+ // Check that the primary shard secondary received the request again and returned
+ // success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$ne": ErrorCodes.StaleConfig},
+ },
+ commandProfile)
+ });
}
- };
+ },
+ dropRecreateAsUnshardedOnDifferentShard: function(
+ staleMongos, freshMongos, test, commandProfile) {
+ // There is no way to drop and recreate the collection as unsharded on a *different*
+ // shard without calling movePrimary, and it is known that a stale mongos will not
+ // refresh its notion of the primary shard once it has loaded it.
+ },
+ dropRecreateAsShardedOnDifferentShard: function(
+ staleMongos, freshMongos, test, commandProfile) {
+ let donorShardSecondary = st.rs0.getSecondary();
+ let recipientShardPrimary = st.rs1.getPrimary();
+ let recipientShardSecondary = st.rs1.getSecondary();
+
+ // Drop and recreate the collection as sharded, and move the chunk to the other shard.
+ assert.commandWorked(freshMongos.getDB(db).runCommand({drop: coll}));
+ assert.commandWorked(freshMongos.getDB(db).runCommand({create: coll}));
+ assert.commandWorked(freshMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
+
+ // We do this because we expect staleMongos to see that the collection is sharded, which
+ // it may not if the "nearest" config server it contacts has not replicated the
+ // shardCollection writes (or has not heard that they have reached a majority).
+ st.configRS.awaitReplication();
+
+ // Use {w:2} (all) write concern in the moveChunk operation so the metadata change gets
+ // persisted to the secondary before versioned commands are sent against the secondary.
+ assert.commandWorked(freshMongos.adminCommand({
+ moveChunk: nss,
+ find: {x: 0},
+ to: st.shard1.shardName,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ }));
+
+ let res = staleMongos.getDB(db).runCommand(
+ Object.assign({},
+ test.command,
+ {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
+
+ test.checkResults(res);
+
+ if (test.behavior === "unshardedOnly") {
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: donorShardSecondary.getDB(db), filter: commandProfile});
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: recipientShardSecondary.getDB(db), filter: commandProfile});
+ } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
+ // Check that the recipient shard primary received the request without a
+ // shardVersion field and returned success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardPrimary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": false},
+ "command.$readPreference": {$exists: false},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$exists": false},
+ },
+ commandProfile)
+ });
+ } else if (test.behavior == "versioned") {
+ // Check that the donor shard secondary returned stale shardVersion.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: donorShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
+ },
+ commandProfile)
+ });
+
+ // Check that the recipient shard secondary received the request and returned
+ // success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$ne": ErrorCodes.StaleConfig},
+ },
+ commandProfile)
+ });
+ }
+ }
+};
- // Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
- let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
- let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
+// Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
+let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
+let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
- let freshMongos = st.s0;
- let staleMongos = st.s1;
+let freshMongos = st.s0;
+let staleMongos = st.s1;
- let res = st.s.adminCommand({listCommands: 1});
- assert.commandWorked(res);
+let res = st.s.adminCommand({listCommands: 1});
+assert.commandWorked(res);
- let commands = Object.keys(res.commands);
- for (let command of commands) {
- let test = testCases[command];
- assert(test !== undefined,
- "coverage failure: must define a safe secondary reads test for " + command);
+let commands = Object.keys(res.commands);
+for (let command of commands) {
+ let test = testCases[command];
+ assert(test !== undefined,
+ "coverage failure: must define a safe secondary reads test for " + command);
- if (test.skip !== undefined) {
- print("skipping " + command + ": " + test.skip);
- continue;
- }
- validateTestCase(test);
-
- // Build the query to identify the operation in the system profiler.
- let commandProfile = buildCommandProfile(test.command, true /* sharded */);
-
- for (let scenario in scenarios) {
- jsTest.log("testing command " + tojson(command) + " under scenario " + scenario);
-
- // Each scenario starts with a sharded collection with shard0 as the primary shard.
- assert.commandWorked(staleMongos.adminCommand({enableSharding: db}));
- st.ensurePrimaryShard(db, st.shard0.shardName);
- assert.commandWorked(staleMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
-
- // We do this because we expect staleMongos to see that the collection is sharded, which
- // it may not if the "nearest" config server it contacts has not replicated the
- // shardCollection writes (or has not heard that they have reached a majority).
- st.configRS.awaitReplication();
-
- // Do any test-specific setup.
- test.setUp(staleMongos);
-
- // Wait for replication as a safety net, in case the individual setup function for a
- // test case did not specify writeConcern itself
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
-
- // Do dummy read from the stale mongos so it loads the routing table into memory once.
- // Additionally, do a secondary read to ensure that the secondary has loaded the initial
- // routing table -- the first read to the primary will refresh the mongos' shardVersion,
- // which will then be used against the secondary to ensure the secondary is fresh.
- assert.commandWorked(staleMongos.getDB(db).runCommand({find: coll}));
- assert.commandWorked(freshMongos.getDB(db).runCommand({
- find: coll,
- $readPreference: {mode: 'secondary'},
- readConcern: {'level': 'local'}
- }));
- // Wait for drop of previous database to replicate before beginning profiling
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
- assert.commandWorked(st.rs0.getPrimary().getDB(db).setProfilingLevel(2));
- assert.commandWorked(st.rs0.getSecondary().getDB(db).setProfilingLevel(2));
- assert.commandWorked(st.rs1.getPrimary().getDB(db).setProfilingLevel(2));
- assert.commandWorked(st.rs1.getSecondary().getDB(db).setProfilingLevel(2));
-
- scenarios[scenario](staleMongos, freshMongos, test, commandProfile);
-
- // Clean up the database by dropping it; this is the only way to drop the profiler
- // collection on secondaries.
- // Do this from staleMongos, so staleMongos purges the database entry from its cache.
- assert.commandWorked(staleMongos.getDB(db).runCommand({dropDatabase: 1}));
- }
+ if (test.skip !== undefined) {
+ print("skipping " + command + ": " + test.skip);
+ continue;
+ }
+ validateTestCase(test);
+
+ // Build the query to identify the operation in the system profiler.
+ let commandProfile = buildCommandProfile(test.command, true /* sharded */);
+
+ for (let scenario in scenarios) {
+ jsTest.log("testing command " + tojson(command) + " under scenario " + scenario);
+
+ // Each scenario starts with a sharded collection with shard0 as the primary shard.
+ assert.commandWorked(staleMongos.adminCommand({enableSharding: db}));
+ st.ensurePrimaryShard(db, st.shard0.shardName);
+ assert.commandWorked(staleMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
+
+ // We do this because we expect staleMongos to see that the collection is sharded, which
+ // it may not if the "nearest" config server it contacts has not replicated the
+ // shardCollection writes (or has not heard that they have reached a majority).
+ st.configRS.awaitReplication();
+
+ // Do any test-specific setup.
+ test.setUp(staleMongos);
+
+ // Wait for replication as a safety net, in case the individual setup function for a
+ // test case did not specify a writeConcern itself.
+ st.rs0.awaitReplication();
+ st.rs1.awaitReplication();
+
+ // Do a dummy read from the stale mongos so it loads the routing table into memory once.
+ // Additionally, do a secondary read to ensure that the secondary has loaded the initial
+ // routing table -- the first read to the primary will refresh the mongos' shardVersion,
+ // which will then be used against the secondary to ensure the secondary is fresh.
+ assert.commandWorked(staleMongos.getDB(db).runCommand({find: coll}));
+ assert.commandWorked(freshMongos.getDB(db).runCommand(
+ {find: coll, $readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
+ // Wait for the drop of the previous database to replicate before beginning profiling.
+ st.rs0.awaitReplication();
+ st.rs1.awaitReplication();
+ assert.commandWorked(st.rs0.getPrimary().getDB(db).setProfilingLevel(2));
+ assert.commandWorked(st.rs0.getSecondary().getDB(db).setProfilingLevel(2));
+ assert.commandWorked(st.rs1.getPrimary().getDB(db).setProfilingLevel(2));
+ assert.commandWorked(st.rs1.getSecondary().getDB(db).setProfilingLevel(2));
+
+ scenarios[scenario](staleMongos, freshMongos, test, commandProfile);
+
+ // Clean up the database by dropping it; this is the only way to drop the profiler
+ // collection on secondaries.
+ // Do this from staleMongos, so staleMongos purges the database entry from its cache.
+ assert.commandWorked(staleMongos.getDB(db).runCommand({dropDatabase: 1}));
}
+}
- st.stop();
+st.stop();
})();
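
Both of these suites rely on the same profiler-assertion pattern: build a base filter for the command with buildCommandProfile() (from jstests/libs/profiler.js), extend it with the expected shardVersion, $readPreference, readConcern, and errCode fields, and then require exactly one (or zero) matching entries in the targeted node's system.profile collection. Below is a minimal illustrative sketch of that check, not part of the patch itself; `secondary` and `exampleFilter` are hypothetical names, and `db` and `commandProfile` are assumed to be set up as in the tests above.

// Sketch only: assert that a versioned secondary read with readConcern 'local' was
// profiled exactly once on this node and did not fail with StaleConfig.
let exampleFilter = Object.extend({
    "command.shardVersion": {"$exists": true},
    "command.$readPreference": {"mode": "secondary"},
    "command.readConcern": {"level": "local"},
    "errCode": {"$ne": ErrorCodes.StaleConfig},
}, commandProfile);
profilerHasSingleMatchingEntryOrThrow({profileDB: secondary.getDB(db), filter: exampleFilter});
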
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
index 7f7e8fff3f1..234f0873076 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
@@ -22,521 +22,520 @@
* "versioned". Determines what system profiler checks are performed.
*/
(function() {
- "use strict";
-
- load('jstests/libs/profiler.js');
- load('jstests/sharding/libs/last_stable_mongos_commands.js');
-
- let db = "test";
- let coll = "foo";
- let nss = db + "." + coll;
-
- // Check that a test case is well-formed.
- let validateTestCase = function(test) {
- assert(test.setUp && typeof(test.setUp) === "function");
- assert(test.command && typeof(test.command) === "object");
- assert(test.checkResults && typeof(test.checkResults) === "function");
- assert(test.checkAvailableReadConcernResults &&
- typeof(test.checkAvailableReadConcernResults) === "function");
- assert(test.behavior === "unshardedOnly" ||
- test.behavior === "targetsPrimaryUsesConnectionVersioning" ||
- test.behavior === "versioned");
- };
-
- let testCases = {
- _addShard: {skip: "primary only"},
- _cloneCatalogData: {skip: "primary only"},
- _configsvrAddShard: {skip: "primary only"},
- _configsvrAddShardToZone: {skip: "primary only"},
- _configsvrBalancerStart: {skip: "primary only"},
- _configsvrBalancerStatus: {skip: "primary only"},
- _configsvrBalancerStop: {skip: "primary only"},
- _configsvrCommitChunkMerge: {skip: "primary only"},
- _configsvrCommitChunkMigration: {skip: "primary only"},
- _configsvrCommitChunkSplit: {skip: "primary only"},
- _configsvrCommitMovePrimary: {skip: "primary only"},
- _configsvrDropCollection: {skip: "primary only"},
- _configsvrDropDatabase: {skip: "primary only"},
- _configsvrMoveChunk: {skip: "primary only"},
- _configsvrMovePrimary: {skip: "primary only"},
- _configsvrRemoveShardFromZone: {skip: "primary only"},
- _configsvrShardCollection: {skip: "primary only"},
- _configsvrUpdateZoneKeyRange: {skip: "primary only"},
- _flushRoutingTableCacheUpdates: {skip: "does not return user data"},
- _getUserCacheGeneration: {skip: "does not return user data"},
- _hashBSONElement: {skip: "does not return user data"},
- _isSelf: {skip: "does not return user data"},
- _mergeAuthzCollections: {skip: "primary only"},
- _migrateClone: {skip: "primary only"},
- _movePrimary: {skip: "primary only"},
- _recvChunkAbort: {skip: "primary only"},
- _recvChunkCommit: {skip: "primary only"},
- _recvChunkStart: {skip: "primary only"},
- _recvChunkStatus: {skip: "primary only"},
- _transferMods: {skip: "primary only"},
- abortTransaction: {skip: "primary only"},
- addShard: {skip: "primary only"},
- addShardToZone: {skip: "primary only"},
- aggregate: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
- checkResults: function(res) {
- // The command should work and return correct results.
- assert.commandWorked(res);
- assert.eq(1, res.cursor.firstBatch.length, tojson(res));
- },
- checkAvailableReadConcernResults: function(res) {
- // The command should work and return orphaned results.
- assert.commandWorked(res);
- assert.eq(1, res.cursor.firstBatch.length, tojson(res));
- },
- behavior: "versioned"
+"use strict";
+
+load('jstests/libs/profiler.js');
+load('jstests/sharding/libs/last_stable_mongos_commands.js');
+
+let db = "test";
+let coll = "foo";
+let nss = db + "." + coll;
+
+// Check that a test case is well-formed.
+let validateTestCase = function(test) {
+ assert(test.setUp && typeof (test.setUp) === "function");
+ assert(test.command && typeof (test.command) === "object");
+ assert(test.checkResults && typeof (test.checkResults) === "function");
+ assert(test.checkAvailableReadConcernResults &&
+ typeof (test.checkAvailableReadConcernResults) === "function");
+ assert(test.behavior === "unshardedOnly" ||
+ test.behavior === "targetsPrimaryUsesConnectionVersioning" ||
+ test.behavior === "versioned");
+};
+
+let testCases = {
+ _addShard: {skip: "primary only"},
+ _cloneCatalogData: {skip: "primary only"},
+ _configsvrAddShard: {skip: "primary only"},
+ _configsvrAddShardToZone: {skip: "primary only"},
+ _configsvrBalancerStart: {skip: "primary only"},
+ _configsvrBalancerStatus: {skip: "primary only"},
+ _configsvrBalancerStop: {skip: "primary only"},
+ _configsvrCommitChunkMerge: {skip: "primary only"},
+ _configsvrCommitChunkMigration: {skip: "primary only"},
+ _configsvrCommitChunkSplit: {skip: "primary only"},
+ _configsvrCommitMovePrimary: {skip: "primary only"},
+ _configsvrDropCollection: {skip: "primary only"},
+ _configsvrDropDatabase: {skip: "primary only"},
+ _configsvrMoveChunk: {skip: "primary only"},
+ _configsvrMovePrimary: {skip: "primary only"},
+ _configsvrRemoveShardFromZone: {skip: "primary only"},
+ _configsvrShardCollection: {skip: "primary only"},
+ _configsvrUpdateZoneKeyRange: {skip: "primary only"},
+ _flushRoutingTableCacheUpdates: {skip: "does not return user data"},
+ _getUserCacheGeneration: {skip: "does not return user data"},
+ _hashBSONElement: {skip: "does not return user data"},
+ _isSelf: {skip: "does not return user data"},
+ _mergeAuthzCollections: {skip: "primary only"},
+ _migrateClone: {skip: "primary only"},
+ _movePrimary: {skip: "primary only"},
+ _recvChunkAbort: {skip: "primary only"},
+ _recvChunkCommit: {skip: "primary only"},
+ _recvChunkStart: {skip: "primary only"},
+ _recvChunkStatus: {skip: "primary only"},
+ _transferMods: {skip: "primary only"},
+ abortTransaction: {skip: "primary only"},
+ addShard: {skip: "primary only"},
+ addShardToZone: {skip: "primary only"},
+ aggregate: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
},
- appendOplogNote: {skip: "primary only"},
- applyOps: {skip: "primary only"},
- authSchemaUpgrade: {skip: "primary only"},
- authenticate: {skip: "does not return user data"},
- availableQueryOptions: {skip: "does not return user data"},
- balancerStart: {skip: "primary only"},
- balancerStatus: {skip: "primary only"},
- balancerStop: {skip: "primary only"},
- buildInfo: {skip: "does not return user data"},
- captrunc: {skip: "primary only"},
- checkShardingIndex: {skip: "primary only"},
- cleanupOrphaned: {skip: "primary only"},
- clearLog: {skip: "does not return user data"},
- clone: {skip: "primary only"},
- cloneCollection: {skip: "primary only"},
- cloneCollectionAsCapped: {skip: "primary only"},
- commitTransaction: {skip: "primary only"},
- collMod: {skip: "primary only"},
- collStats: {skip: "does not return user data"},
- compact: {skip: "does not return user data"},
- configureFailPoint: {skip: "does not return user data"},
- connPoolStats: {skip: "does not return user data"},
- connPoolSync: {skip: "does not return user data"},
- connectionStatus: {skip: "does not return user data"},
- convertToCapped: {skip: "primary only"},
- count: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {count: coll, query: {x: 1}},
- checkResults: function(res) {
- // The command should work and return correct results.
- assert.commandWorked(res);
- assert.eq(1, res.n, tojson(res));
- },
- checkAvailableReadConcernResults: function(res) {
- // The command should work and return orphaned results.
- assert.commandWorked(res);
- assert.eq(1, res.n, tojson(res));
- },
- behavior: "versioned"
+ command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
+ checkResults: function(res) {
+ // The command should work and return correct results.
+ assert.commandWorked(res);
+ assert.eq(1, res.cursor.firstBatch.length, tojson(res));
},
- cpuload: {skip: "does not return user data"},
- create: {skip: "primary only"},
- createIndexes: {skip: "primary only"},
- createRole: {skip: "primary only"},
- createUser: {skip: "primary only"},
- currentOp: {skip: "does not return user data"},
- dataSize: {skip: "does not return user data"},
- dbHash: {skip: "does not return user data"},
- dbStats: {skip: "does not return user data"},
- delete: {skip: "primary only"},
- distinct: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {distinct: coll, key: "x"},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(1, res.values.length, tojson(res));
+ checkAvailableReadConcernResults: function(res) {
+ // The command should work and return orphaned results.
+ assert.commandWorked(res);
+ assert.eq(1, res.cursor.firstBatch.length, tojson(res));
+ },
+ behavior: "versioned"
+ },
+ appendOplogNote: {skip: "primary only"},
+ applyOps: {skip: "primary only"},
+ authSchemaUpgrade: {skip: "primary only"},
+ authenticate: {skip: "does not return user data"},
+ availableQueryOptions: {skip: "does not return user data"},
+ balancerStart: {skip: "primary only"},
+ balancerStatus: {skip: "primary only"},
+ balancerStop: {skip: "primary only"},
+ buildInfo: {skip: "does not return user data"},
+ captrunc: {skip: "primary only"},
+ checkShardingIndex: {skip: "primary only"},
+ cleanupOrphaned: {skip: "primary only"},
+ clearLog: {skip: "does not return user data"},
+ clone: {skip: "primary only"},
+ cloneCollection: {skip: "primary only"},
+ cloneCollectionAsCapped: {skip: "primary only"},
+ commitTransaction: {skip: "primary only"},
+ collMod: {skip: "primary only"},
+ collStats: {skip: "does not return user data"},
+ compact: {skip: "does not return user data"},
+ configureFailPoint: {skip: "does not return user data"},
+ connPoolStats: {skip: "does not return user data"},
+ connPoolSync: {skip: "does not return user data"},
+ connectionStatus: {skip: "does not return user data"},
+ convertToCapped: {skip: "primary only"},
+ count: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {count: coll, query: {x: 1}},
+ checkResults: function(res) {
+ // The command should work and return correct results.
+ assert.commandWorked(res);
+ assert.eq(1, res.n, tojson(res));
+ },
+ checkAvailableReadConcernResults: function(res) {
+ // The command should work and return orphaned results.
+ assert.commandWorked(res);
+ assert.eq(1, res.n, tojson(res));
+ },
+ behavior: "versioned"
+ },
+ cpuload: {skip: "does not return user data"},
+ create: {skip: "primary only"},
+ createIndexes: {skip: "primary only"},
+ createRole: {skip: "primary only"},
+ createUser: {skip: "primary only"},
+ currentOp: {skip: "does not return user data"},
+ dataSize: {skip: "does not return user data"},
+ dbHash: {skip: "does not return user data"},
+ dbStats: {skip: "does not return user data"},
+ delete: {skip: "primary only"},
+ distinct: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {distinct: coll, key: "x"},
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(1, res.values.length, tojson(res));
+ },
+ checkAvailableReadConcernResults: function(res) {
+ // The command should work and return orphaned results.
+ assert.commandWorked(res);
+ assert.eq(1, res.values.length, tojson(res));
+ },
+ behavior: "versioned"
+ },
+ driverOIDTest: {skip: "does not return user data"},
+ drop: {skip: "primary only"},
+ dropAllRolesFromDatabase: {skip: "primary only"},
+ dropAllUsersFromDatabase: {skip: "primary only"},
+ dropConnections: {skip: "does not return user data"},
+ dropDatabase: {skip: "primary only"},
+ dropIndexes: {skip: "primary only"},
+ dropRole: {skip: "primary only"},
+ dropUser: {skip: "primary only"},
+ echo: {skip: "does not return user data"},
+ emptycapped: {skip: "primary only"},
+ enableSharding: {skip: "primary only"},
+ endSessions: {skip: "does not return user data"},
+ explain: {skip: "TODO SERVER-30068"},
+ features: {skip: "does not return user data"},
+ filemd5: {skip: "does not return user data"},
+ find: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {find: coll, filter: {x: 1}},
+ checkResults: function(res) {
+ // The command should work and return correct results.
+ assert.commandWorked(res);
+ assert.eq(1, res.cursor.firstBatch.length, tojson(res));
+ },
+ checkAvailableReadConcernResults: function(res) {
+ // The command should work and return orphaned results.
+ assert.commandWorked(res);
+ assert.eq(1, res.cursor.firstBatch.length, tojson(res));
+ },
+ behavior: "versioned"
+ },
+ findAndModify: {skip: "primary only"},
+ flushRouterConfig: {skip: "does not return user data"},
+ forceerror: {skip: "does not return user data"},
+ fsync: {skip: "does not return user data"},
+ fsyncUnlock: {skip: "does not return user data"},
+ geoSearch: {skip: "not supported in mongos"},
+ getCmdLineOpts: {skip: "does not return user data"},
+ getDiagnosticData: {skip: "does not return user data"},
+ getLastError: {skip: "primary only"},
+ getLog: {skip: "does not return user data"},
+ getMore: {skip: "shard version already established"},
+ getParameter: {skip: "does not return user data"},
+ getShardMap: {skip: "does not return user data"},
+ getShardVersion: {skip: "primary only"},
+ getnonce: {skip: "does not return user data"},
+ godinsert: {skip: "for testing only"},
+ grantPrivilegesToRole: {skip: "primary only"},
+ grantRolesToRole: {skip: "primary only"},
+ grantRolesToUser: {skip: "primary only"},
+ handshake: {skip: "does not return user data"},
+ hostInfo: {skip: "does not return user data"},
+ insert: {skip: "primary only"},
+ invalidateUserCache: {skip: "does not return user data"},
+ isdbgrid: {skip: "does not return user data"},
+ isMaster: {skip: "does not return user data"},
+ killCursors: {skip: "does not return user data"},
+ killAllSessions: {skip: "does not return user data"},
+ killAllSessionsByPattern: {skip: "does not return user data"},
+ killOp: {skip: "does not return user data"},
+ killSessions: {skip: "does not return user data"},
+ listCollections: {skip: "primary only"},
+ listCommands: {skip: "does not return user data"},
+ listDatabases: {skip: "primary only"},
+ listIndexes: {skip: "primary only"},
+ listShards: {skip: "does not return user data"},
+ lockInfo: {skip: "primary only"},
+ logApplicationMessage: {skip: "primary only"},
+ logRotate: {skip: "does not return user data"},
+ logout: {skip: "does not return user data"},
+ makeSnapshot: {skip: "does not return user data"},
+ mapReduce: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {
+ mapReduce: coll,
+ map: function() {
+ emit(this.x, 1);
},
- checkAvailableReadConcernResults: function(res) {
- // The command should work and return orphaned results.
- assert.commandWorked(res);
- assert.eq(1, res.values.length, tojson(res));
+ reduce: function(key, values) {
+ return Array.sum(values);
},
- behavior: "versioned"
+ out: {inline: 1}
},
- driverOIDTest: {skip: "does not return user data"},
- drop: {skip: "primary only"},
- dropAllRolesFromDatabase: {skip: "primary only"},
- dropAllUsersFromDatabase: {skip: "primary only"},
- dropConnections: {skip: "does not return user data"},
- dropDatabase: {skip: "primary only"},
- dropIndexes: {skip: "primary only"},
- dropRole: {skip: "primary only"},
- dropUser: {skip: "primary only"},
- echo: {skip: "does not return user data"},
- emptycapped: {skip: "primary only"},
- enableSharding: {skip: "primary only"},
- endSessions: {skip: "does not return user data"},
- explain: {skip: "TODO SERVER-30068"},
- features: {skip: "does not return user data"},
- filemd5: {skip: "does not return user data"},
- find: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {find: coll, filter: {x: 1}},
- checkResults: function(res) {
- // The command should work and return correct results.
- assert.commandWorked(res);
- assert.eq(1, res.cursor.firstBatch.length, tojson(res));
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(1, res.results.length, tojson(res));
+ assert.eq(1, res.results[0]._id, tojson(res));
+ assert.eq(2, res.results[0].value, tojson(res));
+ },
+ checkAvailableReadConcernResults: function(res) {
+ assert.commandFailed(res);
+ },
+ behavior: "targetsPrimaryUsesConnectionVersioning"
+ },
+ mergeChunks: {skip: "primary only"},
+ moveChunk: {skip: "primary only"},
+ movePrimary: {skip: "primary only"},
+ multicast: {skip: "does not return user data"},
+ netstat: {skip: "does not return user data"},
+ ping: {skip: "does not return user data"},
+ planCacheClear: {skip: "does not return user data"},
+ planCacheClearFilters: {skip: "does not return user data"},
+ planCacheListFilters: {skip: "does not return user data"},
+ planCacheListPlans: {skip: "does not return user data"},
+ planCacheListQueryShapes: {skip: "does not return user data"},
+ planCacheSetFilter: {skip: "does not return user data"},
+ profile: {skip: "primary only"},
+ reapLogicalSessionCacheNow: {skip: "does not return user data"},
+ refreshLogicalSessionCacheNow: {skip: "does not return user data"},
+ refreshSessions: {skip: "does not return user data"},
+ refreshSessionsInternal: {skip: "does not return user data"},
+ removeShard: {skip: "primary only"},
+ removeShardFromZone: {skip: "primary only"},
+ renameCollection: {skip: "primary only"},
+ repairCursor: {skip: "does not return user data"},
+ replSetAbortPrimaryCatchUp: {skip: "does not return user data"},
+ replSetFreeze: {skip: "does not return user data"},
+ replSetGetConfig: {skip: "does not return user data"},
+ replSetGetRBID: {skip: "does not return user data"},
+ replSetGetStatus: {skip: "does not return user data"},
+ replSetHeartbeat: {skip: "does not return user data"},
+ replSetInitiate: {skip: "does not return user data"},
+ replSetMaintenance: {skip: "does not return user data"},
+ replSetReconfig: {skip: "does not return user data"},
+ replSetRequestVotes: {skip: "does not return user data"},
+ replSetStepDown: {skip: "does not return user data"},
+ replSetStepUp: {skip: "does not return user data"},
+ replSetSyncFrom: {skip: "does not return user data"},
+ replSetTest: {skip: "does not return user data"},
+ replSetUpdatePosition: {skip: "does not return user data"},
+ replSetResizeOplog: {skip: "does not return user data"},
+ resetError: {skip: "does not return user data"},
+ restartCatalog: {skip: "internal-only command"},
+ resync: {skip: "primary only"},
+ revokePrivilegesFromRole: {skip: "primary only"},
+ revokeRolesFromRole: {skip: "primary only"},
+ revokeRolesFromUser: {skip: "primary only"},
+ rolesInfo: {skip: "primary only"},
+ saslContinue: {skip: "primary only"},
+ saslStart: {skip: "primary only"},
+ serverStatus: {skip: "does not return user data"},
+ setCommittedSnapshot: {skip: "does not return user data"},
+ setIndexCommitQuorum: {skip: "primary only"},
+ setFeatureCompatibilityVersion: {skip: "primary only"},
+ setFreeMonitoring: {skip: "primary only"},
+ setParameter: {skip: "does not return user data"},
+ setShardVersion: {skip: "does not return user data"},
+ shardCollection: {skip: "primary only"},
+ shardConnPoolStats: {skip: "does not return user data"},
+ shardingState: {skip: "does not return user data"},
+ shutdown: {skip: "does not return user data"},
+ sleep: {skip: "does not return user data"},
+ split: {skip: "primary only"},
+ splitChunk: {skip: "primary only"},
+ splitVector: {skip: "primary only"},
+ stageDebug: {skip: "primary only"},
+ startRecordingTraffic: {skip: "does not return user data"},
+ startSession: {skip: "does not return user data"},
+ stopRecordingTraffic: {skip: "does not return user data"},
+ top: {skip: "does not return user data"},
+ touch: {skip: "does not return user data"},
+ unsetSharding: {skip: "does not return user data"},
+ update: {skip: "primary only"},
+ updateRole: {skip: "primary only"},
+ updateUser: {skip: "primary only"},
+ updateZoneKeyRange: {skip: "primary only"},
+ usersInfo: {skip: "primary only"},
+ validate: {skip: "does not return user data"},
+ waitForOngoingChunkSplits: {skip: "does not return user data"},
+ whatsmyuri: {skip: "does not return user data"}
+};
+
+commandsRemovedFromMongosIn42.forEach(function(cmd) {
+ testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
+});
+
+// Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
+let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
+let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
+
+let donorShardPrimary = st.rs0.getPrimary();
+let recipientShardPrimary = st.rs1.getPrimary();
+let donorShardSecondary = st.rs0.getSecondary();
+let recipientShardSecondary = st.rs1.getSecondary();
+
+let freshMongos = st.s0;
+let staleMongos = st.s1;
+
+let res = st.s.adminCommand({listCommands: 1});
+assert.commandWorked(res);
+
+let commands = Object.keys(res.commands);
+for (let command of commands) {
+ let test = testCases[command];
+ assert(test !== undefined,
+ "coverage failure: must define a safe secondary reads test for " + command);
+
+ if (test.skip !== undefined) {
+ print("skipping " + command + ": " + test.skip);
+ continue;
+ }
+ validateTestCase(test);
+
+ jsTest.log("testing command " + tojson(test.command));
+
+ assert.commandWorked(freshMongos.adminCommand({enableSharding: db}));
+ st.ensurePrimaryShard(db, st.shard0.shardName);
+ assert.commandWorked(freshMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
+
+ // We do this because we expect staleMongos to see that the collection is sharded, which
+ // it may not if the "nearest" config server it contacts has not replicated the
+ // shardCollection writes (or has not heard that they have reached a majority).
+ st.configRS.awaitReplication();
+
+ assert.commandWorked(freshMongos.adminCommand({split: nss, middle: {x: 0}}));
+
+ // Do a dummy read from the stale mongos so it loads the routing table into memory once.
+ // Additionally, do a secondary read to ensure that the secondary has loaded the initial
+ // routing table -- the first read to the primary will refresh the mongos' shardVersion,
+ // which will then be used against the secondary to ensure the secondary is fresh.
+ assert.commandWorked(staleMongos.getDB(db).runCommand({find: coll}));
+ assert.commandWorked(freshMongos.getDB(db).runCommand(
+ {find: coll, $readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
+
+ // Do any test-specific setup.
+ test.setUp(staleMongos);
+
+ // Wait for replication as a safety net, in case the individual setup function for a test
+ // case did not specify a writeConcern itself.
+ st.rs0.awaitReplication();
+ st.rs1.awaitReplication();
+
+ assert.commandWorked(recipientShardPrimary.getDB(db).setProfilingLevel(2));
+ assert.commandWorked(donorShardSecondary.getDB(db).setProfilingLevel(2));
+ assert.commandWorked(recipientShardSecondary.getDB(db).setProfilingLevel(2));
+
+ // Suspend range deletion on the donor shard.
+ donorShardPrimary.adminCommand({configureFailPoint: 'suspendRangeDeletion', mode: 'alwaysOn'});
+
+ // Do a moveChunk from the fresh mongos to make the other mongos stale.
+ // Use {w:2} (all) write concern so the metadata change gets persisted to the secondary
+ // before stalely versioned commands are sent against the secondary.
+ assert.commandWorked(freshMongos.adminCommand({
+ moveChunk: nss,
+ find: {x: 0},
+ to: st.shard1.shardName,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ }));
+
+ let cmdReadPrefSecondary =
+ Object.assign({}, test.command, {$readPreference: {mode: 'secondary'}});
+ let cmdPrefSecondaryConcernAvailable =
+ Object.assign({}, cmdReadPrefSecondary, {readConcern: {level: 'available'}});
+ let cmdPrefSecondaryConcernLocal =
+ Object.assign({}, cmdReadPrefSecondary, {readConcern: {level: 'local'}});
+
+ let availableReadConcernRes =
+ staleMongos.getDB(db).runCommand(cmdPrefSecondaryConcernAvailable);
+ test.checkAvailableReadConcernResults(availableReadConcernRes);
+
+ let defaultReadConcernRes = staleMongos.getDB(db).runCommand(cmdReadPrefSecondary);
+ if (command === 'mapReduce') {
+ // mapReduce is always sent to a primary, which defaults to 'local' readConcern
+ test.checkResults(defaultReadConcernRes);
+ } else {
+ // Secondaries default to the 'available' readConcern
+ test.checkAvailableReadConcernResults(defaultReadConcernRes);
+ }
+
+ let localReadConcernRes = staleMongos.getDB(db).runCommand(cmdPrefSecondaryConcernLocal);
+ test.checkResults(localReadConcernRes);
+
+ // Build the query to identify the operation in the system profiler.
+ let commandProfile = buildCommandProfile(test.command, true /* sharded */);
+
+ if (test.behavior === "unshardedOnly") {
+ // Check that neither the donor nor recipient shard secondaries received either request.
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: donorShardSecondary.getDB(db), filter: commandProfile});
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: recipientShardSecondary.getDB(db), filter: commandProfile});
+ } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
+ // Check that the recipient shard primary received the request without a shardVersion
+ // field and returned success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardPrimary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": false},
+ "command.$readPreference": {$exists: false},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$exists": false},
},
- checkAvailableReadConcernResults: function(res) {
- // The command should work and return orphaned results.
- assert.commandWorked(res);
- assert.eq(1, res.cursor.firstBatch.length, tojson(res));
+ commandProfile)
+ });
+ } else if (test.behavior === "versioned") {
+ // Check that the donor shard secondary received both the 'available' read concern
+ // request and the request with no read concern specified, and returned success for
+ // both, despite the mongos' stale routing table.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: donorShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "available"},
+ "errCode": {"$ne": ErrorCodes.StaleConfig},
},
- behavior: "versioned"
- },
- findAndModify: {skip: "primary only"},
- flushRouterConfig: {skip: "does not return user data"},
- forceerror: {skip: "does not return user data"},
- fsync: {skip: "does not return user data"},
- fsyncUnlock: {skip: "does not return user data"},
- geoSearch: {skip: "not supported in mongos"},
- getCmdLineOpts: {skip: "does not return user data"},
- getDiagnosticData: {skip: "does not return user data"},
- getLastError: {skip: "primary only"},
- getLog: {skip: "does not return user data"},
- getMore: {skip: "shard version already established"},
- getParameter: {skip: "does not return user data"},
- getShardMap: {skip: "does not return user data"},
- getShardVersion: {skip: "primary only"},
- getnonce: {skip: "does not return user data"},
- godinsert: {skip: "for testing only"},
- grantPrivilegesToRole: {skip: "primary only"},
- grantRolesToRole: {skip: "primary only"},
- grantRolesToUser: {skip: "primary only"},
- handshake: {skip: "does not return user data"},
- hostInfo: {skip: "does not return user data"},
- insert: {skip: "primary only"},
- invalidateUserCache: {skip: "does not return user data"},
- isdbgrid: {skip: "does not return user data"},
- isMaster: {skip: "does not return user data"},
- killCursors: {skip: "does not return user data"},
- killAllSessions: {skip: "does not return user data"},
- killAllSessionsByPattern: {skip: "does not return user data"},
- killOp: {skip: "does not return user data"},
- killSessions: {skip: "does not return user data"},
- listCollections: {skip: "primary only"},
- listCommands: {skip: "does not return user data"},
- listDatabases: {skip: "primary only"},
- listIndexes: {skip: "primary only"},
- listShards: {skip: "does not return user data"},
- lockInfo: {skip: "primary only"},
- logApplicationMessage: {skip: "primary only"},
- logRotate: {skip: "does not return user data"},
- logout: {skip: "does not return user data"},
- makeSnapshot: {skip: "does not return user data"},
- mapReduce: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ commandProfile)
+ });
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: donorShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"$exists": false},
+ "errCode": {"$ne": ErrorCodes.StaleConfig},
},
- command: {
- mapReduce: coll,
- map: function() {
- emit(this.x, 1);
- },
- reduce: function(key, values) {
- return Array.sum(values);
- },
- out: {inline: 1}
+ commandProfile)
+ });
+
+ // Check that the donor shard secondary then returned stale shardVersion for the request
+ // with local read concern.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: donorShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(1, res.results.length, tojson(res));
- assert.eq(1, res.results[0]._id, tojson(res));
- assert.eq(2, res.results[0].value, tojson(res));
+ commandProfile)
+ });
+
+ // Check that the recipient shard secondary received the request with local read concern
+ // and also returned stale shardVersion once, even though the mongos is fresh, because
+ // the secondary was stale.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
},
- checkAvailableReadConcernResults: function(res) {
- assert.commandFailed(res);
+ commandProfile)
+ });
+
+ // Check that the recipient shard secondary received the request with local read concern
+ // again and finally returned success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$ne": ErrorCodes.StaleConfig},
},
- behavior: "targetsPrimaryUsesConnectionVersioning"
- },
- mergeChunks: {skip: "primary only"},
- moveChunk: {skip: "primary only"},
- movePrimary: {skip: "primary only"},
- multicast: {skip: "does not return user data"},
- netstat: {skip: "does not return user data"},
- ping: {skip: "does not return user data"},
- planCacheClear: {skip: "does not return user data"},
- planCacheClearFilters: {skip: "does not return user data"},
- planCacheListFilters: {skip: "does not return user data"},
- planCacheListPlans: {skip: "does not return user data"},
- planCacheListQueryShapes: {skip: "does not return user data"},
- planCacheSetFilter: {skip: "does not return user data"},
- profile: {skip: "primary only"},
- reapLogicalSessionCacheNow: {skip: "does not return user data"},
- refreshLogicalSessionCacheNow: {skip: "does not return user data"},
- refreshSessions: {skip: "does not return user data"},
- refreshSessionsInternal: {skip: "does not return user data"},
- removeShard: {skip: "primary only"},
- removeShardFromZone: {skip: "primary only"},
- renameCollection: {skip: "primary only"},
- repairCursor: {skip: "does not return user data"},
- replSetAbortPrimaryCatchUp: {skip: "does not return user data"},
- replSetFreeze: {skip: "does not return user data"},
- replSetGetConfig: {skip: "does not return user data"},
- replSetGetRBID: {skip: "does not return user data"},
- replSetGetStatus: {skip: "does not return user data"},
- replSetHeartbeat: {skip: "does not return user data"},
- replSetInitiate: {skip: "does not return user data"},
- replSetMaintenance: {skip: "does not return user data"},
- replSetReconfig: {skip: "does not return user data"},
- replSetRequestVotes: {skip: "does not return user data"},
- replSetStepDown: {skip: "does not return user data"},
- replSetStepUp: {skip: "does not return user data"},
- replSetSyncFrom: {skip: "does not return user data"},
- replSetTest: {skip: "does not return user data"},
- replSetUpdatePosition: {skip: "does not return user data"},
- replSetResizeOplog: {skip: "does not return user data"},
- resetError: {skip: "does not return user data"},
- restartCatalog: {skip: "internal-only command"},
- resync: {skip: "primary only"},
- revokePrivilegesFromRole: {skip: "primary only"},
- revokeRolesFromRole: {skip: "primary only"},
- revokeRolesFromUser: {skip: "primary only"},
- rolesInfo: {skip: "primary only"},
- saslContinue: {skip: "primary only"},
- saslStart: {skip: "primary only"},
- serverStatus: {skip: "does not return user data"},
- setCommittedSnapshot: {skip: "does not return user data"},
- setIndexCommitQuorum: {skip: "primary only"},
- setFeatureCompatibilityVersion: {skip: "primary only"},
- setFreeMonitoring: {skip: "primary only"},
- setParameter: {skip: "does not return user data"},
- setShardVersion: {skip: "does not return user data"},
- shardCollection: {skip: "primary only"},
- shardConnPoolStats: {skip: "does not return user data"},
- shardingState: {skip: "does not return user data"},
- shutdown: {skip: "does not return user data"},
- sleep: {skip: "does not return user data"},
- split: {skip: "primary only"},
- splitChunk: {skip: "primary only"},
- splitVector: {skip: "primary only"},
- stageDebug: {skip: "primary only"},
- startRecordingTraffic: {skip: "does not return user data"},
- startSession: {skip: "does not return user data"},
- stopRecordingTraffic: {skip: "does not return user data"},
- top: {skip: "does not return user data"},
- touch: {skip: "does not return user data"},
- unsetSharding: {skip: "does not return user data"},
- update: {skip: "primary only"},
- updateRole: {skip: "primary only"},
- updateUser: {skip: "primary only"},
- updateZoneKeyRange: {skip: "primary only"},
- usersInfo: {skip: "primary only"},
- validate: {skip: "does not return user data"},
- waitForOngoingChunkSplits: {skip: "does not return user data"},
- whatsmyuri: {skip: "does not return user data"}
- };
-
- commandsRemovedFromMongosIn42.forEach(function(cmd) {
- testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
- });
-
- // Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
- let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
- let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
-
- let donorShardPrimary = st.rs0.getPrimary();
- let recipientShardPrimary = st.rs1.getPrimary();
- let donorShardSecondary = st.rs0.getSecondary();
- let recipientShardSecondary = st.rs1.getSecondary();
-
- let freshMongos = st.s0;
- let staleMongos = st.s1;
-
- let res = st.s.adminCommand({listCommands: 1});
- assert.commandWorked(res);
-
- let commands = Object.keys(res.commands);
- for (let command of commands) {
- let test = testCases[command];
- assert(test !== undefined,
- "coverage failure: must define a safe secondary reads test for " + command);
-
- if (test.skip !== undefined) {
- print("skipping " + command + ": " + test.skip);
- continue;
- }
- validateTestCase(test);
-
- jsTest.log("testing command " + tojson(test.command));
-
- assert.commandWorked(freshMongos.adminCommand({enableSharding: db}));
- st.ensurePrimaryShard(db, st.shard0.shardName);
- assert.commandWorked(freshMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
-
- // We do this because we expect staleMongos to see that the collection is sharded, which
- // it may not if the "nearest" config server it contacts has not replicated the
- // shardCollection writes (or has not heard that they have reached a majority).
- st.configRS.awaitReplication();
-
- assert.commandWorked(freshMongos.adminCommand({split: nss, middle: {x: 0}}));
-
- // Do dummy read from the stale mongos so it loads the routing table into memory once.
- // Additionally, do a secondary read to ensure that the secondary has loaded the initial
- // routing table -- the first read to the primary will refresh the mongos' shardVersion,
- // which will then be used against the secondary to ensure the secondary is fresh.
- assert.commandWorked(staleMongos.getDB(db).runCommand({find: coll}));
- assert.commandWorked(freshMongos.getDB(db).runCommand(
- {find: coll, $readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
-
- // Do any test-specific setup.
- test.setUp(staleMongos);
-
- // Wait for replication as a safety net, in case the individual setup function for a test
- // case did not specify writeConcern itself
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
-
- assert.commandWorked(recipientShardPrimary.getDB(db).setProfilingLevel(2));
- assert.commandWorked(donorShardSecondary.getDB(db).setProfilingLevel(2));
- assert.commandWorked(recipientShardSecondary.getDB(db).setProfilingLevel(2));
-
- // Suspend range deletion on the donor shard.
- donorShardPrimary.adminCommand(
- {configureFailPoint: 'suspendRangeDeletion', mode: 'alwaysOn'});
-
- // Do a moveChunk from the fresh mongos to make the other mongos stale.
- // Use {w:2} (all) write concern so the metadata change gets persisted to the secondary
- // before stalely versioned commands are sent against the secondary.
- assert.commandWorked(freshMongos.adminCommand({
- moveChunk: nss,
- find: {x: 0},
- to: st.shard1.shardName,
- _secondaryThrottle: true,
- writeConcern: {w: 2},
- }));
-
- let cmdReadPrefSecondary =
- Object.assign({}, test.command, {$readPreference: {mode: 'secondary'}});
- let cmdPrefSecondaryConcernAvailable =
- Object.assign({}, cmdReadPrefSecondary, {readConcern: {level: 'available'}});
- let cmdPrefSecondaryConcernLocal =
- Object.assign({}, cmdReadPrefSecondary, {readConcern: {level: 'local'}});
-
- let availableReadConcernRes =
- staleMongos.getDB(db).runCommand(cmdPrefSecondaryConcernAvailable);
- test.checkAvailableReadConcernResults(availableReadConcernRes);
-
- let defaultReadConcernRes = staleMongos.getDB(db).runCommand(cmdReadPrefSecondary);
- if (command === 'mapReduce') {
- // mapReduce is always sent to a primary, which defaults to 'local' readConcern
- test.checkResults(defaultReadConcernRes);
- } else {
- // Secondaries default to the 'available' readConcern
- test.checkAvailableReadConcernResults(defaultReadConcernRes);
- }
-
- let localReadConcernRes = staleMongos.getDB(db).runCommand(cmdPrefSecondaryConcernLocal);
- test.checkResults(localReadConcernRes);
-
- // Build the query to identify the operation in the system profiler.
- let commandProfile = buildCommandProfile(test.command, true /* sharded */);
-
- if (test.behavior === "unshardedOnly") {
- // Check that neither the donor nor recipient shard secondaries received either request.
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: donorShardSecondary.getDB(db), filter: commandProfile});
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: recipientShardSecondary.getDB(db), filter: commandProfile});
- } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
- // Check that the recipient shard primary received the request without a shardVersion
- // field and returned success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardPrimary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": false},
- "command.$readPreference": {$exists: false},
- "command.readConcern": {"level": "local"},
- "errCode": {"$exists": false},
- },
- commandProfile)
- });
- } else if (test.behavior === "versioned") {
- // Check that the donor shard secondary received both the 'available' read concern
- // request and read concern not specified request and returned success for both, despite
- // the mongos' stale routing table.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "available"},
- "errCode": {"$ne": ErrorCodes.StaleConfig},
- },
- commandProfile)
- });
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"$exists": false},
- "errCode": {"$ne": ErrorCodes.StaleConfig},
- },
- commandProfile)
- });
-
- // Check that the donor shard secondary then returned stale shardVersion for the request
- // with local read concern.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- },
- commandProfile)
- });
-
- // Check that the recipient shard secondary received the request with local read concern
- // and also returned stale shardVersion once, even though the mongos is fresh, because
- // the secondary was stale.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- },
- commandProfile)
- });
-
- // Check that the recipient shard secondary received the request with local read concern
- // again and finally returned success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": {"$ne": ErrorCodes.StaleConfig},
- },
- commandProfile)
- });
- }
-
- donorShardPrimary.adminCommand({configureFailPoint: 'suspendRangeDeletion', mode: 'off'});
-
- // Clean up the collection by dropping the DB. This also drops all associated indexes and
- // clears the profiler collection.
- // Do this from staleMongos, so staleMongos purges the database entry from its cache.
- assert.commandWorked(staleMongos.getDB(db).runCommand({dropDatabase: 1}));
+ commandProfile)
+ });
}
- st.stop();
+ donorShardPrimary.adminCommand({configureFailPoint: 'suspendRangeDeletion', mode: 'off'});
+
+ // Clean up the collection by dropping the DB. This also drops all associated indexes and
+ // clears the profiler collection.
+ // Do this from staleMongos, so staleMongos purges the database entry from its cache.
+ assert.commandWorked(staleMongos.getDB(db).runCommand({dropDatabase: 1}));
+}
+
+st.stop();
})();
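As a rough illustration of the read-concern behavior the checks above exercise, the following shell sketch contrasts a secondary read under the 'available' and 'local' levels. It reuses the test's staleMongos connection and the test.foo collection; the snippet is illustrative only and is not taken from the patch itself.

let staleTestDB = staleMongos.getDB("test");

// 'available' read concern skips the shard versioning protocol, so the stale secondary
// answers immediately and may include orphaned documents while range deletion is suspended.
assert.commandWorked(staleTestDB.runCommand(
    {find: "foo", filter: {x: 1}, $readPreference: {mode: "secondary"}, readConcern: {level: "available"}}));

// 'local' read concern opts into shard versioning: the stale secondary first replies with
// StaleConfig, the router and secondary refresh their metadata, and the retried read succeeds.
assert.commandWorked(staleTestDB.runCommand(
    {find: "foo", filter: {x: 1}, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));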
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
index 4ce841f18f9..866080326d2 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
@@ -17,448 +17,448 @@
* "versioned". Determines what system profiler checks are performed.
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/profiler.js');
- load('jstests/sharding/libs/last_stable_mongos_commands.js');
+load('jstests/libs/profiler.js');
+load('jstests/sharding/libs/last_stable_mongos_commands.js');
- let db = "test";
- let coll = "foo";
- let nss = db + "." + coll;
+let db = "test";
+let coll = "foo";
+let nss = db + "." + coll;
- // Check that a test case is well-formed.
- let validateTestCase = function(test) {
- assert(test.setUp && typeof(test.setUp) === "function");
- assert(test.command && typeof(test.command) === "object");
- assert(test.checkResults && typeof(test.checkResults) === "function");
- assert(test.behavior === "unshardedOnly" ||
- test.behavior === "targetsPrimaryUsesConnectionVersioning" ||
- test.behavior === "versioned");
- };
+// Check that a test case is well-formed.
+let validateTestCase = function(test) {
+ assert(test.setUp && typeof (test.setUp) === "function");
+ assert(test.command && typeof (test.command) === "object");
+ assert(test.checkResults && typeof (test.checkResults) === "function");
+ assert(test.behavior === "unshardedOnly" ||
+ test.behavior === "targetsPrimaryUsesConnectionVersioning" ||
+ test.behavior === "versioned");
+};
- let testCases = {
- _addShard: {skip: "primary only"},
- _cloneCatalogData: {skip: "primary only"},
- _configsvrAddShard: {skip: "primary only"},
- _configsvrAddShardToZone: {skip: "primary only"},
- _configsvrBalancerStart: {skip: "primary only"},
- _configsvrBalancerStatus: {skip: "primary only"},
- _configsvrBalancerStop: {skip: "primary only"},
- _configsvrCommitChunkMerge: {skip: "primary only"},
- _configsvrCommitChunkMigration: {skip: "primary only"},
- _configsvrCommitChunkSplit: {skip: "primary only"},
- _configsvrCommitMovePrimary: {skip: "primary only"},
- _configsvrDropCollection: {skip: "primary only"},
- _configsvrDropDatabase: {skip: "primary only"},
- _configsvrMoveChunk: {skip: "primary only"},
- _configsvrMovePrimary: {skip: "primary only"},
- _configsvrRemoveShardFromZone: {skip: "primary only"},
- _configsvrShardCollection: {skip: "primary only"},
- _configsvrUpdateZoneKeyRange: {skip: "primary only"},
- _flushRoutingTableCacheUpdates: {skip: "does not return user data"},
- _getUserCacheGeneration: {skip: "does not return user data"},
- _hashBSONElement: {skip: "does not return user data"},
- _isSelf: {skip: "does not return user data"},
- _mergeAuthzCollections: {skip: "primary only"},
- _migrateClone: {skip: "primary only"},
- _movePrimary: {skip: "primary only"},
- _recvChunkAbort: {skip: "primary only"},
- _recvChunkCommit: {skip: "primary only"},
- _recvChunkStart: {skip: "primary only"},
- _recvChunkStatus: {skip: "primary only"},
- _transferMods: {skip: "primary only"},
- abortTransaction: {skip: "primary only"},
- addShard: {skip: "primary only"},
- addShardToZone: {skip: "primary only"},
- aggregate: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
- checkResults: function(res) {
- // The command should work and return correct results.
- assert.commandWorked(res);
- assert.eq(1, res.cursor.firstBatch.length, tojson(res));
- },
- behavior: "versioned"
+let testCases = {
+ _addShard: {skip: "primary only"},
+ _cloneCatalogData: {skip: "primary only"},
+ _configsvrAddShard: {skip: "primary only"},
+ _configsvrAddShardToZone: {skip: "primary only"},
+ _configsvrBalancerStart: {skip: "primary only"},
+ _configsvrBalancerStatus: {skip: "primary only"},
+ _configsvrBalancerStop: {skip: "primary only"},
+ _configsvrCommitChunkMerge: {skip: "primary only"},
+ _configsvrCommitChunkMigration: {skip: "primary only"},
+ _configsvrCommitChunkSplit: {skip: "primary only"},
+ _configsvrCommitMovePrimary: {skip: "primary only"},
+ _configsvrDropCollection: {skip: "primary only"},
+ _configsvrDropDatabase: {skip: "primary only"},
+ _configsvrMoveChunk: {skip: "primary only"},
+ _configsvrMovePrimary: {skip: "primary only"},
+ _configsvrRemoveShardFromZone: {skip: "primary only"},
+ _configsvrShardCollection: {skip: "primary only"},
+ _configsvrUpdateZoneKeyRange: {skip: "primary only"},
+ _flushRoutingTableCacheUpdates: {skip: "does not return user data"},
+ _getUserCacheGeneration: {skip: "does not return user data"},
+ _hashBSONElement: {skip: "does not return user data"},
+ _isSelf: {skip: "does not return user data"},
+ _mergeAuthzCollections: {skip: "primary only"},
+ _migrateClone: {skip: "primary only"},
+ _movePrimary: {skip: "primary only"},
+ _recvChunkAbort: {skip: "primary only"},
+ _recvChunkCommit: {skip: "primary only"},
+ _recvChunkStart: {skip: "primary only"},
+ _recvChunkStatus: {skip: "primary only"},
+ _transferMods: {skip: "primary only"},
+ abortTransaction: {skip: "primary only"},
+ addShard: {skip: "primary only"},
+ addShardToZone: {skip: "primary only"},
+ aggregate: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
},
- appendOplogNote: {skip: "primary only"},
- applyOps: {skip: "primary only"},
- authenticate: {skip: "does not return user data"},
- authSchemaUpgrade: {skip: "primary only"},
- availableQueryOptions: {skip: "does not return user data"},
- balancerStart: {skip: "primary only"},
- balancerStatus: {skip: "primary only"},
- balancerStop: {skip: "primary only"},
- buildInfo: {skip: "does not return user data"},
- captrunc: {skip: "primary only"},
- checkShardingIndex: {skip: "primary only"},
- cleanupOrphaned: {skip: "primary only"},
- clearLog: {skip: "does not return user data"},
- clone: {skip: "primary only"},
- cloneCollection: {skip: "primary only"},
- cloneCollectionAsCapped: {skip: "primary only"},
- collMod: {skip: "primary only"},
- collStats: {skip: "does not return user data"},
- commitTransaction: {skip: "primary only"},
- compact: {skip: "does not return user data"},
- configureFailPoint: {skip: "does not return user data"},
- connPoolStats: {skip: "does not return user data"},
- connPoolSync: {skip: "does not return user data"},
- connectionStatus: {skip: "does not return user data"},
- convertToCapped: {skip: "primary only"},
- count: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {count: coll, query: {x: 1}},
- checkResults: function(res) {
- // The command should work and return correct results.
- assert.commandWorked(res);
- assert.eq(1, res.n, tojson(res));
- },
- behavior: "versioned"
+ command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
+ checkResults: function(res) {
+ // The command should work and return correct results.
+ assert.commandWorked(res);
+ assert.eq(1, res.cursor.firstBatch.length, tojson(res));
},
- cpuload: {skip: "does not return user data"},
- create: {skip: "primary only"},
- createIndexes: {skip: "primary only"},
- createRole: {skip: "primary only"},
- createUser: {skip: "primary only"},
- currentOp: {skip: "does not return user data"},
- dataSize: {skip: "does not return user data"},
- dbHash: {skip: "does not return user data"},
- dbStats: {skip: "does not return user data"},
- delete: {skip: "primary only"},
- distinct: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {distinct: coll, key: "x"},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(1, res.values.length, tojson(res));
- },
- behavior: "versioned"
+ behavior: "versioned"
+ },
+ appendOplogNote: {skip: "primary only"},
+ applyOps: {skip: "primary only"},
+ authenticate: {skip: "does not return user data"},
+ authSchemaUpgrade: {skip: "primary only"},
+ availableQueryOptions: {skip: "does not return user data"},
+ balancerStart: {skip: "primary only"},
+ balancerStatus: {skip: "primary only"},
+ balancerStop: {skip: "primary only"},
+ buildInfo: {skip: "does not return user data"},
+ captrunc: {skip: "primary only"},
+ checkShardingIndex: {skip: "primary only"},
+ cleanupOrphaned: {skip: "primary only"},
+ clearLog: {skip: "does not return user data"},
+ clone: {skip: "primary only"},
+ cloneCollection: {skip: "primary only"},
+ cloneCollectionAsCapped: {skip: "primary only"},
+ collMod: {skip: "primary only"},
+ collStats: {skip: "does not return user data"},
+ commitTransaction: {skip: "primary only"},
+ compact: {skip: "does not return user data"},
+ configureFailPoint: {skip: "does not return user data"},
+ connPoolStats: {skip: "does not return user data"},
+ connPoolSync: {skip: "does not return user data"},
+ connectionStatus: {skip: "does not return user data"},
+ convertToCapped: {skip: "primary only"},
+ count: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
},
- driverOIDTest: {skip: "does not return user data"},
- drop: {skip: "primary only"},
- dropAllRolesFromDatabase: {skip: "primary only"},
- dropAllUsersFromDatabase: {skip: "primary only"},
- dropConnections: {skip: "does not return user data"},
- dropDatabase: {skip: "primary only"},
- dropIndexes: {skip: "primary only"},
- dropRole: {skip: "primary only"},
- dropUser: {skip: "primary only"},
- echo: {skip: "does not return user data"},
- emptycapped: {skip: "primary only"},
- enableSharding: {skip: "primary only"},
- endSessions: {skip: "does not return user data"},
- explain: {skip: "TODO SERVER-30068"},
- features: {skip: "does not return user data"},
- filemd5: {skip: "does not return user data"},
- find: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {find: coll, filter: {x: 1}},
- checkResults: function(res) {
- // The command should work and return correct results.
- assert.commandWorked(res);
- assert.eq(1, res.cursor.firstBatch.length, tojson(res));
- },
- behavior: "versioned"
+ command: {count: coll, query: {x: 1}},
+ checkResults: function(res) {
+ // The command should work and return correct results.
+ assert.commandWorked(res);
+ assert.eq(1, res.n, tojson(res));
},
- findAndModify: {skip: "primary only"},
- flushRouterConfig: {skip: "does not return user data"},
- forceerror: {skip: "does not return user data"},
- fsync: {skip: "does not return user data"},
- fsyncUnlock: {skip: "does not return user data"},
- geoSearch: {skip: "not supported in mongos"},
- getCmdLineOpts: {skip: "does not return user data"},
- getDiagnosticData: {skip: "does not return user data"},
- getLastError: {skip: "primary only"},
- getLog: {skip: "does not return user data"},
- getMore: {skip: "shard version already established"},
- getParameter: {skip: "does not return user data"},
- getShardMap: {skip: "does not return user data"},
- getShardVersion: {skip: "primary only"},
- getnonce: {skip: "does not return user data"},
- godinsert: {skip: "for testing only"},
- grantPrivilegesToRole: {skip: "primary only"},
- grantRolesToRole: {skip: "primary only"},
- grantRolesToUser: {skip: "primary only"},
- handshake: {skip: "does not return user data"},
- hostInfo: {skip: "does not return user data"},
- insert: {skip: "primary only"},
- invalidateUserCache: {skip: "does not return user data"},
- isdbgrid: {skip: "does not return user data"},
- isMaster: {skip: "does not return user data"},
- killAllSessions: {skip: "does not return user data"},
- killAllSessionsByPattern: {skip: "does not return user data"},
- killCursors: {skip: "does not return user data"},
- killOp: {skip: "does not return user data"},
- killSessions: {skip: "does not return user data"},
- listCollections: {skip: "primary only"},
- listCommands: {skip: "does not return user data"},
- listDatabases: {skip: "primary only"},
- listIndexes: {skip: "primary only"},
- listShards: {skip: "does not return user data"},
- lockInfo: {skip: "primary only"},
- logApplicationMessage: {skip: "primary only"},
- logRotate: {skip: "does not return user data"},
- logout: {skip: "does not return user data"},
- makeSnapshot: {skip: "does not return user data"},
- mapReduce: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {
- mapReduce: coll,
- map: function() {
- emit(this.x, 1);
- },
- reduce: function(key, values) {
- return Array.sum(values);
- },
- out: {inline: 1}
+ behavior: "versioned"
+ },
+ cpuload: {skip: "does not return user data"},
+ create: {skip: "primary only"},
+ createIndexes: {skip: "primary only"},
+ createRole: {skip: "primary only"},
+ createUser: {skip: "primary only"},
+ currentOp: {skip: "does not return user data"},
+ dataSize: {skip: "does not return user data"},
+ dbHash: {skip: "does not return user data"},
+ dbStats: {skip: "does not return user data"},
+ delete: {skip: "primary only"},
+ distinct: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {distinct: coll, key: "x"},
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(1, res.values.length, tojson(res));
+ },
+ behavior: "versioned"
+ },
+ driverOIDTest: {skip: "does not return user data"},
+ drop: {skip: "primary only"},
+ dropAllRolesFromDatabase: {skip: "primary only"},
+ dropAllUsersFromDatabase: {skip: "primary only"},
+ dropConnections: {skip: "does not return user data"},
+ dropDatabase: {skip: "primary only"},
+ dropIndexes: {skip: "primary only"},
+ dropRole: {skip: "primary only"},
+ dropUser: {skip: "primary only"},
+ echo: {skip: "does not return user data"},
+ emptycapped: {skip: "primary only"},
+ enableSharding: {skip: "primary only"},
+ endSessions: {skip: "does not return user data"},
+ explain: {skip: "TODO SERVER-30068"},
+ features: {skip: "does not return user data"},
+ filemd5: {skip: "does not return user data"},
+ find: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {find: coll, filter: {x: 1}},
+ checkResults: function(res) {
+ // The command should work and return correct results.
+ assert.commandWorked(res);
+ assert.eq(1, res.cursor.firstBatch.length, tojson(res));
+ },
+ behavior: "versioned"
+ },
+ findAndModify: {skip: "primary only"},
+ flushRouterConfig: {skip: "does not return user data"},
+ forceerror: {skip: "does not return user data"},
+ fsync: {skip: "does not return user data"},
+ fsyncUnlock: {skip: "does not return user data"},
+ geoSearch: {skip: "not supported in mongos"},
+ getCmdLineOpts: {skip: "does not return user data"},
+ getDiagnosticData: {skip: "does not return user data"},
+ getLastError: {skip: "primary only"},
+ getLog: {skip: "does not return user data"},
+ getMore: {skip: "shard version already established"},
+ getParameter: {skip: "does not return user data"},
+ getShardMap: {skip: "does not return user data"},
+ getShardVersion: {skip: "primary only"},
+ getnonce: {skip: "does not return user data"},
+ godinsert: {skip: "for testing only"},
+ grantPrivilegesToRole: {skip: "primary only"},
+ grantRolesToRole: {skip: "primary only"},
+ grantRolesToUser: {skip: "primary only"},
+ handshake: {skip: "does not return user data"},
+ hostInfo: {skip: "does not return user data"},
+ insert: {skip: "primary only"},
+ invalidateUserCache: {skip: "does not return user data"},
+ isdbgrid: {skip: "does not return user data"},
+ isMaster: {skip: "does not return user data"},
+ killAllSessions: {skip: "does not return user data"},
+ killAllSessionsByPattern: {skip: "does not return user data"},
+ killCursors: {skip: "does not return user data"},
+ killOp: {skip: "does not return user data"},
+ killSessions: {skip: "does not return user data"},
+ listCollections: {skip: "primary only"},
+ listCommands: {skip: "does not return user data"},
+ listDatabases: {skip: "primary only"},
+ listIndexes: {skip: "primary only"},
+ listShards: {skip: "does not return user data"},
+ lockInfo: {skip: "primary only"},
+ logApplicationMessage: {skip: "primary only"},
+ logRotate: {skip: "does not return user data"},
+ logout: {skip: "does not return user data"},
+ makeSnapshot: {skip: "does not return user data"},
+ mapReduce: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {
+ mapReduce: coll,
+ map: function() {
+ emit(this.x, 1);
},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(1, res.results.length, tojson(res));
- assert.eq(1, res.results[0]._id, tojson(res));
- assert.eq(2, res.results[0].value, tojson(res));
+ reduce: function(key, values) {
+ return Array.sum(values);
},
- behavior: "targetsPrimaryUsesConnectionVersioning"
+ out: {inline: 1}
},
- mergeChunks: {skip: "primary only"},
- moveChunk: {skip: "primary only"},
- movePrimary: {skip: "primary only"},
- multicast: {skip: "does not return user data"},
- netstat: {skip: "does not return user data"},
- ping: {skip: "does not return user data"},
- planCacheClear: {skip: "does not return user data"},
- planCacheClearFilters: {skip: "does not return user data"},
- planCacheListFilters: {skip: "does not return user data"},
- planCacheListPlans: {skip: "does not return user data"},
- planCacheListQueryShapes: {skip: "does not return user data"},
- planCacheSetFilter: {skip: "does not return user data"},
- profile: {skip: "primary only"},
- reapLogicalSessionCacheNow: {skip: "does not return user data"},
- refreshLogicalSessionCacheNow: {skip: "does not return user data"},
- refreshSessions: {skip: "does not return user data"},
- refreshSessionsInternal: {skip: "does not return user data"},
- removeShard: {skip: "primary only"},
- removeShardFromZone: {skip: "primary only"},
- renameCollection: {skip: "primary only"},
- repairCursor: {skip: "does not return user data"},
- replSetAbortPrimaryCatchUp: {skip: "does not return user data"},
- replSetFreeze: {skip: "does not return user data"},
- replSetGetConfig: {skip: "does not return user data"},
- replSetGetRBID: {skip: "does not return user data"},
- replSetGetStatus: {skip: "does not return user data"},
- replSetHeartbeat: {skip: "does not return user data"},
- replSetInitiate: {skip: "does not return user data"},
- replSetMaintenance: {skip: "does not return user data"},
- replSetReconfig: {skip: "does not return user data"},
- replSetRequestVotes: {skip: "does not return user data"},
- replSetStepDown: {skip: "does not return user data"},
- replSetStepUp: {skip: "does not return user data"},
- replSetSyncFrom: {skip: "does not return user data"},
- replSetTest: {skip: "does not return user data"},
- replSetUpdatePosition: {skip: "does not return user data"},
- replSetResizeOplog: {skip: "does not return user data"},
- resetError: {skip: "does not return user data"},
- restartCatalog: {skip: "internal-only command"},
- resync: {skip: "primary only"},
- revokePrivilegesFromRole: {skip: "primary only"},
- revokeRolesFromRole: {skip: "primary only"},
- revokeRolesFromUser: {skip: "primary only"},
- rolesInfo: {skip: "primary only"},
- saslContinue: {skip: "primary only"},
- saslStart: {skip: "primary only"},
- serverStatus: {skip: "does not return user data"},
- setCommittedSnapshot: {skip: "does not return user data"},
- setIndexCommitQuorum: {skip: "primary only"},
- setFeatureCompatibilityVersion: {skip: "primary only"},
- setFreeMonitoring: {skip: "primary only"},
- setParameter: {skip: "does not return user data"},
- setShardVersion: {skip: "does not return user data"},
- shardCollection: {skip: "primary only"},
- shardConnPoolStats: {skip: "does not return user data"},
- shardingState: {skip: "does not return user data"},
- shutdown: {skip: "does not return user data"},
- sleep: {skip: "does not return user data"},
- split: {skip: "primary only"},
- splitChunk: {skip: "primary only"},
- splitVector: {skip: "primary only"},
- stageDebug: {skip: "primary only"},
- startRecordingTraffic: {skip: "does not return user data"},
- startSession: {skip: "does not return user data"},
- stopRecordingTraffic: {skip: "does not return user data"},
- top: {skip: "does not return user data"},
- touch: {skip: "does not return user data"},
- unsetSharding: {skip: "does not return user data"},
- update: {skip: "primary only"},
- updateRole: {skip: "primary only"},
- updateUser: {skip: "primary only"},
- updateZoneKeyRange: {skip: "primary only"},
- usersInfo: {skip: "primary only"},
- validate: {skip: "does not return user data"},
- waitForOngoingChunkSplits: {skip: "does not return user data"},
- whatsmyuri: {skip: "does not return user data"}
- };
-
- commandsRemovedFromMongosIn42.forEach(function(cmd) {
- testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
- });
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(1, res.results.length, tojson(res));
+ assert.eq(1, res.results[0]._id, tojson(res));
+ assert.eq(2, res.results[0].value, tojson(res));
+ },
+ behavior: "targetsPrimaryUsesConnectionVersioning"
+ },
+ mergeChunks: {skip: "primary only"},
+ moveChunk: {skip: "primary only"},
+ movePrimary: {skip: "primary only"},
+ multicast: {skip: "does not return user data"},
+ netstat: {skip: "does not return user data"},
+ ping: {skip: "does not return user data"},
+ planCacheClear: {skip: "does not return user data"},
+ planCacheClearFilters: {skip: "does not return user data"},
+ planCacheListFilters: {skip: "does not return user data"},
+ planCacheListPlans: {skip: "does not return user data"},
+ planCacheListQueryShapes: {skip: "does not return user data"},
+ planCacheSetFilter: {skip: "does not return user data"},
+ profile: {skip: "primary only"},
+ reapLogicalSessionCacheNow: {skip: "does not return user data"},
+ refreshLogicalSessionCacheNow: {skip: "does not return user data"},
+ refreshSessions: {skip: "does not return user data"},
+ refreshSessionsInternal: {skip: "does not return user data"},
+ removeShard: {skip: "primary only"},
+ removeShardFromZone: {skip: "primary only"},
+ renameCollection: {skip: "primary only"},
+ repairCursor: {skip: "does not return user data"},
+ replSetAbortPrimaryCatchUp: {skip: "does not return user data"},
+ replSetFreeze: {skip: "does not return user data"},
+ replSetGetConfig: {skip: "does not return user data"},
+ replSetGetRBID: {skip: "does not return user data"},
+ replSetGetStatus: {skip: "does not return user data"},
+ replSetHeartbeat: {skip: "does not return user data"},
+ replSetInitiate: {skip: "does not return user data"},
+ replSetMaintenance: {skip: "does not return user data"},
+ replSetReconfig: {skip: "does not return user data"},
+ replSetRequestVotes: {skip: "does not return user data"},
+ replSetStepDown: {skip: "does not return user data"},
+ replSetStepUp: {skip: "does not return user data"},
+ replSetSyncFrom: {skip: "does not return user data"},
+ replSetTest: {skip: "does not return user data"},
+ replSetUpdatePosition: {skip: "does not return user data"},
+ replSetResizeOplog: {skip: "does not return user data"},
+ resetError: {skip: "does not return user data"},
+ restartCatalog: {skip: "internal-only command"},
+ resync: {skip: "primary only"},
+ revokePrivilegesFromRole: {skip: "primary only"},
+ revokeRolesFromRole: {skip: "primary only"},
+ revokeRolesFromUser: {skip: "primary only"},
+ rolesInfo: {skip: "primary only"},
+ saslContinue: {skip: "primary only"},
+ saslStart: {skip: "primary only"},
+ serverStatus: {skip: "does not return user data"},
+ setCommittedSnapshot: {skip: "does not return user data"},
+ setIndexCommitQuorum: {skip: "primary only"},
+ setFeatureCompatibilityVersion: {skip: "primary only"},
+ setFreeMonitoring: {skip: "primary only"},
+ setParameter: {skip: "does not return user data"},
+ setShardVersion: {skip: "does not return user data"},
+ shardCollection: {skip: "primary only"},
+ shardConnPoolStats: {skip: "does not return user data"},
+ shardingState: {skip: "does not return user data"},
+ shutdown: {skip: "does not return user data"},
+ sleep: {skip: "does not return user data"},
+ split: {skip: "primary only"},
+ splitChunk: {skip: "primary only"},
+ splitVector: {skip: "primary only"},
+ stageDebug: {skip: "primary only"},
+ startRecordingTraffic: {skip: "does not return user data"},
+ startSession: {skip: "does not return user data"},
+ stopRecordingTraffic: {skip: "does not return user data"},
+ top: {skip: "does not return user data"},
+ touch: {skip: "does not return user data"},
+ unsetSharding: {skip: "does not return user data"},
+ update: {skip: "primary only"},
+ updateRole: {skip: "primary only"},
+ updateUser: {skip: "primary only"},
+ updateZoneKeyRange: {skip: "primary only"},
+ usersInfo: {skip: "primary only"},
+ validate: {skip: "does not return user data"},
+ waitForOngoingChunkSplits: {skip: "does not return user data"},
+ whatsmyuri: {skip: "does not return user data"}
+};
- // Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
- let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
- let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
+commandsRemovedFromMongosIn42.forEach(function(cmd) {
+ testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
+});
- let recipientShardPrimary = st.rs1.getPrimary();
- let donorShardSecondary = st.rs0.getSecondary();
- let recipientShardSecondary = st.rs1.getSecondary();
+// Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
+let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
+let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
- let freshMongos = st.s0;
- let staleMongos = st.s1;
+let recipientShardPrimary = st.rs1.getPrimary();
+let donorShardSecondary = st.rs0.getSecondary();
+let recipientShardSecondary = st.rs1.getSecondary();
- let res = st.s.adminCommand({listCommands: 1});
- assert.commandWorked(res);
+let freshMongos = st.s0;
+let staleMongos = st.s1;
- let commands = Object.keys(res.commands);
- for (let command of commands) {
- let test = testCases[command];
- assert(test !== undefined,
- "coverage failure: must define a safe secondary reads test for " + command);
+let res = st.s.adminCommand({listCommands: 1});
+assert.commandWorked(res);
- if (test.skip !== undefined) {
- print("skipping " + command + ": " + test.skip);
- continue;
- }
- validateTestCase(test);
+let commands = Object.keys(res.commands);
+for (let command of commands) {
+ let test = testCases[command];
+ assert(test !== undefined,
+ "coverage failure: must define a safe secondary reads test for " + command);
- jsTest.log("testing command " + tojson(test.command));
+ if (test.skip !== undefined) {
+ print("skipping " + command + ": " + test.skip);
+ continue;
+ }
+ validateTestCase(test);
- assert.commandWorked(staleMongos.adminCommand({enableSharding: db}));
- st.ensurePrimaryShard(db, st.shard0.shardName);
- assert.commandWorked(staleMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
+ jsTest.log("testing command " + tojson(test.command));
- // We do this because we expect freshMongos to see that the collection is sharded, which it
- // may not if the "nearest" config server it contacts has not replicated the shardCollection
- // writes (or has not heard that they have reached a majority).
- st.configRS.awaitReplication();
+ assert.commandWorked(staleMongos.adminCommand({enableSharding: db}));
+ st.ensurePrimaryShard(db, st.shard0.shardName);
+ assert.commandWorked(staleMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
- assert.commandWorked(staleMongos.adminCommand({split: nss, middle: {x: 0}}));
+ // We do this because we expect freshMongos to see that the collection is sharded, which it
+ // may not if the "nearest" config server it contacts has not replicated the shardCollection
+ // writes (or has not heard that they have reached a majority).
+ st.configRS.awaitReplication();
- // Do dummy read from the stale mongos so it loads the routing table into memory once.
- // Additionally, do a secondary read to ensure that the secondary has loaded the initial
- // routing table -- the first read to the primary will refresh the mongos' shardVersion,
- // which will then be used against the secondary to ensure the secondary is fresh.
- assert.commandWorked(staleMongos.getDB(db).runCommand({find: coll}));
- assert.commandWorked(freshMongos.getDB(db).runCommand(
- {find: coll, $readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
+ assert.commandWorked(staleMongos.adminCommand({split: nss, middle: {x: 0}}));
- // Do any test-specific setup.
- test.setUp(staleMongos);
+ // Do dummy read from the stale mongos so it loads the routing table into memory once.
+ // Additionally, do a secondary read to ensure that the secondary has loaded the initial
+ // routing table -- the first read to the primary will refresh the mongos' shardVersion,
+ // which will then be used against the secondary to ensure the secondary is fresh.
+ assert.commandWorked(staleMongos.getDB(db).runCommand({find: coll}));
+ assert.commandWorked(freshMongos.getDB(db).runCommand(
+ {find: coll, $readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
- // Wait for replication as a safety net, in case the individual setup function for a test
- // case did not specify writeConcern itself
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
+ // Do any test-specific setup.
+ test.setUp(staleMongos);
- assert.commandWorked(recipientShardPrimary.getDB(db).setProfilingLevel(2));
- assert.commandWorked(donorShardSecondary.getDB(db).setProfilingLevel(2));
- assert.commandWorked(recipientShardSecondary.getDB(db).setProfilingLevel(2));
+ // Wait for replication as a safety net, in case the individual setup function for a test
+    // case did not specify writeConcern itself.
+ st.rs0.awaitReplication();
+ st.rs1.awaitReplication();
- // Do a moveChunk from the fresh mongos to make the other mongos stale.
- // Use {w:2} (all) write concern so the metadata change gets persisted to the secondary
- // before stalely versioned commands are sent against the secondary.
- assert.commandWorked(freshMongos.adminCommand({
- moveChunk: nss,
- find: {x: 0},
- to: st.shard1.shardName,
- waitForDelete: true,
- _secondaryThrottle: true,
- writeConcern: {w: 2},
- }));
+ assert.commandWorked(recipientShardPrimary.getDB(db).setProfilingLevel(2));
+ assert.commandWorked(donorShardSecondary.getDB(db).setProfilingLevel(2));
+ assert.commandWorked(recipientShardSecondary.getDB(db).setProfilingLevel(2));
- let res = staleMongos.getDB(db).runCommand(Object.extend(
- test.command, {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
+ // Do a moveChunk from the fresh mongos to make the other mongos stale.
+ // Use {w:2} (all) write concern so the metadata change gets persisted to the secondary
+ // before stalely versioned commands are sent against the secondary.
+ assert.commandWorked(freshMongos.adminCommand({
+ moveChunk: nss,
+ find: {x: 0},
+ to: st.shard1.shardName,
+ waitForDelete: true,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ }));
- test.checkResults(res);
+ let res = staleMongos.getDB(db).runCommand(Object.extend(
+ test.command, {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
- // Build the query to identify the operation in the system profiler.
- let commandProfile = buildCommandProfile(test.command, true /* sharded */);
+ test.checkResults(res);
- if (test.behavior === "unshardedOnly") {
- // Check that neither the donor shard secondary nor recipient shard secondary
- // received the request.
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: donorShardSecondary.getDB(db), filter: commandProfile});
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: recipientShardSecondary.getDB(db), filter: commandProfile});
- } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
- // Check that the recipient shard primary received the request without a shardVersion
- // field and returned success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardPrimary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": false},
- "command.$readPreference": {$exists: false},
- "command.readConcern": {"level": "local"},
- "errCode": {"$exists": false},
- },
- commandProfile)
- });
- } else if (test.behavior === "versioned") {
- // Check that the donor shard secondary returned stale shardVersion.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- },
- commandProfile)
- });
+ // Build the query to identify the operation in the system profiler.
+ let commandProfile = buildCommandProfile(test.command, true /* sharded */);
- // Check that the recipient shard secondary received the request and returned stale
- // shardVersion once, even though the mongos is fresh, because the secondary was
- // stale.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- },
- commandProfile)
- });
+ if (test.behavior === "unshardedOnly") {
+ // Check that neither the donor shard secondary nor recipient shard secondary
+ // received the request.
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: donorShardSecondary.getDB(db), filter: commandProfile});
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: recipientShardSecondary.getDB(db), filter: commandProfile});
+ } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
+ // Check that the recipient shard primary received the request without a shardVersion
+ // field and returned success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardPrimary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": false},
+ "command.$readPreference": {$exists: false},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$exists": false},
+ },
+ commandProfile)
+ });
+ } else if (test.behavior === "versioned") {
+ // Check that the donor shard secondary returned stale shardVersion.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: donorShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
+ },
+ commandProfile)
+ });
- // Check that the recipient shard secondary received the request again and returned
- // success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": {"$ne": ErrorCodes.StaleConfig},
- },
- commandProfile)
- });
- }
+ // Check that the recipient shard secondary received the request and returned stale
+ // shardVersion once, even though the mongos is fresh, because the secondary was
+ // stale.
+ profilerHasSingleMatchingEntryOrThrow({
+            profileDB: recipientShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
+ },
+ commandProfile)
+ });
- // Clean up the database by dropping it; this is the only way to drop the profiler
- // collection on secondaries. This also drops all associated indexes.
- // Do this from staleMongos, so staleMongos purges the database entry from its cache.
- assert.commandWorked(staleMongos.getDB(db).runCommand({dropDatabase: 1}));
+ // Check that the recipient shard secondary received the request again and returned
+ // success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$ne": ErrorCodes.StaleConfig},
+ },
+ commandProfile)
+ });
}
- st.stop();
+ // Clean up the database by dropping it; this is the only way to drop the profiler
+ // collection on secondaries. This also drops all associated indexes.
+ // Do this from staleMongos, so staleMongos purges the database entry from its cache.
+ assert.commandWorked(staleMongos.getDB(db).runCommand({dropDatabase: 1}));
+}
+
+st.stop();
})();
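For reference, here is a sketch of the shape every non-skipped testCases entry above must satisfy. The entry itself is hypothetical; validateTestCase, nss, and coll are the test's own helper and variables.

let exampleCase = {
    setUp: function(mongosConn) {
        // Seed one document so the command has something to return.
        assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
    },
    command: {count: coll, query: {x: 1}},
    checkResults: function(res) {
        assert.commandWorked(res);
        assert.eq(1, res.n, tojson(res));
    },
    // One of "unshardedOnly", "targetsPrimaryUsesConnectionVersioning" or "versioned";
    // it selects which system profiler checks run after the command.
    behavior: "versioned"
};
validateTestCase(exampleCase);  // throws if a field is missing or the behavior is unrecognized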
diff --git a/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js b/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js
index d1da3e396d0..6c8b150aebb 100644
--- a/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js
+++ b/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js
@@ -5,112 +5,110 @@
* level should default to 'local' read concern level, using the shard version protocol.
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/profiler.js'); // for profilerHasSingleMatchingEntryOrThrow()
+load('jstests/libs/profiler.js'); // for profilerHasSingleMatchingEntryOrThrow()
- // Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
- let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
- let st =
- new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}, causallyConsistent: true});
- let dbName = 'test', collName = 'foo', ns = 'test.foo';
+// Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
+let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
+let st =
+ new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}, causallyConsistent: true});
+let dbName = 'test', collName = 'foo', ns = 'test.foo';
- assert.commandWorked(st.s0.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: ns, key: {x: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: ns, middle: {x: 0}}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: ns, key: {x: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: ns, middle: {x: 0}}));
- let freshMongos = st.s0;
- let staleMongos = st.s1;
+let freshMongos = st.s0;
+let staleMongos = st.s1;
- jsTest.log("do insert from stale mongos to make it load the routing table before the move");
- assert.writeOK(staleMongos.getCollection(ns).insert({x: 1}));
+jsTest.log("do insert from stale mongos to make it load the routing table before the move");
+assert.writeOK(staleMongos.getCollection(ns).insert({x: 1}));
- jsTest.log("do moveChunk from fresh mongos");
- assert.commandWorked(freshMongos.adminCommand({
- moveChunk: ns,
- find: {x: 0},
- to: st.shard1.shardName,
- secondaryThrottle: true,
- _waitForDelete: true,
- writeConcern: {w: 2},
- }));
+jsTest.log("do moveChunk from fresh mongos");
+assert.commandWorked(freshMongos.adminCommand({
+ moveChunk: ns,
+ find: {x: 0},
+ to: st.shard1.shardName,
+ secondaryThrottle: true,
+ _waitForDelete: true,
+ writeConcern: {w: 2},
+}));
- // Turn on system profiler on secondaries to collect data on all future operations on the db.
- let donorShardSecondary = st.rs0.getSecondary();
- let recipientShardSecondary = st.rs1.getSecondary();
- assert.commandWorked(donorShardSecondary.getDB(dbName).setProfilingLevel(2));
- assert.commandWorked(recipientShardSecondary.getDB(dbName).setProfilingLevel(2));
+// Turn on system profiler on secondaries to collect data on all future operations on the db.
+let donorShardSecondary = st.rs0.getSecondary();
+let recipientShardSecondary = st.rs1.getSecondary();
+assert.commandWorked(donorShardSecondary.getDB(dbName).setProfilingLevel(2));
+assert.commandWorked(recipientShardSecondary.getDB(dbName).setProfilingLevel(2));
- // Note: this query will not be registered by the profiler because it errors before reaching the
- // storage level.
- jsTest.log("Do a secondary read from stale mongos with afterClusterTime and level 'available'");
- const staleMongosDB = staleMongos.getDB(dbName);
- assert.commandFailedWithCode(staleMongosDB.runCommand({
- count: collName,
- query: {x: 1},
- $readPreference: {mode: "secondary"},
- readConcern: {
- 'afterClusterTime': staleMongosDB.getSession().getOperationTime(),
- 'level': 'available'
- }
- }),
- ErrorCodes.InvalidOptions);
+// Note: this query will not be registered by the profiler because it errors before reaching the
+// storage level.
+jsTest.log("Do a secondary read from stale mongos with afterClusterTime and level 'available'");
+const staleMongosDB = staleMongos.getDB(dbName);
+assert.commandFailedWithCode(staleMongosDB.runCommand({
+ count: collName,
+ query: {x: 1},
+ $readPreference: {mode: "secondary"},
+ readConcern:
+ {'afterClusterTime': staleMongosDB.getSession().getOperationTime(), 'level': 'available'}
+}),
+ ErrorCodes.InvalidOptions);
- jsTest.log("Do a secondary read from stale mongos with afterClusterTime and no level");
- let res = staleMongosDB.runCommand({
- count: collName,
- query: {x: 1},
- $readPreference: {mode: "secondary"},
- readConcern: {'afterClusterTime': staleMongosDB.getSession().getOperationTime()},
- });
- assert(res.ok);
- assert.eq(1, res.n, tojson(res));
+jsTest.log("Do a secondary read from stale mongos with afterClusterTime and no level");
+let res = staleMongosDB.runCommand({
+ count: collName,
+ query: {x: 1},
+ $readPreference: {mode: "secondary"},
+ readConcern: {'afterClusterTime': staleMongosDB.getSession().getOperationTime()},
+});
+assert(res.ok);
+assert.eq(1, res.n, tojson(res));
- // The stale mongos will first go to the donor shard and receive a stale shard version,
- // prompting the stale mongos to refresh it's routing table and retarget to the recipient shard.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB(dbName),
- filter: {
- "ns": ns,
- "command.count": collName,
- "command.query": {x: 1},
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern.afterClusterTime": {"$exists": true},
- "errCode": ErrorCodes.StaleConfig
- }
- });
+// The stale mongos will first go to the donor shard and receive a stale shard version,
+// prompting the stale mongos to refresh its routing table and retarget to the recipient shard.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: donorShardSecondary.getDB(dbName),
+ filter: {
+ "ns": ns,
+ "command.count": collName,
+ "command.query": {x: 1},
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern.afterClusterTime": {"$exists": true},
+ "errCode": ErrorCodes.StaleConfig
+ }
+});
- // The recipient shard will then return a stale shard version error because it needs to refresh
- // its own routing table.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardSecondary.getDB(dbName),
- filter: {
- "ns": ns,
- "command.count": collName,
- "command.query": {x: 1},
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern.afterClusterTime": {"$exists": true},
- "errCode": ErrorCodes.StaleConfig
- }
- });
+// The recipient shard will then return a stale shard version error because it needs to refresh
+// its own routing table.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB(dbName),
+ filter: {
+ "ns": ns,
+ "command.count": collName,
+ "command.query": {x: 1},
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern.afterClusterTime": {"$exists": true},
+ "errCode": ErrorCodes.StaleConfig
+ }
+});
- // Finally, the command is retried on the recipient shard and succeeds.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardSecondary.getDB(dbName),
- filter: {
- "ns": ns,
- "command.count": collName,
- "command.query": {x: 1},
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern.afterClusterTime": {"$exists": true},
- "errCode": {"$exists": false}
- }
- });
+// Finally, the command is retried on the recipient shard and succeeds.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB(dbName),
+ filter: {
+ "ns": ns,
+ "command.count": collName,
+ "command.query": {x: 1},
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern.afterClusterTime": {"$exists": true},
+ "errCode": {"$exists": false}
+ }
+});
- st.stop();
+st.stop();
})();
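A condensed sketch of the rule this test exercises, reusing staleMongosDB and collName from above (illustrative only): afterClusterTime cannot be combined with the 'available' level, and when no level is given the read falls back to 'local', so it participates in shard versioning.

const clusterTime = staleMongosDB.getSession().getOperationTime();

// Rejected: 'available' bypasses the machinery that afterClusterTime depends on.
assert.commandFailedWithCode(staleMongosDB.runCommand({
    count: collName,
    query: {x: 1},
    $readPreference: {mode: "secondary"},
    readConcern: {afterClusterTime: clusterTime, level: "available"}
}),
                             ErrorCodes.InvalidOptions);

// Accepted: no level specified, so the read runs at 'local' and is causally consistent.
assert.commandWorked(staleMongosDB.runCommand({
    count: collName,
    query: {x: 1},
    $readPreference: {mode: "secondary"},
    readConcern: {afterClusterTime: clusterTime}
}));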
diff --git a/jstests/sharding/secondary_shard_versioning.js b/jstests/sharding/secondary_shard_versioning.js
index 6c92231ec82..94e49c09a5d 100644
--- a/jstests/sharding/secondary_shard_versioning.js
+++ b/jstests/sharding/secondary_shard_versioning.js
@@ -2,98 +2,98 @@
* Tests that secondaries participate in the shard versioning protocol.
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/profiler.js'); // for profilerHasSingleMatchingEntryOrThrow()
+load('jstests/libs/profiler.js'); // for profilerHasSingleMatchingEntryOrThrow()
- // Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
- let rsOpts = {nodes: [{}, {rsConfig: {priority: 0}}]};
- let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
+// Set the secondaries to priority 0 to prevent the primaries from stepping down.
+let rsOpts = {nodes: [{}, {rsConfig: {priority: 0}}]};
+let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: 'test.foo', middle: {x: 0}}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: 'test.foo', middle: {x: 0}}));
- let freshMongos = st.s0;
- let staleMongos = st.s1;
+let freshMongos = st.s0;
+let staleMongos = st.s1;
- jsTest.log("do insert from stale mongos to make it load the routing table before the move");
- assert.writeOK(staleMongos.getDB('test').foo.insert({x: 1}));
+jsTest.log("do insert from stale mongos to make it load the routing table before the move");
+assert.writeOK(staleMongos.getDB('test').foo.insert({x: 1}));
- jsTest.log("do moveChunk from fresh mongos");
- assert.commandWorked(freshMongos.adminCommand({
- moveChunk: 'test.foo',
- find: {x: 0},
- to: st.shard1.shardName,
- }));
+jsTest.log("do moveChunk from fresh mongos");
+assert.commandWorked(freshMongos.adminCommand({
+ moveChunk: 'test.foo',
+ find: {x: 0},
+ to: st.shard1.shardName,
+}));
- // Turn on system profiler on secondaries to collect data on all future operations on the db.
- let donorShardSecondary = st.rs0.getSecondary();
- let recipientShardSecondary = st.rs1.getSecondary();
- assert.commandWorked(donorShardSecondary.getDB('test').setProfilingLevel(2));
- assert.commandWorked(recipientShardSecondary.getDB('test').setProfilingLevel(2));
+// Turn on system profiler on secondaries to collect data on all future operations on the db.
+let donorShardSecondary = st.rs0.getSecondary();
+let recipientShardSecondary = st.rs1.getSecondary();
+assert.commandWorked(donorShardSecondary.getDB('test').setProfilingLevel(2));
+assert.commandWorked(recipientShardSecondary.getDB('test').setProfilingLevel(2));
- // Use the mongos with the stale routing table to send read requests to the secondaries. 'local'
- // read concern level must be specified in the request because secondaries default to
- // 'available', which doesn't participate in the version protocol. Check that the donor shard
- // returns a stale shardVersion error, which provokes mongos to refresh its routing table and
- // re-target; that the recipient shard secondary refreshes its routing table on hearing the
- // fresh version from mongos; and that the recipient shard secondary returns the results.
+// Use the mongos with the stale routing table to send read requests to the secondaries. 'local'
+// read concern level must be specified in the request because secondaries default to
+// 'available', which doesn't participate in the version protocol. Check that the donor shard
+// returns a stale shardVersion error, which provokes mongos to refresh its routing table and
+// re-target; that the recipient shard secondary refreshes its routing table on hearing the
+// fresh version from mongos; and that the recipient shard secondary returns the results.
- jsTest.log("do secondary read from stale mongos");
- let res = staleMongos.getDB('test').runCommand({
- count: 'foo',
- query: {x: 1},
- $readPreference: {mode: "secondary"},
- readConcern: {"level": "local"}
- });
- assert(res.ok);
- assert.eq(1, res.n, tojson(res));
+jsTest.log("do secondary read from stale mongos");
+let res = staleMongos.getDB('test').runCommand({
+ count: 'foo',
+ query: {x: 1},
+ $readPreference: {mode: "secondary"},
+ readConcern: {"level": "local"}
+});
+assert(res.ok);
+assert.eq(1, res.n, tojson(res));
- // Check that the donor shard secondary returned stale shardVersion.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB('test'),
- filter: {
- "ns": "test.foo",
- "command.count": "foo",
- "command.query": {x: 1},
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- }
- });
+// Check that the donor shard secondary returned stale shardVersion.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: donorShardSecondary.getDB('test'),
+ filter: {
+ "ns": "test.foo",
+ "command.count": "foo",
+ "command.query": {x: 1},
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
+ }
+});
- // The recipient shard secondary will also return stale shardVersion once, even though the
- // mongos is fresh, because the recipient shard secondary was stale.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB('test'),
- filter: {
- "ns": "test.foo",
- "command.count": "foo",
- "command.query": {x: 1},
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- }
- });
+// The recipient shard secondary will also return stale shardVersion once, even though the
+// mongos is fresh, because the recipient shard secondary was stale.
+profilerHasSingleMatchingEntryOrThrow({
+    profileDB: recipientShardSecondary.getDB('test'),
+ filter: {
+ "ns": "test.foo",
+ "command.count": "foo",
+ "command.query": {x: 1},
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
+ }
+});
- // Check that the recipient shard secondary received the query and returned results.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardSecondary.getDB('test'),
- filter: {
- "ns": "test.foo",
- "command.count": "foo",
- "command.query": {x: 1},
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": {"$exists": false}
- }
- });
+// Check that the recipient shard secondary received the query and returned results.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB('test'),
+ filter: {
+ "ns": "test.foo",
+ "command.count": "foo",
+ "command.query": {x: 1},
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$exists": false}
+ }
+});
- st.stop();
+st.stop();
})();
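The profiler assertions in the test above work because setProfilingLevel(2) makes each secondary record every operation in its system.profile collection, and the helper then looks for exactly one entry matching the given filter. A minimal sketch of that idea, assuming the filter shapes used in this test (the real helper lives in jstests/libs/profiler.js and may differ in detail):

// Sketch only: roughly the check performed by profilerHasSingleMatchingEntryOrThrow above.
function assertSingleProfilerEntry(profileDB, filter) {
    // system.profile is populated once profiling level 2 is enabled on the node.
    const numMatches = profileDB.system.profile.find(filter).itcount();
    assert.eq(1, numMatches, "expected exactly one profiler entry matching " + tojson(filter));
}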
diff --git a/jstests/sharding/server_status.js b/jstests/sharding/server_status.js
index 770300174b8..0e2865842b4 100644
--- a/jstests/sharding/server_status.js
+++ b/jstests/sharding/server_status.js
@@ -4,41 +4,41 @@
*/
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- var testDB = st.s.getDB('test');
- testDB.adminCommand({enableSharding: 'test'});
- testDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
+var testDB = st.s.getDB('test');
+testDB.adminCommand({enableSharding: 'test'});
+testDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
- // Initialize shard metadata in shards
- testDB.user.insert({x: 1});
+// Initialize shard metadata in shards
+testDB.user.insert({x: 1});
- var checkShardingServerStatus = function(doc) {
- var shardingSection = doc.sharding;
- assert.neq(shardingSection, null);
+var checkShardingServerStatus = function(doc) {
+ var shardingSection = doc.sharding;
+ assert.neq(shardingSection, null);
- var configConnStr = shardingSection.configsvrConnectionString;
- var configConn = new Mongo(configConnStr);
- var configIsMaster = configConn.getDB('admin').runCommand({isMaster: 1});
+ var configConnStr = shardingSection.configsvrConnectionString;
+ var configConn = new Mongo(configConnStr);
+ var configIsMaster = configConn.getDB('admin').runCommand({isMaster: 1});
- var configOpTimeObj = shardingSection.lastSeenConfigServerOpTime;
+ var configOpTimeObj = shardingSection.lastSeenConfigServerOpTime;
- assert.gt(configConnStr.indexOf('/'), 0);
- assert.gte(configIsMaster.configsvr, 1); // If it's a shard, this field won't exist.
- assert.neq(null, configOpTimeObj);
- assert.neq(null, configOpTimeObj.ts);
- assert.neq(null, configOpTimeObj.t);
+ assert.gt(configConnStr.indexOf('/'), 0);
+ assert.gte(configIsMaster.configsvr, 1); // If it's a shard, this field won't exist.
+ assert.neq(null, configOpTimeObj);
+ assert.neq(null, configOpTimeObj.ts);
+ assert.neq(null, configOpTimeObj.t);
- assert.neq(null, shardingSection.maxChunkSizeInBytes);
- };
+ assert.neq(null, shardingSection.maxChunkSizeInBytes);
+};
- var mongosServerStatus = testDB.adminCommand({serverStatus: 1});
- checkShardingServerStatus(mongosServerStatus);
+var mongosServerStatus = testDB.adminCommand({serverStatus: 1});
+checkShardingServerStatus(mongosServerStatus);
- var mongodServerStatus = st.rs0.getPrimary().getDB('admin').runCommand({serverStatus: 1});
- checkShardingServerStatus(mongodServerStatus);
+var mongodServerStatus = st.rs0.getPrimary().getDB('admin').runCommand({serverStatus: 1});
+checkShardingServerStatus(mongodServerStatus);
- st.stop();
+st.stop();
})();
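For reference, every field that checkShardingServerStatus validates lives in the serverStatus sharding section; a quick manual inspection (a sketch, assuming an already-open connection conn to a mongos or shard primary) would look like:

// Sketch: inspect the serverStatus sharding section asserted on by checkShardingServerStatus.
var shardingSection = conn.getDB('admin').runCommand({serverStatus: 1}).sharding;
printjson({
    configsvrConnectionString: shardingSection.configsvrConnectionString,  // "csrsName/host1:port,..."
    lastSeenConfigServerOpTime: shardingSection.lastSeenConfigServerOpTime,  // {ts: Timestamp, t: term}
    maxChunkSizeInBytes: shardingSection.maxChunkSizeInBytes
});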
diff --git a/jstests/sharding/server_status_crud_metrics.js b/jstests/sharding/server_status_crud_metrics.js
index 522f2f89222..b40c412fcd7 100644
--- a/jstests/sharding/server_status_crud_metrics.js
+++ b/jstests/sharding/server_status_crud_metrics.js
@@ -4,75 +4,75 @@
*/
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 2});
- const testDB = st.s.getDB("test");
- const testColl = testDB.coll;
- const unshardedColl = testDB.unsharded;
+const st = new ShardingTest({shards: 2});
+const testDB = st.s.getDB("test");
+const testColl = testDB.coll;
+const unshardedColl = testDB.unsharded;
- assert.commandWorked(st.s0.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
- // Shard testColl on {x:1}, split it at {x:0}, and move chunk {x:1} to shard1.
- st.shardColl(testColl, {x: 1}, {x: 0}, {x: 1});
+// Shard testColl on {x:1}, split it at {x:0}, and move chunk {x:1} to shard1.
+st.shardColl(testColl, {x: 1}, {x: 0}, {x: 1});
- // Insert one document on each shard.
- assert.commandWorked(testColl.insert({x: 1, _id: 1}));
- assert.commandWorked(testColl.insert({x: -1, _id: 0}));
+// Insert one document on each shard.
+assert.commandWorked(testColl.insert({x: 1, _id: 1}));
+assert.commandWorked(testColl.insert({x: -1, _id: 0}));
- assert.commandWorked(unshardedColl.insert({x: 1, _id: 1}));
+assert.commandWorked(unshardedColl.insert({x: 1, _id: 1}));
- // Verification for 'updateOneOpStyleBroadcastWithExactIDCount' metric.
+// Verification for 'updateOneOpStyleBroadcastWithExactIDCount' metric.
- // Should increment the metric as the update cannot target single shard and are {multi:false}.
- assert.commandWorked(testDB.coll.update({_id: "missing"}, {$set: {a: 1}}, {multi: false}));
- assert.commandWorked(testDB.coll.update({_id: 1}, {$set: {a: 2}}, {multi: false}));
+// Should increment the metric as the updates cannot target a single shard and are {multi:false}.
+assert.commandWorked(testDB.coll.update({_id: "missing"}, {$set: {a: 1}}, {multi: false}));
+assert.commandWorked(testDB.coll.update({_id: 1}, {$set: {a: 2}}, {multi: false}));
- // Should increment the metric because we broadcast by _id, even though the update subsequently
- // fails on the individual shard.
- assert.commandFailedWithCode(testDB.coll.update({_id: 1}, {$set: {x: 2}}, {multi: false}),
- [ErrorCodes.ImmutableField, 31025]);
- assert.commandFailedWithCode(
- testDB.coll.update({_id: 1}, {$set: {x: 2, $invalidField: 4}}, {multi: false}),
- ErrorCodes.DollarPrefixedFieldName);
+// Should increment the metric because we broadcast by _id, even though the update subsequently
+// fails on the individual shard.
+assert.commandFailedWithCode(testDB.coll.update({_id: 1}, {$set: {x: 2}}, {multi: false}),
+ [ErrorCodes.ImmutableField, 31025]);
+assert.commandFailedWithCode(
+ testDB.coll.update({_id: 1}, {$set: {x: 2, $invalidField: 4}}, {multi: false}),
+ ErrorCodes.DollarPrefixedFieldName);
- let mongosServerStatus = testDB.adminCommand({serverStatus: 1});
+let mongosServerStatus = testDB.adminCommand({serverStatus: 1});
- // Verify that the above four updates incremented the metric counter.
- assert.eq(4, mongosServerStatus.metrics.query.updateOneOpStyleBroadcastWithExactIDCount);
+// Verify that the above four updates incremented the metric counter.
+assert.eq(4, mongosServerStatus.metrics.query.updateOneOpStyleBroadcastWithExactIDCount);
- // Shouldn't increment the metric when {multi:true}.
- assert.commandWorked(testDB.coll.update({_id: 1}, {$set: {a: 3}}, {multi: true}));
- assert.commandWorked(testDB.coll.update({}, {$set: {a: 3}}, {multi: true}));
+// Shouldn't increment the metric when {multi:true}.
+assert.commandWorked(testDB.coll.update({_id: 1}, {$set: {a: 3}}, {multi: true}));
+assert.commandWorked(testDB.coll.update({}, {$set: {a: 3}}, {multi: true}));
- // Shouldn't increment the metric when update can target single shard.
- assert.commandWorked(testDB.coll.update({x: 11}, {$set: {a: 2}}, {multi: false}));
- assert.commandWorked(testDB.coll.update({x: 1}, {$set: {a: 2}}, {multi: false}));
+// Shouldn't increment the metric when update can target single shard.
+assert.commandWorked(testDB.coll.update({x: 11}, {$set: {a: 2}}, {multi: false}));
+assert.commandWorked(testDB.coll.update({x: 1}, {$set: {a: 2}}, {multi: false}));
- // Shouldn't increment the metric for replacement style updates.
- assert.commandWorked(testDB.coll.update({_id: 1}, {x: 1, a: 2}));
- assert.commandWorked(testDB.coll.update({x: 1}, {x: 1, a: 1}));
+// Shouldn't increment the metric for replacement style updates.
+assert.commandWorked(testDB.coll.update({_id: 1}, {x: 1, a: 2}));
+assert.commandWorked(testDB.coll.update({x: 1}, {x: 1, a: 1}));
- // Shouldn't increment the metric when routing fails.
- assert.commandFailedWithCode(testDB.coll.update({}, {$set: {x: 2}}, {multi: false}),
- [ErrorCodes.InvalidOptions, ErrorCodes.ShardKeyNotFound]);
- assert.commandFailedWithCode(testDB.coll.update({_id: 1}, {$set: {x: 2}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
+// Shouldn't increment the metric when routing fails.
+assert.commandFailedWithCode(testDB.coll.update({}, {$set: {x: 2}}, {multi: false}),
+ [ErrorCodes.InvalidOptions, ErrorCodes.ShardKeyNotFound]);
+assert.commandFailedWithCode(testDB.coll.update({_id: 1}, {$set: {x: 2}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
- // Shouldn't increment the metrics for unsharded collection.
- assert.commandWorked(unshardedColl.update({_id: "missing"}, {$set: {a: 1}}, {multi: false}));
- assert.commandWorked(unshardedColl.update({_id: 1}, {$set: {a: 2}}, {multi: false}));
+// Shouldn't increment the metric for an unsharded collection.
+assert.commandWorked(unshardedColl.update({_id: "missing"}, {$set: {a: 1}}, {multi: false}));
+assert.commandWorked(unshardedColl.update({_id: 1}, {$set: {a: 2}}, {multi: false}));
- // Shouldn't incement the metrics when query had invalid operator.
- assert.commandFailedWithCode(
- testDB.coll.update({_id: 1, $invalidOperator: 1}, {$set: {a: 2}}, {multi: false}),
- ErrorCodes.BadValue);
+// Shouldn't increment the metric when the query has an invalid operator.
+assert.commandFailedWithCode(
+ testDB.coll.update({_id: 1, $invalidOperator: 1}, {$set: {a: 2}}, {multi: false}),
+ ErrorCodes.BadValue);
- mongosServerStatus = testDB.adminCommand({serverStatus: 1});
+mongosServerStatus = testDB.adminCommand({serverStatus: 1});
- // Verify that only the first four upserts incremented the metric counter.
- assert.eq(4, mongosServerStatus.metrics.query.updateOneOpStyleBroadcastWithExactIDCount);
+// Verify that only the first four updates incremented the metric counter.
+assert.eq(4, mongosServerStatus.metrics.query.updateOneOpStyleBroadcastWithExactIDCount);
- st.stop();
+st.stop();
})();
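The counter exercised above is exposed through serverStatus on the mongos, so it can be read directly outside the assertions; a sketch, reusing the st.s handle from the test:

// Sketch: read the broadcast-by-_id update counter that this test asserts on.
var queryMetrics = st.s.getDB('admin').runCommand({serverStatus: 1}).metrics.query;
jsTest.log("updateOneOpStyleBroadcastWithExactIDCount = " +
           queryMetrics.updateOneOpStyleBroadcastWithExactIDCount);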
diff --git a/jstests/sharding/session_info_in_oplog.js b/jstests/sharding/session_info_in_oplog.js
index dc7b17f9494..617d5759207 100644
--- a/jstests/sharding/session_info_in_oplog.js
+++ b/jstests/sharding/session_info_in_oplog.js
@@ -4,363 +4,342 @@
* updated after the write operations.
*/
(function() {
- "use strict";
+"use strict";
+
+load("jstests/libs/retryable_writes_util.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+const kNodes = 2;
+
+var checkOplog = function(oplog, lsid, uid, txnNum, stmtId, prevTs, prevTerm) {
+ assert(oplog != null);
+ assert(oplog.lsid != null);
+ assert.eq(lsid, oplog.lsid.id);
+ assert.eq(uid, oplog.lsid.uid);
+ assert.eq(txnNum, oplog.txnNumber);
+ assert.eq(stmtId, oplog.stmtId);
+
+ var oplogPrevTs = oplog.prevOpTime.ts;
+ assert.eq(prevTs.getTime(), oplogPrevTs.getTime());
+ assert.eq(prevTs.getInc(), oplogPrevTs.getInc());
+ assert.eq(prevTerm, oplog.prevOpTime.t);
+};
+
+var checkSessionCatalog = function(conn, sessionId, uid, txnNum, expectedTs, expectedTerm) {
+ var coll = conn.getDB('config').transactions;
+ var sessionDoc = coll.findOne({'_id': {id: sessionId, uid: uid}});
+
+ assert.eq(txnNum, sessionDoc.txnNum);
+
+ var oplogTs = sessionDoc.lastWriteOpTime.ts;
+ assert.eq(expectedTs.getTime(), oplogTs.getTime());
+ assert.eq(expectedTs.getInc(), oplogTs.getInc());
+
+ assert.eq(expectedTerm, sessionDoc.lastWriteOpTime.t);
+};
+
+var runTests = function(mainConn, priConn, secConn) {
+ var lsid = UUID();
+ var uid = function() {
+ var user = mainConn.getDB("admin")
+ .runCommand({connectionStatus: 1})
+ .authInfo.authenticatedUsers[0];
+
+ if (user) {
+ return computeSHA256Block(user.user + "@" + user.db);
+ } else {
+ return computeSHA256Block("");
+ }
+ }();
+
+ var txnNumber = NumberLong(34);
+ var incrementTxnNumber = function() {
+ txnNumber = NumberLong(txnNumber + 1);
+ };
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test insert command
+
+ var cmd = {
+ insert: 'user',
+ documents: [{_id: 10}, {_id: 30}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
+ };
- load("jstests/libs/retryable_writes_util.js");
+ assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+ var oplog = priConn.getDB('local').oplog.rs;
- const kNodes = 2;
+ var firstDoc = oplog.findOne({ns: 'test.user', 'o._id': 10});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
- var checkOplog = function(oplog, lsid, uid, txnNum, stmtId, prevTs, prevTerm) {
- assert(oplog != null);
- assert(oplog.lsid != null);
- assert.eq(lsid, oplog.lsid.id);
- assert.eq(uid, oplog.lsid.uid);
- assert.eq(txnNum, oplog.txnNumber);
- assert.eq(stmtId, oplog.stmtId);
+ var secondDoc = oplog.findOne({ns: 'test.user', 'o._id': 30});
+ checkOplog(secondDoc, lsid, uid, txnNumber, 1, firstDoc.ts, firstDoc.t);
- var oplogPrevTs = oplog.prevOpTime.ts;
- assert.eq(prevTs.getTime(), oplogPrevTs.getTime());
- assert.eq(prevTs.getInc(), oplogPrevTs.getInc());
- assert.eq(prevTerm, oplog.prevOpTime.t);
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test update command
+
+ incrementTxnNumber();
+ cmd = {
+ update: 'user',
+ updates: [
+ {q: {_id: 10}, u: {$set: {x: 1}}}, // in place
+ {q: {_id: 20}, u: {$set: {y: 1}}, upsert: true},
+ {q: {_id: 30}, u: {z: 1}} // replacement
+ ],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
};
- var checkSessionCatalog = function(conn, sessionId, uid, txnNum, expectedTs, expectedTerm) {
- var coll = conn.getDB('config').transactions;
- var sessionDoc = coll.findOne({'_id': {id: sessionId, uid: uid}});
+ assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 10});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ secondDoc = oplog.findOne({ns: 'test.user', op: 'i', 'o._id': 20});
+ checkOplog(secondDoc, lsid, uid, txnNumber, 1, firstDoc.ts, firstDoc.t);
- assert.eq(txnNum, sessionDoc.txnNum);
+ var thirdDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 30});
+ checkOplog(thirdDoc, lsid, uid, txnNumber, 2, secondDoc.ts, secondDoc.t);
- var oplogTs = sessionDoc.lastWriteOpTime.ts;
- assert.eq(expectedTs.getTime(), oplogTs.getTime());
- assert.eq(expectedTs.getInc(), oplogTs.getInc());
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, thirdDoc.ts, thirdDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, thirdDoc.ts, thirdDoc.t);
- assert.eq(expectedTerm, sessionDoc.lastWriteOpTime.t);
+ ////////////////////////////////////////////////////////////////////////
+ // Test delete command
+
+ incrementTxnNumber();
+ cmd = {
+ delete: 'user',
+ deletes: [{q: {_id: 10}, limit: 1}, {q: {_id: 20}, limit: 1}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
};
- var runTests = function(mainConn, priConn, secConn) {
- var lsid = UUID();
- var uid = function() {
- var user = mainConn.getDB("admin")
- .runCommand({connectionStatus: 1})
- .authInfo.authenticatedUsers[0];
-
- if (user) {
- return computeSHA256Block(user.user + "@" + user.db);
- } else {
- return computeSHA256Block("");
- }
- }();
-
- var txnNumber = NumberLong(34);
- var incrementTxnNumber = function() {
- txnNumber = NumberLong(txnNumber + 1);
- };
-
- ////////////////////////////////////////////////////////////////////////
- // Test insert command
-
- var cmd = {
- insert: 'user',
- documents: [{_id: 10}, {_id: 30}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- var oplog = priConn.getDB('local').oplog.rs;
-
- var firstDoc = oplog.findOne({ns: 'test.user', 'o._id': 10});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- var secondDoc = oplog.findOne({ns: 'test.user', 'o._id': 30});
- checkOplog(secondDoc, lsid, uid, txnNumber, 1, firstDoc.ts, firstDoc.t);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
-
- ////////////////////////////////////////////////////////////////////////
- // Test update command
-
- incrementTxnNumber();
- cmd = {
- update: 'user',
- updates: [
- {q: {_id: 10}, u: {$set: {x: 1}}}, // in place
- {q: {_id: 20}, u: {$set: {y: 1}}, upsert: true},
- {q: {_id: 30}, u: {z: 1}} // replacement
- ],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 10});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- secondDoc = oplog.findOne({ns: 'test.user', op: 'i', 'o._id': 20});
- checkOplog(secondDoc, lsid, uid, txnNumber, 1, firstDoc.ts, firstDoc.t);
-
- var thirdDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 30});
- checkOplog(thirdDoc, lsid, uid, txnNumber, 2, secondDoc.ts, secondDoc.t);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, thirdDoc.ts, thirdDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, thirdDoc.ts, thirdDoc.t);
-
- ////////////////////////////////////////////////////////////////////////
- // Test delete command
-
- incrementTxnNumber();
- cmd = {
- delete: 'user',
- deletes: [{q: {_id: 10}, limit: 1}, {q: {_id: 20}, limit: 1}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'd', 'o._id': 10});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- secondDoc = oplog.findOne({ns: 'test.user', op: 'd', 'o._id': 20});
- checkOplog(secondDoc, lsid, uid, txnNumber, 1, firstDoc.ts, firstDoc.t);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (upsert)
-
- incrementTxnNumber();
- cmd = {
- findAndModify: 'user',
- query: {_id: 40},
- update: {$set: {x: 1}},
- new: true,
- upsert: true,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'i', 'o._id': 40});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- assert.eq(null, firstDoc.preImageTs);
- assert.eq(null, firstDoc.postImageTs);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- var lastTs = firstDoc.ts;
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (in-place update, return pre-image)
-
- incrementTxnNumber();
- cmd = {
- findAndModify: 'user',
- query: {_id: 40},
- update: {$inc: {x: 1}},
- new: false,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- var beforeDoc = mainConn.getDB('test').user.findOne({_id: 40});
- var res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- assert.eq(null, firstDoc.postImageTs);
-
- var savedDoc = oplog.findOne({
- ns: 'test.user',
- op: 'n',
- ts: firstDoc.preImageOpTime.ts,
- t: firstDoc.preImageOpTime.t
- });
- assert.eq(beforeDoc, savedDoc.o);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- lastTs = firstDoc.ts;
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (in-place update, return post-image)
-
- incrementTxnNumber();
- cmd = {
- findAndModify: 'user',
- query: {_id: 40},
- update: {$inc: {x: 1}},
- new: true,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
- var afterDoc = mainConn.getDB('test').user.findOne({_id: 40});
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- assert.eq(null, firstDoc.preImageTs);
-
- savedDoc = oplog.findOne({
- ns: 'test.user',
- op: 'n',
- ts: firstDoc.postImageOpTime.ts,
- t: firstDoc.postImageOpTime.t
- });
- assert.eq(afterDoc, savedDoc.o);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- lastTs = firstDoc.ts;
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (replacement update, return pre-image)
-
- incrementTxnNumber();
- cmd = {
- findAndModify: 'user',
- query: {_id: 40},
- update: {y: 1},
- new: false,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- beforeDoc = mainConn.getDB('test').user.findOne({_id: 40});
- res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- assert.eq(null, firstDoc.postImageTs);
-
- savedDoc = oplog.findOne({
- ns: 'test.user',
- op: 'n',
- ts: firstDoc.preImageOpTime.ts,
- t: firstDoc.preImageOpTime.t
- });
- assert.eq(beforeDoc, savedDoc.o);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- lastTs = firstDoc.ts;
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (replacement update, return post-image)
-
- incrementTxnNumber();
- cmd = {
- findAndModify: 'user',
- query: {_id: 40},
- update: {z: 1},
- new: true,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
- afterDoc = mainConn.getDB('test').user.findOne({_id: 40});
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- assert.eq(null, firstDoc.preImageTs);
-
- savedDoc = oplog.findOne({
- ns: 'test.user',
- op: 'n',
- ts: firstDoc.postImageOpTime.ts,
- t: firstDoc.postImageOpTime.t
- });
- assert.eq(afterDoc, savedDoc.o);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- lastTs = firstDoc.ts;
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (remove, return pre-image)
-
- incrementTxnNumber();
- cmd = {
- findAndModify: 'user',
- query: {_id: 40},
- remove: true,
- new: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- beforeDoc = mainConn.getDB('test').user.findOne({_id: 40});
- res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'd', 'o._id': 40, ts: {$gt: lastTs}});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- assert.eq(null, firstDoc.postImageTs);
-
- savedDoc = oplog.findOne({
- ns: 'test.user',
- op: 'n',
- ts: firstDoc.preImageOpTime.ts,
- t: firstDoc.preImageOpTime.t
- });
- assert.eq(beforeDoc, savedDoc.o);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- lastTs = firstDoc.ts;
+ assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'd', 'o._id': 10});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ secondDoc = oplog.findOne({ns: 'test.user', op: 'd', 'o._id': 20});
+ checkOplog(secondDoc, lsid, uid, txnNumber, 1, firstDoc.ts, firstDoc.t);
+
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (upsert)
+
+ incrementTxnNumber();
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 40},
+ update: {$set: {x: 1}},
+ new: true,
+ upsert: true,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
+ };
+
+ assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'i', 'o._id': 40});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ assert.eq(null, firstDoc.preImageTs);
+ assert.eq(null, firstDoc.postImageTs);
+
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ var lastTs = firstDoc.ts;
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (in-place update, return pre-image)
+
+ incrementTxnNumber();
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 40},
+ update: {$inc: {x: 1}},
+ new: false,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
+ };
+
+ var beforeDoc = mainConn.getDB('test').user.findOne({_id: 40});
+ var res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ assert.eq(null, firstDoc.postImageTs);
+
+ var savedDoc = oplog.findOne(
+ {ns: 'test.user', op: 'n', ts: firstDoc.preImageOpTime.ts, t: firstDoc.preImageOpTime.t});
+ assert.eq(beforeDoc, savedDoc.o);
+
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ lastTs = firstDoc.ts;
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (in-place update, return post-image)
+
+ incrementTxnNumber();
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 40},
+ update: {$inc: {x: 1}},
+ new: true,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
};
- var replTest = new ReplSetTest({nodes: kNodes});
- replTest.startSet();
- replTest.initiate();
+ res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+ var afterDoc = mainConn.getDB('test').user.findOne({_id: 40});
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ assert.eq(null, firstDoc.preImageTs);
+
+ savedDoc = oplog.findOne(
+ {ns: 'test.user', op: 'n', ts: firstDoc.postImageOpTime.ts, t: firstDoc.postImageOpTime.t});
+ assert.eq(afterDoc, savedDoc.o);
+
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ lastTs = firstDoc.ts;
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (replacement update, return pre-image)
+
+ incrementTxnNumber();
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 40},
+ update: {y: 1},
+ new: false,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
+ };
+
+ beforeDoc = mainConn.getDB('test').user.findOne({_id: 40});
+ res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ assert.eq(null, firstDoc.postImageTs);
+
+ savedDoc = oplog.findOne(
+ {ns: 'test.user', op: 'n', ts: firstDoc.preImageOpTime.ts, t: firstDoc.preImageOpTime.t});
+ assert.eq(beforeDoc, savedDoc.o);
+
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ lastTs = firstDoc.ts;
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (replacement update, return post-image)
+
+ incrementTxnNumber();
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 40},
+ update: {z: 1},
+ new: true,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
+ };
+
+ res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+ afterDoc = mainConn.getDB('test').user.findOne({_id: 40});
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ assert.eq(null, firstDoc.preImageTs);
+
+ savedDoc = oplog.findOne(
+ {ns: 'test.user', op: 'n', ts: firstDoc.postImageOpTime.ts, t: firstDoc.postImageOpTime.t});
+ assert.eq(afterDoc, savedDoc.o);
+
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ lastTs = firstDoc.ts;
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (remove, return pre-image)
+
+ incrementTxnNumber();
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 40},
+ remove: true,
+ new: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
+ };
+
+ beforeDoc = mainConn.getDB('test').user.findOne({_id: 40});
+ res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'd', 'o._id': 40, ts: {$gt: lastTs}});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ assert.eq(null, firstDoc.postImageTs);
+
+ savedDoc = oplog.findOne(
+ {ns: 'test.user', op: 'n', ts: firstDoc.preImageOpTime.ts, t: firstDoc.preImageOpTime.t});
+ assert.eq(beforeDoc, savedDoc.o);
+
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ lastTs = firstDoc.ts;
+};
- var priConn = replTest.getPrimary();
- var secConn = replTest.getSecondary();
- secConn.setSlaveOk(true);
+var replTest = new ReplSetTest({nodes: kNodes});
+replTest.startSet();
+replTest.initiate();
- runTests(priConn, priConn, secConn);
+var priConn = replTest.getPrimary();
+var secConn = replTest.getSecondary();
+secConn.setSlaveOk(true);
- replTest.stopSet();
+runTests(priConn, priConn, secConn);
- var st = new ShardingTest({shards: {rs0: {nodes: kNodes}}});
+replTest.stopSet();
- secConn = st.rs0.getSecondary();
- secConn.setSlaveOk(true);
- runTests(st.s, st.rs0.getPrimary(), secConn);
+var st = new ShardingTest({shards: {rs0: {nodes: kNodes}}});
- st.stop();
+secConn = st.rs0.getSecondary();
+secConn.setSlaveOk(true);
+runTests(st.s, st.rs0.getPrimary(), secConn);
+st.stop();
})();
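For orientation, checkOplog above verifies the session metadata that retryable writes stamp on every oplog entry; pulling one entry by hand (a sketch, assuming the priConn primary used in the test) shows the fields involved:

// Sketch: the per-statement session fields asserted by checkOplog above.
var entry = priConn.getDB('local').oplog.rs.findOne({ns: 'test.user', 'o._id': 10});
printjson({
    lsid: entry.lsid,             // {id: <session UUID>, uid: <SHA-256 of the authenticated user>}
    txnNumber: entry.txnNumber,   // transaction number of the retryable write
    stmtId: entry.stmtId,         // statement position within the write command
    prevOpTime: entry.prevOpTime  // {ts, t} link to the previous statement's oplog entry
});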
diff --git a/jstests/sharding/sessions_collection_auto_healing.js b/jstests/sharding/sessions_collection_auto_healing.js
index 8d0c91f9d85..6efb9cf2274 100644
--- a/jstests/sharding/sessions_collection_auto_healing.js
+++ b/jstests/sharding/sessions_collection_auto_healing.js
@@ -1,164 +1,163 @@
load('jstests/libs/sessions_collection.js');
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- var st = new ShardingTest({shards: 0});
- var configSvr = st.configRS.getPrimary();
- var configAdmin = configSvr.getDB("admin");
+var st = new ShardingTest({shards: 0});
+var configSvr = st.configRS.getPrimary();
+var configAdmin = configSvr.getDB("admin");
- var mongos = st.s;
- var mongosAdmin = mongos.getDB("admin");
- var mongosConfig = mongos.getDB("config");
+var mongos = st.s;
+var mongosAdmin = mongos.getDB("admin");
+var mongosConfig = mongos.getDB("config");
- // Test that we can use sessions on the config server before we add any shards.
- {
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(mongos, false, false);
+// Test that we can use sessions on the config server before we add any shards.
+{
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(mongos, false, false);
- assert.commandWorked(configAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(configAdmin.runCommand({startSession: 1}));
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(mongos, false, false);
- }
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(mongos, false, false);
+}
- // Test that we can use sessions on a mongos before we add any shards.
- {
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(mongos, false, false);
+// Test that we can use sessions on a mongos before we add any shards.
+{
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(mongos, false, false);
- assert.commandWorked(mongosAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(mongosAdmin.runCommand({startSession: 1}));
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(mongos, false, false);
- }
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(mongos, false, false);
+}
- // Test that the config server does not create the sessions collection
- // if there are not any shards.
- {
- assert.eq(mongosConfig.shards.count(), 0);
+// Test that the config server does not create the sessions collection
+// if there are no shards.
+{
+ assert.eq(mongosConfig.shards.count(), 0);
- assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(configSvr, false, false);
- }
+ validateSessionsCollection(configSvr, false, false);
+}
- // Test-wide: add a shard
- var rs = new ReplSetTest({nodes: 1});
- rs.startSet({shardsvr: ""});
- rs.initiate();
+// Test-wide: add a shard
+var rs = new ReplSetTest({nodes: 1});
+rs.startSet({shardsvr: ""});
+rs.initiate();
- var shard = rs.getPrimary();
- var shardAdmin = shard.getDB("admin");
- var shardConfig = shard.getDB("config");
+var shard = rs.getPrimary();
+var shardAdmin = shard.getDB("admin");
+var shardConfig = shard.getDB("config");
- // Test that we can add this shard, even with a local config.system.sessions collection,
- // and test that we drop its local collection
- {
- shardConfig.system.sessions.insert({"hey": "you"});
- validateSessionsCollection(shard, true, false);
+// Test that we can add this shard, even with a local config.system.sessions collection,
+// and that its local collection gets dropped.
+{
+ shardConfig.system.sessions.insert({"hey": "you"});
+ validateSessionsCollection(shard, true, false);
- assert.commandWorked(mongosAdmin.runCommand({addShard: rs.getURL()}));
- assert.eq(mongosConfig.shards.count(), 1);
- validateSessionsCollection(shard, false, false);
- }
+ assert.commandWorked(mongosAdmin.runCommand({addShard: rs.getURL()}));
+ assert.eq(mongosConfig.shards.count(), 1);
+ validateSessionsCollection(shard, false, false);
+}
- // Test that we can use sessions on a shard before the sessions collection
- // is set up by the config servers.
- {
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(shard, false, false);
+// Test that we can use sessions on a shard before the sessions collection
+// is set up by the config servers.
+{
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
- assert.commandWorked(shardAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(shardAdmin.runCommand({startSession: 1}));
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(shard, false, false);
- }
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
+}
- // Test that we can use sessions from a mongos before the sessions collection
- // is set up by the config servers.
- {
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(shard, false, false);
- validateSessionsCollection(mongos, false, false);
+// Test that we can use sessions from a mongos before the sessions collection
+// is set up by the config servers.
+{
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
+ validateSessionsCollection(mongos, false, false);
- assert.commandWorked(mongosAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(mongosAdmin.runCommand({startSession: 1}));
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(shard, false, false);
- validateSessionsCollection(mongos, false, false);
- }
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
+ validateSessionsCollection(mongos, false, false);
+}
- // Test that if we do a refresh (write) from a shard server while there
- // is no sessions collection, it does not create the sessions collection.
- {
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(shard, false, false);
+// Test that if we do a refresh (write) from a shard server while there
+// is no sessions collection, it does not create the sessions collection.
+{
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
- assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(shard, false, false);
- }
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
+}
- // Test that a refresh on the config servers once there are shards creates
- // the sessions collection on a shard.
- {
- validateSessionsCollection(shard, false, false);
+// Test that a refresh on the config servers once there are shards creates
+// the sessions collection on a shard.
+{
+ validateSessionsCollection(shard, false, false);
- assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(shard, true, true);
+ validateSessionsCollection(shard, true, true);
- // We will have two sessions because of the session used in the shardCollection's retryable
- // write to shard the sessions collection. It will disappear after we run the refresh
- // function on the shard.
- assert.eq(shardConfig.system.sessions.count(), 2, "did not flush config's sessions");
+ // We will have two sessions because of the session used in the shardCollection's retryable
+ // write to shard the sessions collection. It will disappear after we run the refresh
+ // function on the shard.
+ assert.eq(shardConfig.system.sessions.count(), 2, "did not flush config's sessions");
- // Now, if we do refreshes on the other servers, their in-mem records will
- // be written to the collection.
- assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- assert.eq(shardConfig.system.sessions.count(), 2, "did not flush shard's sessions");
+ // Now, if we do refreshes on the other servers, their in-mem records will
+ // be written to the collection.
+ assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.eq(shardConfig.system.sessions.count(), 2, "did not flush shard's sessions");
- assert.commandWorked(mongosAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- assert.eq(shardConfig.system.sessions.count(), 4, "did not flush mongos' sessions");
- }
+ assert.commandWorked(mongosAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.eq(shardConfig.system.sessions.count(), 4, "did not flush mongos' sessions");
+}
- // Test that if we drop the index on the sessions collection, only a refresh on the config
- // server heals it.
- {
- assert.commandWorked(shardConfig.system.sessions.dropIndex({lastUse: 1}));
+// Test that if we drop the index on the sessions collection, only a refresh on the config
+// server heals it.
+{
+ assert.commandWorked(shardConfig.system.sessions.dropIndex({lastUse: 1}));
- validateSessionsCollection(shard, true, false);
+ validateSessionsCollection(shard, true, false);
- assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(shard, true, true);
+ assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(shard, true, true);
- assert.commandWorked(shardConfig.system.sessions.dropIndex({lastUse: 1}));
+ assert.commandWorked(shardConfig.system.sessions.dropIndex({lastUse: 1}));
- assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(shard, true, false);
- }
+ assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(shard, true, false);
+}
- // Test that if we drop the collection, it will be recreated only by the config server.
- {
- assertDropCollection(mongosConfig, "system.sessions");
- validateSessionsCollection(shard, false, false);
+// Test that if we drop the collection, it will be recreated only by the config server.
+{
+ assertDropCollection(mongosConfig, "system.sessions");
+ validateSessionsCollection(shard, false, false);
- assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(shard, false, false);
+ assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(shard, false, false);
- assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(shard, true, true);
- }
-
- st.stop();
- rs.stopSet();
+ assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(shard, true, true);
+}
+st.stop();
+rs.stopSet();
})();
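A note on validateSessionsCollection, which the test above calls with two boolean expectations: judging from the calls that bracket the dropIndex({lastUse: 1}) steps, the flags express "config.system.sessions exists" and "its lastUse TTL index exists". A rough sketch of an equivalent check (this is an assumption about the helper in jstests/libs/sessions_collection.js, not its actual code):

// Sketch: roughly the (collection exists, lastUse index exists) expectations used above.
function checkSessionsCollectionState(conn, expectCollection, expectIndex) {
    var config = conn.getDB("config");
    var exists = config.getCollectionNames().indexOf("system.sessions") !== -1;
    assert.eq(expectCollection, exists);
    if (exists) {
        var hasLastUseIndex = config.system.sessions.getIndexes().some(function(spec) {
            return tojson(spec.key) === tojson({lastUse: 1});
        });
        assert.eq(expectIndex, hasLastUseIndex);
    }
}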
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index a5dfd4aca1c..5d74e86728a 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -1,47 +1,50 @@
/**
-* this tests some of the ground work
-*/
+ * This tests some of the groundwork for sharding a collection.
+ */
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2});
- var db = s.getDB("test");
+var s = new ShardingTest({shards: 2});
+var db = s.getDB("test");
- assert.writeOK(db.foo.insert({num: 1, name: "eliot"}));
- assert.writeOK(db.foo.insert({num: 2, name: "sara"}));
- assert.writeOK(db.foo.insert({num: -1, name: "joe"}));
+assert.writeOK(db.foo.insert({num: 1, name: "eliot"}));
+assert.writeOK(db.foo.insert({num: 2, name: "sara"}));
+assert.writeOK(db.foo.insert({num: -1, name: "joe"}));
- assert.commandWorked(db.foo.ensureIndex({num: 1}));
+assert.commandWorked(db.foo.ensureIndex({num: 1}));
- assert.eq(3, db.foo.find().length(), "A");
+assert.eq(3, db.foo.find().length(), "A");
- const shardCommand = {shardcollection: "test.foo", key: {num: 1}};
+const shardCommand = {
+ shardcollection: "test.foo",
+ key: {num: 1}
+};
- assert.commandFailed(s.s0.adminCommand(shardCommand));
+assert.commandFailed(s.s0.adminCommand(shardCommand));
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.eq(3, db.foo.find().length(), "after partitioning count failed");
+assert.eq(3, db.foo.find().length(), "after partitioning count failed");
- assert.commandWorked(s.s0.adminCommand(shardCommand));
- assert.commandFailed(s.s0.adminCommand({shardCollection: 'test', key: {x: 1}}));
- assert.commandFailed(s.s0.adminCommand({shardCollection: '.foo', key: {x: 1}}));
+assert.commandWorked(s.s0.adminCommand(shardCommand));
+assert.commandFailed(s.s0.adminCommand({shardCollection: 'test', key: {x: 1}}));
+assert.commandFailed(s.s0.adminCommand({shardCollection: '.foo', key: {x: 1}}));
- var cconfig = s.config.collections.findOne({_id: "test.foo"});
- assert(cconfig, "No collection entry found for test.foo");
+var cconfig = s.config.collections.findOne({_id: "test.foo"});
+assert(cconfig, "No collection entry found for test.foo");
- delete cconfig.lastmod;
- delete cconfig.dropped;
- delete cconfig.lastmodEpoch;
- delete cconfig.uuid;
+delete cconfig.lastmod;
+delete cconfig.dropped;
+delete cconfig.lastmodEpoch;
+delete cconfig.uuid;
- assert.eq(cconfig, {_id: "test.foo", key: {num: 1}, unique: false}, "Sharded content mismatch");
+assert.eq(cconfig, {_id: "test.foo", key: {num: 1}, unique: false}, "Sharded content mismatch");
- s.config.collections.find().forEach(printjson);
+s.config.collections.find().forEach(printjson);
- assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "num chunks A");
- assert.eq(3, db.foo.find().length(), "after sharding, no split count failed");
+assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "num chunks A");
+assert.eq(3, db.foo.find().length(), "after sharding, no split count failed");
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index 527fad07be5..fd8d8657af6 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -1,240 +1,225 @@
(function() {
- 'use strict';
-
- function placeCheck(num) {
- print("shard2 step: " + num);
- }
-
- function printAll() {
- print("****************");
- db.foo.find().forEach(printjsononeline);
- print("++++++++++++++++++");
- primary.foo.find().forEach(printjsononeline);
- print("++++++++++++++++++");
- secondary.foo.find().forEach(printjsononeline);
- print("---------------------");
- }
-
- var s = new ShardingTest({shards: 2});
- var db = s.getDB("test");
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
- assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check 1");
-
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 0}}));
- assert.eq(2, s.config.chunks.count({"ns": "test.foo"}), "should be 2 shards");
- var chunks = s.config.chunks.find({"ns": "test.foo"}).toArray();
- assert.eq(chunks[0].shard, chunks[1].shard, "server should be the same after a split");
-
- assert.writeOK(db.foo.save({num: 1, name: "eliot"}));
- assert.writeOK(db.foo.save({num: 2, name: "sara"}));
- assert.writeOK(db.foo.save({num: -1, name: "joe"}));
-
- assert.eq(3,
- s.getPrimaryShard("test").getDB("test").foo.find().length(),
- "not right directly to db A");
- assert.eq(3, db.foo.find().length(), "not right on shard");
-
- var primary = s.getPrimaryShard("test").getDB("test");
- var secondary = s.getOther(primary).getDB("test");
-
- assert.eq(3, primary.foo.find().length(), "primary wrong B");
- assert.eq(0, secondary.foo.find().length(), "secondary wrong C");
- assert.eq(3, db.foo.find().sort({num: 1}).length());
-
- placeCheck(2);
-
- // Test move shard to unexisting shard
- assert.commandFailedWithCode(
- s.s0.adminCommand(
- {movechunk: "test.foo", find: {num: 1}, to: "adasd", _waitForDelete: true}),
- ErrorCodes.ShardNotFound);
-
- assert.commandWorked(s.s0.adminCommand({
- movechunk: "test.foo",
- find: {num: 1},
- to: secondary.getMongo().name,
- _waitForDelete: true
- }));
- assert.eq(2, secondary.foo.find().length(), "secondary should have 2 after move shard");
- assert.eq(1, primary.foo.find().length(), "primary should only have 1 after move shard");
-
- assert.eq(2,
- s.config.chunks.count({"ns": "test.foo"}),
- "still should have 2 shards after move not:" + s.getChunksString());
- var chunks = s.config.chunks.find({"ns": "test.foo"}).toArray();
- assert.neq(chunks[0].shard, chunks[1].shard, "servers should NOT be the same after the move");
-
- placeCheck(3);
-
- // Test inserts go to right server/shard
- assert.writeOK(db.foo.save({num: 3, name: "bob"}));
- assert.eq(1, primary.foo.find().length(), "after move insert go wrong place?");
- assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
-
- assert.writeOK(db.foo.save({num: -2, name: "funny man"}));
- assert.eq(2, primary.foo.find().length(), "after move insert go wrong place?");
- assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
-
- assert.writeOK(db.foo.save({num: 0, name: "funny guy"}));
- assert.eq(2, primary.foo.find().length(), "boundary A");
- assert.eq(4, secondary.foo.find().length(), "boundary B");
-
- placeCheck(4);
-
- // findOne
- assert.eq("eliot", db.foo.findOne({num: 1}).name);
- assert.eq("funny man", db.foo.findOne({num: -2}).name);
-
- // getAll
- function sumQuery(c) {
- var sum = 0;
- c.toArray().forEach(function(z) {
- sum += z.num;
- });
- return sum;
- }
- assert.eq(6, db.foo.find().length(), "sharded query 1");
- assert.eq(3, sumQuery(db.foo.find()), "sharded query 2");
+'use strict';
+
+function placeCheck(num) {
+ print("shard2 step: " + num);
+}
+
+function printAll() {
+ print("****************");
+ db.foo.find().forEach(printjsononeline);
+ print("++++++++++++++++++");
+ primary.foo.find().forEach(printjsononeline);
+ print("++++++++++++++++++");
+ secondary.foo.find().forEach(printjsononeline);
+ print("---------------------");
+}
+
+var s = new ShardingTest({shards: 2});
+var db = s.getDB("test");
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
+assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check 1");
+
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 0}}));
+assert.eq(2, s.config.chunks.count({"ns": "test.foo"}), "should be 2 shards");
+var chunks = s.config.chunks.find({"ns": "test.foo"}).toArray();
+assert.eq(chunks[0].shard, chunks[1].shard, "server should be the same after a split");
+
+assert.writeOK(db.foo.save({num: 1, name: "eliot"}));
+assert.writeOK(db.foo.save({num: 2, name: "sara"}));
+assert.writeOK(db.foo.save({num: -1, name: "joe"}));
+
+assert.eq(
+ 3, s.getPrimaryShard("test").getDB("test").foo.find().length(), "not right directly to db A");
+assert.eq(3, db.foo.find().length(), "not right on shard");
+
+var primary = s.getPrimaryShard("test").getDB("test");
+var secondary = s.getOther(primary).getDB("test");
+
+assert.eq(3, primary.foo.find().length(), "primary wrong B");
+assert.eq(0, secondary.foo.find().length(), "secondary wrong C");
+assert.eq(3, db.foo.find().sort({num: 1}).length());
+
+placeCheck(2);
+
+// Test moving a chunk to a nonexistent shard
+assert.commandFailedWithCode(
+ s.s0.adminCommand({movechunk: "test.foo", find: {num: 1}, to: "adasd", _waitForDelete: true}),
+ ErrorCodes.ShardNotFound);
+
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {num: 1}, to: secondary.getMongo().name, _waitForDelete: true}));
+assert.eq(2, secondary.foo.find().length(), "secondary should have 2 after move shard");
+assert.eq(1, primary.foo.find().length(), "primary should only have 1 after move shard");
+
+assert.eq(2,
+ s.config.chunks.count({"ns": "test.foo"}),
+ "still should have 2 shards after move not:" + s.getChunksString());
+var chunks = s.config.chunks.find({"ns": "test.foo"}).toArray();
+assert.neq(chunks[0].shard, chunks[1].shard, "servers should NOT be the same after the move");
+
+placeCheck(3);
+
+// Test that inserts go to the right server/shard
+assert.writeOK(db.foo.save({num: 3, name: "bob"}));
+assert.eq(1, primary.foo.find().length(), "after move insert go wrong place?");
+assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
+
+assert.writeOK(db.foo.save({num: -2, name: "funny man"}));
+assert.eq(2, primary.foo.find().length(), "after move insert go wrong place?");
+assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
+
+assert.writeOK(db.foo.save({num: 0, name: "funny guy"}));
+assert.eq(2, primary.foo.find().length(), "boundary A");
+assert.eq(4, secondary.foo.find().length(), "boundary B");
+
+placeCheck(4);
+
+// findOne
+assert.eq("eliot", db.foo.findOne({num: 1}).name);
+assert.eq("funny man", db.foo.findOne({num: -2}).name);
+
+// getAll
+function sumQuery(c) {
+ var sum = 0;
+ c.toArray().forEach(function(z) {
+ sum += z.num;
+ });
+ return sum;
+}
+assert.eq(6, db.foo.find().length(), "sharded query 1");
+assert.eq(3, sumQuery(db.foo.find()), "sharded query 2");
- placeCheck(5);
+placeCheck(5);
- // sort by num
+// sort by num
- assert.eq(3, sumQuery(db.foo.find().sort({num: 1})), "sharding query w/sort 1");
- assert.eq(3, sumQuery(db.foo.find().sort({num: -1})), "sharding query w/sort 2");
+assert.eq(3, sumQuery(db.foo.find().sort({num: 1})), "sharding query w/sort 1");
+assert.eq(3, sumQuery(db.foo.find().sort({num: -1})), "sharding query w/sort 2");
- assert.eq(
- "funny man", db.foo.find().sort({num: 1})[0].name, "sharding query w/sort 3 order wrong");
- assert.eq(-2, db.foo.find().sort({num: 1})[0].num, "sharding query w/sort 4 order wrong");
+assert.eq("funny man", db.foo.find().sort({num: 1})[0].name, "sharding query w/sort 3 order wrong");
+assert.eq(-2, db.foo.find().sort({num: 1})[0].num, "sharding query w/sort 4 order wrong");
- assert.eq("bob", db.foo.find().sort({num: -1})[0].name, "sharding query w/sort 5 order wrong");
- assert.eq(3, db.foo.find().sort({num: -1})[0].num, "sharding query w/sort 6 order wrong");
+assert.eq("bob", db.foo.find().sort({num: -1})[0].name, "sharding query w/sort 5 order wrong");
+assert.eq(3, db.foo.find().sort({num: -1})[0].num, "sharding query w/sort 6 order wrong");
- placeCheck(6);
+placeCheck(6);
- // Sort by name
- function getNames(c) {
- return c.toArray().map(function(z) {
- return z.name;
- });
- }
- var correct = getNames(db.foo.find()).sort();
- assert.eq(correct, getNames(db.foo.find().sort({name: 1})));
- correct = correct.reverse();
- assert.eq(correct, getNames(db.foo.find().sort({name: -1})));
-
- assert.eq(3, sumQuery(db.foo.find().sort({name: 1})), "sharding query w/non-shard sort 1");
- assert.eq(3, sumQuery(db.foo.find().sort({name: -1})), "sharding query w/non-shard sort 2");
-
- // sort by num multiple shards per server
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 2}}));
- assert.eq("funny man",
- db.foo.find().sort({num: 1})[0].name,
- "sharding query w/sort and another split 1 order wrong");
- assert.eq("bob",
- db.foo.find().sort({num: -1})[0].name,
- "sharding query w/sort and another split 2 order wrong");
- assert.eq("funny man",
- db.foo.find({num: {$lt: 100}}).sort({num: 1}).arrayAccess(0).name,
- "sharding query w/sort and another split 3 order wrong");
-
- placeCheck(7);
-
- db.foo.find().sort({_id: 1}).forEach(function(z) {
- print(z._id);
+// Sort by name
+function getNames(c) {
+ return c.toArray().map(function(z) {
+ return z.name;
});
-
- var zzz = db.foo.find().explain("executionStats").executionStats;
- assert.eq(0, zzz.totalKeysExamined, "EX1a");
- assert.eq(6, zzz.nReturned, "EX1b");
- assert.eq(6, zzz.totalDocsExamined, "EX1c");
-
- zzz = db.foo.find().hint({_id: 1}).sort({_id: 1}).explain("executionStats").executionStats;
- assert.eq(6, zzz.totalKeysExamined, "EX2a");
- assert.eq(6, zzz.nReturned, "EX2b");
- assert.eq(6, zzz.totalDocsExamined, "EX2c");
-
- // getMore
- assert.eq(4, db.foo.find().limit(-4).toArray().length, "getMore 1");
- function countCursor(c) {
- var num = 0;
- while (c.hasNext()) {
- c.next();
- num++;
- }
- return num;
+}
+var correct = getNames(db.foo.find()).sort();
+assert.eq(correct, getNames(db.foo.find().sort({name: 1})));
+correct = correct.reverse();
+assert.eq(correct, getNames(db.foo.find().sort({name: -1})));
+
+assert.eq(3, sumQuery(db.foo.find().sort({name: 1})), "sharding query w/non-shard sort 1");
+assert.eq(3, sumQuery(db.foo.find().sort({name: -1})), "sharding query w/non-shard sort 2");
+
+// sort by num multiple shards per server
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 2}}));
+assert.eq("funny man",
+ db.foo.find().sort({num: 1})[0].name,
+ "sharding query w/sort and another split 1 order wrong");
+assert.eq("bob",
+ db.foo.find().sort({num: -1})[0].name,
+ "sharding query w/sort and another split 2 order wrong");
+assert.eq("funny man",
+ db.foo.find({num: {$lt: 100}}).sort({num: 1}).arrayAccess(0).name,
+ "sharding query w/sort and another split 3 order wrong");
+
+placeCheck(7);
+
+db.foo.find().sort({_id: 1}).forEach(function(z) {
+ print(z._id);
+});
+
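+// With no hint, each shard answers with a collection scan, so no index keys are examined;
+// hinting {_id: 1} below forces an index scan and totalKeysExamined rises to 6.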
+var zzz = db.foo.find().explain("executionStats").executionStats;
+assert.eq(0, zzz.totalKeysExamined, "EX1a");
+assert.eq(6, zzz.nReturned, "EX1b");
+assert.eq(6, zzz.totalDocsExamined, "EX1c");
+
+zzz = db.foo.find().hint({_id: 1}).sort({_id: 1}).explain("executionStats").executionStats;
+assert.eq(6, zzz.totalKeysExamined, "EX2a");
+assert.eq(6, zzz.nReturned, "EX2b");
+assert.eq(6, zzz.totalDocsExamined, "EX2c");
+
+// getMore
+assert.eq(4, db.foo.find().limit(-4).toArray().length, "getMore 1");
+function countCursor(c) {
+ var num = 0;
+ while (c.hasNext()) {
+ c.next();
+ num++;
}
- assert.eq(6, countCursor(db.foo.find()._exec()), "getMore 2");
- assert.eq(6, countCursor(db.foo.find().batchSize(1)._exec()), "getMore 3");
-
- // find by non-shard-key
- db.foo.find().forEach(function(z) {
- var y = db.foo.findOne({_id: z._id});
- assert(y, "_id check 1 : " + tojson(z));
- assert.eq(z.num, y.num, "_id check 2 : " + tojson(z));
- });
+ return num;
+}
+assert.eq(6, countCursor(db.foo.find()._exec()), "getMore 2");
+assert.eq(6, countCursor(db.foo.find().batchSize(1)._exec()), "getMore 3");
+
+// find by non-shard-key
+db.foo.find().forEach(function(z) {
+ var y = db.foo.findOne({_id: z._id});
+ assert(y, "_id check 1 : " + tojson(z));
+ assert.eq(z.num, y.num, "_id check 2 : " + tojson(z));
+});
- // update
- var person = db.foo.findOne({num: 3});
- assert.eq("bob", person.name, "update setup 1");
- person.name = "bob is gone";
- db.foo.update({num: 3}, person);
- person = db.foo.findOne({num: 3});
- assert.eq("bob is gone", person.name, "update test B");
+// update
+var person = db.foo.findOne({num: 3});
+assert.eq("bob", person.name, "update setup 1");
+person.name = "bob is gone";
+db.foo.update({num: 3}, person);
+person = db.foo.findOne({num: 3});
+assert.eq("bob is gone", person.name, "update test B");
- // remove
- assert(db.foo.findOne({num: 3}) != null, "remove test A");
- db.foo.remove({num: 3});
- assert.isnull(db.foo.findOne({num: 3}), "remove test B");
+// remove
+assert(db.foo.findOne({num: 3}) != null, "remove test A");
+db.foo.remove({num: 3});
+assert.isnull(db.foo.findOne({num: 3}), "remove test B");
- db.foo.save({num: 3, name: "eliot2"});
- person = db.foo.findOne({num: 3});
- assert(person, "remove test C");
- assert.eq(person.name, "eliot2");
+db.foo.save({num: 3, name: "eliot2"});
+person = db.foo.findOne({num: 3});
+assert(person, "remove test C");
+assert.eq(person.name, "eliot2");
- db.foo.remove({_id: person._id});
- assert.isnull(db.foo.findOne({num: 3}), "remove test E");
+db.foo.remove({_id: person._id});
+assert.isnull(db.foo.findOne({num: 3}), "remove test E");
- placeCheck(8);
+placeCheck(8);
- // more update stuff
+// more update stuff
- printAll();
- var total = db.foo.find().count();
- var res = assert.writeOK(db.foo.update({}, {$inc: {x: 1}}, false, true));
- printAll();
- assert.eq(total, res.nModified, res.toString());
+printAll();
+var total = db.foo.find().count();
+var res = assert.writeOK(db.foo.update({}, {$inc: {x: 1}}, false, true));
+printAll();
+assert.eq(total, res.nModified, res.toString());
- res = db.foo.update({num: -1}, {$inc: {x: 1}}, false, true);
- assert.eq(1, res.nModified, res.toString());
+res = db.foo.update({num: -1}, {$inc: {x: 1}}, false, true);
+assert.eq(1, res.nModified, res.toString());
- // ---- move all to the secondary
+// ---- move all to the secondary
- assert.eq(2, s.onNumShards("foo"), "on 2 shards");
+assert.eq(2, s.onNumShards("foo"), "on 2 shards");
- secondary.foo.insert({num: -3});
+secondary.foo.insert({num: -3});
- assert.commandWorked(s.s0.adminCommand({
- movechunk: "test.foo",
- find: {num: -2},
- to: secondary.getMongo().name,
- _waitForDelete: true
- }));
- assert.eq(1, s.onNumShards("foo"), "on 1 shards");
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {num: -2}, to: secondary.getMongo().name, _waitForDelete: true}));
+assert.eq(1, s.onNumShards("foo"), "on 1 shards");
- assert.commandWorked(s.s0.adminCommand({
- movechunk: "test.foo",
- find: {num: -2},
- to: primary.getMongo().name,
- _waitForDelete: true
- }));
- assert.eq(2, s.onNumShards("foo"), "on 2 shards again");
- assert.eq(3, s.config.chunks.count({"ns": "test.foo"}), "only 3 chunks");
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {num: -2}, to: primary.getMongo().name, _waitForDelete: true}));
+assert.eq(2, s.onNumShards("foo"), "on 2 shards again");
+assert.eq(3, s.config.chunks.count({"ns": "test.foo"}), "only 3 chunks");
- print("YO : " + tojson(db.runCommand("serverStatus")));
+print("YO : " + tojson(db.runCommand("serverStatus")));
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index b0a2c0c72c8..d0957a1c45d 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -1,195 +1,194 @@
(function() {
- // Include helpers for analyzing explain output.
- load("jstests/libs/analyze_plan.js");
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
- var s = new ShardingTest({name: "shard3", shards: 2, mongos: 2, other: {enableBalancer: true}});
+var s = new ShardingTest({name: "shard3", shards: 2, mongos: 2, other: {enableBalancer: true}});
- s2 = s._mongos[1];
+s2 = s._mongos[1];
- db = s.getDB("test");
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
+db = s.getDB("test");
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
- // Ensure that the second mongos will see the movePrimary
- s.configRS.awaitLastOpCommitted();
+// Ensure that the second mongos will see the movePrimary
+s.configRS.awaitLastOpCommitted();
- assert(sh.getBalancerState(), "A1");
+assert(sh.getBalancerState(), "A1");
- s.stopBalancer();
- assert(!sh.getBalancerState(), "A2");
+s.stopBalancer();
+assert(!sh.getBalancerState(), "A2");
- s.startBalancer();
- assert(sh.getBalancerState(), "A3");
+s.startBalancer();
+assert(sh.getBalancerState(), "A3");
- s.stopBalancer();
- assert(!sh.getBalancerState(), "A4");
+s.stopBalancer();
+assert(!sh.getBalancerState(), "A4");
- s.config.databases.find().forEach(printjson);
+s.config.databases.find().forEach(printjson);
- a = s.getDB("test").foo;
- b = s2.getDB("test").foo;
+a = s.getDB("test").foo;
+b = s2.getDB("test").foo;
- primary = s.getPrimaryShard("test").getDB("test").foo;
- secondary = s.getOther(primary.name).getDB("test").foo;
+primary = s.getPrimaryShard("test").getDB("test").foo;
+secondary = s.getOther(primary.name).getDB("test").foo;
- a.save({num: 1});
- a.save({num: 2});
- a.save({num: 3});
+a.save({num: 1});
+a.save({num: 2});
+a.save({num: 3});
- assert.eq(3, a.find().toArray().length, "normal A");
- assert.eq(3, b.find().toArray().length, "other A");
+assert.eq(3, a.find().toArray().length, "normal A");
+assert.eq(3, b.find().toArray().length, "other A");
- assert.eq(3, primary.count(), "p1");
- assert.eq(0, secondary.count(), "s1");
-
- assert.eq(1, s.onNumShards("foo"), "on 1 shards");
+assert.eq(3, primary.count(), "p1");
+assert.eq(0, secondary.count(), "s1");
+
+assert.eq(1, s.onNumShards("foo"), "on 1 shards");
- s.adminCommand({split: "test.foo", middle: {num: 2}});
- s.adminCommand({
- movechunk: "test.foo",
- find: {num: 3},
- to: s.getOther(s.getPrimaryShard("test")).name,
- _waitForDelete: true
- });
-
- assert(primary.find().toArray().length > 0, "blah 1");
- assert(secondary.find().toArray().length > 0, "blah 2");
- assert.eq(3, primary.find().itcount() + secondary.find().itcount(), "blah 3");
-
- assert.eq(3, a.find().toArray().length, "normal B");
- assert.eq(3, b.find().toArray().length, "other B");
-
- printjson(primary._db._adminCommand("shardingState"));
-
- // --- filtering ---
-
- function doCounts(name, total, onlyItCounts) {
- total = total || (primary.count() + secondary.count());
- if (!onlyItCounts)
- assert.eq(total, a.count(), name + " count");
- assert.eq(total, a.find().sort({n: 1}).itcount(), name + " itcount - sort n");
- assert.eq(total, a.find().itcount(), name + " itcount");
- assert.eq(total, a.find().sort({_id: 1}).itcount(), name + " itcount - sort _id");
- return total;
- }
-
- var total = doCounts("before wrong save");
- assert.writeOK(secondary.insert({_id: 111, num: -3}));
- doCounts("after wrong save", total, true);
- e = a.find().explain("executionStats").executionStats;
- assert.eq(3, e.nReturned, "ex1");
- assert.eq(0, e.totalKeysExamined, "ex2");
- assert.eq(4, e.totalDocsExamined, "ex3");
-
- var chunkSkips = 0;
- for (var shard in e.executionStages.shards) {
- var theShard = e.executionStages.shards[shard];
- chunkSkips += getChunkSkips(theShard.executionStages);
- }
- assert.eq(1, chunkSkips, "ex4");
-
- // SERVER-4612
- // make sure idhack obeys chunks
- x = a.findOne({_id: 111});
- assert(!x, "idhack didn't obey chunk boundaries " + tojson(x));
-
- // --- move all to 1 ---
- print("MOVE ALL TO 1");
-
- assert.eq(2, s.onNumShards("foo"), "on 2 shards");
- s.printCollectionInfo("test.foo");
-
- assert(a.findOne({num: 1}));
- assert(b.findOne({num: 1}));
-
- print("GOING TO MOVE");
- assert(a.findOne({num: 1}), "pre move 1");
- s.printCollectionInfo("test.foo");
- myto = s.getOther(s.getPrimaryShard("test")).name;
- print("counts before move: " + tojson(s.shardCounts("foo")));
- s.adminCommand({movechunk: "test.foo", find: {num: 1}, to: myto, _waitForDelete: true});
- print("counts after move: " + tojson(s.shardCounts("foo")));
- s.printCollectionInfo("test.foo");
- assert.eq(1, s.onNumShards("foo"), "on 1 shard again");
- assert(a.findOne({num: 1}), "post move 1");
- assert(b.findOne({num: 1}), "post move 2");
-
- print("*** drop");
-
- s.printCollectionInfo("test.foo", "before drop");
- a.drop();
- s.printCollectionInfo("test.foo", "after drop");
-
- assert.eq(0, a.count(), "a count after drop");
- assert.eq(0, b.count(), "b count after drop");
-
- s.printCollectionInfo("test.foo", "after counts");
-
- assert.eq(0, primary.count(), "p count after drop");
- assert.eq(0, secondary.count(), "s count after drop");
-
- print("*** dropDatabase setup");
-
- s.printShardingStatus();
- s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
- a.save({num: 2});
- a.save({num: 3});
- s.adminCommand({split: "test.foo", middle: {num: 2}});
- s.adminCommand({
- movechunk: "test.foo",
- find: {num: 3},
- to: s.getOther(s.getPrimaryShard("test")).name,
- _waitForDelete: true
- });
- s.printShardingStatus();
-
- s.printCollectionInfo("test.foo", "after dropDatabase setup");
- doCounts("after dropDatabase setup2");
- s.printCollectionInfo("test.foo", "after dropDatabase setup3");
-
- print("*** ready to call dropDatabase");
- res = s.getDB("test").dropDatabase();
- assert.eq(1, res.ok, "dropDatabase failed : " + tojson(res));
- // Waiting for SERVER-2253
- assert.eq(0,
- s.config.databases.count({_id: "test"}),
- "database 'test' was dropped but still appears in configDB");
-
- s.printShardingStatus();
- s.printCollectionInfo("test.foo", "after dropDatabase call 1");
- assert.eq(0, doCounts("after dropDatabase called"));
-
- // ---- retry commands SERVER-1471 ----
-
- s.adminCommand({enablesharding: "test2"});
- s.ensurePrimaryShard('test2', s.shard0.shardName);
- s.adminCommand({shardcollection: "test2.foo", key: {num: 1}});
- dba = s.getDB("test2");
- dbb = s2.getDB("test2");
- dba.foo.save({num: 1});
- dba.foo.save({num: 2});
- dba.foo.save({num: 3});
-
- assert.eq(1, s.onNumShards("foo", "test2"), "B on 1 shards");
- assert.eq(3, dba.foo.count(), "Ba");
- assert.eq(3, dbb.foo.count(), "Bb");
-
- s.adminCommand({split: "test2.foo", middle: {num: 2}});
- s.adminCommand({
- movechunk: "test2.foo",
- find: {num: 3},
- to: s.getOther(s.getPrimaryShard("test2")).name,
- _waitForDelete: true
- });
-
- assert.eq(2, s.onNumShards("foo", "test2"), "B on 2 shards");
-
- x = dba.foo.stats();
- printjson(x);
- y = dbb.foo.stats();
- printjson(y);
-
- s.stop();
+s.adminCommand({split: "test.foo", middle: {num: 2}});
+s.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
+
+assert(primary.find().toArray().length > 0, "blah 1");
+assert(secondary.find().toArray().length > 0, "blah 2");
+assert.eq(3, primary.find().itcount() + secondary.find().itcount(), "blah 3");
+
+assert.eq(3, a.find().toArray().length, "normal B");
+assert.eq(3, b.find().toArray().length, "other B");
+
+printjson(primary._db._adminCommand("shardingState"));
+// --- filtering ---
+
+function doCounts(name, total, onlyItCounts) {
+ total = total || (primary.count() + secondary.count());
+ if (!onlyItCounts)
+ assert.eq(total, a.count(), name + " count");
+ assert.eq(total, a.find().sort({n: 1}).itcount(), name + " itcount - sort n");
+ assert.eq(total, a.find().itcount(), name + " itcount");
+ assert.eq(total, a.find().sort({_id: 1}).itcount(), name + " itcount - sort _id");
+ return total;
+}
+
+var total = doCounts("before wrong save");
+assert.writeOK(secondary.insert({_id: 111, num: -3}));
+doCounts("after wrong save", total, true);
+e = a.find().explain("executionStats").executionStats;
+assert.eq(3, e.nReturned, "ex1");
+assert.eq(0, e.totalKeysExamined, "ex2");
+assert.eq(4, e.totalDocsExamined, "ex3");
+
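+// The extra examined document is the {_id: 111} orphan inserted directly into the wrong shard
+// above; the shard filter skips it, which is what getChunkSkips() tallies below.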
+var chunkSkips = 0;
+for (var shard in e.executionStages.shards) {
+ var theShard = e.executionStages.shards[shard];
+ chunkSkips += getChunkSkips(theShard.executionStages);
+}
+assert.eq(1, chunkSkips, "ex4");
+
+// SERVER-4612
+// make sure idhack obeys chunks
+x = a.findOne({_id: 111});
+assert(!x, "idhack didn't obey chunk boundaries " + tojson(x));
+
+// --- move all to 1 ---
+print("MOVE ALL TO 1");
+
+assert.eq(2, s.onNumShards("foo"), "on 2 shards");
+s.printCollectionInfo("test.foo");
+
+assert(a.findOne({num: 1}));
+assert(b.findOne({num: 1}));
+
+print("GOING TO MOVE");
+assert(a.findOne({num: 1}), "pre move 1");
+s.printCollectionInfo("test.foo");
+myto = s.getOther(s.getPrimaryShard("test")).name;
+print("counts before move: " + tojson(s.shardCounts("foo")));
+s.adminCommand({movechunk: "test.foo", find: {num: 1}, to: myto, _waitForDelete: true});
+print("counts after move: " + tojson(s.shardCounts("foo")));
+s.printCollectionInfo("test.foo");
+assert.eq(1, s.onNumShards("foo"), "on 1 shard again");
+assert(a.findOne({num: 1}), "post move 1");
+assert(b.findOne({num: 1}), "post move 2");
+
+print("*** drop");
+
+s.printCollectionInfo("test.foo", "before drop");
+a.drop();
+s.printCollectionInfo("test.foo", "after drop");
+
+assert.eq(0, a.count(), "a count after drop");
+assert.eq(0, b.count(), "b count after drop");
+
+s.printCollectionInfo("test.foo", "after counts");
+
+assert.eq(0, primary.count(), "p count after drop");
+assert.eq(0, secondary.count(), "s count after drop");
+
+print("*** dropDatabase setup");
+
+s.printShardingStatus();
+s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
+a.save({num: 2});
+a.save({num: 3});
+s.adminCommand({split: "test.foo", middle: {num: 2}});
+s.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
+s.printShardingStatus();
+
+s.printCollectionInfo("test.foo", "after dropDatabase setup");
+doCounts("after dropDatabase setup2");
+s.printCollectionInfo("test.foo", "after dropDatabase setup3");
+
+print("*** ready to call dropDatabase");
+res = s.getDB("test").dropDatabase();
+assert.eq(1, res.ok, "dropDatabase failed : " + tojson(res));
+// Waiting for SERVER-2253
+assert.eq(0,
+ s.config.databases.count({_id: "test"}),
+ "database 'test' was dropped but still appears in configDB");
+
+s.printShardingStatus();
+s.printCollectionInfo("test.foo", "after dropDatabase call 1");
+assert.eq(0, doCounts("after dropDatabase called"));
+
+// ---- retry commands SERVER-1471 ----
+
+s.adminCommand({enablesharding: "test2"});
+s.ensurePrimaryShard('test2', s.shard0.shardName);
+s.adminCommand({shardcollection: "test2.foo", key: {num: 1}});
+dba = s.getDB("test2");
+dbb = s2.getDB("test2");
+dba.foo.save({num: 1});
+dba.foo.save({num: 2});
+dba.foo.save({num: 3});
+
+assert.eq(1, s.onNumShards("foo", "test2"), "B on 1 shards");
+assert.eq(3, dba.foo.count(), "Ba");
+assert.eq(3, dbb.foo.count(), "Bb");
+
+s.adminCommand({split: "test2.foo", middle: {num: 2}});
+s.adminCommand({
+ movechunk: "test2.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test2")).name,
+ _waitForDelete: true
+});
+
+assert.eq(2, s.onNumShards("foo", "test2"), "B on 2 shards");
+
+x = dba.foo.stats();
+printjson(x);
+y = dbb.foo.stats();
+printjson(y);
+
+s.stop();
})();
diff --git a/jstests/sharding/shard6.js b/jstests/sharding/shard6.js
index e00833ac179..a2b60b77b98 100644
--- a/jstests/sharding/shard6.js
+++ b/jstests/sharding/shard6.js
@@ -1,120 +1,120 @@
// shard6.js
(function() {
- "use strict";
- var summary = "";
-
- var s = new ShardingTest({name: "shard6", shards: 2});
-
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.data", key: {num: 1}});
-
- var version = s.getDB("admin").runCommand({buildinfo: 1}).versionArray;
- var post32 = (version[0] > 4) || ((version[0] == 3) && (version[1] > 2));
-
- var db = s.getDB("test");
-
- function poolStats(where) {
- var total = 0;
- var msg = "poolStats " + where + " ";
- var stats = db.runCommand("connPoolStats");
- for (var h in stats.hosts) {
- if (!stats.hosts.hasOwnProperty(h)) {
- continue;
- }
- var host = stats.hosts[h];
- msg += host.created + " ";
- total += host.created;
- }
- printjson(stats.hosts);
- print("****\n" + msg + "\n*****");
- summary += msg + "\n";
- if (post32) {
- assert.eq(total, stats.totalCreated, "mismatched number of total connections created");
+"use strict";
+var summary = "";
+
+var s = new ShardingTest({name: "shard6", shards: 2});
+
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.data", key: {num: 1}});
+
+var version = s.getDB("admin").runCommand({buildinfo: 1}).versionArray;
+var post32 = (version[0] > 4) || ((version[0] == 3) && (version[1] > 2));
+
+var db = s.getDB("test");
+
+function poolStats(where) {
+ var total = 0;
+ var msg = "poolStats " + where + " ";
+ var stats = db.runCommand("connPoolStats");
+ for (var h in stats.hosts) {
+ if (!stats.hosts.hasOwnProperty(h)) {
+ continue;
}
- return total;
+ var host = stats.hosts[h];
+ msg += host.created + " ";
+ total += host.created;
+ }
+ printjson(stats.hosts);
+ print("****\n" + msg + "\n*****");
+ summary += msg + "\n";
+ if (post32) {
+ assert.eq(total, stats.totalCreated, "mismatched number of total connections created");
}
+ return total;
+}
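+// Rough shape of the connPoolStats fields used above (illustrative; other fields omitted):
+//   {hosts: {"<host:port>": {created: <n>, ...}, ...}, totalCreated: <n>, ok: 1}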
- poolStats("at start");
+poolStats("at start");
- // we want a lot of data, so lets make a 50k string to cheat :)
- var bigString = "this is a big string. ".repeat(50000);
+// we want a lot of data, so let's make a big string (50k repeats) to cheat :)
+var bigString = "this is a big string. ".repeat(50000);
- // ok, now lets insert a some data
- var num = 0;
- for (; num < 100; num++) {
- db.data.save({num: num, bigString: bigString});
- }
+// ok, now let's insert some data
+var num = 0;
+for (; num < 100; num++) {
+ db.data.save({num: num, bigString: bigString});
+}
- assert.eq(100, db.data.find().toArray().length, "basic find after setup");
+assert.eq(100, db.data.find().toArray().length, "basic find after setup");
- poolStats("setup done");
+poolStats("setup done");
- // limit
+// limit
- assert.eq(77, db.data.find().limit(77).itcount(), "limit test 1");
- assert.eq(1, db.data.find().limit(1).itcount(), "limit test 2");
- for (var i = 1; i < 10; i++) {
- assert.eq(i, db.data.find().limit(i).itcount(), "limit test 3a : " + i);
- assert.eq(i, db.data.find().skip(i).limit(i).itcount(), "limit test 3b : " + i);
- poolStats("after loop : " + i);
- }
+assert.eq(77, db.data.find().limit(77).itcount(), "limit test 1");
+assert.eq(1, db.data.find().limit(1).itcount(), "limit test 2");
+for (var i = 1; i < 10; i++) {
+ assert.eq(i, db.data.find().limit(i).itcount(), "limit test 3a : " + i);
+ assert.eq(i, db.data.find().skip(i).limit(i).itcount(), "limit test 3b : " + i);
+ poolStats("after loop : " + i);
+}
- poolStats("limit test done");
+poolStats("limit test done");
- function assertOrder(start, num) {
- var a = db.data.find().skip(start).limit(num).sort({num: 1}).map(function(z) {
- return z.num;
- });
- var c = [];
- for (var i = 0; i < num; i++)
- c.push(start + i);
- assert.eq(c, a, "assertOrder start: " + start + " num: " + num);
- }
+function assertOrder(start, num) {
+ var a = db.data.find().skip(start).limit(num).sort({num: 1}).map(function(z) {
+ return z.num;
+ });
+ var c = [];
+ for (var i = 0; i < num; i++)
+ c.push(start + i);
+ assert.eq(c, a, "assertOrder start: " + start + " num: " + num);
+}
- assertOrder(0, 10);
- assertOrder(5, 10);
+assertOrder(0, 10);
+assertOrder(5, 10);
- poolStats("after checking order");
+poolStats("after checking order");
- function doItCount(skip, sort, batchSize) {
- var c = db.data.find();
- if (skip)
- c.skip(skip);
- if (sort)
- c.sort(sort);
- if (batchSize)
- c.batchSize(batchSize);
- return c.itcount();
- }
+function doItCount(skip, sort, batchSize) {
+ var c = db.data.find();
+ if (skip)
+ c.skip(skip);
+ if (sort)
+ c.sort(sort);
+ if (batchSize)
+ c.batchSize(batchSize);
+ return c.itcount();
+}
- function checkItCount(batchSize) {
- assert.eq(5, doItCount(num - 5, null, batchSize), "skip 1 " + batchSize);
- assert.eq(5, doItCount(num - 5, {num: 1}, batchSize), "skip 2 " + batchSize);
- assert.eq(5, doItCount(num - 5, {_id: 1}, batchSize), "skip 3 " + batchSize);
- assert.eq(0, doItCount(num + 5, {num: 1}, batchSize), "skip 4 " + batchSize);
- assert.eq(0, doItCount(num + 5, {_id: 1}, batchSize), "skip 5 " + batchSize);
- }
+function checkItCount(batchSize) {
+ assert.eq(5, doItCount(num - 5, null, batchSize), "skip 1 " + batchSize);
+ assert.eq(5, doItCount(num - 5, {num: 1}, batchSize), "skip 2 " + batchSize);
+ assert.eq(5, doItCount(num - 5, {_id: 1}, batchSize), "skip 3 " + batchSize);
+ assert.eq(0, doItCount(num + 5, {num: 1}, batchSize), "skip 4 " + batchSize);
+ assert.eq(0, doItCount(num + 5, {_id: 1}, batchSize), "skip 5 " + batchSize);
+}
- poolStats("before checking itcount");
+poolStats("before checking itcount");
- checkItCount(0);
- checkItCount(2);
+checkItCount(0);
+checkItCount(2);
- poolStats("after checking itcount");
+poolStats("after checking itcount");
- // --- Verify that modify & save style updates doesn't work on sharded clusters ---
+// --- Verify that modify-and-save style updates don't work on sharded clusters ---
- var o = db.data.findOne();
- o.x = 16;
- assert.commandFailedWithCode(db.data.save(o), ErrorCodes.ShardKeyNotFound);
- poolStats("at end");
+var o = db.data.findOne();
+o.x = 16;
+assert.commandFailedWithCode(db.data.save(o), ErrorCodes.ShardKeyNotFound);
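+// save() turns into an upsert whose query contains only _id; without the shard key {num: 1}
+// in the query, mongos cannot target the write, hence ShardKeyNotFound.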
+poolStats("at end");
- print(summary);
+print(summary);
- assert.throws(function() {
- s.adminCommand({enablesharding: "admin"});
- });
+assert.throws(function() {
+ s.adminCommand({enablesharding: "admin"});
+});
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/shard_aware_init.js b/jstests/sharding/shard_aware_init.js
index e041d4f377a..5ed9e129a4d 100644
--- a/jstests/sharding/shard_aware_init.js
+++ b/jstests/sharding/shard_aware_init.js
@@ -7,111 +7,64 @@
*/
(function() {
- "use strict";
+"use strict";
- var waitForMaster = function(conn) {
- assert.soon(function() {
- var res = conn.getDB('admin').runCommand({isMaster: 1});
- return res.ismaster;
- });
+var waitForMaster = function(conn) {
+ assert.soon(function() {
+ var res = conn.getDB('admin').runCommand({isMaster: 1});
+ return res.ismaster;
+ });
+};
+
+/**
+ * Runs a series of tests on the mongod instance mongodConn points to. Note that the test
+ * can restart the mongod instance several times, so mongodConn can end up with a broken
+ * connection afterwards.
+ *
+ * awaitVersionUpdate is used with the replset invocation of this test to ensure that our
+ * initial write to the admin.system.version collection is fully flushed out of the oplog before
+ * restarting. That allows our standalone corrupting update to see the write (and cause us to
+ * fail on startup).
+ *
+ * TODO: Remove awaitVersionUpdate after SERVER-41005, where we figure out how to wait until
+ * after replication is started before reading our shard identity from
+ * admin.system.version
+ */
+var runTest = function(mongodConn, configConnStr, awaitVersionUpdate) {
+ var shardIdentityDoc = {
+ _id: 'shardIdentity',
+ configsvrConnectionString: configConnStr,
+ shardName: 'newShard',
+ clusterId: ObjectId()
};
/**
- * Runs a series of test on the mongod instance mongodConn is pointing to. Notes that the
- * test can restart the mongod instance several times so mongodConn can end up with a broken
- * connection after.
- *
- * awaitVersionUpdate is used with the replset invocation of this test to ensure that our
- * initial write to the admin.system.version collection is fully flushed out of the oplog before
- * restarting. That allows our standalone corrupting update to see the write (and cause us to
- * fail on startup).
- *
- * TODO: Remove awaitVersionUpdate after SERVER-41005, where we figure out how to wait until
- * after replication is started before reading our shard identity from
- * admin.system.version
+ * Restarts the server without --shardsvr and replaces the shardIdentity doc with a valid
+ * document. Then, restarts the server again with --shardsvr. This also returns a
+ * connection to the server after the last restart.
*/
- var runTest = function(mongodConn, configConnStr, awaitVersionUpdate) {
- var shardIdentityDoc = {
- _id: 'shardIdentity',
- configsvrConnectionString: configConnStr,
- shardName: 'newShard',
- clusterId: ObjectId()
- };
-
- /**
- * Restarts the server without --shardsvr and replace the shardIdentity doc with a valid
- * document. Then, restarts the server again with --shardsvr. This also returns a
- * connection to the server after the last restart.
- */
- var restartAndFixShardIdentityDoc = function(startOptions) {
- var options = Object.extend({}, startOptions);
- // With Recover to a Timestamp, writes to a replica set member may not be written to
- // disk in the collection, but are instead re-applied from the oplog at startup. When
- // restarting with `--shardsvr`, the update to the `shardIdentity` document is not
- // processed. Turning off `--replSet` guarantees the update is written out to the
- // collection and the test no longer relies on replication recovery from performing
- // the update with `--shardsvr` on.
- var rsName = options.replSet;
- delete options.replSet;
- delete options.shardsvr;
- var mongodConn = MongoRunner.runMongod(options);
- waitForMaster(mongodConn);
-
- var res = mongodConn.getDB('admin').system.version.update({_id: 'shardIdentity'},
- shardIdentityDoc);
- assert.eq(1, res.nModified);
-
- MongoRunner.stopMongod(mongodConn);
-
- newMongodOptions.shardsvr = '';
- newMongodOptions.replSet = rsName;
- mongodConn = MongoRunner.runMongod(newMongodOptions);
- waitForMaster(mongodConn);
-
- res = mongodConn.getDB('admin').runCommand({shardingState: 1});
-
- assert(res.enabled);
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
- assert.eq(shardIdentityDoc.shardName, res.shardName);
- assert.eq(shardIdentityDoc.clusterId, res.clusterId);
-
- return mongodConn;
- };
-
- // Simulate the upsert that is performed by a config server on addShard.
- assert.writeOK(mongodConn.getDB('admin').system.version.update(
- {
- _id: shardIdentityDoc._id,
- shardName: shardIdentityDoc.shardName,
- clusterId: shardIdentityDoc.clusterId,
- },
- {$set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}},
- {upsert: true}));
-
- awaitVersionUpdate();
-
- var res = mongodConn.getDB('admin').runCommand({shardingState: 1});
+ var restartAndFixShardIdentityDoc = function(startOptions) {
+ var options = Object.extend({}, startOptions);
+ // With Recover to a Timestamp, writes to a replica set member may not be written to
+ // disk in the collection, but are instead re-applied from the oplog at startup. When
+ // restarting with `--shardsvr`, the update to the `shardIdentity` document is not
+ // processed. Turning off `--replSet` guarantees the update is written out to the
+ // collection and the test no longer relies on replication recovery from performing
+ // the update with `--shardsvr` on.
+ var rsName = options.replSet;
+ delete options.replSet;
+ delete options.shardsvr;
+ var mongodConn = MongoRunner.runMongod(options);
+ waitForMaster(mongodConn);
+
+ var res = mongodConn.getDB('admin').system.version.update({_id: 'shardIdentity'},
+ shardIdentityDoc);
+ assert.eq(1, res.nModified);
- assert(res.enabled);
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
- assert.eq(shardIdentityDoc.shardName, res.shardName);
- assert.eq(shardIdentityDoc.clusterId, res.clusterId);
- // Should not be allowed to remove the shardIdentity document
- assert.writeErrorWithCode(
- mongodConn.getDB('admin').system.version.remove({_id: 'shardIdentity'}), 40070);
-
- //
- // Test normal startup
- //
-
- var newMongodOptions = Object.extend(mongodConn.savedOptions, {
- restart: true,
- // disable snapshotting to force the stable timestamp forward with or without the
- // majority commit point. This simplifies forcing out our corrupted write to
- // admin.system.version
- setParameter: {"failpoint.disableSnapshotting": "{'mode':'alwaysOn'}"}
- });
MongoRunner.stopMongod(mongodConn);
+
+ newMongodOptions.shardsvr = '';
+ newMongodOptions.replSet = rsName;
mongodConn = MongoRunner.runMongod(newMongodOptions);
waitForMaster(mongodConn);
@@ -122,61 +75,108 @@
assert.eq(shardIdentityDoc.shardName, res.shardName);
assert.eq(shardIdentityDoc.clusterId, res.clusterId);
- //
- // Test shardIdentity doc without configsvrConnectionString, resulting into parse error
- //
-
- // Note: modification of the shardIdentity is allowed only when not running with --shardsvr
- MongoRunner.stopMongod(mongodConn);
- // The manipulation of `--replSet` is explained in `restartAndFixShardIdentityDoc`.
- var rsName = newMongodOptions.replSet;
- delete newMongodOptions.replSet;
- delete newMongodOptions.shardsvr;
- mongodConn = MongoRunner.runMongod(newMongodOptions);
- waitForMaster(mongodConn);
-
- let writeResult = assert.commandWorked(mongodConn.getDB('admin').system.version.update(
- {_id: 'shardIdentity'}, {_id: 'shardIdentity', shardName: 'x', clusterId: ObjectId()}));
- assert.eq(writeResult.nModified, 1);
-
- MongoRunner.stopMongod(mongodConn);
-
- newMongodOptions.shardsvr = '';
- newMongodOptions.replSet = rsName;
- assert.throws(function() {
- var connToCrashedMongod = MongoRunner.runMongod(newMongodOptions);
- waitForMaster(connToCrashedMongod);
- });
-
- // We call MongoRunner.stopMongod() using a former connection to the server that is
- // configured with the same port in order to be able to assert on the server's exit code.
- MongoRunner.stopMongod(mongodConn, undefined, {allowedExitCode: MongoRunner.EXIT_UNCAUGHT});
-
- //
- // Test that it is possible to fix the invalid shardIdentity doc by not passing --shardsvr
- //
- mongodConn = restartAndFixShardIdentityDoc(newMongodOptions);
- res = mongodConn.getDB('admin').runCommand({shardingState: 1});
- assert(res.enabled);
+ return mongodConn;
};
- var st = new ShardingTest({shards: 1});
-
- {
- var mongod = MongoRunner.runMongod({shardsvr: ''});
- runTest(mongod, st.configRS.getURL(), function() {});
- MongoRunner.stopMongod(mongod);
- }
-
- {
- var replTest = new ReplSetTest({nodes: 1});
- replTest.startSet({shardsvr: ''});
- replTest.initiate();
- runTest(replTest.getPrimary(), st.configRS.getURL(), function() {
- replTest.awaitLastStableRecoveryTimestamp();
- });
- replTest.stopSet();
- }
-
- st.stop();
+ // Simulate the upsert that is performed by a config server on addShard.
+ assert.writeOK(mongodConn.getDB('admin').system.version.update(
+ {
+ _id: shardIdentityDoc._id,
+ shardName: shardIdentityDoc.shardName,
+ clusterId: shardIdentityDoc.clusterId,
+ },
+ {$set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}},
+ {upsert: true}));
+
+ awaitVersionUpdate();
+
+ var res = mongodConn.getDB('admin').runCommand({shardingState: 1});
+
+ assert(res.enabled);
+ assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
+ assert.eq(shardIdentityDoc.shardName, res.shardName);
+ assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+ // Should not be allowed to remove the shardIdentity document
+ assert.writeErrorWithCode(
+ mongodConn.getDB('admin').system.version.remove({_id: 'shardIdentity'}), 40070);
+
+ //
+ // Test normal startup
+ //
+
+ var newMongodOptions = Object.extend(mongodConn.savedOptions, {
+ restart: true,
+ // disable snapshotting to force the stable timestamp forward with or without the
+ // majority commit point. This simplifies forcing out our corrupted write to
+ // admin.system.version
+ setParameter: {"failpoint.disableSnapshotting": "{'mode':'alwaysOn'}"}
+ });
+ MongoRunner.stopMongod(mongodConn);
+ mongodConn = MongoRunner.runMongod(newMongodOptions);
+ waitForMaster(mongodConn);
+
+ res = mongodConn.getDB('admin').runCommand({shardingState: 1});
+
+ assert(res.enabled);
+ assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
+ assert.eq(shardIdentityDoc.shardName, res.shardName);
+ assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+
+ //
+ // Test shardIdentity doc without configsvrConnectionString, resulting in a parse error
+ //
+
+ // Note: modification of the shardIdentity is allowed only when not running with --shardsvr
+ MongoRunner.stopMongod(mongodConn);
+ // The manipulation of `--replSet` is explained in `restartAndFixShardIdentityDoc`.
+ var rsName = newMongodOptions.replSet;
+ delete newMongodOptions.replSet;
+ delete newMongodOptions.shardsvr;
+ mongodConn = MongoRunner.runMongod(newMongodOptions);
+ waitForMaster(mongodConn);
+
+ let writeResult = assert.commandWorked(mongodConn.getDB('admin').system.version.update(
+ {_id: 'shardIdentity'}, {_id: 'shardIdentity', shardName: 'x', clusterId: ObjectId()}));
+ assert.eq(writeResult.nModified, 1);
+
+ MongoRunner.stopMongod(mongodConn);
+
+ newMongodOptions.shardsvr = '';
+ newMongodOptions.replSet = rsName;
+ assert.throws(function() {
+ var connToCrashedMongod = MongoRunner.runMongod(newMongodOptions);
+ waitForMaster(connToCrashedMongod);
+ });
+
+ // We call MongoRunner.stopMongod() using a former connection to the server that is
+ // configured with the same port in order to be able to assert on the server's exit code.
+ MongoRunner.stopMongod(mongodConn, undefined, {allowedExitCode: MongoRunner.EXIT_UNCAUGHT});
+
+ //
+ // Test that it is possible to fix the invalid shardIdentity doc by not passing --shardsvr
+ //
+ mongodConn = restartAndFixShardIdentityDoc(newMongodOptions);
+ res = mongodConn.getDB('admin').runCommand({shardingState: 1});
+ assert(res.enabled);
+};
+
+var st = new ShardingTest({shards: 1});
+
+{
+ var mongod = MongoRunner.runMongod({shardsvr: ''});
+ runTest(mongod, st.configRS.getURL(), function() {});
+ MongoRunner.stopMongod(mongod);
+}
+
+{
+ var replTest = new ReplSetTest({nodes: 1});
+ replTest.startSet({shardsvr: ''});
+ replTest.initiate();
+ runTest(replTest.getPrimary(), st.configRS.getURL(), function() {
+ replTest.awaitLastStableRecoveryTimestamp();
+ });
+ replTest.stopSet();
+}
+
+st.stop();
})();
diff --git a/jstests/sharding/shard_aware_init_secondaries.js b/jstests/sharding/shard_aware_init_secondaries.js
index 009610f47a6..a1387592212 100644
--- a/jstests/sharding/shard_aware_init_secondaries.js
+++ b/jstests/sharding/shard_aware_init_secondaries.js
@@ -5,68 +5,67 @@
*/
(function() {
- "use strict";
-
- var st = new ShardingTest({shards: 1});
-
- var replTest = new ReplSetTest({nodes: 2});
- replTest.startSet({shardsvr: ''});
- var nodeList = replTest.nodeList();
- replTest.initiate({
- _id: replTest.name,
- members:
- [{_id: 0, host: nodeList[0], priority: 1}, {_id: 1, host: nodeList[1], priority: 0}]
- });
-
- var priConn = replTest.getPrimary();
-
- var configConnStr = st.configRS.getURL();
-
- var shardIdentityDoc = {
- _id: 'shardIdentity',
- configsvrConnectionString: configConnStr,
- shardName: 'newShard',
- clusterId: ObjectId()
- };
-
- // Simulate the upsert that is performed by a config server on addShard.
- var shardIdentityQuery = {
- _id: shardIdentityDoc._id,
- shardName: shardIdentityDoc.shardName,
- clusterId: shardIdentityDoc.clusterId
- };
- var shardIdentityUpdate = {
- $set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}
- };
- assert.writeOK(priConn.getDB('admin').system.version.update(
- shardIdentityQuery, shardIdentityUpdate, {upsert: true, writeConcern: {w: 2}}));
-
- var secConn = replTest.getSecondary();
- secConn.setSlaveOk(true);
-
- var res = secConn.getDB('admin').runCommand({shardingState: 1});
-
- assert(res.enabled, tojson(res));
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
- assert.eq(shardIdentityDoc.shardName, res.shardName);
- assert.eq(shardIdentityDoc.clusterId, res.clusterId);
-
- var newMongodOptions = Object.extend(secConn.savedOptions, {restart: true});
- replTest.restart(replTest.getNodeId(secConn), newMongodOptions);
- replTest.waitForMaster();
- replTest.awaitSecondaryNodes();
-
- secConn = replTest.getSecondary();
- secConn.setSlaveOk(true);
-
- res = secConn.getDB('admin').runCommand({shardingState: 1});
-
- assert(res.enabled, tojson(res));
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
- assert.eq(shardIdentityDoc.shardName, res.shardName);
- assert.eq(shardIdentityDoc.clusterId, res.clusterId);
-
- replTest.stopSet();
-
- st.stop();
+"use strict";
+
+var st = new ShardingTest({shards: 1});
+
+var replTest = new ReplSetTest({nodes: 2});
+replTest.startSet({shardsvr: ''});
+var nodeList = replTest.nodeList();
+replTest.initiate({
+ _id: replTest.name,
+ members: [{_id: 0, host: nodeList[0], priority: 1}, {_id: 1, host: nodeList[1], priority: 0}]
+});
+
+var priConn = replTest.getPrimary();
+
+var configConnStr = st.configRS.getURL();
+
+var shardIdentityDoc = {
+ _id: 'shardIdentity',
+ configsvrConnectionString: configConnStr,
+ shardName: 'newShard',
+ clusterId: ObjectId()
+};
+
+// Simulate the upsert that is performed by a config server on addShard.
+var shardIdentityQuery = {
+ _id: shardIdentityDoc._id,
+ shardName: shardIdentityDoc.shardName,
+ clusterId: shardIdentityDoc.clusterId
+};
+var shardIdentityUpdate = {
+ $set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}
+};
+assert.writeOK(priConn.getDB('admin').system.version.update(
+ shardIdentityQuery, shardIdentityUpdate, {upsert: true, writeConcern: {w: 2}}));
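+// writeConcern w: 2 guarantees the shardIdentity document has replicated to the secondary,
+// which is what lets the shardingState checks below pass without restarting the node.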
+
+var secConn = replTest.getSecondary();
+secConn.setSlaveOk(true);
+
+var res = secConn.getDB('admin').runCommand({shardingState: 1});
+
+assert(res.enabled, tojson(res));
+assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
+assert.eq(shardIdentityDoc.shardName, res.shardName);
+assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+
+var newMongodOptions = Object.extend(secConn.savedOptions, {restart: true});
+replTest.restart(replTest.getNodeId(secConn), newMongodOptions);
+replTest.waitForMaster();
+replTest.awaitSecondaryNodes();
+
+secConn = replTest.getSecondary();
+secConn.setSlaveOk(true);
+
+res = secConn.getDB('admin').runCommand({shardingState: 1});
+
+assert(res.enabled, tojson(res));
+assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
+assert.eq(shardIdentityDoc.shardName, res.shardName);
+assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+
+replTest.stopSet();
+
+st.stop();
})();
diff --git a/jstests/sharding/shard_aware_on_add_shard.js b/jstests/sharding/shard_aware_on_add_shard.js
index 92c490f2982..68a0c871b88 100644
--- a/jstests/sharding/shard_aware_on_add_shard.js
+++ b/jstests/sharding/shard_aware_on_add_shard.js
@@ -4,63 +4,62 @@
*/
(function() {
- "use strict";
+"use strict";
- var waitForIsMaster = function(conn) {
- assert.soon(function() {
- var res = conn.getDB('admin').runCommand({isMaster: 1});
- return res.ismaster;
- });
- };
+var waitForIsMaster = function(conn) {
+ assert.soon(function() {
+ var res = conn.getDB('admin').runCommand({isMaster: 1});
+ return res.ismaster;
+ });
+};
- var checkShardingStateInitialized = function(conn, configConnStr, shardName, clusterId) {
- var res = conn.getDB('admin').runCommand({shardingState: 1});
- assert.commandWorked(res);
- assert(res.enabled);
- assert.eq(configConnStr, res.configServer);
- assert.eq(shardName, res.shardName);
- assert(clusterId.equals(res.clusterId),
- 'cluster id: ' + tojson(clusterId) + ' != ' + tojson(res.clusterId));
- };
+var checkShardingStateInitialized = function(conn, configConnStr, shardName, clusterId) {
+ var res = conn.getDB('admin').runCommand({shardingState: 1});
+ assert.commandWorked(res);
+ assert(res.enabled);
+ assert.eq(configConnStr, res.configServer);
+ assert.eq(shardName, res.shardName);
+ assert(clusterId.equals(res.clusterId),
+ 'cluster id: ' + tojson(clusterId) + ' != ' + tojson(res.clusterId));
+};
- var checkShardMarkedAsShardAware = function(mongosConn, shardName) {
- var res = mongosConn.getDB('config').getCollection('shards').findOne({_id: shardName});
- assert.neq(null, res, "Could not find new shard " + shardName + " in config.shards");
- assert.eq(1, res.state);
- };
+var checkShardMarkedAsShardAware = function(mongosConn, shardName) {
+ var res = mongosConn.getDB('config').getCollection('shards').findOne({_id: shardName});
+ assert.neq(null, res, "Could not find new shard " + shardName + " in config.shards");
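+    // In config.shards, state: 1 indicates the shard has been marked as shard-aware.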
+ assert.eq(1, res.state);
+};
- // Create the cluster to test adding shards to.
- var st = new ShardingTest({shards: 1});
- var clusterId = st.s.getDB('config').getCollection('version').findOne().clusterId;
+// Create the cluster to test adding shards to.
+var st = new ShardingTest({shards: 1});
+var clusterId = st.s.getDB('config').getCollection('version').findOne().clusterId;
- // Add a shard that is a standalone mongod.
+// Add a shard that is a standalone mongod.
- var standaloneConn = MongoRunner.runMongod({shardsvr: ''});
- waitForIsMaster(standaloneConn);
+var standaloneConn = MongoRunner.runMongod({shardsvr: ''});
+waitForIsMaster(standaloneConn);
- jsTest.log("Going to add standalone as shard: " + standaloneConn);
- var newShardName = "newShard";
- assert.commandWorked(st.s.adminCommand({addShard: standaloneConn.name, name: newShardName}));
- checkShardingStateInitialized(standaloneConn, st.configRS.getURL(), newShardName, clusterId);
- checkShardMarkedAsShardAware(st.s, newShardName);
+jsTest.log("Going to add standalone as shard: " + standaloneConn);
+var newShardName = "newShard";
+assert.commandWorked(st.s.adminCommand({addShard: standaloneConn.name, name: newShardName}));
+checkShardingStateInitialized(standaloneConn, st.configRS.getURL(), newShardName, clusterId);
+checkShardMarkedAsShardAware(st.s, newShardName);
- MongoRunner.stopMongod(standaloneConn);
+MongoRunner.stopMongod(standaloneConn);
- // Add a shard that is a replica set.
+// Add a shard that is a replica set.
- var replTest = new ReplSetTest({nodes: 1});
- replTest.startSet({shardsvr: ''});
- replTest.initiate();
- waitForIsMaster(replTest.getPrimary());
+var replTest = new ReplSetTest({nodes: 1});
+replTest.startSet({shardsvr: ''});
+replTest.initiate();
+waitForIsMaster(replTest.getPrimary());
- jsTest.log("Going to add replica set as shard: " + tojson(replTest));
- assert.commandWorked(st.s.adminCommand({addShard: replTest.getURL(), name: replTest.getURL()}));
- checkShardingStateInitialized(
- replTest.getPrimary(), st.configRS.getURL(), replTest.getURL(), clusterId);
- checkShardMarkedAsShardAware(st.s, newShardName);
+jsTest.log("Going to add replica set as shard: " + tojson(replTest));
+assert.commandWorked(st.s.adminCommand({addShard: replTest.getURL(), name: replTest.getURL()}));
+checkShardingStateInitialized(
+ replTest.getPrimary(), st.configRS.getURL(), replTest.getURL(), clusterId);
+checkShardMarkedAsShardAware(st.s, newShardName);
- replTest.stopSet();
-
- st.stop();
+replTest.stopSet();
+st.stop();
})();
diff --git a/jstests/sharding/shard_aware_primary_failover.js b/jstests/sharding/shard_aware_primary_failover.js
index abbfb47c1cf..9e7f572c3e9 100644
--- a/jstests/sharding/shard_aware_primary_failover.js
+++ b/jstests/sharding/shard_aware_primary_failover.js
@@ -2,57 +2,57 @@
* Test that a new primary that gets elected will properly perform shard initialization.
*/
(function() {
- "use strict";
-
- var st = new ShardingTest({shards: 1});
-
- var replTest = new ReplSetTest({nodes: 3});
- replTest.startSet({shardsvr: ''});
-
- var nodes = replTest.nodeList();
- replTest.initiate({
- _id: replTest.name,
- members: [
- {_id: 0, host: nodes[0]},
- {_id: 1, host: nodes[1]},
- {_id: 2, host: nodes[2], arbiterOnly: true}
- ]
- });
-
- var primaryConn = replTest.getPrimary();
-
- var shardIdentityDoc = {
- _id: 'shardIdentity',
- configsvrConnectionString: st.configRS.getURL(),
- shardName: 'newShard',
- clusterId: ObjectId()
- };
-
- // Simulate the upsert that is performed by a config server on addShard.
- var shardIdentityQuery = {
- _id: shardIdentityDoc._id,
- shardName: shardIdentityDoc.shardName,
- clusterId: shardIdentityDoc.clusterId
- };
- var shardIdentityUpdate = {
- $set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}
- };
- assert.writeOK(primaryConn.getDB('admin').system.version.update(
- shardIdentityQuery, shardIdentityUpdate, {upsert: true, writeConcern: {w: 'majority'}}));
-
- replTest.stopMaster();
- replTest.waitForMaster(30000);
-
- primaryConn = replTest.getPrimary();
-
- var res = primaryConn.getDB('admin').runCommand({shardingState: 1});
-
- assert(res.enabled);
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
- assert.eq(shardIdentityDoc.shardName, res.shardName);
- assert.eq(shardIdentityDoc.clusterId, res.clusterId);
-
- replTest.stopSet();
-
- st.stop();
+"use strict";
+
+var st = new ShardingTest({shards: 1});
+
+var replTest = new ReplSetTest({nodes: 3});
+replTest.startSet({shardsvr: ''});
+
+var nodes = replTest.nodeList();
+replTest.initiate({
+ _id: replTest.name,
+ members: [
+ {_id: 0, host: nodes[0]},
+ {_id: 1, host: nodes[1]},
+ {_id: 2, host: nodes[2], arbiterOnly: true}
+ ]
+});
+
+var primaryConn = replTest.getPrimary();
+
+var shardIdentityDoc = {
+ _id: 'shardIdentity',
+ configsvrConnectionString: st.configRS.getURL(),
+ shardName: 'newShard',
+ clusterId: ObjectId()
+};
+
+// Simulate the upsert that is performed by a config server on addShard.
+var shardIdentityQuery = {
+ _id: shardIdentityDoc._id,
+ shardName: shardIdentityDoc.shardName,
+ clusterId: shardIdentityDoc.clusterId
+};
+var shardIdentityUpdate = {
+ $set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}
+};
+assert.writeOK(primaryConn.getDB('admin').system.version.update(
+ shardIdentityQuery, shardIdentityUpdate, {upsert: true, writeConcern: {w: 'majority'}}));
+
+replTest.stopMaster();
+replTest.waitForMaster(30000);
+
+primaryConn = replTest.getPrimary();
+
+var res = primaryConn.getDB('admin').runCommand({shardingState: 1});
+
+assert(res.enabled);
+assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
+assert.eq(shardIdentityDoc.shardName, res.shardName);
+assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+
+replTest.stopSet();
+
+st.stop();
})();
diff --git a/jstests/sharding/shard_collection_basic.js b/jstests/sharding/shard_collection_basic.js
index f12d4c8482b..f417cdc4165 100644
--- a/jstests/sharding/shard_collection_basic.js
+++ b/jstests/sharding/shard_collection_basic.js
@@ -3,356 +3,347 @@
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({mongos: 1, shards: 2});
- var kDbName = 'db';
- var mongos = st.s0;
+var st = new ShardingTest({mongos: 1, shards: 2});
+var kDbName = 'db';
+var mongos = st.s0;
- function testAndClenaupWithKeyNoIndexFailed(keyDoc) {
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
-
- var ns = kDbName + '.foo';
- assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
-
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- }
+function testAndClenaupWithKeyNoIndexFailed(keyDoc) {
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- function testAndClenaupWithKeyOK(keyDoc) {
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(keyDoc));
+ var ns = kDbName + '.foo';
+ assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- var ns = kDbName + '.foo';
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
+ assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+}
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
+function testAndClenaupWithKeyOK(keyDoc) {
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+ assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(keyDoc));
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- }
+ var ns = kDbName + '.foo';
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
- function testAndClenaupWithKeyNoIndexOK(keyDoc) {
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- var ns = kDbName + '.foo';
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
+ assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+}
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
+function testAndClenaupWithKeyNoIndexOK(keyDoc) {
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- }
+ var ns = kDbName + '.foo';
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
- function getIndexSpecByName(coll, indexName) {
- var indexes = coll.getIndexes().filter(function(spec) {
- return spec.name === indexName;
- });
- assert.eq(1, indexes.length, 'index "' + indexName + '" not found"');
- return indexes[0];
- }
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- // Fail if db is not sharded.
- assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
+ assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+}
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+function getIndexSpecByName(coll, indexName) {
+ var indexes = coll.getIndexes().filter(function(spec) {
+ return spec.name === indexName;
+ });
+ assert.eq(1, indexes.length, 'index "' + indexName + '" not found');
+ return indexes[0];
+}
- // Fail if db is not sharding enabled.
- assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
+// Fail if db is not sharded.
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- // Verify wrong arguments errors.
- assert.commandFailed(mongos.adminCommand({shardCollection: 'foo', key: {_id: 1}}));
+// Fail if db is not sharding enabled.
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
- assert.commandFailed(mongos.adminCommand({shardCollection: 'foo', key: "aaa"}));
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- // shardCollection may only be run against admin database.
- assert.commandFailed(
- mongos.getDB('test').runCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
+// Verify wrong arguments errors.
+assert.commandFailed(mongos.adminCommand({shardCollection: 'foo', key: {_id: 1}}));
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- // Can't shard if key is not specified.
- assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo'}));
+assert.commandFailed(mongos.adminCommand({shardCollection: 'foo', key: "aaa"}));
- assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {}}));
+// shardCollection may only be run against admin database.
+assert.commandFailed(
+ mongos.getDB('test').runCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
- // Verify key format
- assert.commandFailed(
- mongos.adminCommand({shardCollection: kDbName + '.foo', key: {aKey: "hahahashed"}}));
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+// Can't shard if key is not specified.
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo'}));
- // Shard key cannot contain embedded objects.
- assert.commandFailed(
- mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: {a: 1}}}));
- assert.commandFailed(
- mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: {'a.b': 1}}}));
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {}}));
- // Shard key can contain dotted path to embedded element.
- assert.commandWorked(mongos.adminCommand(
- {shardCollection: kDbName + '.shard_key_dotted_path', key: {'_id.a': 1}}));
+// Verify key format
+assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {aKey: "hahahashed"}}));
- //
- // Test shardCollection's idempotency
- //
+// Shard key cannot contain embedded objects.
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: {a: 1}}}));
+assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: {'a.b': 1}}}));
- // Succeed if a collection is already sharded with the same options.
- assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
- assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
- // Specifying the simple collation or not specifying a collation should be equivalent, because
- // if no collation is specified, the collection default collation is used.
- assert.commandWorked(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'simple'}}));
+// The shard key can contain a dotted path to an embedded element.
+assert.commandWorked(
+ mongos.adminCommand({shardCollection: kDbName + '.shard_key_dotted_path', key: {'_id.a': 1}}));
- // Fail if the collection is already sharded with different options.
- // different shard key
- assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {x: 1}}));
- // different 'unique'
- assert.commandFailed(
- mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}, unique: true}));
+//
+// Test shardCollection's idempotency
+//
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+// Succeed if a collection is already sharded with the same options.
+assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
+assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
+// Specifying the simple collation or not specifying a collation should be equivalent, because
+// if no collation is specified, the collection default collation is used.
+assert.commandWorked(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'simple'}}));
- // Shard empty collections no index required.
- testAndClenaupWithKeyNoIndexOK({_id: 1});
- testAndClenaupWithKeyNoIndexOK({_id: 'hashed'});
+// Fail if the collection is already sharded with different options.
+// different shard key
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {x: 1}}));
+// different 'unique'
+assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}, unique: true}));
- // Shard by a plain key.
- testAndClenaupWithKeyNoIndexOK({a: 1});
+assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- // Cant shard collection with data and no index on the shard key.
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- testAndClenaupWithKeyNoIndexFailed({a: 1});
+// Shard empty collections; no index on the shard key is required.
+testAndClenaupWithKeyNoIndexOK({_id: 1});
+testAndClenaupWithKeyNoIndexOK({_id: 'hashed'});
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- testAndClenaupWithKeyOK({a: 1});
+// Shard by a plain key.
+testAndClenaupWithKeyNoIndexOK({a: 1});
- // Shard by a hashed key.
- testAndClenaupWithKeyNoIndexOK({a: 'hashed'});
+// Can't shard a collection that has data but no index on the shard key.
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+testAndClenaupWithKeyNoIndexFailed({a: 1});
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+testAndClenaupWithKeyOK({a: 1});
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- testAndClenaupWithKeyOK({a: 'hashed'});
+// Shard by a hashed key.
+testAndClenaupWithKeyNoIndexOK({a: 'hashed'});
- // Shard by a compound key.
- testAndClenaupWithKeyNoIndexOK({x: 1, y: 1});
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
- assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
- testAndClenaupWithKeyNoIndexFailed({x: 1, y: 1});
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+testAndClenaupWithKeyOK({a: 'hashed'});
- assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
- testAndClenaupWithKeyOK({x: 1, y: 1});
+// Shard by a compound key.
+testAndClenaupWithKeyNoIndexOK({x: 1, y: 1});
- testAndClenaupWithKeyNoIndexFailed({x: 'hashed', y: 1});
- testAndClenaupWithKeyNoIndexFailed({x: 'hashed', y: 'hashed'});
+assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
+testAndClenaupWithKeyNoIndexFailed({x: 1, y: 1});
- // Shard by a key component.
- testAndClenaupWithKeyOK({'z.x': 1});
- testAndClenaupWithKeyOK({'z.x': 'hashed'});
+assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
+testAndClenaupWithKeyOK({x: 1, y: 1});
- // Can't shard by a multikey.
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
- testAndClenaupWithKeyNoIndexFailed({a: 1});
+testAndClenaupWithKeyNoIndexFailed({x: 'hashed', y: 1});
+testAndClenaupWithKeyNoIndexFailed({x: 'hashed', y: 'hashed'});
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1, b: 1}));
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
- testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
+// Shard by a key component.
+testAndClenaupWithKeyOK({'z.x': 1});
+testAndClenaupWithKeyOK({'z.x': 'hashed'});
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
+// Can't shard using a multikey index.
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
+testAndClenaupWithKeyNoIndexFailed({a: 1});
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- testAndClenaupWithKeyOK({a: 'hashed'});
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1, b: 1}));
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
+testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
- // Cant shard by a parallel arrays.
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: [1, 2, 3, 4, 5]}));
- testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+testAndClenaupWithKeyOK({a: 'hashed'});
- // Can't shard on unique hashed key.
- assert.commandFailed(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {aKey: "hashed"}, unique: true}));
+// Can't shard by parallel arrays.
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: [1, 2, 3, 4, 5]}));
+testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
- // If shardCollection has unique:true it must have a unique index.
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({aKey: 1}));
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- assert.commandFailed(
- mongos.adminCommand({shardCollection: kDbName + '.foo', key: {aKey: 1}, unique: true}));
+// Can't shard on unique hashed key.
+assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {aKey: "hashed"}, unique: true}));
- //
- // Session-related tests
- //
+// If shardCollection specifies unique: true, there must be a unique index on the shard key.
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({aKey: 1}));
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {aKey: 1}, unique: true}));
- // shardCollection can be called under a session.
- const sessionDb = mongos.startSession().getDatabase(kDbName);
- assert.commandWorked(
- sessionDb.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 'hashed'}}));
- sessionDb.getSession().endSession();
-
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
-
- //
- // Collation-related tests
- //
+//
+// Session-related tests
+//
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- // shardCollection should fail when the 'collation' option is not a nested object.
- assert.commandFailed(
- mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}, collation: true}));
-
- // shardCollection should fail when the 'collation' option cannot be parsed.
- assert.commandFailed(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'unknown'}}));
-
- // shardCollection should fail when the 'collation' option is valid but is not the simple
- // collation.
- assert.commandFailed(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'en_US'}}));
-
- // shardCollection should succeed when the 'collation' option specifies the simple collation.
- assert.commandWorked(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'simple'}}));
-
- // shardCollection should fail when it does not specify the 'collation' option but the
- // collection has a non-simple default collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(
- mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
- assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
-
- // shardCollection should fail for the key pattern {_id: 1} if the collection has a non-simple
- // default collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(
- mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
- assert.commandFailed(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'simple'}}));
-
- // shardCollection should fail for the key pattern {a: 1} if there is already an index 'a_1',
- // but it has a non-simple collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(
- mongos.getDB(kDbName).foo.createIndex({a: 1}, {collation: {locale: 'en_US'}}));
- assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
-
- // shardCollection should succeed for the key pattern {a: 1} and collation {locale: 'simple'} if
- // there is no index 'a_1', but there is a non-simple collection default collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(
- mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
- assert.commandWorked(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {a: 1}, collation: {locale: 'simple'}}));
- var indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
- assert(!indexSpec.hasOwnProperty('collation'));
-
- // shardCollection should succeed for the key pattern {a: 1} if there are two indexes on {a: 1}
- // and one has the simple collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}, {name: "a_1_simple"}));
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(
- {a: 1}, {collation: {locale: 'en_US'}, name: "a_1_en_US"}));
- assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
-
- // shardCollection should fail on a non-empty collection when the only index available with the
- // shard key as a prefix has a non-simple collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(
- mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 'foo'}));
- // This index will inherit the collection's default collation.
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
- assert.commandFailed(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {a: 1}, collation: {locale: 'simple'}}));
-
- // shardCollection should succeed on an empty collection with a non-simple default collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(
- mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
- assert.commandWorked(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {a: 1}, collation: {locale: 'simple'}}));
-
- // shardCollection should succeed on an empty collection with no default collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
- assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
+// shardCollection can be called under a session.
+const sessionDb = mongos.startSession().getDatabase(kDbName);
+assert.commandWorked(
+ sessionDb.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 'hashed'}}));
+sessionDb.getSession().endSession();
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- //
- // Tests for the shell helper sh.shardCollection().
- //
+//
+// Collation-related tests
+//
- db = mongos.getDB(kDbName);
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+
+// shardCollection should fail when the 'collation' option is not a nested object.
+assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}, collation: true}));
+
+// shardCollection should fail when the 'collation' option cannot be parsed.
+assert.commandFailed(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'unknown'}}));
+
+// shardCollection should fail when the 'collation' option is valid but is not the simple
+// collation.
+assert.commandFailed(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'en_US'}}));
+
+// shardCollection should succeed when the 'collation' option specifies the simple collation.
+assert.commandWorked(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'simple'}}));
+
+// shardCollection should fail when it does not specify the 'collation' option but the
+// collection has a non-simple default collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
+
+// shardCollection should fail for the key pattern {_id: 1} if the collection has a non-simple
+// default collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
+assert.commandFailed(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'simple'}}));
+
+// shardCollection should fail for the key pattern {a: 1} if there is already an index 'a_1',
+// but it has a non-simple collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}, {collation: {locale: 'en_US'}}));
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
+
+// shardCollection should succeed for the key pattern {a: 1} and collation {locale: 'simple'} if
+// there is no index 'a_1', but there is a non-simple collection default collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
+assert.commandWorked(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {a: 1}, collation: {locale: 'simple'}}));
+var indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
+assert(!indexSpec.hasOwnProperty('collation'));
+
+// shardCollection should succeed for the key pattern {a: 1} if there are two indexes on {a: 1}
+// and one has the simple collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}, {name: "a_1_simple"}));
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(
+ {a: 1}, {collation: {locale: 'en_US'}, name: "a_1_en_US"}));
+assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
+
+// shardCollection should fail on a non-empty collection when the only index available with the
+// shard key as a prefix has a non-simple collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 'foo'}));
+// This index will inherit the collection's default collation.
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
+assert.commandFailed(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {a: 1}, collation: {locale: 'simple'}}));
+
+// shardCollection should succeed on an empty collection with a non-simple default collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
+assert.commandWorked(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {a: 1}, collation: {locale: 'simple'}}));
+
+// shardCollection should succeed on an empty collection with no default collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
+assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
+
+assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- // shardCollection() propagates the shard key and the correct defaults.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
- assert.commandWorked(sh.shardCollection(kDbName + '.foo', {a: 1}));
- indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
- assert(!indexSpec.hasOwnProperty('unique'), tojson(indexSpec));
- assert(!indexSpec.hasOwnProperty('collation'), tojson(indexSpec));
-
- // shardCollection() propagates the value for 'unique'.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
- assert.commandWorked(sh.shardCollection(kDbName + '.foo', {a: 1}, true));
- indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
- assert(indexSpec.hasOwnProperty('unique'), tojson(indexSpec));
- assert.eq(indexSpec.unique, true, tojson(indexSpec));
-
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
- assert.commandWorked(sh.shardCollection(kDbName + '.foo', {a: 1}, false));
- indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
- assert(!indexSpec.hasOwnProperty('unique'), tojson(indexSpec));
-
- // shardCollections() 'options' parameter must be an object.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
- assert.throws(function() {
- sh.shardCollection(kDbName + '.foo', {a: 1}, false, 'not an object');
- });
-
- // shardCollection() propagates the value for 'collation'.
- // Currently only the simple collation is supported.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
- assert.commandFailed(
- sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'en_US'}}));
- assert.commandWorked(
- sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'simple'}}));
- indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
- assert(!indexSpec.hasOwnProperty('collation'), tojson(indexSpec));
-
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(
- mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
- assert.commandFailed(sh.shardCollection(kDbName + '.foo', {a: 1}));
- assert.commandFailed(
- sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'en_US'}}));
- assert.commandWorked(
- sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'simple'}}));
- indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
- assert(!indexSpec.hasOwnProperty('collation'), tojson(indexSpec));
-
- // shardCollection() propagates the value for 'numInitialChunks'.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
- assert.commandWorked(
- sh.shardCollection(kDbName + '.foo', {a: "hashed"}, false, {numInitialChunks: 5}));
- st.printShardingStatus();
- var numChunks = st.config.chunks.find({ns: kDbName + '.foo'}).count();
- assert.eq(numChunks, 5, "unexpected number of chunks");
-
- st.stop();
+//
+// Tests for the shell helper sh.shardCollection().
+//
+db = mongos.getDB(kDbName);
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+
+// shardCollection() propagates the shard key and the correct defaults.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
+assert.commandWorked(sh.shardCollection(kDbName + '.foo', {a: 1}));
+indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
+assert(!indexSpec.hasOwnProperty('unique'), tojson(indexSpec));
+assert(!indexSpec.hasOwnProperty('collation'), tojson(indexSpec));
+
+// shardCollection() propagates the value for 'unique'.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
+assert.commandWorked(sh.shardCollection(kDbName + '.foo', {a: 1}, true));
+indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
+assert(indexSpec.hasOwnProperty('unique'), tojson(indexSpec));
+assert.eq(indexSpec.unique, true, tojson(indexSpec));
+
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
+assert.commandWorked(sh.shardCollection(kDbName + '.foo', {a: 1}, false));
+indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
+assert(!indexSpec.hasOwnProperty('unique'), tojson(indexSpec));
+
+// shardCollection()'s 'options' parameter must be an object.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
+assert.throws(function() {
+ sh.shardCollection(kDbName + '.foo', {a: 1}, false, 'not an object');
+});
+
+// shardCollection() propagates the value for 'collation'.
+// Currently only the simple collation is supported.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
+assert.commandFailed(
+ sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'en_US'}}));
+assert.commandWorked(
+ sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'simple'}}));
+indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
+assert(!indexSpec.hasOwnProperty('collation'), tojson(indexSpec));
+
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
+assert.commandFailed(sh.shardCollection(kDbName + '.foo', {a: 1}));
+assert.commandFailed(
+ sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'en_US'}}));
+assert.commandWorked(
+ sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'simple'}}));
+indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
+assert(!indexSpec.hasOwnProperty('collation'), tojson(indexSpec));
+
+// shardCollection() propagates the value for 'numInitialChunks'.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
+assert.commandWorked(
+ sh.shardCollection(kDbName + '.foo', {a: "hashed"}, false, {numInitialChunks: 5}));
+st.printShardingStatus();
+var numChunks = st.config.chunks.find({ns: kDbName + '.foo'}).count();
+assert.eq(numChunks, 5, "unexpected number of chunks");
+
+st.stop();
})();
diff --git a/jstests/sharding/shard_collection_existing_zones.js b/jstests/sharding/shard_collection_existing_zones.js
index 8782e4e132b..8030b40ee9a 100644
--- a/jstests/sharding/shard_collection_existing_zones.js
+++ b/jstests/sharding/shard_collection_existing_zones.js
@@ -1,204 +1,188 @@
// Test that shardCollection uses existing zone info to validate
// shard keys and do initial chunk splits.
(function() {
- 'use strict';
-
- var st = new ShardingTest({mongos: 1, shards: 3});
- var kDbName = 'test';
- var kCollName = 'foo';
- var ns = kDbName + '.' + kCollName;
- var zoneName = 'zoneName';
- var mongos = st.s0;
- var testDB = mongos.getDB(kDbName);
- var configDB = mongos.getDB('config');
- var shardName = st.shard0.shardName;
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
-
- /**
- * Test that shardCollection correctly validates that a zone is associated with a shard.
- */
- function testShardZoneAssociationValidation(proposedShardKey, numberLongMin, numberLongMax) {
- var zoneMin = numberLongMin ? {x: NumberLong(0)} : {x: 0};
- var zoneMax = numberLongMax ? {x: NumberLong(10)} : {x: 10};
- assert.commandWorked(configDB.tags.insert(
- {_id: {ns: ns, min: zoneMin}, ns: ns, min: zoneMin, max: zoneMax, tag: zoneName}));
-
- var tagDoc = configDB.tags.findOne();
- assert.eq(ns, tagDoc.ns);
- assert.eq(zoneMin, tagDoc.min);
- assert.eq(zoneMax, tagDoc.max);
- assert.eq(zoneName, tagDoc.tag);
-
- assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: proposedShardKey}));
-
- assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: zoneName}));
+'use strict';
+
+var st = new ShardingTest({mongos: 1, shards: 3});
+var kDbName = 'test';
+var kCollName = 'foo';
+var ns = kDbName + '.' + kCollName;
+var zoneName = 'zoneName';
+var mongos = st.s0;
+var testDB = mongos.getDB(kDbName);
+var configDB = mongos.getDB('config');
+var shardName = st.shard0.shardName;
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+
+/**
+ * Test that shardCollection correctly validates that a zone is associated with a shard.
+ */
+function testShardZoneAssociationValidation(proposedShardKey, numberLongMin, numberLongMax) {
+ var zoneMin = numberLongMin ? {x: NumberLong(0)} : {x: 0};
+ var zoneMax = numberLongMax ? {x: NumberLong(10)} : {x: 10};
+ assert.commandWorked(configDB.tags.insert(
+ {_id: {ns: ns, min: zoneMin}, ns: ns, min: zoneMin, max: zoneMax, tag: zoneName}));
+
+ var tagDoc = configDB.tags.findOne();
+ assert.eq(ns, tagDoc.ns);
+ assert.eq(zoneMin, tagDoc.min);
+ assert.eq(zoneMax, tagDoc.max);
+ assert.eq(zoneName, tagDoc.tag);
+
+ assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: proposedShardKey}));
+
+ assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: zoneName}));
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: proposedShardKey}));
+
+ assert.commandWorked(testDB.runCommand({drop: kCollName}));
+}
+
+/**
+ * Test that shardCollection correctly validates shard key against existing zones.
+ */
+function testShardKeyValidation(proposedShardKey, numberLongMin, numberLongMax, success) {
+ assert.commandWorked(testDB.foo.createIndex(proposedShardKey));
+ assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: zoneName}));
+
+ var zoneMin = numberLongMin ? {x: NumberLong(0)} : {x: 0};
+ var zoneMax = numberLongMax ? {x: NumberLong(10)} : {x: 10};
+ assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: zoneMin, max: zoneMax, zone: zoneName}));
+
+ var tagDoc = configDB.tags.findOne();
+ jsTestLog("xxx tag doc " + tojson(tagDoc));
+ assert.eq(ns, tagDoc.ns);
+ assert.eq(zoneMin, tagDoc.min);
+ assert.eq(zoneMax, tagDoc.max);
+ assert.eq(zoneName, tagDoc.tag);
+
+ if (success) {
assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: proposedShardKey}));
-
- assert.commandWorked(testDB.runCommand({drop: kCollName}));
+ } else {
+ assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: proposedShardKey}));
}
- /**
- * Test that shardCollection correctly validates shard key against existing zones.
- */
- function testShardKeyValidation(proposedShardKey, numberLongMin, numberLongMax, success) {
- assert.commandWorked(testDB.foo.createIndex(proposedShardKey));
- assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: zoneName}));
+ assert.commandWorked(testDB.runCommand({drop: kCollName}));
+}
+
+/**
+ * Test that shardCollection uses existing zone ranges to split chunks.
+ */
+function testChunkSplits(collectionExists) {
+ var shardKey = {x: 1};
+ var ranges =
+ [{min: {x: 0}, max: {x: 10}}, {min: {x: 10}, max: {x: 20}}, {min: {x: 30}, max: {x: 40}}];
+ var shards = configDB.shards.find().toArray();
+ assert.eq(ranges.length, shards.length);
+ if (collectionExists) {
+ assert.commandWorked(testDB.foo.createIndex(shardKey));
+ }
- var zoneMin = numberLongMin ? {x: NumberLong(0)} : {x: 0};
- var zoneMax = numberLongMax ? {x: NumberLong(10)} : {x: 10};
+ // create zones:
+    // shard0 - zonename0 - [0, 10)
+    // shard1 - zonename1 - [10, 20)
+    // shard2 - zonename2 - [30, 40)
+ for (var i = 0; i < shards.length; i++) {
+ assert.commandWorked(
+ st.s.adminCommand({addShardToZone: shards[i]._id, zone: zoneName + i}));
assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: ns, min: zoneMin, max: zoneMax, zone: zoneName}));
-
- var tagDoc = configDB.tags.findOne();
- jsTestLog("xxx tag doc " + tojson(tagDoc));
- assert.eq(ns, tagDoc.ns);
- assert.eq(zoneMin, tagDoc.min);
- assert.eq(zoneMax, tagDoc.max);
- assert.eq(zoneName, tagDoc.tag);
-
- if (success) {
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: proposedShardKey}));
- } else {
- assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: proposedShardKey}));
- }
-
- assert.commandWorked(testDB.runCommand({drop: kCollName}));
+ {updateZoneKeyRange: ns, min: ranges[i].min, max: ranges[i].max, zone: zoneName + i}));
+ }
+ assert.eq(
+ configDB.tags.find().count(), shards.length, "failed to create tag documents correctly");
+ assert.eq(configDB.chunks.find({ns: ns}).count(),
+ 0,
+ "expect to see no chunk documents for the collection before shardCollection is run");
+
+ // shard the collection and validate the resulting chunks
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: shardKey}));
+ var expectedChunks = [
+ {range: [{x: {"$minKey": 1}}, {x: 0}], shardId: st.shard0.shardName},
+ {range: [{x: 0}, {x: 10}], shardId: st.shard0.shardName}, // pre-defined
+ {range: [{x: 10}, {x: 20}], shardId: st.shard1.shardName},
+ {range: [{x: 20}, {x: 30}], shardId: st.shard1.shardName}, // pre-defined
+ {range: [{x: 30}, {x: 40}], shardId: st.shard2.shardName}, // pre-defined
+ {range: [{x: 40}, {x: {"$maxKey": 1}}], shardId: st.shard2.shardName}
+ ];
+ var chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+ assert.eq(chunkDocs.length,
+ expectedChunks.length,
+ "shardCollection failed to create chunk documents correctly");
+ for (var i = 0; i < chunkDocs.length; i++) {
+ var errMsg = "expect to see chunk " + tojson(expectedChunks[i]) + " but found chunk " +
+ tojson(chunkDocs[i]);
+ assert.eq(expectedChunks[i].range[0], chunkDocs[i].min, errMsg);
+ assert.eq(expectedChunks[i].range[1], chunkDocs[i].max, errMsg);
+ assert.eq(expectedChunks[i].shardId, chunkDocs[i].shard, errMsg);
}
- /**
- * Test that shardCollection uses existing zone ranges to split chunks.
- */
- function testChunkSplits(collectionExists) {
- var shardKey = {x: 1};
- var ranges = [
- {min: {x: 0}, max: {x: 10}},
- {min: {x: 10}, max: {x: 20}},
- {min: {x: 30}, max: {x: 40}}
- ];
- var shards = configDB.shards.find().toArray();
- assert.eq(ranges.length, shards.length);
- if (collectionExists) {
- assert.commandWorked(testDB.foo.createIndex(shardKey));
- }
-
- // create zones:
- // shard0 - zonename0 - [0, 10)
- // shard1 - zonename0 - [10, 20)
- // shard2 - zonename0 - [30, 40)
- for (var i = 0; i < shards.length; i++) {
- assert.commandWorked(
- st.s.adminCommand({addShardToZone: shards[i]._id, zone: zoneName + i}));
- assert.commandWorked(st.s.adminCommand({
- updateZoneKeyRange: ns,
- min: ranges[i].min,
- max: ranges[i].max,
- zone: zoneName + i
- }));
- }
- assert.eq(configDB.tags.find().count(),
- shards.length,
- "failed to create tag documents correctly");
- assert.eq(
- configDB.chunks.find({ns: ns}).count(),
- 0,
- "expect to see no chunk documents for the collection before shardCollection is run");
-
- // shard the collection and validate the resulting chunks
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: shardKey}));
- var expectedChunks = [
- {range: [{x: {"$minKey": 1}}, {x: 0}], shardId: st.shard0.shardName},
- {range: [{x: 0}, {x: 10}], shardId: st.shard0.shardName}, // pre-defined
- {range: [{x: 10}, {x: 20}], shardId: st.shard1.shardName},
- {range: [{x: 20}, {x: 30}], shardId: st.shard1.shardName}, // pre-defined
- {range: [{x: 30}, {x: 40}], shardId: st.shard2.shardName}, // pre-defined
- {range: [{x: 40}, {x: {"$maxKey": 1}}], shardId: st.shard2.shardName}
- ];
- var chunkDocs = configDB.chunks.find({ns: ns}).toArray();
- assert.eq(chunkDocs.length,
- expectedChunks.length,
- "shardCollection failed to create chunk documents correctly");
- for (var i = 0; i < chunkDocs.length; i++) {
- var errMsg = "expect to see chunk " + tojson(expectedChunks[i]) + " but found chunk " +
- tojson(chunkDocs[i]);
- assert.eq(expectedChunks[i].range[0], chunkDocs[i].min, errMsg);
- assert.eq(expectedChunks[i].range[1], chunkDocs[i].max, errMsg);
- assert.eq(expectedChunks[i].shardId, chunkDocs[i].shard, errMsg);
- }
-
- assert.commandWorked(testDB.runCommand({drop: kCollName}));
+ assert.commandWorked(testDB.runCommand({drop: kCollName}));
+}
+
+/**
+ * Tests that a non-empty collection associated with zones can be sharded.
+ */
+function testNonemptyZonedCollection() {
+ var shardKey = {x: 1};
+ var shards = configDB.shards.find().toArray();
+ var testColl = testDB.getCollection(kCollName);
+ var ranges =
+ [{min: {x: 0}, max: {x: 10}}, {min: {x: 10}, max: {x: 20}}, {min: {x: 20}, max: {x: 40}}];
+
+ for (let i = 0; i < 40; i++) {
+ assert.writeOK(testColl.insert({x: i}));
}
- /**
- * Tests that a non-empty collection associated with zones can be sharded.
- */
- function testNonemptyZonedCollection() {
- var shardKey = {x: 1};
- var shards = configDB.shards.find().toArray();
- var testColl = testDB.getCollection(kCollName);
- var ranges = [
- {min: {x: 0}, max: {x: 10}},
- {min: {x: 10}, max: {x: 20}},
- {min: {x: 20}, max: {x: 40}}
- ];
-
- for (let i = 0; i < 40; i++) {
- assert.writeOK(testColl.insert({x: i}));
- }
-
- assert.commandWorked(testColl.createIndex(shardKey));
-
- for (let i = 0; i < shards.length; i++) {
- assert.commandWorked(
- mongos.adminCommand({addShardToZone: shards[i]._id, zone: zoneName + i}));
- assert.commandWorked(mongos.adminCommand({
- updateZoneKeyRange: ns,
- min: ranges[i].min,
- max: ranges[i].max,
- zone: zoneName + i
- }));
- }
-
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: shardKey}));
-
- // Check that there is initially 1 chunk.
- assert.eq(1, configDB.chunks.count({ns: ns}));
-
- st.startBalancer();
-
- // Check that the chunks were moved properly.
- assert.soon(() => {
- let res = configDB.chunks.count({ns: ns});
- return res === 5;
- }, 'balancer never ran', 10 * 60 * 1000, 1000);
-
- assert.commandWorked(testDB.runCommand({drop: kCollName}));
+ assert.commandWorked(testColl.createIndex(shardKey));
+
+ for (let i = 0; i < shards.length; i++) {
+ assert.commandWorked(
+ mongos.adminCommand({addShardToZone: shards[i]._id, zone: zoneName + i}));
+ assert.commandWorked(mongos.adminCommand(
+ {updateZoneKeyRange: ns, min: ranges[i].min, max: ranges[i].max, zone: zoneName + i}));
}
- // test that shardCollection checks that a zone is associated with a shard.
- testShardZoneAssociationValidation({x: 1}, false, false);
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: shardKey}));
+
+ // Check that there is initially 1 chunk.
+ assert.eq(1, configDB.chunks.count({ns: ns}));
+
+ st.startBalancer();
+
+ // Check that the chunks were moved properly.
+ assert.soon(() => {
+ let res = configDB.chunks.count({ns: ns});
+ return res === 5;
+ }, 'balancer never ran', 10 * 60 * 1000, 1000);
+
+ assert.commandWorked(testDB.runCommand({drop: kCollName}));
+}
+
+// test that shardCollection checks that a zone is associated with a shard.
+testShardZoneAssociationValidation({x: 1}, false, false);
- // test that shardCollection uses existing zones to validate shard key
- testShardKeyValidation({x: 1}, false, false, true);
+// test that shardCollection uses existing zones to validate shard key
+testShardKeyValidation({x: 1}, false, false, true);
- // cannot use a completely different key from the zone shard key or a key
- // that has the zone shard key as a prefix is not allowed.
- testShardKeyValidation({y: 1}, false, false, false);
- testShardKeyValidation({x: 1, y: 1}, false, false, false);
+// using a key completely different from the zone shard key, or a key that merely has the
+// zone shard key as a prefix, is not allowed.
+testShardKeyValidation({y: 1}, false, false, false);
+testShardKeyValidation({x: 1, y: 1}, false, false, false);
- // can only do hash sharding when the boundaries are of type NumberLong.
- testShardKeyValidation({x: "hashed"}, false, false, false);
- testShardKeyValidation({x: "hashed"}, true, false, false);
- testShardKeyValidation({x: "hashed"}, false, true, false);
- testShardKeyValidation({x: "hashed"}, true, true, true);
+// hashed sharding is only allowed when the zone boundaries are of type NumberLong.
+testShardKeyValidation({x: "hashed"}, false, false, false);
+testShardKeyValidation({x: "hashed"}, true, false, false);
+testShardKeyValidation({x: "hashed"}, false, true, false);
+testShardKeyValidation({x: "hashed"}, true, true, true);
- assert.commandWorked(st.s.adminCommand({removeShardFromZone: shardName, zone: zoneName}));
+assert.commandWorked(st.s.adminCommand({removeShardFromZone: shardName, zone: zoneName}));
- // test that shardCollection uses zone ranges to split chunks
+// test that shardCollection uses zone ranges to split chunks
- testChunkSplits(false);
- testChunkSplits(true);
+testChunkSplits(false);
+testChunkSplits(true);
- testNonemptyZonedCollection();
+testNonemptyZonedCollection();
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/shard_collection_verify_initial_chunks.js b/jstests/sharding/shard_collection_verify_initial_chunks.js
index e7072132b11..65c5897371e 100644
--- a/jstests/sharding/shard_collection_verify_initial_chunks.js
+++ b/jstests/sharding/shard_collection_verify_initial_chunks.js
@@ -3,55 +3,47 @@
* and empty/non-empty collections.
*/
(function() {
- 'use strict';
-
- let st = new ShardingTest({mongos: 1, shards: 2});
- let mongos = st.s0;
-
- let config = mongos.getDB("config");
- let db = mongos.getDB('TestDB');
-
- assert.commandWorked(mongos.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard1.shardName);
-
- function checkChunkCounts(collName, chunksOnShard0, chunksOnShard1) {
- let counts = st.chunkCounts(collName, 'TestDB');
- assert.eq(chunksOnShard0,
- counts[st.shard0.shardName],
- 'Count mismatch on shard0: ' + tojson(counts));
- assert.eq(chunksOnShard1,
- counts[st.shard1.shardName],
- 'Count mismatch on shard1: ' + tojson(counts));
- }
-
- // Unsupported: Range sharding + numInitialChunks
- assert.commandFailed(mongos.adminCommand(
- {shardCollection: 'TestDB.RangeCollEmpty', key: {aKey: 1}, numInitialChunks: 6}));
-
- // Unsupported: Hashed sharding + numInitialChunks + non-empty collection
- assert.writeOK(db.HashedCollNotEmpty.insert({aKey: 1}));
- assert.commandWorked(db.HashedCollNotEmpty.createIndex({aKey: "hashed"}));
- assert.commandFailed(mongos.adminCommand({
- shardCollection: 'TestDB.HashedCollNotEmpty',
- key: {aKey: "hashed"},
- numInitialChunks: 6
- }));
-
- // Supported: Hashed sharding + numInitialChunks + empty collection
- // Expected: Even chunk distribution
- assert.commandWorked(db.HashedCollEmpty.createIndex({aKey: "hashed"}));
- assert.commandWorked(mongos.adminCommand(
- {shardCollection: 'TestDB.HashedCollEmpty', key: {aKey: "hashed"}, numInitialChunks: 6}));
- checkChunkCounts('HashedCollEmpty', 3, 3);
-
- // Supported: Hashed sharding + numInitialChunks + non-existent collection
- // Expected: Even chunk distribution
- assert.commandWorked(mongos.adminCommand({
- shardCollection: 'TestDB.HashedCollNonExistent',
- key: {aKey: "hashed"},
- numInitialChunks: 6
- }));
- checkChunkCounts('HashedCollNonExistent', 3, 3);
-
- st.stop();
+'use strict';
+
+let st = new ShardingTest({mongos: 1, shards: 2});
+let mongos = st.s0;
+
+let config = mongos.getDB("config");
+let db = mongos.getDB('TestDB');
+
+assert.commandWorked(mongos.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard1.shardName);
+
+function checkChunkCounts(collName, chunksOnShard0, chunksOnShard1) {
+ let counts = st.chunkCounts(collName, 'TestDB');
+ assert.eq(
+ chunksOnShard0, counts[st.shard0.shardName], 'Count mismatch on shard0: ' + tojson(counts));
+ assert.eq(
+ chunksOnShard1, counts[st.shard1.shardName], 'Count mismatch on shard1: ' + tojson(counts));
+}
+
+// Unsupported: Range sharding + numInitialChunks
+assert.commandFailed(mongos.adminCommand(
+ {shardCollection: 'TestDB.RangeCollEmpty', key: {aKey: 1}, numInitialChunks: 6}));
+
+// Unsupported: Hashed sharding + numInitialChunks + non-empty collection
+assert.writeOK(db.HashedCollNotEmpty.insert({aKey: 1}));
+assert.commandWorked(db.HashedCollNotEmpty.createIndex({aKey: "hashed"}));
+assert.commandFailed(mongos.adminCommand(
+ {shardCollection: 'TestDB.HashedCollNotEmpty', key: {aKey: "hashed"}, numInitialChunks: 6}));
+
+// Supported: Hashed sharding + numInitialChunks + empty collection
+// Expected: Even chunk distribution
+assert.commandWorked(db.HashedCollEmpty.createIndex({aKey: "hashed"}));
+assert.commandWorked(mongos.adminCommand(
+ {shardCollection: 'TestDB.HashedCollEmpty', key: {aKey: "hashed"}, numInitialChunks: 6}));
+checkChunkCounts('HashedCollEmpty', 3, 3);
+
+// Supported: Hashed sharding + numInitialChunks + non-existent collection
+// Expected: Even chunk distribution
+assert.commandWorked(mongos.adminCommand(
+ {shardCollection: 'TestDB.HashedCollNonExistent', key: {aKey: "hashed"}, numInitialChunks: 6}));
+checkChunkCounts('HashedCollNonExistent', 3, 3);
+
+st.stop();
})();
diff --git a/jstests/sharding/shard_config_db_collections.js b/jstests/sharding/shard_config_db_collections.js
index 73e711946c1..8f8f324957b 100644
--- a/jstests/sharding/shard_config_db_collections.js
+++ b/jstests/sharding/shard_config_db_collections.js
@@ -1,52 +1,50 @@
(function() {
- 'use strict';
+'use strict';
- // Database-level tests
- {
- var st = new ShardingTest({shards: 2});
- var config = st.s.getDB('config');
- var admin = st.s.getDB('admin');
+// Database-level tests
+{
+ var st = new ShardingTest({shards: 2});
+ var config = st.s.getDB('config');
+ var admin = st.s.getDB('admin');
- // At first, there should not be an entry for config
- assert.eq(0, config.databases.count({"_id": "config"}));
+ // At first, there should not be an entry for config
+ assert.eq(0, config.databases.count({"_id": "config"}));
- // Test that we can enable sharding on the config db
- assert.commandWorked(admin.runCommand({enableSharding: 'config'}));
+ // Test that we can enable sharding on the config db
+ assert.commandWorked(admin.runCommand({enableSharding: 'config'}));
- // We should never have a metadata doc for config, it is generated in-mem
- assert.eq(0, config.databases.count({"_id": "config"}));
+    // We should never have a metadata doc for config; it is generated in memory.
+ assert.eq(0, config.databases.count({"_id": "config"}));
- // Test that you cannot set the primary shard for config (not even to 'config')
- assert.commandFailed(admin.runCommand({movePrimary: 'config', to: st.shard0.shardName}));
- assert.commandFailed(admin.runCommand({movePrimary: 'config', to: 'config'}));
+ // Test that you cannot set the primary shard for config (not even to 'config')
+ assert.commandFailed(admin.runCommand({movePrimary: 'config', to: st.shard0.shardName}));
+ assert.commandFailed(admin.runCommand({movePrimary: 'config', to: 'config'}));
- st.stop();
- }
+ st.stop();
+}
- // Test that only system.sessions may be sharded.
- {
- var st = new ShardingTest({shards: 2});
- var admin = st.s.getDB('admin');
+// Test that only system.sessions may be sharded.
+{
+ var st = new ShardingTest({shards: 2});
+ var admin = st.s.getDB('admin');
- assert.commandWorked(
- admin.runCommand({shardCollection: "config.system.sessions", key: {_id: 1}}));
- assert.eq(0, st.s.getDB('config').chunks.count({"shard": "config"}));
+ assert.commandWorked(
+ admin.runCommand({shardCollection: "config.system.sessions", key: {_id: 1}}));
+ assert.eq(0, st.s.getDB('config').chunks.count({"shard": "config"}));
- assert.commandFailed(
- admin.runCommand({shardCollection: "config.anythingelse", key: {_id: 1}}));
+ assert.commandFailed(admin.runCommand({shardCollection: "config.anythingelse", key: {_id: 1}}));
- st.stop();
- }
+ st.stop();
+}
- // Cannot shard things in config without shards.
- {
- var st = new ShardingTest({shards: 0});
- var admin = st.s.getDB('admin');
+// Cannot shard things in config without shards.
+{
+ var st = new ShardingTest({shards: 0});
+ var admin = st.s.getDB('admin');
- assert.commandFailed(
- admin.runCommand({shardCollection: "config.system.sessions", key: {_id: 1}}));
-
- st.stop();
- }
+ assert.commandFailed(
+ admin.runCommand({shardCollection: "config.system.sessions", key: {_id: 1}}));
+ st.stop();
+}
})();
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index 1c8415662a7..8a5c19d1eb9 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -1,37 +1,37 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({name: "shard_existing", shards: 2, mongos: 1, other: {chunkSize: 1}});
- var db = s.getDB("test");
+var s = new ShardingTest({name: "shard_existing", shards: 2, mongos: 1, other: {chunkSize: 1}});
+var db = s.getDB("test");
- var stringSize = 10000;
- var numDocs = 2000;
+var stringSize = 10000;
+var numDocs = 2000;
- // we want a lot of data, so lets make a string to cheat :)
- var bigString = new Array(stringSize).toString();
- var docSize = Object.bsonsize({_id: numDocs, s: bigString});
- var totalSize = docSize * numDocs;
- print("NumDocs: " + numDocs + " DocSize: " + docSize + " TotalSize: " + totalSize);
+// we want a lot of data, so let's make a string to cheat :)
+var bigString = new Array(stringSize).toString();
+var docSize = Object.bsonsize({_id: numDocs, s: bigString});
+var totalSize = docSize * numDocs;
+print("NumDocs: " + numDocs + " DocSize: " + docSize + " TotalSize: " + totalSize);
- var bulk = db.data.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, s: bigString});
- }
- assert.writeOK(bulk.execute());
+var bulk = db.data.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, s: bigString});
+}
+assert.writeOK(bulk.execute());
- var avgObjSize = db.data.stats().avgObjSize;
- var dataSize = db.data.stats().size;
- assert.lte(totalSize, dataSize);
+var avgObjSize = db.data.stats().avgObjSize;
+var dataSize = db.data.stats().size;
+assert.lte(totalSize, dataSize);
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- var res = s.adminCommand({shardcollection: "test.data", key: {_id: 1}});
- printjson(res);
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+var res = s.adminCommand({shardcollection: "test.data", key: {_id: 1}});
+printjson(res);
- // number of chunks should be approx equal to the total data size / half the chunk size
- var numChunks = s.config.chunks.find({ns: 'test.data'}).itcount();
- var guess = Math.ceil(dataSize / (512 * 1024 + avgObjSize));
- assert(Math.abs(numChunks - guess) < 2, "not right number of chunks");
+// the number of chunks should be roughly the total data size divided by half the chunk size
+var numChunks = s.config.chunks.find({ns: 'test.data'}).itcount();
+var guess = Math.ceil(dataSize / (512 * 1024 + avgObjSize));
+assert(Math.abs(numChunks - guess) < 2, "not right number of chunks");
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/shard_existing_coll_chunk_count.js b/jstests/sharding/shard_existing_coll_chunk_count.js
index 4910b5e8964..91a6abca2ee 100644
--- a/jstests/sharding/shard_existing_coll_chunk_count.js
+++ b/jstests/sharding/shard_existing_coll_chunk_count.js
@@ -5,171 +5,170 @@
* @tags: [requires_persistence]
*/
(function() {
- 'use strict';
- load('jstests/sharding/autosplit_include.js');
-
- // TODO (SERVER-37699): Lower logging verbosity.
- var s = new ShardingTest({
- name: "shard_existing_coll_chunk_count",
- shards: 1,
- mongos: 1,
- other: {enableAutoSplit: true},
- });
-
- assert.commandWorked(s.s.adminCommand({enablesharding: "test"}));
-
- var collNum = 0;
- var overhead = Object.bsonsize({_id: ObjectId(), i: 1, pad: ""});
-
- var getNumberChunks = function(ns) {
- return s.getDB("config").getCollection("chunks").count({ns});
- };
-
- var runCase = function(opts) {
- // Expected options.
- assert.gte(opts.docSize, 0);
- assert.gte(opts.stages.length, 2);
-
- // Compute padding.
- if (opts.docSize < overhead) {
- var pad = "";
- } else {
- var pad = (new Array(opts.docSize - overhead + 1)).join(' ');
- }
-
- collNum++;
- var db = s.getDB("test");
- var collName = "coll" + collNum;
- var coll = db.getCollection(collName);
- var i = 0;
- var limit = 0;
- var stageNum = 0;
- var stage = opts.stages[stageNum];
-
- // Insert initial docs.
- var bulk = coll.initializeUnorderedBulkOp();
+'use strict';
+load('jstests/sharding/autosplit_include.js');
+
+// TODO (SERVER-37699): Lower logging verbosity.
+var s = new ShardingTest({
+ name: "shard_existing_coll_chunk_count",
+ shards: 1,
+ mongos: 1,
+ other: {enableAutoSplit: true},
+});
+
+assert.commandWorked(s.s.adminCommand({enablesharding: "test"}));
+
+var collNum = 0;
+var overhead = Object.bsonsize({_id: ObjectId(), i: 1, pad: ""});
+
+var getNumberChunks = function(ns) {
+ return s.getDB("config").getCollection("chunks").count({ns});
+};
+
+var runCase = function(opts) {
+ // Expected options.
+ assert.gte(opts.docSize, 0);
+ assert.gte(opts.stages.length, 2);
+
+ // Compute padding.
+ if (opts.docSize < overhead) {
+ var pad = "";
+ } else {
+ var pad = (new Array(opts.docSize - overhead + 1)).join(' ');
+ }
+
+ collNum++;
+ var db = s.getDB("test");
+ var collName = "coll" + collNum;
+ var coll = db.getCollection(collName);
+ var i = 0;
+ var limit = 0;
+ var stageNum = 0;
+ var stage = opts.stages[stageNum];
+
+ // Insert initial docs.
+ var bulk = coll.initializeUnorderedBulkOp();
+ limit += stage.numDocsToInsert;
+ for (; i < limit; i++) {
+ bulk.insert({i, pad});
+ }
+ assert.writeOK(bulk.execute());
+
+ // Create shard key index.
+ assert.commandWorked(coll.createIndex({i: 1}));
+
+ // Shard collection.
+ assert.commandWorked(s.s.adminCommand({shardcollection: coll.getFullName(), key: {i: 1}}));
+
+ // Confirm initial number of chunks.
+ var numChunks = getNumberChunks(coll.getFullName());
+ assert.eq(numChunks,
+ stage.expectedNumChunks,
+ 'in ' + coll.getFullName() + ' expected ' + stage.expectedNumChunks +
+ ' initial chunks, but found ' + numChunks + '\nopts: ' + tojson(opts) +
+ '\nchunks:\n' + s.getChunksString(coll.getFullName()));
+
+ // Do the rest of the stages.
+ for (stageNum = 1; stageNum < opts.stages.length; stageNum++) {
+ stage = opts.stages[stageNum];
+
+ // Insert the later docs (one at a time, to maximise the autosplit effects).
limit += stage.numDocsToInsert;
for (; i < limit; i++) {
- bulk.insert({i, pad});
- }
- assert.writeOK(bulk.execute());
+ coll.insert({i, pad});
- // Create shard key index.
- assert.commandWorked(coll.createIndex({i: 1}));
-
- // Shard collection.
- assert.commandWorked(s.s.adminCommand({shardcollection: coll.getFullName(), key: {i: 1}}));
+ waitForOngoingChunkSplits(s);
+ }
- // Confirm initial number of chunks.
+ // Confirm number of chunks for this stage.
var numChunks = getNumberChunks(coll.getFullName());
- assert.eq(numChunks,
- stage.expectedNumChunks,
- 'in ' + coll.getFullName() + ' expected ' + stage.expectedNumChunks +
- ' initial chunks, but found ' + numChunks + '\nopts: ' + tojson(opts) +
- '\nchunks:\n' + s.getChunksString(coll.getFullName()));
-
- // Do the rest of the stages.
- for (stageNum = 1; stageNum < opts.stages.length; stageNum++) {
- stage = opts.stages[stageNum];
-
- // Insert the later docs (one at a time, to maximise the autosplit effects).
- limit += stage.numDocsToInsert;
- for (; i < limit; i++) {
- coll.insert({i, pad});
-
- waitForOngoingChunkSplits(s);
- }
-
- // Confirm number of chunks for this stage.
- var numChunks = getNumberChunks(coll.getFullName());
- assert.gte(numChunks,
- stage.expectedNumChunks,
- 'in ' + coll.getFullName() + ' expected ' + stage.expectedNumChunks +
- ' chunks for stage ' + stageNum + ', but found ' + numChunks +
- '\nopts: ' + tojson(opts) + '\nchunks:\n' +
- s.getChunksString(coll.getFullName()));
- }
- };
-
- // Original problematic case.
- runCase({
- docSize: 0,
- stages: [
- {numDocsToInsert: 20000, expectedNumChunks: 1},
- {numDocsToInsert: 7, expectedNumChunks: 1},
- {numDocsToInsert: 1000, expectedNumChunks: 1},
- ],
- });
-
- // Original problematic case (worse).
- runCase({
- docSize: 0,
- stages: [
- {numDocsToInsert: 90000, expectedNumChunks: 1},
- {numDocsToInsert: 7, expectedNumChunks: 1},
- {numDocsToInsert: 1000, expectedNumChunks: 1},
- ],
- });
-
- // Pathological case #1.
- runCase({
- docSize: 522,
- stages: [
- {numDocsToInsert: 8191, expectedNumChunks: 1},
- {numDocsToInsert: 2, expectedNumChunks: 1},
- {numDocsToInsert: 1000, expectedNumChunks: 1},
- ],
- });
-
- // Pathological case #2.
- runCase({
- docSize: 522,
- stages: [
- {numDocsToInsert: 8192, expectedNumChunks: 1},
- {numDocsToInsert: 8192, expectedNumChunks: 1},
- ],
- });
-
- // Lower chunksize to 1MB, and restart the mongod for it to take. We also
- // need to restart mongos for the case of the last-stable suite where the
- // shard is also last-stable.
- assert.writeOK(
- s.getDB("config").getCollection("settings").update({_id: "chunksize"}, {$set: {value: 1}}, {
- upsert: true
- }));
-
- s.restartMongos(0);
- s.restartShardRS(0);
-
- // Original problematic case, scaled down to smaller chunksize.
- runCase({
- docSize: 0,
- stages: [
- {numDocsToInsert: 10000, expectedNumChunks: 1},
- {numDocsToInsert: 10, expectedNumChunks: 1},
- {numDocsToInsert: 20, expectedNumChunks: 1},
- {numDocsToInsert: 40, expectedNumChunks: 1},
- {numDocsToInsert: 1000, expectedNumChunks: 1},
- ],
- });
-
- // Docs just smaller than half chunk size.
- runCase({
- docSize: 510 * 1024,
- stages: [
- {numDocsToInsert: 10, expectedNumChunks: 6},
- {numDocsToInsert: 10, expectedNumChunks: 10},
- ],
- });
-
- // Docs just larger than half chunk size.
- runCase({
- docSize: 514 * 1024,
- stages: [
- {numDocsToInsert: 10, expectedNumChunks: 10},
- {numDocsToInsert: 10, expectedNumChunks: 18},
- ],
- });
-
- s.stop();
+ assert.gte(numChunks,
+ stage.expectedNumChunks,
+ 'in ' + coll.getFullName() + ' expected ' + stage.expectedNumChunks +
+ ' chunks for stage ' + stageNum + ', but found ' + numChunks + '\nopts: ' +
+ tojson(opts) + '\nchunks:\n' + s.getChunksString(coll.getFullName()));
+ }
+};
+
+// Original problematic case.
+runCase({
+ docSize: 0,
+ stages: [
+ {numDocsToInsert: 20000, expectedNumChunks: 1},
+ {numDocsToInsert: 7, expectedNumChunks: 1},
+ {numDocsToInsert: 1000, expectedNumChunks: 1},
+ ],
+});
+
+// Original problematic case (worse).
+runCase({
+ docSize: 0,
+ stages: [
+ {numDocsToInsert: 90000, expectedNumChunks: 1},
+ {numDocsToInsert: 7, expectedNumChunks: 1},
+ {numDocsToInsert: 1000, expectedNumChunks: 1},
+ ],
+});
+
+// Pathological case #1.
+runCase({
+ docSize: 522,
+ stages: [
+ {numDocsToInsert: 8191, expectedNumChunks: 1},
+ {numDocsToInsert: 2, expectedNumChunks: 1},
+ {numDocsToInsert: 1000, expectedNumChunks: 1},
+ ],
+});
+
+// Pathological case #2.
+runCase({
+ docSize: 522,
+ stages: [
+ {numDocsToInsert: 8192, expectedNumChunks: 1},
+ {numDocsToInsert: 8192, expectedNumChunks: 1},
+ ],
+});
+
+// Lower the chunk size to 1MB and restart the mongod for the new value to take
+// effect. We also need to restart mongos for the case of the last-stable suite
+// where the shard is also last-stable.
+assert.writeOK(
+ s.getDB("config").getCollection("settings").update({_id: "chunksize"}, {$set: {value: 1}}, {
+ upsert: true
+ }));
+
+s.restartMongos(0);
+s.restartShardRS(0);
+
+// Original problematic case, scaled down to smaller chunksize.
+runCase({
+ docSize: 0,
+ stages: [
+ {numDocsToInsert: 10000, expectedNumChunks: 1},
+ {numDocsToInsert: 10, expectedNumChunks: 1},
+ {numDocsToInsert: 20, expectedNumChunks: 1},
+ {numDocsToInsert: 40, expectedNumChunks: 1},
+ {numDocsToInsert: 1000, expectedNumChunks: 1},
+ ],
+});
+
+// Docs just smaller than half chunk size.
+runCase({
+ docSize: 510 * 1024,
+ stages: [
+ {numDocsToInsert: 10, expectedNumChunks: 6},
+ {numDocsToInsert: 10, expectedNumChunks: 10},
+ ],
+});
+
+// Docs just larger than half chunk size.
+runCase({
+ docSize: 514 * 1024,
+ stages: [
+ {numDocsToInsert: 10, expectedNumChunks: 10},
+ {numDocsToInsert: 10, expectedNumChunks: 18},
+ ],
+});
+
+s.stop();
})();
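The chunk-count assertions above call a getNumberChunks() helper defined earlier in this test, outside the hunk shown here. A minimal sketch of such a helper, assuming the pre-4.4 config.chunks schema in which each chunk document carries the full collection namespace in an "ns" field:

// Count the chunks the config server currently tracks for a namespace.
// Sketch only: the schema assumption above is not part of this patch.
function getNumberChunksSketch(mongosConn, fullName) {
    return mongosConn.getDB("config").getCollection("chunks").count({ns: fullName});
}

// Example usage with the ShardingTest handle used by this test:
// var numChunks = getNumberChunksSketch(s.s0, coll.getFullName());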
diff --git a/jstests/sharding/shard_identity_config_update.js b/jstests/sharding/shard_identity_config_update.js
index b7c3453134f..8c5235b50a4 100644
--- a/jstests/sharding/shard_identity_config_update.js
+++ b/jstests/sharding/shard_identity_config_update.js
@@ -8,98 +8,98 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
+"use strict";
- load('jstests/replsets/rslib.js');
+load('jstests/replsets/rslib.js');
- var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
+var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
- var shardPri = st.rs0.getPrimary();
+var shardPri = st.rs0.getPrimary();
- // Note: Adding new replica set member by hand because of SERVER-24011.
+// Note: Adding new replica set member by hand because of SERVER-24011.
- var newNode = MongoRunner.runMongod(
- {configsvr: '', replSet: st.configRS.name, storageEngine: 'wiredTiger'});
+var newNode =
+ MongoRunner.runMongod({configsvr: '', replSet: st.configRS.name, storageEngine: 'wiredTiger'});
- var replConfig = st.configRS.getReplSetConfigFromNode();
- replConfig.version += 1;
- replConfig.members.push({_id: 3, host: newNode.host});
+var replConfig = st.configRS.getReplSetConfigFromNode();
+replConfig.version += 1;
+replConfig.members.push({_id: 3, host: newNode.host});
- reconfig(st.configRS, replConfig);
+reconfig(st.configRS, replConfig);
- /**
- * Returns true if the shardIdentity document has all the replica set member nodes in the
- * expectedConfigStr.
- */
- var checkConfigStrUpdated = function(conn, expectedConfigStr) {
- var shardIdentity = conn.getDB('admin').system.version.findOne({_id: 'shardIdentity'});
+/**
+ * Returns true if the shardIdentity document has all the replica set member nodes in the
+ * expectedConfigStr.
+ */
+var checkConfigStrUpdated = function(conn, expectedConfigStr) {
+ var shardIdentity = conn.getDB('admin').system.version.findOne({_id: 'shardIdentity'});
- var shardConfigsvrStr = shardIdentity.configsvrConnectionString;
- var shardConfigReplName = shardConfigsvrStr.split('/')[0];
- var expectedReplName = expectedConfigStr.split('/')[0];
+ var shardConfigsvrStr = shardIdentity.configsvrConnectionString;
+ var shardConfigReplName = shardConfigsvrStr.split('/')[0];
+ var expectedReplName = expectedConfigStr.split('/')[0];
- assert.eq(expectedReplName, shardConfigReplName);
+ assert.eq(expectedReplName, shardConfigReplName);
- var expectedHostList = expectedConfigStr.split('/')[1].split(',');
- var shardConfigHostList = shardConfigsvrStr.split('/')[1].split(',');
+ var expectedHostList = expectedConfigStr.split('/')[1].split(',');
+ var shardConfigHostList = shardConfigsvrStr.split('/')[1].split(',');
- if (expectedHostList.length != shardConfigHostList.length) {
- return false;
- }
+ if (expectedHostList.length != shardConfigHostList.length) {
+ return false;
+ }
- for (var x = 0; x < expectedHostList.length; x++) {
- if (shardConfigsvrStr.indexOf(expectedHostList[x]) == -1) {
- return false;
- }
+ for (var x = 0; x < expectedHostList.length; x++) {
+ if (shardConfigsvrStr.indexOf(expectedHostList[x]) == -1) {
+ return false;
}
+ }
- return true;
- };
+ return true;
+};
- var origConfigConnStr = st.configRS.getURL();
- var expectedConfigStr = origConfigConnStr + ',' + newNode.host;
- assert.soon(function() {
- return checkConfigStrUpdated(st.rs0.getPrimary(), expectedConfigStr);
- });
+var origConfigConnStr = st.configRS.getURL();
+var expectedConfigStr = origConfigConnStr + ',' + newNode.host;
+assert.soon(function() {
+ return checkConfigStrUpdated(st.rs0.getPrimary(), expectedConfigStr);
+});
- var secConn = st.rs0.getSecondary();
- secConn.setSlaveOk(true);
- assert.soon(function() {
- return checkConfigStrUpdated(secConn, expectedConfigStr);
- });
+var secConn = st.rs0.getSecondary();
+secConn.setSlaveOk(true);
+assert.soon(function() {
+ return checkConfigStrUpdated(secConn, expectedConfigStr);
+});
- //
- // Remove the newly added member from the config replSet while the shards are down.
- // Check that the shard identity document will be updated with the new replSet connection
- // string when they come back up.
- //
+//
+// Remove the newly added member from the config replSet while the shards are down.
+// Check that the shard identity document will be updated with the new replSet connection
+// string when they come back up.
+//
- st.rs0.stop(0);
- st.rs0.stop(1);
+st.rs0.stop(0);
+st.rs0.stop(1);
- MongoRunner.stopMongod(newNode);
+MongoRunner.stopMongod(newNode);
- replConfig = st.configRS.getReplSetConfigFromNode();
- replConfig.version += 1;
- replConfig.members.pop();
+replConfig = st.configRS.getReplSetConfigFromNode();
+replConfig.version += 1;
+replConfig.members.pop();
- reconfig(st.configRS, replConfig);
+reconfig(st.configRS, replConfig);
- st.rs0.restart(0, {shardsvr: ''});
- st.rs0.restart(1, {shardsvr: ''});
+st.rs0.restart(0, {shardsvr: ''});
+st.rs0.restart(1, {shardsvr: ''});
- st.rs0.waitForMaster();
- st.rs0.awaitSecondaryNodes();
+st.rs0.waitForMaster();
+st.rs0.awaitSecondaryNodes();
- assert.soon(function() {
- return checkConfigStrUpdated(st.rs0.getPrimary(), origConfigConnStr);
- });
+assert.soon(function() {
+ return checkConfigStrUpdated(st.rs0.getPrimary(), origConfigConnStr);
+});
- secConn = st.rs0.getSecondary();
- secConn.setSlaveOk(true);
- assert.soon(function() {
- return checkConfigStrUpdated(secConn, origConfigConnStr);
- });
+secConn = st.rs0.getSecondary();
+secConn.setSlaveOk(true);
+assert.soon(function() {
+ return checkConfigStrUpdated(secConn, origConfigConnStr);
+});
- st.stop();
+st.stop();
})();
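The reconfig() call above comes from jstests/replsets/rslib.js and wraps a replSetReconfig round-trip. A rough, non-retrying equivalent, assuming the caller has already bumped config.version as this test does:

// Apply an updated replica set configuration through the current primary.
// Sketch only: the rslib.js helper additionally handles transient errors and waits
// for the new config to propagate, which this sketch does not.
function reconfigSketch(replSetTest, updatedConfig) {
    var primary = replSetTest.getPrimary();
    assert.commandWorked(primary.adminCommand({replSetReconfig: updatedConfig}));
}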
diff --git a/jstests/sharding/shard_identity_rollback.js b/jstests/sharding/shard_identity_rollback.js
index e799a79600d..b0a3f9b891c 100644
--- a/jstests/sharding/shard_identity_rollback.js
+++ b/jstests/sharding/shard_identity_rollback.js
@@ -5,131 +5,129 @@
*/
(function() {
- "use strict";
-
- load('jstests/libs/write_concern_util.js');
-
- var st = new ShardingTest({shards: 1});
-
- var replTest = new ReplSetTest({nodes: 3});
- var nodes = replTest.startSet({shardsvr: ''});
- replTest.initiate();
-
- var priConn = replTest.getPrimary();
- var secondaries = replTest.getSecondaries();
- var configConnStr = st.configRS.getURL();
-
- // Shards start in FCV 4.0 until a config server reaches out to them. This causes storage to
- // shutdown with 4.0 compatible files, requiring rollback via refetch.
- priConn.adminCommand({setFeatureCompatibilityVersion: "4.0"});
-
- // Wait for the secondaries to have the latest oplog entries before stopping the fetcher to
- // avoid the situation where one of the secondaries will not have an overlapping oplog with
- // the other nodes once the primary is killed.
- replTest.awaitSecondaryNodes();
-
- replTest.awaitReplication();
-
- stopServerReplication(secondaries);
-
- jsTest.log("inserting shardIdentity document to primary that shouldn't replicate");
-
- var shardIdentityDoc = {
- _id: 'shardIdentity',
- configsvrConnectionString: configConnStr,
- shardName: 'newShard',
- clusterId: ObjectId()
- };
-
- assert.writeOK(priConn.getDB('admin').system.version.update(
- {_id: 'shardIdentity'}, shardIdentityDoc, {upsert: true}));
-
- // Ensure sharding state on the primary was initialized
- var res = priConn.getDB('admin').runCommand({shardingState: 1});
- assert(res.enabled, tojson(res));
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
- assert.eq(shardIdentityDoc.shardName, res.shardName);
- assert.eq(shardIdentityDoc.clusterId, res.clusterId);
-
- // Ensure sharding state on the secondaries was *not* initialized
- secondaries.forEach(function(secondary) {
- secondary.setSlaveOk(true);
- res = secondary.getDB('admin').runCommand({shardingState: 1});
- assert(!res.enabled, tojson(res));
- });
-
- // Ensure manually deleting the shardIdentity document is not allowed.
- assert.writeErrorWithCode(priConn.getDB('admin').system.version.remove({_id: 'shardIdentity'}),
- 40070);
-
- jsTest.log("shutting down primary");
- // Shut down the primary so a secondary gets elected that definitely won't have replicated the
- // shardIdentity insert, which should trigger a rollback on the original primary when it comes
- // back online.
- replTest.stop(priConn);
-
- // Disable the fail point so that the elected node can exit drain mode and finish becoming
- // primary.
- restartServerReplication(secondaries);
-
- // Wait for a new healthy primary
- var newPriConn = replTest.getPrimary();
- assert.neq(priConn, newPriConn);
- assert.writeOK(newPriConn.getDB('test').foo.insert({a: 1}, {writeConcern: {w: 'majority'}}));
-
- // Restart the original primary so it triggers a rollback of the shardIdentity insert.
- jsTest.log("Restarting original primary");
- priConn = replTest.restart(priConn);
-
- // Wait until we cannot create a connection to the former primary, which indicates that it must
- // have shut itself down during the rollback.
- jsTest.log("Waiting for original primary to rollback and shut down");
- assert.soon(
- function() {
- try {
- var newConn = new Mongo(priConn.host);
- return false;
- } catch (x) {
- return true;
- }
- },
- function() {
- var oldPriOplog = priConn.getDB('local').oplog.rs.find().sort({$natural: -1}).toArray();
- var newPriOplog =
- newPriConn.getDB('local').oplog.rs.find().sort({$natural: -1}).toArray();
- return "timed out waiting for original primary to shut down after rollback. " +
- "Old primary oplog: " + tojson(oldPriOplog) + "; new primary oplog: " +
- tojson(newPriOplog);
- },
- 90000);
-
- // Restart the original primary again. This time, the shardIdentity document should already be
- // rolled back, so there shouldn't be any rollback and the node should stay online.
- jsTest.log(
- "Restarting original primary a second time and waiting for it to successfully become " +
- "secondary");
- try {
- // Join() with the crashed mongod and ignore its bad exit status.
- MongoRunner.stopMongod(priConn);
- } catch (e) {
- // expected
- }
- priConn = replTest.restart(priConn, {shardsvr: ''});
- priConn.setSlaveOk();
-
- // Wait for the old primary to replicate the document that was written to the new primary while
- // it was shut down.
- assert.soonNoExcept(function() {
- return priConn.getDB('test').foo.findOne();
- });
-
- // Ensure that there's no sharding state on the restarted original primary, since the
- // shardIdentity doc should have been rolled back.
- res = priConn.getDB('admin').runCommand({shardingState: 1});
- assert(!res.enabled, tojson(res));
- assert.eq(null, priConn.getDB('admin').system.version.findOne({_id: 'shardIdentity'}));
+"use strict";
+
+load('jstests/libs/write_concern_util.js');
+
+var st = new ShardingTest({shards: 1});
+
+var replTest = new ReplSetTest({nodes: 3});
+var nodes = replTest.startSet({shardsvr: ''});
+replTest.initiate();
+
+var priConn = replTest.getPrimary();
+var secondaries = replTest.getSecondaries();
+var configConnStr = st.configRS.getURL();
+
+// Shards start in FCV 4.0 until a config server reaches out to them. This causes storage to
+// shut down with 4.0-compatible files, requiring rollback via refetch.
+priConn.adminCommand({setFeatureCompatibilityVersion: "4.0"});
+
+// Wait for the secondaries to have the latest oplog entries before stopping the fetcher to
+// avoid the situation where one of the secondaries will not have an overlapping oplog with
+// the other nodes once the primary is killed.
+replTest.awaitSecondaryNodes();
- replTest.stopSet();
+replTest.awaitReplication();
- st.stop();
+stopServerReplication(secondaries);
+
+jsTest.log("inserting shardIdentity document to primary that shouldn't replicate");
+
+var shardIdentityDoc = {
+ _id: 'shardIdentity',
+ configsvrConnectionString: configConnStr,
+ shardName: 'newShard',
+ clusterId: ObjectId()
+};
+
+assert.writeOK(priConn.getDB('admin').system.version.update(
+ {_id: 'shardIdentity'}, shardIdentityDoc, {upsert: true}));
+
+// Ensure sharding state on the primary was initialized
+var res = priConn.getDB('admin').runCommand({shardingState: 1});
+assert(res.enabled, tojson(res));
+assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
+assert.eq(shardIdentityDoc.shardName, res.shardName);
+assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+
+// Ensure sharding state on the secondaries was *not* initialized
+secondaries.forEach(function(secondary) {
+ secondary.setSlaveOk(true);
+ res = secondary.getDB('admin').runCommand({shardingState: 1});
+ assert(!res.enabled, tojson(res));
+});
+
+// Ensure manually deleting the shardIdentity document is not allowed.
+assert.writeErrorWithCode(priConn.getDB('admin').system.version.remove({_id: 'shardIdentity'}),
+ 40070);
+
+jsTest.log("shutting down primary");
+// Shut down the primary so a secondary gets elected that definitely won't have replicated the
+// shardIdentity insert, which should trigger a rollback on the original primary when it comes
+// back online.
+replTest.stop(priConn);
+
+// Disable the fail point so that the elected node can exit drain mode and finish becoming
+// primary.
+restartServerReplication(secondaries);
+
+// Wait for a new healthy primary
+var newPriConn = replTest.getPrimary();
+assert.neq(priConn, newPriConn);
+assert.writeOK(newPriConn.getDB('test').foo.insert({a: 1}, {writeConcern: {w: 'majority'}}));
+
+// Restart the original primary so it triggers a rollback of the shardIdentity insert.
+jsTest.log("Restarting original primary");
+priConn = replTest.restart(priConn);
+
+// Wait until we cannot create a connection to the former primary, which indicates that it must
+// have shut itself down during the rollback.
+jsTest.log("Waiting for original primary to rollback and shut down");
+assert.soon(
+ function() {
+ try {
+ var newConn = new Mongo(priConn.host);
+ return false;
+ } catch (x) {
+ return true;
+ }
+ },
+ function() {
+ var oldPriOplog = priConn.getDB('local').oplog.rs.find().sort({$natural: -1}).toArray();
+ var newPriOplog = newPriConn.getDB('local').oplog.rs.find().sort({$natural: -1}).toArray();
+ return "timed out waiting for original primary to shut down after rollback. " +
+ "Old primary oplog: " + tojson(oldPriOplog) +
+ "; new primary oplog: " + tojson(newPriOplog);
+ },
+ 90000);
+
+// Restart the original primary again. This time, the shardIdentity document should already be
+// rolled back, so there shouldn't be any rollback and the node should stay online.
+jsTest.log("Restarting original primary a second time and waiting for it to successfully become " +
+ "secondary");
+try {
+ // Join() with the crashed mongod and ignore its bad exit status.
+ MongoRunner.stopMongod(priConn);
+} catch (e) {
+ // expected
+}
+priConn = replTest.restart(priConn, {shardsvr: ''});
+priConn.setSlaveOk();
+
+// Wait for the old primary to replicate the document that was written to the new primary while
+// it was shut down.
+assert.soonNoExcept(function() {
+ return priConn.getDB('test').foo.findOne();
+});
+
+// Ensure that there's no sharding state on the restarted original primary, since the
+// shardIdentity doc should have been rolled back.
+res = priConn.getDB('admin').runCommand({shardingState: 1});
+assert(!res.enabled, tojson(res));
+assert.eq(null, priConn.getDB('admin').system.version.findOne({_id: 'shardIdentity'}));
+
+replTest.stopSet();
+
+st.stop();
})();
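The shutdown wait above uses the three-argument form of assert.soon, where the second argument may be a function so the diagnostic message is only built if the wait times out. A condensed sketch of that pattern, with the host and timeout as illustrative parameters:

// Wait until a node can no longer accept connections, deferring message construction.
// Sketch only: hostToCheck and timeoutMillis are placeholders, not part of this patch.
function waitForNodeShutdownSketch(hostToCheck, timeoutMillis) {
    assert.soon(
        function() {
            try {
                new Mongo(hostToCheck);  // Succeeds while the node is still up.
                return false;
            } catch (e) {
                return true;
            }
        },
        function() {
            return "timed out waiting for " + hostToCheck + " to shut down";
        },
        timeoutMillis);
}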
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index c87fc13478f..a068da936fb 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -6,88 +6,83 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- var numDocs = 2000;
- var baseName = "shard_insert_getlasterror_w2";
- var testDBName = baseName;
- var testCollName = 'coll';
- var replNodes = 3;
-
- // ~1KB string
- var textString = '';
- for (var i = 0; i < 40; i++) {
- textString += 'abcdefghijklmnopqrstuvwxyz';
+"use strict";
+
+var numDocs = 2000;
+var baseName = "shard_insert_getlasterror_w2";
+var testDBName = baseName;
+var testCollName = 'coll';
+var replNodes = 3;
+
+// ~1KB string
+var textString = '';
+for (var i = 0; i < 40; i++) {
+ textString += 'abcdefghijklmnopqrstuvwxyz';
+}
+
+// Spin up a sharded cluster, but do not add the shards
+var shardingTestConfig =
+ {name: baseName, mongos: 1, shards: 1, rs: {nodes: replNodes}, other: {manualAddShard: true}};
+var shardingTest = new ShardingTest(shardingTestConfig);
+
+// Get connection to the individual shard
+var replSet1 = shardingTest.rs0;
+
+// Add data to it
+var testDBReplSet1 = replSet1.getPrimary().getDB(testDBName);
+var bulk = testDBReplSet1.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({x: i, text: textString});
+}
+assert.writeOK(bulk.execute());
+
+// Get connection to mongos for the cluster
+var mongosConn = shardingTest.s;
+var testDB = mongosConn.getDB(testDBName);
+
+// Add replSet1 as only shard
+assert.commandWorked(mongosConn.adminCommand({addshard: replSet1.getURL()}));
+
+// Enable sharding on test db and its collection foo
+assert.commandWorked(mongosConn.getDB('admin').runCommand({enablesharding: testDBName}));
+testDB[testCollName].ensureIndex({x: 1});
+assert.commandWorked(mongosConn.getDB('admin').runCommand(
+ {shardcollection: testDBName + '.' + testCollName, key: {x: 1}}));
+
+// Test case where GLE should return an error
+assert.writeOK(testDB.foo.insert({_id: 'a', x: 1}));
+assert.writeError(testDB.foo.insert({_id: 'a', x: 1}, {writeConcern: {w: 2, wtimeout: 30000}}));
+
+// Add more data
+bulk = testDB.foo.initializeUnorderedBulkOp();
+for (var i = numDocs; i < 2 * numDocs; i++) {
+ bulk.insert({x: i, text: textString});
+}
+assert.writeOK(bulk.execute({w: replNodes, wtimeout: 30000}));
+
+// Take down two nodes and make sure slaveOk reads still work
+var primary = replSet1._master;
+var secondary1 = replSet1._slaves[0];
+var secondary2 = replSet1._slaves[1];
+replSet1.stop(secondary1);
+replSet1.stop(secondary2);
+replSet1.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+testDB.getMongo().adminCommand({setParameter: 1, logLevel: 1});
+testDB.getMongo().setSlaveOk();
+print("trying some queries");
+assert.soon(function() {
+ try {
+ testDB.foo.find().next();
+ } catch (e) {
+ print(e);
+ return false;
}
+ return true;
+}, "Queries took too long to complete correctly.", 2 * 60 * 1000);
- // Spin up a sharded cluster, but do not add the shards
- var shardingTestConfig = {
- name: baseName,
- mongos: 1,
- shards: 1,
- rs: {nodes: replNodes},
- other: {manualAddShard: true}
- };
- var shardingTest = new ShardingTest(shardingTestConfig);
-
- // Get connection to the individual shard
- var replSet1 = shardingTest.rs0;
-
- // Add data to it
- var testDBReplSet1 = replSet1.getPrimary().getDB(testDBName);
- var bulk = testDBReplSet1.foo.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({x: i, text: textString});
- }
- assert.writeOK(bulk.execute());
-
- // Get connection to mongos for the cluster
- var mongosConn = shardingTest.s;
- var testDB = mongosConn.getDB(testDBName);
-
- // Add replSet1 as only shard
- assert.commandWorked(mongosConn.adminCommand({addshard: replSet1.getURL()}));
+// Shutdown cluster
+shardingTest.stop();
- // Enable sharding on test db and its collection foo
- assert.commandWorked(mongosConn.getDB('admin').runCommand({enablesharding: testDBName}));
- testDB[testCollName].ensureIndex({x: 1});
- assert.commandWorked(mongosConn.getDB('admin').runCommand(
- {shardcollection: testDBName + '.' + testCollName, key: {x: 1}}));
-
- // Test case where GLE should return an error
- assert.writeOK(testDB.foo.insert({_id: 'a', x: 1}));
- assert.writeError(testDB.foo.insert({_id: 'a', x: 1}, {writeConcern: {w: 2, wtimeout: 30000}}));
-
- // Add more data
- bulk = testDB.foo.initializeUnorderedBulkOp();
- for (var i = numDocs; i < 2 * numDocs; i++) {
- bulk.insert({x: i, text: textString});
- }
- assert.writeOK(bulk.execute({w: replNodes, wtimeout: 30000}));
-
- // Take down two nodes and make sure slaveOk reads still work
- var primary = replSet1._master;
- var secondary1 = replSet1._slaves[0];
- var secondary2 = replSet1._slaves[1];
- replSet1.stop(secondary1);
- replSet1.stop(secondary2);
- replSet1.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- testDB.getMongo().adminCommand({setParameter: 1, logLevel: 1});
- testDB.getMongo().setSlaveOk();
- print("trying some queries");
- assert.soon(function() {
- try {
- testDB.foo.find().next();
- } catch (e) {
- print(e);
- return false;
- }
- return true;
- }, "Queries took too long to complete correctly.", 2 * 60 * 1000);
-
- // Shutdown cluster
- shardingTest.stop();
-
- print('shard_insert_getlasterror_w2.js SUCCESS');
+print('shard_insert_getlasterror_w2.js SUCCESS');
})();
diff --git a/jstests/sharding/shard_keycount.js b/jstests/sharding/shard_keycount.js
index 78ac1c3fb6f..3076dde5b7e 100644
--- a/jstests/sharding/shard_keycount.js
+++ b/jstests/sharding/shard_keycount.js
@@ -1,43 +1,43 @@
// Tests splitting a chunk twice
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({name: "shard_keycount", shards: 2, mongos: 1, other: {chunkSize: 1}});
+var s = new ShardingTest({name: "shard_keycount", shards: 2, mongos: 1, other: {chunkSize: 1}});
- var dbName = "test";
- var collName = "foo";
- var ns = dbName + "." + collName;
+var dbName = "test";
+var collName = "foo";
+var ns = dbName + "." + collName;
- var db = s.getDB(dbName);
+var db = s.getDB(dbName);
- for (var i = 0; i < 10; i++) {
- db.foo.insert({_id: i});
- }
+for (var i = 0; i < 10; i++) {
+ db.foo.insert({_id: i});
+}
- // Enable sharding on DB
- assert.commandWorked(s.s0.adminCommand({enablesharding: dbName}));
- s.ensurePrimaryShard(dbName, s.shard1.shardName);
+// Enable sharding on DB
+assert.commandWorked(s.s0.adminCommand({enablesharding: dbName}));
+s.ensurePrimaryShard(dbName, s.shard1.shardName);
- // Enable sharding on collection
- assert.commandWorked(s.s0.adminCommand({shardcollection: ns, key: {_id: 1}}));
+// Enable sharding on collection
+assert.commandWorked(s.s0.adminCommand({shardcollection: ns, key: {_id: 1}}));
- // Split into two chunks
- assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
+// Split into two chunks
+assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
- var coll = db.getCollection(collName);
+var coll = db.getCollection(collName);
- // Split chunk again
- assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
+// Split chunk again
+assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
- assert.writeOK(coll.update({_id: 3}, {_id: 3}));
+assert.writeOK(coll.update({_id: 3}, {_id: 3}));
- // Split chunk again
- assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
+// Split chunk again
+assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
- assert.writeOK(coll.update({_id: 3}, {_id: 3}));
+assert.writeOK(coll.update({_id: 3}, {_id: 3}));
- // Split chunk again
- assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
+// Split chunk again
+assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
- s.stop();
+s.stop();
})();
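The split commands above use find, which lets the server pick a split point inside the chunk containing {_id: 3}; when a deterministic boundary is needed, the same admin command accepts middle instead. A brief sketch, with the namespace and split point as examples only:

// Split the chunk covering _id: 3 at an explicit boundary instead of a server-chosen one.
// Sketch only: not part of this patch; "test.foo" and the key value are placeholders.
assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {_id: 3}}));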
diff --git a/jstests/sharding/shard_kill_and_pooling.js b/jstests/sharding/shard_kill_and_pooling.js
index 1f21d823f26..13715d62ddc 100644
--- a/jstests/sharding/shard_kill_and_pooling.js
+++ b/jstests/sharding/shard_kill_and_pooling.js
@@ -11,76 +11,75 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
// Run through the same test twice, once with a hard -9 kill, once with a regular shutdown
(function() {
- 'use strict';
+'use strict';
- for (var test = 0; test < 2; test++) {
- var killWith = (test == 0 ? 15 : 9);
+for (var test = 0; test < 2; test++) {
+ var killWith = (test == 0 ? 15 : 9);
- var st = new ShardingTest({shards: 1});
+ var st = new ShardingTest({shards: 1});
- var mongos = st.s0;
- var coll = mongos.getCollection("foo.bar");
- var db = coll.getDB();
+ var mongos = st.s0;
+ var coll = mongos.getCollection("foo.bar");
+ var db = coll.getDB();
- assert.writeOK(coll.insert({hello: "world"}));
+ assert.writeOK(coll.insert({hello: "world"}));
- jsTest.log("Creating new connections...");
+ jsTest.log("Creating new connections...");
- // Create a bunch of connections to the primary node through mongos.
- // jstest ->(x10)-> mongos ->(x10)-> primary
- var conns = [];
- for (var i = 0; i < 50; i++) {
- conns.push(new Mongo(mongos.host));
- assert.neq(null, conns[i].getCollection(coll + "").findOne());
- }
+ // Create a bunch of connections to the primary node through mongos.
+ // jstest ->(x10)-> mongos ->(x10)-> primary
+ var conns = [];
+ for (var i = 0; i < 50; i++) {
+ conns.push(new Mongo(mongos.host));
+ assert.neq(null, conns[i].getCollection(coll + "").findOne());
+ }
- jsTest.log("Returning the connections back to the pool.");
+ jsTest.log("Returning the connections back to the pool.");
- for (var i = 0; i < conns.length; i++) {
- conns[i].close();
- }
+ for (var i = 0; i < conns.length; i++) {
+ conns[i].close();
+ }
- // Don't make test fragile by linking to format of shardConnPoolStats, but this is
- // useful if
- // something goes wrong.
- var connPoolStats = mongos.getDB("admin").runCommand({shardConnPoolStats: 1});
- printjson(connPoolStats);
+        // Don't make the test fragile by depending on the exact format of
+        // shardConnPoolStats, but printing the stats here is useful
+        // if something goes wrong.
+ var connPoolStats = mongos.getDB("admin").runCommand({shardConnPoolStats: 1});
+ printjson(connPoolStats);
- jsTest.log("Shutdown shard " + (killWith == 9 ? "uncleanly" : "") + "...");
+ jsTest.log("Shutdown shard " + (killWith == 9 ? "uncleanly" : "") + "...");
- // Flush writes to disk, since sometimes we're killing uncleanly
- assert(mongos.getDB("admin").runCommand({fsync: 1}).ok);
+ // Flush writes to disk, since sometimes we're killing uncleanly
+ assert(mongos.getDB("admin").runCommand({fsync: 1}).ok);
- var exitCode = killWith === 9 ? MongoRunner.EXIT_SIGKILL : MongoRunner.EXIT_CLEAN;
+ var exitCode = killWith === 9 ? MongoRunner.EXIT_SIGKILL : MongoRunner.EXIT_CLEAN;
- st.rs0.stopSet(killWith, true, {allowedExitCode: exitCode});
+ st.rs0.stopSet(killWith, true, {allowedExitCode: exitCode});
- jsTest.log("Restart shard...");
- st.rs0.startSet({forceLock: true}, true);
+ jsTest.log("Restart shard...");
+ st.rs0.startSet({forceLock: true}, true);
- jsTest.log("Waiting for socket timeout time...");
+ jsTest.log("Waiting for socket timeout time...");
- // Need to wait longer than the socket polling time.
- sleep(2 * 5000);
+ // Need to wait longer than the socket polling time.
+ sleep(2 * 5000);
- jsTest.log("Run queries using new connections.");
+ jsTest.log("Run queries using new connections.");
- var numErrors = 0;
- for (var i = 0; i < conns.length; i++) {
- var newConn = new Mongo(mongos.host);
- try {
- assert.neq(null, newConn.getCollection("foo.bar").findOne());
- } catch (e) {
- printjson(e);
- numErrors++;
- }
+ var numErrors = 0;
+ for (var i = 0; i < conns.length; i++) {
+ var newConn = new Mongo(mongos.host);
+ try {
+ assert.neq(null, newConn.getCollection("foo.bar").findOne());
+ } catch (e) {
+ printjson(e);
+ numErrors++;
}
+ }
- assert.eq(0, numErrors);
-
- st.stop();
+ assert.eq(0, numErrors);
- jsTest.log("DONE test " + test);
- }
+ st.stop();
+ jsTest.log("DONE test " + test);
+}
})();
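The pool inspection above deliberately avoids asserting on the shape of shardConnPoolStats. If a coarse sanity check were still wanted, a hedged version would only confirm the command succeeds, reusing the mongos handle from the loop above:

// Dump mongos connection-pool statistics for debugging without depending on their layout.
// Sketch only: assumes the shardConnPoolStats command is available on this mongos version.
var poolStats = mongos.getDB("admin").runCommand({shardConnPoolStats: 1});
assert.commandWorked(poolStats);
printjson(poolStats);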
diff --git a/jstests/sharding/shard_targeting.js b/jstests/sharding/shard_targeting.js
index 62d0f3fa88e..224b3b3ae14 100644
--- a/jstests/sharding/shard_targeting.js
+++ b/jstests/sharding/shard_targeting.js
@@ -4,64 +4,63 @@
// BSONObj itself as the query to target shards, which could return wrong
// shards if the shard key happens to be one of the fields in the command object.
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
+var s = new ShardingTest({shards: 2});
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
- var db = s.getDB("test");
- var res;
+var db = s.getDB("test");
+var res;
- //
- // Target count command
- //
+//
+// Target count command
+//
- // Shard key is the same with command name.
- s.shardColl("foo", {count: 1}, {count: ""});
+// Shard key is the same with command name.
+s.shardColl("foo", {count: 1}, {count: ""});
- for (var i = 0; i < 50; i++) {
- db.foo.insert({count: i}); // chunk [MinKey, ""), including numbers
- db.foo.insert({count: "" + i}); // chunk ["", MaxKey]
- }
+for (var i = 0; i < 50; i++) {
+ db.foo.insert({count: i}); // chunk [MinKey, ""), including numbers
+ db.foo.insert({count: "" + i}); // chunk ["", MaxKey]
+}
- var theOtherShard = s.getOther(s.getPrimaryShard("test")).name;
- s.printShardingStatus();
+var theOtherShard = s.getOther(s.getPrimaryShard("test")).name;
+s.printShardingStatus();
- // Count documents on both shards
+// Count documents on both shards
- // "count" commnad with "query" option { }.
- assert.eq(db.foo.count(), 100);
- // Optional "query" option is not given.
- res = db.foo.runCommand("count");
- assert.eq(res.n, 100);
+// "count" commnad with "query" option { }.
+assert.eq(db.foo.count(), 100);
+// Optional "query" option is not given.
+res = db.foo.runCommand("count");
+assert.eq(res.n, 100);
- //
- // Target mapreduce command
- //
- db.foo.drop();
+//
+// Target mapreduce command
+//
+db.foo.drop();
- // Shard key is the same with command name.
- s.shardColl("foo", {mapReduce: 1}, {mapReduce: ""});
+// Shard key is the same with command name.
+s.shardColl("foo", {mapReduce: 1}, {mapReduce: ""});
- for (var i = 0; i < 50; i++) {
- db.foo.insert({mapReduce: i}); // to the chunk including number
- db.foo.insert({mapReduce: "" + i}); // to the chunk including string
- }
+for (var i = 0; i < 50; i++) {
+ db.foo.insert({mapReduce: i}); // to the chunk including number
+ db.foo.insert({mapReduce: "" + i}); // to the chunk including string
+}
- s.printShardingStatus();
+s.printShardingStatus();
- function m() {
- emit("total", 1);
- }
- function r(k, v) {
- return Array.sum(v);
- }
- res = db.foo.runCommand({mapReduce: "foo", map: m, reduce: r, out: {inline: 1}});
+function m() {
+ emit("total", 1);
+}
+function r(k, v) {
+ return Array.sum(v);
+}
+res = db.foo.runCommand({mapReduce: "foo", map: m, reduce: r, out: {inline: 1}});
- // Count documents on both shards
- assert.eq(res.results[0].value, 100);
-
- s.stop();
+// Count documents on both shards
+assert.eq(res.results[0].value, 100);
+s.stop();
})();
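The count section above exercises the targeting fix by running the command both through the collection helper and bare, with no query filter. For reference, the same pair of invocations written out explicitly, reusing the db handle and expected total from the first half of the test:

// Two equivalent ways to count every document through the count command.
// Sketch only: mirrors the test setup above rather than adding new behavior.
var resWithQuery = db.foo.runCommand("count", {query: {}});
assert.eq(100, resWithQuery.n);
var resBare = db.runCommand({count: "foo"});
assert.eq(100, resBare.n);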
diff --git a/jstests/sharding/shard_with_special_db_names.js b/jstests/sharding/shard_with_special_db_names.js
index 75f0ea19bb5..b96b6bf3f5c 100644
--- a/jstests/sharding/shard_with_special_db_names.js
+++ b/jstests/sharding/shard_with_special_db_names.js
@@ -1,28 +1,28 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2, mongos: 2});
- var specialDB = "[a-z]+";
- var specialNS = specialDB + ".special";
+var s = new ShardingTest({shards: 2, mongos: 2});
+var specialDB = "[a-z]+";
+var specialNS = specialDB + ".special";
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {num: 1}}));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {num: 1}}));
- // Test that the database will not complain "cannot have 2 database names that differs on case"
- assert.commandWorked(s.s0.adminCommand({enablesharding: specialDB}));
- s.ensurePrimaryShard(specialDB, s.shard0.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: specialNS, key: {num: 1}}));
+// Test that the database will not complain "cannot have 2 database names that differ on case"
+assert.commandWorked(s.s0.adminCommand({enablesharding: specialDB}));
+s.ensurePrimaryShard(specialDB, s.shard0.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: specialNS, key: {num: 1}}));
- var exists = s.getDB("config").collections.find({_id: specialNS}).itcount();
- assert.eq(exists, 1);
+var exists = s.getDB("config").collections.find({_id: specialNS}).itcount();
+assert.eq(exists, 1);
- // Test that drop database properly cleans up config
- s.getDB(specialDB).dropDatabase();
+// Test that drop database properly cleans up config
+s.getDB(specialDB).dropDatabase();
- var cursor = s.getDB("config").collections.find({_id: specialNS});
- assert(cursor.next()["dropped"]);
- assert(!cursor.hasNext());
+var cursor = s.getDB("config").collections.find({_id: specialNS});
+assert(cursor.next()["dropped"]);
+assert(!cursor.hasNext());
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/sharded_limit_batchsize.js b/jstests/sharding/sharded_limit_batchsize.js
index 7d47db46e7d..e7f1f589ca9 100644
--- a/jstests/sharding/sharded_limit_batchsize.js
+++ b/jstests/sharding/sharded_limit_batchsize.js
@@ -2,151 +2,150 @@
// of limit and batchSize with sort return the correct results, and do not issue
// unnecessary getmores (see SERVER-14299).
(function() {
- 'use strict';
-
- /**
- * Test the correctness of queries with sort and batchSize on a sharded cluster,
- * running the queries against collection 'coll'.
- */
- function testBatchSize(coll) {
- // Roll the cursor over the second batch and make sure it's correctly sized
- assert.eq(20, coll.find().sort({x: 1}).batchSize(3).itcount());
- assert.eq(15, coll.find().sort({x: 1}).batchSize(3).skip(5).itcount());
- }
-
- /**
- * Test the correctness of queries with sort and limit on a sharded cluster,
- * running the queries against collection 'coll'.
- */
- function testLimit(coll) {
- var cursor = coll.find().sort({x: 1}).limit(3);
- assert.eq(-10, cursor.next()["_id"]);
- assert.eq(-9, cursor.next()["_id"]);
- assert.eq(-8, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- assert.eq(13, coll.find().sort({x: 1}).limit(13).itcount());
-
- cursor = coll.find().sort({x: 1}).skip(5).limit(2);
- assert.eq(-5, cursor.next()["_id"]);
- assert.eq(-4, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- cursor = coll.find().sort({x: 1}).skip(9).limit(2);
- assert.eq(-1, cursor.next()["_id"]);
- assert.eq(1, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- cursor = coll.find().sort({x: 1}).skip(11).limit(2);
- assert.eq(2, cursor.next()["_id"]);
- assert.eq(3, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- // Ensure that in the limit 1 case, which is special when in legacy readMode, the server
- // does not leave a cursor open.
- var openCursorsBefore =
- assert.commandWorked(coll.getDB().serverStatus()).metrics.cursor.open.total;
- cursor = coll.find().sort({x: 1}).limit(1);
- assert(cursor.hasNext());
- assert.eq(-10, cursor.next()["_id"]);
- var openCursorsAfter =
- assert.commandWorked(coll.getDB().serverStatus()).metrics.cursor.open.total;
- assert.eq(openCursorsBefore, openCursorsAfter);
- }
-
- /**
- * Test correctness of queries run with singleBatch=true.
- */
- function testSingleBatch(coll, numShards) {
- // Ensure that singleBatch queries that require multiple batches from individual shards
- // return complete results.
- var batchSize = 5;
- var res = assert.commandWorked(coll.getDB().runCommand({
- find: coll.getName(),
- filter: {x: {$lte: 10}},
- skip: numShards * batchSize,
- singleBatch: true,
- batchSize: batchSize
- }));
- assert.eq(batchSize, res.cursor.firstBatch.length);
- assert.eq(0, res.cursor.id);
- var cursor = coll.find().skip(numShards * batchSize).limit(-1 * batchSize);
- assert.eq(batchSize, cursor.itcount());
- cursor = coll.find().skip(numShards * batchSize).batchSize(-1 * batchSize);
- assert.eq(batchSize, cursor.itcount());
- }
-
- //
- // Create a two-shard cluster. Have an unsharded collection and a sharded collection.
- //
-
- var st = new ShardingTest(
- {shards: 2, other: {shardOptions: {setParameter: "enableTestCommands=1"}}});
-
- var db = st.s.getDB("test");
- var shardedCol = db.getCollection("sharded_limit_batchsize");
- var unshardedCol = db.getCollection("unsharded_limit_batchsize");
- shardedCol.drop();
- unshardedCol.drop();
-
- // Enable sharding and pre-split the sharded collection.
- assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
- st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
- db.adminCommand({shardCollection: shardedCol.getFullName(), key: {_id: 1}});
- assert.commandWorked(db.adminCommand({split: shardedCol.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(db.adminCommand(
- {moveChunk: shardedCol.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
-
- // Write 10 documents to shard 0, and 10 documents to shard 1 inside the sharded collection.
- // Write 20 documents which all go to the primary shard in the unsharded collection.
- for (var i = 1; i <= 10; ++i) {
- // These go to shard 1.
- assert.writeOK(shardedCol.insert({_id: i, x: i}));
-
- // These go to shard 0.
- assert.writeOK(shardedCol.insert({_id: -i, x: -i}));
-
- // These go to shard 0 inside the non-sharded collection.
- assert.writeOK(unshardedCol.insert({_id: i, x: i}));
- assert.writeOK(unshardedCol.insert({_id: -i, x: -i}));
- }
-
- //
- // Run tests for singleBatch queries.
- //
-
- testSingleBatch(shardedCol, 2);
- testSingleBatch(unshardedCol, 1);
-
- //
- // Run tests for batch size. These should issue getmores.
- //
-
- jsTest.log("Running batchSize tests against sharded collection.");
- st.shard0.adminCommand({setParameter: 1, logLevel: 1});
- testBatchSize(shardedCol);
- st.shard0.adminCommand({setParameter: 1, logLevel: 0});
-
- jsTest.log("Running batchSize tests against non-sharded collection.");
- testBatchSize(unshardedCol);
-
- //
- // Run tests for limit. These should *not* issue getmores. We confirm this
- // by enabling the getmore failpoint on the shards.
- //
-
- assert.commandWorked(st.shard0.getDB("test").adminCommand(
- {configureFailPoint: "failReceivedGetmore", mode: "alwaysOn"}));
-
- assert.commandWorked(st.shard1.getDB("test").adminCommand(
- {configureFailPoint: "failReceivedGetmore", mode: "alwaysOn"}));
-
- jsTest.log("Running limit tests against sharded collection.");
- testLimit(shardedCol, st.shard0);
-
- jsTest.log("Running limit tests against non-sharded collection.");
- testLimit(unshardedCol, st.shard0);
-
- st.stop();
-
+'use strict';
+
+/**
+ * Test the correctness of queries with sort and batchSize on a sharded cluster,
+ * running the queries against collection 'coll'.
+ */
+function testBatchSize(coll) {
+ // Roll the cursor over the second batch and make sure it's correctly sized
+ assert.eq(20, coll.find().sort({x: 1}).batchSize(3).itcount());
+ assert.eq(15, coll.find().sort({x: 1}).batchSize(3).skip(5).itcount());
+}
+
+/**
+ * Test the correctness of queries with sort and limit on a sharded cluster,
+ * running the queries against collection 'coll'.
+ */
+function testLimit(coll) {
+ var cursor = coll.find().sort({x: 1}).limit(3);
+ assert.eq(-10, cursor.next()["_id"]);
+ assert.eq(-9, cursor.next()["_id"]);
+ assert.eq(-8, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ assert.eq(13, coll.find().sort({x: 1}).limit(13).itcount());
+
+ cursor = coll.find().sort({x: 1}).skip(5).limit(2);
+ assert.eq(-5, cursor.next()["_id"]);
+ assert.eq(-4, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ cursor = coll.find().sort({x: 1}).skip(9).limit(2);
+ assert.eq(-1, cursor.next()["_id"]);
+ assert.eq(1, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ cursor = coll.find().sort({x: 1}).skip(11).limit(2);
+ assert.eq(2, cursor.next()["_id"]);
+ assert.eq(3, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ // Ensure that in the limit 1 case, which is special when in legacy readMode, the server
+ // does not leave a cursor open.
+ var openCursorsBefore =
+ assert.commandWorked(coll.getDB().serverStatus()).metrics.cursor.open.total;
+ cursor = coll.find().sort({x: 1}).limit(1);
+ assert(cursor.hasNext());
+ assert.eq(-10, cursor.next()["_id"]);
+ var openCursorsAfter =
+ assert.commandWorked(coll.getDB().serverStatus()).metrics.cursor.open.total;
+ assert.eq(openCursorsBefore, openCursorsAfter);
+}
+
+/**
+ * Test correctness of queries run with singleBatch=true.
+ */
+function testSingleBatch(coll, numShards) {
+ // Ensure that singleBatch queries that require multiple batches from individual shards
+ // return complete results.
+ var batchSize = 5;
+ var res = assert.commandWorked(coll.getDB().runCommand({
+ find: coll.getName(),
+ filter: {x: {$lte: 10}},
+ skip: numShards * batchSize,
+ singleBatch: true,
+ batchSize: batchSize
+ }));
+ assert.eq(batchSize, res.cursor.firstBatch.length);
+ assert.eq(0, res.cursor.id);
+ var cursor = coll.find().skip(numShards * batchSize).limit(-1 * batchSize);
+ assert.eq(batchSize, cursor.itcount());
+ cursor = coll.find().skip(numShards * batchSize).batchSize(-1 * batchSize);
+ assert.eq(batchSize, cursor.itcount());
+}
+
+//
+// Create a two-shard cluster. Have an unsharded collection and a sharded collection.
+//
+
+var st =
+ new ShardingTest({shards: 2, other: {shardOptions: {setParameter: "enableTestCommands=1"}}});
+
+var db = st.s.getDB("test");
+var shardedCol = db.getCollection("sharded_limit_batchsize");
+var unshardedCol = db.getCollection("unsharded_limit_batchsize");
+shardedCol.drop();
+unshardedCol.drop();
+
+// Enable sharding and pre-split the sharded collection.
+assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
+st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+db.adminCommand({shardCollection: shardedCol.getFullName(), key: {_id: 1}});
+assert.commandWorked(db.adminCommand({split: shardedCol.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(db.adminCommand(
+ {moveChunk: shardedCol.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
+
+// Write 10 documents to shard 0, and 10 documents to shard 1 inside the sharded collection.
+// Write 20 documents which all go to the primary shard in the unsharded collection.
+for (var i = 1; i <= 10; ++i) {
+ // These go to shard 1.
+ assert.writeOK(shardedCol.insert({_id: i, x: i}));
+
+ // These go to shard 0.
+ assert.writeOK(shardedCol.insert({_id: -i, x: -i}));
+
+ // These go to shard 0 inside the non-sharded collection.
+ assert.writeOK(unshardedCol.insert({_id: i, x: i}));
+ assert.writeOK(unshardedCol.insert({_id: -i, x: -i}));
+}
+
+//
+// Run tests for singleBatch queries.
+//
+
+testSingleBatch(shardedCol, 2);
+testSingleBatch(unshardedCol, 1);
+
+//
+// Run tests for batch size. These should issue getmores.
+//
+
+jsTest.log("Running batchSize tests against sharded collection.");
+st.shard0.adminCommand({setParameter: 1, logLevel: 1});
+testBatchSize(shardedCol);
+st.shard0.adminCommand({setParameter: 1, logLevel: 0});
+
+jsTest.log("Running batchSize tests against non-sharded collection.");
+testBatchSize(unshardedCol);
+
+//
+// Run tests for limit. These should *not* issue getmores. We confirm this
+// by enabling the getmore failpoint on the shards.
+//
+
+assert.commandWorked(st.shard0.getDB("test").adminCommand(
+ {configureFailPoint: "failReceivedGetmore", mode: "alwaysOn"}));
+
+assert.commandWorked(st.shard1.getDB("test").adminCommand(
+ {configureFailPoint: "failReceivedGetmore", mode: "alwaysOn"}));
+
+jsTest.log("Running limit tests against sharded collection.");
+testLimit(shardedCol, st.shard0);
+
+jsTest.log("Running limit tests against non-sharded collection.");
+testLimit(unshardedCol, st.shard0);
+
+st.stop();
})();
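The failReceivedGetmore failpoints enabled above stay active until the cluster is torn down, which is fine here because st.stop() follows immediately; a test that kept issuing getMores afterwards would need to switch them back off first. A minimal sketch of that cleanup, mirroring the configureFailPoint calls above:

// Disable the getMore failpoint on both shards once the limit tests are finished.
// Sketch only: not part of this patch.
assert.commandWorked(st.shard0.getDB("test").adminCommand(
    {configureFailPoint: "failReceivedGetmore", mode: "off"}));
assert.commandWorked(st.shard1.getDB("test").adminCommand(
    {configureFailPoint: "failReceivedGetmore", mode: "off"}));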
diff --git a/jstests/sharding/sharded_profile.js b/jstests/sharding/sharded_profile.js
index 0ae81862e0e..c38b178f73a 100644
--- a/jstests/sharding/sharded_profile.js
+++ b/jstests/sharding/sharded_profile.js
@@ -3,33 +3,32 @@
(function() {
- var st = new ShardingTest({shards: 1, mongos: 2});
- st.stopBalancer();
+var st = new ShardingTest({shards: 1, mongos: 2});
+st.stopBalancer();
- var admin = st.s0.getDB('admin');
- var shards = st.s0.getCollection('config.shards').find().toArray();
- var coll = st.s0.getCollection('foo.bar');
+var admin = st.s0.getDB('admin');
+var shards = st.s0.getCollection('config.shards').find().toArray();
+var coll = st.s0.getCollection('foo.bar');
- assert(admin.runCommand({enableSharding: coll.getDB() + ''}).ok);
- assert(admin.runCommand({shardCollection: coll + '', key: {_id: 1}}).ok);
+assert(admin.runCommand({enableSharding: coll.getDB() + ''}).ok);
+assert(admin.runCommand({shardCollection: coll + '', key: {_id: 1}}).ok);
- st.printShardingStatus();
+st.printShardingStatus();
- jsTest.log('Turning on profiling on ' + st.shard0);
+jsTest.log('Turning on profiling on ' + st.shard0);
- st.shard0.getDB(coll.getDB().toString()).setProfilingLevel(2);
+st.shard0.getDB(coll.getDB().toString()).setProfilingLevel(2);
- var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
+var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
- var inserts = [{_id: 0}, {_id: 1}, {_id: 2}];
+var inserts = [{_id: 0}, {_id: 1}, {_id: 2}];
- assert.writeOK(st.s1.getCollection(coll.toString()).insert(inserts));
+assert.writeOK(st.s1.getCollection(coll.toString()).insert(inserts));
- profileEntry = profileColl.findOne();
- assert.neq(null, profileEntry);
- printjson(profileEntry);
- assert.eq(profileEntry.command.documents, inserts);
-
- st.stop();
+profileEntry = profileColl.findOne();
+assert.neq(null, profileEntry);
+printjson(profileEntry);
+assert.eq(profileEntry.command.documents, inserts);
+st.stop();
})();
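The profile check above reads an arbitrary entry with findOne() and assumes it belongs to the insert. A slightly narrower lookup filtered by operation type and namespace is less likely to pick up an unrelated entry; a sketch reusing the profileColl, coll, and inserts handles from the test above, and assuming the 4.2-era profiler field names op and ns:

// Find the profile entry for the insert into foo.bar specifically.
// Sketch only: the profiler field names are an assumption, not part of this patch.
var insertEntry = profileColl.findOne({op: "insert", ns: coll.toString()});
assert.neq(null, insertEntry);
assert.eq(insertEntry.command.documents, inserts);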
diff --git a/jstests/sharding/sharding_balance1.js b/jstests/sharding/sharding_balance1.js
index 413a7194c22..f07708d2d23 100644
--- a/jstests/sharding/sharding_balance1.js
+++ b/jstests/sharding/sharding_balance1.js
@@ -1,51 +1,51 @@
(function() {
- 'use strict';
-
- var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- var db = s.getDB("test");
-
- var bigString = "";
- while (bigString.length < 10000) {
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
- }
-
- var inserted = 0;
- var num = 0;
- var bulk = db.foo.initializeUnorderedBulkOp();
- while (inserted < (20 * 1024 * 1024)) {
- bulk.insert({_id: num++, s: bigString});
- inserted += bigString.length;
- }
- assert.writeOK(bulk.execute());
-
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
-
- function diff1() {
- var x = s.chunkCounts("foo");
- printjson(x);
- return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
- Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
- }
-
- function sum() {
- var x = s.chunkCounts("foo");
- return x[s.shard0.shardName] + x[s.shard1.shardName];
- }
-
- assert.lt(20, diff1(), "big differential here");
- print(diff1());
-
- assert.soon(function() {
- var d = diff1();
- return d < 5;
- // Make sure there's enough time here, since balancing can sleep for 15s or so between
- // balances.
- }, "balance didn't happen", 1000 * 60 * 5, 5000);
-
- s.stop();
+'use strict';
+
+var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+var db = s.getDB("test");
+
+var bigString = "";
+while (bigString.length < 10000) {
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+}
+
+var inserted = 0;
+var num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+while (inserted < (20 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+}
+assert.writeOK(bulk.execute());
+
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
+
+function diff1() {
+ var x = s.chunkCounts("foo");
+ printjson(x);
+ return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
+ Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
+}
+
+function sum() {
+ var x = s.chunkCounts("foo");
+ return x[s.shard0.shardName] + x[s.shard1.shardName];
+}
+
+assert.lt(20, diff1(), "big differential here");
+print(diff1());
+
+assert.soon(function() {
+ var d = diff1();
+ return d < 5;
+ // Make sure there's enough time here, since balancing can sleep for 15s or so between
+ // balances.
+}, "balance didn't happen", 1000 * 60 * 5, 5000);
+
+s.stop();
})();
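The diff1() helper above relies on ShardingTest.chunkCounts(); the same numbers can be pulled straight from the config database, which helps when diagnosing a failed balance assertion. A sketch, again assuming the pre-4.4 config.chunks schema with ns and shard fields:

// Tally chunk documents for a namespace by owning shard.
// Sketch only: the schema assumption is not part of this patch.
function chunkCountsByShardSketch(mongosConn, fullName) {
    var counts = {};
    mongosConn.getDB("config").chunks.find({ns: fullName}).forEach(function(chunk) {
        counts[chunk.shard] = (counts[chunk.shard] || 0) + 1;
    });
    return counts;
}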
diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/sharding_balance2.js
index 7bba7e25bf3..697f3f5c0b0 100644
--- a/jstests/sharding/sharding_balance2.js
+++ b/jstests/sharding/sharding_balance2.js
@@ -2,68 +2,68 @@
* Test the maxSize setting for the addShard command.
*/
(function() {
- 'use strict';
-
- var MaxSizeMB = 1;
-
- var s = new ShardingTest({shards: 2, other: {chunkSize: 1, manualAddShard: true}});
- var db = s.getDB("test");
-
- var names = s.getConnNames();
- assert.eq(2, names.length);
- assert.commandWorked(s.s0.adminCommand({addshard: names[0]}));
- assert.commandWorked(s.s0.adminCommand({addshard: names[1], maxSize: MaxSizeMB}));
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', names[0]);
-
- var bigString = "";
- while (bigString.length < 10000)
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-
- var inserted = 0;
- var num = 0;
- var bulk = db.foo.initializeUnorderedBulkOp();
- while (inserted < (40 * 1024 * 1024)) {
- bulk.insert({_id: num++, s: bigString});
- inserted += bigString.length;
- }
- assert.writeOK(bulk.execute());
+'use strict';
+
+var MaxSizeMB = 1;
+
+var s = new ShardingTest({shards: 2, other: {chunkSize: 1, manualAddShard: true}});
+var db = s.getDB("test");
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- assert.gt(s.config.chunks.count({"ns": "test.foo"}), 10);
+var names = s.getConnNames();
+assert.eq(2, names.length);
+assert.commandWorked(s.s0.adminCommand({addshard: names[0]}));
+assert.commandWorked(s.s0.adminCommand({addshard: names[1], maxSize: MaxSizeMB}));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', names[0]);
- var getShardSize = function(conn) {
- var listDatabases = conn.getDB('admin').runCommand({listDatabases: 1});
- return listDatabases.totalSize;
- };
+var bigString = "";
+while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
- var shardConn = new Mongo(names[1]);
+var inserted = 0;
+var num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+while (inserted < (40 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+}
+assert.writeOK(bulk.execute());
- // Make sure that shard doesn't have any documents.
- assert.eq(0, shardConn.getDB('test').foo.find().itcount());
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+assert.gt(s.config.chunks.count({"ns": "test.foo"}), 10);
- var maxSizeBytes = MaxSizeMB * 1024 * 1024;
+var getShardSize = function(conn) {
+ var listDatabases = conn.getDB('admin').runCommand({listDatabases: 1});
+ return listDatabases.totalSize;
+};
- // Fill the shard with documents to exceed the max size so the balancer won't move
- // chunks to this shard.
- var localColl = shardConn.getDB('local').padding;
- while (getShardSize(shardConn) < maxSizeBytes) {
- var localBulk = localColl.initializeUnorderedBulkOp();
+var shardConn = new Mongo(names[1]);
- for (var x = 0; x < 20; x++) {
- localBulk.insert({x: x, val: bigString});
- }
- assert.writeOK(localBulk.execute());
+// Make sure that shard doesn't have any documents.
+assert.eq(0, shardConn.getDB('test').foo.find().itcount());
- // Force the storage engine to flush files to disk so shardSize will get updated.
- assert.commandWorked(shardConn.getDB('admin').runCommand({fsync: 1}));
+var maxSizeBytes = MaxSizeMB * 1024 * 1024;
+
+// Fill the shard with documents to exceed the max size so the balancer won't move
+// chunks to this shard.
+var localColl = shardConn.getDB('local').padding;
+while (getShardSize(shardConn) < maxSizeBytes) {
+ var localBulk = localColl.initializeUnorderedBulkOp();
+
+ for (var x = 0; x < 20; x++) {
+ localBulk.insert({x: x, val: bigString});
}
+ assert.writeOK(localBulk.execute());
+
+ // Force the storage engine to flush files to disk so shardSize will get updated.
+ assert.commandWorked(shardConn.getDB('admin').runCommand({fsync: 1}));
+}
- s.startBalancer();
- s.awaitBalancerRound();
+s.startBalancer();
+s.awaitBalancerRound();
- var chunkCounts = s.chunkCounts('foo', 'test');
- assert.eq(0, chunkCounts[s.rs1.name]);
+var chunkCounts = s.chunkCounts('foo', 'test');
+assert.eq(0, chunkCounts[s.rs1.name]);
- s.stop();
+s.stop();
})();
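getShardSize() above reads totalSize from listDatabases, which covers every database on the node; when only one database matters, dbStats gives a per-database figure. A sketch of that alternative:

// Report the on-disk storage size of a single database on a shard connection.
// Sketch only: uses dbStats.storageSize rather than the listDatabases total, and the
// helper name is illustrative.
var getDbStorageSizeSketch = function(conn, dbName) {
    var stats = assert.commandWorked(conn.getDB(dbName).runCommand({dbStats: 1}));
    return stats.storageSize;
};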
diff --git a/jstests/sharding/sharding_balance3.js b/jstests/sharding/sharding_balance3.js
index 155403e0b7c..fa9b0dc38da 100644
--- a/jstests/sharding/sharding_balance3.js
+++ b/jstests/sharding/sharding_balance3.js
@@ -2,69 +2,68 @@
(function() {
- var s = new ShardingTest({
- name: "slow_sharding_balance3",
- shards: 2,
- mongos: 1,
- other: {chunkSize: 1, enableBalancer: true}
- });
-
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- s.config.settings.find().forEach(printjson);
-
- db = s.getDB("test");
-
- bigString = "";
- while (bigString.length < 10000)
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-
- inserted = 0;
- num = 0;
- var bulk = db.foo.initializeUnorderedBulkOp();
- while (inserted < (40 * 1024 * 1024)) {
- bulk.insert({_id: num++, s: bigString});
- inserted += bigString.length;
- }
- assert.writeOK(bulk.execute());
-
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
- assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
-
- function diff1() {
- var x = s.chunkCounts("foo");
- printjson(x);
- return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
- Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
- }
-
- assert.lt(10, diff1());
-
- // Wait for balancer to kick in.
- var initialDiff = diff1();
- assert.soon(function() {
- return diff1() != initialDiff;
- }, "Balancer did not kick in", 5 * 60 * 1000, 1000);
-
- print("* A");
- print("disabling the balancer");
- s.stopBalancer();
- s.config.settings.find().forEach(printjson);
- print("* B");
-
- print(diff1());
-
- var currDiff = diff1();
- var waitTime = 0;
- var startTime = Date.now();
- while (waitTime < (1000 * 60)) {
- // Wait for 60 seconds to ensure balancer did not run
- assert.eq(currDiff, diff1(), "balance with stopped flag should not have happened");
- sleep(5000);
- waitTime = Date.now() - startTime;
- }
-
- s.stop();
-
+var s = new ShardingTest({
+ name: "slow_sharding_balance3",
+ shards: 2,
+ mongos: 1,
+ other: {chunkSize: 1, enableBalancer: true}
+});
+
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+s.config.settings.find().forEach(printjson);
+
+db = s.getDB("test");
+
+bigString = "";
+while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+inserted = 0;
+num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+while (inserted < (40 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+}
+assert.writeOK(bulk.execute());
+
+s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
+
+function diff1() {
+ var x = s.chunkCounts("foo");
+ printjson(x);
+ return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
+ Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
+}
+
+assert.lt(10, diff1());
+
+// Wait for balancer to kick in.
+var initialDiff = diff1();
+assert.soon(function() {
+ return diff1() != initialDiff;
+}, "Balancer did not kick in", 5 * 60 * 1000, 1000);
+
+print("* A");
+print("disabling the balancer");
+s.stopBalancer();
+s.config.settings.find().forEach(printjson);
+print("* B");
+
+print(diff1());
+
+var currDiff = diff1();
+var waitTime = 0;
+var startTime = Date.now();
+while (waitTime < (1000 * 60)) {
+ // Wait for 60 seconds to ensure balancer did not run
+ assert.eq(currDiff, diff1(), "balance with stopped flag should not have happened");
+ sleep(5000);
+ waitTime = Date.now() - startTime;
+}
+
+s.stop();
})();
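
The per-shard chunk counts that diff1() compares above can also be read directly off the config database; a rough sketch follows, assuming the same test.foo namespace (in this branch, config.chunks documents still carry an ns field, as the test's own count() query shows).

    // Tally chunks of test.foo per owning shard straight from config.chunks.
    var perShard = {};
    db.getSiblingDB("config").chunks.find({ns: "test.foo"}).forEach(function(c) {
        perShard[c.shard] = (perShard[c.shard] || 0) + 1;
    });
    printjson(perShard);  // imbalance = max(count) - min(count)
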
diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
index 75a30b62b9a..e97a6366120 100644
--- a/jstests/sharding/sharding_balance4.js
+++ b/jstests/sharding/sharding_balance4.js
@@ -7,158 +7,158 @@
*/
(function() {
- var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
+var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- s.config.settings.find().forEach(printjson);
+s.config.settings.find().forEach(printjson);
- db = s.getDB("test");
+db = s.getDB("test");
- bigString = "";
- while (bigString.length < 10000)
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+bigString = "";
+while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
- N = 3000;
+N = 3000;
- num = 0;
+num = 0;
- var counts = {};
+var counts = {};
- //
- // TODO: Rewrite to make much clearer.
- //
- // The core behavior of this test is to add a bunch of documents to a sharded collection, then
- // incrementally update each document and make sure the counts in the document match our update
- // counts while balancing occurs (doUpdate()). Every once in a while we also check (check())
- // our counts via a query.
- //
- // If during a chunk migration an update is missed, we trigger an assertion and fail.
- //
+//
+// TODO: Rewrite to make much clearer.
+//
+// The core behavior of this test is to add a bunch of documents to a sharded collection, then
+// incrementally update each document and make sure the counts in the document match our update
+// counts while balancing occurs (doUpdate()). Every once in a while we also check (check())
+// our counts via a query.
+//
+// If during a chunk migration an update is missed, we trigger an assertion and fail.
+//
- function doUpdate(bulk, includeString, optionalId) {
- var up = {$inc: {x: 1}};
- if (includeString) {
- up["$set"] = {s: bigString};
+function doUpdate(bulk, includeString, optionalId) {
+ var up = {$inc: {x: 1}};
+ if (includeString) {
+ up["$set"] = {s: bigString};
+ }
+ var myid = optionalId == undefined ? Random.randInt(N) : optionalId;
+ bulk.find({_id: myid}).upsert().update(up);
+
+ counts[myid] = (counts[myid] ? counts[myid] : 0) + 1;
+ return myid;
+}
+
+Random.setRandomSeed();
+
+// Initially update all documents from 1 to N, otherwise later checks can fail because no
+// document previously existed
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (i = 0; i < N; i++) {
+ doUpdate(bulk, true, i);
+}
+
+for (i = 0; i < N * 9; i++) {
+ doUpdate(bulk, false);
+}
+assert.writeOK(bulk.execute());
+
+for (var i = 0; i < 50; i++) {
+ s.printChunks("test.foo");
+ if (check("initial:" + i, true))
+ break;
+ sleep(5000);
+}
+check("initial at end");
+
+assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
+
+function check(msg, dontAssert) {
+ for (var x in counts) {
+ var e = counts[x];
+ var z = db.foo.findOne({_id: parseInt(x)});
+
+ if (z && z.x == e)
+ continue;
+
+ if (dontAssert) {
+ if (z)
+ delete z.s;
+ print("not asserting for key failure: " + x + " want: " + e + " got: " + tojson(z));
+ return false;
}
- var myid = optionalId == undefined ? Random.randInt(N) : optionalId;
- bulk.find({_id: myid}).upsert().update(up);
- counts[myid] = (counts[myid] ? counts[myid] : 0) + 1;
- return myid;
- }
+ s.s.getDB("admin").runCommand({setParameter: 1, logLevel: 2});
- Random.setRandomSeed();
+ printjson(db.foo.findOne({_id: parseInt(x)}));
- // Initially update all documents from 1 to N, otherwise later checks can fail because no
- // document previously existed
- var bulk = db.foo.initializeUnorderedBulkOp();
- for (i = 0; i < N; i++) {
- doUpdate(bulk, true, i);
- }
+ var y = db.foo.findOne({_id: parseInt(x)});
- for (i = 0; i < N * 9; i++) {
- doUpdate(bulk, false);
- }
- assert.writeOK(bulk.execute());
+ if (y) {
+ delete y.s;
+ }
- for (var i = 0; i < 50; i++) {
s.printChunks("test.foo");
- if (check("initial:" + i, true))
- break;
- sleep(5000);
- }
- check("initial at end");
-
- assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
-
- function check(msg, dontAssert) {
- for (var x in counts) {
- var e = counts[x];
- var z = db.foo.findOne({_id: parseInt(x)});
- if (z && z.x == e)
- continue;
-
- if (dontAssert) {
- if (z)
- delete z.s;
- print("not asserting for key failure: " + x + " want: " + e + " got: " + tojson(z));
- return false;
- }
+ assert(z, "couldn't find : " + x + " y:" + tojson(y) + " e: " + e + " " + msg);
+ assert.eq(e, z.x, "count for : " + x + " y:" + tojson(y) + " " + msg);
+ }
- s.s.getDB("admin").runCommand({setParameter: 1, logLevel: 2});
+ return true;
+}
- printjson(db.foo.findOne({_id: parseInt(x)}));
+var consecutiveNoProgressMadeErrors = 0;
- var y = db.foo.findOne({_id: parseInt(x)});
+function diff1() {
+ jsTest.log("Running diff1...");
- if (y) {
- delete y.s;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ var myid = doUpdate(bulk, false);
+ var res = bulk.execute();
+
+ assert(res instanceof BulkWriteResult,
+ 'Result from bulk.execute should be of type BulkWriteResult');
+ if (res.hasWriteErrors()) {
+ res.writeErrors.forEach(function(err) {
+ // Ignore up to 3 consecutive NoProgressMade errors for the cases where migration
+ // might be going faster than the writes are executing
+ if (err.code == ErrorCodes.NoProgressMade) {
+ consecutiveNoProgressMadeErrors++;
+ if (consecutiveNoProgressMadeErrors < 3) {
+ return;
+ }
}
- s.printChunks("test.foo");
+ assert.writeOK(res);
+ });
+ } else {
+ consecutiveNoProgressMadeErrors = 0;
- assert(z, "couldn't find : " + x + " y:" + tojson(y) + " e: " + e + " " + msg);
- assert.eq(e, z.x, "count for : " + x + " y:" + tojson(y) + " " + msg);
- }
-
- return true;
+ assert.eq(1,
+ res.nModified,
+ "diff myid: " + myid + " 2: " + res.toString() + "\n" +
+ " correct count is: " + counts[myid] +
+ " db says count is: " + tojson(db.foo.findOne({_id: myid})));
}
- var consecutiveNoProgressMadeErrors = 0;
-
- function diff1() {
- jsTest.log("Running diff1...");
-
- var bulk = db.foo.initializeUnorderedBulkOp();
- var myid = doUpdate(bulk, false);
- var res = bulk.execute();
-
- assert(res instanceof BulkWriteResult,
- 'Result from bulk.execute should be of type BulkWriteResult');
- if (res.hasWriteErrors()) {
- res.writeErrors.forEach(function(err) {
- // Ignore up to 3 consecutive NoProgressMade errors for the cases where migration
- // might be going faster than the writes are executing
- if (err.code == ErrorCodes.NoProgressMade) {
- consecutiveNoProgressMadeErrors++;
- if (consecutiveNoProgressMadeErrors < 3) {
- return;
- }
- }
+ var x = s.chunkCounts("foo");
+ if (Math.random() > .999)
+ printjson(x);
- assert.writeOK(res);
- });
- } else {
- consecutiveNoProgressMadeErrors = 0;
-
- assert.eq(1,
- res.nModified,
- "diff myid: " + myid + " 2: " + res.toString() + "\n" +
- " correct count is: " + counts[myid] + " db says count is: " +
- tojson(db.foo.findOne({_id: myid})));
- }
-
- var x = s.chunkCounts("foo");
- if (Math.random() > .999)
- printjson(x);
-
- return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
- Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
- }
+ return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
+ Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
+}
- assert.lt(20, diff1(), "initial load");
- print(diff1());
+assert.lt(20, diff1(), "initial load");
+print(diff1());
- s.startBalancer();
+s.startBalancer();
- assert.soon(function() {
- var d = diff1();
- return d < 5;
- }, "balance didn't happen", 1000 * 60 * 20, 1);
+assert.soon(function() {
+ var d = diff1();
+ return d < 5;
+}, "balance didn't happen", 1000 * 60 * 20, 1);
- s.stop();
+s.stop();
})();
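
The doUpdate()/check() machinery above boils down to a bulk upsert that increments a counter while the client mirrors the expected value; here is a self-contained sketch of that pattern, assuming an empty db.foo collection and an illustrative _id of 42.

    // Upsert-increment a document and verify the client-side expectation.
    var expected = {};
    var bulk = db.foo.initializeUnorderedBulkOp();
    var id = 42;  // illustrative _id
    bulk.find({_id: id}).upsert().update({$inc: {x: 1}});
    expected[id] = (expected[id] || 0) + 1;
    assert.writeOK(bulk.execute());
    assert.eq(expected[id], db.foo.findOne({_id: id}).x);
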
diff --git a/jstests/sharding/sharding_migrate_cursor1.js b/jstests/sharding/sharding_migrate_cursor1.js
index 0fef085d5c6..f196381528e 100644
--- a/jstests/sharding/sharding_migrate_cursor1.js
+++ b/jstests/sharding/sharding_migrate_cursor1.js
@@ -7,85 +7,84 @@
*/
(function() {
- var chunkSize = 25;
+var chunkSize = 25;
- var s = new ShardingTest(
- {name: "migrate_cursor1", shards: 2, mongos: 1, other: {chunkSize: chunkSize}});
+var s = new ShardingTest(
+ {name: "migrate_cursor1", shards: 2, mongos: 1, other: {chunkSize: chunkSize}});
- s.adminCommand({enablesharding: "test"});
- db = s.getDB("test");
- s.ensurePrimaryShard('test', s.shard1.shardName);
- t = db.foo;
+s.adminCommand({enablesharding: "test"});
+db = s.getDB("test");
+s.ensurePrimaryShard('test', s.shard1.shardName);
+t = db.foo;
- bigString = "";
- stringSize = 1024;
+bigString = "";
+stringSize = 1024;
- while (bigString.length < stringSize)
- bigString += "asdasdas";
+while (bigString.length < stringSize)
+ bigString += "asdasdas";
- stringSize = bigString.length;
- docsPerChunk = Math.ceil((chunkSize * 1024 * 1024) / (stringSize - 12));
- numChunks = 5;
- numDocs = 20 * docsPerChunk;
+stringSize = bigString.length;
+docsPerChunk = Math.ceil((chunkSize * 1024 * 1024) / (stringSize - 12));
+numChunks = 5;
+numDocs = 20 * docsPerChunk;
- print("stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs);
+print("stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs);
- var bulk = t.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, s: bigString});
- }
- assert.writeOK(bulk.execute());
+var bulk = t.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, s: bigString});
+}
+assert.writeOK(bulk.execute());
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
- assert.lt(numChunks, s.config.chunks.find().count(), "initial 1");
+assert.lt(numChunks, s.config.chunks.find().count(), "initial 1");
- primary = s.getPrimaryShard("test").getDB("test").foo;
- secondaryName = s.getOther(primary.name);
- secondary = secondaryName.getDB("test").foo;
+primary = s.getPrimaryShard("test").getDB("test").foo;
+secondaryName = s.getOther(primary.name);
+secondary = secondaryName.getDB("test").foo;
- assert.eq(numDocs, primary.count(), "initial 2");
- assert.eq(0, secondary.count(), "initial 3");
- assert.eq(numDocs, t.count(), "initial 4");
+assert.eq(numDocs, primary.count(), "initial 2");
+assert.eq(0, secondary.count(), "initial 3");
+assert.eq(numDocs, t.count(), "initial 4");
- x = primary.find({_id: {$lt: 500}}).batchSize(2);
- x.next(); // 1. Create an open cursor
+x = primary.find({_id: {$lt: 500}}).batchSize(2);
+x.next(); // 1. Create an open cursor
- print("start moving chunks...");
+print("start moving chunks...");
- // 2. Move chunk from s0 to s1 without waiting for deletion.
- // Command returns, but the deletion on s0 will block due to the open cursor.
- s.adminCommand({moveChunk: "test.foo", find: {_id: 0}, to: secondaryName.name});
+// 2. Move chunk from s0 to s1 without waiting for deletion.
+// Command returns, but the deletion on s0 will block due to the open cursor.
+s.adminCommand({moveChunk: "test.foo", find: {_id: 0}, to: secondaryName.name});
- // 3. Start second moveChunk command from s0 to s1.
- // This moveChunk should not observe the above deletion as a 'mod', transfer it to s1 and cause
- // deletion on s1.
- // This moveChunk will wait for deletion.
- join = startParallelShell(
- "db.x.insert( {x:1} ); db.adminCommand( { moveChunk : 'test.foo' , find : { _id : " +
- docsPerChunk * 3 + " } , to : '" + secondaryName.name + "', _waitForDelete: true } )");
- assert.soon(function() {
- return db.x.count() > 0;
- }, "XXX", 30000, 1);
+// 3. Start second moveChunk command from s0 to s1.
+// This moveChunk should not observe the above deletion as a 'mod', transfer it to s1 and cause
+// deletion on s1.
+// This moveChunk will wait for deletion.
+join = startParallelShell(
+ "db.x.insert( {x:1} ); db.adminCommand( { moveChunk : 'test.foo' , find : { _id : " +
+ docsPerChunk * 3 + " } , to : '" + secondaryName.name + "', _waitForDelete: true } )");
+assert.soon(function() {
+ return db.x.count() > 0;
+}, "XXX", 30000, 1);
- // 4. Close the cursor to enable chunk deletion.
- print("itcount: " + x.itcount());
+// 4. Close the cursor to enable chunk deletion.
+print("itcount: " + x.itcount());
- x = null;
- for (i = 0; i < 5; i++)
- gc();
+x = null;
+for (i = 0; i < 5; i++)
+ gc();
- print("cursor should be gone");
+print("cursor should be gone");
- // 5. Waiting for the second moveChunk to finish its deletion.
- // Note the deletion for the first moveChunk may not be finished.
- join();
+// 5. Wait for the second moveChunk to finish its deletion.
+// Note the deletion for the first moveChunk may not be finished.
+join();
- // assert.soon( function(){ return numDocs == t.count(); } , "at end 1" )
- // 6. Check the total number of docs on both shards to make sure no doc is lost.
- // Use itcount() to ignore orphan docments.
- assert.eq(numDocs, t.find().itcount(), "at end 2");
-
- s.stop();
+// assert.soon( function(){ return numDocs == t.count(); } , "at end 1" )
+// 6. Check the total number of docs on both shards to make sure no doc is lost.
+// Use itcount() to ignore orphan documents.
+assert.eq(numDocs, t.find().itcount(), "at end 2");
+s.stop();
})();
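
Both migrations above go through the moveChunk admin command, differing only in _waitForDelete; a minimal sketch of the blocking form, with "shard0001" as a placeholder recipient shard id.

    // Move the chunk owning {_id: 0} and wait for the donor's range deletion.
    assert.commandWorked(db.adminCommand(
        {moveChunk: "test.foo", find: {_id: 0}, to: "shard0001", _waitForDelete: true}));
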
diff --git a/jstests/sharding/sharding_multiple_ns_rs.js b/jstests/sharding/sharding_multiple_ns_rs.js
index f9ff596b87e..cd4a70fda15 100644
--- a/jstests/sharding/sharding_multiple_ns_rs.js
+++ b/jstests/sharding/sharding_multiple_ns_rs.js
@@ -5,56 +5,56 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- var s = new ShardingTest({shards: 1, mongos: 1, other: {rs: true, chunkSize: 1}});
+var s = new ShardingTest({shards: 1, mongos: 1, other: {rs: true, chunkSize: 1}});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- var db = s.getDB("test");
+var db = s.getDB("test");
- var bulk = db.foo.initializeUnorderedBulkOp();
- var bulk2 = db.bar.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({_id: i, x: i});
- bulk2.insert({_id: i, x: i});
- }
- assert.writeOK(bulk.execute());
- assert.writeOK(bulk2.execute());
+var bulk = db.foo.initializeUnorderedBulkOp();
+var bulk2 = db.bar.initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({_id: i, x: i});
+ bulk2.insert({_id: i, x: i});
+}
+assert.writeOK(bulk.execute());
+assert.writeOK(bulk2.execute());
- s.splitAt("test.foo", {_id: 50});
+s.splitAt("test.foo", {_id: 50});
- var other = new Mongo(s.s0.name);
- var dbother = other.getDB("test");
+var other = new Mongo(s.s0.name);
+var dbother = other.getDB("test");
- assert.eq(5, db.foo.findOne({_id: 5}).x);
- assert.eq(5, dbother.foo.findOne({_id: 5}).x);
+assert.eq(5, db.foo.findOne({_id: 5}).x);
+assert.eq(5, dbother.foo.findOne({_id: 5}).x);
- assert.eq(5, db.bar.findOne({_id: 5}).x);
- assert.eq(5, dbother.bar.findOne({_id: 5}).x);
+assert.eq(5, db.bar.findOne({_id: 5}).x);
+assert.eq(5, dbother.bar.findOne({_id: 5}).x);
- s.rs0.awaitReplication();
- s.rs0.stopMaster(15);
+s.rs0.awaitReplication();
+s.rs0.stopMaster(15);
- // Wait for mongos and the config server primary to recognize the new shard primary
- awaitRSClientHosts(db.getMongo(), s.rs0.getPrimary(), {ismaster: true});
- awaitRSClientHosts(db.getMongo(), s.configRS.getPrimary(), {ismaster: true});
+// Wait for mongos and the config server primary to recognize the new shard primary
+awaitRSClientHosts(db.getMongo(), s.rs0.getPrimary(), {ismaster: true});
+awaitRSClientHosts(db.getMongo(), s.configRS.getPrimary(), {ismaster: true});
- assert.eq(5, db.foo.findOne({_id: 5}).x);
- assert.eq(5, db.bar.findOne({_id: 5}).x);
+assert.eq(5, db.foo.findOne({_id: 5}).x);
+assert.eq(5, db.bar.findOne({_id: 5}).x);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.bar", key: {_id: 1}}));
- s.splitAt("test.bar", {_id: 50});
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.bar", key: {_id: 1}}));
+s.splitAt("test.bar", {_id: 50});
- var yetagain = new Mongo(s.s.name);
- assert.eq(5, yetagain.getDB("test").bar.findOne({_id: 5}).x);
- assert.eq(5, yetagain.getDB("test").foo.findOne({_id: 5}).x);
+var yetagain = new Mongo(s.s.name);
+assert.eq(5, yetagain.getDB("test").bar.findOne({_id: 5}).x);
+assert.eq(5, yetagain.getDB("test").foo.findOne({_id: 5}).x);
- assert.eq(5, dbother.bar.findOne({_id: 5}).x);
- assert.eq(5, dbother.foo.findOne({_id: 5}).x);
+assert.eq(5, dbother.bar.findOne({_id: 5}).x);
+assert.eq(5, dbother.foo.findOne({_id: 5}).x);
- s.stop();
+s.stop();
})();
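
s.splitAt() used above is a ShardingTest convenience; roughly, it issues the split admin command with a middle point, sketched here under that assumption for the same namespace and split key.

    // Split the test.foo chunk at _id 50 via the raw admin command.
    assert.commandWorked(db.adminCommand({split: "test.foo", middle: {_id: 50}}));
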
diff --git a/jstests/sharding/sharding_options.js b/jstests/sharding/sharding_options.js
index 190d78a1e94..0703a784eff 100644
--- a/jstests/sharding/sharding_options.js
+++ b/jstests/sharding/sharding_options.js
@@ -29,8 +29,7 @@ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_paranoia.jso
// Sharding Role
jsTest.log("Testing \"configsvr\" command line option");
var expectedResult = {
- "parsed":
- {"sharding": {"clusterRole": "configsvr"}, "storage": {"journal": {"enabled": true}}}
+ "parsed": {"sharding": {"clusterRole": "configsvr"}, "storage": {"journal": {"enabled": true}}}
};
testGetCmdLineOptsMongod({configsvr: "", journal: ""}, expectedResult);
diff --git a/jstests/sharding/sharding_rs1.js b/jstests/sharding/sharding_rs1.js
index c5022348fa6..af021bf9741 100644
--- a/jstests/sharding/sharding_rs1.js
+++ b/jstests/sharding/sharding_rs1.js
@@ -2,60 +2,59 @@
* tests sharding with replica sets
*/
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 3, other: {rs: true, chunkSize: 1, enableBalancer: true}});
+var s = new ShardingTest({shards: 3, other: {rs: true, chunkSize: 1, enableBalancer: true}});
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard0.shardName);
- s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard0.shardName);
+s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
- var db = s.getDB("test");
+var db = s.getDB("test");
- var bigString = "X".repeat(256 * 1024);
+var bigString = "X".repeat(256 * 1024);
- var insertedBytes = 0;
- var num = 0;
+var insertedBytes = 0;
+var num = 0;
- // Insert 10 MB of data to result in 10+ chunks
- var bulk = db.foo.initializeUnorderedBulkOp();
- while (insertedBytes < (10 * 1024 * 1024)) {
- bulk.insert({_id: num++, s: bigString, x: Math.random()});
- insertedBytes += bigString.length;
- }
- assert.writeOK(bulk.execute({w: 3}));
-
- assert.commandWorked(s.s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+// Insert 10 MB of data to result in 10+ chunks
+var bulk = db.foo.initializeUnorderedBulkOp();
+while (insertedBytes < (10 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString, x: Math.random()});
+ insertedBytes += bigString.length;
+}
+assert.writeOK(bulk.execute({w: 3}));
- jsTest.log("Waiting for balance to complete");
- s.awaitBalance('foo', 'test', 5 * 60 * 1000);
+assert.commandWorked(s.s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- jsTest.log("Stopping balancer");
- s.stopBalancer();
+jsTest.log("Waiting for balance to complete");
+s.awaitBalance('foo', 'test', 5 * 60 * 1000);
- jsTest.log("Balancer stopped, checking dbhashes");
- s._rs.forEach(function(rsNode) {
- rsNode.test.awaitReplication();
+jsTest.log("Stopping balancer");
+s.stopBalancer();
- var dbHashes = rsNode.test.getHashes("test");
- print(rsNode.url + ': ' + tojson(dbHashes));
+jsTest.log("Balancer stopped, checking dbhashes");
+s._rs.forEach(function(rsNode) {
+ rsNode.test.awaitReplication();
- for (var j = 0; j < dbHashes.slaves.length; j++) {
- assert.eq(dbHashes.master.md5,
- dbHashes.slaves[j].md5,
- "hashes not same for: " + rsNode.url + " slave: " + j);
- }
- });
+ var dbHashes = rsNode.test.getHashes("test");
+ print(rsNode.url + ': ' + tojson(dbHashes));
- assert.eq(num, db.foo.find().count(), "C1");
- assert.eq(num, db.foo.find().itcount(), "C2");
- assert.eq(num, db.foo.find().sort({_id: 1}).itcount(), "C3");
- assert.eq(num, db.foo.find().sort({_id: -1}).itcount(), "C4");
+ for (var j = 0; j < dbHashes.slaves.length; j++) {
+ assert.eq(dbHashes.master.md5,
+ dbHashes.slaves[j].md5,
+ "hashes not same for: " + rsNode.url + " slave: " + j);
+ }
+});
- db.foo.ensureIndex({x: 1});
- assert.eq(num, db.foo.find().sort({x: 1}).itcount(), "C5");
- assert.eq(num, db.foo.find().sort({x: -1}).itcount(), "C6");
+assert.eq(num, db.foo.find().count(), "C1");
+assert.eq(num, db.foo.find().itcount(), "C2");
+assert.eq(num, db.foo.find().sort({_id: 1}).itcount(), "C3");
+assert.eq(num, db.foo.find().sort({_id: -1}).itcount(), "C4");
- s.stop();
+db.foo.ensureIndex({x: 1});
+assert.eq(num, db.foo.find().sort({x: 1}).itcount(), "C5");
+assert.eq(num, db.foo.find().sort({x: -1}).itcount(), "C6");
+s.stop();
})();
diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
index 648512b051d..2b35cf695e4 100644
--- a/jstests/sharding/sharding_rs2.js
+++ b/jstests/sharding/sharding_rs2.js
@@ -10,246 +10,246 @@
//
(function() {
- 'use strict';
-
- // The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
- // from stepping down during migrations on slow evergreen builders.
- var s = new ShardingTest({
- shards: 2,
- other: {
- chunkSize: 1,
- rs0: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- },
- rs1: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- }
+'use strict';
+
+// The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
+// from stepping down during migrations on slow evergreen builders.
+var s = new ShardingTest({
+ shards: 2,
+ other: {
+ chunkSize: 1,
+ rs0: {
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
+ },
+ rs1: {
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
}
- });
+ }
+});
- var db = s.getDB("test");
- var t = db.foo;
+var db = s.getDB("test");
+var t = db.foo;
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard0.shardName);
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard0.shardName);
- // -------------------------------------------------------------------------------------------
- // ---------- test that config server updates when replica set config changes ----------------
- // -------------------------------------------------------------------------------------------
+// -------------------------------------------------------------------------------------------
+// ---------- test that config server updates when replica set config changes ----------------
+// -------------------------------------------------------------------------------------------
- db.foo.save({_id: 5, x: 17});
- assert.eq(1, db.foo.count());
+db.foo.save({_id: 5, x: 17});
+assert.eq(1, db.foo.count());
- s.config.databases.find().forEach(printjson);
- s.config.shards.find().forEach(printjson);
+s.config.databases.find().forEach(printjson);
+s.config.shards.find().forEach(printjson);
- function countNodes() {
- return s.config.shards.findOne({_id: s.shard0.shardName}).host.split(",").length;
- }
+function countNodes() {
+ return s.config.shards.findOne({_id: s.shard0.shardName}).host.split(",").length;
+}
+
+assert.eq(2, countNodes(), "A1");
- assert.eq(2, countNodes(), "A1");
+var rs = s.rs0;
+rs.add({'shardsvr': ""});
+try {
+ rs.reInitiate();
+} catch (e) {
+    // this is ok as rs's may close connections on a change of master
+ print(e);
+}
- var rs = s.rs0;
- rs.add({'shardsvr': ""});
+assert.soon(function() {
try {
- rs.reInitiate();
+ printjson(rs.getPrimary().getDB("admin").runCommand("isMaster"));
+ s.config.shards.find().forEach(printjsononeline);
+ return countNodes() == 3;
} catch (e) {
- // this os ok as rs's may close connections on a change of master
print(e);
}
+}, "waiting for config server to update", 180 * 1000, 1000);
- assert.soon(function() {
- try {
- printjson(rs.getPrimary().getDB("admin").runCommand("isMaster"));
- s.config.shards.find().forEach(printjsononeline);
- return countNodes() == 3;
- } catch (e) {
- print(e);
- }
- }, "waiting for config server to update", 180 * 1000, 1000);
-
- // cleanup after adding node
- for (var i = 0; i < 5; i++) {
- try {
- db.foo.findOne();
- } catch (e) {
- }
+// cleanup after adding node
+for (var i = 0; i < 5; i++) {
+ try {
+ db.foo.findOne();
+ } catch (e) {
}
+}
- jsTest.log(
- "Awaiting replication of all nodes, so spurious sync'ing queries don't upset our counts...");
- rs.awaitReplication();
- // Make sure we wait for secondaries here - otherwise a secondary could come online later and be
- // used for the
- // count command before being fully replicated
- jsTest.log("Awaiting secondary status of all nodes");
- rs.waitForState(rs.getSecondaries(), ReplSetTest.State.SECONDARY, 180 * 1000);
+jsTest.log(
+ "Awaiting replication of all nodes, so spurious sync'ing queries don't upset our counts...");
+rs.awaitReplication();
+// Make sure we wait for secondaries here - otherwise a secondary could come online later
+// and be used for the count command before being fully replicated
+jsTest.log("Awaiting secondary status of all nodes");
+rs.waitForState(rs.getSecondaries(), ReplSetTest.State.SECONDARY, 180 * 1000);
- // -------------------------------------------------------------------------------------------
- // ---------- test routing to slaves ----------------
- // -------------------------------------------------------------------------------------------
+// -------------------------------------------------------------------------------------------
+// ---------- test routing to slaves ----------------
+// -------------------------------------------------------------------------------------------
- // --- not sharded ----
+// --- not sharded ----
- var m = new Mongo(s.s.name);
- var ts = m.getDB("test").foo;
+var m = new Mongo(s.s.name);
+var ts = m.getDB("test").foo;
- var before = rs.getPrimary().adminCommand("serverStatus").opcounters;
+var before = rs.getPrimary().adminCommand("serverStatus").opcounters;
- for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne().x, "B1");
- }
+for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne().x, "B1");
+}
- m.setSlaveOk();
+m.setSlaveOk();
- for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne().x, "B2");
- }
+for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne().x, "B2");
+}
- var after = rs.getPrimary().adminCommand("serverStatus").opcounters;
+var after = rs.getPrimary().adminCommand("serverStatus").opcounters;
- printjson(before);
- printjson(after);
+printjson(before);
+printjson(after);
- assert.lte(before.query + 10, after.query, "B3");
+assert.lte(before.query + 10, after.query, "B3");
- // --- add more data ----
+// --- add more data ----
- db.foo.ensureIndex({x: 1});
+db.foo.ensureIndex({x: 1});
- var bulk = db.foo.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- if (i == 17)
- continue;
- bulk.insert({x: i});
- }
- assert.writeOK(bulk.execute({w: 3}));
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ if (i == 17)
+ continue;
+ bulk.insert({x: i});
+}
+assert.writeOK(bulk.execute({w: 3}));
- // Counts pass the options of the connection - which is slaveOk'd, so we need to wait for
- // replication for this and future tests to pass
- rs.awaitReplication();
+// Counts pass the options of the connection - which is slaveOk'd, so we need to wait for
+// replication for this and future tests to pass
+rs.awaitReplication();
- assert.eq(100, ts.count(), "B4");
- assert.eq(100, ts.find().itcount(), "B5");
- assert.eq(100, ts.find().batchSize(5).itcount(), "B6");
+assert.eq(100, ts.count(), "B4");
+assert.eq(100, ts.find().itcount(), "B5");
+assert.eq(100, ts.find().batchSize(5).itcount(), "B6");
- var cursor = t.find().batchSize(3);
- cursor.next();
- cursor.close();
+var cursor = t.find().batchSize(3);
+cursor.next();
+cursor.close();
- // --- sharded ----
+// --- sharded ----
- assert.eq(100, db.foo.count(), "C1");
+assert.eq(100, db.foo.count(), "C1");
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
- // We're doing some manual chunk stuff, so stop the balancer first
- s.stopBalancer();
+// We're doing some manual chunk stuff, so stop the balancer first
+s.stopBalancer();
- assert.eq(100, t.count(), "C2");
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 50}}));
+assert.eq(100, t.count(), "C2");
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 50}}));
- s.printShardingStatus();
+s.printShardingStatus();
- var other = s.config.shards.findOne({_id: {$ne: s.shard0.shardName}});
- assert.commandWorked(s.getDB('admin').runCommand({
- moveChunk: "test.foo",
- find: {x: 10},
- to: other._id,
- _secondaryThrottle: true,
- writeConcern: {w: 2},
- _waitForDelete: true
- }));
- assert.eq(100, t.count(), "C3");
+var other = s.config.shards.findOne({_id: {$ne: s.shard0.shardName}});
+assert.commandWorked(s.getDB('admin').runCommand({
+ moveChunk: "test.foo",
+ find: {x: 10},
+ to: other._id,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ _waitForDelete: true
+}));
+assert.eq(100, t.count(), "C3");
- assert.eq(50, rs.getPrimary().getDB("test").foo.count(), "C4");
+assert.eq(50, rs.getPrimary().getDB("test").foo.count(), "C4");
- // by non-shard key
+// by non-shard key
- m = new Mongo(s.s.name);
- ts = m.getDB("test").foo;
+m = new Mongo(s.s.name);
+ts = m.getDB("test").foo;
- before = rs.getPrimary().adminCommand("serverStatus").opcounters;
+before = rs.getPrimary().adminCommand("serverStatus").opcounters;
- for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne({_id: 5}).x, "D1");
- }
+for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne({_id: 5}).x, "D1");
+}
- m.setSlaveOk();
- for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne({_id: 5}).x, "D2");
- }
+m.setSlaveOk();
+for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne({_id: 5}).x, "D2");
+}
- after = rs.getPrimary().adminCommand("serverStatus").opcounters;
+after = rs.getPrimary().adminCommand("serverStatus").opcounters;
- assert.lte(before.query + 10, after.query, "D3");
+assert.lte(before.query + 10, after.query, "D3");
- // by shard key
+// by shard key
- m = new Mongo(s.s.name);
- m.forceWriteMode("commands");
+m = new Mongo(s.s.name);
+m.forceWriteMode("commands");
- s.printShardingStatus();
+s.printShardingStatus();
- ts = m.getDB("test").foo;
+ts = m.getDB("test").foo;
- before = rs.getPrimary().adminCommand("serverStatus").opcounters;
+before = rs.getPrimary().adminCommand("serverStatus").opcounters;
- for (var i = 0; i < 10; i++) {
- assert.eq(57, ts.findOne({x: 57}).x, "E1");
- }
+for (var i = 0; i < 10; i++) {
+ assert.eq(57, ts.findOne({x: 57}).x, "E1");
+}
- m.setSlaveOk();
- for (var i = 0; i < 10; i++) {
- assert.eq(57, ts.findOne({x: 57}).x, "E2");
- }
+m.setSlaveOk();
+for (var i = 0; i < 10; i++) {
+ assert.eq(57, ts.findOne({x: 57}).x, "E2");
+}
- after = rs.getPrimary().adminCommand("serverStatus").opcounters;
-
- assert.lte(before.query + 10, after.query, "E3");
-
- assert.eq(100, ts.count(), "E4");
- assert.eq(100, ts.find().itcount(), "E5");
- printjson(ts.find().batchSize(5).explain());
-
- // fsyncLock the secondaries
- rs.getSecondaries().forEach(function(secondary) {
- assert.commandWorked(secondary.getDB("test").fsyncLock());
- });
-
- // Modify data only on the primary replica of the primary shard.
- // { x: 60 } goes to the shard of "rs", which is the primary shard.
- assert.writeOK(ts.insert({primaryOnly: true, x: 60}));
- // Read from secondary through mongos, the doc is not there due to replication delay or fsync.
- // But we can guarantee not to read from primary.
- assert.eq(0, ts.find({primaryOnly: true, x: 60}).itcount());
- // Unlock the secondaries
- rs.getSecondaries().forEach(function(secondary) {
- secondary.getDB("test").fsyncUnlock();
- });
- // Clean up the data
- assert.writeOK(ts.remove({primaryOnly: true, x: 60}, {writeConcern: {w: 3}}));
-
- for (var i = 0; i < 10; i++) {
- m = new Mongo(s.s.name);
- m.setSlaveOk();
- ts = m.getDB("test").foo;
- assert.eq(100, ts.find().batchSize(5).itcount(), "F2." + i);
- }
+after = rs.getPrimary().adminCommand("serverStatus").opcounters;
- for (var i = 0; i < 10; i++) {
- m = new Mongo(s.s.name);
- ts = m.getDB("test").foo;
- assert.eq(100, ts.find().batchSize(5).itcount(), "F3." + i);
- }
+assert.lte(before.query + 10, after.query, "E3");
+
+assert.eq(100, ts.count(), "E4");
+assert.eq(100, ts.find().itcount(), "E5");
+printjson(ts.find().batchSize(5).explain());
+
+// fsyncLock the secondaries
+rs.getSecondaries().forEach(function(secondary) {
+ assert.commandWorked(secondary.getDB("test").fsyncLock());
+});
+
+// Modify data only on the primary replica of the primary shard.
+// { x: 60 } goes to the shard of "rs", which is the primary shard.
+assert.writeOK(ts.insert({primaryOnly: true, x: 60}));
+// Read from secondary through mongos, the doc is not there due to replication delay or fsync.
+// But we can guarantee not to read from primary.
+assert.eq(0, ts.find({primaryOnly: true, x: 60}).itcount());
+// Unlock the secondaries
+rs.getSecondaries().forEach(function(secondary) {
+ secondary.getDB("test").fsyncUnlock();
+});
+// Clean up the data
+assert.writeOK(ts.remove({primaryOnly: true, x: 60}, {writeConcern: {w: 3}}));
+
+for (var i = 0; i < 10; i++) {
+ m = new Mongo(s.s.name);
+ m.setSlaveOk();
+ ts = m.getDB("test").foo;
+ assert.eq(100, ts.find().batchSize(5).itcount(), "F2." + i);
+}
+
+for (var i = 0; i < 10; i++) {
+ m = new Mongo(s.s.name);
+ ts = m.getDB("test").foo;
+ assert.eq(100, ts.find().batchSize(5).itcount(), "F3." + i);
+}
- printjson(db.adminCommand("getShardMap"));
+printjson(db.adminCommand("getShardMap"));
- s.stop();
+s.stop();
})();
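
The B1/B3 opcounter checks above compare the primary's query counter around a batch of reads routed through mongos; a compact sketch of that bookkeeping follows, with a placeholder mongos host and an assumed rs handle to the shard's replica set.

    // Confirm the primary's query opcounter reflects 10 reads issued via mongos.
    var m = new Mongo("localhost:27017");  // placeholder mongos address
    var primary = rs.getPrimary();         // assumes an rs ReplSetTest handle
    var before = primary.adminCommand("serverStatus").opcounters.query;
    for (var i = 0; i < 10; i++) {
        m.getDB("test").foo.findOne();
    }
    var after = primary.adminCommand("serverStatus").opcounters.query;
    assert.lte(before + 10, after, "expected the primary to have served these reads");
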
diff --git a/jstests/sharding/sharding_statistics_server_status.js b/jstests/sharding/sharding_statistics_server_status.js
index 7e25f5465ba..9aaa6ab3a9c 100644
--- a/jstests/sharding/sharding_statistics_server_status.js
+++ b/jstests/sharding/sharding_statistics_server_status.js
@@ -5,179 +5,178 @@
// @tags: [uses_transactions]
(function() {
- 'use strict';
-
- load("jstests/libs/chunk_manipulation_util.js");
- load("jstests/libs/parallelTester.js");
-
- function ShardStat() {
- this.countDonorMoveChunkStarted = 0;
- this.countRecipientMoveChunkStarted = 0;
- this.countDocsClonedOnRecipient = 0;
- this.countDocsClonedOnDonor = 0;
- this.countDocsDeletedOnDonor = 0;
- }
-
- function incrementStatsAndCheckServerShardStats(donor, recipient, numDocs) {
- ++donor.countDonorMoveChunkStarted;
- donor.countDocsClonedOnDonor += numDocs;
- ++recipient.countRecipientMoveChunkStarted;
- recipient.countDocsClonedOnRecipient += numDocs;
- donor.countDocsDeletedOnDonor += numDocs;
- const statsFromServerStatus = shardArr.map(function(shardVal) {
- return shardVal.getDB('admin').runCommand({serverStatus: 1}).shardingStatistics;
- });
- for (let i = 0; i < shardArr.length; ++i) {
- assert(statsFromServerStatus[i]);
- assert(statsFromServerStatus[i].countStaleConfigErrors);
- assert(statsFromServerStatus[i].totalCriticalSectionCommitTimeMillis);
- assert(statsFromServerStatus[i].totalCriticalSectionTimeMillis);
- assert(statsFromServerStatus[i].totalDonorChunkCloneTimeMillis);
- assert(statsFromServerStatus[i].countDonorMoveChunkLockTimeout);
- assert.eq(stats[i].countDonorMoveChunkStarted,
- statsFromServerStatus[i].countDonorMoveChunkStarted);
- assert.eq(stats[i].countDocsClonedOnRecipient,
- statsFromServerStatus[i].countDocsClonedOnRecipient);
- assert.eq(stats[i].countDocsClonedOnDonor,
- statsFromServerStatus[i].countDocsClonedOnDonor);
- assert.eq(stats[i].countDocsDeletedOnDonor,
- statsFromServerStatus[i].countDocsDeletedOnDonor);
- assert.eq(stats[i].countRecipientMoveChunkStarted,
- statsFromServerStatus[i].countRecipientMoveChunkStarted);
- }
- }
-
- function checkServerStatusMigrationLockTimeoutCount(shardConn, count) {
- const shardStats =
- assert.commandWorked(shardConn.adminCommand({serverStatus: 1})).shardingStatistics;
- assert(shardStats.hasOwnProperty("countDonorMoveChunkLockTimeout"));
- assert.eq(count, shardStats.countDonorMoveChunkLockTimeout);
- }
-
- function runConcurrentMoveChunk(host, ns, toShard) {
- const mongos = new Mongo(host);
- return mongos.adminCommand({moveChunk: ns, find: {_id: 1}, to: toShard});
- }
-
- function runConcurrentRead(host, dbName, collName) {
- const mongos = new Mongo(host);
- return mongos.getDB(dbName)[collName].find({_id: 5}).comment("concurrent read").itcount();
- }
-
- const dbName = "db";
- const collName = "coll";
-
- const st = new ShardingTest({shards: 2, mongos: 1});
- const mongos = st.s0;
- const admin = mongos.getDB("admin");
- const coll = mongos.getCollection(dbName + "." + collName);
- const numDocsToInsert = 3;
- const shardArr = [st.shard0, st.shard1];
- const stats = [new ShardStat(), new ShardStat()];
- let numDocsInserted = 0;
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
-
- // Move chunk from shard0 to shard1 without docs.
- assert.commandWorked(
- mongos.adminCommand({moveChunk: coll + '', find: {_id: 1}, to: st.shard1.shardName}));
- incrementStatsAndCheckServerShardStats(stats[0], stats[1], numDocsInserted);
-
- // Insert docs and then move chunk again from shard1 to shard0.
- for (let i = 0; i < numDocsToInsert; ++i) {
- assert.writeOK(coll.insert({_id: i}));
- ++numDocsInserted;
- }
- assert.commandWorked(mongos.adminCommand(
- {moveChunk: coll + '', find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
- incrementStatsAndCheckServerShardStats(stats[1], stats[0], numDocsInserted);
-
- // Check that numbers are indeed cumulative. Move chunk from shard0 to shard1.
- assert.commandWorked(mongos.adminCommand(
- {moveChunk: coll + '', find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));
- incrementStatsAndCheckServerShardStats(stats[0], stats[1], numDocsInserted);
-
- // Move chunk from shard1 to shard0.
- assert.commandWorked(mongos.adminCommand(
- {moveChunk: coll + '', find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
- incrementStatsAndCheckServerShardStats(stats[1], stats[0], numDocsInserted);
-
- //
- // Tests for the count of migrations aborting from lock timeouts.
- //
-
- // Lower migrationLockAcquisitionMaxWaitMS so migrations time out more quickly.
- const donorConn = st.rs0.getPrimary();
- const lockParameterRes = assert.commandWorked(
- donorConn.adminCommand({getParameter: 1, migrationLockAcquisitionMaxWaitMS: 1}));
- const originalMigrationLockTimeout = lockParameterRes.migrationLockAcquisitionMaxWaitMS;
- assert.commandWorked(
- donorConn.adminCommand({setParameter: 1, migrationLockAcquisitionMaxWaitMS: 2 * 1000}));
-
- // Counter starts at 0.
- checkServerStatusMigrationLockTimeoutCount(donorConn, 0);
-
- // Pause a migration before entering the critical section.
- pauseMoveChunkAtStep(donorConn, moveChunkStepNames.reachedSteadyState);
- let moveChunkThread = new ScopedThread(
- runConcurrentMoveChunk, st.s.host, dbName + "." + collName, st.shard1.shardName);
- moveChunkThread.start();
- waitForMoveChunkStep(donorConn, moveChunkStepNames.reachedSteadyState);
-
- // Start a transaction and insert to the migrating chunk to block entering the critical section.
- const session = mongos.startSession();
- session.startTransaction();
- assert.commandWorked(session.getDatabase(dbName)[collName].insert({_id: 5}));
-
- // Unpause the migration and it should time out entering the critical section.
- unpauseMoveChunkAtStep(donorConn, moveChunkStepNames.reachedSteadyState);
- moveChunkThread.join();
- assert.commandFailedWithCode(moveChunkThread.returnData(), ErrorCodes.LockTimeout);
-
- // Clean up the transaction and verify the counter was incremented in serverStatus.
- assert.commandWorked(session.abortTransaction_forTesting());
-
- checkServerStatusMigrationLockTimeoutCount(donorConn, 1);
-
- // Writes are blocked during the critical section, so insert a document into the chunk to be
- // moved before the migration begins that can be read later.
- assert.commandWorked(st.s.getDB(dbName)[collName].insert({_id: 5}));
-
- // Pause a migration after entering the critical section, but before entering the commit phase.
- pauseMoveChunkAtStep(donorConn, moveChunkStepNames.chunkDataCommitted);
- moveChunkThread = new ScopedThread(
- runConcurrentMoveChunk, st.s.host, dbName + "." + collName, st.shard1.shardName);
- moveChunkThread.start();
- waitForMoveChunkStep(donorConn, moveChunkStepNames.chunkDataCommitted);
-
- // Pause a read while it's holding locks so the migration can't commit.
- assert.commandWorked(donorConn.adminCommand(
- {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
- const concurrentRead = new ScopedThread(runConcurrentRead, st.s.host, dbName, collName);
- concurrentRead.start();
- assert.soon(function() {
- const curOpResults = assert.commandWorked(donorConn.adminCommand({currentOp: 1}));
- return curOpResults.inprog.some(op => op["command"]["comment"] === "concurrent read");
+'use strict';
+
+load("jstests/libs/chunk_manipulation_util.js");
+load("jstests/libs/parallelTester.js");
+
+function ShardStat() {
+ this.countDonorMoveChunkStarted = 0;
+ this.countRecipientMoveChunkStarted = 0;
+ this.countDocsClonedOnRecipient = 0;
+ this.countDocsClonedOnDonor = 0;
+ this.countDocsDeletedOnDonor = 0;
+}
+
+function incrementStatsAndCheckServerShardStats(donor, recipient, numDocs) {
+ ++donor.countDonorMoveChunkStarted;
+ donor.countDocsClonedOnDonor += numDocs;
+ ++recipient.countRecipientMoveChunkStarted;
+ recipient.countDocsClonedOnRecipient += numDocs;
+ donor.countDocsDeletedOnDonor += numDocs;
+ const statsFromServerStatus = shardArr.map(function(shardVal) {
+ return shardVal.getDB('admin').runCommand({serverStatus: 1}).shardingStatistics;
});
+ for (let i = 0; i < shardArr.length; ++i) {
+ assert(statsFromServerStatus[i]);
+ assert(statsFromServerStatus[i].countStaleConfigErrors);
+ assert(statsFromServerStatus[i].totalCriticalSectionCommitTimeMillis);
+ assert(statsFromServerStatus[i].totalCriticalSectionTimeMillis);
+ assert(statsFromServerStatus[i].totalDonorChunkCloneTimeMillis);
+ assert(statsFromServerStatus[i].countDonorMoveChunkLockTimeout);
+ assert.eq(stats[i].countDonorMoveChunkStarted,
+ statsFromServerStatus[i].countDonorMoveChunkStarted);
+ assert.eq(stats[i].countDocsClonedOnRecipient,
+ statsFromServerStatus[i].countDocsClonedOnRecipient);
+ assert.eq(stats[i].countDocsClonedOnDonor, statsFromServerStatus[i].countDocsClonedOnDonor);
+ assert.eq(stats[i].countDocsDeletedOnDonor,
+ statsFromServerStatus[i].countDocsDeletedOnDonor);
+ assert.eq(stats[i].countRecipientMoveChunkStarted,
+ statsFromServerStatus[i].countRecipientMoveChunkStarted);
+ }
+}
+
+function checkServerStatusMigrationLockTimeoutCount(shardConn, count) {
+ const shardStats =
+ assert.commandWorked(shardConn.adminCommand({serverStatus: 1})).shardingStatistics;
+ assert(shardStats.hasOwnProperty("countDonorMoveChunkLockTimeout"));
+ assert.eq(count, shardStats.countDonorMoveChunkLockTimeout);
+}
+
+function runConcurrentMoveChunk(host, ns, toShard) {
+ const mongos = new Mongo(host);
+ return mongos.adminCommand({moveChunk: ns, find: {_id: 1}, to: toShard});
+}
+
+function runConcurrentRead(host, dbName, collName) {
+ const mongos = new Mongo(host);
+ return mongos.getDB(dbName)[collName].find({_id: 5}).comment("concurrent read").itcount();
+}
+
+const dbName = "db";
+const collName = "coll";
+
+const st = new ShardingTest({shards: 2, mongos: 1});
+const mongos = st.s0;
+const admin = mongos.getDB("admin");
+const coll = mongos.getCollection(dbName + "." + collName);
+const numDocsToInsert = 3;
+const shardArr = [st.shard0, st.shard1];
+const stats = [new ShardStat(), new ShardStat()];
+let numDocsInserted = 0;
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+
+// Move chunk from shard0 to shard1 without docs.
+assert.commandWorked(
+ mongos.adminCommand({moveChunk: coll + '', find: {_id: 1}, to: st.shard1.shardName}));
+incrementStatsAndCheckServerShardStats(stats[0], stats[1], numDocsInserted);
+
+// Insert docs and then move chunk again from shard1 to shard0.
+for (let i = 0; i < numDocsToInsert; ++i) {
+ assert.writeOK(coll.insert({_id: i}));
+ ++numDocsInserted;
+}
+assert.commandWorked(mongos.adminCommand(
+ {moveChunk: coll + '', find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
+incrementStatsAndCheckServerShardStats(stats[1], stats[0], numDocsInserted);
+
+// Check that numbers are indeed cumulative. Move chunk from shard0 to shard1.
+assert.commandWorked(mongos.adminCommand(
+ {moveChunk: coll + '', find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));
+incrementStatsAndCheckServerShardStats(stats[0], stats[1], numDocsInserted);
+
+// Move chunk from shard1 to shard0.
+assert.commandWorked(mongos.adminCommand(
+ {moveChunk: coll + '', find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
+incrementStatsAndCheckServerShardStats(stats[1], stats[0], numDocsInserted);
- // Unpause the migration and it should time out entering the commit phase.
- unpauseMoveChunkAtStep(donorConn, moveChunkStepNames.chunkDataCommitted);
- moveChunkThread.join();
- assert.commandFailedWithCode(moveChunkThread.returnData(), ErrorCodes.LockTimeout);
-
- // Let the read finish and verify the counter was incremented in serverStatus.
- assert.commandWorked(
- donorConn.adminCommand({configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
- concurrentRead.join();
- assert.eq(1, concurrentRead.returnData());
-
- checkServerStatusMigrationLockTimeoutCount(donorConn, 2);
-
- assert.commandWorked(donorConn.adminCommand(
- {setParameter: 1, migrationLockAcquisitionMaxWaitMS: originalMigrationLockTimeout}));
+//
+// Tests for the count of migrations aborting from lock timeouts.
+//
- st.stop();
+// Lower migrationLockAcquisitionMaxWaitMS so migrations time out more quickly.
+const donorConn = st.rs0.getPrimary();
+const lockParameterRes = assert.commandWorked(
+ donorConn.adminCommand({getParameter: 1, migrationLockAcquisitionMaxWaitMS: 1}));
+const originalMigrationLockTimeout = lockParameterRes.migrationLockAcquisitionMaxWaitMS;
+assert.commandWorked(
+ donorConn.adminCommand({setParameter: 1, migrationLockAcquisitionMaxWaitMS: 2 * 1000}));
+
+// Counter starts at 0.
+checkServerStatusMigrationLockTimeoutCount(donorConn, 0);
+
+// Pause a migration before entering the critical section.
+pauseMoveChunkAtStep(donorConn, moveChunkStepNames.reachedSteadyState);
+let moveChunkThread = new ScopedThread(
+ runConcurrentMoveChunk, st.s.host, dbName + "." + collName, st.shard1.shardName);
+moveChunkThread.start();
+waitForMoveChunkStep(donorConn, moveChunkStepNames.reachedSteadyState);
+
+// Start a transaction and insert to the migrating chunk to block entering the critical section.
+const session = mongos.startSession();
+session.startTransaction();
+assert.commandWorked(session.getDatabase(dbName)[collName].insert({_id: 5}));
+
+// Unpause the migration and it should time out entering the critical section.
+unpauseMoveChunkAtStep(donorConn, moveChunkStepNames.reachedSteadyState);
+moveChunkThread.join();
+assert.commandFailedWithCode(moveChunkThread.returnData(), ErrorCodes.LockTimeout);
+
+// Clean up the transaction and verify the counter was incremented in serverStatus.
+assert.commandWorked(session.abortTransaction_forTesting());
+
+checkServerStatusMigrationLockTimeoutCount(donorConn, 1);
+
+// Writes are blocked during the critical section, so insert a document into the chunk to be
+// moved before the migration begins that can be read later.
+assert.commandWorked(st.s.getDB(dbName)[collName].insert({_id: 5}));
+
+// Pause a migration after entering the critical section, but before entering the commit phase.
+pauseMoveChunkAtStep(donorConn, moveChunkStepNames.chunkDataCommitted);
+moveChunkThread = new ScopedThread(
+ runConcurrentMoveChunk, st.s.host, dbName + "." + collName, st.shard1.shardName);
+moveChunkThread.start();
+waitForMoveChunkStep(donorConn, moveChunkStepNames.chunkDataCommitted);
+
+// Pause a read while it's holding locks so the migration can't commit.
+assert.commandWorked(
+ donorConn.adminCommand({configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
+const concurrentRead = new ScopedThread(runConcurrentRead, st.s.host, dbName, collName);
+concurrentRead.start();
+assert.soon(function() {
+ const curOpResults = assert.commandWorked(donorConn.adminCommand({currentOp: 1}));
+ return curOpResults.inprog.some(op => op["command"]["comment"] === "concurrent read");
+});
+
+// Unpause the migration and it should time out entering the commit phase.
+unpauseMoveChunkAtStep(donorConn, moveChunkStepNames.chunkDataCommitted);
+moveChunkThread.join();
+assert.commandFailedWithCode(moveChunkThread.returnData(), ErrorCodes.LockTimeout);
+
+// Let the read finish and verify the counter was incremented in serverStatus.
+assert.commandWorked(
+ donorConn.adminCommand({configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
+concurrentRead.join();
+assert.eq(1, concurrentRead.returnData());
+
+checkServerStatusMigrationLockTimeoutCount(donorConn, 2);
+
+assert.commandWorked(donorConn.adminCommand(
+ {setParameter: 1, migrationLockAcquisitionMaxWaitMS: originalMigrationLockTimeout}));
+
+st.stop();
})();
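
All of the counters verified above come from the shardingStatistics section of serverStatus on the shard itself; a minimal sketch of reading one of them, assuming the shell is connected to a shard primary.

    // Read the moveChunk lock-timeout counter from a shard's serverStatus output.
    var stats = assert.commandWorked(db.adminCommand({serverStatus: 1})).shardingStatistics;
    print("countDonorMoveChunkLockTimeout: " + stats.countDonorMoveChunkLockTimeout);
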
diff --git a/jstests/sharding/shards_and_config_return_last_committed_optime.js b/jstests/sharding/shards_and_config_return_last_committed_optime.js
index b884deaef39..efe6d7f431b 100644
--- a/jstests/sharding/shards_and_config_return_last_committed_optime.js
+++ b/jstests/sharding/shards_and_config_return_last_committed_optime.js
@@ -10,185 +10,183 @@
* - standalone mongod
*/
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js"); // For stopReplProducer
-
- function assertCmdDoesNotReturnLastCommittedOpTime(testDB, cmdObj, connType, expectSuccess) {
+"use strict";
+
+load("jstests/libs/write_concern_util.js"); // For stopReplProducer
+
+function assertCmdDoesNotReturnLastCommittedOpTime(testDB, cmdObj, connType, expectSuccess) {
+ const res = testDB.runCommand(cmdObj);
+ assert.eq(expectSuccess ? 1 : 0, res.ok);
+ assert(typeof res.lastCommittedOpTime === "undefined",
+ "Expected response from a " + connType + " to not contain lastCommittedOpTime," +
+ " received: " + tojson(res) + ", cmd was: " + tojson(cmdObj));
+}
+
+function assertDoesNotReturnLastCommittedOpTime(testDB, collName, connType) {
+    // Successful commands should not return lastCommittedOpTime.
+ assertCmdDoesNotReturnLastCommittedOpTime(testDB, {find: collName}, connType, true);
+
+    // Failed commands should not return lastCommittedOpTime.
+ assertCmdDoesNotReturnLastCommittedOpTime(
+ testDB, {dummyCommand: collName}, connType, false /* expectSuccess */);
+ assertCmdDoesNotReturnLastCommittedOpTime(testDB,
+ {find: collName, readConcern: {invalid: "rc"}},
+ connType,
+ false /* expectSuccess */);
+ assertCmdDoesNotReturnLastCommittedOpTime(
+ testDB,
+ {insert: collName, documents: [{x: 2}], writeConcern: {invalid: "wc"}},
+ connType,
+ false /* expectSuccess */);
+}
+
+function assertCmdReturnsLastCommittedOpTime(testDB, cmdObj, connType, expectSuccess) {
+ // Retry up to one time to avoid possible failures from lag in setting the
+ // lastCommittedOpTime.
+ assert.retryNoExcept(() => {
const res = testDB.runCommand(cmdObj);
assert.eq(expectSuccess ? 1 : 0, res.ok);
- assert(typeof res.lastCommittedOpTime === "undefined",
- "Expected response from a " + connType + " to not contain lastCommittedOpTime," +
+ assert(typeof res.lastCommittedOpTime !== "undefined",
+ "Expected response from a " + connType + " to contain lastCommittedOpTime," +
" received: " + tojson(res) + ", cmd was: " + tojson(cmdObj));
- }
-
- function assertDoesNotReturnLastCommittedOpTime(testDB, collName, connType) {
- // Successful commands return lastCommittedOpTime.
- assertCmdDoesNotReturnLastCommittedOpTime(testDB, {find: collName}, connType, true);
-
- // Failed commands return lastCommittedOpTime.
- assertCmdDoesNotReturnLastCommittedOpTime(
- testDB, {dummyCommand: collName}, connType, false /* expectSuccess */);
- assertCmdDoesNotReturnLastCommittedOpTime(testDB,
- {find: collName, readConcern: {invalid: "rc"}},
- connType,
- false /* expectSuccess */);
- assertCmdDoesNotReturnLastCommittedOpTime(
- testDB,
- {insert: collName, documents: [{x: 2}], writeConcern: {invalid: "wc"}},
- connType,
- false /* expectSuccess */);
- }
-
- function assertCmdReturnsLastCommittedOpTime(testDB, cmdObj, connType, expectSuccess) {
- // Retry up to one time to avoid possible failures from lag in setting the
- // lastCommittedOpTime.
- assert.retryNoExcept(() => {
- const res = testDB.runCommand(cmdObj);
- assert.eq(expectSuccess ? 1 : 0, res.ok);
- assert(typeof res.lastCommittedOpTime !== "undefined",
- "Expected response from a " + connType + " to contain lastCommittedOpTime," +
- " received: " + tojson(res) + ", cmd was: " + tojson(cmdObj));
-
- // The last committed opTime may advance after replSetGetStatus finishes executing and
- // before its response's metadata is computed, in which case the response's
- // lastCommittedOpTime will be greater than the lastCommittedOpTime timestamp in its
- // body. Assert the timestamp is <= lastCommittedOpTime to account for this.
- const statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
- assert.lte(
- 0,
- bsonWoCompare(res.lastCommittedOpTime, statusRes.optimes.lastCommittedOpTime.ts),
- "lastCommittedOpTime in command response, " + res.lastCommittedOpTime +
- ", is not <= to the replSetGetStatus lastCommittedOpTime timestamp, " +
- statusRes.optimes.lastCommittedOpTime.ts + ", cmd was: " + tojson(cmdObj));
-
- return true;
- }, "command: " + tojson(cmdObj) + " failed to return correct lastCommittedOpTime", 2);
- }
-
- function assertReturnsLastCommittedOpTime(testDB, collName, connType) {
- // Successful commands return lastCommittedOpTime.
- assertCmdReturnsLastCommittedOpTime(
- testDB, {find: collName}, connType, true /* expectSuccess */);
-
- // Failed commands return lastCommittedOpTime.
- assertCmdReturnsLastCommittedOpTime(
- testDB, {dummyCommand: collName}, connType, false /* expectSuccess */);
- assertCmdReturnsLastCommittedOpTime(testDB,
- {find: collName, readConcern: {invalid: "rc"}},
- connType,
- false /* expectSuccess */);
- assertCmdReturnsLastCommittedOpTime(
- testDB,
- {insert: collName, documents: [{x: 2}], writeConcern: {invalid: "wc"}},
- connType,
- false /* expectSuccess */);
- }
-
- //
- // Mongos should not return lastCommittedOpTime.
- //
-
- const st = new ShardingTest({shards: 1, rs: {nodes: 2}, config: 2});
- assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
- assert.commandWorked(st.s.adminCommand({shardCollection: "test.foo", key: {x: 1}}));
-
- // Sharded collection.
- assertDoesNotReturnLastCommittedOpTime(
- st.s.getDB("test"), "foo", "mongos talking to a sharded collection");
-
- // Unsharded collection.
- assertDoesNotReturnLastCommittedOpTime(
- st.s.getDB("test"), "unsharded", "mongos talking to a non-sharded collection");
-
- // Collection stored on the config server.
- assertDoesNotReturnLastCommittedOpTime(
- st.s.getDB("config"), "foo", "mongos talking to a config server collection");
-
- //
- // A mongod in a sharded replica set returns lastCommittedOpTime.
- //
-
- // To verify the lastCommittedOpTime is being returned, pause replication on the secondary to
- // prevent the primary from advancing its lastCommittedOpTime and then perform a local write to
- // advance the primary's lastAppliedOpTime.
- let primary = st.rs0.getPrimary();
- let secondary = st.rs0.getSecondary();
-
- st.rs0.awaitLastOpCommitted();
- stopServerReplication(secondary);
- assert.writeOK(primary.getDB("test").foo.insert({x: 1}, {writeConcern: {w: 1}}));
-
- // Sharded collection.
- assertReturnsLastCommittedOpTime(primary.getDB("test"), "foo", "sharding-aware shard primary");
- assertReturnsLastCommittedOpTime(
- secondary.getDB("test"), "foo", "sharding-aware shard secondary");
-
- // Unsharded collection.
- assertReturnsLastCommittedOpTime(
- primary.getDB("test"), "unsharded", "sharding-aware shard primary");
- assertReturnsLastCommittedOpTime(
- secondary.getDB("test"), "unsharded", "sharding-aware shard secondary");
-
- restartServerReplication(secondary);
-
- //
- // A config server in a sharded replica set returns lastCommittedOpTime.
- //
-
- // Split the lastCommitted and lastApplied opTimes by pausing secondary application and
- // performing a local write.
- primary = st.configRS.getPrimary();
- secondary = st.configRS.getSecondary();
-
- st.configRS.awaitLastOpCommitted();
- stopServerReplication(secondary);
- assert.writeOK(primary.getDB("config").foo.insert({x: 1}, {writeConcern: {w: 1}}));
-
- assertReturnsLastCommittedOpTime(primary.getDB("test"), "foo", "config server primary");
- assertReturnsLastCommittedOpTime(secondary.getDB("test"), "foo", "config server secondary");
-
- restartServerReplication(secondary);
- st.stop();
-
- //
- // A mongod started with --shardsvr that is not sharding aware does not return
- // lastCommittedOpTime.
- //
-
- const replTestShardSvr = new ReplSetTest({nodes: 2, nodeOptions: {shardsvr: ""}});
- replTestShardSvr.startSet();
- replTestShardSvr.initiate();
-
- assertDoesNotReturnLastCommittedOpTime(
- replTestShardSvr.getPrimary().getDB("test"), "foo", "non-sharding aware shard primary");
- assertDoesNotReturnLastCommittedOpTime(
- replTestShardSvr.getSecondary().getDB("test"), "foo", "non-sharding aware shard secondary");
-
- replTestShardSvr.stopSet();
-
- //
- // A mongod from a standalone replica set does not return lastCommittedOpTime.
- //
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
- assertDoesNotReturnLastCommittedOpTime(
- replTest.getPrimary().getDB("test"), "foo", "standalone replica set primary");
- assertDoesNotReturnLastCommittedOpTime(
- replTest.getSecondary().getDB("test"), "foo", "standalone replica set secondary");
+ // The last committed opTime may advance after replSetGetStatus finishes executing and
+ // before its response's metadata is computed, in which case the response's
+ // lastCommittedOpTime will be greater than the lastCommittedOpTime timestamp in its
+ // body. Assert the timestamp is <= lastCommittedOpTime to account for this.
+ const statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
+ assert.lte(0,
+ bsonWoCompare(res.lastCommittedOpTime, statusRes.optimes.lastCommittedOpTime.ts),
+ "lastCommittedOpTime in command response, " + res.lastCommittedOpTime +
+ ", is not <= to the replSetGetStatus lastCommittedOpTime timestamp, " +
+ statusRes.optimes.lastCommittedOpTime.ts + ", cmd was: " + tojson(cmdObj));
+
+ return true;
+ }, "command: " + tojson(cmdObj) + " failed to return correct lastCommittedOpTime", 2);
+}
+
+function assertReturnsLastCommittedOpTime(testDB, collName, connType) {
+ // Successful commands return lastCommittedOpTime.
+ assertCmdReturnsLastCommittedOpTime(
+ testDB, {find: collName}, connType, true /* expectSuccess */);
+
+ // Failed commands return lastCommittedOpTime.
+ assertCmdReturnsLastCommittedOpTime(
+ testDB, {dummyCommand: collName}, connType, false /* expectSuccess */);
+ assertCmdReturnsLastCommittedOpTime(testDB,
+ {find: collName, readConcern: {invalid: "rc"}},
+ connType,
+ false /* expectSuccess */);
+ assertCmdReturnsLastCommittedOpTime(
+ testDB,
+ {insert: collName, documents: [{x: 2}], writeConcern: {invalid: "wc"}},
+ connType,
+ false /* expectSuccess */);
+}
+
+//
+// Mongos should not return lastCommittedOpTime.
+//
+
+const st = new ShardingTest({shards: 1, rs: {nodes: 2}, config: 2});
+assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
+assert.commandWorked(st.s.adminCommand({shardCollection: "test.foo", key: {x: 1}}));
+
+// Sharded collection.
+assertDoesNotReturnLastCommittedOpTime(
+ st.s.getDB("test"), "foo", "mongos talking to a sharded collection");
+
+// Unsharded collection.
+assertDoesNotReturnLastCommittedOpTime(
+ st.s.getDB("test"), "unsharded", "mongos talking to a non-sharded collection");
+
+// Collection stored on the config server.
+assertDoesNotReturnLastCommittedOpTime(
+ st.s.getDB("config"), "foo", "mongos talking to a config server collection");
+
+//
+// A mongod in a sharded replica set returns lastCommittedOpTime.
+//
+
+// To verify the lastCommittedOpTime is being returned, pause replication on the secondary to
+// prevent the primary from advancing its lastCommittedOpTime and then perform a local write to
+// advance the primary's lastAppliedOpTime.
+let primary = st.rs0.getPrimary();
+let secondary = st.rs0.getSecondary();
+
+st.rs0.awaitLastOpCommitted();
+stopServerReplication(secondary);
+assert.writeOK(primary.getDB("test").foo.insert({x: 1}, {writeConcern: {w: 1}}));
+
+// Sharded collection.
+assertReturnsLastCommittedOpTime(primary.getDB("test"), "foo", "sharding-aware shard primary");
+assertReturnsLastCommittedOpTime(secondary.getDB("test"), "foo", "sharding-aware shard secondary");
+
+// Unsharded collection.
+assertReturnsLastCommittedOpTime(
+ primary.getDB("test"), "unsharded", "sharding-aware shard primary");
+assertReturnsLastCommittedOpTime(
+ secondary.getDB("test"), "unsharded", "sharding-aware shard secondary");
+
+restartServerReplication(secondary);
+
+//
+// A config server in a sharded replica set returns lastCommittedOpTime.
+//
+
+// Split the lastCommitted and lastApplied opTimes by pausing secondary application and
+// performing a local write.
+primary = st.configRS.getPrimary();
+secondary = st.configRS.getSecondary();
+
+st.configRS.awaitLastOpCommitted();
+stopServerReplication(secondary);
+assert.writeOK(primary.getDB("config").foo.insert({x: 1}, {writeConcern: {w: 1}}));
+
+assertReturnsLastCommittedOpTime(primary.getDB("test"), "foo", "config server primary");
+assertReturnsLastCommittedOpTime(secondary.getDB("test"), "foo", "config server secondary");
+
+restartServerReplication(secondary);
+st.stop();
+
+//
+// A mongod started with --shardsvr that is not sharding aware does not return
+// lastCommittedOpTime.
+//
+
+const replTestShardSvr = new ReplSetTest({nodes: 2, nodeOptions: {shardsvr: ""}});
+replTestShardSvr.startSet();
+replTestShardSvr.initiate();
+
+assertDoesNotReturnLastCommittedOpTime(
+ replTestShardSvr.getPrimary().getDB("test"), "foo", "non-sharding aware shard primary");
+assertDoesNotReturnLastCommittedOpTime(
+ replTestShardSvr.getSecondary().getDB("test"), "foo", "non-sharding aware shard secondary");
+
+replTestShardSvr.stopSet();
+
+//
+// A mongod from a standalone replica set does not return lastCommittedOpTime.
+//
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+assertDoesNotReturnLastCommittedOpTime(
+ replTest.getPrimary().getDB("test"), "foo", "standalone replica set primary");
+assertDoesNotReturnLastCommittedOpTime(
+ replTest.getSecondary().getDB("test"), "foo", "standalone replica set secondary");
- replTest.stopSet();
+replTest.stopSet();
- //
- // A standalone mongod does not return lastCommittedOpTime.
- //
+//
+// A standalone mongod does not return lastCommittedOpTime.
+//
- const standalone = MongoRunner.runMongod();
+const standalone = MongoRunner.runMongod();
- assertDoesNotReturnLastCommittedOpTime(standalone.getDB("test"), "foo", "standalone mongod");
+assertDoesNotReturnLastCommittedOpTime(standalone.getDB("test"), "foo", "standalone mongod");
- MongoRunner.stopMongod(standalone);
+MongoRunner.stopMongod(standalone);
})();
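
For reference, a condensed sketch of the per-node check the test performs (a hypothetical standalone illustration; `conn` is assumed to be an existing replica-set member connection and is not part of the test):

// Run any command, then compare its gossiped lastCommittedOpTime with replSetGetStatus.
const res = conn.getDB("test").runCommand({find: "foo"});
if (typeof res.lastCommittedOpTime !== "undefined") {
    const status = assert.commandWorked(conn.getDB("admin").runCommand({replSetGetStatus: 1}));
    // The opTime gossiped in the response should be at least the node's
    // lastCommittedOpTime timestamp reported in the replSetGetStatus body.
    assert.lte(0, bsonWoCompare(res.lastCommittedOpTime, status.optimes.lastCommittedOpTime.ts));
}
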
diff --git a/jstests/sharding/single_shard_transaction_with_arbiter.js b/jstests/sharding/single_shard_transaction_with_arbiter.js
index 4acf1e5b820..24c82b78dca 100644
--- a/jstests/sharding/single_shard_transaction_with_arbiter.js
+++ b/jstests/sharding/single_shard_transaction_with_arbiter.js
@@ -5,43 +5,43 @@
*/
(function() {
- "use strict";
-
- const name = "single_shard_transaction_with_arbiter";
- const dbName = "test";
- const collName = name;
-
- const shardingTest = new ShardingTest({
- shards: 1,
- rs: {
- nodes: [
- {/* primary */},
- {/* secondary */ rsConfig: {priority: 0}},
- {/* arbiter */ rsConfig: {arbiterOnly: true}}
- ]
- }
- });
-
- const mongos = shardingTest.s;
- const mongosDB = mongos.getDB(dbName);
- const mongosColl = mongosDB[collName];
-
- // Create and shard collection beforehand.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
- assert.commandWorked(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-
- const session = mongos.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- // Start a transaction and verify that it succeeds.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 0}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq({_id: 0}, sessionColl.findOne({_id: 0}));
-
- shardingTest.stop();
+"use strict";
+
+const name = "single_shard_transaction_with_arbiter";
+const dbName = "test";
+const collName = name;
+
+const shardingTest = new ShardingTest({
+ shards: 1,
+ rs: {
+ nodes: [
+ {/* primary */},
+ {/* secondary */ rsConfig: {priority: 0}},
+ {/* arbiter */ rsConfig: {arbiterOnly: true}}
+ ]
+ }
+});
+
+const mongos = shardingTest.s;
+const mongosDB = mongos.getDB(dbName);
+const mongosColl = mongosDB[collName];
+
+// Create and shard collection beforehand.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+assert.commandWorked(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+
+const session = mongos.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+// Start a transaction and verify that it succeeds.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 0}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.eq({_id: 0}, sessionColl.findOne({_id: 0}));
+
+shardingTest.stop();
})();
diff --git a/jstests/sharding/single_shard_transaction_without_majority_reads_lagged.js b/jstests/sharding/single_shard_transaction_without_majority_reads_lagged.js
index 03dd211baf7..82d969eccb7 100644
--- a/jstests/sharding/single_shard_transaction_without_majority_reads_lagged.js
+++ b/jstests/sharding/single_shard_transaction_without_majority_reads_lagged.js
@@ -14,77 +14,77 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js"); // for 'stopServerReplication' and
- // 'restartServerReplication'.
-
- const name = "single_shard_transaction_without_majority_reads_lagged";
- const dbName = "test";
- const collName = name;
-
- const shardingTest = new ShardingTest({
- shards: 1,
- rs: {
- nodes: [
- {/* primary */ enableMajorityReadConcern: 'false'},
- {/* secondary */ rsConfig: {priority: 0}}
- ]
- }
- });
-
- const rst = shardingTest.rs0;
- const mongos = shardingTest.s;
- const mongosDB = mongos.getDB(dbName);
- const mongosColl = mongosDB[collName];
-
- // Create and shard collection beforehand.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // This is the last write the secondary will have before the start of the transaction.
- assert.commandWorked(mongosColl.insert({_id: 1, x: 10}, {writeConcern: {w: "majority"}}));
-
- // We want the secondary to lag for an amount generously greater than the history window.
- const secondary = rst.getSecondary();
- const maxWindowResult = assert.commandWorked(secondary.getDB("admin").runCommand(
- {"getParameter": 1, "maxTargetSnapshotHistoryWindowInSeconds": 1}));
- stopServerReplication(secondary);
-
- const maxWindowInMS = maxWindowResult.maxTargetSnapshotHistoryWindowInSeconds * 1000;
- const lagTimeMS = maxWindowInMS * 2;
- const startTime = Date.now();
- let nextId = 1000;
-
- // Insert a stream of writes to the primary with _ids all numbers greater or equal than
- // 1000 (this is done to easily distinguish them from the write above done with _id: 1).
- // The secondary cannot replicate them, so this has the effect of making that node lagged.
- // It would also update mongos' notion of the latest clusterTime in the system.
- while (Date.now() - startTime < maxWindowInMS) {
- assert.commandWorked(mongosColl.insert({id: nextId}));
- nextId++;
- sleep(50);
+"use strict";
+
+load("jstests/libs/write_concern_util.js"); // for 'stopServerReplication' and
+ // 'restartServerReplication'.
+
+const name = "single_shard_transaction_without_majority_reads_lagged";
+const dbName = "test";
+const collName = name;
+
+const shardingTest = new ShardingTest({
+ shards: 1,
+ rs: {
+ nodes: [
+ {/* primary */ enableMajorityReadConcern: 'false'},
+ {/* secondary */ rsConfig: {priority: 0}}
+ ]
}
-
- // This is an update only the primary has. The test will explicitly check for it in a few lines.
- assert.commandWorked(mongosColl.update({_id: 1, x: 10}, {_id: 1, x: 20}));
-
- const session = mongos.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- // Begin a transaction and make sure its associated read succeeds. To make this test stricter,
- // have the transaction manipulate data that differs between the primary and secondary.
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Confirm that the results of the transaction are based on what the primary's data was when we
- // started the transaction.
- assert.eq(21, sessionColl.findOne({_id: 1}).x);
-
- restartServerReplication(secondary);
- shardingTest.stop();
+});
+
+const rst = shardingTest.rs0;
+const mongos = shardingTest.s;
+const mongosDB = mongos.getDB(dbName);
+const mongosColl = mongosDB[collName];
+
+// Create and shard collection beforehand.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// This is the last write the secondary will have before the start of the transaction.
+assert.commandWorked(mongosColl.insert({_id: 1, x: 10}, {writeConcern: {w: "majority"}}));
+
+// We want the secondary to lag for an amount generously greater than the history window.
+const secondary = rst.getSecondary();
+const maxWindowResult = assert.commandWorked(secondary.getDB("admin").runCommand(
+ {"getParameter": 1, "maxTargetSnapshotHistoryWindowInSeconds": 1}));
+stopServerReplication(secondary);
+
+const maxWindowInMS = maxWindowResult.maxTargetSnapshotHistoryWindowInSeconds * 1000;
+const lagTimeMS = maxWindowInMS * 2;
+const startTime = Date.now();
+let nextId = 1000;
+
+// Insert a stream of writes to the primary with _ids that are all numbers greater than or
+// equal to 1000 (this makes them easy to distinguish from the earlier write with _id: 1).
+// The secondary cannot replicate them, so that node falls behind. These writes also advance
+// mongos' notion of the latest clusterTime in the system.
+while (Date.now() - startTime < maxWindowInMS) {
+ assert.commandWorked(mongosColl.insert({_id: nextId}));
+ nextId++;
+ sleep(50);
+}
+
+// This is an update only the primary has. The test will explicitly check for it in a few lines.
+assert.commandWorked(mongosColl.update({_id: 1, x: 10}, {_id: 1, x: 20}));
+
+const session = mongos.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+// Begin a transaction and make sure its associated read succeeds. To make this test stricter,
+// have the transaction manipulate data that differs between the primary and secondary.
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
+
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// Confirm that the results of the transaction are based on what the primary's data was when we
+// started the transaction.
+assert.eq(21, sessionColl.findOne({_id: 1}).x);
+
+restartServerReplication(secondary);
+shardingTest.stop();
})();
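
One possible hardening of the pause/resume pattern used above (a sketch only, not what the test does): wrap the lagged-secondary section in try/finally so replication is restarted even if an assertion throws partway through.

load("jstests/libs/write_concern_util.js");  // stopServerReplication / restartServerReplication
const laggedSecondary = shardingTest.rs0.getSecondary();
stopServerReplication(laggedSecondary);
try {
    // ... primary-only writes and the transaction under test go here ...
} finally {
    // Always resume replication so shardingTest.stop() can complete cleanly.
    restartServerReplication(laggedSecondary);
}
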
diff --git a/jstests/sharding/snapshot_cursor_commands_mongos.js b/jstests/sharding/snapshot_cursor_commands_mongos.js
index face37b1af3..a853cc10942 100644
--- a/jstests/sharding/snapshot_cursor_commands_mongos.js
+++ b/jstests/sharding/snapshot_cursor_commands_mongos.js
@@ -1,273 +1,266 @@
// Tests snapshot isolation on readConcern level snapshot reads through mongos.
// @tags: [requires_sharding, uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
-
- // This test intentionally runs commands without a logical session id, which is not compatible
- // with implicit sessions.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/global_snapshot_reads_util.js");
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- const dbName = "test";
- const shardedCollName = "shardedColl";
- const unshardedCollName = "unshardedColl";
-
- const commands = {
- aggregate: {
- firstCommand: function(collName) {
- return {
- aggregate: collName,
- pipeline: [{$sort: {_id: 1}}],
- cursor: {batchSize: 5},
- readConcern: {level: "snapshot"},
- };
- },
- secondCommand: function(collName) {
- return {
- aggregate: collName,
- pipeline: [{$sort: {_id: 1}}],
- cursor: {batchSize: 20},
- readConcern: {level: "snapshot"},
- };
- }
+"use strict";
+
+// This test intentionally runs commands without a logical session id, which is not compatible
+// with implicit sessions.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/global_snapshot_reads_util.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+const dbName = "test";
+const shardedCollName = "shardedColl";
+const unshardedCollName = "unshardedColl";
+
+const commands = {
+ aggregate: {
+ firstCommand: function(collName) {
+ return {
+ aggregate: collName,
+ pipeline: [{$sort: {_id: 1}}],
+ cursor: {batchSize: 5},
+ readConcern: {level: "snapshot"},
+ };
},
- find: {
- firstCommand: function(collName) {
- return {
- find: collName,
- sort: {_id: 1},
- batchSize: 5,
- readConcern: {level: "snapshot"},
- };
- },
- secondCommand: function(collName) {
- return {
- find: collName,
- sort: {_id: 1},
- batchSize: 20,
- readConcern: {level: "snapshot"},
- };
- }
+ secondCommand: function(collName) {
+ return {
+ aggregate: collName,
+ pipeline: [{$sort: {_id: 1}}],
+ cursor: {batchSize: 20},
+ readConcern: {level: "snapshot"},
+ };
}
- };
-
- let shardingScenarios = {
- // Tests a snapshot cursor command in a single shard environment. The set up inserts a
- // collection, shards it if it's a collection meant to be sharded, and inserts ten
- // documents.
- singleShard: {
- compatibleCollections: [shardedCollName, unshardedCollName],
- name: "singleShard",
- setUp: function(collName) {
- const st = new ShardingTest({shards: 1, mongos: 1, config: 1});
- return shardingScenarios.allScenarios.setUp(st, collName);
- }
- },
- // Tests a snapshot cursor command in a multi shard enviroment. The set up inserts a
- // collection, shards the collection, and inserts ten documents. Afterwards, chunks are
- // split and moved such that every shard should have some documents that will be found
- // by the cursor command.
- multiShardAllShardReads: {
- compatibleCollections: [shardedCollName],
- name: "multiShardAllShardReads",
- setUp: function(collName) {
- let st = new ShardingTest({shards: 3, mongos: 1, config: 1});
- st = shardingScenarios.allScenarios.setUp(st, collName);
-
- if (st === undefined) {
- return;
- }
-
- const mongos = st.s0;
-
- const ns = dbName + '.' + shardedCollName;
-
- assert.commandWorked(st.splitAt(ns, {_id: 4}));
- assert.commandWorked(st.splitAt(ns, {_id: 7}));
-
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}));
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, find: {_id: 4}, to: st.shard1.shardName}));
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, find: {_id: 7}, to: st.shard2.shardName}));
-
- assert.eq(
- 1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(
- 1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
- assert.eq(
- 1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard2.shardName}));
-
- flushRoutersAndRefreshShardMetadata(st, {ns});
-
- return st;
- }
- },
- // Tests a snapshot cursor command in a multi shard enviroment. The set up inserts a
- // collection, shards the collection, and inserts ten documents. Afterwards, chunks are
- // split and moved such that only two out of three shards will have documents that will be
- // found by the cursor command.
- multiShardSomeShardReads: {
- compatibleCollections: [shardedCollName],
- name: "multiShardSomeShardReads",
- setUp: function(collName) {
- let st = new ShardingTest({shards: 3, mongos: 1, config: 1});
- st = shardingScenarios.allScenarios.setUp(st, collName);
-
- if (st === undefined) {
- return;
- }
-
- const mongos = st.s0;
-
- const ns = dbName + '.' + shardedCollName;
-
- assert.commandWorked(st.splitAt(ns, {_id: 5}));
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, find: {_id: 7}, to: st.shard2.shardName}));
-
- assert.eq(
- 0, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(
- 1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
- assert.eq(
- 1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard2.shardName}));
-
- flushRoutersAndRefreshShardMetadata(st, {ns});
-
- return st;
- }
+ },
+ find: {
+ firstCommand: function(collName) {
+ return {
+ find: collName,
+ sort: {_id: 1},
+ batchSize: 5,
+ readConcern: {level: "snapshot"},
+ };
},
- allScenarios: {
- name: "allScenarios",
- setUp: function(st, collName) {
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand(
- {shardCollection: st.s.getDB(dbName)[shardedCollName] + "", key: {_id: 1}}));
-
- const mainDb = st.s.getDB(dbName);
-
- let bulk = mainDb[collName].initializeUnorderedBulkOp();
- for (let x = 0; x < 10; ++x) {
- bulk.insert({_id: x});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
-
- return st;
+ secondCommand: function(collName) {
+ return {
+ find: collName,
+ sort: {_id: 1},
+ batchSize: 20,
+ readConcern: {level: "snapshot"},
+ };
+ }
+ }
+};
+
+let shardingScenarios = {
+ // Tests a snapshot cursor command in a single-shard environment. The setup inserts a
+ // collection, shards it if it's a collection meant to be sharded, and inserts ten
+ // documents.
+ singleShard: {
+ compatibleCollections: [shardedCollName, unshardedCollName],
+ name: "singleShard",
+ setUp: function(collName) {
+ const st = new ShardingTest({shards: 1, mongos: 1, config: 1});
+ return shardingScenarios.allScenarios.setUp(st, collName);
+ }
+ },
+ // Tests a snapshot cursor command in a multi-shard environment. The setup inserts a
+ // collection, shards the collection, and inserts ten documents. Afterwards, chunks are
+ // split and moved such that every shard should have some documents that will be found
+ // by the cursor command.
+ multiShardAllShardReads: {
+ compatibleCollections: [shardedCollName],
+ name: "multiShardAllShardReads",
+ setUp: function(collName) {
+ let st = new ShardingTest({shards: 3, mongos: 1, config: 1});
+ st = shardingScenarios.allScenarios.setUp(st, collName);
+
+ if (st === undefined) {
+ return;
}
+
+ const mongos = st.s0;
+
+ const ns = dbName + '.' + shardedCollName;
+
+ assert.commandWorked(st.splitAt(ns, {_id: 4}));
+ assert.commandWorked(st.splitAt(ns, {_id: 7}));
+
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}));
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, find: {_id: 4}, to: st.shard1.shardName}));
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, find: {_id: 7}, to: st.shard2.shardName}));
+
+ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
+ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard2.shardName}));
+
+ flushRoutersAndRefreshShardMetadata(st, {ns});
+
+ return st;
}
- };
+ },
+ // Tests a snapshot cursor command in a multi-shard environment. The setup inserts a
+ // collection, shards the collection, and inserts ten documents. Afterwards, chunks are
+ // split and moved such that only two out of three shards will have documents that will be
+ // found by the cursor command.
+ multiShardSomeShardReads: {
+ compatibleCollections: [shardedCollName],
+ name: "multiShardSomeShardReads",
+ setUp: function(collName) {
+ let st = new ShardingTest({shards: 3, mongos: 1, config: 1});
+ st = shardingScenarios.allScenarios.setUp(st, collName);
+
+ if (st === undefined) {
+ return;
+ }
- function runScenario(testScenario, {useCausalConsistency}) {
- testScenario.compatibleCollections.forEach(function(collName) {
- jsTestLog("Running the " + testScenario.name + " scenario on collection " + collName);
- runTest(testScenario, {useCausalConsistency, commands, collName});
- });
- }
+ const mongos = st.s0;
+
+ const ns = dbName + '.' + shardedCollName;
+
+ assert.commandWorked(st.splitAt(ns, {_id: 5}));
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, find: {_id: 7}, to: st.shard2.shardName}));
- function runTest(testScenario, {useCausalConsistency, commands, collName}) {
- let st = testScenario.setUp(collName);
- assert(st);
- assert(commands);
- assert(collName);
-
- const mainDb = st.s.getDB(dbName);
-
- for (let commandKey in commands) {
- assert(commandKey);
- jsTestLog("Testing the " + commandKey + " command.");
- const command = commands[commandKey];
-
- const session =
- mainDb.getMongo().startSession({causalConsistency: useCausalConsistency});
- const lsid = session.getSessionId();
- const sessionDb = session.getDatabase(dbName);
-
- // Test snapshot reads.
- session.startTransaction({writeConcern: {w: "majority"}});
-
- let txnNumber = session.getTxnNumber_forTesting();
-
- // Establish a snapshot cursor, fetching the first 5 documents.
- let res = assert.commandWorked(sessionDb.runCommand(command.firstCommand(collName)));
-
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("firstBatch"));
- assert.eq(5, res.cursor.firstBatch.length);
-
- assert(res.cursor.hasOwnProperty("id"));
- const cursorId = res.cursor.id;
- assert.neq(cursorId, 0);
-
- // Insert an 11th document which should not be visible to the snapshot cursor. This
- // write is performed outside of the session.
- assert.writeOK(mainDb[collName].insert({_id: 10}, {writeConcern: {w: "majority"}}));
-
- verifyInvalidGetMoreAttempts(mainDb, collName, cursorId, lsid, txnNumber);
-
- // Fetch the 6th document. This confirms that the transaction stash is preserved across
- // multiple getMore invocations.
- res = assert.commandWorked(sessionDb.runCommand({
- getMore: cursorId,
- collection: collName,
- batchSize: 1,
- }));
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- assert.neq(0, res.cursor.id);
-
- // Exhaust the cursor, retrieving the remainder of the result set.
- res = assert.commandWorked(sessionDb.runCommand({
- getMore: cursorId,
- collection: collName,
- batchSize: 10,
- }));
-
- // The cursor has been exhausted.
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- assert.eq(0, res.cursor.id);
-
- // Only the remaining 4 of the initial 10 documents are returned. The 11th document is
- // not part of the result set.
- assert(res.cursor.hasOwnProperty("nextBatch"));
- assert.eq(4, res.cursor.nextBatch.length);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Perform a second snapshot read under a new transaction.
- session.startTransaction({writeConcern: {w: "majority"}});
- res = assert.commandWorked(sessionDb.runCommand(command.secondCommand(collName)));
-
- // The cursor has been exhausted.
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- assert.eq(0, res.cursor.id);
-
- // All 11 documents are returned.
- assert(res.cursor.hasOwnProperty("firstBatch"));
- assert.eq(11, res.cursor.firstBatch.length);
-
- // Remove the 11th document to preserve the collection for the next command.
- assert.writeOK(mainDb[collName].remove({_id: 10}, {writeConcern: {w: "majority"}}));
-
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
+ assert.eq(0, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
+ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard2.shardName}));
+
+ flushRoutersAndRefreshShardMetadata(st, {ns});
+
+ return st;
}
+ },
+ allScenarios: {
+ name: "allScenarios",
+ setUp: function(st, collName) {
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ assert.commandWorked(st.s.adminCommand(
+ {shardCollection: st.s.getDB(dbName)[shardedCollName] + "", key: {_id: 1}}));
+
+ const mainDb = st.s.getDB(dbName);
+
+ let bulk = mainDb[collName].initializeUnorderedBulkOp();
+ for (let x = 0; x < 10; ++x) {
+ bulk.insert({_id: x});
+ }
+ assert.commandWorked(bulk.execute({w: "majority"}));
- st.stop();
+ return st;
+ }
+ }
+};
+
+function runScenario(testScenario, {useCausalConsistency}) {
+ testScenario.compatibleCollections.forEach(function(collName) {
+ jsTestLog("Running the " + testScenario.name + " scenario on collection " + collName);
+ runTest(testScenario, {useCausalConsistency, commands, collName});
+ });
+}
+
+function runTest(testScenario, {useCausalConsistency, commands, collName}) {
+ let st = testScenario.setUp(collName);
+ assert(st);
+ assert(commands);
+ assert(collName);
+
+ const mainDb = st.s.getDB(dbName);
+
+ for (let commandKey in commands) {
+ assert(commandKey);
+ jsTestLog("Testing the " + commandKey + " command.");
+ const command = commands[commandKey];
+
+ const session = mainDb.getMongo().startSession({causalConsistency: useCausalConsistency});
+ const lsid = session.getSessionId();
+ const sessionDb = session.getDatabase(dbName);
+
+ // Test snapshot reads.
+ session.startTransaction({writeConcern: {w: "majority"}});
+
+ let txnNumber = session.getTxnNumber_forTesting();
+
+ // Establish a snapshot cursor, fetching the first 5 documents.
+ let res = assert.commandWorked(sessionDb.runCommand(command.firstCommand(collName)));
+
+ assert(res.hasOwnProperty("cursor"));
+ assert(res.cursor.hasOwnProperty("firstBatch"));
+ assert.eq(5, res.cursor.firstBatch.length);
+
+ assert(res.cursor.hasOwnProperty("id"));
+ const cursorId = res.cursor.id;
+ assert.neq(cursorId, 0);
+
+ // Insert an 11th document which should not be visible to the snapshot cursor. This
+ // write is performed outside of the session.
+ assert.writeOK(mainDb[collName].insert({_id: 10}, {writeConcern: {w: "majority"}}));
+
+ verifyInvalidGetMoreAttempts(mainDb, collName, cursorId, lsid, txnNumber);
+
+ // Fetch the 6th document. This confirms that the transaction stash is preserved across
+ // multiple getMore invocations.
+ res = assert.commandWorked(sessionDb.runCommand({
+ getMore: cursorId,
+ collection: collName,
+ batchSize: 1,
+ }));
+ assert(res.hasOwnProperty("cursor"));
+ assert(res.cursor.hasOwnProperty("id"));
+ assert.neq(0, res.cursor.id);
+
+ // Exhaust the cursor, retrieving the remainder of the result set.
+ res = assert.commandWorked(sessionDb.runCommand({
+ getMore: cursorId,
+ collection: collName,
+ batchSize: 10,
+ }));
+
+ // The cursor has been exhausted.
+ assert(res.hasOwnProperty("cursor"));
+ assert(res.cursor.hasOwnProperty("id"));
+ assert.eq(0, res.cursor.id);
+
+ // Only the remaining 4 of the initial 10 documents are returned. The 11th document is
+ // not part of the result set.
+ assert(res.cursor.hasOwnProperty("nextBatch"));
+ assert.eq(4, res.cursor.nextBatch.length);
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // Perform a second snapshot read under a new transaction.
+ session.startTransaction({writeConcern: {w: "majority"}});
+ res = assert.commandWorked(sessionDb.runCommand(command.secondCommand(collName)));
+
+ // The cursor has been exhausted.
+ assert(res.hasOwnProperty("cursor"));
+ assert(res.cursor.hasOwnProperty("id"));
+ assert.eq(0, res.cursor.id);
+
+ // All 11 documents are returned.
+ assert(res.cursor.hasOwnProperty("firstBatch"));
+ assert.eq(11, res.cursor.firstBatch.length);
+
+ // Remove the 11th document to preserve the collection for the next command.
+ assert.writeOK(mainDb[collName].remove({_id: 10}, {writeConcern: {w: "majority"}}));
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
}
- // Runs against a sharded and unsharded collection.
- runScenario(shardingScenarios.singleShard, {useCausalConsistency: false});
+ st.stop();
+}
+
+// Runs against a sharded and unsharded collection.
+runScenario(shardingScenarios.singleShard, {useCausalConsistency: false});
- runScenario(shardingScenarios.multiShardAllShardReads, {useCausalConsistency: false});
+runScenario(shardingScenarios.multiShardAllShardReads, {useCausalConsistency: false});
- runScenario(shardingScenarios.multiShardSomeShardReads,
- {useCausalConsistency: false, collName: shardedCollName});
+runScenario(shardingScenarios.multiShardSomeShardReads,
+ {useCausalConsistency: false, collName: shardedCollName});
})();
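
A condensed sketch of the snapshot-cursor pattern the helpers above exercise (illustrative only; assumes a mongos connection such as `st.s` bound to `mongos` and a populated sharded collection test.shardedColl):

const session = mongos.startSession({causalConsistency: false});
const sessionDb = session.getDatabase("test");
session.startTransaction({writeConcern: {w: "majority"}});
// Open a snapshot cursor with a small first batch ...
let res = assert.commandWorked(sessionDb.runCommand(
    {find: "shardedColl", sort: {_id: 1}, batchSize: 2, readConcern: {level: "snapshot"}}));
// ... then drain it with getMore; every batch observes the same point-in-time snapshot.
res = assert.commandWorked(sessionDb.runCommand(
    {getMore: res.cursor.id, collection: "shardedColl", batchSize: 100}));
assert.commandWorked(session.commitTransaction_forTesting());
session.endSession();
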
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index dc423ac14d7..ab73928836e 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -1,110 +1,110 @@
(function() {
- 'use strict';
-
- var s = new ShardingTest({name: "sort1", shards: 2, mongos: 2});
-
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.data", key: {'sub.num': 1}});
-
- var db = s.getDB("test");
-
- const N = 100;
-
- var forward = [];
- var backward = [];
- for (var i = 0; i < N; i++) {
- db.data.insert({_id: i, sub: {num: i, x: N - i}});
- forward.push(i);
- backward.push((N - 1) - i);
- }
-
- s.adminCommand({split: "test.data", middle: {'sub.num': 33}});
- s.adminCommand({split: "test.data", middle: {'sub.num': 66}});
-
- s.adminCommand({
- movechunk: "test.data",
- find: {'sub.num': 50},
- to: s.getOther(s.getPrimaryShard("test")).name,
- _waitForDelete: true
- });
-
- assert.lte(3, s.config.chunks.find({ns: 'test.data'}).itcount(), "A1");
-
- var temp = s.config.chunks.find({ns: 'test.data'}).sort({min: 1}).toArray();
- temp.forEach(printjsononeline);
-
- var z = 0;
- for (; z < temp.length; z++)
- if (temp[z].min["sub.num"] <= 50 && temp[z].max["sub.num"] > 50)
- break;
-
- assert.eq(temp[z - 1].shard, temp[z + 1].shard, "A2");
- assert.neq(temp[z - 1].shard, temp[z].shard, "A3");
-
- temp = db.data.find().sort({'sub.num': 1}).toArray();
- assert.eq(N, temp.length, "B1");
- for (i = 0; i < 100; i++) {
- assert.eq(i, temp[i].sub.num, "B2");
- }
-
- db.data.find().sort({'sub.num': 1}).toArray();
- s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
-
- var a = Date.timeFunc(function() {
- z = db.data.find().sort({'sub.num': 1}).toArray();
- }, 200);
- assert.eq(100, z.length, "C1");
-
- var b = 1.5 * Date.timeFunc(function() {
- z = s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
- }, 200);
- assert.eq(67, z.length, "C2");
-
- print("a: " + a + " b:" + b + " mongos slow down: " + Math.ceil(100 * ((a - b) / b)) + "%");
-
- // -- secondary index sorting
-
- function getSorted(by, dir, proj) {
- var s = {};
- s[by] = dir || 1;
- printjson(s);
- var cur = db.data.find({}, proj || {}).sort(s);
- return terse(cur.map(function(z) {
- return z.sub.num;
- }));
- }
-
- function terse(a) {
- var s = "";
- for (var i = 0; i < a.length; i++) {
- if (i > 0)
- s += ",";
- s += a[i];
- }
- return s;
+'use strict';
+
+var s = new ShardingTest({name: "sort1", shards: 2, mongos: 2});
+
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.data", key: {'sub.num': 1}});
+
+var db = s.getDB("test");
+
+const N = 100;
+
+var forward = [];
+var backward = [];
+for (var i = 0; i < N; i++) {
+ db.data.insert({_id: i, sub: {num: i, x: N - i}});
+ forward.push(i);
+ backward.push((N - 1) - i);
+}
+
+s.adminCommand({split: "test.data", middle: {'sub.num': 33}});
+s.adminCommand({split: "test.data", middle: {'sub.num': 66}});
+
+s.adminCommand({
+ movechunk: "test.data",
+ find: {'sub.num': 50},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
+
+assert.lte(3, s.config.chunks.find({ns: 'test.data'}).itcount(), "A1");
+
+var temp = s.config.chunks.find({ns: 'test.data'}).sort({min: 1}).toArray();
+temp.forEach(printjsononeline);
+
+var z = 0;
+for (; z < temp.length; z++)
+ if (temp[z].min["sub.num"] <= 50 && temp[z].max["sub.num"] > 50)
+ break;
+
+assert.eq(temp[z - 1].shard, temp[z + 1].shard, "A2");
+assert.neq(temp[z - 1].shard, temp[z].shard, "A3");
+
+temp = db.data.find().sort({'sub.num': 1}).toArray();
+assert.eq(N, temp.length, "B1");
+for (i = 0; i < 100; i++) {
+ assert.eq(i, temp[i].sub.num, "B2");
+}
+
+db.data.find().sort({'sub.num': 1}).toArray();
+s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
+
+var a = Date.timeFunc(function() {
+ z = db.data.find().sort({'sub.num': 1}).toArray();
+}, 200);
+assert.eq(100, z.length, "C1");
+
+var b = 1.5 * Date.timeFunc(function() {
+ z = s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
+}, 200);
+assert.eq(67, z.length, "C2");
+
+print("a: " + a + " b:" + b + " mongos slow down: " + Math.ceil(100 * ((a - b) / b)) + "%");
+
+// -- secondary index sorting
+
+function getSorted(by, dir, proj) {
+ var s = {};
+ s[by] = dir || 1;
+ printjson(s);
+ var cur = db.data.find({}, proj || {}).sort(s);
+ return terse(cur.map(function(z) {
+ return z.sub.num;
+ }));
+}
+
+function terse(a) {
+ var s = "";
+ for (var i = 0; i < a.length; i++) {
+ if (i > 0)
+ s += ",";
+ s += a[i];
}
+ return s;
+}
- forward = terse(forward);
- backward = terse(backward);
+forward = terse(forward);
+backward = terse(backward);
- assert.eq(forward, getSorted("sub.num", 1), "D1");
- assert.eq(backward, getSorted("sub.num", -1), "D2");
+assert.eq(forward, getSorted("sub.num", 1), "D1");
+assert.eq(backward, getSorted("sub.num", -1), "D2");
- assert.eq(backward, getSorted("sub.x", 1), "D3");
- assert.eq(forward, getSorted("sub.x", -1), "D4");
+assert.eq(backward, getSorted("sub.x", 1), "D3");
+assert.eq(forward, getSorted("sub.x", -1), "D4");
- assert.eq(backward, getSorted("sub.x", 1, {'sub.num': 1}), "D5");
- assert.eq(forward, getSorted("sub.x", -1, {'sub.num': 1}), "D6");
+assert.eq(backward, getSorted("sub.x", 1, {'sub.num': 1}), "D5");
+assert.eq(forward, getSorted("sub.x", -1, {'sub.num': 1}), "D6");
- assert.eq(backward, getSorted("sub.x", 1, {'sub': 1}), "D7");
- assert.eq(forward, getSorted("sub.x", -1, {'sub': 1}), "D8");
+assert.eq(backward, getSorted("sub.x", 1, {'sub': 1}), "D7");
+assert.eq(forward, getSorted("sub.x", -1, {'sub': 1}), "D8");
- assert.eq(backward, getSorted("sub.x", 1, {'_id': 0}), "D9");
- assert.eq(forward, getSorted("sub.x", -1, {'_id': 0}), "D10");
+assert.eq(backward, getSorted("sub.x", 1, {'_id': 0}), "D9");
+assert.eq(forward, getSorted("sub.x", -1, {'_id': 0}), "D10");
- assert.eq(backward, getSorted("sub.x", 1, {'_id': 0, 'sub.num': 1}), "D11");
- assert.eq(forward, getSorted("sub.x", -1, {'_id': 0, 'sub.num': 1}), "D12");
+assert.eq(backward, getSorted("sub.x", 1, {'_id': 0, 'sub.num': 1}), "D11");
+assert.eq(forward, getSorted("sub.x", -1, {'_id': 0, 'sub.num': 1}), "D12");
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/split_against_shard_with_invalid_split_points.js b/jstests/sharding/split_against_shard_with_invalid_split_points.js
index 8817e524daf..54eff23d8b7 100644
--- a/jstests/sharding/split_against_shard_with_invalid_split_points.js
+++ b/jstests/sharding/split_against_shard_with_invalid_split_points.js
@@ -1,41 +1,41 @@
// Tests that executing splitChunk directly against a shard, with an invalid split point will not
// corrupt the chunks metadata
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 1});
-
- var testDB = st.s.getDB('TestSplitDB');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'TestSplitDB'}));
- st.ensurePrimaryShard('TestSplitDB', st.shard0.shardName);
-
- assert.commandWorked(testDB.adminCommand({shardCollection: 'TestSplitDB.Coll', key: {x: 1}}));
- assert.commandWorked(testDB.adminCommand({split: 'TestSplitDB.Coll', middle: {x: 0}}));
-
- var chunksBefore = st.s.getDB('config').chunks.find().toArray();
-
- // Try to do a split with invalid parameters through mongod
- var callSplit = function(db, minKey, maxKey, splitPoints) {
- var res = assert.commandWorked(st.s.adminCommand({getShardVersion: 'TestSplitDB.Coll'}));
- return db.runCommand({
- splitChunk: 'TestSplitDB.Coll',
- from: st.shard0.shardName,
- min: minKey,
- max: maxKey,
- keyPattern: {x: 1},
- splitKeys: splitPoints,
- epoch: res.versionEpoch,
- });
- };
-
- assert.commandFailedWithCode(
- callSplit(st.rs0.getPrimary().getDB('admin'), {x: MinKey}, {x: 0}, [{x: 2}]),
- ErrorCodes.InvalidOptions);
-
- var chunksAfter = st.s.getDB('config').chunks.find().toArray();
- assert.eq(chunksBefore,
- chunksAfter,
- 'Split chunks failed, but the chunks were updated in the config database');
-
- st.stop();
+'use strict';
+
+var st = new ShardingTest({shards: 1});
+
+var testDB = st.s.getDB('TestSplitDB');
+assert.commandWorked(testDB.adminCommand({enableSharding: 'TestSplitDB'}));
+st.ensurePrimaryShard('TestSplitDB', st.shard0.shardName);
+
+assert.commandWorked(testDB.adminCommand({shardCollection: 'TestSplitDB.Coll', key: {x: 1}}));
+assert.commandWorked(testDB.adminCommand({split: 'TestSplitDB.Coll', middle: {x: 0}}));
+
+var chunksBefore = st.s.getDB('config').chunks.find().toArray();
+
+// Try to do a split with invalid parameters through mongod
+var callSplit = function(db, minKey, maxKey, splitPoints) {
+ var res = assert.commandWorked(st.s.adminCommand({getShardVersion: 'TestSplitDB.Coll'}));
+ return db.runCommand({
+ splitChunk: 'TestSplitDB.Coll',
+ from: st.shard0.shardName,
+ min: minKey,
+ max: maxKey,
+ keyPattern: {x: 1},
+ splitKeys: splitPoints,
+ epoch: res.versionEpoch,
+ });
+};
+
+assert.commandFailedWithCode(
+ callSplit(st.rs0.getPrimary().getDB('admin'), {x: MinKey}, {x: 0}, [{x: 2}]),
+ ErrorCodes.InvalidOptions);
+
+var chunksAfter = st.s.getDB('config').chunks.find().toArray();
+assert.eq(chunksBefore,
+ chunksAfter,
+ 'Split chunks failed, but the chunks were updated in the config database');
+
+st.stop();
})();
diff --git a/jstests/sharding/split_large_key.js b/jstests/sharding/split_large_key.js
index 5a5504594d2..5ee2ecf7bc7 100644
--- a/jstests/sharding/split_large_key.js
+++ b/jstests/sharding/split_large_key.js
@@ -1,68 +1,67 @@
// Test for splitting a chunk with a very large shard key value should not be allowed
// and does not corrupt the config.chunks metadata.
(function() {
- 'use strict';
+'use strict';
- function verifyChunk(keys, expectFail, ns) {
- // If split failed then there's only 1 chunk
- // With a min & max for the shardKey
- if (expectFail) {
- assert.eq(1, configDB.chunks.find({"ns": ns}).count(), "Chunks count no split");
- var chunkDoc = configDB.chunks.findOne({"ns": ns});
- assert.eq(0, bsonWoCompare(chunkDoc.min, keys.min), "Chunks min");
- assert.eq(0, bsonWoCompare(chunkDoc.max, keys.max), "Chunks max");
- } else {
- assert.eq(2, configDB.chunks.find({"ns": ns}).count(), "Chunks count split");
- }
+function verifyChunk(keys, expectFail, ns) {
+ // If the split failed then there is only 1 chunk,
+ // with the expected min & max for the shard key.
+ if (expectFail) {
+ assert.eq(1, configDB.chunks.find({"ns": ns}).count(), "Chunks count no split");
+ var chunkDoc = configDB.chunks.findOne({"ns": ns});
+ assert.eq(0, bsonWoCompare(chunkDoc.min, keys.min), "Chunks min");
+ assert.eq(0, bsonWoCompare(chunkDoc.max, keys.max), "Chunks max");
+ } else {
+ assert.eq(2, configDB.chunks.find({"ns": ns}).count(), "Chunks count split");
}
+}
- // Tests
- // - name: Name of test, used in collection name
- // - key: key to test
- // - keyFieldSize: size of each key field
- // - expectFail: true/false, true if key is too large to pre-split
- var tests = [
- {name: "Key size small", key: {x: 1}, keyFieldSize: 100, expectFail: false},
- {name: "Key size 512", key: {x: 1}, keyFieldSize: 512, expectFail: true},
- {name: "Key size 2000", key: {x: 1}, keyFieldSize: 2000, expectFail: true},
- {name: "Compound key size small", key: {x: 1, y: 1}, keyFieldSize: 100, expectFail: false},
- {name: "Compound key size 512", key: {x: 1, y: 1}, keyFieldSize: 256, expectFail: true},
- {name: "Compound key size 10000", key: {x: 1, y: 1}, keyFieldSize: 5000, expectFail: true},
- ];
+// Tests
+// - name: Name of test, used in collection name
+// - key: key to test
+// - keyFieldSize: size of each key field
+// - expectFail: true/false, true if key is too large to pre-split
+var tests = [
+ {name: "Key size small", key: {x: 1}, keyFieldSize: 100, expectFail: false},
+ {name: "Key size 512", key: {x: 1}, keyFieldSize: 512, expectFail: true},
+ {name: "Key size 2000", key: {x: 1}, keyFieldSize: 2000, expectFail: true},
+ {name: "Compound key size small", key: {x: 1, y: 1}, keyFieldSize: 100, expectFail: false},
+ {name: "Compound key size 512", key: {x: 1, y: 1}, keyFieldSize: 256, expectFail: true},
+ {name: "Compound key size 10000", key: {x: 1, y: 1}, keyFieldSize: 5000, expectFail: true},
+];
- var st = new ShardingTest({shards: 1});
- var configDB = st.s.getDB('config');
+var st = new ShardingTest({shards: 1});
+var configDB = st.s.getDB('config');
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- tests.forEach(function(test) {
- var collName = "split_large_key_" + test.name;
- var midKey = {};
- var chunkKeys = {min: {}, max: {}};
- for (var k in test.key) {
- // new Array with join creates string length 1 less than size, so add 1
- midKey[k] = new Array(test.keyFieldSize + 1).join('a');
- // min & max keys for each field in the index
- chunkKeys.min[k] = MinKey;
- chunkKeys.max[k] = MaxKey;
- }
-
- assert.commandWorked(
- configDB.adminCommand({shardCollection: "test." + collName, key: test.key}));
+tests.forEach(function(test) {
+ var collName = "split_large_key_" + test.name;
+ var midKey = {};
+ var chunkKeys = {min: {}, max: {}};
+ for (var k in test.key) {
+ // new Array(n).join('a') produces n - 1 characters, so pass keyFieldSize + 1
+ midKey[k] = new Array(test.keyFieldSize + 1).join('a');
+ // min & max keys for each field in the index
+ chunkKeys.min[k] = MinKey;
+ chunkKeys.max[k] = MaxKey;
+ }
- var res = configDB.adminCommand({split: "test." + collName, middle: midKey});
- if (test.expectFail) {
- assert(!res.ok, "Split: " + collName);
- assert(res.errmsg !== null, "Split errmsg: " + collName);
- } else {
- assert(res.ok, "Split: " + collName + " " + res.errmsg);
- }
+ assert.commandWorked(
+ configDB.adminCommand({shardCollection: "test." + collName, key: test.key}));
- verifyChunk(chunkKeys, test.expectFail, "test." + collName);
+ var res = configDB.adminCommand({split: "test." + collName, middle: midKey});
+ if (test.expectFail) {
+ assert(!res.ok, "Split: " + collName);
+ assert(res.errmsg !== null, "Split errmsg: " + collName);
+ } else {
+ assert(res.ok, "Split: " + collName + " " + res.errmsg);
+ }
- st.s0.getCollection("test." + collName).drop();
- });
+ verifyChunk(chunkKeys, test.expectFail, "test." + collName);
- st.stop();
+ st.s0.getCollection("test." + collName).drop();
+});
+st.stop();
})();
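
A quick shell-only way to see why the larger keys in the table above are rejected (a sketch, not part of the test; Object.bsonsize reports the BSON size of the candidate split document):

// A 512-character string value already makes the split key too large to store as a
// chunk bound, matching the expectFail: true cases exercised by the test.
var midKey = {x: new Array(512 + 1).join('a')};
print("candidate split key BSON size: " + Object.bsonsize(midKey) + " bytes");
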
diff --git a/jstests/sharding/split_with_force_small.js b/jstests/sharding/split_with_force_small.js
index 2554d41048f..8e281dcbe20 100644
--- a/jstests/sharding/split_with_force_small.js
+++ b/jstests/sharding/split_with_force_small.js
@@ -2,67 +2,67 @@
// Tests autosplit locations with force : true, for small collections
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1}});
+var st = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1}});
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var shardAdmin = st.shard0.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var shardAdmin = st.shard0.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
- jsTest.log("Insert a bunch of data into the low chunk of a collection," +
- " to prevent relying on stats.");
+jsTest.log("Insert a bunch of data into the low chunk of a collection," +
+ " to prevent relying on stats.");
- var data128k = "x";
- for (var i = 0; i < 7; i++)
- data128k += data128k;
+var data128k = "x";
+for (var i = 0; i < 7; i++)
+ data128k += data128k;
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 1024; i++) {
- bulk.insert({_id: -(i + 1)});
- }
- assert.writeOK(bulk.execute());
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 1024; i++) {
+ bulk.insert({_id: -(i + 1)});
+}
+assert.writeOK(bulk.execute());
- jsTest.log("Insert 32 docs into the high chunk of a collection");
+jsTest.log("Insert 32 docs into the high chunk of a collection");
- bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 32; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 32; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
- jsTest.log("Split off MaxKey chunk...");
+jsTest.log("Split off MaxKey chunk...");
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 32}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 32}}));
- jsTest.log("Keep splitting chunk multiple times...");
+jsTest.log("Keep splitting chunk multiple times...");
- st.printShardingStatus();
+st.printShardingStatus();
- for (var i = 0; i < 5; i++) {
- assert.commandWorked(admin.runCommand({split: coll + "", find: {_id: 0}}));
- st.printShardingStatus();
- }
+for (var i = 0; i < 5; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", find: {_id: 0}}));
+ st.printShardingStatus();
+}
- // Make sure we can't split further than 5 (2^5) times
- assert.commandFailed(admin.runCommand({split: coll + "", find: {_id: 0}}));
+// Make sure we can't split more than 5 times (the chunk started with 2^5 = 32 documents)
+assert.commandFailed(admin.runCommand({split: coll + "", find: {_id: 0}}));
- var chunks = config.chunks.find({'min._id': {$gte: 0, $lt: 32}}).sort({min: 1}).toArray();
- printjson(chunks);
+var chunks = config.chunks.find({'min._id': {$gte: 0, $lt: 32}}).sort({min: 1}).toArray();
+printjson(chunks);
- // Make sure the chunks grow by 2x (except the first)
- var nextSize = 1;
- for (var i = 0; i < chunks.size; i++) {
- assert.eq(coll.count({_id: {$gte: chunks[i].min._id, $lt: chunks[i].max._id}}), nextSize);
- if (i != 0)
- nextSize += nextSize;
- }
+// Make sure the chunk sizes double after the first one (1 + 1 + 2 + 4 + 8 + 16 = 32 docs)
+var nextSize = 1;
+for (var i = 0; i < chunks.length; i++) {
+ assert.eq(coll.count({_id: {$gte: chunks[i].min._id, $lt: chunks[i].max._id}}), nextSize);
+ if (i != 0)
+ nextSize += nextSize;
+}
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/ssv_config_check.js b/jstests/sharding/ssv_config_check.js
index cba03476cb0..48d7f2eac70 100644
--- a/jstests/sharding/ssv_config_check.js
+++ b/jstests/sharding/ssv_config_check.js
@@ -3,46 +3,46 @@
* replica set name, but with a member list that is not strictly the same.
*/
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- var testDB = st.s.getDB('test');
- testDB.adminCommand({enableSharding: 'test'});
- testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+var testDB = st.s.getDB('test');
+testDB.adminCommand({enableSharding: 'test'});
+testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
- testDB.user.insert({x: 1});
+testDB.user.insert({x: 1});
- var directConn = new Mongo(st.rs0.getPrimary().host);
- var adminDB = directConn.getDB('admin');
+var directConn = new Mongo(st.rs0.getPrimary().host);
+var adminDB = directConn.getDB('admin');
- var configStr = adminDB.runCommand({getShardVersion: 'test.user'}).configServer;
- var alternateConfigStr = configStr.substring(0, configStr.lastIndexOf(','));
+var configStr = adminDB.runCommand({getShardVersion: 'test.user'}).configServer;
+var alternateConfigStr = configStr.substring(0, configStr.lastIndexOf(','));
- var shardDoc = st.s.getDB('config').shards.findOne();
+var shardDoc = st.s.getDB('config').shards.findOne();
- jsTest.log("Verify that the obsolete init form of setShardVersion succeeds on shards.");
- assert.commandWorked(adminDB.runCommand({
- setShardVersion: '',
- init: true,
- authoritative: true,
- configdb: alternateConfigStr,
- shard: shardDoc._id,
- shardHost: shardDoc.host
- }));
+jsTest.log("Verify that the obsolete init form of setShardVersion succeeds on shards.");
+assert.commandWorked(adminDB.runCommand({
+ setShardVersion: '',
+ init: true,
+ authoritative: true,
+ configdb: alternateConfigStr,
+ shard: shardDoc._id,
+ shardHost: shardDoc.host
+}));
- var configAdmin = st.c0.getDB('admin');
+var configAdmin = st.c0.getDB('admin');
- jsTest.log("Verify that setShardVersion fails on the config server");
- // Even if shardName sent is 'config' and connstring sent is config server's actual connstring.
- assert.commandFailedWithCode(configAdmin.runCommand({
- setShardVersion: '',
- init: true,
- authoritative: true,
- configdb: configStr,
- shard: 'config'
- }),
- ErrorCodes.NoShardingEnabled);
+jsTest.log("Verify that setShardVersion fails on the config server");
+// Even if shardName sent is 'config' and connstring sent is config server's actual connstring.
+assert.commandFailedWithCode(configAdmin.runCommand({
+ setShardVersion: '',
+ init: true,
+ authoritative: true,
+ configdb: configStr,
+ shard: 'config'
+}),
+ ErrorCodes.NoShardingEnabled);
- st.stop();
+st.stop();
})();
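The alternateConfigStr used above is simply the config server connection string with its last host cut off, i.e. the same replica set name with a shorter member list. A small illustration with hypothetical host names:

    var configStr = 'configRS/host1:27019,host2:27019,host3:27019';
    var alternateConfigStr = configStr.substring(0, configStr.lastIndexOf(','));
    assert.eq('configRS/host1:27019,host2:27019', alternateConfigStr);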
diff --git a/jstests/sharding/stale_mongos_updates_and_removes.js b/jstests/sharding/stale_mongos_updates_and_removes.js
index 1c0e251d296..85eb196e93e 100644
--- a/jstests/sharding/stale_mongos_updates_and_removes.js
+++ b/jstests/sharding/stale_mongos_updates_and_removes.js
@@ -18,250 +18,255 @@
*/
(function() {
- 'use strict';
+'use strict';
- // Create a new sharded collection with numDocs documents, with two docs sharing each shard key
- // (used for testing *multi* removes to a *specific* shard key).
- function resetCollection() {
- assert(staleMongos.getCollection(collNS).drop());
- assert.commandWorked(staleMongos.adminCommand({shardCollection: collNS, key: {x: 1}}));
+// Create a new sharded collection with numDocs documents, with two docs sharing each shard key
+// (used for testing *multi* removes to a *specific* shard key).
+function resetCollection() {
+ assert(staleMongos.getCollection(collNS).drop());
+ assert.commandWorked(staleMongos.adminCommand({shardCollection: collNS, key: {x: 1}}));
- for (let i = 0; i < numShardKeys; i++) {
- assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
- assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
- }
-
- // Make sure data has replicated to all config servers so freshMongos finds a sharded
- // collection: freshMongos has an older optime and won't wait to see what staleMongos did
- // (shardCollection).
- st.configRS.awaitLastOpCommitted();
+ for (let i = 0; i < numShardKeys; i++) {
+ assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
+ assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
}
- // Create a new sharded collection, then split it into two chunks on different shards using the
- // stale mongos. Then use the fresh mongos to consolidate the chunks onto one of the shards.
- // staleMongos will see:
- // shard0: (-inf, splitPoint]
- // shard1: (splitPoint, inf]
- // freshMongos will see:
- // shard0: (-inf, splitPoint], (splitPoint, inf]
- // shard1:
- function makeStaleMongosTargetMultipleShardsWhenAllChunksAreOnOneShard() {
- resetCollection();
-
- // Make sure staleMongos sees all data on first shard.
- const chunk = staleMongos.getCollection("config.chunks")
- .findOne({min: {x: MinKey}, max: {x: MaxKey}});
- assert(chunk.shard === st.shard0.shardName);
-
- // Make sure staleMongos sees two chunks on two different shards.
- assert.commandWorked(staleMongos.adminCommand({split: collNS, middle: {x: splitPoint}}));
- assert.commandWorked(staleMongos.adminCommand(
- {moveChunk: collNS, find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
-
- st.configRS.awaitLastOpCommitted();
-
- // Use freshMongos to consolidate the chunks on one shard.
- assert.commandWorked(freshMongos.adminCommand(
- {moveChunk: collNS, find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- }
-
- // Create a new sharded collection with a single chunk, then move that chunk from the primary
- // shard to another shard using the fresh mongos.
- // staleMongos will see:
- // shard0: (-inf, inf]
- // shard1:
- // freshMongos will see:
- // shard0:
- // shard1: (-inf, inf]
- function makeStaleMongosTargetOneShardWhenAllChunksAreOnAnotherShard() {
- resetCollection();
-
- // Make sure staleMongos sees all data on first shard.
- const chunk = staleMongos.getCollection("config.chunks")
- .findOne({min: {x: MinKey}, max: {x: MaxKey}});
- assert(chunk.shard === st.shard0.shardName);
-
- // Use freshMongos to move chunk to another shard.
- assert.commandWorked(freshMongos.adminCommand(
- {moveChunk: collNS, find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+ // Make sure data has replicated to all config servers so freshMongos finds a sharded
+ // collection: freshMongos has an older optime and won't wait to see what staleMongos did
+ // (shardCollection).
+ st.configRS.awaitLastOpCommitted();
+}
+
+// Create a new sharded collection, then split it into two chunks on different shards using the
+// stale mongos. Then use the fresh mongos to consolidate the chunks onto one of the shards.
+// staleMongos will see:
+// shard0: (-inf, splitPoint]
+// shard1: (splitPoint, inf]
+// freshMongos will see:
+// shard0: (-inf, splitPoint], (splitPoint, inf]
+// shard1:
+function makeStaleMongosTargetMultipleShardsWhenAllChunksAreOnOneShard() {
+ resetCollection();
+
+ // Make sure staleMongos sees all data on first shard.
+ const chunk =
+ staleMongos.getCollection("config.chunks").findOne({min: {x: MinKey}, max: {x: MaxKey}});
+ assert(chunk.shard === st.shard0.shardName);
+
+ // Make sure staleMongos sees two chunks on two different shards.
+ assert.commandWorked(staleMongos.adminCommand({split: collNS, middle: {x: splitPoint}}));
+ assert.commandWorked(staleMongos.adminCommand(
+ {moveChunk: collNS, find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+
+ st.configRS.awaitLastOpCommitted();
+
+ // Use freshMongos to consolidate the chunks on one shard.
+ assert.commandWorked(freshMongos.adminCommand(
+ {moveChunk: collNS, find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+}
+
+// Create a new sharded collection with a single chunk, then move that chunk from the primary
+// shard to another shard using the fresh mongos.
+// staleMongos will see:
+// shard0: (-inf, inf]
+// shard1:
+// freshMongos will see:
+// shard0:
+// shard1: (-inf, inf]
+function makeStaleMongosTargetOneShardWhenAllChunksAreOnAnotherShard() {
+ resetCollection();
+
+ // Make sure staleMongos sees all data on first shard.
+ const chunk =
+ staleMongos.getCollection("config.chunks").findOne({min: {x: MinKey}, max: {x: MaxKey}});
+ assert(chunk.shard === st.shard0.shardName);
+
+ // Use freshMongos to move chunk to another shard.
+ assert.commandWorked(freshMongos.adminCommand(
+ {moveChunk: collNS, find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+}
+
+// Create a new sharded collection, then split it into two chunks on different shards using the
+// fresh mongos.
+// staleMongos will see:
+// shard0: (-inf, inf]
+// shard1:
+// freshMongos will see:
+// shard0: (-inf, splitPoint]
+// shard1: (splitPoint, inf]
+function makeStaleMongosTargetOneShardWhenChunksAreOnMultipleShards() {
+ resetCollection();
+
+ // Make sure staleMongos sees all data on first shard.
+ const chunk =
+ staleMongos.getCollection("config.chunks").findOne({min: {x: MinKey}, max: {x: MaxKey}});
+ assert(chunk.shard === st.shard0.shardName);
+
+ // Use freshMongos to split and move chunks to both shards.
+ assert.commandWorked(freshMongos.adminCommand({split: collNS, middle: {x: splitPoint}}));
+ assert.commandWorked(freshMongos.adminCommand(
+ {moveChunk: collNS, find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+
+ st.configRS.awaitLastOpCommitted();
+}
+
+function checkAllRemoveQueries(makeMongosStaleFunc) {
+ const multi = {justOne: false};
+ const single = {justOne: true};
+
+ function doRemove(query, multiOption, makeMongosStaleFunc) {
+ makeMongosStaleFunc();
+ assert.writeOK(staleMongos.getCollection(collNS).remove(query, multiOption));
+ if (multiOption.justOne) {
+ // A total of one document should have been removed from the collection.
+ assert.eq(numDocs - 1, staleMongos.getCollection(collNS).find().itcount());
+ } else {
+ // All documents matching the query should have been removed.
+ assert.eq(0, staleMongos.getCollection(collNS).find(query).itcount());
+ }
}
- // Create a new sharded collection, then split it into two chunks on different shards using the
- // fresh mongos.
- // staleMongos will see:
- // shard0: (-inf, inf]
- // shard1:
- // freshMongos will see:
- // shard0: (-inf, splitPoint]
- // shard1: (splitPoint, inf]
- function makeStaleMongosTargetOneShardWhenChunksAreOnMultipleShards() {
- resetCollection();
-
- // Make sure staleMongos sees all data on first shard.
- const chunk = staleMongos.getCollection("config.chunks")
- .findOne({min: {x: MinKey}, max: {x: MaxKey}});
- assert(chunk.shard === st.shard0.shardName);
-
- // Use freshMongos to split and move chunks to both shards.
- assert.commandWorked(freshMongos.adminCommand({split: collNS, middle: {x: splitPoint}}));
- assert.commandWorked(freshMongos.adminCommand(
- {moveChunk: collNS, find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
-
- st.configRS.awaitLastOpCommitted();
+ function checkRemoveIsInvalid(query, multiOption, makeMongosStaleFunc) {
+ makeMongosStaleFunc();
+ const res = staleMongos.getCollection(collNS).remove(query, multiOption);
+ assert.writeError(res);
}
- function checkAllRemoveQueries(makeMongosStaleFunc) {
- const multi = {justOne: false};
- const single = {justOne: true};
-
- function doRemove(query, multiOption, makeMongosStaleFunc) {
- makeMongosStaleFunc();
- assert.writeOK(staleMongos.getCollection(collNS).remove(query, multiOption));
- if (multiOption.justOne) {
- // A total of one document should have been removed from the collection.
- assert.eq(numDocs - 1, staleMongos.getCollection(collNS).find().itcount());
- } else {
- // All documents matching the query should have been removed.
- assert.eq(0, staleMongos.getCollection(collNS).find(query).itcount());
- }
+ // Not possible because single remove requires equality match on shard key.
+ checkRemoveIsInvalid(emptyQuery, single, makeMongosStaleFunc);
+ doRemove(emptyQuery, multi, makeMongosStaleFunc);
+
+ doRemove(pointQuery, single, makeMongosStaleFunc);
+ doRemove(pointQuery, multi, makeMongosStaleFunc);
+
+    // Not possible because a single (justOne) remove can't use a range query.
+ checkRemoveIsInvalid(rangeQuery, single, makeMongosStaleFunc);
+ doRemove(rangeQuery, multi, makeMongosStaleFunc);
+
+ // Not possible because single remove must contain _id or shard key at top level
+ // (not within $or).
+ checkRemoveIsInvalid(multiPointQuery, single, makeMongosStaleFunc);
+ doRemove(multiPointQuery, multi, makeMongosStaleFunc);
+}
+
+function checkAllUpdateQueries(makeMongosStaleFunc) {
+ const oUpdate = {$inc: {fieldToUpdate: 1}}; // op-style update (non-idempotent)
+ const rUpdate = {x: 0, fieldToUpdate: 1}; // replacement-style update (idempotent)
+ const queryAfterUpdate = {fieldToUpdate: 1};
+
+ const multi = {multi: true};
+ const single = {multi: false};
+
+ function doUpdate(query, update, multiOption, makeMongosStaleFunc) {
+ makeMongosStaleFunc();
+ assert.writeOK(staleMongos.getCollection(collNS).update(query, update, multiOption));
+ if (multiOption.multi) {
+ // All documents matching the query should have been updated.
+ assert.eq(staleMongos.getCollection(collNS).find(query).itcount(),
+ staleMongos.getCollection(collNS).find(queryAfterUpdate).itcount());
+ } else {
+ // A total of one document should have been updated.
+ assert.eq(1, staleMongos.getCollection(collNS).find(queryAfterUpdate).itcount());
}
-
- function checkRemoveIsInvalid(query, multiOption, makeMongosStaleFunc) {
- makeMongosStaleFunc();
- const res = staleMongos.getCollection(collNS).remove(query, multiOption);
- assert.writeError(res);
- }
-
- // Not possible because single remove requires equality match on shard key.
- checkRemoveIsInvalid(emptyQuery, single, makeMongosStaleFunc);
- doRemove(emptyQuery, multi, makeMongosStaleFunc);
-
- doRemove(pointQuery, single, makeMongosStaleFunc);
- doRemove(pointQuery, multi, makeMongosStaleFunc);
-
- // Not possible because can't do range query on a single remove.
- checkRemoveIsInvalid(rangeQuery, single, makeMongosStaleFunc);
- doRemove(rangeQuery, multi, makeMongosStaleFunc);
-
- // Not possible because single remove must contain _id or shard key at top level
- // (not within $or).
- checkRemoveIsInvalid(multiPointQuery, single, makeMongosStaleFunc);
- doRemove(multiPointQuery, multi, makeMongosStaleFunc);
}
- function checkAllUpdateQueries(makeMongosStaleFunc) {
- const oUpdate = {$inc: {fieldToUpdate: 1}}; // op-style update (non-idempotent)
- const rUpdate = {x: 0, fieldToUpdate: 1}; // replacement-style update (idempotent)
- const queryAfterUpdate = {fieldToUpdate: 1};
-
- const multi = {multi: true};
- const single = {multi: false};
-
- function doUpdate(query, update, multiOption, makeMongosStaleFunc) {
- makeMongosStaleFunc();
- assert.writeOK(staleMongos.getCollection(collNS).update(query, update, multiOption));
- if (multiOption.multi) {
- // All documents matching the query should have been updated.
- assert.eq(staleMongos.getCollection(collNS).find(query).itcount(),
- staleMongos.getCollection(collNS).find(queryAfterUpdate).itcount());
- } else {
- // A total of one document should have been updated.
- assert.eq(1, staleMongos.getCollection(collNS).find(queryAfterUpdate).itcount());
- }
- }
-
- function assertUpdateIsInvalid(query, update, multiOption, makeMongosStaleFunc) {
- makeMongosStaleFunc();
- const res = staleMongos.getCollection(collNS).update(query, update, multiOption);
- assert.writeError(res);
- }
+ function assertUpdateIsInvalid(query, update, multiOption, makeMongosStaleFunc) {
+ makeMongosStaleFunc();
+ const res = staleMongos.getCollection(collNS).update(query, update, multiOption);
+ assert.writeError(res);
+ }
- function assertUpdateIsValidIfAllChunksOnSingleShard(
- query, update, multiOption, makeMongosStaleFunc) {
- if (makeMongosStaleFunc == makeStaleMongosTargetOneShardWhenChunksAreOnMultipleShards) {
- assertUpdateIsInvalid(query, update, multiOption, makeMongosStaleFunc);
- } else {
- doUpdate(query, update, multiOption, makeMongosStaleFunc);
- }
+ function assertUpdateIsValidIfAllChunksOnSingleShard(
+ query, update, multiOption, makeMongosStaleFunc) {
+ if (makeMongosStaleFunc == makeStaleMongosTargetOneShardWhenChunksAreOnMultipleShards) {
+ assertUpdateIsInvalid(query, update, multiOption, makeMongosStaleFunc);
+ } else {
+ doUpdate(query, update, multiOption, makeMongosStaleFunc);
}
+ }
- // Note on the tests below: single-doc updates are able to succeed even in cases where the
- // stale mongoS incorrectly believes that the update targets multiple shards, because the
- // mongoS write path swallows the first error encountered in each batch, then internally
- // refreshes its routing table and tries the write again. Because all chunks are actually
- // on a single shard in two of the three test cases, this second update attempt succeeds.
+ // Note on the tests below: single-doc updates are able to succeed even in cases where the
+ // stale mongoS incorrectly believes that the update targets multiple shards, because the
+ // mongoS write path swallows the first error encountered in each batch, then internally
+ // refreshes its routing table and tries the write again. Because all chunks are actually
+ // on a single shard in two of the three test cases, this second update attempt succeeds.
- // This update has inconsistent behavior as explained in SERVER-22895.
- // doUpdate(emptyQuery, rUpdate, single, makeMongosStaleFunc);
+ // This update has inconsistent behavior as explained in SERVER-22895.
+ // doUpdate(emptyQuery, rUpdate, single, makeMongosStaleFunc);
- // Not possible because replacement-style requires equality match on shard key.
- assertUpdateIsInvalid(emptyQuery, rUpdate, multi, makeMongosStaleFunc);
+ // Not possible because replacement-style requires equality match on shard key.
+ assertUpdateIsInvalid(emptyQuery, rUpdate, multi, makeMongosStaleFunc);
- // Single op-style update succeeds if all chunks are on one shard, regardless of staleness.
- assertUpdateIsValidIfAllChunksOnSingleShard(
- emptyQuery, oUpdate, single, makeMongosStaleFunc);
- doUpdate(emptyQuery, oUpdate, multi, makeMongosStaleFunc);
+ // Single op-style update succeeds if all chunks are on one shard, regardless of staleness.
+ assertUpdateIsValidIfAllChunksOnSingleShard(emptyQuery, oUpdate, single, makeMongosStaleFunc);
+ doUpdate(emptyQuery, oUpdate, multi, makeMongosStaleFunc);
- doUpdate(pointQuery, rUpdate, single, makeMongosStaleFunc);
+ doUpdate(pointQuery, rUpdate, single, makeMongosStaleFunc);
- // Not possible because replacement-style requires multi=false.
- assertUpdateIsInvalid(pointQuery, rUpdate, multi, makeMongosStaleFunc);
- doUpdate(pointQuery, oUpdate, single, makeMongosStaleFunc);
- doUpdate(pointQuery, oUpdate, multi, makeMongosStaleFunc);
+ // Not possible because replacement-style requires multi=false.
+ assertUpdateIsInvalid(pointQuery, rUpdate, multi, makeMongosStaleFunc);
+ doUpdate(pointQuery, oUpdate, single, makeMongosStaleFunc);
+ doUpdate(pointQuery, oUpdate, multi, makeMongosStaleFunc);
- doUpdate(rangeQuery, rUpdate, single, makeMongosStaleFunc);
+ doUpdate(rangeQuery, rUpdate, single, makeMongosStaleFunc);
- // Not possible because replacement-style requires multi=false.
- assertUpdateIsInvalid(rangeQuery, rUpdate, multi, makeMongosStaleFunc);
+ // Not possible because replacement-style requires multi=false.
+ assertUpdateIsInvalid(rangeQuery, rUpdate, multi, makeMongosStaleFunc);
- // Range query for a single update succeeds because the range falls entirely on one shard.
- doUpdate(rangeQuery, oUpdate, single, makeMongosStaleFunc);
- doUpdate(rangeQuery, oUpdate, multi, makeMongosStaleFunc);
+ // Range query for a single update succeeds because the range falls entirely on one shard.
+ doUpdate(rangeQuery, oUpdate, single, makeMongosStaleFunc);
+ doUpdate(rangeQuery, oUpdate, multi, makeMongosStaleFunc);
- doUpdate(multiPointQuery, rUpdate, single, makeMongosStaleFunc);
+ doUpdate(multiPointQuery, rUpdate, single, makeMongosStaleFunc);
- // Not possible because replacement-style requires multi=false.
- assertUpdateIsInvalid(multiPointQuery, rUpdate, multi, makeMongosStaleFunc);
+ // Not possible because replacement-style requires multi=false.
+ assertUpdateIsInvalid(multiPointQuery, rUpdate, multi, makeMongosStaleFunc);
- // Multi-point single-doc update succeeds if all points are on a single shard.
- assertUpdateIsValidIfAllChunksOnSingleShard(
- multiPointQuery, oUpdate, single, makeMongosStaleFunc);
- doUpdate(multiPointQuery, oUpdate, multi, makeMongosStaleFunc);
- }
+ // Multi-point single-doc update succeeds if all points are on a single shard.
+ assertUpdateIsValidIfAllChunksOnSingleShard(
+ multiPointQuery, oUpdate, single, makeMongosStaleFunc);
+ doUpdate(multiPointQuery, oUpdate, multi, makeMongosStaleFunc);
+}
- // TODO: SERVER-33954 remove shardAsReplicaSet: false.
- const st = new ShardingTest({shards: 2, mongos: 2, other: {shardAsReplicaSet: false}});
+// TODO: SERVER-33954 remove shardAsReplicaSet: false.
+const st = new ShardingTest({shards: 2, mongos: 2, other: {shardAsReplicaSet: false}});
- const dbName = 'test';
- const collNS = dbName + '.foo';
- const numShardKeys = 10;
- const numDocs = numShardKeys * 2;
- const splitPoint = numShardKeys / 2;
+const dbName = 'test';
+const collNS = dbName + '.foo';
+const numShardKeys = 10;
+const numDocs = numShardKeys * 2;
+const splitPoint = numShardKeys / 2;
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: collNS, key: {x: 1}}));
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: collNS, key: {x: 1}}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- const freshMongos = st.s0;
- const staleMongos = st.s1;
+const freshMongos = st.s0;
+const staleMongos = st.s1;
- const emptyQuery = {};
- const pointQuery = {x: 0};
+const emptyQuery = {};
+const pointQuery = {
+ x: 0
+};
- // Choose a range that would fall on only one shard.
- // Use (splitPoint - 1) because of SERVER-20768.
- const rangeQuery = {x: {$gte: 0, $lt: splitPoint - 1}};
+// Choose a range that would fall on only one shard.
+// Use (splitPoint - 1) because of SERVER-20768.
+const rangeQuery = {
+ x: {$gte: 0, $lt: splitPoint - 1}
+};
- // Choose points that would fall on two different shards.
- const multiPointQuery = {$or: [{x: 0}, {x: numShardKeys}]};
+// Choose points that would fall on two different shards.
+const multiPointQuery = {
+ $or: [{x: 0}, {x: numShardKeys}]
+};
- checkAllRemoveQueries(makeStaleMongosTargetOneShardWhenAllChunksAreOnAnotherShard);
- checkAllRemoveQueries(makeStaleMongosTargetMultipleShardsWhenAllChunksAreOnOneShard);
+checkAllRemoveQueries(makeStaleMongosTargetOneShardWhenAllChunksAreOnAnotherShard);
+checkAllRemoveQueries(makeStaleMongosTargetMultipleShardsWhenAllChunksAreOnOneShard);
- checkAllUpdateQueries(makeStaleMongosTargetOneShardWhenAllChunksAreOnAnotherShard);
- checkAllUpdateQueries(makeStaleMongosTargetMultipleShardsWhenAllChunksAreOnOneShard);
- checkAllUpdateQueries(makeStaleMongosTargetOneShardWhenChunksAreOnMultipleShards);
+checkAllUpdateQueries(makeStaleMongosTargetOneShardWhenAllChunksAreOnAnotherShard);
+checkAllUpdateQueries(makeStaleMongosTargetMultipleShardsWhenAllChunksAreOnOneShard);
+checkAllUpdateQueries(makeStaleMongosTargetOneShardWhenChunksAreOnMultipleShards);
- st.stop();
+st.stop();
})();
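The staleness scenarios above all come down to staleMongos routing from an outdated chunk map while freshMongos holds the authoritative one. A small helper sketch, assuming the same freshMongos, collNS, and st variables as the test, for looking up which shard the authoritative metadata currently assigns to a given shard-key value:

    function shardOwningKey(keyValue) {
        // Chunk documents store their bounds as {min: {x: ...}, max: {x: ...}}.
        return freshMongos.getCollection('config.chunks')
            .findOne({ns: collNS, 'min.x': {$lte: keyValue}, 'max.x': {$gt: keyValue}})
            .shard;
    }

    // For example, after makeStaleMongosTargetOneShardWhenAllChunksAreOnAnotherShard(),
    // shardOwningKey(0) should report st.shard1.shardName even though staleMongos still
    // targets st.shard0.shardName.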
diff --git a/jstests/sharding/stale_version_write.js b/jstests/sharding/stale_version_write.js
index bd603124548..1183e369b2e 100644
--- a/jstests/sharding/stale_version_write.js
+++ b/jstests/sharding/stale_version_write.js
@@ -1,37 +1,37 @@
// Tests whether a reset sharding version triggers errors
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 1, mongos: 2});
+var st = new ShardingTest({shards: 1, mongos: 2});
- var mongosA = st.s0;
- var mongosB = st.s1;
+var mongosA = st.s0;
+var mongosB = st.s1;
- jsTest.log("Adding new collections...");
+jsTest.log("Adding new collections...");
- var collA = mongosA.getCollection(jsTestName() + ".coll");
- assert.writeOK(collA.insert({hello: "world"}));
+var collA = mongosA.getCollection(jsTestName() + ".coll");
+assert.writeOK(collA.insert({hello: "world"}));
- var collB = mongosB.getCollection("" + collA);
- assert.writeOK(collB.insert({hello: "world"}));
+var collB = mongosB.getCollection("" + collA);
+assert.writeOK(collB.insert({hello: "world"}));
- jsTest.log("Enabling sharding...");
+jsTest.log("Enabling sharding...");
- assert.commandWorked(mongosA.getDB("admin").adminCommand({enableSharding: "" + collA.getDB()}));
- assert.commandWorked(
- mongosA.getDB("admin").adminCommand({shardCollection: "" + collA, key: {_id: 1}}));
+assert.commandWorked(mongosA.getDB("admin").adminCommand({enableSharding: "" + collA.getDB()}));
+assert.commandWorked(
+ mongosA.getDB("admin").adminCommand({shardCollection: "" + collA, key: {_id: 1}}));
- // MongoD doesn't know about the config shard version *until* MongoS tells it
- collA.findOne();
+// MongoD doesn't know about the config shard version *until* MongoS tells it
+collA.findOne();
- jsTest.log("Trigger shard version mismatch...");
+jsTest.log("Trigger shard version mismatch...");
- assert.writeOK(collB.insert({goodbye: "world"}));
+assert.writeOK(collB.insert({goodbye: "world"}));
- print("Inserted...");
+print("Inserted...");
- assert.eq(3, collA.find().itcount());
- assert.eq(3, collB.find().itcount());
+assert.eq(3, collA.find().itcount());
+assert.eq(3, collB.find().itcount());
- st.stop();
+st.stop();
})();
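The comment above about mongod not knowing the shard version until mongos tells it can be observed directly with the shard-side getShardVersion command, the same command ssv_config_check.js exercises. A sketch, assuming the same st and collA handles; the exact fields in the reply vary by server version:

    var shardAdmin = st.shard0.getDB('admin');
    // Before collA.findOne() is routed through mongosA, the shard has no version cached for
    // the collection; afterwards the reply reflects the version mongosA sent it.
    printjson(shardAdmin.runCommand({getShardVersion: '' + collA}));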
diff --git a/jstests/sharding/startup_with_all_configs_down.js b/jstests/sharding/startup_with_all_configs_down.js
index 2dc78e07d76..21fd233944c 100644
--- a/jstests/sharding/startup_with_all_configs_down.js
+++ b/jstests/sharding/startup_with_all_configs_down.js
@@ -11,92 +11,89 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- /**
- * Restarts the mongod backing the specified shard instance, without restarting the mongobridge.
- */
- function restartShard(shard, waitForConnect) {
- MongoRunner.stopMongod(shard);
- shard.restart = true;
- shard.waitForConnect = waitForConnect;
- MongoRunner.runMongod(shard);
+"use strict";
+
+/**
+ * Restarts the mongod backing the specified shard instance, without restarting the mongobridge.
+ */
+function restartShard(shard, waitForConnect) {
+ MongoRunner.stopMongod(shard);
+ shard.restart = true;
+ shard.waitForConnect = waitForConnect;
+ MongoRunner.runMongod(shard);
+}
+
+// TODO: SERVER-33830 remove shardAsReplicaSet: false
+var st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
+
+jsTestLog("Setting up initial data");
+
+for (var i = 0; i < 100; i++) {
+ assert.writeOK(st.s.getDB('test').foo.insert({_id: i}));
+}
+
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
+
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: 'test.foo', find: {_id: 50}}));
+assert.commandWorked(
+ st.s0.adminCommand({moveChunk: 'test.foo', find: {_id: 75}, to: st.shard1.shardName}));
+
+// Make sure the pre-existing mongos already has the routing information loaded into memory
+assert.eq(100, st.s.getDB('test').foo.find().itcount());
+
+jsTestLog("Shutting down all config servers");
+for (var i = 0; i < st._configServers.length; i++) {
+ st.stopConfigServer(i);
+}
+
+jsTestLog("Starting a new mongos when there are no config servers up");
+var newMongosInfo = MongoRunner.runMongos({configdb: st._configDB, waitForConnect: false});
+// The new mongos won't accept any new connections, but it should stay up and continue trying
+// to contact the config servers to finish startup.
+assert.throws(function() {
+ new Mongo(newMongosInfo.host);
+});
+
+jsTestLog("Restarting a shard while there are no config servers up");
+restartShard(st.shard1, false);
+
+jsTestLog("Queries should fail because the shard can't initialize sharding state");
+var error = assert.throws(function() {
+ st.s.getDB('test').foo.find().itcount();
+});
+
+assert(ErrorCodes.ReplicaSetNotFound == error.code || ErrorCodes.ExceededTimeLimit == error.code ||
+ ErrorCodes.HostUnreachable == error.code);
+
+jsTestLog("Restarting the config servers");
+for (var i = 0; i < st._configServers.length; i++) {
+ st.restartConfigServer(i);
+}
+
+print("Sleeping for 60 seconds to let the other shards restart their ReplicaSetMonitors");
+sleep(60000);
+
+jsTestLog("Queries against the original mongos should work again");
+assert.eq(100, st.s.getDB('test').foo.find().itcount());
+
+jsTestLog("Should now be possible to connect to the mongos that was started while the config " +
+ "servers were down");
+var newMongosConn = null;
+var caughtException = null;
+assert.soon(function() {
+ try {
+ newMongosConn = new Mongo(newMongosInfo.host);
+ return true;
+ } catch (e) {
+ caughtException = e;
+ return false;
}
+}, "Failed to connect to mongos after config servers were restarted: " + tojson(caughtException));
- // TODO: SERVER-33830 remove shardAsReplicaSet: false
- var st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
+assert.eq(100, newMongosConn.getDB('test').foo.find().itcount());
- jsTestLog("Setting up initial data");
-
- for (var i = 0; i < 100; i++) {
- assert.writeOK(st.s.getDB('test').foo.insert({_id: i}));
- }
-
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
-
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: 'test.foo', find: {_id: 50}}));
- assert.commandWorked(
- st.s0.adminCommand({moveChunk: 'test.foo', find: {_id: 75}, to: st.shard1.shardName}));
-
- // Make sure the pre-existing mongos already has the routing information loaded into memory
- assert.eq(100, st.s.getDB('test').foo.find().itcount());
-
- jsTestLog("Shutting down all config servers");
- for (var i = 0; i < st._configServers.length; i++) {
- st.stopConfigServer(i);
- }
-
- jsTestLog("Starting a new mongos when there are no config servers up");
- var newMongosInfo = MongoRunner.runMongos({configdb: st._configDB, waitForConnect: false});
- // The new mongos won't accept any new connections, but it should stay up and continue trying
- // to contact the config servers to finish startup.
- assert.throws(function() {
- new Mongo(newMongosInfo.host);
- });
-
- jsTestLog("Restarting a shard while there are no config servers up");
- restartShard(st.shard1, false);
-
- jsTestLog("Queries should fail because the shard can't initialize sharding state");
- var error = assert.throws(function() {
- st.s.getDB('test').foo.find().itcount();
- });
-
- assert(ErrorCodes.ReplicaSetNotFound == error.code ||
- ErrorCodes.ExceededTimeLimit == error.code || ErrorCodes.HostUnreachable == error.code);
-
- jsTestLog("Restarting the config servers");
- for (var i = 0; i < st._configServers.length; i++) {
- st.restartConfigServer(i);
- }
-
- print("Sleeping for 60 seconds to let the other shards restart their ReplicaSetMonitors");
- sleep(60000);
-
- jsTestLog("Queries against the original mongos should work again");
- assert.eq(100, st.s.getDB('test').foo.find().itcount());
-
- jsTestLog("Should now be possible to connect to the mongos that was started while the config " +
- "servers were down");
- var newMongosConn = null;
- var caughtException = null;
- assert.soon(
- function() {
- try {
- newMongosConn = new Mongo(newMongosInfo.host);
- return true;
- } catch (e) {
- caughtException = e;
- return false;
- }
- },
- "Failed to connect to mongos after config servers were restarted: " +
- tojson(caughtException));
-
- assert.eq(100, newMongosConn.getDB('test').foo.find().itcount());
-
- st.stop();
- MongoRunner.stopMongos(newMongosInfo);
+st.stop();
+MongoRunner.stopMongos(newMongosInfo);
}());
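The three-way error-code check above could equivalently use the shell's assert.contains helper, which asserts membership of a value in an array. A sketch reusing the error variable from the test:

    assert.contains(
        error.code,
        [ErrorCodes.ReplicaSetNotFound, ErrorCodes.ExceededTimeLimit, ErrorCodes.HostUnreachable],
        'unexpected error while config servers were down: ' + tojson(error));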
diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js
index 76c64c8b41f..0e182997531 100644
--- a/jstests/sharding/stats.js
+++ b/jstests/sharding/stats.js
@@ -1,235 +1,222 @@
(function() {
- var s = new ShardingTest({name: "stats", shards: 2, mongos: 1});
-
- s.adminCommand({enablesharding: "test"});
-
- db = s.getDB("test");
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- function numKeys(o) {
- var num = 0;
- for (var x in o)
- num++;
- return num;
- }
-
- db.foo.drop();
- // SERVER-29678 changed collStats so versions > 4.0 now return 0s on NS not found
- if (MongoRunner.getBinVersionFor(jsTest.options().mongosBinVersion) === '4.0') {
- // TODO: This should be fixed in 4.4
- let res = db.foo.stats();
- if (res.ok === 1) {
- // Possible to hit a shard that is actually version >= 4.2 => result should be zeros
- assert(res.size === 0 && res.count === 0 && res.storageSize === 0 &&
- res.nindexes === 0);
- } else {
- assert.commandFailed(
- db.foo.stats(),
- 'db.collection.stats() should fail non-existent in versions <= 4.0');
- }
+var s = new ShardingTest({name: "stats", shards: 2, mongos: 1});
+
+s.adminCommand({enablesharding: "test"});
+
+db = s.getDB("test");
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+function numKeys(o) {
+ var num = 0;
+ for (var x in o)
+ num++;
+ return num;
+}
+
+db.foo.drop();
+// SERVER-29678 changed collStats so versions > 4.0 now return 0s on NS not found
+if (MongoRunner.getBinVersionFor(jsTest.options().mongosBinVersion) === '4.0') {
+ // TODO: This should be fixed in 4.4
+ let res = db.foo.stats();
+ if (res.ok === 1) {
+ // Possible to hit a shard that is actually version >= 4.2 => result should be zeros
+ assert(res.size === 0 && res.count === 0 && res.storageSize === 0 && res.nindexes === 0);
} else {
- assert.commandWorked(db.foo.stats(),
- 'db.collection.stats() should return 0s on non-existent collection');
+ assert.commandFailed(db.foo.stats(),
+ 'db.collection.stats() should fail non-existent in versions <= 4.0');
}
-
- // ---------- load some data -----
-
- // need collections sharded before and after main collection for proper test
- s.adminCommand({shardcollection: "test.aaa", key: {_id: 1}});
- s.adminCommand(
- {shardcollection: "test.foo", key: {_id: 1}}); // this collection is actually used
- s.adminCommand({shardcollection: "test.zzz", key: {_id: 1}});
-
- N = 10000;
- s.adminCommand({split: "test.foo", middle: {_id: N / 2}});
- s.adminCommand({
- moveChunk: "test.foo",
- find: {_id: 3},
- to: s.getNonPrimaries("test")[0],
- _waitForDelete: true
- });
-
- var bulk = db.foo.initializeUnorderedBulkOp();
- for (i = 0; i < N; i++)
- bulk.insert({_id: i});
- assert.writeOK(bulk.execute());
-
- // Flush all writes to disk since some of the stats are dependent on state in disk (like
- // totalIndexSize).
- assert.commandWorked(db.adminCommand({fsync: 1}));
-
- a = s.shard0.getDB("test");
- b = s.shard1.getDB("test");
-
- x = assert.commandWorked(db.foo.stats());
- assert.eq(N, x.count, "coll total count expected");
- assert.eq(db.foo.count(), x.count, "coll total count match");
- assert.eq(2, x.nchunks, "coll chunk num");
- assert.eq(2, numKeys(x.shards), "coll shard num");
- assert.eq(
- N / 2, x.shards[s.shard0.shardName].count, "coll count on s.shard0.shardName expected");
- assert.eq(
- N / 2, x.shards[s.shard1.shardName].count, "coll count on s.shard1.shardName expected");
- assert.eq(a.foo.count(),
- x.shards[s.shard0.shardName].count,
- "coll count on s.shard0.shardName match");
- assert.eq(b.foo.count(),
- x.shards[s.shard1.shardName].count,
- "coll count on s.shard1.shardName match");
- assert(!x.shards[s.shard0.shardName].indexDetails,
- 'indexDetails should not be present in s.shard0.shardName: ' +
- tojson(x.shards[s.shard0.shardName]));
- assert(!x.shards[s.shard1.shardName].indexDetails,
- 'indexDetails should not be present in s.shard1.shardName: ' +
- tojson(x.shards[s.shard1.shardName]));
-
- a_extras = a.stats().objects - a.foo.count();
- b_extras = b.stats().objects - b.foo.count();
- print("a_extras: " + a_extras);
- print("b_extras: " + b_extras);
-
- x = assert.commandWorked(db.stats());
-
- assert.eq(N + (a_extras + b_extras), x.objects, "db total count expected");
- assert.eq(2, numKeys(x.raw), "db shard num");
- assert.eq((N / 2) + a_extras,
- x.raw[s.shard0.name].objects,
- "db count on s.shard0.shardName expected");
- assert.eq((N / 2) + b_extras,
- x.raw[s.shard1.name].objects,
- "db count on s.shard1.shardName expected");
- assert.eq(
- a.stats().objects, x.raw[s.shard0.name].objects, "db count on s.shard0.shardName match");
- assert.eq(
- b.stats().objects, x.raw[s.shard1.name].objects, "db count on s.shard1.shardName match");
-
- /* Test db.stat() and db.collection.stat() scaling */
-
- /* Helper functions */
- function statComp(stat, stat_scaled, scale) {
- /* Because of loss of floating point precision, do not check exact equality */
- if (stat == stat_scaled)
- return true;
-
- var msg = 'scaled: ' + stat_scaled + ', stat: ' + stat + ', scale: ' + scale;
- assert.lte((stat_scaled - 2), (stat / scale), msg);
- assert.gte((stat_scaled + 2), (stat / scale), msg);
+} else {
+ assert.commandWorked(db.foo.stats(),
+ 'db.collection.stats() should return 0s on non-existent collection');
+}
+
+// ---------- load some data -----
+
+// need collections sharded before and after main collection for proper test
+s.adminCommand({shardcollection: "test.aaa", key: {_id: 1}});
+s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}); // this collection is actually used
+s.adminCommand({shardcollection: "test.zzz", key: {_id: 1}});
+
+N = 10000;
+s.adminCommand({split: "test.foo", middle: {_id: N / 2}});
+s.adminCommand({
+ moveChunk: "test.foo",
+ find: {_id: 3},
+ to: s.getNonPrimaries("test")[0],
+ _waitForDelete: true
+});
+
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (i = 0; i < N; i++)
+ bulk.insert({_id: i});
+assert.writeOK(bulk.execute());
+
+// Flush all writes to disk since some of the stats depend on on-disk state (like
+// totalIndexSize).
+assert.commandWorked(db.adminCommand({fsync: 1}));
+
+a = s.shard0.getDB("test");
+b = s.shard1.getDB("test");
+
+x = assert.commandWorked(db.foo.stats());
+assert.eq(N, x.count, "coll total count expected");
+assert.eq(db.foo.count(), x.count, "coll total count match");
+assert.eq(2, x.nchunks, "coll chunk num");
+assert.eq(2, numKeys(x.shards), "coll shard num");
+assert.eq(N / 2, x.shards[s.shard0.shardName].count, "coll count on s.shard0.shardName expected");
+assert.eq(N / 2, x.shards[s.shard1.shardName].count, "coll count on s.shard1.shardName expected");
+assert.eq(
+ a.foo.count(), x.shards[s.shard0.shardName].count, "coll count on s.shard0.shardName match");
+assert.eq(
+ b.foo.count(), x.shards[s.shard1.shardName].count, "coll count on s.shard1.shardName match");
+assert(!x.shards[s.shard0.shardName].indexDetails,
+ 'indexDetails should not be present in s.shard0.shardName: ' +
+ tojson(x.shards[s.shard0.shardName]));
+assert(!x.shards[s.shard1.shardName].indexDetails,
+ 'indexDetails should not be present in s.shard1.shardName: ' +
+ tojson(x.shards[s.shard1.shardName]));
+
+a_extras = a.stats().objects - a.foo.count();
+b_extras = b.stats().objects - b.foo.count();
+print("a_extras: " + a_extras);
+print("b_extras: " + b_extras);
+
+x = assert.commandWorked(db.stats());
+
+assert.eq(N + (a_extras + b_extras), x.objects, "db total count expected");
+assert.eq(2, numKeys(x.raw), "db shard num");
+assert.eq(
+ (N / 2) + a_extras, x.raw[s.shard0.name].objects, "db count on s.shard0.shardName expected");
+assert.eq(
+ (N / 2) + b_extras, x.raw[s.shard1.name].objects, "db count on s.shard1.shardName expected");
+assert.eq(a.stats().objects, x.raw[s.shard0.name].objects, "db count on s.shard0.shardName match");
+assert.eq(b.stats().objects, x.raw[s.shard1.name].objects, "db count on s.shard1.shardName match");
+
+/* Test db.stats() and db.collection.stats() scaling */
+
+/* Helper functions */
+function statComp(stat, stat_scaled, scale) {
+ /* Because of loss of floating point precision, do not check exact equality */
+ if (stat == stat_scaled)
+ return true;
+
+ var msg = 'scaled: ' + stat_scaled + ', stat: ' + stat + ', scale: ' + scale;
+ assert.lte((stat_scaled - 2), (stat / scale), msg);
+ assert.gte((stat_scaled + 2), (stat / scale), msg);
+}
+
+function dbStatComp(stat_obj, stat_obj_scaled, scale) {
+ statComp(stat_obj.dataSize, stat_obj_scaled.dataSize, scale);
+ statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
+ statComp(stat_obj.indexSize, stat_obj_scaled.indexSize, scale);
+ statComp(stat_obj.fileSize, stat_obj_scaled.fileSize, scale);
+ /* avgObjSize not scaled. See SERVER-7347 */
+ statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, 1);
+}
+
+function collStatComp(stat_obj, stat_obj_scaled, scale, mongos) {
+ statComp(stat_obj.size, stat_obj_scaled.size, scale);
+ statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
+ statComp(stat_obj.totalIndexSize, stat_obj_scaled.totalIndexSize, scale);
+ statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, 1);
+ /* lastExtentSize doesn't exist in mongos level collection stats */
+ if (!mongos) {
+ statComp(stat_obj.lastExtentSize, stat_obj_scaled.lastExtentSize, scale);
}
+}
- function dbStatComp(stat_obj, stat_obj_scaled, scale) {
- statComp(stat_obj.dataSize, stat_obj_scaled.dataSize, scale);
- statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
- statComp(stat_obj.indexSize, stat_obj_scaled.indexSize, scale);
- statComp(stat_obj.fileSize, stat_obj_scaled.fileSize, scale);
- /* avgObjSize not scaled. See SERVER-7347 */
- statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, 1);
- }
+/* db.stats() tests */
+db_not_scaled = assert.commandWorked(db.stats());
+db_scaled_512 = assert.commandWorked(db.stats(512));
+db_scaled_1024 = assert.commandWorked(db.stats(1024));
- function collStatComp(stat_obj, stat_obj_scaled, scale, mongos) {
- statComp(stat_obj.size, stat_obj_scaled.size, scale);
- statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
- statComp(stat_obj.totalIndexSize, stat_obj_scaled.totalIndexSize, scale);
- statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, 1);
- /* lastExtentSize doesn't exist in mongos level collection stats */
- if (!mongos) {
- statComp(stat_obj.lastExtentSize, stat_obj_scaled.lastExtentSize, scale);
- }
- }
+for (var shard in db_not_scaled.raw) {
+ dbStatComp(db_not_scaled.raw[shard], db_scaled_512.raw[shard], 512);
+ dbStatComp(db_not_scaled.raw[shard], db_scaled_1024.raw[shard], 1024);
+}
- /* db.stats() tests */
- db_not_scaled = assert.commandWorked(db.stats());
- db_scaled_512 = assert.commandWorked(db.stats(512));
- db_scaled_1024 = assert.commandWorked(db.stats(1024));
+dbStatComp(db_not_scaled, db_scaled_512, 512);
+dbStatComp(db_not_scaled, db_scaled_1024, 1024);
- for (var shard in db_not_scaled.raw) {
- dbStatComp(db_not_scaled.raw[shard], db_scaled_512.raw[shard], 512);
- dbStatComp(db_not_scaled.raw[shard], db_scaled_1024.raw[shard], 1024);
- }
+/* db.collection.stats() tests */
+coll_not_scaled = assert.commandWorked(db.foo.stats());
+coll_scaled_512 = assert.commandWorked(db.foo.stats(512));
+coll_scaled_1024 = assert.commandWorked(db.foo.stats(1024));
- dbStatComp(db_not_scaled, db_scaled_512, 512);
- dbStatComp(db_not_scaled, db_scaled_1024, 1024);
+for (var shard in coll_not_scaled.shards) {
+ collStatComp(coll_not_scaled.shards[shard], coll_scaled_512.shards[shard], 512, false);
+ collStatComp(coll_not_scaled.shards[shard], coll_scaled_1024.shards[shard], 1024, false);
+}
- /* db.collection.stats() tests */
- coll_not_scaled = assert.commandWorked(db.foo.stats());
- coll_scaled_512 = assert.commandWorked(db.foo.stats(512));
- coll_scaled_1024 = assert.commandWorked(db.foo.stats(1024));
+collStatComp(coll_not_scaled, coll_scaled_512, 512, true);
+collStatComp(coll_not_scaled, coll_scaled_1024, 1024, true);
- for (var shard in coll_not_scaled.shards) {
- collStatComp(coll_not_scaled.shards[shard], coll_scaled_512.shards[shard], 512, false);
- collStatComp(coll_not_scaled.shards[shard], coll_scaled_1024.shards[shard], 1024, false);
+/* db.collection.stats() - indexDetails tests */
+(function() {
+var t = db.foo;
+
+assert.commandWorked(t.ensureIndex({a: 1}));
+assert.eq(2, t.getIndexes().length);
+
+var isWiredTiger =
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger");
+
+var stats = assert.commandWorked(t.stats({indexDetails: true}));
+var shardName;
+var shardStats;
+for (shardName in stats.shards) {
+ shardStats = stats.shards[shardName];
+ assert(shardStats.indexDetails,
+ 'indexDetails missing for ' + shardName + ': ' + tojson(shardStats));
+ if (isWiredTiger) {
+ assert.eq(t.getIndexes().length,
+ Object.keys(shardStats.indexDetails).length,
+ 'incorrect number of entries in WiredTiger indexDetails: ' + tojson(shardStats));
}
+}
- collStatComp(coll_not_scaled, coll_scaled_512, 512, true);
- collStatComp(coll_not_scaled, coll_scaled_1024, 1024, true);
-
- /* db.collection.stats() - indexDetails tests */
- (function() {
- var t = db.foo;
-
- assert.commandWorked(t.ensureIndex({a: 1}));
- assert.eq(2, t.getIndexes().length);
-
- var isWiredTiger =
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger");
-
- var stats = assert.commandWorked(t.stats({indexDetails: true}));
- var shardName;
- var shardStats;
- for (shardName in stats.shards) {
- shardStats = stats.shards[shardName];
- assert(shardStats.indexDetails,
- 'indexDetails missing for ' + shardName + ': ' + tojson(shardStats));
- if (isWiredTiger) {
- assert.eq(t.getIndexes().length,
- Object.keys(shardStats.indexDetails).length,
- 'incorrect number of entries in WiredTiger indexDetails: ' +
- tojson(shardStats));
- }
- }
-
- function getIndexName(indexKey) {
- var indexes = t.getIndexes().filter(function(doc) {
- return friendlyEqual(doc.key, indexKey);
- });
- assert.eq(
- 1,
- indexes.length,
- tojson(indexKey) + ' not found in getIndexes() result: ' + tojson(t.getIndexes()));
- return indexes[0].name;
- }
-
- function checkIndexDetails(options, indexName) {
- var stats = assert.commandWorked(t.stats(options));
- for (shardName in stats.shards) {
- shardStats = stats.shards[shardName];
- assert(shardStats.indexDetails,
- 'indexDetails missing from db.collection.stats(' + tojson(options) +
- ').shards[' + shardName + '] result: ' + tojson(shardStats));
- // Currently, indexDetails is only supported with WiredTiger.
- if (isWiredTiger) {
- assert.eq(1,
- Object.keys(shardStats.indexDetails).length,
- 'WiredTiger indexDetails must have exactly one entry');
- assert(shardStats.indexDetails[indexName],
- indexName + ' missing from WiredTiger indexDetails: ' +
- tojson(shardStats.indexDetails));
- assert.neq(0,
- Object.keys(shardStats.indexDetails[indexName]).length,
- indexName + ' exists in indexDetails but contains no information: ' +
- tojson(shardStats.indexDetails));
- }
- }
+function getIndexName(indexKey) {
+ var indexes = t.getIndexes().filter(function(doc) {
+ return friendlyEqual(doc.key, indexKey);
+ });
+ assert.eq(1,
+ indexes.length,
+ tojson(indexKey) + ' not found in getIndexes() result: ' + tojson(t.getIndexes()));
+ return indexes[0].name;
+}
+
+function checkIndexDetails(options, indexName) {
+ var stats = assert.commandWorked(t.stats(options));
+ for (shardName in stats.shards) {
+ shardStats = stats.shards[shardName];
+ assert(shardStats.indexDetails,
+ 'indexDetails missing from db.collection.stats(' + tojson(options) + ').shards[' +
+ shardName + '] result: ' + tojson(shardStats));
+ // Currently, indexDetails is only supported with WiredTiger.
+ if (isWiredTiger) {
+ assert.eq(1,
+ Object.keys(shardStats.indexDetails).length,
+ 'WiredTiger indexDetails must have exactly one entry');
+ assert(shardStats.indexDetails[indexName],
+ indexName +
+ ' missing from WiredTiger indexDetails: ' + tojson(shardStats.indexDetails));
+ assert.neq(0,
+ Object.keys(shardStats.indexDetails[indexName]).length,
+ indexName + ' exists in indexDetails but contains no information: ' +
+ tojson(shardStats.indexDetails));
}
+ }
+}
- // indexDetailsKey - show indexDetails results for this index key only.
- var indexKey = {a: 1};
- var indexName = getIndexName(indexKey);
- checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName);
+// indexDetailsKey - show indexDetails results for this index key only.
+var indexKey = {a: 1};
+var indexName = getIndexName(indexKey);
+checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName);
- // indexDetailsName - show indexDetails results for this index name only.
- checkIndexDetails({indexDetails: true, indexDetailsName: indexName}, indexName);
- }());
+// indexDetailsName - show indexDetails results for this index name only.
+checkIndexDetails({indexDetails: true, indexDetailsName: indexName}, indexName);
+}());
- s.stop();
+s.stop();
})();
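As a concrete example of the tolerance in statComp above: a raw stat of 1,000,000 bytes scaled by 1024 is 976.5625, so any reported scaled value within 2 of that ratio is accepted, which absorbs per-shard rounding before mongos sums the results. A sketch reusing the statComp helper defined in the test:

    statComp(1000000, 976, 1024);  // passes: 976 - 2 <= 976.5625 <= 976 + 2
    statComp(1000000, 977, 1024);  // also passes, for the same reason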
diff --git a/jstests/sharding/tag_auto_split.js b/jstests/sharding/tag_auto_split.js
index ac80c057d85..46e9325052c 100644
--- a/jstests/sharding/tag_auto_split.js
+++ b/jstests/sharding/tag_auto_split.js
@@ -1,35 +1,35 @@
// Test to make sure that tag ranges get split when full keys are used for the tag ranges
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2, mongos: 1});
+var s = new ShardingTest({shards: 2, mongos: 1});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- assert.eq(1, s.config.chunks.find({"ns": "test.foo"}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo"}).itcount());
- s.addShardTag(s.shard0.shardName, "a");
- s.addShardTag(s.shard0.shardName, "b");
+s.addShardTag(s.shard0.shardName, "a");
+s.addShardTag(s.shard0.shardName, "b");
- s.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a");
- s.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
+s.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a");
+s.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
- s.startBalancer();
+s.startBalancer();
- assert.soon(function() {
- return s.config.chunks.find({"ns": "test.foo"}).itcount() == 4;
- }, 'Split did not occur', 3 * 60 * 1000);
+assert.soon(function() {
+ return s.config.chunks.find({"ns": "test.foo"}).itcount() == 4;
+}, 'Split did not occur', 3 * 60 * 1000);
- s.awaitBalancerRound();
- s.printShardingStatus(true);
- assert.eq(4, s.config.chunks.find({"ns": "test.foo"}).itcount(), 'Split points changed');
+s.awaitBalancerRound();
+s.printShardingStatus(true);
+assert.eq(4, s.config.chunks.find({"ns": "test.foo"}).itcount(), 'Split points changed');
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: MinKey}}).itcount());
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 5}}).itcount());
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 10}}).itcount());
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 15}}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: MinKey}}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 5}}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 10}}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 15}}).itcount());
- s.stop();
+s.stop();
})();
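The four chunks asserted above correspond to the tag boundaries being promoted to split points: [MinKey, 5), [5, 10) for tag 'a', [10, 15) for tag 'b', and [15, MaxKey). A sketch of an equivalent check on both chunk bounds, assuming the same s handle:

    assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 5}, max: {_id: 10}}).itcount());
    assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 10}, max: {_id: 15}}).itcount());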
diff --git a/jstests/sharding/tag_auto_split_partial_key.js b/jstests/sharding/tag_auto_split_partial_key.js
index 35f1c6c7b65..dc19059b726 100644
--- a/jstests/sharding/tag_auto_split_partial_key.js
+++ b/jstests/sharding/tag_auto_split_partial_key.js
@@ -1,45 +1,45 @@
// Test to make sure that tag ranges get split when partial keys are used for the tag ranges
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2, mongos: 1});
+var s = new ShardingTest({shards: 2, mongos: 1});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1, a: 1}}));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1, a: 1}}));
- assert.eq(1, s.config.chunks.find({"ns": "test.foo"}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo"}).itcount());
- s.addShardTag(s.shard0.shardName, "a");
- s.addShardTag(s.shard0.shardName, "b");
+s.addShardTag(s.shard0.shardName, "a");
+s.addShardTag(s.shard0.shardName, "b");
- s.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a");
- s.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
+s.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a");
+s.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
- s.startBalancer();
+s.startBalancer();
- assert.soon(function() {
- return s.config.chunks.find({"ns": "test.foo"}).itcount() == 4;
- }, 'Split did not occur', 3 * 60 * 1000);
+assert.soon(function() {
+ return s.config.chunks.find({"ns": "test.foo"}).itcount() == 4;
+}, 'Split did not occur', 3 * 60 * 1000);
- s.awaitBalancerRound();
- s.printShardingStatus(true);
- assert.eq(4, s.config.chunks.find({"ns": "test.foo"}).itcount(), 'Split points changed');
+s.awaitBalancerRound();
+s.printShardingStatus(true);
+assert.eq(4, s.config.chunks.find({"ns": "test.foo"}).itcount(), 'Split points changed');
- s.config.chunks.find({"ns": "test.foo"}).forEach(function(chunk) {
- var numFields = 0;
- for (var x in chunk.min) {
- numFields++;
- assert(x == "_id" || x == "a", tojson(chunk));
- }
- assert.eq(2, numFields, tojson(chunk));
- });
+s.config.chunks.find({"ns": "test.foo"}).forEach(function(chunk) {
+ var numFields = 0;
+ for (var x in chunk.min) {
+ numFields++;
+ assert(x == "_id" || x == "a", tojson(chunk));
+ }
+ assert.eq(2, numFields, tojson(chunk));
+});
- // Check chunk mins correspond exactly to tag range boundaries, extended to match shard key
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: MinKey, a: MinKey}}).itcount());
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 5, a: MinKey}}).itcount());
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 10, a: MinKey}}).itcount());
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 15, a: MinKey}}).itcount());
+// Check chunk mins correspond exactly to tag range boundaries, extended to match shard key
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: MinKey, a: MinKey}}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 5, a: MinKey}}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 10, a: MinKey}}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 15, a: MinKey}}).itcount());
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/tag_range.js b/jstests/sharding/tag_range.js
index 3cb99e6ab9f..0cfb3cd35a1 100644
--- a/jstests/sharding/tag_range.js
+++ b/jstests/sharding/tag_range.js
@@ -1,93 +1,91 @@
// tests to make sure that tag ranges are added/removed/updated successfully
(function() {
- 'use strict';
+'use strict';
- const st = new ShardingTest({shards: 2, mongos: 1});
+const st = new ShardingTest({shards: 2, mongos: 1});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.tag_range', key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.tag_range', key: {_id: 1}}));
- function countTags(num, message) {
- assert.eq(st.config.tags.count(), num, message);
- }
+function countTags(num, message) {
+ assert.eq(st.config.tags.count(), num, message);
+}
- assert.eq(1, st.config.chunks.count({"ns": "test.tag_range"}));
+assert.eq(1, st.config.chunks.count({"ns": "test.tag_range"}));
- st.addShardTag(st.shard0.shardName, 'a');
- st.addShardTag(st.shard0.shardName, 'b');
- st.addShardTag(st.shard0.shardName, 'c');
+st.addShardTag(st.shard0.shardName, 'a');
+st.addShardTag(st.shard0.shardName, 'b');
+st.addShardTag(st.shard0.shardName, 'c');
- // add two ranges, verify the additions
+// add two ranges, verify the additions
- assert.commandWorked(st.addTagRange('test.tag_range', {_id: 5}, {_id: 10}, 'a'));
- assert.commandWorked(st.addTagRange('test.tag_range', {_id: 10}, {_id: 15}, 'b'));
+assert.commandWorked(st.addTagRange('test.tag_range', {_id: 5}, {_id: 10}, 'a'));
+assert.commandWorked(st.addTagRange('test.tag_range', {_id: 10}, {_id: 15}, 'b'));
- countTags(2, 'tag ranges were not successfully added');
+countTags(2, 'tag ranges were not successfully added');
- // remove the second range, should be left with one
+// remove the second range, should be left with one
- assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 10}, {_id: 15}, 'b'));
+assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 10}, {_id: 15}, 'b'));
- countTags(1, 'tag range not removed successfully');
+countTags(1, 'tag range not removed successfully');
- // add range min=max, verify the additions
+// add range min=max, verify the additions
- try {
- st.addTagRange('test.tag_range', {_id: 20}, {_id: 20}, 'a');
- } catch (e) {
- countTags(1, 'tag range should not have been added');
- }
+try {
+ st.addTagRange('test.tag_range', {_id: 20}, {_id: 20}, 'a');
+} catch (e) {
+ countTags(1, 'tag range should not have been added');
+}
- // Test that a dotted path is allowed in a tag range if it includes the shard key.
- assert.commandWorked(
- st.s0.adminCommand({shardCollection: 'test.tag_range_dotted', key: {"_id.a": 1}}));
- assert.commandWorked(st.addTagRange('test.tag_range_dotted', {"_id.a": 5}, {"_id.a": 10}, 'c'));
- countTags(2, 'Dotted path tag range not successfully added.');
+// Test that a dotted path is allowed in a tag range if it includes the shard key.
+assert.commandWorked(
+ st.s0.adminCommand({shardCollection: 'test.tag_range_dotted', key: {"_id.a": 1}}));
+assert.commandWorked(st.addTagRange('test.tag_range_dotted', {"_id.a": 5}, {"_id.a": 10}, 'c'));
+countTags(2, 'Dotted path tag range not successfully added.');
- assert.commandWorked(
- st.removeTagRange('test.tag_range_dotted', {"_id.a": 5}, {"_id.a": 10}, 'c'));
- assert.commandFailed(st.addTagRange('test.tag_range_dotted', {"_id.b": 5}, {"_id.b": 10}, 'c'));
- countTags(1, 'Incorrectly added tag range.');
+assert.commandWorked(st.removeTagRange('test.tag_range_dotted', {"_id.a": 5}, {"_id.a": 10}, 'c'));
+assert.commandFailed(st.addTagRange('test.tag_range_dotted', {"_id.b": 5}, {"_id.b": 10}, 'c'));
+countTags(1, 'Incorrectly added tag range.');
- // Test that ranges on embedded fields of the shard key are not allowed.
- assert.commandFailed(
- st.addTagRange('test.tag_range_dotted', {_id: {a: 5}}, {_id: {a: 10}}, 'c'));
- countTags(1, 'Incorrectly added embedded field tag range');
+// Test that ranges on embedded fields of the shard key are not allowed.
+assert.commandFailed(st.addTagRange('test.tag_range_dotted', {_id: {a: 5}}, {_id: {a: 10}}, 'c'));
+countTags(1, 'Incorrectly added embedded field tag range');
- // removeTagRange tests for tag ranges that do not exist
+// removeTagRange tests for tag ranges that do not exist
- // Bad namespace
- assert.commandFailed(st.removeTagRange('badns', {_id: 5}, {_id: 11}, 'a'));
- countTags(1, 'Bad namespace: tag range does not exist');
+// Bad namespace
+assert.commandFailed(st.removeTagRange('badns', {_id: 5}, {_id: 11}, 'a'));
+countTags(1, 'Bad namespace: tag range does not exist');
- // Bad tag
- assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 11}, 'badtag'));
- countTags(1, 'Bad tag: tag range does not exist');
+// Bad tag
+assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 11}, 'badtag'));
+countTags(1, 'Bad tag: tag range does not exist');
- // Bad min
- assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 0}, {_id: 11}, 'a'));
- countTags(1, 'Bad min: tag range does not exist');
+// Bad min
+assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 0}, {_id: 11}, 'a'));
+countTags(1, 'Bad min: tag range does not exist');
- // Bad max
- assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 12}, 'a'));
- countTags(1, 'Bad max: tag range does not exist');
+// Bad max
+assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 12}, 'a'));
+countTags(1, 'Bad max: tag range does not exist');
- // Invalid namesapce
- assert.commandFailed(st.removeTagRange(35, {_id: 5}, {_id: 11}, 'a'));
- countTags(1, 'Invalid namespace: tag range does not exist');
+// Invalid namespace
+assert.commandFailed(st.removeTagRange(35, {_id: 5}, {_id: 11}, 'a'));
+countTags(1, 'Invalid namespace: tag range does not exist');
- // Invalid tag
- assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 11}, 35));
- countTags(1, 'Invalid tag: tag range does not exist');
+// Invalid tag
+assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 11}, 35));
+countTags(1, 'Invalid tag: tag range does not exist');
- // Invalid min
- assert.commandFailed(st.removeTagRange('test.tag_range', 35, {_id: 11}, 'a'));
- countTags(1, 'Invalid min: tag range does not exist');
+// Invalid min
+assert.commandFailed(st.removeTagRange('test.tag_range', 35, {_id: 11}, 'a'));
+countTags(1, 'Invalid min: tag range does not exist');
- // Invalid max
- assert.commandFailed(st.removeTagRange('test.tag_range', {_id: 5}, 35, 'a'));
- countTags(1, 'Invalid max: tag range does not exist');
+// Invalid max
+assert.commandFailed(st.removeTagRange('test.tag_range', {_id: 5}, 35, 'a'));
+countTags(1, 'Invalid max: tag range does not exist');
- st.stop();
+st.stop();
})();
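
For reference, the zone bookkeeping exercised by these ShardingTest helpers can also be driven through the public mongos admin commands. A minimal sketch, assuming a ShardingTest `st` like the one above (the commands shown are the documented zone API; the zone name 'a' mirrors the test):

// Sketch: associate a shard with a zone, attach a key range, then detach it again.
assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: 'a'}));
assert.commandWorked(st.s.adminCommand(
    {updateZoneKeyRange: 'test.tag_range', min: {_id: 5}, max: {_id: 10}, zone: 'a'}));
// The range is recorded as a document in config.tags, which is what countTags() inspects.
assert.eq(1, st.s.getDB('config').tags.count({ns: 'test.tag_range', tag: 'a'}));
// Passing zone: null removes the range again.
assert.commandWorked(st.s.adminCommand(
    {updateZoneKeyRange: 'test.tag_range', min: {_id: 5}, max: {_id: 10}, zone: null}));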
diff --git a/jstests/sharding/test_stacked_migration_cleanup.js b/jstests/sharding/test_stacked_migration_cleanup.js
index b8baba5f5b2..b85a188938d 100644
--- a/jstests/sharding/test_stacked_migration_cleanup.js
+++ b/jstests/sharding/test_stacked_migration_cleanup.js
@@ -1,68 +1,67 @@
// Tests "stacking" multiple migration cleanup threads and their behavior when the collection
// changes
(function() {
- 'use strict';
+'use strict';
- // start up a new sharded cluster
- var st = new ShardingTest({shards: 2, mongos: 1});
+// start up a new sharded cluster
+var st = new ShardingTest({shards: 2, mongos: 1});
- var mongos = st.s;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
+var mongos = st.s;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
- // Enable sharding of the collection
- assert.commandWorked(mongos.adminCommand({enablesharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(mongos.adminCommand({shardcollection: coll + "", key: {_id: 1}}));
+// Enable sharding of the collection
+assert.commandWorked(mongos.adminCommand({enablesharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(mongos.adminCommand({shardcollection: coll + "", key: {_id: 1}}));
- var numChunks = 30;
+var numChunks = 30;
- // Create a bunch of chunks
- for (var i = 0; i < numChunks; i++) {
- assert.commandWorked(mongos.adminCommand({split: coll + "", middle: {_id: i}}));
- }
+// Create a bunch of chunks
+for (var i = 0; i < numChunks; i++) {
+ assert.commandWorked(mongos.adminCommand({split: coll + "", middle: {_id: i}}));
+}
- jsTest.log("Inserting a lot of small documents...");
+jsTest.log("Inserting a lot of small documents...");
- // Insert a lot of small documents to make multiple cursor batches
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 10 * 1000; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+// Insert a lot of small documents to make multiple cursor batches
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 10 * 1000; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
- jsTest.log("Opening a mongod cursor...");
+jsTest.log("Opening a mongod cursor...");
- // Open a new cursor on the mongod
- var cursor = coll.find();
- var next = cursor.next();
+// Open a new cursor on the mongod
+var cursor = coll.find();
+var next = cursor.next();
- jsTest.log("Moving a bunch of chunks to stack cleanup...");
+jsTest.log("Moving a bunch of chunks to stack cleanup...");
- // Move a bunch of chunks, but don't close the cursor so they stack.
- for (var i = 0; i < numChunks; i++) {
- assert.commandWorked(
- mongos.adminCommand({moveChunk: coll + "", find: {_id: i}, to: st.shard1.shardName}));
- }
+// Move a bunch of chunks, but don't close the cursor so they stack.
+for (var i = 0; i < numChunks; i++) {
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: coll + "", find: {_id: i}, to: st.shard1.shardName}));
+}
- jsTest.log("Dropping and re-creating collection...");
+jsTest.log("Dropping and re-creating collection...");
- coll.drop();
+coll.drop();
- bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < numChunks; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < numChunks; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
- sleep(10 * 1000);
+sleep(10 * 1000);
- jsTest.log("Checking that documents were not cleaned up...");
+jsTest.log("Checking that documents were not cleaned up...");
- for (var i = 0; i < numChunks; i++) {
- assert.neq(null, coll.findOne({_id: i}));
- }
-
- st.stop();
+for (var i = 0; i < numChunks; i++) {
+ assert.neq(null, coll.findOne({_id: i}));
+}
+st.stop();
})();
diff --git a/jstests/sharding/time_zone_info_mongos.js b/jstests/sharding/time_zone_info_mongos.js
index c75ac56628b..73b59b16f7a 100644
--- a/jstests/sharding/time_zone_info_mongos.js
+++ b/jstests/sharding/time_zone_info_mongos.js
@@ -1,100 +1,99 @@
// Test that mongoS accepts --timeZoneInfo <timezoneDBPath> as a command-line argument and that an
// aggregation pipeline with timezone expressions executes correctly on mongoS.
(function() {
- const tzGoodInfo = "jstests/libs/config_files/good_timezone_info";
- const tzBadInfo = "jstests/libs/config_files/bad_timezone_info";
- const tzNoInfo = "jstests/libs/config_files/missing_directory";
-
- const st = new ShardingTest({
- shards: 2,
- mongos: {s0: {timeZoneInfo: tzGoodInfo}},
- rs: {nodes: 1, timeZoneInfo: tzGoodInfo}
- });
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Confirm that the timeZoneInfo command-line argument has been set on mongoS.
- const mongosCfg = assert.commandWorked(mongosDB.adminCommand({getCmdLineOpts: 1}));
- assert.eq(mongosCfg.parsed.processManagement.timeZoneInfo, tzGoodInfo);
-
- // Test that a bad timezone file causes mongoS startup to fail.
- let conn = MongoRunner.runMongos({configdb: st.configRS.getURL(), timeZoneInfo: tzBadInfo});
- assert.eq(conn, null, "expected launching mongos with bad timezone rules to fail");
- assert.neq(-1, rawMongoProgramOutput().indexOf("Fatal assertion 40475"));
-
- // Test that a non-existent timezone directory causes mongoS startup to fail.
- conn = MongoRunner.runMongos({configdb: st.configRS.getURL(), timeZoneInfo: tzNoInfo});
- assert.eq(conn, null, "expected launching mongos with bad timezone rules to fail");
- // Look for either old or new error message
- assert(rawMongoProgramOutput().indexOf("Failed to create service context") != -1 ||
- rawMongoProgramOutput().indexOf("Failed global initialization") != -1);
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
-
- // Move the [0, MaxKey) chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
-
- // Write a document containing a 'date' field to each chunk.
- assert.writeOK(mongosColl.insert({_id: -1, date: ISODate("2017-11-13T12:00:00.000+0000")}));
- assert.writeOK(mongosColl.insert({_id: 1, date: ISODate("2017-11-13T03:00:00.000+0600")}));
-
- // Constructs a pipeline which splits the 'date' field into its constituent parts on mongoD,
- // reassembles the original date on mongoS, and verifies that the two match. All timezone
- // expressions in the pipeline use the passed 'tz' string or, if absent, default to "GMT".
- function buildTimeZonePipeline(tz) {
- // We use $const here so that the input pipeline matches the format of the explain output.
- const tzExpr = {$const: (tz || "GMT")};
- return [
- {$addFields: {mongodParts: {$dateToParts: {date: "$date", timezone: tzExpr}}}},
- {$_internalSplitPipeline: {mergeType: "mongos"}},
- {
- $addFields: {
- mongosDate: {
- $dateFromParts: {
- year: "$mongodParts.year",
- month: "$mongodParts.month",
- day: "$mongodParts.day",
- hour: "$mongodParts.hour",
- minute: "$mongodParts.minute",
- second: "$mongodParts.second",
- millisecond: "$mongodParts.millisecond",
- timezone: tzExpr
- }
- }
- }
- },
- {$match: {$expr: {$eq: ["$date", "$mongosDate"]}}}
- ];
- }
-
- // Confirm that the pipe splits at '$_internalSplitPipeline' and that the merge runs on mongoS.
- let timeZonePipeline = buildTimeZonePipeline("GMT");
- const tzExplain = assert.commandWorked(mongosColl.explain().aggregate(timeZonePipeline));
- assert.eq(tzExplain.splitPipeline.shardsPart, [timeZonePipeline[0]]);
- assert.eq(tzExplain.splitPipeline.mergerPart, timeZonePipeline.slice(1));
- assert.eq(tzExplain.mergeType, "mongos");
-
- // Confirm that both documents are output by the pipeline, demonstrating that the date has been
- // correctly disassembled on mongoD and reassembled on mongoS.
- assert.eq(mongosColl.aggregate(timeZonePipeline).itcount(), 2);
-
- // Confirm that aggregating with a timezone which is not present in 'good_timezone_info' fails.
- timeZonePipeline = buildTimeZonePipeline("Europe/Dublin");
- assert.eq(assert.throws(() => mongosColl.aggregate(timeZonePipeline)).code, 40485);
-
- st.stop();
+const tzGoodInfo = "jstests/libs/config_files/good_timezone_info";
+const tzBadInfo = "jstests/libs/config_files/bad_timezone_info";
+const tzNoInfo = "jstests/libs/config_files/missing_directory";
+
+const st = new ShardingTest({
+ shards: 2,
+ mongos: {s0: {timeZoneInfo: tzGoodInfo}},
+ rs: {nodes: 1, timeZoneInfo: tzGoodInfo}
+});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+// Confirm that the timeZoneInfo command-line argument has been set on mongoS.
+const mongosCfg = assert.commandWorked(mongosDB.adminCommand({getCmdLineOpts: 1}));
+assert.eq(mongosCfg.parsed.processManagement.timeZoneInfo, tzGoodInfo);
+
+// Test that a bad timezone file causes mongoS startup to fail.
+let conn = MongoRunner.runMongos({configdb: st.configRS.getURL(), timeZoneInfo: tzBadInfo});
+assert.eq(conn, null, "expected launching mongos with bad timezone rules to fail");
+assert.neq(-1, rawMongoProgramOutput().indexOf("Fatal assertion 40475"));
+
+// Test that a non-existent timezone directory causes mongoS startup to fail.
+conn = MongoRunner.runMongos({configdb: st.configRS.getURL(), timeZoneInfo: tzNoInfo});
+assert.eq(conn, null, "expected launching mongos with bad timezone rules to fail");
+// Look for either old or new error message
+assert(rawMongoProgramOutput().indexOf("Failed to create service context") != -1 ||
+ rawMongoProgramOutput().indexOf("Failed global initialization") != -1);
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Shard the test collection on _id.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+
+// Move the [0, MaxKey) chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+
+// Write a document containing a 'date' field to each chunk.
+assert.writeOK(mongosColl.insert({_id: -1, date: ISODate("2017-11-13T12:00:00.000+0000")}));
+assert.writeOK(mongosColl.insert({_id: 1, date: ISODate("2017-11-13T03:00:00.000+0600")}));
+
+// Constructs a pipeline which splits the 'date' field into its constituent parts on mongoD,
+// reassembles the original date on mongoS, and verifies that the two match. All timezone
+// expressions in the pipeline use the passed 'tz' string or, if absent, default to "GMT".
+function buildTimeZonePipeline(tz) {
+ // We use $const here so that the input pipeline matches the format of the explain output.
+ const tzExpr = {$const: (tz || "GMT")};
+ return [
+ {$addFields: {mongodParts: {$dateToParts: {date: "$date", timezone: tzExpr}}}},
+ {$_internalSplitPipeline: {mergeType: "mongos"}},
+ {
+ $addFields: {
+ mongosDate: {
+ $dateFromParts: {
+ year: "$mongodParts.year",
+ month: "$mongodParts.month",
+ day: "$mongodParts.day",
+ hour: "$mongodParts.hour",
+ minute: "$mongodParts.minute",
+ second: "$mongodParts.second",
+ millisecond: "$mongodParts.millisecond",
+ timezone: tzExpr
+ }
+ }
+ }
+ },
+ {$match: {$expr: {$eq: ["$date", "$mongosDate"]}}}
+ ];
+}
+
+// Confirm that the pipeline splits at '$_internalSplitPipeline' and that the merge runs on mongoS.
+let timeZonePipeline = buildTimeZonePipeline("GMT");
+const tzExplain = assert.commandWorked(mongosColl.explain().aggregate(timeZonePipeline));
+assert.eq(tzExplain.splitPipeline.shardsPart, [timeZonePipeline[0]]);
+assert.eq(tzExplain.splitPipeline.mergerPart, timeZonePipeline.slice(1));
+assert.eq(tzExplain.mergeType, "mongos");
+
+// Confirm that both documents are output by the pipeline, demonstrating that the date has been
+// correctly disassembled on mongoD and reassembled on mongoS.
+assert.eq(mongosColl.aggregate(timeZonePipeline).itcount(), 2);
+
+// Confirm that aggregating with a timezone which is not present in 'good_timezone_info' fails.
+timeZonePipeline = buildTimeZonePipeline("Europe/Dublin");
+assert.eq(assert.throws(() => mongosColl.aggregate(timeZonePipeline)).code, 40485);
+
+st.stop();
})();
\ No newline at end of file
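
Outside the sharded setup, the $dateToParts/$dateFromParts round trip that this test splits across mongoD and mongoS can be sanity-checked on any collection. A standalone sketch (the collection name tzdemo is illustrative, not part of the test):

// Sketch: disassemble a date into parts and reassemble it in the same timezone.
const demo = db.tzdemo;
demo.insert({_id: 1, date: ISODate("2017-11-13T12:00:00Z")});
const roundTrip = demo.aggregate([
    {$addFields: {parts: {$dateToParts: {date: "$date", timezone: "GMT"}}}},
    {
        $addFields: {
            rebuilt: {
                $dateFromParts: {
                    year: "$parts.year", month: "$parts.month", day: "$parts.day",
                    hour: "$parts.hour", minute: "$parts.minute", second: "$parts.second",
                    millisecond: "$parts.millisecond", timezone: "GMT"
                }
            }
        }
    },
    {$match: {$expr: {$eq: ["$date", "$rebuilt"]}}}
]).itcount();
assert.eq(1, roundTrip);  // the reassembled date matches the original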
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index 78b86e64e50..bbf930c9b8f 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -157,119 +157,119 @@ var configDB = st.s.getDB('config');
// high - high shard key value
var tests = [
{
- // Test auto-split on the "low" top chunk to another tagged shard
- name: "low top chunk with tag move",
- lowOrHigh: lowChunk,
- movedToShard: st.rs2.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 20, tags: ["NYC"]},
- {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: st.rs2.name, range: highChunkRange, chunks: 5, tags: ["NYC"]},
- {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]},
- ],
- tagRanges: [
- {range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}
- ],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk to another tagged shard
+ name: "low top chunk with tag move",
+ lowOrHigh: lowChunk,
+ movedToShard: st.rs2.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: st.rs2.name, range: highChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]},
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "low" top chunk to same tagged shard
- name: "low top chunk with tag no move",
- lowOrHigh: lowChunk,
- movedToShard: st.rs0.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 5, tags: ["NYC"]},
- {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: st.rs2.name, range: highChunkRange, chunks: 20, tags: ["NYC"]},
- {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]},
- ],
- tagRanges: [
- {range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}
- ],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk to same tagged shard
+ name: "low top chunk with tag no move",
+ lowOrHigh: lowChunk,
+ movedToShard: st.rs0.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: st.rs2.name, range: highChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]},
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "low" top chunk to another shard
- name: "low top chunk no tag move",
- lowOrHigh: lowChunk,
- movedToShard: st.rs3.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 20},
- {name: st.rs1.name, range: midChunkRange1, chunks: 20},
- {name: st.rs2.name, range: highChunkRange, chunks: 5},
- {name: st.rs3.name, range: midChunkRange2, chunks: 1}
- ],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk to another shard
+ name: "low top chunk no tag move",
+ lowOrHigh: lowChunk,
+ movedToShard: st.rs3.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 20},
+ {name: st.rs1.name, range: midChunkRange1, chunks: 20},
+ {name: st.rs2.name, range: highChunkRange, chunks: 5},
+ {name: st.rs3.name, range: midChunkRange2, chunks: 1}
+ ],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "high" top chunk to another tagged shard
- name: "high top chunk with tag move",
- lowOrHigh: highChunk,
- movedToShard: st.rs0.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 5, tags: ["NYC"]},
- {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: st.rs2.name, range: highChunkRange, chunks: 20, tags: ["NYC"]},
- {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]}
- ],
- tagRanges: [
- {range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}
- ],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to another tagged shard
+ name: "high top chunk with tag move",
+ lowOrHigh: highChunk,
+ movedToShard: st.rs0.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: st.rs2.name, range: highChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]}
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: highChunkInserts
},
{
- // Test auto-split on the "high" top chunk to another shard
- name: "high top chunk no tag move",
- lowOrHigh: highChunk,
- movedToShard: st.rs3.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 5},
- {name: st.rs1.name, range: midChunkRange1, chunks: 20},
- {name: st.rs2.name, range: highChunkRange, chunks: 20},
- {name: st.rs3.name, range: midChunkRange2, chunks: 1}
- ],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to another shard
+ name: "high top chunk no tag move",
+ lowOrHigh: highChunk,
+ movedToShard: st.rs3.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 5},
+ {name: st.rs1.name, range: midChunkRange1, chunks: 20},
+ {name: st.rs2.name, range: highChunkRange, chunks: 20},
+ {name: st.rs3.name, range: midChunkRange2, chunks: 1}
+ ],
+ inserts: highChunkInserts
},
{
- // Test auto-split on the "high" top chunk to same tagged shard
- name: "high top chunk with tag no move",
- lowOrHigh: highChunk,
- movedToShard: st.rs2.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 20, tags: ["NYC"]},
- {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: st.rs2.name, range: highChunkRange, chunks: 5, tags: ["NYC"]},
- {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]}
- ],
- tagRanges: [
- {range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}
- ],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to same tagged shard
+ name: "high top chunk with tag no move",
+ lowOrHigh: highChunk,
+ movedToShard: st.rs2.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: st.rs2.name, range: highChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]}
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: highChunkInserts
},
{
- // Test auto-split on the "high" top chunk to same shard
- name: "high top chunk no tag no move",
- lowOrHigh: highChunk,
- movedToShard: st.rs2.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 20},
- {name: st.rs1.name, range: midChunkRange1, chunks: 20},
- {name: st.rs2.name, range: highChunkRange, chunks: 1},
- {name: st.rs3.name, range: midChunkRange2, chunks: 5}
- ],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to same shard
+ name: "high top chunk no tag no move",
+ lowOrHigh: highChunk,
+ movedToShard: st.rs2.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 20},
+ {name: st.rs1.name, range: midChunkRange1, chunks: 20},
+ {name: st.rs2.name, range: highChunkRange, chunks: 1},
+ {name: st.rs3.name, range: midChunkRange2, chunks: 5}
+ ],
+ inserts: highChunkInserts
},
];
@@ -296,20 +296,20 @@ st.ensurePrimaryShard(dbName, st.rs0.name);
var singleNodeTests = [
{
- // Test auto-split on the "low" top chunk on single node shard
- name: "single node shard - low top chunk",
- lowOrHigh: lowChunk,
- movedToShard: st.rs0.name,
- shards: [{name: st.rs0.name, range: lowChunkRange, chunks: 2}],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk on single node shard
+ name: "single node shard - low top chunk",
+ lowOrHigh: lowChunk,
+ movedToShard: st.rs0.name,
+ shards: [{name: st.rs0.name, range: lowChunkRange, chunks: 2}],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "high" top chunk on single node shard
- name: "single node shard - high top chunk",
- lowOrHigh: highChunk,
- movedToShard: st.rs0.name,
- shards: [{name: st.rs0.name, range: highChunkRange, chunks: 2}],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk on single node shard
+ name: "single node shard - high top chunk",
+ lowOrHigh: highChunk,
+ movedToShard: st.rs0.name,
+ shards: [{name: st.rs0.name, range: highChunkRange, chunks: 2}],
+ inserts: highChunkInserts
},
];
@@ -336,26 +336,26 @@ configDB = st.s.getDB('config');
var maxSizeTests = [
{
- // Test auto-split on the "low" top chunk with maxSize on destination shard
- name: "maxSize - low top chunk",
- lowOrHigh: lowChunk,
- movedToShard: st.rs0.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 10},
- {name: st.rs1.name, range: highChunkRange, chunks: 1}
- ],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk with maxSize on destination shard
+ name: "maxSize - low top chunk",
+ lowOrHigh: lowChunk,
+ movedToShard: st.rs0.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 10},
+ {name: st.rs1.name, range: highChunkRange, chunks: 1}
+ ],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "high" top chunk with maxSize on destination shard
- name: "maxSize - high top chunk",
- lowOrHigh: highChunk,
- movedToShard: st.rs0.name,
- shards: [
- {name: st.rs0.name, range: highChunkRange, chunks: 10},
- {name: st.rs1.name, range: lowChunkRange, chunks: 1}
- ],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk with maxSize on destination shard
+ name: "maxSize - high top chunk",
+ lowOrHigh: highChunk,
+ movedToShard: st.rs0.name,
+ shards: [
+ {name: st.rs0.name, range: highChunkRange, chunks: 10},
+ {name: st.rs1.name, range: lowChunkRange, chunks: 1}
+ ],
+ inserts: highChunkInserts
},
];
diff --git a/jstests/sharding/top_chunk_split.js b/jstests/sharding/top_chunk_split.js
index 5aeeb14ddfd..d7d6c4cda0f 100644
--- a/jstests/sharding/top_chunk_split.js
+++ b/jstests/sharding/top_chunk_split.js
@@ -5,144 +5,139 @@
* entire shard key space.
*/
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 1});
-
- var testDB = st.s.getDB('test');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
-
- var callSplit = function(db, minKey, maxKey, splitPoints) {
- var res = st.s.adminCommand({getShardVersion: "test.user"});
- assert.commandWorked(res);
- var shardVersion = [res.version, res.versionEpoch];
- return db.runCommand({
- splitChunk: 'test.user',
- from: st.shard0.shardName,
- min: minKey,
- max: maxKey,
- keyPattern: {x: 1},
- splitKeys: splitPoints,
- epoch: res.versionEpoch,
- });
- };
-
- var tests = [
- //
- // Lower extreme chunk tests.
- //
-
- // All chunks have 1 doc.
- //
- // Expected doc counts for new chunks:
- // [ MinKey, -2 ): 1
- // [ -2, -1 ): 1
- // [ -1, 0): 1
- //
- function(db) {
- var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -2}, {x: -1}]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, {x: MinKey}) == 0,
- tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, {x: -2}) == 0,
- tojson(res.shouldMigrate.max));
- },
-
- // One chunk has single doc, extreme doesn't.
- //
- // Expected doc counts for new chunks:
- // [ MinKey, -1 ): 2
- // [ -1, 0): 1
- //
- function(db) {
- var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -1}]);
- assert.commandWorked(res);
- assert.eq(res.shouldMigrate, null, tojson(res));
- },
-
- // Only extreme has single doc.
- //
- // Expected doc counts for new chunks:
- // [ MinKey, -2 ): 1
- // [ -2, 0): 2
- //
- function(db) {
- var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -2}]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, {x: MinKey}) == 0,
- tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, {x: -2}) == 0,
- tojson(res.shouldMigrate.max));
- },
-
- //
- // Upper extreme chunk tests.
- //
-
- // All chunks have 1 doc.
- //
- // Expected doc counts for new chunks:
- // [ 0, 1 ): 1
- // [ 1, 2 ): 1
- // [ 2, MaxKey): 1
- //
- function(db) {
- var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 1}, {x: 2}]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, {x: 2}) == 0,
- tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, {x: MaxKey}) == 0,
- tojson(res.shouldMigrate.max));
- },
-
- // One chunk has single doc, extreme doesn't.
- //
- // Expected doc counts for new chunks:
- // [ 0, 1 ): 1
- // [ 1, MaxKey): 2
- //
- function(db) {
- var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 1}]);
- assert.commandWorked(res);
- assert.eq(res.shouldMigrate, null, tojson(res));
- },
-
- // Only extreme has single doc.
- //
- // Expected doc counts for new chunks:
- // [ 0, 2 ): 2
- // [ 2, MaxKey): 1
- //
- function(db) {
- var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 2}]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, {x: 2}) == 0,
- tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, {x: MaxKey}) == 0,
- tojson(res.shouldMigrate.max));
- },
- ];
-
- tests.forEach(function(test) {
- // setup
- assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
-
- for (var x = -3; x < 3; x++) {
- testDB.user.insert({x: x});
- }
-
- // run test
- test(st.rs0.getPrimary().getDB('admin'));
-
- // teardown
- testDB.user.drop();
+'use strict';
+
+var st = new ShardingTest({shards: 1});
+
+var testDB = st.s.getDB('test');
+assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+
+var callSplit = function(db, minKey, maxKey, splitPoints) {
+ var res = st.s.adminCommand({getShardVersion: "test.user"});
+ assert.commandWorked(res);
+ var shardVersion = [res.version, res.versionEpoch];
+ return db.runCommand({
+ splitChunk: 'test.user',
+ from: st.shard0.shardName,
+ min: minKey,
+ max: maxKey,
+ keyPattern: {x: 1},
+ splitKeys: splitPoints,
+ epoch: res.versionEpoch,
});
-
- st.stop();
-
+};
+
+var tests = [
+ //
+ // Lower extreme chunk tests.
+ //
+
+ // All chunks have 1 doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ MinKey, -2 ): 1
+ // [ -2, -1 ): 1
+ // [ -1, 0): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -2}, {x: -1}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: MinKey}) == 0,
+ tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: -2}) == 0, tojson(res.shouldMigrate.max));
+ },
+
+ // One chunk has single doc, extreme doesn't.
+ //
+ // Expected doc counts for new chunks:
+ // [ MinKey, -1 ): 2
+ // [ -1, 0): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -1}]);
+ assert.commandWorked(res);
+ assert.eq(res.shouldMigrate, null, tojson(res));
+ },
+
+ // Only extreme has single doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ MinKey, -2 ): 1
+ // [ -2, 0): 2
+ //
+ function(db) {
+ var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -2}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: MinKey}) == 0,
+ tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: -2}) == 0, tojson(res.shouldMigrate.max));
+ },
+
+ //
+ // Upper extreme chunk tests.
+ //
+
+ // All chunks have 1 doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ 0, 1 ): 1
+ // [ 1, 2 ): 1
+ // [ 2, MaxKey): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 1}, {x: 2}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: 2}) == 0, tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: MaxKey}) == 0,
+ tojson(res.shouldMigrate.max));
+ },
+
+ // One chunk has single doc, extreme doesn't.
+ //
+ // Expected doc counts for new chunks:
+ // [ 0, 1 ): 1
+ // [ 1, MaxKey): 2
+ //
+ function(db) {
+ var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 1}]);
+ assert.commandWorked(res);
+ assert.eq(res.shouldMigrate, null, tojson(res));
+ },
+
+ // Only extreme has single doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ 0, 2 ): 2
+ // [ 2, MaxKey): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 2}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: 2}) == 0, tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: MaxKey}) == 0,
+ tojson(res.shouldMigrate.max));
+ },
+];
+
+tests.forEach(function(test) {
+ // setup
+ assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
+
+ for (var x = -3; x < 3; x++) {
+ testDB.user.insert({x: x});
+ }
+
+ // run test
+ test(st.rs0.getPrimary().getDB('admin'));
+
+ // teardown
+ testDB.user.drop();
+});
+
+st.stop();
})();
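
For comparison, user-facing splits go through the mongos-level split command rather than the internal splitChunk command used above; a brief sketch against the same namespace (both forms shown are the documented admin API):

// Sketch: split at an exact key, or let mongos pick the median of the chunk owning a key.
assert.commandWorked(st.s.adminCommand({split: 'test.user', middle: {x: -2}}));
assert.commandWorked(st.s.adminCommand({split: 'test.user', find: {x: 2}}));
// The resulting chunk boundaries are visible in the config database.
printjson(st.s.getDB('config').chunks.find({ns: 'test.user'}).toArray());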
diff --git a/jstests/sharding/trace_missing_docs_test.js b/jstests/sharding/trace_missing_docs_test.js
index b09003617d4..329ad529ac3 100644
--- a/jstests/sharding/trace_missing_docs_test.js
+++ b/jstests/sharding/trace_missing_docs_test.js
@@ -2,45 +2,45 @@
load('jstests/libs/trace_missing_docs.js');
(function() {
- 'use strict';
+'use strict';
- var testDocMissing = function(useReplicaSet) {
- var options = {
- rs: useReplicaSet,
- shardOptions: {oplogSize: 10},
- rsOptions: {nodes: 1, oplogSize: 10}
- };
+var testDocMissing = function(useReplicaSet) {
+ var options = {
+ rs: useReplicaSet,
+ shardOptions: {oplogSize: 10},
+ rsOptions: {nodes: 1, oplogSize: 10}
+ };
- var st = new ShardingTest({shards: 2, mongos: 1, other: options});
+ var st = new ShardingTest({shards: 2, mongos: 1, other: options});
- var mongos = st.s0;
- var coll = mongos.getCollection("foo.bar");
- var admin = mongos.getDB("admin");
+ var mongos = st.s0;
+ var coll = mongos.getCollection("foo.bar");
+ var admin = mongos.getDB("admin");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- coll.ensureIndex({sk: 1});
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {sk: 1}}));
+ coll.ensureIndex({sk: 1});
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {sk: 1}}));
- assert.writeOK(coll.insert({_id: 12345, sk: 67890, hello: "world"}));
- assert.writeOK(coll.update({_id: 12345}, {$set: {baz: 'biz'}}));
- assert.writeOK(coll.update({sk: 67890}, {$set: {baz: 'boz'}}));
+ assert.writeOK(coll.insert({_id: 12345, sk: 67890, hello: "world"}));
+ assert.writeOK(coll.update({_id: 12345}, {$set: {baz: 'biz'}}));
+ assert.writeOK(coll.update({sk: 67890}, {$set: {baz: 'boz'}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: coll + "", find: {sk: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: coll + "", find: {sk: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- st.printShardingStatus();
+ st.printShardingStatus();
- var ops = traceMissingDoc(coll, {_id: 12345, sk: 67890});
+ var ops = traceMissingDoc(coll, {_id: 12345, sk: 67890});
- assert.eq(ops[0].op, 'i');
- assert.eq(ops.length, 5);
+ assert.eq(ops[0].op, 'i');
+ assert.eq(ops.length, 5);
- jsTest.log("DONE! (using rs)");
+ jsTest.log("DONE! (using rs)");
- st.stop();
- };
+ st.stop();
+};
- testDocMissing(true);
+testDocMissing(true);
})();
diff --git a/jstests/sharding/transactions_causal_consistency.js b/jstests/sharding/transactions_causal_consistency.js
index 5ab2c8e9aba..e2f6a9aed58 100644
--- a/jstests/sharding/transactions_causal_consistency.js
+++ b/jstests/sharding/transactions_causal_consistency.js
@@ -7,78 +7,77 @@
// uses_transactions,
// ]
(function() {
- "use strict";
+"use strict";
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
- const st = new ShardingTest({shards: 2, mongos: 2});
+const st = new ShardingTest({shards: 2, mongos: 2});
- enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- // Set up a sharded collection with 2 chunks, [min, 0) and [0, max), one on each shard, with one
- // document in each.
+// Set up a sharded collection with 2 chunks, [min, 0) and [0, max), one on each shard, with one
+// document in each.
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
+
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+
+// Verifies transactions using causal consistency read all causally prior operations.
+function runTest(st, readConcern) {
+ jsTestLog("Testing readConcern: " + tojson(readConcern));
+
+ const session = st.s.startSession({causalConsistency: true});
+ const sessionDB = session.getDatabase(dbName);
+
+ // Insert data to one shard in a causally consistent session.
+ const docToInsert = {_id: 5};
+ assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [docToInsert]}));
+
+ // Through a separate router, move the chunk that the document was inserted into, so the
+ // original router is stale when it starts its transaction.
+ const otherRouter = st.s1;
assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
-
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
-
- // Verifies transactions using causal consistency read all causally prior operations.
- function runTest(st, readConcern) {
- jsTestLog("Testing readConcern: " + tojson(readConcern));
-
- const session = st.s.startSession({causalConsistency: true});
- const sessionDB = session.getDatabase(dbName);
-
- // Insert data to one shard in a causally consistent session.
- const docToInsert = {_id: 5};
- assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [docToInsert]}));
-
- // Through a separate router move the chunk that was inserted to, so the original router is
- // stale when it starts its transaction.
- const otherRouter = st.s1;
- assert.commandWorked(
- otherRouter.adminCommand({moveChunk: ns, find: docToInsert, to: st.shard0.shardName}));
-
- session.startTransaction({readConcern: readConcern});
-
- // The transaction should always see the document written earlier through its session,
- // regardless of the move.
- //
- // Note: until transactions can read from secondaries and/or disabling speculative snapshot
- // is allowed, read concerns that do not require global snapshots (i.e. local and majority)
- // will always read the inserted document here because the local snapshot established on
- // this shard will include all currently applied operations, which must include all earlier
- // acknowledged writes.
- assert.docEq(docToInsert,
- sessionDB[collName].findOne(docToInsert),
- "sharded transaction with read concern " + tojson(readConcern) +
- " did not see expected document");
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Clean up for the next iteration.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: docToInsert, to: st.shard1.shardName}));
- assert.writeOK(sessionDB[collName].remove(docToInsert));
- }
-
- const kAllowedReadConcernLevels = ["local", "majority", "snapshot"];
- for (let readConcernLevel of kAllowedReadConcernLevels) {
- runTest(st, {level: readConcernLevel});
- }
-
- disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
-
- st.stop();
+ otherRouter.adminCommand({moveChunk: ns, find: docToInsert, to: st.shard0.shardName}));
+
+ session.startTransaction({readConcern: readConcern});
+
+ // The transaction should always see the document written earlier through its session,
+ // regardless of the move.
+ //
+ // Note: until transactions can read from secondaries and/or disabling speculative snapshot
+ // is allowed, read concerns that do not require global snapshots (i.e. local and majority)
+ // will always read the inserted document here because the local snapshot established on
+ // this shard will include all currently applied operations, which must include all earlier
+ // acknowledged writes.
+ assert.docEq(docToInsert,
+ sessionDB[collName].findOne(docToInsert),
+ "sharded transaction with read concern " + tojson(readConcern) +
+ " did not see expected document");
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // Clean up for the next iteration.
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: docToInsert, to: st.shard1.shardName}));
+ assert.writeOK(sessionDB[collName].remove(docToInsert));
+}
+
+const kAllowedReadConcernLevels = ["local", "majority", "snapshot"];
+for (let readConcernLevel of kAllowedReadConcernLevels) {
+ runTest(st, {level: readConcernLevel});
+}
+
+disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+
+st.stop();
})();
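
Stripped of the harness, the pattern runTest() exercises is just a causally consistent session whose transaction observes the session's own earlier write; a minimal sketch (database and collection names follow the test, and the connection is assumed to be a mongos):

// Sketch: a causally consistent session sees its own prior write inside a transaction.
const session = db.getMongo().startSession({causalConsistency: true});
const sessionColl = session.getDatabase("test")["foo"];
assert.writeOK(sessionColl.insert({_id: 5}));              // causally prior write
session.startTransaction({readConcern: {level: "majority"}});
assert.neq(null, sessionColl.findOne({_id: 5}));           // visible to the transaction
session.commitTransaction();
session.endSession();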
diff --git a/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js b/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js
index ffb7d9dde37..11a2c39997f 100644
--- a/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js
+++ b/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js
@@ -5,47 +5,45 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
- const st = new ShardingTest({shards: 1});
+const st = new ShardingTest({shards: 1});
- // Set up a sharded and unsharded collection, each with one document.
+// Set up a sharded and unsharded collection, each with one document.
- const unshardedDbName = "unsharded_db";
- const unshardedCollName = "unsharded_coll";
+const unshardedDbName = "unsharded_db";
+const unshardedCollName = "unsharded_coll";
- const shardedDbName = "sharded_db";
- const shardedCollName = "sharded_coll";
- const shardedNs = shardedDbName + "." + shardedCollName;
+const shardedDbName = "sharded_db";
+const shardedCollName = "sharded_coll";
+const shardedNs = shardedDbName + "." + shardedCollName;
- assert.commandWorked(st.s.adminCommand({enableSharding: shardedDbName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: shardedNs, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({enableSharding: shardedDbName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: shardedNs, key: {_id: 1}}));
- const session = st.s.startSession();
- const unshardedCollDB = session.getDatabase(unshardedDbName);
- const shardedCollDB = session.getDatabase(shardedDbName);
+const session = st.s.startSession();
+const unshardedCollDB = session.getDatabase(unshardedDbName);
+const shardedCollDB = session.getDatabase(shardedDbName);
- assert.writeOK(unshardedCollDB[unshardedCollName].insert({_id: "jack"}));
- assert.writeOK(shardedCollDB[shardedCollName].insert({_id: "jack"}));
+assert.writeOK(unshardedCollDB[unshardedCollName].insert({_id: "jack"}));
+assert.writeOK(shardedCollDB[shardedCollName].insert({_id: "jack"}));
- // Reload metadata to avoid stale config or stale database version errors.
- flushRoutersAndRefreshShardMetadata(st, {ns: shardedNs, dbNames: [unshardedDbName]});
+// Reload metadata to avoid stale config or stale database version errors.
+flushRoutersAndRefreshShardMetadata(st, {ns: shardedNs, dbNames: [unshardedDbName]});
- // Can run distinct on an unsharded collection.
- session.startTransaction();
- assert.eq(unshardedCollDB.runCommand({distinct: unshardedCollName, key: "_id"}).values,
- ["jack"]);
- assert.commandWorked(session.commitTransaction_forTesting());
+// Can run distinct on an unsharded collection.
+session.startTransaction();
+assert.eq(unshardedCollDB.runCommand({distinct: unshardedCollName, key: "_id"}).values, ["jack"]);
+assert.commandWorked(session.commitTransaction_forTesting());
- // Cannot run distinct on a sharded collection.
- session.startTransaction();
- assert.commandFailedWithCode(shardedCollDB.runCommand({distinct: shardedCollName, key: "_id"}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// Cannot run distinct on a sharded collection.
+session.startTransaction();
+assert.commandFailedWithCode(shardedCollDB.runCommand({distinct: shardedCollName, key: "_id"}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- session.endSession();
- st.stop();
+session.endSession();
+st.stop();
})();
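
The restriction verified above has a documented workaround: express the distinct as an aggregation with $group, which is permitted on sharded collections inside a transaction on servers that support sharded transactions. A sketch reusing the names from this test (the workaround itself is not part of the test file):

// Sketch: emulate distinct-on-_id for the sharded collection inside a transaction.
session.startTransaction();
const values = shardedCollDB[shardedCollName]
                   .aggregate([{$group: {_id: "$_id"}}])
                   .toArray()
                   .map(doc => doc._id);
assert.eq(["jack"], values);
assert.commandWorked(session.commitTransaction_forTesting());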
diff --git a/jstests/sharding/transactions_error_labels.js b/jstests/sharding/transactions_error_labels.js
index 755f2120167..f295dddf588 100644
--- a/jstests/sharding/transactions_error_labels.js
+++ b/jstests/sharding/transactions_error_labels.js
@@ -1,198 +1,195 @@
// Test TransientTransactionErrors error label in mongos write commands.
// @tags: [uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
-
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- const failCommandWithError = function(rst, {commandToFail, errorCode, closeConnection}) {
- rst.nodes.forEach(function(node) {
- assert.commandWorked(node.getDB("admin").runCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- closeConnection: closeConnection,
- errorCode: errorCode,
- failCommands: [commandToFail],
- failInternalCommands: true // mongod sees mongos as an internal client
- }
- }));
- });
- };
-
- const failCommandWithWriteConcernError = function(rst, commandToFail) {
- rst.nodes.forEach(function(node) {
- assert.commandWorked(node.getDB("admin").runCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- writeConcernError: {code: NumberInt(12345), errmsg: "dummy"},
- failCommands: [commandToFail],
- failInternalCommands: true // mongod sees mongos as an internal client
- }
- }));
- });
- };
-
- const turnOffFailCommand = function(rst) {
- rst.nodes.forEach(function(node) {
- assert.commandWorked(
- node.getDB("admin").runCommand({configureFailPoint: "failCommand", mode: "off"}));
- });
- };
-
- let numCalls = 0;
- const startTransaction = function(mongosSession, dbName, collName) {
- numCalls++;
- mongosSession.startTransaction();
- return mongosSession.getDatabase(dbName).runCommand({
- insert: collName,
- // Target both chunks, wherever they may be
- documents: [{_id: -1 * numCalls}, {_id: numCalls}],
- readConcern: {level: "snapshot"},
- });
- };
-
- const abortTransactionDirectlyOnParticipant = function(rst, lsid, txnNumber) {
- assert.commandWorked(rst.getPrimary().adminCommand({
- abortTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
+"use strict";
+
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+const failCommandWithError = function(rst, {commandToFail, errorCode, closeConnection}) {
+ rst.nodes.forEach(function(node) {
+ assert.commandWorked(node.getDB("admin").runCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {
+ closeConnection: closeConnection,
+ errorCode: errorCode,
+ failCommands: [commandToFail],
+ failInternalCommands: true // mongod sees mongos as an internal client
+ }
}));
- };
-
- const commitTransaction = function(mongosSession) {
- let res = mongosSession.commitTransaction_forTesting();
- print("commitTransaction response from mongos: " + tojson(res));
- return res;
- };
-
- const checkMongosResponse = function(
- res, expectedErrorCode, expectedErrorLabel, writeConcernErrorExpected) {
- if (expectedErrorCode) {
- assert.eq(0, res.ok, tojson(res));
- assert.eq(expectedErrorCode, res.code, tojson(res));
- } else {
- assert.eq(1, res.ok, tojson(res));
- }
-
- if (expectedErrorLabel) {
- assert.neq(null, res.errorLabels, tojson(res));
- assert.contains(expectedErrorLabel, res.errorLabels, tojson(res));
- } else {
- assert.eq(null, res.errorLabels, tojson(res));
- }
-
- if (writeConcernErrorExpected) {
- assert.neq(null, res.writeConcernError, tojson(res));
- } else {
- assert.eq(null, res.writeConcernError, tojson(res));
- }
- };
-
- const runCommitTests = function(commandSentToShard) {
- jsTest.log("Mongos does not attach any error label if " + commandSentToShard +
- " returns success.");
- assert.commandWorked(startTransaction(mongosSession, dbName, collName));
- res = mongosSession.commitTransaction_forTesting();
- checkMongosResponse(res, null, null, null);
-
- jsTest.log("Mongos does not attach any error label if " + commandSentToShard +
- " returns success with writeConcern error.");
- failCommandWithWriteConcernError(st.rs0, commandSentToShard);
- assert.commandWorked(startTransaction(mongosSession, dbName, collName));
- res = mongosSession.commitTransaction_forTesting();
- checkMongosResponse(res, null, null, true);
- turnOffFailCommand(st.rs0);
-
- jsTest.log("Mongos attaches 'TransientTransactionError' label if " + commandSentToShard +
- " returns NoSuchTransaction.");
- assert.commandWorked(startTransaction(mongosSession, dbName, collName));
- abortTransactionDirectlyOnParticipant(
- st.rs0, mongosSession.getSessionId(), mongosSession.getTxnNumber_forTesting());
- res = mongosSession.commitTransaction_forTesting();
- checkMongosResponse(res, ErrorCodes.NoSuchTransaction, "TransientTransactionError", null);
- turnOffFailCommand(st.rs0);
-
- jsTest.log("Mongos does not attach any error label if " + commandSentToShard +
- " returns NoSuchTransaction with writeConcern error.");
- failCommandWithWriteConcernError(st.rs0, commandSentToShard);
- assert.commandWorked(startTransaction(mongosSession, dbName, collName));
- abortTransactionDirectlyOnParticipant(
- st.rs0, mongosSession.getSessionId(), mongosSession.getTxnNumber_forTesting());
- res = mongosSession.commitTransaction_forTesting();
- checkMongosResponse(res, ErrorCodes.NoSuchTransaction, null, true);
- turnOffFailCommand(st.rs0);
-
- jsTest.log("No error label for network error if " + commandSentToShard +
- " returns network error");
- assert.commandWorked(startTransaction(mongosSession, dbName, collName));
- failCommandWithError(st.rs0, {
- commandToFail: commandSentToShard,
- errorCode: ErrorCodes.InternalError,
- closeConnection: true
- });
- res = mongosSession.commitTransaction_forTesting();
- checkMongosResponse(res, ErrorCodes.HostUnreachable, false /* expectedErrorLabel */, null);
- turnOffFailCommand(st.rs0);
- };
-
- let st = new ShardingTest({shards: 2, config: 1, mongosOptions: {verbose: 3}});
-
- // Create a sharded collection with a chunk on each shard:
- // shard0: [-inf, 0)
- // shard1: [0, +inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
-
- // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
- // from the shards starting, aborting, and restarting the transaction due to needing to
- // refresh after the transaction has started.
- assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
-
- let mongosSession = st.s.startSession();
- let mongosSessionDB = mongosSession.getDatabase(dbName);
-
- let res;
-
- // write statement
- jsTest.log(
- "'TransientTransactionError' label is attached if write statement returns WriteConflict");
- failCommandWithError(
- st.rs0,
- {commandToFail: "insert", errorCode: ErrorCodes.WriteConflict, closeConnection: false});
- res = startTransaction(mongosSession, dbName, collName);
- checkMongosResponse(res, ErrorCodes.WriteConflict, "TransientTransactionError", null);
- turnOffFailCommand(st.rs0);
- assert.commandFailedWithCode(mongosSession.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // statements prior to commit network error
- failCommandWithError(
- st.rs0,
- {commandToFail: "insert", errorCode: ErrorCodes.InternalError, closeConnection: true});
- res = startTransaction(mongosSession, dbName, collName);
- checkMongosResponse(res, ErrorCodes.HostUnreachable, "TransientTransactionError", null);
+ });
+};
+
+const failCommandWithWriteConcernError = function(rst, commandToFail) {
+ rst.nodes.forEach(function(node) {
+ assert.commandWorked(node.getDB("admin").runCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {
+ writeConcernError: {code: NumberInt(12345), errmsg: "dummy"},
+ failCommands: [commandToFail],
+ failInternalCommands: true // mongod sees mongos as an internal client
+ }
+ }));
+ });
+};
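+// The writeConcernError payload used above is arbitrary; the tests below only assert that mongos
+// surfaces a writeConcernError field, not its specific code or message.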
+
+const turnOffFailCommand = function(rst) {
+ rst.nodes.forEach(function(node) {
+ assert.commandWorked(
+ node.getDB("admin").runCommand({configureFailPoint: "failCommand", mode: "off"}));
+ });
+};
+
+let numCalls = 0;
+const startTransaction = function(mongosSession, dbName, collName) {
+ numCalls++;
+ mongosSession.startTransaction();
+ return mongosSession.getDatabase(dbName).runCommand({
+ insert: collName,
+ // Target both chunks, wherever they may be
+ documents: [{_id: -1 * numCalls}, {_id: numCalls}],
+ readConcern: {level: "snapshot"},
+ });
+};
+
+const abortTransactionDirectlyOnParticipant = function(rst, lsid, txnNumber) {
+ assert.commandWorked(rst.getPrimary().adminCommand({
+ abortTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ }));
+};
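+// Note: aborting a transaction directly on the participant (as above) bypasses mongos, so the
+// router's later commitTransaction observes NoSuchTransaction. This is how the NoSuchTransaction
+// cases below are produced.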
+
+const commitTransaction = function(mongosSession) {
+ let res = mongosSession.commitTransaction_forTesting();
+ print("commitTransaction response from mongos: " + tojson(res));
+ return res;
+};
+
+const checkMongosResponse = function(
+ res, expectedErrorCode, expectedErrorLabel, writeConcernErrorExpected) {
+ if (expectedErrorCode) {
+ assert.eq(0, res.ok, tojson(res));
+ assert.eq(expectedErrorCode, res.code, tojson(res));
+ } else {
+ assert.eq(1, res.ok, tojson(res));
+ }
+
+ if (expectedErrorLabel) {
+ assert.neq(null, res.errorLabels, tojson(res));
+ assert.contains(expectedErrorLabel, res.errorLabels, tojson(res));
+ } else {
+ assert.eq(null, res.errorLabels, tojson(res));
+ }
+
+ if (writeConcernErrorExpected) {
+ assert.neq(null, res.writeConcernError, tojson(res));
+ } else {
+ assert.eq(null, res.writeConcernError, tojson(res));
+ }
+};
+
+const runCommitTests = function(commandSentToShard) {
+ jsTest.log("Mongos does not attach any error label if " + commandSentToShard +
+ " returns success.");
+ assert.commandWorked(startTransaction(mongosSession, dbName, collName));
+ res = mongosSession.commitTransaction_forTesting();
+ checkMongosResponse(res, null, null, null);
+
+ jsTest.log("Mongos does not attach any error label if " + commandSentToShard +
+ " returns success with writeConcern error.");
+ failCommandWithWriteConcernError(st.rs0, commandSentToShard);
+ assert.commandWorked(startTransaction(mongosSession, dbName, collName));
+ res = mongosSession.commitTransaction_forTesting();
+ checkMongosResponse(res, null, null, true);
turnOffFailCommand(st.rs0);
- assert.commandFailedWithCode(mongosSession.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- // commitTransaction for single-shard transaction (mongos sends commitTransaction)
- runCommitTests("commitTransaction");
+ jsTest.log("Mongos attaches 'TransientTransactionError' label if " + commandSentToShard +
+ " returns NoSuchTransaction.");
+ assert.commandWorked(startTransaction(mongosSession, dbName, collName));
+ abortTransactionDirectlyOnParticipant(
+ st.rs0, mongosSession.getSessionId(), mongosSession.getTxnNumber_forTesting());
+ res = mongosSession.commitTransaction_forTesting();
+ checkMongosResponse(res, ErrorCodes.NoSuchTransaction, "TransientTransactionError", null);
+ turnOffFailCommand(st.rs0);
- // commitTransaction for multi-shard transaction (mongos sends coordinateCommitTransaction)
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
- flushRoutersAndRefreshShardMetadata(st, {ns});
- runCommitTests("coordinateCommitTransaction");
+ jsTest.log("Mongos does not attach any error label if " + commandSentToShard +
+ " returns NoSuchTransaction with writeConcern error.");
+ failCommandWithWriteConcernError(st.rs0, commandSentToShard);
+ assert.commandWorked(startTransaction(mongosSession, dbName, collName));
+ abortTransactionDirectlyOnParticipant(
+ st.rs0, mongosSession.getSessionId(), mongosSession.getTxnNumber_forTesting());
+ res = mongosSession.commitTransaction_forTesting();
+ checkMongosResponse(res, ErrorCodes.NoSuchTransaction, null, true);
+ turnOffFailCommand(st.rs0);
- st.stop();
+ jsTest.log("No error label for network error if " + commandSentToShard +
+ " returns network error");
+ assert.commandWorked(startTransaction(mongosSession, dbName, collName));
+ failCommandWithError(st.rs0, {
+ commandToFail: commandSentToShard,
+ errorCode: ErrorCodes.InternalError,
+ closeConnection: true
+ });
+ res = mongosSession.commitTransaction_forTesting();
+ checkMongosResponse(res, ErrorCodes.HostUnreachable, false /* expectedErrorLabel */, null);
+ turnOffFailCommand(st.rs0);
+};
+
+let st = new ShardingTest({shards: 2, config: 1, mongosOptions: {verbose: 3}});
+
+// Create a sharded collection with a chunk on each shard:
+// shard0: [-inf, 0)
+// shard1: [0, +inf)
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+
+// These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
+// from the shards starting, aborting, and restarting the transaction due to needing to
+// refresh after the transaction has started.
+assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+
+let mongosSession = st.s.startSession();
+let mongosSessionDB = mongosSession.getDatabase(dbName);
+
+let res;
+
+// write statement
+jsTest.log(
+ "'TransientTransactionError' label is attached if write statement returns WriteConflict");
+failCommandWithError(
+ st.rs0, {commandToFail: "insert", errorCode: ErrorCodes.WriteConflict, closeConnection: false});
+res = startTransaction(mongosSession, dbName, collName);
+checkMongosResponse(res, ErrorCodes.WriteConflict, "TransientTransactionError", null);
+turnOffFailCommand(st.rs0);
+assert.commandFailedWithCode(mongosSession.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+// statements prior to commit network error
+failCommandWithError(
+ st.rs0, {commandToFail: "insert", errorCode: ErrorCodes.InternalError, closeConnection: true});
+res = startTransaction(mongosSession, dbName, collName);
+checkMongosResponse(res, ErrorCodes.HostUnreachable, "TransientTransactionError", null);
+turnOffFailCommand(st.rs0);
+assert.commandFailedWithCode(mongosSession.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+// commitTransaction for single-shard transaction (mongos sends commitTransaction)
+runCommitTests("commitTransaction");
+
+// commitTransaction for multi-shard transaction (mongos sends coordinateCommitTransaction)
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
+flushRoutersAndRefreshShardMetadata(st, {ns});
+runCommitTests("coordinateCommitTransaction");
+
+st.stop();
}());
diff --git a/jstests/sharding/transactions_expiration.js b/jstests/sharding/transactions_expiration.js
index 65178dd1c30..e337a81632c 100644
--- a/jstests/sharding/transactions_expiration.js
+++ b/jstests/sharding/transactions_expiration.js
@@ -5,75 +5,74 @@
// @tags: [uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
+"use strict";
- let st = new ShardingTest({shards: 2});
+let st = new ShardingTest({shards: 2});
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.name);
- assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(st.s.adminCommand({split: 'test.user', middle: {x: 0}}));
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.name);
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+assert.commandWorked(st.s.adminCommand({split: 'test.user', middle: {x: 0}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.name}));
+
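+// Lower transactionLifetimeLimitSeconds to 1 so the idle transaction started below expires and is
+// aborted quickly rather than after the default lifetime.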
+let lowerTxnTimeout = (conn) => {
assert.commandWorked(
- st.s.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.name}));
-
- let lowerTxnTimeout = (conn) => {
- assert.commandWorked(
- conn.getDB('admin').runCommand({setParameter: 1, transactionLifetimeLimitSeconds: 1}));
- };
-
- st.rs0.nodes.forEach(lowerTxnTimeout);
- st.rs1.nodes.forEach(lowerTxnTimeout);
-
- let testDB = st.s.getDB('test');
-
- // Create the collections in the shards outside the transactions.
- assert.commandWorked(testDB.runCommand(
- {insert: 'user', documents: [{x: -1}, {x: 1}], writeConcern: {w: 'majority'}}));
-
- const session = st.s.startSession();
- const sessionDb = session.getDatabase('test');
-
- let txnNumber = 0;
-
- assert.commandWorked(sessionDb.runCommand({
- insert: 'user',
- documents: [{x: -10}, {x: 10}],
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false,
- }));
-
- // We can deterministically wait for the transaction to be aborted by waiting for currentOp
- // to cease reporting the inactive transaction: the transaction should disappear from the
- // currentOp results once aborted.
- assert.soon(
- function() {
- const sessionFilter = {
- active: false,
- opid: {$exists: false},
- desc: "inactive transaction",
- "transaction.parameters.txnNumber": NumberLong(txnNumber),
- "lsid.id": session.getSessionId().id
- };
-
- const priConn = st.rs0.getPrimary();
- const res = priConn.getDB("admin").aggregate(
- [{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter}]);
-
- return (res.itcount() == 0);
- },
- "currentOp reports that the idle transaction still exists, it has not been " +
- "aborted as expected.");
-
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: 'user',
- documents: [{x: -100}, {x: 100}],
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
-
- session.endSession();
-
- st.stop();
+ conn.getDB('admin').runCommand({setParameter: 1, transactionLifetimeLimitSeconds: 1}));
+};
+
+st.rs0.nodes.forEach(lowerTxnTimeout);
+st.rs1.nodes.forEach(lowerTxnTimeout);
+
+let testDB = st.s.getDB('test');
+
+// Create the collections in the shards outside the transactions.
+assert.commandWorked(testDB.runCommand(
+ {insert: 'user', documents: [{x: -1}, {x: 1}], writeConcern: {w: 'majority'}}));
+
+const session = st.s.startSession();
+const sessionDb = session.getDatabase('test');
+
+let txnNumber = 0;
+
+assert.commandWorked(sessionDb.runCommand({
+ insert: 'user',
+ documents: [{x: -10}, {x: 10}],
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false,
+}));
+
+// We can deterministically wait for the transaction to be aborted by waiting for currentOp
+// to cease reporting the inactive transaction: the transaction should disappear from the
+// currentOp results once aborted.
+assert.soon(
+ function() {
+ const sessionFilter = {
+ active: false,
+ opid: {$exists: false},
+ desc: "inactive transaction",
+ "transaction.parameters.txnNumber": NumberLong(txnNumber),
+ "lsid.id": session.getSessionId().id
+ };
+
+ const priConn = st.rs0.getPrimary();
+ const res = priConn.getDB("admin").aggregate(
+ [{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter}]);
+
+ return (res.itcount() == 0);
+ },
+ "currentOp reports that the idle transaction still exists, it has not been " +
+ "aborted as expected.");
+
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: 'user',
+ documents: [{x: -100}, {x: 100}],
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+}),
+ ErrorCodes.NoSuchTransaction);
+
+session.endSession();
+
+st.stop();
}());
diff --git a/jstests/sharding/transactions_implicit_abort.js b/jstests/sharding/transactions_implicit_abort.js
index a965c7bec87..003b6e4cefe 100644
--- a/jstests/sharding/transactions_implicit_abort.js
+++ b/jstests/sharding/transactions_implicit_abort.js
@@ -3,65 +3,58 @@
//
// @tags: [requires_sharding, uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
+"use strict";
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
+const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
- // Set up a sharded collection with one chunk on each shard.
+// Set up a sharded collection with one chunk on each shard.
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
+const session = st.s.startSession();
+const sessionDB = session.getDatabase(dbName);
- //
- // An unhandled error during a transaction should try to abort it on all participants.
- //
+//
+// An unhandled error during a transaction should try to abort it on all participants.
+//
- session.startTransaction();
+session.startTransaction();
- // Targets Shard0 successfully.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: -1}}));
+// Targets Shard0 successfully.
+assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: -1}}));
- // Sharding tests require failInternalCommands: true, since the mongos appears to mongod to be
- // an internal client.
- assert.commandWorked(st.rs1.getPrimary().adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- errorCode: ErrorCodes.InternalError,
- failCommands: ["find"],
- failInternalCommands: true
- }
- }));
+// Sharding tests require failInternalCommands: true, since the mongos appears to mongod to be
+// an internal client.
+assert.commandWorked(st.rs1.getPrimary().adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.InternalError, failCommands: ["find"], failInternalCommands: true}
+}));
- // Targets Shard1 and encounters a transaction fatal error.
- assert.commandFailedWithCode(sessionDB.runCommand({find: collName, filter: {_id: 1}}),
- ErrorCodes.InternalError);
+// Targets Shard1 and encounters a transaction fatal error.
+assert.commandFailedWithCode(sessionDB.runCommand({find: collName, filter: {_id: 1}}),
+ ErrorCodes.InternalError);
- assert.commandWorked(
- st.rs1.getPrimary().adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+assert.commandWorked(
+ st.rs1.getPrimary().adminCommand({configureFailPoint: "failCommand", mode: "off"}));
- // The transaction should have been aborted on both shards.
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// The transaction should have been aborted on both shards.
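+// The explicit abortTransaction below should also return NoSuchTransaction, consistent with
+// mongos having aborted the transaction implicitly when the statement failed.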
+assertNoSuchTransactionOnAllShards(st, session.getSessionId(), session.getTxnNumber_forTesting());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/transactions_multi_writes.js b/jstests/sharding/transactions_multi_writes.js
index 403ab4dcc8b..e4c8b43cd95 100644
--- a/jstests/sharding/transactions_multi_writes.js
+++ b/jstests/sharding/transactions_multi_writes.js
@@ -6,145 +6,143 @@
// uses_transactions,
// ]
(function() {
- "use strict";
-
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- const st = new ShardingTest({shards: 3, config: 1, mongos: 2});
-
- enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
-
- // Set up a sharded collection with 3 chunks, [min, 0), [0, 10), [10, max), one on each shard,
- // with one document in each.
-
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
-
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {skey: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {skey: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {skey: 10}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {skey: 5}, to: st.shard1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {skey: 15}, to: st.shard2.shardName}));
-
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 3, counter: 0, skey: 15}));
-
- // Runs the given multi-write and asserts a manually inserted orphan document is not affected.
- // The write is assumed to target chunks [min, 0) and [0, 10), which begin on shard0 and shard1,
- // respectively.
- function runTest(st, session, writeCmd, staleRouter) {
- const isUpdate = writeCmd.hasOwnProperty("update");
- const sessionDB = session.getDatabase(dbName);
-
- let orphanShardName;
- let orphanDoc = {_id: 2, counter: 0, skey: 5};
- if (staleRouter) {
- // Using a separate router, move a chunk that will be targeted by the write to a shard
- // that would not be targeted by a stale router. Put the orphan on the shard that
- // previously owned the chunk to verify the multi-write obeys the shard versioning
- // protocol.
- assert.commandWorked(st.s1.adminCommand(
- {moveChunk: ns, find: {skey: 5}, to: st.shard2.shardName, _waitForDelete: true}));
- orphanShardName = "rs1";
- } else {
- // Otherwise put the orphan on a shard that should not be targeted by a fresh router to
- // verify the multi-write is not broadcast to all shards.
- orphanShardName = "rs2";
- }
-
- const orphanShardDB = st[orphanShardName].getPrimary().getDB(dbName);
- assert.writeOK(orphanShardDB[collName].insert(orphanDoc, {writeConcern: {w: "majority"}}));
-
- // Start a transaction with majority read concern to ensure the orphan will be visible if
- // its shard is targeted and send the multi-write.
- session.startTransaction({readConcern: {level: "majority"}});
- assert.commandWorked(sessionDB.runCommand(writeCmd));
-
- // The write shouldn't be visible until the transaction commits.
- assert.sameMembers(st.getDB(dbName)[collName].find().toArray(), [
- {_id: 1, counter: 0, skey: -5},
- {_id: 2, counter: 0, skey: 5},
- {_id: 3, counter: 0, skey: 15}
- ]);
-
- // Commit the transaction and verify the write was successful.
- assert.commandWorked(session.commitTransaction_forTesting());
- if (isUpdate) {
- assert.sameMembers(st.getDB(dbName)[collName].find().toArray(),
- [
- {_id: 1, counter: 1, skey: -5},
- {_id: 2, counter: 1, skey: 5},
- {_id: 3, counter: 0, skey: 15}
- ],
- "document mismatch for update, stale: " + staleRouter + ", cmd: " +
- tojson(writeCmd));
- } else { // isDelete
- assert.sameMembers(st.getDB(dbName)[collName].find().toArray(),
- [{_id: 3, counter: 0, skey: 15}],
- "document mismatch for delete, stale: " + staleRouter + ", cmd: " +
- tojson(writeCmd));
- }
-
- // The orphaned document should not have been affected.
- assert.docEq(orphanDoc,
- orphanShardDB[collName].findOne({skey: orphanDoc.skey}),
- "document mismatch for orphaned doc, stale: " + staleRouter + ", cmd: " +
- tojson(writeCmd));
-
- // Reset the database state for the next iteration.
- if (isUpdate) {
- assert.writeOK(sessionDB[collName].update({}, {$set: {counter: 0}}, {multi: true}));
- } else { // isDelete
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
- }
-
- assert.writeOK(orphanShardDB[collName].remove({skey: orphanDoc.skey}));
-
- if (staleRouter) {
- // Move the chunk back with the main router so it isn't stale.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: ns, find: {skey: 5}, to: st.shard1.shardName, _waitForDelete: true}));
- }
+"use strict";
+
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+const st = new ShardingTest({shards: 3, config: 1, mongos: 2});
+
+enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+
+// Set up a sharded collection with 3 chunks, [min, 0), [0, 10), [10, max), one on each shard,
+// with one document in each.
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {skey: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {skey: 0}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {skey: 10}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {skey: 5}, to: st.shard1.shardName}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {skey: 15}, to: st.shard2.shardName}));
+
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 3, counter: 0, skey: 15}));
+
+// Runs the given multi-write and asserts a manually inserted orphan document is not affected.
+// The write is assumed to target chunks [min, 0) and [0, 10), which begin on shard0 and shard1,
+// respectively.
+function runTest(st, session, writeCmd, staleRouter) {
+ const isUpdate = writeCmd.hasOwnProperty("update");
+ const sessionDB = session.getDatabase(dbName);
+
+ let orphanShardName;
+ let orphanDoc = {_id: 2, counter: 0, skey: 5};
+ if (staleRouter) {
+ // Using a separate router, move a chunk that will be targeted by the write to a shard
+ // that would not be targeted by a stale router. Put the orphan on the shard that
+ // previously owned the chunk to verify the multi-write obeys the shard versioning
+ // protocol.
+ assert.commandWorked(st.s1.adminCommand(
+ {moveChunk: ns, find: {skey: 5}, to: st.shard2.shardName, _waitForDelete: true}));
+ orphanShardName = "rs1";
+ } else {
+ // Otherwise put the orphan on a shard that should not be targeted by a fresh router to
+ // verify the multi-write is not broadcast to all shards.
+ orphanShardName = "rs2";
}
- const session = st.s.startSession();
+ const orphanShardDB = st[orphanShardName].getPrimary().getDB(dbName);
+ assert.writeOK(orphanShardDB[collName].insert(orphanDoc, {writeConcern: {w: "majority"}}));
+
+ // Start a transaction with majority read concern to ensure the orphan will be visible if
+ // its shard is targeted and send the multi-write.
+ session.startTransaction({readConcern: {level: "majority"}});
+ assert.commandWorked(sessionDB.runCommand(writeCmd));
+
+ // The write shouldn't be visible until the transaction commits.
+ assert.sameMembers(st.getDB(dbName)[collName].find().toArray(), [
+ {_id: 1, counter: 0, skey: -5},
+ {_id: 2, counter: 0, skey: 5},
+ {_id: 3, counter: 0, skey: 15}
+ ]);
+
+ // Commit the transaction and verify the write was successful.
+ assert.commandWorked(session.commitTransaction_forTesting());
+ if (isUpdate) {
+ assert.sameMembers(
+ st.getDB(dbName)[collName].find().toArray(),
+ [
+ {_id: 1, counter: 1, skey: -5},
+ {_id: 2, counter: 1, skey: 5},
+ {_id: 3, counter: 0, skey: 15}
+ ],
+ "document mismatch for update, stale: " + staleRouter + ", cmd: " + tojson(writeCmd));
+ } else { // isDelete
+ assert.sameMembers(
+ st.getDB(dbName)[collName].find().toArray(),
+ [{_id: 3, counter: 0, skey: 15}],
+ "document mismatch for delete, stale: " + staleRouter + ", cmd: " + tojson(writeCmd));
+ }
+
+ // The orphaned document should not have been affected.
+ assert.docEq(
+ orphanDoc,
+ orphanShardDB[collName].findOne({skey: orphanDoc.skey}),
+ "document mismatch for orphaned doc, stale: " + staleRouter + ", cmd: " + tojson(writeCmd));
+
+ // Reset the database state for the next iteration.
+ if (isUpdate) {
+ assert.writeOK(sessionDB[collName].update({}, {$set: {counter: 0}}, {multi: true}));
+ } else { // isDelete
+ assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
+ assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
+ }
+
+ assert.writeOK(orphanShardDB[collName].remove({skey: orphanDoc.skey}));
+
+ if (staleRouter) {
+ // Move the chunk back with the main router so it isn't stale.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: ns, find: {skey: 5}, to: st.shard1.shardName, _waitForDelete: true}));
+ }
+}
+
+const session = st.s.startSession();
- let multiUpdate = {
- update: collName,
- updates: [{q: {skey: {$lte: 5}}, u: {$inc: {counter: 1}}, multi: true}]
- };
+let multiUpdate = {
+ update: collName,
+ updates: [{q: {skey: {$lte: 5}}, u: {$inc: {counter: 1}}, multi: true}]
+};
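+
+// Each multi-write below is run in both unordered and ordered form to cover both batch
+// configurations.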
- multiUpdate.ordered = false;
- runTest(st, session, multiUpdate, false /*staleRouter*/);
- // TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
- // runTest(st, session, multiUpdate, true /*staleRouter*/);
+multiUpdate.ordered = false;
+runTest(st, session, multiUpdate, false /*staleRouter*/);
+// TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
+// runTest(st, session, multiUpdate, true /*staleRouter*/);
- multiUpdate.ordered = true;
- runTest(st, session, multiUpdate, false /*staleRouter*/);
- // TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
- // runTest(st, session, multiUpdate, true /*staleRouter*/);
+multiUpdate.ordered = true;
+runTest(st, session, multiUpdate, false /*staleRouter*/);
+// TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
+// runTest(st, session, multiUpdate, true /*staleRouter*/);
- let multiDelete = {delete: collName, deletes: [{q: {skey: {$lte: 5}}, limit: 0}]};
+let multiDelete = {delete: collName, deletes: [{q: {skey: {$lte: 5}}, limit: 0}]};
- multiDelete.ordered = false;
- runTest(st, session, multiDelete, false /*staleRouter*/);
- // TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
- // runTest(st, session, multiDelete, true /*staleRouter*/);
+multiDelete.ordered = false;
+runTest(st, session, multiDelete, false /*staleRouter*/);
+// TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
+// runTest(st, session, multiDelete, true /*staleRouter*/);
- multiDelete.ordered = true;
- runTest(st, session, multiDelete, false /*staleRouter*/);
- // TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
- // runTest(st, session, multiDelete, true /*staleRouter*/);
+multiDelete.ordered = true;
+runTest(st, session, multiDelete, false /*staleRouter*/);
+// TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
+// runTest(st, session, multiDelete, true /*staleRouter*/);
- disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/transactions_read_concerns.js b/jstests/sharding/transactions_read_concerns.js
index 0b01e2a42ec..af2c24b2b02 100644
--- a/jstests/sharding/transactions_read_concerns.js
+++ b/jstests/sharding/transactions_read_concerns.js
@@ -7,77 +7,76 @@
// uses_transactions,
// ]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
- const st = new ShardingTest({shards: 2, config: 1});
+const st = new ShardingTest({shards: 2, config: 1});
- // Set up a sharded collection with 2 chunks, one on each shard.
+// Set up a sharded collection with 2 chunks, one on each shard.
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
- // Refresh second shard to avoid stale shard version error on the second transaction statement.
- assert.commandWorked(st.rs1.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
+// Refresh second shard to avoid stale shard version error on the second transaction statement.
+assert.commandWorked(st.rs1.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
- function runTest(st, readConcern, sessionOptions) {
- jsTestLog("Testing readConcern: " + tojson(readConcern) + ", sessionOptions: " +
- tojson(sessionOptions));
+function runTest(st, readConcern, sessionOptions) {
+ jsTestLog("Testing readConcern: " + tojson(readConcern) +
+ ", sessionOptions: " + tojson(sessionOptions));
- const session = st.s.startSession(sessionOptions);
- const sessionDB = session.getDatabase(dbName);
+ const session = st.s.startSession(sessionOptions);
+ const sessionDB = session.getDatabase(dbName);
- if (readConcern) {
- session.startTransaction({readConcern: readConcern});
- } else {
- session.startTransaction();
- }
-
- // Target only the first shard.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: -1}}));
-
- // On a separate, causally consistent session, read from the first shard then write to the
- // second one. This write is guaranteed to commit at a later cluster time than that of the
- // snapshot established by the transaction on the first shard.
- const otherSessionDB = st.s.startSession().getDatabase(dbName);
- assert.commandWorked(otherSessionDB.runCommand({find: collName}));
- assert.commandWorked(otherSessionDB.runCommand({insert: collName, documents: [{_id: 5}]}));
-
- // Depending on the transaction's read concern, the new document will or will not be visible
- // to the next statement.
- const numExpectedDocs = readConcern && readConcern.level === "snapshot" ? 0 : 1;
- assert.eq(numExpectedDocs,
- sessionDB[collName].find({_id: 5}).itcount(),
- "sharded transaction with read concern " + tojson(readConcern) +
- " did not see expected number of documents, sessionOptions: " +
- tojson(sessionOptions));
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Clean up for the next iteration.
- assert.writeOK(sessionDB[collName].remove({_id: 5}));
- }
-
- // Specifying no read concern level is allowed and should not compute a global snapshot.
- runTest(st, undefined, {causalConsistency: false});
- runTest(st, undefined, {causalConsistency: true});
-
- const kAllowedReadConcernLevels = ["local", "majority", "snapshot"];
- for (let readConcernLevel of kAllowedReadConcernLevels) {
- runTest(st, {level: readConcernLevel}, {causalConsistency: false});
- runTest(st, {level: readConcernLevel}, {causalConsistency: true});
+ if (readConcern) {
+ session.startTransaction({readConcern: readConcern});
+ } else {
+ session.startTransaction();
}
- st.stop();
+ // Target only the first shard.
+ assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: -1}}));
+
+ // On a separate, causally consistent session, read from the first shard then write to the
+ // second one. This write is guaranteed to commit at a later cluster time than that of the
+ // snapshot established by the transaction on the first shard.
+ const otherSessionDB = st.s.startSession().getDatabase(dbName);
+ assert.commandWorked(otherSessionDB.runCommand({find: collName}));
+ assert.commandWorked(otherSessionDB.runCommand({insert: collName, documents: [{_id: 5}]}));
+
+ // Depending on the transaction's read concern, the new document will or will not be visible
+ // to the next statement.
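+    // (A "snapshot" transaction establishes one global read timestamp with its first statement,
+    // so the later insert is not visible; the other levels do not pin a global snapshot, so the
+    // read on the second shard sees it.)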
+ const numExpectedDocs = readConcern && readConcern.level === "snapshot" ? 0 : 1;
+ assert.eq(
+ numExpectedDocs,
+ sessionDB[collName].find({_id: 5}).itcount(),
+ "sharded transaction with read concern " + tojson(readConcern) +
+ " did not see expected number of documents, sessionOptions: " + tojson(sessionOptions));
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // Clean up for the next iteration.
+ assert.writeOK(sessionDB[collName].remove({_id: 5}));
+}
+
+// Specifying no read concern level is allowed and should not compute a global snapshot.
+runTest(st, undefined, {causalConsistency: false});
+runTest(st, undefined, {causalConsistency: true});
+
+const kAllowedReadConcernLevels = ["local", "majority", "snapshot"];
+for (let readConcernLevel of kAllowedReadConcernLevels) {
+ runTest(st, {level: readConcernLevel}, {causalConsistency: false});
+ runTest(st, {level: readConcernLevel}, {causalConsistency: true});
+}
+
+st.stop();
})();
diff --git a/jstests/sharding/transactions_reject_writes_for_moved_chunks.js b/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
index d1c5ef5c225..12c7fa1fab3 100644
--- a/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
+++ b/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
@@ -8,161 +8,159 @@
// uses_transactions,
// ]
(function() {
- "use strict";
+"use strict";
- function expectChunks(st, ns, chunks) {
- for (let i = 0; i < chunks.length; i++) {
- assert.eq(chunks[i],
- st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
- "unexpected number of chunks on shard " + i);
- }
+function expectChunks(st, ns, chunks) {
+ for (let i = 0; i < chunks.length; i++) {
+ assert.eq(chunks[i],
+ st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
+ "unexpected number of chunks on shard " + i);
}
+}
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
+
+const st = new ShardingTest({shards: 3, mongos: 1, config: 1});
+
+// Set up one sharded collection with 2 chunks, both on the primary shard.
+
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+
+expectChunks(st, ns, [2, 0, 0]);
+
+// Force a routing table refresh on Shard2, to avoid picking a global read timestamp before the
+// sharding metadata cache collections are created.
+assert.commandWorked(st.rs2.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
+
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
+
+expectChunks(st, ns, [1, 1, 0]);
+
+// The command should target only the second chunk.
+const kCommandTestCases = [
+ {
+ name: "insert",
+ command: {insert: collName, documents: [{_id: 6}]},
+ },
+ {
+ name: "update_query",
+ command: {update: collName, updates: [{q: {_id: 5}, u: {$set: {x: 1}}}]},
+ },
+ {
+ name: "update_replacement",
+ command: {update: collName, updates: [{q: {_id: 5}, u: {_id: 5, x: 1}}]},
+ },
+ {
+ name: "delete",
+ command: {delete: collName, deletes: [{q: {_id: 5}, limit: 1}]},
+ },
+ {
+ name: "findAndModify_update",
+ command: {findAndModify: collName, query: {_id: 5}, update: {$set: {x: 1}}},
+ },
+ {
+ name: "findAndModify_delete",
+ command: {findAndModify: collName, query: {_id: 5}, remove: true},
+ }
+];
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
-
- const st = new ShardingTest({shards: 3, mongos: 1, config: 1});
-
- // Set up one sharded collection with 2 chunks, both on the primary shard.
-
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
-
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
-
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
-
- expectChunks(st, ns, [2, 0, 0]);
+function runTest(testCase, moveChunkBack) {
+ const testCaseName = testCase.name;
+ const cmdTargetChunk2 = testCase.command;
- // Force a routing table refresh on Shard2, to avoid picking a global read timestamp before the
- // sharding metadata cache collections are created.
- assert.commandWorked(st.rs2.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
-
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
+ jsTestLog("Testing " + testCaseName + ", moveChunkBack: " + moveChunkBack);
expectChunks(st, ns, [1, 1, 0]);
- // The command should target only the second chunk.
- const kCommandTestCases = [
- {
- name: "insert",
- command: {insert: collName, documents: [{_id: 6}]},
- },
- {
- name: "update_query",
- command: {update: collName, updates: [{q: {_id: 5}, u: {$set: {x: 1}}}]},
- },
- {
- name: "update_replacement",
- command: {update: collName, updates: [{q: {_id: 5}, u: {_id: 5, x: 1}}]},
- },
- {
- name: "delete",
- command: {delete: collName, deletes: [{q: {_id: 5}, limit: 1}]},
- },
- {
- name: "findAndModify_update",
- command: {findAndModify: collName, query: {_id: 5}, update: {$set: {x: 1}}},
- },
- {
- name: "findAndModify_delete",
- command: {findAndModify: collName, query: {_id: 5}, remove: true},
- }
- ];
-
- function runTest(testCase, moveChunkBack) {
- const testCaseName = testCase.name;
- const cmdTargetChunk2 = testCase.command;
-
- jsTestLog("Testing " + testCaseName + ", moveChunkBack: " + moveChunkBack);
+ const session = st.s.startSession();
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB[collName];
- expectChunks(st, ns, [1, 1, 0]);
+ session.startTransaction({readConcern: {level: "snapshot"}});
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB[collName];
+ // Start a transaction on Shard0 which will select and pin a global read timestamp.
+ assert.eq(sessionColl.find({_id: -5}).itcount(), 1, "expected to find document in first chunk");
- session.startTransaction({readConcern: {level: "snapshot"}});
+ // Move a chunk from Shard1 to Shard2 outside of the transaction. This will happen at a
+ // later logical time than the transaction's read timestamp.
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
- // Start a transaction on Shard0 which will select and pin a global read timestamp.
- assert.eq(
- sessionColl.find({_id: -5}).itcount(), 1, "expected to find document in first chunk");
+ if (moveChunkBack) {
+ // If the chunk is moved back to the shard that owned it at the transaction's read
+        // timestamp, the later write should still be rejected because conflicting operations may
+ // have occurred while the chunk was moved away, which otherwise may not be detected
+ // when the shard prepares the transaction.
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
- // Move a chunk from Shard1 to Shard2 outside of the transaction. This will happen at a
- // later logical time than the transaction's read timestamp.
+ // Flush metadata on the destination shard so the next request doesn't encounter
+ // StaleConfig. The router refreshes after moving a chunk, so it will already be fresh.
assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
+ st.rs1.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
+ }
- if (moveChunkBack) {
- // If the chunk is moved back to the shard that owned it at the transaction's read
- // timestamp the later write should still be rejected because conflicting operations may
- // have occurred while the chunk was moved away, which otherwise may not be detected
- // when the shard prepares the transaction.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
-
- // Flush metadata on the destination shard so the next request doesn't encounter
- // StaleConfig. The router refreshes after moving a chunk, so it will already be fresh.
- assert.commandWorked(
- st.rs1.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
- }
+ // The write should always fail, but the particular error varies.
+ const res = assert.commandFailed(
+ sessionDB.runCommand(cmdTargetChunk2),
+ "expected write to second chunk to fail, case: " + testCaseName +
+ ", cmd: " + tojson(cmdTargetChunk2) + ", moveChunkBack: " + moveChunkBack);
- // The write should always fail, but the particular error varies.
- const res = assert.commandFailed(sessionDB.runCommand(cmdTargetChunk2),
- "expected write to second chunk to fail, case: " +
- testCaseName + ", cmd: " + tojson(cmdTargetChunk2) +
- ", moveChunkBack: " + moveChunkBack);
+ const errMsg = "write to second chunk failed with unexpected error, res: " + tojson(res) +
+ ", case: " + testCaseName + ", cmd: " + tojson(cmdTargetChunk2) +
+ ", moveChunkBack: " + moveChunkBack;
- const errMsg = "write to second chunk failed with unexpected error, res: " + tojson(res) +
- ", case: " + testCaseName + ", cmd: " + tojson(cmdTargetChunk2) + ", moveChunkBack: " +
- moveChunkBack;
+ // On slow hosts, this request can always fail with SnapshotTooOld or StaleChunkHistory if
+ // a migration takes long enough.
+ const expectedCodes = [ErrorCodes.SnapshotTooOld, ErrorCodes.StaleChunkHistory];
- // On slow hosts, this request can always fail with SnapshotTooOld or StaleChunkHistory if
- // a migration takes long enough.
- const expectedCodes = [ErrorCodes.SnapshotTooOld, ErrorCodes.StaleChunkHistory];
+ if (testCaseName === "insert") {
+ // Insert always inserts a new document, so the only typical error is MigrationConflict.
+ expectedCodes.push(ErrorCodes.MigrationConflict);
+ assert.commandFailedWithCode(res, expectedCodes, errMsg);
+ } else {
+ // The other commands modify an existing document so they may also fail with
+ // WriteConflict, depending on when orphaned documents are modified.
- if (testCaseName === "insert") {
- // Insert always inserts a new document, so the only typical error is MigrationConflict.
- expectedCodes.push(ErrorCodes.MigrationConflict);
- assert.commandFailedWithCode(res, expectedCodes, errMsg);
+ if (moveChunkBack) {
+ // Orphans from the first migration must have been deleted before the chunk was
+ // moved back, so the only typical error is WriteConflict.
+ expectedCodes.push(ErrorCodes.WriteConflict);
} else {
- // The other commands modify an existing document so they may also fail with
- // WriteConflict, depending on when orphaned documents are modified.
-
- if (moveChunkBack) {
- // Orphans from the first migration must have been deleted before the chunk was
- // moved back, so the only typical error is WriteConflict.
- expectedCodes.push(ErrorCodes.WriteConflict);
- } else {
- // If the chunk wasn't moved back, the write races with the range deleter. If the
- // range deleter has not run, the write should fail with MigrationConflict,
- // otherwise with WriteConflict, so both codes are acceptable.
- expectedCodes.push(ErrorCodes.WriteConflict, ErrorCodes.MigrationConflict);
- }
- assert.commandFailedWithCode(res, expectedCodes, errMsg);
+ // If the chunk wasn't moved back, the write races with the range deleter. If the
+ // range deleter has not run, the write should fail with MigrationConflict,
+ // otherwise with WriteConflict, so both codes are acceptable.
+ expectedCodes.push(ErrorCodes.WriteConflict, ErrorCodes.MigrationConflict);
}
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
+ assert.commandFailedWithCode(res, expectedCodes, errMsg);
+ }
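+
+    // Every failure above is expected to carry the TransientTransactionError label, indicating
+    // the transaction as a whole can safely be retried from the start.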
+ assert.eq(res.errorLabels, ["TransientTransactionError"]);
- // The commit should fail because the earlier write failed.
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+ // The commit should fail because the earlier write failed.
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
- // Move the chunk back to Shard1, if necessary, and reset the database state for the next
- // iteration.
- if (!moveChunkBack) {
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
- }
- assert.writeOK(sessionColl.remove({}));
- assert.writeOK(sessionColl.insert([{_id: 5}, {_id: -5}]));
+ // Move the chunk back to Shard1, if necessary, and reset the database state for the next
+ // iteration.
+ if (!moveChunkBack) {
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
}
+ assert.writeOK(sessionColl.remove({}));
+ assert.writeOK(sessionColl.insert([{_id: 5}, {_id: -5}]));
+}
- kCommandTestCases.forEach(testCase => runTest(testCase, false /*moveChunkBack*/));
- kCommandTestCases.forEach(testCase => runTest(testCase, true /*moveChunkBack*/));
+kCommandTestCases.forEach(testCase => runTest(testCase, false /*moveChunkBack*/));
+kCommandTestCases.forEach(testCase => runTest(testCase, true /*moveChunkBack*/));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/transactions_snapshot_errors_first_statement.js b/jstests/sharding/transactions_snapshot_errors_first_statement.js
index 2d915425033..3b0f5f74953 100644
--- a/jstests/sharding/transactions_snapshot_errors_first_statement.js
+++ b/jstests/sharding/transactions_snapshot_errors_first_statement.js
@@ -8,159 +8,157 @@
//
// @tags: [requires_sharding, uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
-
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
-
- const kCommandTestCases = [
- {name: "aggregate", command: {aggregate: collName, pipeline: [], cursor: {}}},
- {name: "distinct", command: {distinct: collName, query: {}, key: "_id"}},
- {name: "find", command: {find: collName}},
- {
- // findAndModify can only target one shard, even in the two shard case.
- name: "findAndModify",
- command: {findAndModify: collName, query: {_id: 1}, update: {$set: {x: 1}}}
- },
- {name: "insert", command: {insert: collName, documents: [{_id: 1}, {_id: 11}]}},
- {
- name: "update",
- command: {
- update: collName,
- updates: [{q: {_id: 1}, u: {$set: {_id: 2}}}, {q: {_id: 11}, u: {$set: {_id: 12}}}]
- }
- },
- {
- name: "delete",
- command:
- {delete: collName, deletes: [{q: {_id: 2}, limit: 1}, {q: {_id: 12}, limit: 1}]}
- },
- // We cannot test killCursors because mongos discards the response from any killCursors
- // requests that may be sent to shards.
- ];
-
- // Verify that all commands that can start a transaction are able to retry on snapshot errors.
- function runTest(st, collName, numShardsToError, errorCode, isSharded) {
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
-
- for (let commandTestCase of kCommandTestCases) {
- const commandName = commandTestCase.name;
- const commandBody = commandTestCase.command;
-
- if (isSharded && commandName === "distinct") {
- // Distinct isn't allowed on sharded collections in a multi-document transaction.
- print("Skipping distinct test case for sharded collection");
- continue;
- }
-
- //
- // Retry on a single error.
- //
-
- setFailCommandOnShards(st, {times: 1}, [commandName], errorCode, numShardsToError);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionDB.runCommand(commandBody));
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- unsetFailCommandOnEachShard(st, numShardsToError);
-
- // Clean up after insert to avoid duplicate key errors.
- if (commandName === "insert") {
- assert.writeOK(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
- }
-
- //
- // Retry on multiple errors.
- //
-
- setFailCommandOnShards(st, {times: 3}, [commandName], errorCode, numShardsToError);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionDB.runCommand(commandBody));
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- unsetFailCommandOnEachShard(st, numShardsToError);
-
- // Clean up after insert to avoid duplicate key errors.
- if (commandName === "insert") {
- assert.writeOK(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
- }
-
- //
- // Exhaust retry attempts.
- //
-
- setFailCommandOnShards(st, "alwaysOn", [commandName], errorCode, numShardsToError);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandFailedWithCode(sessionDB.runCommand(commandBody), errorCode);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
-
- unsetFailCommandOnEachShard(st, numShardsToError);
-
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
-
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+"use strict";
+
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
+
+const kCommandTestCases = [
+ {name: "aggregate", command: {aggregate: collName, pipeline: [], cursor: {}}},
+ {name: "distinct", command: {distinct: collName, query: {}, key: "_id"}},
+ {name: "find", command: {find: collName}},
+ {
+ // findAndModify can only target one shard, even in the two shard case.
+ name: "findAndModify",
+ command: {findAndModify: collName, query: {_id: 1}, update: {$set: {x: 1}}}
+ },
+ {name: "insert", command: {insert: collName, documents: [{_id: 1}, {_id: 11}]}},
+ {
+ name: "update",
+ command: {
+ update: collName,
+ updates: [{q: {_id: 1}, u: {$set: {_id: 2}}}, {q: {_id: 11}, u: {$set: {_id: 12}}}]
+ }
+ },
+ {
+ name: "delete",
+ command: {delete: collName, deletes: [{q: {_id: 2}, limit: 1}, {q: {_id: 12}, limit: 1}]}
+ },
+ // We cannot test killCursors because mongos discards the response from any killCursors
+ // requests that may be sent to shards.
+];
+
+// Verify that all commands that can start a transaction are able to retry on snapshot errors.
+function runTest(st, collName, numShardsToError, errorCode, isSharded) {
+ const session = st.s.startSession();
+ const sessionDB = session.getDatabase(dbName);
+
+ for (let commandTestCase of kCommandTestCases) {
+ const commandName = commandTestCase.name;
+ const commandBody = commandTestCase.command;
+
+ if (isSharded && commandName === "distinct") {
+ // Distinct isn't allowed on sharded collections in a multi-document transaction.
+ print("Skipping distinct test case for sharded collection");
+ continue;
}
- }
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
+ //
+ // Retry on a single error.
+ //
- enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+ setFailCommandOnShards(st, {times: 1}, [commandName], errorCode, numShardsToError);
- jsTestLog("Unsharded transaction");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ assert.commandWorked(sessionDB.runCommand(commandBody));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+ assert.commandWorked(session.commitTransaction_forTesting());
- for (let errorCode of kSnapshotErrors) {
- runTest(st, collName, 1, errorCode, false /* isSharded */);
- }
+ unsetFailCommandOnEachShard(st, numShardsToError);
- // Enable sharding and set up 2 chunks, [minKey, 10), [10, maxKey), each with one document
- // (includes the document already inserted).
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ // Clean up after insert to avoid duplicate key errors.
+ if (commandName === "insert") {
+ assert.writeOK(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
+ }
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
+ //
+ // Retry on multiple errors.
+ //
- jsTestLog("One shard sharded transaction");
+ setFailCommandOnShards(st, {times: 3}, [commandName], errorCode, numShardsToError);
- assert.eq(2, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(0, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ assert.commandWorked(sessionDB.runCommand(commandBody));
- for (let errorCode of kSnapshotErrors) {
- runTest(st, collName, 1, errorCode, true /* isSharded */);
- }
+ assert.commandWorked(session.commitTransaction_forTesting());
- jsTestLog("Two shard sharded transaction");
+ unsetFailCommandOnEachShard(st, numShardsToError);
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 15}, to: st.shard1.shardName}));
- assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
+ // Clean up after insert to avoid duplicate key errors.
+ if (commandName === "insert") {
+ assert.writeOK(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
+ }
- for (let errorCode of kSnapshotErrors) {
- runTest(st, collName, 2, errorCode, true /* isSharded */);
- }
+ //
+ // Exhaust retry attempts.
+ //
- // Test only one shard throwing the error when more than one are targeted.
- for (let errorCode of kSnapshotErrors) {
- runTest(st, collName, 1, errorCode, true /* isSharded */);
+ setFailCommandOnShards(st, "alwaysOn", [commandName], errorCode, numShardsToError);
+
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandFailedWithCode(sessionDB.runCommand(commandBody), errorCode);
+ assert.eq(res.errorLabels, ["TransientTransactionError"]);
+
+ unsetFailCommandOnEachShard(st, numShardsToError);
+
+ assertNoSuchTransactionOnAllShards(
+ st, session.getSessionId(), session.getTxnNumber_forTesting());
+
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
}
+}
+
+const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
+
+enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+
+jsTestLog("Unsharded transaction");
+
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+for (let errorCode of kSnapshotErrors) {
+ runTest(st, collName, 1, errorCode, false /* isSharded */);
+}
+
+// Enable sharding and set up 2 chunks, [minKey, 10), [10, maxKey), each with one document
+// (includes the document already inserted).
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
+
+jsTestLog("One shard sharded transaction");
+
+assert.eq(2, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
+assert.eq(0, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+
+for (let errorCode of kSnapshotErrors) {
+ runTest(st, collName, 1, errorCode, true /* isSharded */);
+}
+
+jsTestLog("Two shard sharded transaction");
+
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 15}, to: st.shard1.shardName}));
+assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
+assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+
+for (let errorCode of kSnapshotErrors) {
+ runTest(st, collName, 2, errorCode, true /* isSharded */);
+}
+
+// Test only one shard throwing the error when more than one are targeted.
+for (let errorCode of kSnapshotErrors) {
+ runTest(st, collName, 1, errorCode, true /* isSharded */);
+}
- disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- st.stop();
+st.stop();
})();
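
For reference, a minimal sketch of what the failpoint helpers used above (setFailCommandOnShards and unsetFailCommandOnEachShard) could look like, assuming they simply wrap the server's "failCommand" failpoint; the real helpers live in jstests/sharding/libs/sharded_transactions_helpers.js and may differ in detail.

function setFailCommandOnShards(st, mode, commands, errorCode, numShards) {
    for (let i = 0; i < numShards; i++) {
        const shardPrimary = st["rs" + i].getPrimary();
        // failInternalCommands lets requests forwarded through mongos trip the failpoint.
        assert.commandWorked(shardPrimary.adminCommand({
            configureFailPoint: "failCommand",
            mode: mode,
            data: {errorCode: errorCode, failCommands: commands, failInternalCommands: true},
        }));
    }
}

function unsetFailCommandOnEachShard(st, numShards) {
    for (let i = 0; i < numShards; i++) {
        assert.commandWorked(st["rs" + i].getPrimary().adminCommand(
            {configureFailPoint: "failCommand", mode: "off"}));
    }
}
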
diff --git a/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js b/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
index 27855d9bc1a..e83ef670708 100644
--- a/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
+++ b/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
@@ -7,116 +7,114 @@
//
// @tags: [requires_sharding, uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
-
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
-
- const kCommandTestCases = [
- {name: "aggregate", command: {aggregate: collName, pipeline: [], cursor: {}}},
- {name: "distinct", command: {distinct: collName, query: {}, key: "_id"}},
- {name: "find", command: {find: collName}},
- {
- // findAndModify can only target one shard, even in the two shard case.
- name: "findAndModify",
- command: {findAndModify: collName, query: {_id: 1}, update: {$set: {x: 1}}}
- },
- {name: "insert", command: {insert: collName, documents: [{_id: 1}, {_id: 11}]}},
- {
- name: "update",
- command: {
- update: collName,
- updates: [{q: {_id: 1}, u: {$set: {_id: 2}}}, {q: {_id: 11}, u: {$set: {_id: 12}}}]
- }
- },
- {
- name: "delete",
- command:
- {delete: collName, deletes: [{q: {_id: 2}, limit: 1}, {q: {_id: 12}, limit: 1}]}
- },
- // We cannot test killCursors because mongos discards the response from any killCursors
- // requests that may be sent to shards.
- ];
-
- function runTest(st, collName, errorCode, isSharded) {
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
-
- for (let commandTestCase of kCommandTestCases) {
- const commandName = commandTestCase.name;
- const commandBody = commandTestCase.command;
-
- if (isSharded && commandName === "distinct") {
- // Distinct isn't allowed on sharded collections in a multi-document transaction.
- print("Skipping distinct test case for sharded collections");
- continue;
- }
-
- // Successfully start a transaction on one shard.
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 15}}));
-
- // Verify the command must fail on a snapshot error from a subsequent statement.
- setFailCommandOnShards(st, {times: 1}, [commandName], errorCode, 1);
- const res = assert.commandFailedWithCode(sessionDB.runCommand(commandBody), errorCode);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
-
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+"use strict";
+
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
+
+const kCommandTestCases = [
+ {name: "aggregate", command: {aggregate: collName, pipeline: [], cursor: {}}},
+ {name: "distinct", command: {distinct: collName, query: {}, key: "_id"}},
+ {name: "find", command: {find: collName}},
+ {
+ // findAndModify can only target one shard, even in the two shard case.
+ name: "findAndModify",
+ command: {findAndModify: collName, query: {_id: 1}, update: {$set: {x: 1}}}
+ },
+ {name: "insert", command: {insert: collName, documents: [{_id: 1}, {_id: 11}]}},
+ {
+ name: "update",
+ command: {
+ update: collName,
+ updates: [{q: {_id: 1}, u: {$set: {_id: 2}}}, {q: {_id: 11}, u: {$set: {_id: 12}}}]
}
+ },
+ {
+ name: "delete",
+ command: {delete: collName, deletes: [{q: {_id: 2}, limit: 1}, {q: {_id: 12}, limit: 1}]}
+ },
+ // We cannot test killCursors because mongos discards the response from any killCursors
+ // requests that may be sent to shards.
+];
+
+function runTest(st, collName, errorCode, isSharded) {
+ const session = st.s.startSession();
+ const sessionDB = session.getDatabase(dbName);
+
+ for (let commandTestCase of kCommandTestCases) {
+ const commandName = commandTestCase.name;
+ const commandBody = commandTestCase.command;
+
+ if (isSharded && commandName === "distinct") {
+ // Distinct isn't allowed on sharded collections in a multi-document transaction.
+ print("Skipping distinct test case for sharded collections");
+ continue;
+ }
+
+ // Successfully start a transaction on one shard.
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 15}}));
+
+ // Verify the command must fail on a snapshot error from a subsequent statement.
+ setFailCommandOnShards(st, {times: 1}, [commandName], errorCode, 1);
+ const res = assert.commandFailedWithCode(sessionDB.runCommand(commandBody), errorCode);
+ assert.eq(res.errorLabels, ["TransientTransactionError"]);
+
+ assertNoSuchTransactionOnAllShards(
+ st, session.getSessionId(), session.getTxnNumber_forTesting());
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
}
+}
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
+const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
- enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- jsTestLog("Unsharded transaction");
+jsTestLog("Unsharded transaction");
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- // Single shard case simulates the storage engine discarding an in-use snapshot.
- for (let errorCode of kSnapshotErrors) {
- runTest(st, collName, errorCode, false /* isSharded */);
- }
+// Single shard case simulates the storage engine discarding an in-use snapshot.
+for (let errorCode of kSnapshotErrors) {
+ runTest(st, collName, errorCode, false /* isSharded */);
+}
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- // Set up 2 chunks, [minKey, 10), [10, maxKey), each with one document (includes the document
- // already inserted).
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
+// Set up 2 chunks, [minKey, 10), [10, maxKey), each with one document (includes the document
+// already inserted).
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
- jsTestLog("One shard transaction");
+jsTestLog("One shard transaction");
- assert.eq(2, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(0, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+assert.eq(2, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
+assert.eq(0, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
- for (let errorCode of kSnapshotErrors) {
- runTest(st, collName, errorCode, true /* isSharded */);
- }
+for (let errorCode of kSnapshotErrors) {
+ runTest(st, collName, errorCode, true /* isSharded */);
+}
- jsTestLog("Two shard transaction");
+jsTestLog("Two shard transaction");
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 15}, to: st.shard1.shardName}));
- assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 15}, to: st.shard1.shardName}));
+assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
+assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
- // Multi shard case simulates adding a new participant that can no longer support the already
- // chosen read timestamp.
- for (let errorCode of kSnapshotErrors) {
- runTest(st, collName, errorCode, true /* isSharded */);
- }
+// Multi shard case simulates adding a new participant that can no longer support the already
+// chosen read timestamp.
+for (let errorCode of kSnapshotErrors) {
+ runTest(st, collName, errorCode, true /* isSharded */);
+}
- disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- st.stop();
+st.stop();
})();
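
The assertions above rely on mongos attaching the "TransientTransactionError" label when a subsequent statement hits a snapshot error. A hedged sketch of how a caller could act on that label (the helper name and retry-forever loop are illustrative only, not part of the test):

function runWithTransientRetry(session, sessionDB, commandBody) {
    while (true) {
        session.startTransaction({readConcern: {level: "snapshot"}});
        const res = sessionDB.runCommand(commandBody);
        if (res.ok === 1) {
            assert.commandWorked(session.commitTransaction_forTesting());
            return res;
        }
        // The attempt failed and the shards may have already aborted, so don't assert here.
        session.abortTransaction_forTesting();
        // Only errors the router labeled as transient are safe to retry from the beginning.
        if (!res.errorLabels || !res.errorLabels.includes("TransientTransactionError")) {
            assert.commandWorked(res);  // Surfaces the unexpected error and fails the test.
        }
    }
}
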
diff --git a/jstests/sharding/transactions_stale_database_version_errors.js b/jstests/sharding/transactions_stale_database_version_errors.js
index 0302162cf3c..01531a3c208 100644
--- a/jstests/sharding/transactions_stale_database_version_errors.js
+++ b/jstests/sharding/transactions_stale_database_version_errors.js
@@ -2,128 +2,123 @@
//
// @tags: [requires_sharding, uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
+"use strict";
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
- const dbName = "test";
- const collName = "foo";
+const dbName = "test";
+const collName = "foo";
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
+const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
- enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- // Set up two unsharded collections in different databases with shard0 as their primary.
+// Set up two unsharded collections in different databases with shard0 as their primary.
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
+const session = st.s.startSession();
+const sessionDB = session.getDatabase(dbName);
- //
- // Stale database version on first overall command should succeed.
- //
+//
+// Stale database version on first overall command should succeed.
+//
- session.startTransaction();
+session.startTransaction();
- // No database versioned requests have been sent to Shard0, so it is stale.
- assert.commandWorked(sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}));
+// No database versioned requests have been sent to Shard0, so it is stale.
+assert.commandWorked(sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- //
- // Stale database version on second command to a shard should fail.
- //
+//
+// Stale database version on second command to a shard should fail.
+//
- st.ensurePrimaryShard(dbName, st.shard1.shardName);
+st.ensurePrimaryShard(dbName, st.shard1.shardName);
- session.startTransaction();
+session.startTransaction();
- // Find is not database versioned so it will not trigger SDV or a refresh on Shard0.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 0}}));
+// Find is not database versioned so it will not trigger SDV or a refresh on Shard0.
+assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 0}}));
- // Distinct is database versioned, so it will trigger SDV. The router will retry and the retry
- // will discover the transaction was aborted, because a previous statement had completed on
- // Shard0.
- let res = assert.commandFailedWithCode(
- sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}),
- ErrorCodes.NoSuchTransaction);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
+// Distinct is database versioned, so it will trigger SDV. The router will retry and the retry
+// will discover the transaction was aborted, because a previous statement had completed on
+// Shard0.
+let res = assert.commandFailedWithCode(
+ sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}),
+ ErrorCodes.NoSuchTransaction);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+assertNoSuchTransactionOnAllShards(st, session.getSessionId(), session.getTxnNumber_forTesting());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- //
- // Stale database version on first command to a new shard should succeed.
- //
+//
+// Stale database version on first command to a new shard should succeed.
+//
- // Create a new database on Shard0.
- const otherDbName = "other_test";
- const otherCollName = "bar";
+// Create a new database on Shard0.
+const otherDbName = "other_test";
+const otherCollName = "bar";
- assert.writeOK(
- st.s.getDB(otherDbName)[otherCollName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({enableSharding: otherDbName}));
- st.ensurePrimaryShard(otherDbName, st.shard0.shardName);
+assert.writeOK(
+ st.s.getDB(otherDbName)[otherCollName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(st.s.adminCommand({enableSharding: otherDbName}));
+st.ensurePrimaryShard(otherDbName, st.shard0.shardName);
- const sessionOtherDB = session.getDatabase(otherDbName);
+const sessionOtherDB = session.getDatabase(otherDbName);
- // Advance the router's cached last committed opTime for Shard0, so it chooses a read timestamp
- // after the collection is created on shard1, to avoid SnapshotUnavailable.
- assert.commandWorked(
- sessionOtherDB.runCommand({find: otherCollName})); // Not database versioned.
- assert.writeOK(sessionDB[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+// Advance the router's cached last committed opTime for Shard0, so it chooses a read timestamp
+// after the collection is created on shard1, to avoid SnapshotUnavailable.
+assert.commandWorked(sessionOtherDB.runCommand({find: otherCollName})); // Not database versioned.
+assert.writeOK(sessionDB[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
- session.startTransaction();
+session.startTransaction();
- // Target the first database which is on Shard1.
- assert.commandWorked(sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}));
+// Target the first database which is on Shard1.
+assert.commandWorked(sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}));
- // Targets the new database on Shard0 which is stale, so a database versioned request should
- // trigger SDV.
- assert.commandWorked(
- sessionOtherDB.runCommand({distinct: otherCollName, key: "_id", query: {_id: 0}}));
+// Targets the new database on Shard0 which is stale, so a database versioned request should
+// trigger SDV.
+assert.commandWorked(
+ sessionOtherDB.runCommand({distinct: otherCollName, key: "_id", query: {_id: 0}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- //
- // The final StaleDbVersion error should be returned if the router exhausts its retries.
- //
+//
+// The final StaleDbVersion error should be returned if the router exhausts its retries.
+//
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- st.ensurePrimaryShard(otherDbName, st.shard1.shardName);
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+st.ensurePrimaryShard(otherDbName, st.shard1.shardName);
- // Disable database metadata refreshes on the stale shard so it will indefinitely return a stale
- // version error.
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "skipDatabaseVersionMetadataRefresh", mode: "alwaysOn"}));
+// Disable database metadata refreshes on the stale shard so it will indefinitely return a stale
+// version error.
+assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "skipDatabaseVersionMetadataRefresh", mode: "alwaysOn"}));
- session.startTransaction();
+session.startTransaction();
- // Target Shard1, to verify the transaction on it is implicitly aborted later.
- assert.commandWorked(sessionOtherDB.runCommand({find: otherCollName}));
+// Target Shard1, to verify the transaction on it is implicitly aborted later.
+assert.commandWorked(sessionOtherDB.runCommand({find: otherCollName}));
- // Target the first database which is on Shard0. The shard is stale and won't refresh its
- // metadata, so mongos should exhaust its retries and implicitly abort the transaction.
- res = assert.commandFailedWithCode(
- sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}),
- ErrorCodes.StaleDbVersion);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
+// Target the first database which is on Shard0. The shard is stale and won't refresh its
+// metadata, so mongos should exhaust its retries and implicitly abort the transaction.
+res = assert.commandFailedWithCode(
+ sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}),
+ ErrorCodes.StaleDbVersion);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
- // Verify all shards aborted the transaction.
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// Verify all shards aborted the transaction.
+assertNoSuchTransactionOnAllShards(st, session.getSessionId(), session.getTxnNumber_forTesting());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "skipDatabaseVersionMetadataRefresh", mode: "off"}));
+assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "skipDatabaseVersionMetadataRefresh", mode: "off"}));
- disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- st.stop();
+st.stop();
})();
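
assertNoSuchTransactionOnAllShards is used above to check that every shard implicitly aborted the transaction. A rough sketch of the per-shard probe it presumably performs, modeled on the direct shard check in transactions_stale_shard_version_errors.js below; the real helper is in jstests/sharding/libs/sharded_transactions_helpers.js and may differ.

function assertNoSuchTransactionOnShard(shardPrimary, dbName, collName, session) {
    // A statement attached to the session's transaction number with autocommit: false must be
    // rejected with NoSuchTransaction when the shard has no open transaction for it.
    assert.commandFailedWithCode(shardPrimary.getDB(dbName).runCommand({
        find: collName,
        lsid: session.getSessionId(),
        txnNumber: NumberLong(session.getTxnNumber_forTesting()),
        autocommit: false,
    }),
                                 ErrorCodes.NoSuchTransaction);
}

// e.g. for the two-shard fixture above:
// assertNoSuchTransactionOnShard(st.rs0.getPrimary(), dbName, collName, session);
// assertNoSuchTransactionOnShard(st.rs1.getPrimary(), dbName, collName, session);
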
diff --git a/jstests/sharding/transactions_stale_shard_version_errors.js b/jstests/sharding/transactions_stale_shard_version_errors.js
index bfb2a5b0178..3bc71a01083 100644
--- a/jstests/sharding/transactions_stale_shard_version_errors.js
+++ b/jstests/sharding/transactions_stale_shard_version_errors.js
@@ -2,263 +2,248 @@
//
// @tags: [requires_sharding, uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
+"use strict";
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
- function expectChunks(st, ns, chunks) {
- for (let i = 0; i < chunks.length; i++) {
- assert.eq(chunks[i],
- st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
- "unexpected number of chunks on shard " + i);
- }
+function expectChunks(st, ns, chunks) {
+ for (let i = 0; i < chunks.length; i++) {
+ assert.eq(chunks[i],
+ st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
+ "unexpected number of chunks on shard " + i);
}
+}
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
- const st = new ShardingTest({shards: 3, mongos: 2, config: 1});
+const st = new ShardingTest({shards: 3, mongos: 2, config: 1});
- enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- // Disable the best-effort recipient metadata refresh after migrations to simplify simulating
- // stale shard version errors.
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
- assert.commandWorked(st.rs1.getPrimary().adminCommand(
- {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
- assert.commandWorked(st.rs2.getPrimary().adminCommand(
- {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
+// Disable the best-effort recipient metadata refresh after migrations to simplify simulating
+// stale shard version errors.
+assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
+assert.commandWorked(st.rs1.getPrimary().adminCommand(
+ {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
+assert.commandWorked(st.rs2.getPrimary().adminCommand(
+ {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
- // Shard two collections in the same database, each with 2 chunks, [minKey, 0), [0, maxKey),
- // with one document each, all on Shard0.
+// Shard two collections in the same database, each with 2 chunks, [minKey, 0), [0, maxKey),
+// with one document each, all on Shard0.
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- expectChunks(st, ns, [2, 0, 0]);
+expectChunks(st, ns, [2, 0, 0]);
- const otherCollName = "bar";
- const otherNs = dbName + "." + otherCollName;
+const otherCollName = "bar";
+const otherNs = dbName + "." + otherCollName;
- assert.writeOK(
- st.s.getDB(dbName)[otherCollName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(
- st.s.getDB(dbName)[otherCollName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(
+ st.s.getDB(dbName)[otherCollName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[otherCollName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({shardCollection: otherNs, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: otherNs, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({shardCollection: otherNs, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: otherNs, middle: {_id: 0}}));
- expectChunks(st, otherNs, [2, 0, 0]);
+expectChunks(st, otherNs, [2, 0, 0]);
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
+const session = st.s.startSession();
+const sessionDB = session.getDatabase(dbName);
- //
- // Stale shard version on first overall command should succeed.
- //
+//
+// Stale shard version on first overall command should succeed.
+//
- // Move a chunk in the first collection from Shard0 to Shard1 through the main mongos, so Shard1
- // is stale but not the router.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
- expectChunks(st, ns, [1, 1, 0]);
+// Move a chunk in the first collection from Shard0 to Shard1 through the main mongos, so Shard1
+// is stale but not the router.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
+expectChunks(st, ns, [1, 1, 0]);
- session.startTransaction();
+session.startTransaction();
- // Targets Shard1, which is stale.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
+// Targets Shard1, which is stale.
+assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- //
- // Stale shard version on second command to a shard should fail.
- //
+//
+// Stale shard version on second command to a shard should fail.
+//
- expectChunks(st, ns, [1, 1, 0]);
+expectChunks(st, ns, [1, 1, 0]);
- // Move a chunk in the other collection from Shard0 to Shard1 through the main mongos, so Shard1
- // is stale for the other collection but not the router.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: otherNs, find: {_id: 5}, to: st.shard1.shardName}));
- expectChunks(st, otherNs, [1, 1, 0]);
+// Move a chunk in the other collection from Shard0 to Shard1 through the main mongos, so Shard1
+// is stale for the other collection but not the router.
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: otherNs, find: {_id: 5}, to: st.shard1.shardName}));
+expectChunks(st, otherNs, [1, 1, 0]);
- session.startTransaction();
+session.startTransaction();
- // Targets Shard1 for the first ns, which is not stale.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
+// Targets Shard1 for the first ns, which is not stale.
+assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
- // Targets the other sharded collection on Shard1, which is stale. Because a previous statement
- // has executed on Shard1, the retry will not restart the transaction, and will fail when it
- // finds the transaction has aborted because of the stale shard version.
- let res =
- assert.commandFailedWithCode(sessionDB.runCommand({find: otherCollName, filter: {_id: 5}}),
- ErrorCodes.NoSuchTransaction);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
+// Targets the other sharded collection on Shard1, which is stale. Because a previous statement
+// has executed on Shard1, the retry will not restart the transaction, and will fail when it
+// finds the transaction has aborted because of the stale shard version.
+let res = assert.commandFailedWithCode(
+ sessionDB.runCommand({find: otherCollName, filter: {_id: 5}}), ErrorCodes.NoSuchTransaction);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+assertNoSuchTransactionOnAllShards(st, session.getSessionId(), session.getTxnNumber_forTesting());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- //
- // Stale shard version on first command to a new shard should succeed.
- //
+//
+// Stale shard version on first command to a new shard should succeed.
+//
- expectChunks(st, ns, [1, 1, 0]);
+expectChunks(st, ns, [1, 1, 0]);
- // Move a chunk for the other collection from Shard1 to Shard0 through the main mongos, so
- // Shard0 is stale for it and the router is not.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: otherNs, find: {_id: 5}, to: st.shard0.shardName}));
- expectChunks(st, otherNs, [2, 0, 0]);
+// Move a chunk for the other collection from Shard1 to Shard0 through the main mongos, so
+// Shard0 is stale for it and the router is not.
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: otherNs, find: {_id: 5}, to: st.shard0.shardName}));
+expectChunks(st, otherNs, [2, 0, 0]);
- session.startTransaction();
+session.startTransaction();
- // Targets Shard1 for the first ns, which is not stale.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
+// Targets Shard1 for the first ns, which is not stale.
+assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
- // Targets Shard0 for the other ns, which is stale.
- assert.commandWorked(sessionDB.runCommand({find: otherCollName, filter: {_id: 5}}));
+// Targets Shard0 for the other ns, which is stale.
+assert.commandWorked(sessionDB.runCommand({find: otherCollName, filter: {_id: 5}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- //
- // Stale mongos aborts on old shard.
- //
+//
+// Stale mongos aborts on old shard.
+//
- // Move a chunk in the first collection from Shard1 to Shard0 through the other mongos, so
- // Shard1 and the main mongos are stale for it.
- const otherMongos = st.s1;
- assert.commandWorked(
- otherMongos.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard0.shardName}));
- expectChunks(st, ns, [2, 0, 0]);
+// Move a chunk in the first collection from Shard1 to Shard0 through the other mongos, so
+// Shard1 and the main mongos are stale for it.
+const otherMongos = st.s1;
+assert.commandWorked(
+ otherMongos.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard0.shardName}));
+expectChunks(st, ns, [2, 0, 0]);
- session.startTransaction();
+session.startTransaction();
- // Targets Shard1, which hits a stale version error, then re-targets Shard0, which is also
- // stale but should succeed.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
+// Targets Shard1, which hits a stale version error, then re-targets Shard0, which is also
+// stale but should succeed.
+assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- // Verify there is no in-progress transaction on Shard1.
- res = assert.commandFailedWithCode(st.rs1.getPrimary().getDB(dbName).runCommand({
- find: collName,
- lsid: session.getSessionId(),
- txnNumber: NumberLong(session.getTxnNumber_forTesting()),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
+// Verify there is no in-progress transaction on Shard1.
+res = assert.commandFailedWithCode(st.rs1.getPrimary().getDB(dbName).runCommand({
+ find: collName,
+ lsid: session.getSessionId(),
+ txnNumber: NumberLong(session.getTxnNumber_forTesting()),
+ autocommit: false,
+}),
+ ErrorCodes.NoSuchTransaction);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
- //
- // More than one stale shard version error.
- //
+//
+// More than one stale shard version error.
+//
- // Move chunks for the first ns from Shard0 to Shard1 and Shard2 through the main mongos, so
- // both are stale but not the router.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
- expectChunks(st, ns, [1, 0, 1]);
+// Move chunks for the first ns from Shard0 to Shard1 and Shard2 through the main mongos, so
+// both are stale but not the router.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
+expectChunks(st, ns, [1, 0, 1]);
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: -5}, to: st.shard1.shardName}));
- expectChunks(st, ns, [0, 1, 1]);
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: -5}, to: st.shard1.shardName}));
+expectChunks(st, ns, [0, 1, 1]);
- session.startTransaction();
+session.startTransaction();
- // Targets all shards, two of which are stale.
- assert.commandWorked(sessionDB.runCommand({find: collName}));
+// Targets all shards, two of which are stale.
+assert.commandWorked(sessionDB.runCommand({find: collName}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- //
- // Can retry a stale write on the first statement.
- //
+//
+// Can retry a stale write on the first statement.
+//
- // Move a chunk to Shard1 to make it stale.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
- expectChunks(st, ns, [0, 2, 0]);
+// Move a chunk to Shard1 to make it stale.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
+expectChunks(st, ns, [0, 2, 0]);
- session.startTransaction();
+session.startTransaction();
- // Targets Shard1, which is stale.
- assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [{_id: 6}]}));
+// Targets Shard1, which is stale.
+assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [{_id: 6}]}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- //
- // Cannot retry a stale write past the first statement.
- //
- // TODO SERVER-37207: Change batch writes to retry only the failed writes in a batch, to allow
- // retrying writes beyond the first overall statement.
- //
+//
+// Cannot retry a stale write past the first statement.
+//
+// TODO SERVER-37207: Change batch writes to retry only the failed writes in a batch, to allow
+// retrying writes beyond the first overall statement.
+//
- // Move a chunk to Shard2 to make it stale.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
- expectChunks(st, ns, [0, 1, 1]);
+// Move a chunk to Shard2 to make it stale.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
+expectChunks(st, ns, [0, 1, 1]);
- session.startTransaction();
+session.startTransaction();
- // Targets Shard1, which is not stale.
- assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [{_id: -4}]}));
+// Targets Shard1, which is not stale.
+assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [{_id: -4}]}));
- // Targets Shard2, which is stale.
- res = assert.commandFailedWithCode(
- sessionDB.runCommand({insert: collName, documents: [{_id: 7}]}), ErrorCodes.StaleConfig);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
+// Targets Shard2, which is stale.
+res = assert.commandFailedWithCode(sessionDB.runCommand({insert: collName, documents: [{_id: 7}]}),
+ ErrorCodes.StaleConfig);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
- // The transaction should have been implicitly aborted on all shards.
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// The transaction should have been implicitly aborted on all shards.
+assertNoSuchTransactionOnAllShards(st, session.getSessionId(), session.getTxnNumber_forTesting());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- //
- // The final StaleConfig error should be returned if the router exhausts its retries.
- //
+//
+// The final StaleConfig error should be returned if the router exhausts its retries.
+//
- // Move a chunk to Shard0 to make it stale.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: -5}, to: st.shard0.shardName}));
- expectChunks(st, ns, [1, 0, 1]);
+// Move a chunk to Shard0 to make it stale.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: -5}, to: st.shard0.shardName}));
+expectChunks(st, ns, [1, 0, 1]);
- // Disable metadata refreshes on the stale shard so it will indefinitely return a stale version
- // error.
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "alwaysOn"}));
+// Disable metadata refreshes on the stale shard so it will indefinitely return a stale version
+// error.
+assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "alwaysOn"}));
- session.startTransaction();
+session.startTransaction();
- // Target Shard2, to verify the transaction on it is aborted implicitly later.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
+// Target Shard2, to verify the transaction on it is aborted implicitly later.
+assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
- // Targets all shards. Shard0 is stale and won't refresh its metadata, so mongos should exhaust
- // its retries and implicitly abort the transaction.
- res = assert.commandFailedWithCode(sessionDB.runCommand({find: collName}),
- ErrorCodes.StaleConfig);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
+// Targets all shards. Shard0 is stale and won't refresh its metadata, so mongos should exhaust
+// its retries and implicitly abort the transaction.
+res = assert.commandFailedWithCode(sessionDB.runCommand({find: collName}), ErrorCodes.StaleConfig);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
- // Verify the shards that did not return an error were also aborted.
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// Verify the shards that did not return an error were also aborted.
+assertNoSuchTransactionOnAllShards(st, session.getSessionId(), session.getTxnNumber_forTesting());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "off"}));
+assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "off"}));
- disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- st.stop();
+st.stop();
})();
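
The setup above issues the same configureFailPoint command against each shard primary in turn. A small helper like the sketch below (name and signature are illustrative, not part of the test) captures that pattern in one place:

function setFailPointOnShardPrimaries(st, numShards, failPointName, mode) {
    for (let i = 0; i < numShards; i++) {
        assert.commandWorked(st["rs" + i].getPrimary().adminCommand(
            {configureFailPoint: failPointName, mode: mode}));
    }
}

// e.g.:
// setFailPointOnShardPrimaries(st, 3, "doNotRefreshRecipientAfterCommit", "alwaysOn");
// ... run the test ...
// setFailPointOnShardPrimaries(st, 3, "doNotRefreshRecipientAfterCommit", "off");
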
diff --git a/jstests/sharding/transactions_target_at_point_in_time.js b/jstests/sharding/transactions_target_at_point_in_time.js
index 1243676cfba..3cdfb4b49fe 100644
--- a/jstests/sharding/transactions_target_at_point_in_time.js
+++ b/jstests/sharding/transactions_target_at_point_in_time.js
@@ -8,103 +8,101 @@
// uses_transactions,
// ]
(function() {
- "use strict";
+"use strict";
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
- function expectChunks(st, ns, chunks) {
- for (let i = 0; i < chunks.length; i++) {
- assert.eq(chunks[i],
- st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
- "unexpected number of chunks on shard " + i);
- }
+function expectChunks(st, ns, chunks) {
+ for (let i = 0; i < chunks.length; i++) {
+ assert.eq(chunks[i],
+ st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
+ "unexpected number of chunks on shard " + i);
}
+}
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
- const st = new ShardingTest({shards: 3, mongos: 1, config: 1});
+const st = new ShardingTest({shards: 3, mongos: 1, config: 1});
- // Set up one sharded collection with 2 chunks, both on the primary shard.
+// Set up one sharded collection with 2 chunks, both on the primary shard.
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- expectChunks(st, ns, [2, 0, 0]);
+expectChunks(st, ns, [2, 0, 0]);
- // Temporarily move a chunk to Shard2, to avoid picking a global read timestamp before the
- // sharding metadata cache collections are created.
+// Temporarily move a chunk to Shard2, to avoid picking a global read timestamp before the
+// sharding metadata cache collections are created.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
+
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
+expectChunks(st, ns, [1, 1, 0]);
+
+// First command targets the first chunk, the second command targets the second chunk.
+const kCommandTestCases = [
+ {
+ name: "aggregate",
+ commandFuncs: [
+ (coll) => coll.aggregate({$match: {_id: -5}}).itcount(),
+ (coll) => coll.aggregate({$match: {_id: 5}}).itcount(),
+ ]
+ },
+ {
+ name: "find",
+ commandFuncs: [
+ (coll) => coll.find({_id: -5}).itcount(),
+ (coll) => coll.find({_id: 5}).itcount(),
+ ]
+ }
+];
+
+function runTest(testCase) {
+ const cmdName = testCase.name;
+ const targetChunk1Func = testCase.commandFuncs[0];
+ const targetChunk2Func = testCase.commandFuncs[1];
+
+ jsTestLog("Testing " + cmdName);
+
+ expectChunks(st, ns, [1, 1, 0]);
+
+ const session = st.s.startSession();
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB[collName];
+
+ session.startTransaction({readConcern: {level: "snapshot"}});
+
+ // Start a transaction on Shard0 which will select and pin a global read timestamp.
+ assert.eq(targetChunk1Func(sessionColl),
+ 1,
+ "expected to find document in first chunk, cmd: " + cmdName);
+
+ // Move a chunk from Shard1 to Shard2 outside of the transaction. This will happen at a
+ // later logical time than the transaction's read timestamp.
assert.commandWorked(
st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
+ // Target a document in the chunk that was moved. The router should get a stale shard
+ // version from Shard1 then retry on Shard1 and see the document.
+ assert.eq(targetChunk2Func(sessionColl),
+ 1,
+ "expected to find document in second chunk, cmd: " + cmdName);
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // Move the chunk back to Shard1 for the next iteration.
assert.commandWorked(
st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
- expectChunks(st, ns, [1, 1, 0]);
-
- // First command targets the first chunk, the second command targets the second chunk.
- const kCommandTestCases = [
- {
- name: "aggregate",
- commandFuncs: [
- (coll) => coll.aggregate({$match: {_id: -5}}).itcount(),
- (coll) => coll.aggregate({$match: {_id: 5}}).itcount(),
- ]
- },
- {
- name: "find",
- commandFuncs: [
- (coll) => coll.find({_id: -5}).itcount(),
- (coll) => coll.find({_id: 5}).itcount(),
- ]
- }
- ];
-
- function runTest(testCase) {
- const cmdName = testCase.name;
- const targetChunk1Func = testCase.commandFuncs[0];
- const targetChunk2Func = testCase.commandFuncs[1];
-
- jsTestLog("Testing " + cmdName);
-
- expectChunks(st, ns, [1, 1, 0]);
-
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB[collName];
-
- session.startTransaction({readConcern: {level: "snapshot"}});
-
- // Start a transaction on Shard0 which will select and pin a global read timestamp.
- assert.eq(targetChunk1Func(sessionColl),
- 1,
- "expected to find document in first chunk, cmd: " + cmdName);
-
- // Move a chunk from Shard1 to Shard2 outside of the transaction. This will happen at a
- // later logical time than the transaction's read timestamp.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
-
- // Target a document in the chunk that was moved. The router should get a stale shard
- // version from Shard1 then retry on Shard1 and see the document.
- assert.eq(targetChunk2Func(sessionColl),
- 1,
- "expected to find document in second chunk, cmd: " + cmdName);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Move the chunk back to Shard1 for the next iteration.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
- }
+}
- kCommandTestCases.forEach(runTest);
+kCommandTestCases.forEach(runTest);
- st.stop();
+st.stop();
})();
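
The test above depends on the first statement of a snapshot transaction pinning a global read timestamp. A hedged, self-contained sketch of that property using the same fixture (it would have to run before st.stop(); the _id value 99 and the variable names are arbitrary):

const snapshotSession = st.s.startSession();
const snapshotColl = snapshotSession.getDatabase(dbName)[collName];

snapshotSession.startTransaction({readConcern: {level: "snapshot"}});
assert.eq(1, snapshotColl.find({_id: -5}).itcount());  // First statement pins the timestamp.

// A majority-committed write from outside the transaction, after the timestamp was pinned.
assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 99}, {writeConcern: {w: "majority"}}));

// The new document is not visible at the transaction's pinned snapshot.
assert.eq(0, snapshotColl.find({_id: 99}).itcount());
assert.commandWorked(snapshotSession.commitTransaction_forTesting());
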
diff --git a/jstests/sharding/transactions_targeting_errors.js b/jstests/sharding/transactions_targeting_errors.js
index 9f490994c88..5fb3a0dfba8 100644
--- a/jstests/sharding/transactions_targeting_errors.js
+++ b/jstests/sharding/transactions_targeting_errors.js
@@ -2,41 +2,39 @@
//
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
- const st = new ShardingTest({shards: 2});
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {skey: "hashed"}}));
+const st = new ShardingTest({shards: 2});
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {skey: "hashed"}}));
- const session = st.s.startSession();
- const sessionDB = session.getDatabase("test");
+const session = st.s.startSession();
+const sessionDB = session.getDatabase("test");
- // Failed update.
+// Failed update.
- session.startTransaction();
+session.startTransaction();
- let res = sessionDB.runCommand(
- {update: collName, updates: [{q: {skey: {$lte: 5}}, u: {$set: {x: 1}}, multi: false}]});
- assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
- assert(res.hasOwnProperty("writeErrors"), "expected write errors, res: " + tojson(res));
+let res = sessionDB.runCommand(
+ {update: collName, updates: [{q: {skey: {$lte: 5}}, u: {$set: {x: 1}}, multi: false}]});
+assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
+assert(res.hasOwnProperty("writeErrors"), "expected write errors, res: " + tojson(res));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- // Failed delete.
+// Failed delete.
- session.startTransaction();
+session.startTransaction();
- res = sessionDB.runCommand({delete: collName, deletes: [{q: {skey: {$lte: 5}}, limit: 1}]});
- assert.commandFailedWithCode(res, ErrorCodes.ShardKeyNotFound);
- assert(res.hasOwnProperty("writeErrors"), "expected write errors, res: " + tojson(res));
+res = sessionDB.runCommand({delete: collName, deletes: [{q: {skey: {$lte: 5}}, limit: 1}]});
+assert.commandFailedWithCode(res, ErrorCodes.ShardKeyNotFound);
+assert(res.hasOwnProperty("writeErrors"), "expected write errors, res: " + tojson(res));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- st.stop();
+st.stop();
}());
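
For contrast with the targeting failures above, a hedged sketch of writes that can be targeted inside a transaction: with an exact equality on the shard key ("skey"), a single-document update or delete is routable even under a hashed shard key. This assumes the same st/session/sessionDB/collName fixture and would have to run before st.stop():

assert.writeOK(sessionDB[collName].insert({_id: 1, skey: 5}));

session.startTransaction();
assert.commandWorked(sessionDB.runCommand(
    {update: collName, updates: [{q: {skey: 5}, u: {$set: {x: 1}}, multi: false}]}));
assert.commandWorked(
    sessionDB.runCommand({delete: collName, deletes: [{q: {skey: 5}, limit: 1}]}));
assert.commandWorked(session.commitTransaction_forTesting());
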
diff --git a/jstests/sharding/transactions_view_resolution.js b/jstests/sharding/transactions_view_resolution.js
index be5d5ab7845..1a8224ee089 100644
--- a/jstests/sharding/transactions_view_resolution.js
+++ b/jstests/sharding/transactions_view_resolution.js
@@ -7,289 +7,286 @@
// uses_transactions,
// ]
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For arrayEq.
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- const shardedDbName = "shardedDB";
- const shardedCollName = "sharded";
- const shardedViewName = "sharded_view";
-
- const unshardedDbName = "unshardedDB";
- const unshardedCollName = "unsharded";
- const unshardedViewName = "unsharded_view";
-
- const viewOnShardedViewName = "sharded_view_view";
-
- function setUpUnshardedCollectionAndView(st, session, primaryShard) {
- assert.writeOK(st.s.getDB(unshardedDbName)[unshardedCollName].insert(
- {_id: 1, x: "unsharded"}, {writeConcern: {w: "majority"}}));
- st.ensurePrimaryShard(unshardedDbName, primaryShard);
-
- const unshardedView = session.getDatabase(unshardedDbName)[unshardedViewName];
- assert.commandWorked(unshardedView.runCommand(
- "create", {viewOn: unshardedCollName, pipeline: [], writeConcern: {w: "majority"}}));
-
- return unshardedView;
- }
-
- function setUpShardedCollectionAndView(st, session, primaryShard) {
- const ns = shardedDbName + "." + shardedCollName;
-
- assert.writeOK(st.s.getDB(shardedDbName)[shardedCollName].insert(
- {_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(shardedDbName)[shardedCollName].insert(
- {_id: 1}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({enableSharding: shardedDbName}));
- st.ensurePrimaryShard(shardedDbName, primaryShard);
-
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
-
- const shardedView = session.getDatabase(shardedDbName)[shardedViewName];
- assert.commandWorked(shardedView.runCommand(
- "create", {viewOn: shardedCollName, pipeline: [], writeConcern: {w: "majority"}}));
-
- flushRoutersAndRefreshShardMetadata(st, {ns, dbNames: [shardedDbName, unshardedDbName]});
-
- return shardedView;
- }
-
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
- const session = st.s.startSession();
-
- // Set up an unsharded collection on shard0.
- const unshardedView = setUpUnshardedCollectionAndView(st, session, st.shard0.shardName);
-
- // Set up a sharded collection with one chunk on each shard in a database with shard0 as its
- // primary shard.
- const shardedView = setUpShardedCollectionAndView(st, session, st.shard0.shardName);
-
- // Set up a view on the sharded view, in the same database.
- const viewOnShardedView = session.getDatabase(shardedDbName)[viewOnShardedViewName];
- assert.commandWorked(viewOnShardedView.runCommand(
- "create", {viewOn: shardedViewName, pipeline: [], writeConcern: {w: "majority"}}));
-
- //
- // The first statement a participant shard receives reading from a view should succeed.
- //
-
- function readFromViewOnFirstParticipantStatement(session, view, viewFunc, numDocsExpected) {
- session.startTransaction();
- assert.eq(viewFunc(view), numDocsExpected);
- assert.commandWorked(session.commitTransaction_forTesting());
- }
-
- // Unsharded view.
- readFromViewOnFirstParticipantStatement(session, unshardedView, (view) => {
- return view.aggregate({$match: {}}).itcount();
- }, 1);
- readFromViewOnFirstParticipantStatement(session, unshardedView, (view) => {
- return view.distinct("_id").length;
- }, 1);
- readFromViewOnFirstParticipantStatement(session, unshardedView, (view) => {
- return view.find().itcount();
- }, 1);
-
- // Sharded view.
- readFromViewOnFirstParticipantStatement(session, shardedView, (view) => {
- return view.aggregate({$match: {}}).itcount();
- }, 2);
- readFromViewOnFirstParticipantStatement(session, shardedView, (view) => {
- return view.distinct("_id").length;
- }, 2);
- readFromViewOnFirstParticipantStatement(session, shardedView, (view) => {
- return view.find().itcount();
- }, 2);
-
- // View on sharded view.
- readFromViewOnFirstParticipantStatement(session, viewOnShardedView, (view) => {
- return view.aggregate({$match: {}}).itcount();
- }, 2);
- readFromViewOnFirstParticipantStatement(session, viewOnShardedView, (view) => {
- return view.distinct("_id").length;
- }, 2);
- readFromViewOnFirstParticipantStatement(session, viewOnShardedView, (view) => {
- return view.find().itcount();
- }, 2);
-
- //
- // A later statement a participant shard receives reading from a view should succeed.
- //
-
- function readFromViewOnLaterParticipantStatement(session, view, viewFunc, numDocsExpected) {
- session.startTransaction();
- assert.eq(view.aggregate({$match: {}}).itcount(), numDocsExpected);
- assert.eq(viewFunc(view), numDocsExpected);
- assert.commandWorked(session.commitTransaction_forTesting());
- }
-
- // Unsharded view.
- readFromViewOnLaterParticipantStatement(session, unshardedView, (view) => {
- return view.aggregate({$match: {}}).itcount();
- }, 1);
- readFromViewOnLaterParticipantStatement(session, unshardedView, (view) => {
- return view.distinct("_id").length;
- }, 1);
- readFromViewOnLaterParticipantStatement(session, unshardedView, (view) => {
- return view.find().itcount();
- }, 1);
-
- // Sharded view.
- readFromViewOnLaterParticipantStatement(session, shardedView, (view) => {
- return view.aggregate({$match: {}}).itcount();
- }, 2);
- readFromViewOnLaterParticipantStatement(session, shardedView, (view) => {
- return view.distinct("_id").length;
- }, 2);
- readFromViewOnLaterParticipantStatement(session, shardedView, (view) => {
- return view.find().itcount();
- }, 2);
-
- // View on sharded view.
- readFromViewOnLaterParticipantStatement(session, viewOnShardedView, (view) => {
- return view.aggregate({$match: {}}).itcount();
- }, 2);
- readFromViewOnLaterParticipantStatement(session, viewOnShardedView, (view) => {
- return view.distinct("_id").length;
- }, 2);
- readFromViewOnLaterParticipantStatement(session, viewOnShardedView, (view) => {
- return view.find().itcount();
- }, 2);
-
- //
- // Transactions on shards that return a view resolution error on the first statement remain
- // aborted if the shard is not targeted by the retry on the resolved namespace.
- //
- // This may happen when reading from a sharded view, because mongos will target the primary
- // shard first to resolve the view, but the retry on the underlying sharded collection is not
- // guaranteed to target the primary again.
- //
-
- // Assumes the request in viewFunc does not target the primary shard, Shard0.
- function primaryShardNotReTargeted_FirstStatement(session, view, viewFunc, numDocsExpected) {
- session.startTransaction();
- assert.eq(viewFunc(view), numDocsExpected);
-
- // There should not be an in-progress transaction on the primary shard.
- assert.commandFailedWithCode(st.rs0.getPrimary().getDB("foo").runCommand({
- find: "bar",
- lsid: session.getSessionId(),
- txnNumber: NumberLong(session.getTxnNumber_forTesting()),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // The transaction should not have been committed on the primary shard.
- assert.commandFailedWithCode(st.rs0.getPrimary().getDB("foo").runCommand({
- find: "bar",
- lsid: session.getSessionId(),
- txnNumber: NumberLong(session.getTxnNumber_forTesting()),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
- }
-
- // This is only possible against sharded views.
- primaryShardNotReTargeted_FirstStatement(session, shardedView, (view) => {
- return view.aggregate({$match: {_id: 1}}).itcount();
- }, 1);
- primaryShardNotReTargeted_FirstStatement(session, shardedView, (view) => {
- return view.distinct("_id", {_id: {$gte: 1}}).length;
- }, 1);
- primaryShardNotReTargeted_FirstStatement(session, shardedView, (view) => {
- return view.find({_id: 1}).itcount();
- }, 1);
-
- // View on sharded view.
- primaryShardNotReTargeted_FirstStatement(session, viewOnShardedView, (view) => {
- return view.aggregate({$match: {_id: 1}}).itcount();
- }, 1);
- primaryShardNotReTargeted_FirstStatement(session, viewOnShardedView, (view) => {
- return view.distinct("_id", {_id: {$gte: 1}}).length;
- }, 1);
- primaryShardNotReTargeted_FirstStatement(session, viewOnShardedView, (view) => {
- return view.find({_id: 1}).itcount();
- }, 1);
-
- //
- // Shards do not abort on a view resolution error if they have already completed a statement for
- // a transaction.
- //
-
- // Assumes the primary shard for view is Shard0.
- function primaryShardNotReTargeted_LaterStatement(session, view, viewFunc, numDocsExpected) {
- session.startTransaction();
- // Complete a statement on the primary shard for the view.
- assert.eq(view.aggregate({$match: {_id: -1}}).itcount(), 1);
- // Targets the primary first, but the resolved retry only targets Shard1.
- assert.eq(viewFunc(view), numDocsExpected);
- assert.commandWorked(session.commitTransaction_forTesting());
- }
-
- // This is only possible against sharded views.
- primaryShardNotReTargeted_LaterStatement(session, shardedView, (view) => {
- return view.aggregate({$match: {_id: 1}}).itcount();
- }, 1);
- primaryShardNotReTargeted_LaterStatement(session, shardedView, (view) => {
- return view.distinct("_id", {_id: {$gte: 1}}).length;
- }, 1);
- primaryShardNotReTargeted_LaterStatement(session, shardedView, (view) => {
- return view.find({_id: 1}).itcount();
- }, 1);
-
- // View on sharded view.
- primaryShardNotReTargeted_LaterStatement(session, viewOnShardedView, (view) => {
- return view.aggregate({$match: {_id: 1}}).itcount();
- }, 1);
- primaryShardNotReTargeted_LaterStatement(session, viewOnShardedView, (view) => {
- return view.distinct("_id", {_id: {$gte: 1}}).length;
- }, 1);
- primaryShardNotReTargeted_LaterStatement(session, viewOnShardedView, (view) => {
- return view.find({_id: 1}).itcount();
- }, 1);
-
- //
- // Reading from a view using $lookup and $graphLookup should succeed.
- //
-
- function assertAggResultEqInTransaction(coll, pipeline, expected) {
- session.startTransaction();
- const resArray = coll.aggregate(pipeline).toArray();
- assert(arrayEq(resArray, expected), tojson({got: resArray, expected: expected}));
- assert.commandWorked(session.commitTransaction_forTesting());
- }
-
- // Set up an unsharded collection to use for $lookup. We cannot lookup into sharded collections.
- // TODO SERVER-29159: Add testing for lookup into sharded collections in a transaction once that
- // is supported.
- const lookupDbName = "dbForLookup";
- const lookupCollName = "collForLookup";
- assert.writeOK(
- st.s.getDB(lookupDbName)[lookupCollName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
- const lookupColl = session.getDatabase(unshardedDbName)[unshardedCollName];
-
- // Lookup the document in the unsharded collection with _id: 1 through the unsharded view.
- assertAggResultEqInTransaction(
- lookupColl,
- [
- {$match: {_id: 1}},
- {
- $lookup:
- {from: unshardedViewName, localField: "_id", foreignField: "_id", as: "matched"}
- },
- {$unwind: "$matched"},
- {$project: {_id: 1, matchedX: "$matched.x"}}
- ],
- [{_id: 1, matchedX: "unsharded"}]);
-
- // Find the same document through the view using $graphLookup.
- assertAggResultEqInTransaction(lookupColl,
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For arrayEq.
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+const shardedDbName = "shardedDB";
+const shardedCollName = "sharded";
+const shardedViewName = "sharded_view";
+
+const unshardedDbName = "unshardedDB";
+const unshardedCollName = "unsharded";
+const unshardedViewName = "unsharded_view";
+
+const viewOnShardedViewName = "sharded_view_view";
+
+function setUpUnshardedCollectionAndView(st, session, primaryShard) {
+ assert.writeOK(st.s.getDB(unshardedDbName)[unshardedCollName].insert(
+ {_id: 1, x: "unsharded"}, {writeConcern: {w: "majority"}}));
+ st.ensurePrimaryShard(unshardedDbName, primaryShard);
+
+ const unshardedView = session.getDatabase(unshardedDbName)[unshardedViewName];
+ assert.commandWorked(unshardedView.runCommand(
+ "create", {viewOn: unshardedCollName, pipeline: [], writeConcern: {w: "majority"}}));
+
+ return unshardedView;
+}
+
+function setUpShardedCollectionAndView(st, session, primaryShard) {
+ const ns = shardedDbName + "." + shardedCollName;
+
+ assert.writeOK(st.s.getDB(shardedDbName)[shardedCollName].insert(
+ {_id: -1}, {writeConcern: {w: "majority"}}));
+ assert.writeOK(st.s.getDB(shardedDbName)[shardedCollName].insert(
+ {_id: 1}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(st.s.adminCommand({enableSharding: shardedDbName}));
+ st.ensurePrimaryShard(shardedDbName, primaryShard);
+
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
+
+ const shardedView = session.getDatabase(shardedDbName)[shardedViewName];
+ assert.commandWorked(shardedView.runCommand(
+ "create", {viewOn: shardedCollName, pipeline: [], writeConcern: {w: "majority"}}));
+
+ flushRoutersAndRefreshShardMetadata(st, {ns, dbNames: [shardedDbName, unshardedDbName]});
+
+ return shardedView;
+}
+
+const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
+const session = st.s.startSession();
+
+// Set up an unsharded collection on shard0.
+const unshardedView = setUpUnshardedCollectionAndView(st, session, st.shard0.shardName);
+
+// Set up a sharded collection with one chunk on each shard in a database with shard0 as its
+// primary shard.
+const shardedView = setUpShardedCollectionAndView(st, session, st.shard0.shardName);
+
+// Set up a view on the sharded view, in the same database.
+const viewOnShardedView = session.getDatabase(shardedDbName)[viewOnShardedViewName];
+assert.commandWorked(viewOnShardedView.runCommand(
+ "create", {viewOn: shardedViewName, pipeline: [], writeConcern: {w: "majority"}}));
+
+//
+// The first statement a participant shard receives reading from a view should succeed.
+//
+
+function readFromViewOnFirstParticipantStatement(session, view, viewFunc, numDocsExpected) {
+ session.startTransaction();
+ assert.eq(viewFunc(view), numDocsExpected);
+ assert.commandWorked(session.commitTransaction_forTesting());
+}
+
+// Unsharded view.
+readFromViewOnFirstParticipantStatement(session, unshardedView, (view) => {
+ return view.aggregate({$match: {}}).itcount();
+}, 1);
+readFromViewOnFirstParticipantStatement(session, unshardedView, (view) => {
+ return view.distinct("_id").length;
+}, 1);
+readFromViewOnFirstParticipantStatement(session, unshardedView, (view) => {
+ return view.find().itcount();
+}, 1);
+
+// Sharded view.
+readFromViewOnFirstParticipantStatement(session, shardedView, (view) => {
+ return view.aggregate({$match: {}}).itcount();
+}, 2);
+readFromViewOnFirstParticipantStatement(session, shardedView, (view) => {
+ return view.distinct("_id").length;
+}, 2);
+readFromViewOnFirstParticipantStatement(session, shardedView, (view) => {
+ return view.find().itcount();
+}, 2);
+
+// View on sharded view.
+readFromViewOnFirstParticipantStatement(session, viewOnShardedView, (view) => {
+ return view.aggregate({$match: {}}).itcount();
+}, 2);
+readFromViewOnFirstParticipantStatement(session, viewOnShardedView, (view) => {
+ return view.distinct("_id").length;
+}, 2);
+readFromViewOnFirstParticipantStatement(session, viewOnShardedView, (view) => {
+ return view.find().itcount();
+}, 2);
+
+//
+// A later statement a participant shard receives reading from a view should succeed.
+//
+
+function readFromViewOnLaterParticipantStatement(session, view, viewFunc, numDocsExpected) {
+ session.startTransaction();
+ assert.eq(view.aggregate({$match: {}}).itcount(), numDocsExpected);
+ assert.eq(viewFunc(view), numDocsExpected);
+ assert.commandWorked(session.commitTransaction_forTesting());
+}
+
+// Unsharded view.
+readFromViewOnLaterParticipantStatement(session, unshardedView, (view) => {
+ return view.aggregate({$match: {}}).itcount();
+}, 1);
+readFromViewOnLaterParticipantStatement(session, unshardedView, (view) => {
+ return view.distinct("_id").length;
+}, 1);
+readFromViewOnLaterParticipantStatement(session, unshardedView, (view) => {
+ return view.find().itcount();
+}, 1);
+
+// Sharded view.
+readFromViewOnLaterParticipantStatement(session, shardedView, (view) => {
+ return view.aggregate({$match: {}}).itcount();
+}, 2);
+readFromViewOnLaterParticipantStatement(session, shardedView, (view) => {
+ return view.distinct("_id").length;
+}, 2);
+readFromViewOnLaterParticipantStatement(session, shardedView, (view) => {
+ return view.find().itcount();
+}, 2);
+
+// View on sharded view.
+readFromViewOnLaterParticipantStatement(session, viewOnShardedView, (view) => {
+ return view.aggregate({$match: {}}).itcount();
+}, 2);
+readFromViewOnLaterParticipantStatement(session, viewOnShardedView, (view) => {
+ return view.distinct("_id").length;
+}, 2);
+readFromViewOnLaterParticipantStatement(session, viewOnShardedView, (view) => {
+ return view.find().itcount();
+}, 2);
+
+//
+// Transactions on shards that return a view resolution error on the first statement remain
+// aborted if the shard is not targeted by the retry on the resolved namespace.
+//
+// This may happen when reading from a sharded view, because mongos will target the primary
+// shard first to resolve the view, but the retry on the underlying sharded collection is not
+// guaranteed to target the primary again.
+//
+
+// Assumes the request in viewFunc does not target the primary shard, Shard0.
+function primaryShardNotReTargeted_FirstStatement(session, view, viewFunc, numDocsExpected) {
+ session.startTransaction();
+ assert.eq(viewFunc(view), numDocsExpected);
+
+ // There should not be an in-progress transaction on the primary shard.
+ assert.commandFailedWithCode(st.rs0.getPrimary().getDB("foo").runCommand({
+ find: "bar",
+ lsid: session.getSessionId(),
+ txnNumber: NumberLong(session.getTxnNumber_forTesting()),
+ autocommit: false,
+ }),
+ ErrorCodes.NoSuchTransaction);
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // The transaction should not have been committed on the primary shard.
+ assert.commandFailedWithCode(st.rs0.getPrimary().getDB("foo").runCommand({
+ find: "bar",
+ lsid: session.getSessionId(),
+ txnNumber: NumberLong(session.getTxnNumber_forTesting()),
+ autocommit: false,
+ }),
+ ErrorCodes.NoSuchTransaction);
+}
+
+// This is only possible against sharded views.
+primaryShardNotReTargeted_FirstStatement(session, shardedView, (view) => {
+ return view.aggregate({$match: {_id: 1}}).itcount();
+}, 1);
+primaryShardNotReTargeted_FirstStatement(session, shardedView, (view) => {
+ return view.distinct("_id", {_id: {$gte: 1}}).length;
+}, 1);
+primaryShardNotReTargeted_FirstStatement(session, shardedView, (view) => {
+ return view.find({_id: 1}).itcount();
+}, 1);
+
+// View on sharded view.
+primaryShardNotReTargeted_FirstStatement(session, viewOnShardedView, (view) => {
+ return view.aggregate({$match: {_id: 1}}).itcount();
+}, 1);
+primaryShardNotReTargeted_FirstStatement(session, viewOnShardedView, (view) => {
+ return view.distinct("_id", {_id: {$gte: 1}}).length;
+}, 1);
+primaryShardNotReTargeted_FirstStatement(session, viewOnShardedView, (view) => {
+ return view.find({_id: 1}).itcount();
+}, 1);
+
+//
+// Shards do not abort on a view resolution error if they have already completed a statement for
+// a transaction.
+//
+
+// Assumes the primary shard for the view is Shard0.
+function primaryShardNotReTargeted_LaterStatement(session, view, viewFunc, numDocsExpected) {
+ session.startTransaction();
+ // Complete a statement on the primary shard for the view.
+ assert.eq(view.aggregate({$match: {_id: -1}}).itcount(), 1);
+ // Targets the primary first, but the resolved retry only targets Shard1.
+ assert.eq(viewFunc(view), numDocsExpected);
+ assert.commandWorked(session.commitTransaction_forTesting());
+}
+
+// This is only possible against sharded views.
+primaryShardNotReTargeted_LaterStatement(session, shardedView, (view) => {
+ return view.aggregate({$match: {_id: 1}}).itcount();
+}, 1);
+primaryShardNotReTargeted_LaterStatement(session, shardedView, (view) => {
+ return view.distinct("_id", {_id: {$gte: 1}}).length;
+}, 1);
+primaryShardNotReTargeted_LaterStatement(session, shardedView, (view) => {
+ return view.find({_id: 1}).itcount();
+}, 1);
+
+// View on sharded view.
+primaryShardNotReTargeted_LaterStatement(session, viewOnShardedView, (view) => {
+ return view.aggregate({$match: {_id: 1}}).itcount();
+}, 1);
+primaryShardNotReTargeted_LaterStatement(session, viewOnShardedView, (view) => {
+ return view.distinct("_id", {_id: {$gte: 1}}).length;
+}, 1);
+primaryShardNotReTargeted_LaterStatement(session, viewOnShardedView, (view) => {
+ return view.find({_id: 1}).itcount();
+}, 1);
+
+//
+// Reading from a view using $lookup and $graphLookup should succeed.
+//
+
+function assertAggResultEqInTransaction(coll, pipeline, expected) {
+ session.startTransaction();
+ const resArray = coll.aggregate(pipeline).toArray();
+ assert(arrayEq(resArray, expected), tojson({got: resArray, expected: expected}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+}
+
+// Set up an unsharded collection to use for $lookup. We cannot $lookup into sharded collections.
+// TODO SERVER-29159: Add testing for $lookup into sharded collections in a transaction once that
+// is supported.
+const lookupDbName = "dbForLookup";
+const lookupCollName = "collForLookup";
+assert.writeOK(
+ st.s.getDB(lookupDbName)[lookupCollName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+const lookupColl = session.getDatabase(unshardedDbName)[unshardedCollName];
+
+// Look up the document in the unsharded collection with _id: 1 through the unsharded view.
+assertAggResultEqInTransaction(
+ lookupColl,
+ [
+ {$match: {_id: 1}},
+ {$lookup: {from: unshardedViewName, localField: "_id", foreignField: "_id", as: "matched"}},
+ {$unwind: "$matched"},
+ {$project: {_id: 1, matchedX: "$matched.x"}}
+ ],
+ [{_id: 1, matchedX: "unsharded"}]);
+
+// Find the same document through the view using $graphLookup.
+assertAggResultEqInTransaction(lookupColl,
[
{$match: {_id: 1}},
{
@@ -306,5 +303,5 @@
],
[{_id: 1, matchedX: "unsharded"}]);
- st.stop();
+st.stop();
})();
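
The setup above calls flushRoutersAndRefreshShardMetadata() from jstests/sharding/libs/sharded_transactions_helpers.js, whose body is not shown in this diff. A minimal sketch of an equivalent refresh, assuming the helper only needs to clear the routers' caches and force the shard primaries to refresh the routing table for the sharded namespace (the function name and parameter shapes below are placeholders, not the library's actual API):

    // Hypothetical stand-in for the library helper, for illustration only.
    // 'mongosConns' and 'shardPrimaries' are plain connection objects.
    function refreshRoutersAndShards(mongosConns, shardPrimaries, ns) {
        // Clear each router's routing table cache so its next request fetches
        // fresh metadata from the config servers.
        mongosConns.forEach(function(mongos) {
            assert.commandWorked(mongos.adminCommand({flushRouterConfig: 1}));
        });
        // Force each shard primary to refresh its cached routing table for the
        // namespace, mirroring the direct _flushRoutingTableCacheUpdates calls
        // made elsewhere in these tests.
        shardPrimaries.forEach(function(primary) {
            assert.commandWorked(
                primary.adminCommand({_flushRoutingTableCacheUpdates: ns}));
        });
    }

    // Example usage against the two-shard fixture above:
    // refreshRoutersAndShards([st.s], [st.rs0.getPrimary(), st.rs1.getPrimary()],
    //                         shardedDbName + "." + shardedCollName);
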
diff --git a/jstests/sharding/transactions_writes_not_retryable.js b/jstests/sharding/transactions_writes_not_retryable.js
index d86c7a0e8a5..e6782394ec7 100644
--- a/jstests/sharding/transactions_writes_not_retryable.js
+++ b/jstests/sharding/transactions_writes_not_retryable.js
@@ -4,116 +4,106 @@
* @tags: [requires_sharding, uses_transactions]
*/
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
-
- function runTest(st, session, sessionDB, writeCmdName, writeCmd, isSharded) {
- jsTestLog("Testing " + writeCmdName + ", cmd: " + tojson(writeCmd) + ", sharded: " +
- isSharded);
-
- // Fail with retryable error.
- // Sharding tests require failInternalCommands: true, since the mongos appears to mongod to
- // be an internal client.
- const retryableError = ErrorCodes.InterruptedDueToReplStateChange;
- assert.commandWorked(st.rs0.getPrimary().adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 1},
- data: {
- errorCode: retryableError,
- failCommands: [writeCmdName],
- failInternalCommands: true
- }
- }));
-
- session.startTransaction();
- assert.commandFailedWithCode(
- sessionDB.runCommand(writeCmd),
- retryableError,
- "expected write in transaction not to be retried on retryable error, cmd: " +
- tojson(writeCmd) + ", sharded: " + isSharded);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // Fail with closed connection.
- assert.commandWorked(st.rs0.getPrimary().adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 1},
- data: {
- closeConnection: true,
- failCommands: [writeCmdName],
- failInternalCommands: true
- }
- }));
-
- session.startTransaction();
- let res = assert.commandFailed(
- sessionDB.runCommand(writeCmd),
- "expected write in transaction not to be retried on closed connection, cmd: " +
- tojson(writeCmd) + ", sharded: " + isSharded);
-
- // Network errors during sharded transactions are transient transaction errors, so they're
- // returned as top level codes for all commands, including batch writes.
- assert(ErrorCodes.isNetworkError(res.code),
- "expected network error, got: " + tojson(res.code));
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- assert.commandWorked(
- st.rs0.getPrimary().adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+"use strict";
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
+
+function runTest(st, session, sessionDB, writeCmdName, writeCmd, isSharded) {
+ jsTestLog("Testing " + writeCmdName + ", cmd: " + tojson(writeCmd) + ", sharded: " + isSharded);
+
+ // Fail with retryable error.
+ // Sharding tests require failInternalCommands: true, since the mongos appears to mongod to
+ // be an internal client.
+ const retryableError = ErrorCodes.InterruptedDueToReplStateChange;
+ assert.commandWorked(st.rs0.getPrimary().adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 1},
+ data: {errorCode: retryableError, failCommands: [writeCmdName], failInternalCommands: true}
+ }));
+
+ session.startTransaction();
+ assert.commandFailedWithCode(
+ sessionDB.runCommand(writeCmd),
+ retryableError,
+ "expected write in transaction not to be retried on retryable error, cmd: " +
+ tojson(writeCmd) + ", sharded: " + isSharded);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ // Fail with closed connection.
+ assert.commandWorked(st.rs0.getPrimary().adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 1},
+ data: {closeConnection: true, failCommands: [writeCmdName], failInternalCommands: true}
+ }));
+
+ session.startTransaction();
+ let res = assert.commandFailed(
+ sessionDB.runCommand(writeCmd),
+ "expected write in transaction not to be retried on closed connection, cmd: " +
+ tojson(writeCmd) + ", sharded: " + isSharded);
+
+ // Network errors during sharded transactions are transient transaction errors, so they're
+ // returned as top level codes for all commands, including batch writes.
+ assert(ErrorCodes.isNetworkError(res.code), "expected network error, got: " + tojson(res.code));
+ assert.eq(res.errorLabels, ["TransientTransactionError"]);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ assert.commandWorked(
+ st.rs0.getPrimary().adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+}
+
+const kCmdTestCases = [
+ {
+ name: "insert",
+ command: {insert: collName, documents: [{_id: 6}]},
+ },
+ {
+ name: "update",
+ command: {update: collName, updates: [{q: {_id: 5}, u: {$set: {x: 1}}}]},
+ },
+ {
+ name: "delete",
+ command: {delete: collName, deletes: [{q: {_id: 5}, limit: 1}]},
+ },
+ {
+ name: "findAndModify", // update
+ command: {findAndModify: collName, query: {_id: 5}, update: {$set: {x: 1}}},
+ },
+ {
+ name: "findAndModify", // delete
+ command: {findAndModify: collName, query: {_id: 5}, remove: true},
}
+];
- const kCmdTestCases = [
- {
- name: "insert",
- command: {insert: collName, documents: [{_id: 6}]},
- },
- {
- name: "update",
- command: {update: collName, updates: [{q: {_id: 5}, u: {$set: {x: 1}}}]},
- },
- {
- name: "delete",
- command: {delete: collName, deletes: [{q: {_id: 5}, limit: 1}]},
- },
- {
- name: "findAndModify", // update
- command: {findAndModify: collName, query: {_id: 5}, update: {$set: {x: 1}}},
- },
- {
- name: "findAndModify", // delete
- command: {findAndModify: collName, query: {_id: 5}, remove: true},
- }
- ];
-
- const st = new ShardingTest({shards: 1, config: 1});
-
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
-
- // Unsharded.
- jsTestLog("Testing against unsharded collection");
-
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
-
- kCmdTestCases.forEach(cmdTestCase => {
- runTest(st, session, sessionDB, cmdTestCase.name, cmdTestCase.command, false /*isSharded*/);
- });
-
- // Sharded
- jsTestLog("Testing against sharded collection");
-
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.rs0.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
-
- kCmdTestCases.forEach(cmdTestCase => {
- runTest(st, session, sessionDB, cmdTestCase.name, cmdTestCase.command, true /*isSharded*/);
- });
-
- st.stop();
+const st = new ShardingTest({shards: 1, config: 1});
+
+const session = st.s.startSession();
+const sessionDB = session.getDatabase(dbName);
+
+// Unsharded.
+jsTestLog("Testing against unsharded collection");
+
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
+
+kCmdTestCases.forEach(cmdTestCase => {
+ runTest(st, session, sessionDB, cmdTestCase.name, cmdTestCase.command, false /*isSharded*/);
+});
+
+// Sharded.
+jsTestLog("Testing against sharded collection");
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.rs0.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
+
+kCmdTestCases.forEach(cmdTestCase => {
+ runTest(st, session, sessionDB, cmdTestCase.name, cmdTestCase.command, true /*isSharded*/);
+});
+
+st.stop();
})();
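
Every failure case in the test above follows the same failpoint pattern: aim the failCommand failpoint at the shard primary for exactly one command, run the transactional write through mongos, then turn the failpoint off. A condensed sketch of that pattern, with the command name and error code taken from the test itself:

    // Make the next "insert" that reaches this primary fail with a retryable error.
    // failInternalCommands: true is needed because mongos appears to mongod to be an
    // internal client.
    const primary = st.rs0.getPrimary();
    assert.commandWorked(primary.adminCommand({
        configureFailPoint: "failCommand",
        mode: {times: 1},
        data: {
            errorCode: ErrorCodes.InterruptedDueToReplStateChange,
            failCommands: ["insert"],
            failInternalCommands: true
        }
    }));

    // ... run the write inside the transaction and assert on the returned error ...

    // Always disable the failpoint afterwards so later cases start clean.
    assert.commandWorked(
        primary.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
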
diff --git a/jstests/sharding/txn_agg.js b/jstests/sharding/txn_agg.js
index cd5170a8ce1..7ae71a37b7c 100644
--- a/jstests/sharding/txn_agg.js
+++ b/jstests/sharding/txn_agg.js
@@ -1,117 +1,116 @@
// @tags: [uses_transactions, requires_find_command, uses_multi_shard_transaction]
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 2});
+const st = new ShardingTest({shards: 2});
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard("test", st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard("test", st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: 'test.user', middle: {_id: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: 'test.user', find: {_id: 0}, to: st.shard1.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: 'test.user', middle: {_id: 0}}));
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: 'test.user', find: {_id: 0}, to: st.shard1.shardName}));
- // Preemptively create the collections in the shard since it is not allowed in transactions.
- let coll = st.s.getDB('test').user;
- coll.insert({_id: 1});
- coll.insert({_id: -1});
- coll.remove({});
+// Preemptively create the collections on the shards, since creating collections is not allowed
+// inside a transaction.
+let coll = st.s.getDB('test').user;
+coll.insert({_id: 1});
+coll.insert({_id: -1});
+coll.remove({});
- let unshardedColl = st.s.getDB('test').foo;
- unshardedColl.insert({_id: 0});
- unshardedColl.remove({});
+let unshardedColl = st.s.getDB('test').foo;
+unshardedColl.insert({_id: 0});
+unshardedColl.remove({});
- let session = st.s.startSession();
- let sessionDB = session.getDatabase('test');
- let sessionColl = sessionDB.getCollection('user');
- let sessionUnsharded = sessionDB.getCollection('foo');
+let session = st.s.startSession();
+let sessionDB = session.getDatabase('test');
+let sessionColl = sessionDB.getCollection('user');
+let sessionUnsharded = sessionDB.getCollection('foo');
- // passthrough
+// passthrough
- session.startTransaction();
+session.startTransaction();
- sessionUnsharded.insert({_id: -1});
- sessionUnsharded.insert({_id: 1});
- assert.eq(2, sessionUnsharded.find().itcount());
+sessionUnsharded.insert({_id: -1});
+sessionUnsharded.insert({_id: 1});
+assert.eq(2, sessionUnsharded.find().itcount());
- let res = sessionUnsharded.aggregate([{$match: {_id: {$gte: -200}}}]).toArray();
- assert.eq(2, res.length, tojson(res));
+let res = sessionUnsharded.aggregate([{$match: {_id: {$gte: -200}}}]).toArray();
+assert.eq(2, res.length, tojson(res));
- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());
- // merge on mongos
+// merge on mongos
- session.startTransaction();
+session.startTransaction();
- sessionColl.insert({_id: -1});
- sessionColl.insert({_id: 1});
- assert.eq(2, sessionColl.find().itcount());
+sessionColl.insert({_id: -1});
+sessionColl.insert({_id: 1});
+assert.eq(2, sessionColl.find().itcount());
- res = sessionColl.aggregate([{$match: {_id: {$gte: -200}}}], {allowDiskUse: false}).toArray();
- assert.eq(2, res.length, tojson(res));
+res = sessionColl.aggregate([{$match: {_id: {$gte: -200}}}], {allowDiskUse: false}).toArray();
+assert.eq(2, res.length, tojson(res));
- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());
- // merge on shard. This will require the merging shard to open a cursor on itself.
- session.startTransaction();
+// merge on shard. This will require the merging shard to open a cursor on itself.
+session.startTransaction();
- sessionColl.insert({_id: -1});
- sessionColl.insert({_id: 1});
- assert.eq(2, sessionColl.find().itcount());
+sessionColl.insert({_id: -1});
+sessionColl.insert({_id: 1});
+assert.eq(2, sessionColl.find().itcount());
- res =
- sessionColl
- .aggregate(
- [{$match: {_id: {$gte: -200}}}, {$_internalSplitPipeline: {mergeType: "anyShard"}}])
- .toArray();
- assert.eq(2, res.length, tojson(res));
+res = sessionColl
+ .aggregate(
+ [{$match: {_id: {$gte: -200}}}, {$_internalSplitPipeline: {mergeType: "anyShard"}}])
+ .toArray();
+assert.eq(2, res.length, tojson(res));
- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());
- // Error case: provide a readConcern on an operation which comes in the middle of a transaction.
- session.startTransaction();
+// Error case: provide a readConcern on an operation which comes in the middle of a transaction.
+session.startTransaction();
- sessionColl.insert({_id: -1});
- assert.eq(1, sessionColl.find().itcount());
+sessionColl.insert({_id: -1});
+assert.eq(1, sessionColl.find().itcount());
- const err = assert.throws(
- () => sessionColl.aggregate(
- [{$match: {_id: {$gte: -200}}}, {$_internalSplitPipeline: {mergeType: "anyShard"}}],
- {readConcern: {level: "majority"}}
+const err = assert.throws(
+ () => sessionColl.aggregate(
+ [{$match: {_id: {$gte: -200}}}, {$_internalSplitPipeline: {mergeType: "anyShard"}}],
+ {readConcern: {level: "majority"}}
- ));
- assert.eq(err.code, ErrorCodes.InvalidOptions, err);
+ ));
+assert.eq(err.code, ErrorCodes.InvalidOptions, err);
- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());
- // Insert some data outside of a transaction.
- assert.commandWorked(sessionColl.insert([{_id: -1}, {_id: 0}, {_id: 1}]));
+// Insert some data outside of a transaction.
+assert.commandWorked(sessionColl.insert([{_id: -1}, {_id: 0}, {_id: 1}]));
- // Run an aggregation which requires merging on a shard as the first operation in a transaction.
- session.startTransaction();
- assert.eq(
- [{_id: -1}, {_id: 0}, {_id: 1}],
- sessionColl
- .aggregate([{$_internalSplitPipeline: {mergeType: "primaryShard"}}, {$sort: {_id: 1}}])
- .toArray());
- assert.commandWorked(session.commitTransaction_forTesting());
+// Run an aggregation which requires merging on a shard as the first operation in a transaction.
+session.startTransaction();
+assert.eq(
+ [{_id: -1}, {_id: 0}, {_id: 1}],
+ sessionColl
+ .aggregate([{$_internalSplitPipeline: {mergeType: "primaryShard"}}, {$sort: {_id: 1}}])
+ .toArray());
+assert.commandWorked(session.commitTransaction_forTesting());
- // Move all of the data to shard 1.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: 'test.user', find: {_id: -1}, to: st.shard1.shardName}));
+// Move all of the data to shard 1.
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: 'test.user', find: {_id: -1}, to: st.shard1.shardName}));
- // Be sure that only one shard will be targeted after the moveChunk.
- const pipeline = [{$_internalSplitPipeline: {mergeType: "primaryShard"}}, {$sort: {_id: 1}}];
- const explain = sessionColl.explain().aggregate(pipeline);
- assert.eq(Object.keys(explain.shards), [st.shard1.shardName], explain);
+// Be sure that only one shard will be targeted after the moveChunk.
+const pipeline = [{$_internalSplitPipeline: {mergeType: "primaryShard"}}, {$sort: {_id: 1}}];
+const explain = sessionColl.explain().aggregate(pipeline);
+assert.eq(Object.keys(explain.shards), [st.shard1.shardName], explain);
- // Now run the same aggregation, but again, force shard 0 to be the merger even though it has no
- // chunks for the collection.
- session.startTransaction();
- assert.eq([{_id: -1}, {_id: 0}, {_id: 1}], sessionColl.aggregate(pipeline).toArray());
- assert.commandWorked(session.commitTransaction_forTesting());
+// Now run the same aggregation again, forcing shard 0 to be the merger even though it owns no
+// chunks for the collection.
+session.startTransaction();
+assert.eq([{_id: -1}, {_id: 0}, {_id: 1}], sessionColl.aggregate(pipeline).toArray());
+assert.commandWorked(session.commitTransaction_forTesting());
- st.stop();
+st.stop();
})();
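
The merge-placement checks above combine the test-only $_internalSplitPipeline stage with explain() to see which shards a pipeline targets. A small sketch of that check in isolation, assuming 'coll' is a handle to the sharded collection, such as st.s.getDB('test').user:

    // Force the merging half of the pipeline onto the primary shard, regardless of
    // which shards currently own chunks.
    const pipeline = [{$_internalSplitPipeline: {mergeType: "primaryShard"}}, {$sort: {_id: 1}}];

    // explain() on a sharded aggregation reports the shards that were targeted; the
    // test above asserts this is exactly one shard after the moveChunk.
    const explain = coll.explain().aggregate(pipeline);
    print("targeted shards: " + tojson(Object.keys(explain.shards)));
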
diff --git a/jstests/sharding/txn_being_applied_to_secondary_cannot_be_killed.js b/jstests/sharding/txn_being_applied_to_secondary_cannot_be_killed.js
index 3bc4cce7846..2e5751cc738 100644
--- a/jstests/sharding/txn_being_applied_to_secondary_cannot_be_killed.js
+++ b/jstests/sharding/txn_being_applied_to_secondary_cannot_be_killed.js
@@ -6,101 +6,104 @@
*/
(function() {
- 'use strict';
-
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- // Set to something high enough that a slow machine shouldn't cause our
- // transaction to be aborted before committing, but low enough that the test
- // won't be unnecessarily slow when we wait for the periodic transaction
- // abort job to run.
- TestData.transactionLifetimeLimitSeconds = 10;
-
- const rsOpts = {nodes: 3, settings: {chainingAllowed: false}};
- let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts, rs2: rsOpts}});
-
- const coordinator = st.shard0;
- const participant1 = st.shard1;
- const participant2 = st.shard2;
-
- // Create a sharded collection with a chunk on each shard:
- // shard0: [-inf, 0)
- // shard1: [0, 10)
- // shard2: [10, +inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: coordinator.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
-
- // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
- // from the shards starting, aborting, and restarting the transaction due to needing to
- // refresh after the transaction has started.
- assert.commandWorked(coordinator.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
-
- // Start a new session and start a transaction on that session.
- const session = st.s.startSession();
- session.startTransaction();
-
- // Insert a document onto each shard to make this a cross-shard transaction.
- assert.commandWorked(session.getDatabase(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}, {_id: 5}, {_id: 15}],
- }));
-
- // Set a failpoint to make oplog application hang on one secondary after applying the
- // operations in the transaction but before preparing the TransactionParticipant.
- const applyOpsHangBeforePreparingTransaction = "applyOpsHangBeforePreparingTransaction";
- const firstSecondary = st.rs0.getSecondary();
- assert.commandWorked(firstSecondary.adminCommand({
- configureFailPoint: applyOpsHangBeforePreparingTransaction,
- mode: "alwaysOn",
- }));
-
- // Commit the transaction, which will execute two-phase commit.
- assert.commandWorked(session.commitTransaction_forTesting());
-
- jsTest.log("Verify that the transaction was committed on all shards.");
- // Use assert.soon(), because although coordinateCommitTransaction currently blocks
- // until the commit process is fully complete, it will eventually be changed to only
- // block until the decision is *written*, at which point the test can pass the
- // operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in the
- // read to ensure the read sees the transaction's writes (TODO SERVER-37165).
- assert.soon(function() {
- return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
- });
-
- jsTest.log("Waiting for secondary to apply the prepare oplog entry.");
- waitForFailpoint("Hit " + applyOpsHangBeforePreparingTransaction + " failpoint", 1);
-
- // Wait for the periodic transaction abort job to run while oplog
- // application is hanging. The job should run every 10 seconds due to the
- // transactionLifetimeLimitSeconds parameter being set to 10 above, so the
- // likelihood of it running while sleeping 30 seconds is high. If it does
- // not run, the test will trivially pass without testing the desired
- // behavior, but it will not cause the test to fail.
- sleep(30000);
-
- jsTest.log("Turning off " + applyOpsHangBeforePreparingTransaction + " failpoint.");
- // Allow oplog application to continue by turning off the failpoint. The
- // transaction should prepare successfully and should not have been aborted
- // by the transaction abort job.
- assert.commandWorked(firstSecondary.adminCommand({
- configureFailPoint: applyOpsHangBeforePreparingTransaction,
- mode: "off",
- }));
-
- jsTest.log("Turned off " + applyOpsHangBeforePreparingTransaction + " failpoint.");
-
- st.stop();
+'use strict';
+
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+// Set to something high enough that a slow machine shouldn't cause our
+// transaction to be aborted before committing, but low enough that the test
+// won't be unnecessarily slow when we wait for the periodic transaction
+// abort job to run.
+TestData.transactionLifetimeLimitSeconds = 10;
+
+const rsOpts = {
+ nodes: 3,
+ settings: {chainingAllowed: false}
+};
+let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts, rs2: rsOpts}});
+
+const coordinator = st.shard0;
+const participant1 = st.shard1;
+const participant2 = st.shard2;
+
+// Create a sharded collection with a chunk on each shard:
+// shard0: [-inf, 0)
+// shard1: [0, 10)
+// shard2: [10, +inf)
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: coordinator.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
+
+// These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
+// from the shards starting, aborting, and restarting the transaction due to needing to
+// refresh after the transaction has started.
+assert.commandWorked(coordinator.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+
+// Start a new session and start a transaction on that session.
+const session = st.s.startSession();
+session.startTransaction();
+
+// Insert a document onto each shard to make this a cross-shard transaction.
+assert.commandWorked(session.getDatabase(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+}));
+
+// Set a failpoint to make oplog application hang on one secondary after applying the
+// operations in the transaction but before preparing the TransactionParticipant.
+const applyOpsHangBeforePreparingTransaction = "applyOpsHangBeforePreparingTransaction";
+const firstSecondary = st.rs0.getSecondary();
+assert.commandWorked(firstSecondary.adminCommand({
+ configureFailPoint: applyOpsHangBeforePreparingTransaction,
+ mode: "alwaysOn",
+}));
+
+// Commit the transaction, which will execute two-phase commit.
+assert.commandWorked(session.commitTransaction_forTesting());
+
+jsTest.log("Verify that the transaction was committed on all shards.");
+// Use assert.soon(), because although coordinateCommitTransaction currently blocks
+// until the commit process is fully complete, it will eventually be changed to only
+// block until the decision is *written*, at which point the test can pass the
+// operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in the
+// read to ensure the read sees the transaction's writes (TODO SERVER-37165).
+assert.soon(function() {
+ return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
+});
+
+jsTest.log("Waiting for secondary to apply the prepare oplog entry.");
+waitForFailpoint("Hit " + applyOpsHangBeforePreparingTransaction + " failpoint", 1);
+
+// Wait for the periodic transaction abort job to run while oplog
+// application is hanging. The job should run every 10 seconds due to the
+// transactionLifetimeLimitSeconds parameter being set to 10 above, so the
+// likelihood of it running while sleeping 30 seconds is high. If it does
+// not run, the test will trivially pass without testing the desired
+// behavior, but it will not cause the test to fail.
+sleep(30000);
+
+jsTest.log("Turning off " + applyOpsHangBeforePreparingTransaction + " failpoint.");
+// Allow oplog application to continue by turning off the failpoint. The
+// transaction should prepare successfully and should not have been aborted
+// by the transaction abort job.
+assert.commandWorked(firstSecondary.adminCommand({
+ configureFailPoint: applyOpsHangBeforePreparingTransaction,
+ mode: "off",
+}));
+
+jsTest.log("Turned off " + applyOpsHangBeforePreparingTransaction + " failpoint.");
+
+st.stop();
})();
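
The 10-second limit above is set through TestData.transactionLifetimeLimitSeconds before the fixture starts, which the test infrastructure is assumed to pass to each node as a startup setParameter. transactionLifetimeLimitSeconds is an ordinary server parameter, so the same limit can also be read or adjusted on a running node, for example:

    // Read the current limit on a shard primary.
    const res = assert.commandWorked(st.rs0.getPrimary().adminCommand(
        {getParameter: 1, transactionLifetimeLimitSeconds: 1}));
    print("transactionLifetimeLimitSeconds: " + res.transactionLifetimeLimitSeconds);

    // Lower it at runtime so the periodic abort job treats transactions as expired sooner.
    assert.commandWorked(st.rs0.getPrimary().adminCommand(
        {setParameter: 1, transactionLifetimeLimitSeconds: 10}));
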
diff --git a/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js b/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
index ce643ed3ad1..fc6137f2ff7 100644
--- a/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
+++ b/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
@@ -10,355 +10,352 @@
*/
(function() {
- 'use strict';
-
- load("jstests/libs/write_concern_util.js");
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- // Waits for the given log to appear a number of times in the shell's rawMongoProgramOutput.
- // Loops because it is not guaranteed the program output will immediately contain all lines
- // logged at an earlier wall clock time.
- function waitForLog(logLine, times) {
- assert.soon(function() {
- const matches = rawMongoProgramOutput().match(new RegExp(logLine, "g")) || [];
- return matches.length === times;
- }, 'Failed to find "' + logLine + '" logged ' + times + ' times');
- }
-
- const addTxnFields = function(command, lsid, txnNumber, startTransaction) {
- let txnFields = {
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false,
- };
- if (startTransaction) {
- txnFields.startTransaction = true;
- }
- return Object.assign({}, command, txnFields);
- };
-
- const defaultCommitCommand = {
- commitTransaction: 1,
- writeConcern: {w: "majority", wtimeout: 6000}
- };
- const noop = () => {};
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- // TODO (SERVER-37364): Uncomment this line; otherwise, the coordinator will wait too long to
- // time out waiting for votes and the test will time out.
- // Lower the transaction timeout, since this test exercises cases where the coordinator should
- // time out collecting prepare votes.
- // TestData.transactionLifetimeLimitSeconds = 30;
-
- let st = new ShardingTest({
- shards: 3,
- // Create shards with more than one node because we test for writeConcern majority failing.
- config: 1,
- other: {
- mongosOptions: {verbose: 3},
- rs0: {nodes: [{}, {rsConfig: {priority: 0}}]},
- rs1: {nodes: [{}, {rsConfig: {priority: 0}}]},
- rs2: {nodes: [{}, {rsConfig: {priority: 0}}]},
- },
- });
-
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard1.shardName}));
-
- // Create a "dummy" collection for doing noop writes to advance shard's last applied OpTimes.
- assert.commandWorked(st.s.getDB(dbName).getCollection("dummy").insert({dummy: 1}));
-
- // The test uses three shards with one chunk each in order to control which shards are targeted
- // for each statement:
- //
- // (-inf, 0): shard key = txnNumber * -1
- // (0, MAX_TRANSACTIONS): shard key = txnNumber
- // (MAX_TRANSACTIONS, +inf): shard key = txnNumber + MAX_TRANSACTIONS
- //
- // So, if the test ever exceeds txnNumber transactions, statements that are meant to target the
- // middle chunk will instead target the highest chunk. To fix this, increase MAX_TRANSACTIONS.
- const MAX_TRANSACTIONS = 10000;
-
- // Create a sharded collection with a chunk on each shard:
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: MAX_TRANSACTIONS}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: -1}, to: st.shard0.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: MAX_TRANSACTIONS}, to: st.shard2.shardName}));
-
- // Insert something into each chunk so that a multi-update actually results in a write on each
- // shard (otherwise the shard may remain read-only). This also ensures all the routers and
- // shards have fresh routing table caches, so they do not need to be refreshed separately.
- assert.commandWorked(st.s.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -1 * MAX_TRANSACTIONS}, {_id: 0}, {_id: MAX_TRANSACTIONS}]
- }));
-
- let lsid = {id: UUID()};
- let txnNumber = 1;
-
- const readShard0 = txnNumber => {
- return {find: collName, filter: {_id: (-1 * txnNumber)}};
+'use strict';
+
+load("jstests/libs/write_concern_util.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+// Waits for the given log to appear a number of times in the shell's rawMongoProgramOutput.
+// Loops because it is not guaranteed the program output will immediately contain all lines
+// logged at an earlier wall clock time.
+function waitForLog(logLine, times) {
+ assert.soon(function() {
+ const matches = rawMongoProgramOutput().match(new RegExp(logLine, "g")) || [];
+ return matches.length === times;
+ }, 'Failed to find "' + logLine + '" logged ' + times + ' times');
+}
+
+const addTxnFields = function(command, lsid, txnNumber, startTransaction) {
+ let txnFields = {
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ autocommit: false,
};
-
- const readShard1 = txnNumber => {
- return {find: collName, filter: {_id: txnNumber}};
- };
-
- const readShard2 = txnNumber => {
- return {find: collName, filter: {_id: (MAX_TRANSACTIONS + txnNumber)}};
- };
-
- const readAllShards = () => {
- return {find: collName};
- };
-
- const writeShard0 = txnNumber => {
- return {
- update: collName,
- updates: [
- {q: {_id: (txnNumber * -1)}, u: {_id: (txnNumber * -1), updated: 1}, upsert: true}
- ],
- };
+ if (startTransaction) {
+ txnFields.startTransaction = true;
+ }
+ return Object.assign({}, command, txnFields);
+};
+
+const defaultCommitCommand = {
+ commitTransaction: 1,
+ writeConcern: {w: "majority", wtimeout: 6000}
+};
+const noop = () => {};
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+// TODO (SERVER-37364): Uncomment this line; otherwise, the coordinator will wait too long to
+// time out waiting for votes and the test will time out.
+// Lower the transaction timeout, since this test exercises cases where the coordinator should
+// time out collecting prepare votes.
+// TestData.transactionLifetimeLimitSeconds = 30;
+
+let st = new ShardingTest({
+ shards: 3,
+ // Create shards with more than one node because we test for writeConcern majority failing.
+ config: 1,
+ other: {
+ mongosOptions: {verbose: 3},
+ rs0: {nodes: [{}, {rsConfig: {priority: 0}}]},
+ rs1: {nodes: [{}, {rsConfig: {priority: 0}}]},
+ rs2: {nodes: [{}, {rsConfig: {priority: 0}}]},
+ },
+});
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard1.shardName}));
+
+// Create a "dummy" collection for doing noop writes to advance shard's last applied OpTimes.
+assert.commandWorked(st.s.getDB(dbName).getCollection("dummy").insert({dummy: 1}));
+
+// The test uses three shards with one chunk each in order to control which shards are targeted
+// for each statement:
+//
+// (-inf, 0): shard key = txnNumber * -1
+// (0, MAX_TRANSACTIONS): shard key = txnNumber
+// (MAX_TRANSACTIONS, +inf): shard key = txnNumber + MAX_TRANSACTIONS
+//
+// So, if the test ever exceeds txnNumber transactions, statements that are meant to target the
+// middle chunk will instead target the highest chunk. To fix this, increase MAX_TRANSACTIONS.
+const MAX_TRANSACTIONS = 10000;
+
+// Create a sharded collection with a chunk on each shard:
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: MAX_TRANSACTIONS}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: -1}, to: st.shard0.shardName}));
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: MAX_TRANSACTIONS}, to: st.shard2.shardName}));
+
+// Insert something into each chunk so that a multi-update actually results in a write on each
+// shard (otherwise the shard may remain read-only). This also ensures all the routers and
+// shards have fresh routing table caches, so they do not need to be refreshed separately.
+assert.commandWorked(st.s.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -1 * MAX_TRANSACTIONS}, {_id: 0}, {_id: MAX_TRANSACTIONS}]
+}));
+
+let lsid = {id: UUID()};
+let txnNumber = 1;
+
+const readShard0 = txnNumber => {
+ return {find: collName, filter: {_id: (-1 * txnNumber)}};
+};
+
+const readShard1 = txnNumber => {
+ return {find: collName, filter: {_id: txnNumber}};
+};
+
+const readShard2 = txnNumber => {
+ return {find: collName, filter: {_id: (MAX_TRANSACTIONS + txnNumber)}};
+};
+
+const readAllShards = () => {
+ return {find: collName};
+};
+
+const writeShard0 = txnNumber => {
+ return {
+ update: collName,
+ updates:
+ [{q: {_id: (txnNumber * -1)}, u: {_id: (txnNumber * -1), updated: 1}, upsert: true}],
};
+};
- const writeShard1 = txnNumber => {
- return {
- update: collName,
- updates: [{q: {_id: txnNumber}, u: {_id: txnNumber, updated: 1}, upsert: true}],
- };
+const writeShard1 = txnNumber => {
+ return {
+ update: collName,
+ updates: [{q: {_id: txnNumber}, u: {_id: txnNumber, updated: 1}, upsert: true}],
};
-
- const writeShard2 = txnNumber => {
- return {
- update: collName,
- updates: [{
- q: {_id: (txnNumber + MAX_TRANSACTIONS)},
- u: {_id: (txnNumber + MAX_TRANSACTIONS), updated: 1},
- upsert: true
- }],
- };
+};
+
+const writeShard2 = txnNumber => {
+ return {
+ update: collName,
+ updates: [{
+ q: {_id: (txnNumber + MAX_TRANSACTIONS)},
+ u: {_id: (txnNumber + MAX_TRANSACTIONS), updated: 1},
+ upsert: true
+ }],
};
+};
- const writeAllShards = () => {
- return {
- update: collName,
- updates: [{q: {}, u: {$inc: {updated: 1}}, multi: true}],
- };
+const writeAllShards = () => {
+ return {
+ update: collName,
+ updates: [{q: {}, u: {$inc: {updated: 1}}, multi: true}],
};
-
- // For each transaction type, contains the list of statements for that type.
- const transactionTypes = {
- readOnlySingleShardSingleStatementExpectSingleShardCommit: txnNumber => {
- return [readShard0(txnNumber)];
- },
- readOnlySingleShardMultiStatementExpectSingleShardCommit: txnNumber => {
- return [readShard0(txnNumber), readShard0(txnNumber)];
+};
+
+// For each transaction type, contains the list of statements for that type.
+const transactionTypes = {
+ readOnlySingleShardSingleStatementExpectSingleShardCommit: txnNumber => {
+ return [readShard0(txnNumber)];
+ },
+ readOnlySingleShardMultiStatementExpectSingleShardCommit: txnNumber => {
+ return [readShard0(txnNumber), readShard0(txnNumber)];
+ },
+ readOnlyMultiShardSingleStatementExpectReadOnlyCommit: txnNumber => {
+ return [readAllShards(txnNumber)];
+ },
+ readOnlyMultiShardMultiStatementExpectReadOnlyCommit: txnNumber => {
+ return [readShard0(txnNumber), readShard1(txnNumber), readShard2(txnNumber)];
+ },
+ writeSingleShardSingleStatementExpectSingleShardCommit: txnNumber => {
+ return [writeShard0(txnNumber)];
+ },
+ writeSingleShardMultiStatementExpectSingleShardCommit: txnNumber => {
+ return [writeShard0(txnNumber), writeShard0(txnNumber)];
+ },
+ writeMultiShardSingleStatementExpectTwoPhaseCommit: txnNumber => {
+ return [writeAllShards(txnNumber)];
+ },
+ writeMultiShardMultiStatementExpectTwoPhaseCommit: txnNumber => {
+ return [writeShard0(txnNumber), writeShard1(txnNumber), writeShard2(txnNumber)];
+ },
+ readWriteSingleShardExpectSingleShardCommit: txnNumber => {
+ return [readShard0(txnNumber), writeShard0(txnNumber)];
+ },
+ writeReadSingleShardExpectSingleShardCommit: txnNumber => {
+ return [writeShard0(txnNumber), readShard0(txnNumber)];
+ },
+ readOneShardWriteOtherShardExpectSingleWriteShardCommit: txnNumber => {
+ return [readShard0(txnNumber), writeShard1(txnNumber)];
+ },
+ writeOneShardReadOtherShardExpectSingleWriteShardCommit: txnNumber => {
+ return [writeShard0(txnNumber), readShard1(txnNumber)];
+ },
+ readOneShardWriteTwoOtherShardsExpectTwoPhaseCommit: txnNumber => {
+ return [readShard0(txnNumber), writeShard1(txnNumber), writeShard2(txnNumber)];
+ },
+ writeTwoShardsReadOneOtherShardExpectTwoPhaseCommit: txnNumber => {
+ return [writeShard0(txnNumber), writeShard1(txnNumber), readShard2(txnNumber)];
+ },
+};
+
+const failureModes = {
+ noFailures: {
+ beforeStatements: noop,
+ beforeCommit: noop,
+ getCommitCommand: (lsid, txnNumber) => {
+ return addTxnFields(defaultCommitCommand, lsid, txnNumber);
},
- readOnlyMultiShardSingleStatementExpectReadOnlyCommit: txnNumber => {
- return [readAllShards(txnNumber)];
+ checkCommitResult: (res) => {
+ // Commit should return ok without writeConcern error
+ assert.commandWorked(res);
+ assert.eq(null, res.errorLabels);
},
- readOnlyMultiShardMultiStatementExpectReadOnlyCommit: txnNumber => {
- return [readShard0(txnNumber), readShard1(txnNumber), readShard2(txnNumber)];
+ cleanUp: noop,
+ },
+ participantStepsDownBeforeClientSendsCommit: {
+ beforeStatements: noop,
+ beforeCommit: () => {
+ // Participant primary steps down.
+ assert.commandWorked(
+ st.shard0.adminCommand({replSetStepDown: 1 /* stepDownSecs */, force: true}));
},
- writeSingleShardSingleStatementExpectSingleShardCommit: txnNumber => {
- return [writeShard0(txnNumber)];
+ getCommitCommand: (lsid, txnNumber) => {
+ return addTxnFields(defaultCommitCommand, lsid, txnNumber);
},
- writeSingleShardMultiStatementExpectSingleShardCommit: txnNumber => {
- return [writeShard0(txnNumber), writeShard0(txnNumber)];
+ checkCommitResult: (res) => {
+ // Commit should return NoSuchTransaction.
+ assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
+ assert.eq(["TransientTransactionError"], res.errorLabels);
},
- writeMultiShardSingleStatementExpectTwoPhaseCommit: txnNumber => {
- return [writeAllShards(txnNumber)];
+ cleanUp: () => {
+ st.rs0.awaitNodesAgreeOnPrimary();
},
- writeMultiShardMultiStatementExpectTwoPhaseCommit: txnNumber => {
- return [writeShard0(txnNumber), writeShard1(txnNumber), writeShard2(txnNumber)];
+ },
+ participantCannotMajorityCommitWritesClientSendsWriteConcernMajority: {
+ beforeStatements: () => {
+ // Participant cannot majority commit writes.
+ stopServerReplication(st.rs0.getSecondaries());
+
+ // Do a write on rs0 through the router outside the transaction to ensure the
+ // transaction will choose a read time that has not been majority committed.
+ assert.commandWorked(st.s.getDB(dbName).getCollection("dummy").insert({dummy: 1}));
},
- readWriteSingleShardExpectSingleShardCommit: txnNumber => {
- return [readShard0(txnNumber), writeShard0(txnNumber)];
+ beforeCommit: noop,
+ getCommitCommand: (lsid, txnNumber) => {
+ return addTxnFields(defaultCommitCommand, lsid, txnNumber);
},
- writeReadSingleShardExpectSingleShardCommit: txnNumber => {
- return [writeShard0(txnNumber), readShard0(txnNumber)];
+ checkCommitResult: (res) => {
+ // Commit should return ok with a writeConcernError with wtimeout.
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ checkWriteConcernTimedOut(res);
+ assert.eq(null, res.errorLabels);
},
- readOneShardWriteOtherShardExpectSingleWriteShardCommit: txnNumber => {
- return [readShard0(txnNumber), writeShard1(txnNumber)];
+ cleanUp: () => {
+ restartServerReplication(st.rs0.getSecondaries());
},
- writeOneShardReadOtherShardExpectSingleWriteShardCommit: txnNumber => {
- return [writeShard0(txnNumber), readShard1(txnNumber)];
+ },
+ participantCannotMajorityCommitWritesClientSendsWriteConcern1: {
+ beforeStatements: () => {
+ // Participant cannot majority commit writes.
+ stopServerReplication(st.rs0.getSecondaries());
+
+ // Do a write on rs0 through the router outside the transaction to ensure the
+ // transaction will choose a read time that has not been majority committed.
+ assert.commandWorked(st.s.getDB(dbName).getCollection("dummy").insert({dummy: 1}));
},
- readOneShardWriteTwoOtherShardsExpectTwoPhaseCommit: txnNumber => {
- return [readShard0(txnNumber), writeShard1(txnNumber), writeShard2(txnNumber)];
+ beforeCommit: noop,
+ getCommitCommand: (lsid, txnNumber) => {
+ return addTxnFields({commitTransaction: 1, writeConcern: {w: 1}}, lsid, txnNumber);
},
- writeTwoShardsReadOneOtherShardExpectTwoPhaseCommit: txnNumber => {
- return [writeShard0(txnNumber), writeShard1(txnNumber), readShard2(txnNumber)];
- },
- };
-
- const failureModes = {
- noFailures: {
- beforeStatements: noop,
- beforeCommit: noop,
- getCommitCommand: (lsid, txnNumber) => {
- return addTxnFields(defaultCommitCommand, lsid, txnNumber);
- },
- checkCommitResult: (res) => {
- // Commit should return ok without writeConcern error
- assert.commandWorked(res);
- assert.eq(null, res.errorLabels);
- },
- cleanUp: noop,
+ checkCommitResult: (res) => {
+ // Commit should return ok without writeConcern error
+ assert.commandWorked(res);
+ assert.eq(null, res.errorLabels);
},
- participantStepsDownBeforeClientSendsCommit: {
- beforeStatements: noop,
- beforeCommit: () => {
- // Participant primary steps down.
- assert.commandWorked(
- st.shard0.adminCommand({replSetStepDown: 1 /* stepDownSecs */, force: true}));
- },
- getCommitCommand: (lsid, txnNumber) => {
- return addTxnFields(defaultCommitCommand, lsid, txnNumber);
- },
- checkCommitResult: (res) => {
- // Commit should return NoSuchTransaction.
- assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
- assert.eq(["TransientTransactionError"], res.errorLabels);
- },
- cleanUp: () => {
- st.rs0.awaitNodesAgreeOnPrimary();
- },
+ cleanUp: () => {
+ restartServerReplication(st.rs0.getSecondaries());
},
- participantCannotMajorityCommitWritesClientSendsWriteConcernMajority: {
- beforeStatements: () => {
- // Participant cannot majority commit writes.
- stopServerReplication(st.rs0.getSecondaries());
-
- // Do a write on rs0 through the router outside the transaction to ensure the
- // transaction will choose a read time that has not been majority committed.
- assert.commandWorked(st.s.getDB(dbName).getCollection("dummy").insert({dummy: 1}));
- },
- beforeCommit: noop,
- getCommitCommand: (lsid, txnNumber) => {
- return addTxnFields(defaultCommitCommand, lsid, txnNumber);
- },
- checkCommitResult: (res) => {
- // Commit should return ok with a writeConcernError with wtimeout.
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- checkWriteConcernTimedOut(res);
- assert.eq(null, res.errorLabels);
- },
- cleanUp: () => {
- restartServerReplication(st.rs0.getSecondaries());
- },
+ },
+ clientSendsInvalidWriteConcernOnCommit: {
+ beforeStatements: noop,
+ beforeCommit: noop,
+ getCommitCommand: (lsid, txnNumber) => {
+ // Client sends invalid writeConcern on commit.
+ return addTxnFields(
+ {commitTransaction: 1, writeConcern: {w: "invalid"}}, lsid, txnNumber);
},
- participantCannotMajorityCommitWritesClientSendsWriteConcern1: {
- beforeStatements: () => {
- // Participant cannot majority commit writes.
- stopServerReplication(st.rs0.getSecondaries());
-
- // Do a write on rs0 through the router outside the transaction to ensure the
- // transaction will choose a read time that has not been majority committed.
- assert.commandWorked(st.s.getDB(dbName).getCollection("dummy").insert({dummy: 1}));
- },
- beforeCommit: noop,
- getCommitCommand: (lsid, txnNumber) => {
- return addTxnFields({commitTransaction: 1, writeConcern: {w: 1}}, lsid, txnNumber);
- },
- checkCommitResult: (res) => {
- // Commit should return ok without writeConcern error
- assert.commandWorked(res);
- assert.eq(null, res.errorLabels);
- },
- cleanUp: () => {
- restartServerReplication(st.rs0.getSecondaries());
- },
+ checkCommitResult: (res) => {
+ // Commit should return ok with writeConcernError without wtimeout.
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assertWriteConcernError(res);
+ assert.eq(ErrorCodes.UnknownReplWriteConcern, res.writeConcernError.code);
+ assert.eq(null, res.writeConcernError.errInfo); // errInfo only set for wtimeout
+ assert.eq(null, res.errorLabels);
},
- clientSendsInvalidWriteConcernOnCommit: {
- beforeStatements: noop,
- beforeCommit: noop,
- getCommitCommand: (lsid, txnNumber) => {
- // Client sends invalid writeConcern on commit.
- return addTxnFields(
- {commitTransaction: 1, writeConcern: {w: "invalid"}}, lsid, txnNumber);
- },
- checkCommitResult: (res) => {
- // Commit should return ok with writeConcernError without wtimeout.
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assertWriteConcernError(res);
- assert.eq(ErrorCodes.UnknownReplWriteConcern, res.writeConcernError.code);
- assert.eq(null, res.writeConcernError.errInfo); // errInfo only set for wtimeout
- assert.eq(null, res.errorLabels);
- },
- cleanUp: noop,
- },
- };
+ cleanUp: noop,
+ },
+};
+
+for (const failureModeName in failureModes) {
+ for (const type in transactionTypes) {
+ // TODO (SERVER-37364): Unblacklist these test cases once the coordinator returns the
+ // decision as soon as the decision is made. At the moment, the coordinator makes an
+ // abort decision after timing out waiting for votes, but coordinateCommitTransaction
+ // hangs because it waits for the decision to be majority-ack'd by all participants,
+ // which can't happen while a participant can't majority commit writes.
+ if (failureModeName.includes("participantCannotMajorityCommitWrites") &&
+ type.includes("ExpectTwoPhaseCommit")) {
+ jsTest.log(
+ `${failureModeName} with ${type} is skipped until SERVER-37364 is implemented`);
+ continue;
+ }
- for (const failureModeName in failureModes) {
- for (const type in transactionTypes) {
- // TODO (SERVER-37364): Unblacklist these test cases once the coordinator returns the
- // decision as soon as the decision is made. At the moment, the coordinator makes an
- // abort decision after timing out waiting for votes, but coordinateCommitTransaction
- // hangs because it waits for the decision to be majority-ack'd by all participants,
- // which can't happen while a participant can't majority commit writes.
- if (failureModeName.includes("participantCannotMajorityCommitWrites") &&
- type.includes("ExpectTwoPhaseCommit")) {
- jsTest.log(
- `${failureModeName} with ${type} is skipped until SERVER-37364 is implemented`);
- continue;
- }
-
- txnNumber++;
- assert.lt(txnNumber,
- MAX_TRANSACTIONS,
- "Test exceeded maximum number of transactions allowable by the test's chunk" +
- " distribution created during the test setup. Please increase" +
- " MAX_TRANSACTIONS in the test.");
-
- jsTest.log(`Testing ${failureModeName} with ${type} at txnNumber ${txnNumber}`);
-
- const failureMode = failureModes[failureModeName];
-
- // Run the statements.
- failureMode.beforeStatements();
- let startTransaction = true;
- transactionTypes[type](txnNumber).forEach(command => {
- assert.commandWorked(st.s.getDB(dbName).runCommand(
- addTxnFields(command, lsid, txnNumber, startTransaction)));
- startTransaction = false;
- });
-
- // Run commit.
- const commitCmd = failureMode.getCommitCommand(lsid, txnNumber);
- failureMode.beforeCommit();
- const commitRes = st.s.adminCommand(commitCmd);
- failureMode.checkCommitResult(commitRes);
-
- // Re-running commit should return the same response.
- const commitRetryRes = st.s.adminCommand(commitCmd);
- failureMode.checkCommitResult(commitRetryRes);
-
- if (type.includes("ExpectSingleShardCommit")) {
- waitForLog("Committing single-shard transaction", 2);
- } else if (type.includes("ExpectReadOnlyCommit")) {
- waitForLog("Committing read-only transaction", 2);
- } else if (type.includes("ExpectSingleWriteShardCommit")) {
- waitForLog("Committing single-write-shard transaction", 2);
- } else if (type.includes("ExpectTwoPhaseCommit")) {
- waitForLog("Committing using two-phase commit", 2);
- } else {
- assert(false, `Unknown transaction type: ${type}`);
- }
-
- clearRawMongoProgramOutput();
-
- failureMode.cleanUp();
+ txnNumber++;
+ assert.lt(txnNumber,
+ MAX_TRANSACTIONS,
+ "Test exceeded maximum number of transactions allowable by the test's chunk" +
+ " distribution created during the test setup. Please increase" +
+ " MAX_TRANSACTIONS in the test.");
+
+ jsTest.log(`Testing ${failureModeName} with ${type} at txnNumber ${txnNumber}`);
+
+ const failureMode = failureModes[failureModeName];
+
+ // Run the statements.
+ failureMode.beforeStatements();
+ let startTransaction = true;
+ transactionTypes[type](txnNumber).forEach(command => {
+ assert.commandWorked(st.s.getDB(dbName).runCommand(
+ addTxnFields(command, lsid, txnNumber, startTransaction)));
+ startTransaction = false;
+ });
+
+ // Run commit.
+ const commitCmd = failureMode.getCommitCommand(lsid, txnNumber);
+ failureMode.beforeCommit();
+ const commitRes = st.s.adminCommand(commitCmd);
+ failureMode.checkCommitResult(commitRes);
+
+ // Re-running commit should return the same response.
+ const commitRetryRes = st.s.adminCommand(commitCmd);
+ failureMode.checkCommitResult(commitRetryRes);
+
+ if (type.includes("ExpectSingleShardCommit")) {
+ waitForLog("Committing single-shard transaction", 2);
+ } else if (type.includes("ExpectReadOnlyCommit")) {
+ waitForLog("Committing read-only transaction", 2);
+ } else if (type.includes("ExpectSingleWriteShardCommit")) {
+ waitForLog("Committing single-write-shard transaction", 2);
+ } else if (type.includes("ExpectTwoPhaseCommit")) {
+ waitForLog("Committing using two-phase commit", 2);
+ } else {
+ assert(false, `Unknown transaction type: ${type}`);
}
- }
- st.stop();
+ clearRawMongoProgramOutput();
+
+ failureMode.cleanUp();
+ }
+}
+st.stop();
})();
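The loop above tags every statement and the commit with the same session and transaction identity via addTxnFields, which is defined in jstests/sharding/libs/sharded_transactions_helpers.js and not shown in this diff. Purely as a hedged approximation of what such a helper has to attach, inferred from the raw command shapes visible in these tests, a minimal standalone version could look like the sketch below; the real helper may differ (for example, it may also set stmtId).

// Illustrative sketch only: attach the transaction identity that the tests above
// expect on every statement and on commitTransaction. Field names mirror the raw
// commands built elsewhere in these tests (lsid, txnNumber, autocommit, and
// startTransaction on the first statement).
function addTxnFieldsSketch(command, lsid, txnNumber, startTransaction) {
    const cmdWithTxnFields = Object.assign({}, command, {
        lsid: lsid,
        txnNumber: NumberLong(txnNumber),
        autocommit: false,
    });
    if (startTransaction) {
        // Only the first statement of a transaction carries startTransaction: true.
        cmdWithTxnFields.startTransaction = true;
    }
    return cmdWithTxnFields;
}

// Example: addTxnFieldsSketch({commitTransaction: 1}, {id: UUID()}, 5) yields
// {commitTransaction: 1, lsid: {...}, txnNumber: NumberLong(5), autocommit: false}.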
diff --git a/jstests/sharding/txn_recover_decision_using_recovery_router.js b/jstests/sharding/txn_recover_decision_using_recovery_router.js
index 0c5d1cf204c..d148c0fdfbf 100644
--- a/jstests/sharding/txn_recover_decision_using_recovery_router.js
+++ b/jstests/sharding/txn_recover_decision_using_recovery_router.js
@@ -9,559 +9,550 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
-
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
- load("jstests/libs/write_concern_util.js");
-
- // The test modifies config.transactions, which must be done outside of a session.
- TestData.disableImplicitSessions = true;
-
- // Reducing this from the resmoke default, which is several hours, so that tests that rely on a
- // transaction coordinator being canceled after a timeout happen in a reasonable amount of time.
- TestData.transactionLifetimeLimitSeconds = 15;
-
- const readFromShard0 = function({lsid, txnNumber, startTransaction}) {
- let findDocumentOnShard0Command = {
- find: 'user',
- filter: {x: -1},
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- };
-
- if (startTransaction) {
- findDocumentOnShard0Command.startTransaction = true;
- }
-
- let res = assert.commandWorked(testDB.runCommand(findDocumentOnShard0Command));
- assert.neq(null, res.recoveryToken);
- return res.recoveryToken;
+"use strict";
+
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/libs/write_concern_util.js");
+
+// The test modifies config.transactions, which must be done outside of a session.
+TestData.disableImplicitSessions = true;
+
+// Reducing this from the resmoke default, which is several hours, so that tests that rely on a
+// transaction coordinator being canceled after a timeout happen in a reasonable amount of time.
+TestData.transactionLifetimeLimitSeconds = 15;
+
+const readFromShard0 = function({lsid, txnNumber, startTransaction}) {
+ let findDocumentOnShard0Command = {
+ find: 'user',
+ filter: {x: -1},
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
};
- const readFromShard1 = function({lsid, txnNumber, startTransaction}) {
- let findDocumentOnShard1Command = {
- find: 'user',
- filter: {x: 1},
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- };
-
- if (startTransaction) {
- findDocumentOnShard1Command.startTransaction = true;
- }
-
- let res = assert.commandWorked(testDB.runCommand(findDocumentOnShard1Command));
- assert.neq(null, res.recoveryToken);
- return res.recoveryToken;
+ if (startTransaction) {
+ findDocumentOnShard0Command.startTransaction = true;
+ }
+
+ let res = assert.commandWorked(testDB.runCommand(findDocumentOnShard0Command));
+ assert.neq(null, res.recoveryToken);
+ return res.recoveryToken;
+};
+
+const readFromShard1 = function({lsid, txnNumber, startTransaction}) {
+ let findDocumentOnShard1Command = {
+ find: 'user',
+ filter: {x: 1},
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
};
- const writeToShard0 = function({lsid, txnNumber, startTransaction}) {
- const updateDocumentOnShard0 = {
- q: {x: -1},
- u: {"$set": {lastTxnNumber: txnNumber}},
- upsert: true
- };
-
- let updateDocumentOnShard0Command = {
- update: 'user',
- updates: [updateDocumentOnShard0],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- };
-
- if (startTransaction) {
- updateDocumentOnShard0Command.startTransaction = true;
- }
-
- let res = assert.commandWorked(testDB.runCommand(updateDocumentOnShard0Command));
- assert.neq(null, res.recoveryToken);
- return res.recoveryToken;
+ if (startTransaction) {
+ findDocumentOnShard1Command.startTransaction = true;
+ }
+
+ let res = assert.commandWorked(testDB.runCommand(findDocumentOnShard1Command));
+ assert.neq(null, res.recoveryToken);
+ return res.recoveryToken;
+};
+
+const writeToShard0 = function({lsid, txnNumber, startTransaction}) {
+ const updateDocumentOnShard0 = {
+ q: {x: -1},
+ u: {"$set": {lastTxnNumber: txnNumber}},
+ upsert: true
};
- const writeToShard1 = function({lsid, txnNumber, startTransaction}) {
- const updateDocumentOnShard1 = {
- q: {x: 1},
- u: {"$set": {lastTxnNumber: txnNumber}},
- upsert: true
- };
-
- let updateDocumentOnShard1Command = {
- update: 'user',
- updates: [updateDocumentOnShard1],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- };
-
- if (startTransaction) {
- updateDocumentOnShard1Command.startTransaction = true;
- }
-
- let res = assert.commandWorked(testDB.runCommand(updateDocumentOnShard1Command));
- assert.neq(null, res.recoveryToken);
- return res.recoveryToken;
+ let updateDocumentOnShard0Command = {
+ update: 'user',
+ updates: [updateDocumentOnShard0],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
};
- const startNewSingleShardReadOnlyTransaction = function() {
- const recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
- assert.eq(null, recoveryToken.recoveryShardId);
- return recoveryToken;
+ if (startTransaction) {
+ updateDocumentOnShard0Command.startTransaction = true;
+ }
+
+ let res = assert.commandWorked(testDB.runCommand(updateDocumentOnShard0Command));
+ assert.neq(null, res.recoveryToken);
+ return res.recoveryToken;
+};
+
+const writeToShard1 = function({lsid, txnNumber, startTransaction}) {
+ const updateDocumentOnShard1 = {
+ q: {x: 1},
+ u: {"$set": {lastTxnNumber: txnNumber}},
+ upsert: true
};
- const startNewSingleShardWriteTransaction = function() {
- const recoveryToken = writeToShard0({lsid, txnNumber, startTransaction: true});
- assert.neq(null, recoveryToken.recoveryShardId);
- return recoveryToken;
+ let updateDocumentOnShard1Command = {
+ update: 'user',
+ updates: [updateDocumentOnShard1],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
};
- const startNewMultiShardReadOnlyTransaction = function() {
- let recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
- assert.eq(null, recoveryToken.recoveryShardId);
+ if (startTransaction) {
+ updateDocumentOnShard1Command.startTransaction = true;
+ }
+
+ let res = assert.commandWorked(testDB.runCommand(updateDocumentOnShard1Command));
+ assert.neq(null, res.recoveryToken);
+ return res.recoveryToken;
+};
+
+const startNewSingleShardReadOnlyTransaction = function() {
+ const recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
+ assert.eq(null, recoveryToken.recoveryShardId);
+ return recoveryToken;
+};
+
+const startNewSingleShardWriteTransaction = function() {
+ const recoveryToken = writeToShard0({lsid, txnNumber, startTransaction: true});
+ assert.neq(null, recoveryToken.recoveryShardId);
+ return recoveryToken;
+};
+
+const startNewMultiShardReadOnlyTransaction = function() {
+ let recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
+ assert.eq(null, recoveryToken.recoveryShardId);
+
+ recoveryToken = readFromShard1({lsid, txnNumber});
+ assert.eq(null, recoveryToken.recoveryShardId);
+
+ return recoveryToken;
+};
+
+const startNewSingleWriteShardTransaction = function() {
+ let recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
+ assert.eq(null, recoveryToken.recoveryShardId);
+
+ recoveryToken = writeToShard1({lsid, txnNumber});
+ assert.neq(null, recoveryToken.recoveryShardId);
+
+ return recoveryToken;
+};
+
+const startNewMultiShardWriteTransaction = function() {
+ let recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
+ assert.eq(null, recoveryToken.recoveryShardId);
+
+ // Write to shard 1, not shard 0, otherwise the recovery shard will still be the same as the
+ // coordinator shard.
+ recoveryToken = writeToShard1({lsid, txnNumber});
+ assert.neq(null, recoveryToken.recoveryShardId);
+
+ recoveryToken = writeToShard0({lsid, txnNumber});
+ assert.neq(null, recoveryToken.recoveryShardId);
+
+ return recoveryToken;
+};
+
+const abortTransactionOnShardDirectly = function(shardPrimaryConn, lsid, txnNumber) {
+ assert.commandWorked(shardPrimaryConn.adminCommand(
+ {abortTransaction: 1, lsid: lsid, txnNumber: NumberLong(txnNumber), autocommit: false}));
+};
+
+const sendCommitViaOriginalMongos = function(lsid, txnNumber, recoveryToken) {
+ return st.s0.getDB('admin').runCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ recoveryToken: recoveryToken
+ });
+};
+
+const sendCommitViaRecoveryMongos = function(lsid, txnNumber, recoveryToken, writeConcern) {
+ writeConcern = writeConcern || {};
+ return st.s1.getDB('admin').runCommand(Object.merge({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ recoveryToken: recoveryToken
+ },
+ writeConcern));
+};
+
+let st =
+ new ShardingTest({shards: 2, rs: {nodes: 2}, mongos: 2, other: {mongosOptions: {verbose: 3}}});
+
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.name);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: 0}}));
+assert.commandWorked(
+ st.s0.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.name}));
+
+// Insert documents to prime mongos and shards with the latest sharding metadata.
+let testDB = st.s0.getDB('test');
+assert.commandWorked(testDB.runCommand({insert: 'user', documents: [{x: -10}, {x: 10}]}));
+
+const lsid = {
+ id: UUID()
+};
+let txnNumber = 0;
+
+//
+// Generic test cases that are agnostic as to the transaction type
+//
- recoveryToken = readFromShard1({lsid, txnNumber});
- assert.eq(null, recoveryToken.recoveryShardId);
+(function() {
+jsTest.log("Testing recovering transaction with lower number than latest");
+++txnNumber;
- return recoveryToken;
- };
+const oldTxnNumber = txnNumber;
+const oldRecoveryToken = startNewMultiShardWriteTransaction();
- const startNewSingleWriteShardTransaction = function() {
- let recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
- assert.eq(null, recoveryToken.recoveryShardId);
+txnNumber++;
+const newRecoveryToken = startNewMultiShardWriteTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, newRecoveryToken));
- recoveryToken = writeToShard1({lsid, txnNumber});
- assert.neq(null, recoveryToken.recoveryShardId);
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, oldTxnNumber, oldRecoveryToken),
+ ErrorCodes.TransactionTooOld);
- return recoveryToken;
- };
+// The client can still recover the decision for the current transaction number.
+assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, newRecoveryToken));
+})();
- const startNewMultiShardWriteTransaction = function() {
- let recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
- assert.eq(null, recoveryToken.recoveryShardId);
+(function() {
+jsTest.log("Testing recovering transaction with higher number than latest");
+txnNumber++;
- // Write to shard 1, not shard 0, otherwise the recovery shard will still be the same as the
- // coordinator shard.
- recoveryToken = writeToShard1({lsid, txnNumber});
- assert.neq(null, recoveryToken.recoveryShardId);
+const oldTxnNumber = txnNumber;
+const oldRecoveryToken = startNewMultiShardWriteTransaction();
- recoveryToken = writeToShard0({lsid, txnNumber});
- assert.neq(null, recoveryToken.recoveryShardId);
+txnNumber++;
+const fakeRecoveryToken = {
+ recoveryShardId: st.shard0.shardName
+};
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, fakeRecoveryToken),
+ ErrorCodes.NoSuchTransaction);
- return recoveryToken;
- };
+// The active transaction can still be committed.
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, oldTxnNumber, oldRecoveryToken));
+})();
- const abortTransactionOnShardDirectly = function(shardPrimaryConn, lsid, txnNumber) {
- assert.commandWorked(shardPrimaryConn.adminCommand({
- abortTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
- };
+(function() {
+jsTest.log("Testing recovering transaction whose recovery shard forgot the transaction");
+txnNumber++;
- const sendCommitViaOriginalMongos = function(lsid, txnNumber, recoveryToken) {
- return st.s0.getDB('admin').runCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- recoveryToken: recoveryToken
- });
- };
+const recoveryToken = startNewMultiShardWriteTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- const sendCommitViaRecoveryMongos = function(lsid, txnNumber, recoveryToken, writeConcern) {
- writeConcern = writeConcern || {};
- return st.s1.getDB('admin').runCommand(Object.merge({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- recoveryToken: recoveryToken
- },
- writeConcern));
- };
+assert.writeOK(st.rs1.getPrimary().getDB("config").transactions.remove({}, false /* justOne */));
+
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+})();
+
+(function() {
+jsTest.log("Testing that a recovery node does a noop write before returning 'aborted'");
+
+txnNumber++;
+
+const recoveryToken = startNewMultiShardWriteTransaction();
+abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
+assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+
+const recoveryShardReplSetTest = st.rs1;
+
+stopReplicationOnSecondaries(recoveryShardReplSetTest);
+
+// Do a write on the recovery node to bump the recovery node's system last OpTime.
+recoveryShardReplSetTest.getPrimary().getDB("dummy").getCollection("dummy").insert({dummy: 1});
+
+// While the recovery shard primary cannot majority commit writes, commitTransaction returns
+// NoSuchTransaction with a writeConcern error.
+let res = sendCommitViaRecoveryMongos(
+ lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority", wtimeout: 500}});
+assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
+checkWriteConcernTimedOut(res);
+
+// Once the recovery shard primary can majority commit writes again, commitTransaction
+// returns NoSuchTransaction without a writeConcern error.
+restartReplicationOnSecondaries(recoveryShardReplSetTest);
+res = sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority"}});
+assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
+assert.eq(null, res.writeConcernError);
+})();
+
+(function() {
+jsTest.log("Testing that a recovery node does a noop write before returning 'committed'");
+
+txnNumber++;
+
+const recoveryToken = startNewMultiShardWriteTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- let st = new ShardingTest(
- {shards: 2, rs: {nodes: 2}, mongos: 2, other: {mongosOptions: {verbose: 3}}});
+const recoveryShardReplSetTest = st.rs1;
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.name);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: 0}}));
- assert.commandWorked(
- st.s0.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.name}));
+stopReplicationOnSecondaries(recoveryShardReplSetTest);
- // Insert documents to prime mongos and shards with the latest sharding metadata.
- let testDB = st.s0.getDB('test');
- assert.commandWorked(testDB.runCommand({insert: 'user', documents: [{x: -10}, {x: 10}]}));
+// Do a write on the recovery node to bump the recovery node's system last OpTime.
+recoveryShardReplSetTest.getPrimary().getDB("dummy").getCollection("dummy").insert({dummy: 1});
- const lsid = {id: UUID()};
- let txnNumber = 0;
+// While the recovery shard primary cannot majority commit writes, commitTransaction returns
+// ok with a writeConcern error.
+let res = sendCommitViaRecoveryMongos(
+ lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority", wtimeout: 500}});
+assert.commandWorkedIgnoringWriteConcernErrors(res);
+checkWriteConcernTimedOut(res);
- //
- // Generic test cases that are agnostic as to the transaction type
- //
+// Once the recovery shard primary can majority commit writes again, commitTransaction
+// returns ok without a writeConcern error.
+restartReplicationOnSecondaries(recoveryShardReplSetTest);
+assert.commandWorked(
+ sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority"}}));
+})();
- (function() {
- jsTest.log("Testing recovering transaction with lower number than latest");
- ++txnNumber;
+//
+// Single-shard read-only transactions
+//
- const oldTxnNumber = txnNumber;
- const oldRecoveryToken = startNewMultiShardWriteTransaction();
+(function() {
+jsTest.log("Testing recovering single-shard read-only transaction that is in progress");
+txnNumber++;
+const recoveryToken = startNewSingleShardReadOnlyTransaction();
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
- txnNumber++;
- const newRecoveryToken = startNewMultiShardWriteTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, newRecoveryToken));
+// A read-only transaction can still commit after reporting an abort decision.
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
+}());
- assert.commandFailedWithCode(
- sendCommitViaRecoveryMongos(lsid, oldTxnNumber, oldRecoveryToken),
- ErrorCodes.TransactionTooOld);
+(function() {
+jsTest.log("Testing recovering single-shard read-only transaction that aborted");
+txnNumber++;
+const recoveryToken = startNewSingleShardReadOnlyTransaction();
+abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+}());
- // The client can still the recover decision for current transaction number.
- assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, newRecoveryToken));
- })();
+(function() {
+jsTest.log("Testing recovering single-shard read-only transaction that committed");
+txnNumber++;
+const recoveryToken = startNewSingleShardReadOnlyTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+}());
+
+//
+// Single-shard write transactions
+//
- (function() {
- jsTest.log("Testing recovering transaction with higher number than latest");
- txnNumber++;
+(function() {
+jsTest.log("Testing recovering single-shard write transaction that is in progress");
+txnNumber++;
+const recoveryToken = startNewSingleShardWriteTransaction();
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
- const oldTxnNumber = txnNumber;
- const oldRecoveryToken = startNewMultiShardWriteTransaction();
+// A write transaction fails to commit after having reported an abort decision.
+assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+}());
- txnNumber++;
- const fakeRecoveryToken = {recoveryShardId: st.shard0.shardName};
- assert.commandFailedWithCode(
- sendCommitViaRecoveryMongos(lsid, txnNumber, fakeRecoveryToken),
- ErrorCodes.NoSuchTransaction);
+(function() {
+jsTest.log("Testing recovering single-shard write transaction that aborted");
+txnNumber++;
+const recoveryToken = startNewSingleShardWriteTransaction();
+abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+}());
+
+(function() {
+jsTest.log("Testing recovering single-shard write transaction that committed");
+txnNumber++;
+const recoveryToken = startNewSingleShardWriteTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
+assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
+}());
+
+//
+// Multi-shard read-only transactions
+//
+
+(function() {
+jsTest.log("Testing recovering multi-shard read-only transaction that is in progress");
+txnNumber++;
+const recoveryToken = startNewMultiShardReadOnlyTransaction();
+
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+
+// A read-only transaction can still commit after reporting an abort decision.
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
+})();
+
+(function() {
+jsTest.log("Testing recovering multi-shard read-only transaction that aborted");
+txnNumber++;
+const recoveryToken = startNewMultiShardReadOnlyTransaction();
+abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+})();
+
+(function() {
+jsTest.log("Testing recovering multi-shard read-only transaction that committed");
+txnNumber++;
+const recoveryToken = startNewMultiShardReadOnlyTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+})();
+
+//
+// Single-write-shard transactions (there are multiple participants but only one did a write)
+//
+
+(function() {
+jsTest.log("Testing recovering single-write-shard transaction that is in progress");
+txnNumber++;
+const recoveryToken = startNewSingleWriteShardTransaction();
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+
+// A write transaction fails to commit after having reported an abort decision.
+assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+}());
+
+(function() {
+jsTest.log("Testing recovering single-write-shard transaction that aborted on read-only shard" +
+ " but is in progress on write shard");
+txnNumber++;
+const recoveryToken = startNewSingleWriteShardTransaction();
+abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+}());
+
+(function() {
+jsTest.log("Testing recovering single-write-shard transaction that aborted on write" +
+ " shard but is in progress on read-only shard");
+txnNumber++;
+const recoveryToken = startNewSingleWriteShardTransaction();
+abortTransactionOnShardDirectly(st.rs1.getPrimary(), lsid, txnNumber);
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+}());
+
+(function() {
+jsTest.log("Testing recovering single-write-shard transaction that committed");
+txnNumber++;
+const recoveryToken = startNewSingleWriteShardTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
+assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
+}());
+
+//
+// Multi-write-shard transactions (there are multiple participants and more than one did writes)
+//
+
+(function() {
+jsTest.log("Testing recovering multi-write-shard transaction that is in progress");
+txnNumber++;
+
+// Set the transaction expiry to be very high, so we can ascertain that the recovery
+// request through the alternate router is what causes the transaction to abort.
+const getParamRes =
+ st.rs1.getPrimary().adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1});
+assert.commandWorked(getParamRes);
+assert.neq(null, getParamRes.transactionLifetimeLimitSeconds);
+const originalTransactionLifetimeLimitSeconds = getParamRes.transactionLifetimeLimitSeconds;
+
+assert.commandWorked(st.rs1.getPrimary().adminCommand(
+ {setParameter: 1, transactionLifetimeLimitSeconds: 60 * 60 * 1000 /* 1000 hours */}));
+
+const recoveryToken = startNewMultiShardWriteTransaction();
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+
+// A write transaction fails to commit after having reported an abort decision.
+assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+
+assert.commandWorked(st.rs1.getPrimary().adminCommand(
+ {setParameter: 1, transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds}));
+})();
+
+(function() {
+jsTest.log("Testing recovering multi-write-shard transaction that is in prepare");
+txnNumber++;
+const recoveryToken = startNewMultiShardWriteTransaction();
+
+// Ensure the coordinator will hang after putting the participants into prepare but
+// before sending the decision to the participants.
+clearRawMongoProgramOutput();
+assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "hangBeforeWritingDecision", mode: "alwaysOn"}));
+
+assert.commandFailedWithCode(st.s0.adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ // Specify maxTimeMS to make the command return so the test can continue.
+ maxTimeMS: 3000,
+}),
+ ErrorCodes.MaxTimeMSExpired);
+
+waitForFailpoint("Hit hangBeforeWritingDecision failpoint", 1);
+
+// Trying to recover the decision should block because the recovery shard's participant
+// is in prepare.
+assert.commandFailedWithCode(st.s1.adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ recoveryToken: recoveryToken,
+ // Specify maxTimeMS to make the command return so the test can continue.
+ maxTimeMS: 3000,
+}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Allow the transaction to complete.
+assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "hangBeforeWritingDecision", mode: "off"}));
+
+// Trying to recover the decision should now return that the transaction committed.
+assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
+})();
+
+(function() {
+jsTest.log("Testing recovering multi-write-shard transaction after coordinator finished" +
+ " coordinating an abort decision.");
+txnNumber++;
+
+const recoveryToken = startNewMultiShardWriteTransaction();
+abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
+assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+})();
+
+(function() {
+jsTest.log("Testing recovering multi-write-shard transaction after coordinator finished" +
+ " coordinating a commit decision.");
+txnNumber++;
+
+const recoveryToken = startNewMultiShardWriteTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
+assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
+})();
- // The active transaction can still be committed.
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, oldTxnNumber, oldRecoveryToken));
- })();
-
- (function() {
- jsTest.log("Testing recovering transaction whose recovery shard forgot the transaction");
- txnNumber++;
-
- const recoveryToken = startNewMultiShardWriteTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
-
- assert.writeOK(
- st.rs1.getPrimary().getDB("config").transactions.remove({}, false /* justOne */));
-
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- })();
-
- (function() {
- jsTest.log("Testing that a recovery node does a noop write before returning 'aborted'");
-
- txnNumber++;
-
- const recoveryToken = startNewMultiShardWriteTransaction();
- abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
- assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
-
- const recoveryShardReplSetTest = st.rs1;
-
- stopReplicationOnSecondaries(recoveryShardReplSetTest);
-
- // Do a write on the recovery node to bump the recovery node's system last OpTime.
- recoveryShardReplSetTest.getPrimary().getDB("dummy").getCollection("dummy").insert(
- {dummy: 1});
-
- // While the recovery shard primary cannot majority commit writes, commitTransaction returns
- // NoSuchTransaction with a writeConcern error.
- let res = sendCommitViaRecoveryMongos(
- lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority", wtimeout: 500}});
- assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
- checkWriteConcernTimedOut(res);
-
- // Once the recovery shard primary can majority commit writes again, commitTransaction
- // returns NoSuchTransaction without a writeConcern error.
- restartReplicationOnSecondaries(recoveryShardReplSetTest);
- res = sendCommitViaRecoveryMongos(
- lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority"}});
- assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
- assert.eq(null, res.writeConcernError);
- })();
-
- (function() {
- jsTest.log("Testing that a recovery node does a noop write before returning 'committed'");
-
- txnNumber++;
-
- const recoveryToken = startNewMultiShardWriteTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
-
- const recoveryShardReplSetTest = st.rs1;
-
- stopReplicationOnSecondaries(recoveryShardReplSetTest);
-
- // Do a write on the recovery node to bump the recovery node's system last OpTime.
- recoveryShardReplSetTest.getPrimary().getDB("dummy").getCollection("dummy").insert(
- {dummy: 1});
-
- // While the recovery shard primary cannot majority commit writes, commitTransaction returns
- // ok with a writeConcern error.
- let res = sendCommitViaRecoveryMongos(
- lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority", wtimeout: 500}});
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- checkWriteConcernTimedOut(res);
-
- // Once the recovery shard primary can majority commit writes again, commitTransaction
- // returns ok without a writeConcern error.
- restartReplicationOnSecondaries(recoveryShardReplSetTest);
- assert.commandWorked(sendCommitViaRecoveryMongos(
- lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority"}}));
- })();
-
- //
- // Single-shard read-only transactions
- //
-
- (function() {
- jsTest.log("Testing recovering single-shard read-only transaction that is in progress");
- txnNumber++;
- const recoveryToken = startNewSingleShardReadOnlyTransaction();
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
-
- // A read-only transaction can still commit after reporting an abort decision.
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- }());
-
- (function() {
- jsTest.log("Testing recovering single-shard read-only transaction that aborted");
- txnNumber++;
- const recoveryToken = startNewSingleShardReadOnlyTransaction();
- abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- }());
-
- (function() {
- jsTest.log("Testing recovering single-shard read-only transaction that committed");
- txnNumber++;
- const recoveryToken = startNewSingleShardReadOnlyTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- }());
-
- //
- // Single-shard write transactions
- //
-
- (function() {
- jsTest.log("Testing recovering single-shard write transaction that in progress");
- txnNumber++;
- const recoveryToken = startNewSingleShardWriteTransaction();
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
-
- // A write transaction fails to commit after having reported an abort decision.
- assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- }());
-
- (function() {
- jsTest.log("Testing recovering single-shard write transaction that aborted");
- txnNumber++;
- const recoveryToken = startNewSingleShardWriteTransaction();
- abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- }());
-
- (function() {
- jsTest.log("Testing recovering single-shard write transaction that committed");
- txnNumber++;
- const recoveryToken = startNewSingleShardWriteTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
- }());
-
- //
- // Multi-shard read-only transactions
- //
-
- (function() {
- jsTest.log("Testing recovering multi-shard read-only transaction that is in progress");
- txnNumber++;
- const recoveryToken = startNewMultiShardReadOnlyTransaction();
-
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
-
- // A read-only transaction can still commit after reporting an abort decision.
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- })();
-
- (function() {
- jsTest.log("Testing recovering multi-shard read-only transaction that aborted");
- txnNumber++;
- const recoveryToken = startNewMultiShardReadOnlyTransaction();
- abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- })();
-
- (function() {
- jsTest.log("Testing recovering multi-shard read-only transaction that committed");
- txnNumber++;
- const recoveryToken = startNewMultiShardReadOnlyTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- })();
-
- //
- // Single-write-shard transactions (there are multiple participants but only one did a write)
- //
-
- (function() {
- jsTest.log("Testing recovering single-write-shard transaction that is in progress");
- txnNumber++;
- const recoveryToken = startNewSingleWriteShardTransaction();
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
-
- // A write transaction fails to commit after having reported an abort decision.
- assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- }());
-
- (function() {
- jsTest.log(
- "Testing recovering single-write-shard transaction that aborted on read-only shard" +
- " but is in progress on write shard");
- txnNumber++;
- const recoveryToken = startNewSingleWriteShardTransaction();
- abortTransactionOnShardDirectly(st.rs1.getPrimary(), lsid, txnNumber);
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- }());
-
- (function() {
- jsTest.log("Testing recovering single-write-shard transaction that aborted on write" +
- " shard but is in progress on read-only shard");
- txnNumber++;
- const recoveryToken = startNewSingleWriteShardTransaction();
- abortTransactionOnShardDirectly(st.rs1.getPrimary(), lsid, txnNumber);
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- }());
-
- (function() {
- jsTest.log("Testing recovering single-write-shard transaction that committed");
- txnNumber++;
- const recoveryToken = startNewSingleWriteShardTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
- }());
-
- //
- // Multi-write-shard transactions (there are multiple participants and more than one did writes)
- //
-
- (function() {
- jsTest.log("Testing recovering multi-write-shard transaction that is in progress");
- txnNumber++;
-
- // Set the transaction expiry to be very high, so we can ascertain the recovery request
- // through the alternate router is what causes the transaction to abort.
- const getParamRes =
- st.rs1.getPrimary().adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1});
- assert.commandWorked(getParamRes);
- assert.neq(null, getParamRes.transactionLifetimeLimitSeconds);
- const originalTransactionLifetimeLimitSeconds = getParamRes.transactionLifetimeLimitSeconds;
-
- assert.commandWorked(st.rs1.getPrimary().adminCommand(
- {setParameter: 1, transactionLifetimeLimitSeconds: 60 * 60 * 1000 /* 1000 hours */}));
-
- const recoveryToken = startNewMultiShardWriteTransaction();
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
-
- // A write transaction fails to commit after having reported an abort decision.
- assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
-
- assert.commandWorked(st.rs1.getPrimary().adminCommand({
- setParameter: 1,
- transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds
- }));
- })();
-
- (function() {
- jsTest.log("Testing recovering multi-write-shard transaction that is in prepare");
- txnNumber++;
- const recoveryToken = startNewMultiShardWriteTransaction();
-
- // Ensure the coordinator will hang after putting the participants into prepare but
- // before sending the decision to the participants.
- clearRawMongoProgramOutput();
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "hangBeforeWritingDecision", mode: "alwaysOn"}));
-
- assert.commandFailedWithCode(st.s0.adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- // Specify maxTimeMS to make the command return so the test can continue.
- maxTimeMS: 3000,
- }),
- ErrorCodes.MaxTimeMSExpired);
-
- waitForFailpoint("Hit hangBeforeWritingDecision failpoint", 1);
-
- // Trying to recover the decision should block because the recovery shard's participant
- // is in prepare.
- assert.commandFailedWithCode(st.s1.adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- recoveryToken: recoveryToken,
- // Specify maxTimeMS to make the command return so the test can continue.
- maxTimeMS: 3000,
- }),
- ErrorCodes.MaxTimeMSExpired);
-
- // Allow the transaction to complete.
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "hangBeforeWritingDecision", mode: "off"}));
-
- // Trying to recover the decision should now return that the transaction committed.
- assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
- })();
-
- (function() {
- jsTest.log("Testing recovering multi-write-shard transaction after coordinator finished" +
- " coordinating an abort decision.");
- txnNumber++;
-
- const recoveryToken = startNewMultiShardWriteTransaction();
- abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
- assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- })();
-
- (function() {
- jsTest.log("Testing recovering multi-write-shard transaction after coordinator finished" +
- " coordinating a commit decision.");
- txnNumber++;
-
- const recoveryToken = startNewMultiShardWriteTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
- })();
-
- st.stop();
+st.stop();
})();
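The tests above exercise commit recovery: every statement response carries a recoveryToken, and a second mongos can later be asked for the transaction's outcome by resending commitTransaction with that token. As a minimal illustration of that flow, assuming `router` and `altRouter` are two mongos connections to the same cluster and `lsid`/`txnNumber` identify a transaction started through `router`, the shape of the two requests is sketched below.

// Illustrative sketch of the recovery flow exercised above. Assumed names:
// `router` and `altRouter` are mongos connections; `lsid` and `txnNumber`
// identify a transaction started through `router`.
const stmtRes = assert.commandWorked(router.getDB("test").runCommand({
    find: "user",
    lsid: lsid,
    txnNumber: NumberLong(txnNumber),
    startTransaction: true,
    autocommit: false,
}));
// Per the assertions above, recoveryToken.recoveryShardId is only non-null once
// the transaction has performed a write on some shard.
const recoveryToken = stmtRes.recoveryToken;

// If the original router becomes unavailable, any other mongos can recover the
// decision by attaching the token to commitTransaction.
const recoveryRes = altRouter.getDB("admin").runCommand({
    commitTransaction: 1,
    lsid: lsid,
    txnNumber: NumberLong(txnNumber),
    autocommit: false,
    recoveryToken: recoveryToken,
});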
diff --git a/jstests/sharding/txn_two_phase_commit_basic.js b/jstests/sharding/txn_two_phase_commit_basic.js
index 09f4f1bf0cf..535cbe294b7 100644
--- a/jstests/sharding/txn_two_phase_commit_basic.js
+++ b/jstests/sharding/txn_two_phase_commit_basic.js
@@ -6,249 +6,250 @@
*/
(function() {
- 'use strict';
-
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- let st = new ShardingTest({shards: 3, causallyConsistent: true});
-
- let coordinator = st.shard0;
- let participant1 = st.shard1;
- let participant2 = st.shard2;
-
- let expectedParticipantList =
- [participant1.shardName, participant2.shardName, coordinator.shardName];
-
- let lsid = {id: UUID()};
- let txnNumber = 0;
-
- const checkParticipantListMatches = function(
- coordinatorConn, lsid, txnNumber, expectedParticipantList) {
- let coordDoc = coordinatorConn.getDB("config")
- .getCollection("transaction_coordinators")
- .findOne({"_id.lsid.id": lsid.id, "_id.txnNumber": txnNumber});
- assert.neq(null, coordDoc);
- assert.sameMembers(coordDoc.participants, expectedParticipantList);
- };
-
- const checkDecisionIs = function(coordinatorConn, lsid, txnNumber, expectedDecision) {
- let coordDoc = coordinatorConn.getDB("config")
- .getCollection("transaction_coordinators")
- .findOne({"_id.lsid.id": lsid.id, "_id.txnNumber": txnNumber});
- assert.neq(null, coordDoc);
- assert.eq(expectedDecision, coordDoc.decision.decision);
- if (expectedDecision === "commit") {
- assert.neq(null, coordDoc.decision.commitTimestamp);
- } else {
- assert.eq(null, coordDoc.decision.commitTimestamp);
- }
- };
-
- const checkDocumentDeleted = function(coordinatorConn, lsid, txnNumber) {
- let coordDoc = coordinatorConn.getDB("config")
- .getCollection("transaction_coordinators")
- .findOne({"_id.lsid.id": lsid.id, "_id.txnNumber": txnNumber});
- return null === coordDoc;
- };
-
- const runCommitThroughMongosInParallelShellExpectSuccess = function() {
- const runCommitExpectSuccessCode = "assert.commandWorked(db.adminCommand({" +
- "commitTransaction: 1," + "lsid: " + tojson(lsid) + "," + "txnNumber: NumberLong(" +
- txnNumber + ")," + "stmtId: NumberInt(0)," + "autocommit: false," + "}));";
- return startParallelShell(runCommitExpectSuccessCode, st.s.port);
- };
-
- const runCommitThroughMongosInParallelShellExpectAbort = function() {
- const runCommitExpectSuccessCode = "assert.commandFailedWithCode(db.adminCommand({" +
- "commitTransaction: 1," + "lsid: " + tojson(lsid) + "," + "txnNumber: NumberLong(" +
- txnNumber + ")," + "stmtId: NumberInt(0)," + "autocommit: false," + "})," +
- "ErrorCodes.NoSuchTransaction);";
- return startParallelShell(runCommitExpectSuccessCode, st.s.port);
- };
-
- const startSimulatingNetworkFailures = function(connArray) {
- connArray.forEach(function(conn) {
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 10},
- data: {
- errorCode: ErrorCodes.NotMaster,
- failCommands:
- ["prepareTransaction", "abortTransaction", "commitTransaction"]
- }
- }));
- assert.commandWorked(conn.adminCommand({
- configureFailPoint:
- "participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic",
- mode: {times: 5}
- }));
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "participantReturnNetworkErrorForAbortAfterExecutingAbortLogic",
- mode: {times: 5}
- }));
- assert.commandWorked(conn.adminCommand({
- configureFailPoint:
- "participantReturnNetworkErrorForCommitAfterExecutingCommitLogic",
- mode: {times: 5}
- }));
- });
- };
-
- const stopSimulatingNetworkFailures = function(connArray) {
- connArray.forEach(function(conn) {
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "failCommand",
- mode: "off",
- }));
- assert.commandWorked(conn.adminCommand({
- configureFailPoint:
- "participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic",
- mode: "off"
- }));
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "participantReturnNetworkErrorForAbortAfterExecutingAbortLogic",
- mode: "off"
- }));
- assert.commandWorked(conn.adminCommand({
- configureFailPoint:
- "participantReturnNetworkErrorForCommitAfterExecutingCommitLogic",
- mode: "off"
- }));
- });
- };
-
- const setUp = function() {
- // Create a sharded collection with a chunk on each shard:
- // shard0: [-inf, 0)
- // shard1: [0, 10)
- // shard2: [10, +inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: coordinator.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
-
- // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
- // from the shards starting, aborting, and restarting the transaction due to needing to
- // refresh after the transaction has started.
- assert.commandWorked(coordinator.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
-
- // Start a new transaction by inserting a document onto each shard.
- assert.commandWorked(st.s.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}, {_id: 5}, {_id: 15}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false,
+'use strict';
+
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+let st = new ShardingTest({shards: 3, causallyConsistent: true});
+
+let coordinator = st.shard0;
+let participant1 = st.shard1;
+let participant2 = st.shard2;
+
+let expectedParticipantList =
+ [participant1.shardName, participant2.shardName, coordinator.shardName];
+
+let lsid = {id: UUID()};
+let txnNumber = 0;
+
+const checkParticipantListMatches = function(
+ coordinatorConn, lsid, txnNumber, expectedParticipantList) {
+ let coordDoc = coordinatorConn.getDB("config")
+ .getCollection("transaction_coordinators")
+ .findOne({"_id.lsid.id": lsid.id, "_id.txnNumber": txnNumber});
+ assert.neq(null, coordDoc);
+ assert.sameMembers(coordDoc.participants, expectedParticipantList);
+};
+
+const checkDecisionIs = function(coordinatorConn, lsid, txnNumber, expectedDecision) {
+ let coordDoc = coordinatorConn.getDB("config")
+ .getCollection("transaction_coordinators")
+ .findOne({"_id.lsid.id": lsid.id, "_id.txnNumber": txnNumber});
+ assert.neq(null, coordDoc);
+ assert.eq(expectedDecision, coordDoc.decision.decision);
+ if (expectedDecision === "commit") {
+ assert.neq(null, coordDoc.decision.commitTimestamp);
+ } else {
+ assert.eq(null, coordDoc.decision.commitTimestamp);
+ }
+};
+
+const checkDocumentDeleted = function(coordinatorConn, lsid, txnNumber) {
+ let coordDoc = coordinatorConn.getDB("config")
+ .getCollection("transaction_coordinators")
+ .findOne({"_id.lsid.id": lsid.id, "_id.txnNumber": txnNumber});
+ return null === coordDoc;
+};
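+
+// For reference, the coordinator document inspected by the three helpers above is expected
+// to look roughly like the following (field names are taken from the queries and assertions
+// above; the exact on-disk shape is an implementation detail and may differ):
+//   {
+//     _id: {lsid: {id: <UUID>, ...}, txnNumber: NumberLong(<n>)},
+//     participants: [<shardName>, ...],
+//     decision: {decision: "commit" | "abort", commitTimestamp: <Timestamp, commit only>}
+//   }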
+
+const runCommitThroughMongosInParallelShellExpectSuccess = function() {
+ const runCommitExpectSuccessCode = "assert.commandWorked(db.adminCommand({" +
+ "commitTransaction: 1," +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "}));";
+ return startParallelShell(runCommitExpectSuccessCode, st.s.port);
+};
+
+const runCommitThroughMongosInParallelShellExpectAbort = function() {
+ const runCommitExpectSuccessCode = "assert.commandFailedWithCode(db.adminCommand({" +
+ "commitTransaction: 1," +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "})," +
+ "ErrorCodes.NoSuchTransaction);";
+ return startParallelShell(runCommitExpectSuccessCode, st.s.port);
+};
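+
+// Note: startParallelShell() returns a join function. The test below stores it as
+// 'awaitResult' and calls it once the coordinator failpoints are released; joining blocks
+// until the parallel shell exits and fails the test if the commit assertion in the shell
+// did not hold.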
+
+const startSimulatingNetworkFailures = function(connArray) {
+ connArray.forEach(function(conn) {
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 10},
+ data: {
+ errorCode: ErrorCodes.NotMaster,
+ failCommands: ["prepareTransaction", "abortTransaction", "commitTransaction"]
+ }
}));
- };
-
- const testCommitProtocol = function(shouldCommit, simulateNetworkFailures) {
- jsTest.log("Testing two-phase " + (shouldCommit ? "commit" : "abort") +
- " protocol with simulateNetworkFailures: " + simulateNetworkFailures);
-
- txnNumber++;
- setUp();
-
- if (!shouldCommit) {
- // Manually abort the transaction on one of the participants, so that the participant
- // fails to prepare.
- assert.commandWorked(participant2.adminCommand({
- abortTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false,
- }));
- }
-
- if (simulateNetworkFailures) {
- startSimulatingNetworkFailures([participant1, participant2]);
- }
-
- // Turn on failpoints so that the coordinator hangs after each write it does, so that the
- // test can check that the write happened correctly.
- assert.commandWorked(coordinator.adminCommand({
- configureFailPoint: "hangBeforeWaitingForParticipantListWriteConcern",
- mode: "alwaysOn",
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic",
+ mode: {times: 5}
}));
- assert.commandWorked(coordinator.adminCommand({
- configureFailPoint: "hangBeforeWaitingForDecisionWriteConcern",
- mode: "alwaysOn",
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "participantReturnNetworkErrorForAbortAfterExecutingAbortLogic",
+ mode: {times: 5}
}));
-
- // Run commitTransaction through a parallel shell.
- let awaitResult;
- if (shouldCommit) {
- awaitResult = runCommitThroughMongosInParallelShellExpectSuccess();
- } else {
- awaitResult = runCommitThroughMongosInParallelShellExpectAbort();
- }
-
- // Check that the coordinator wrote the participant list.
- waitForFailpoint("Hit hangBeforeWaitingForParticipantListWriteConcern failpoint",
- txnNumber);
- checkParticipantListMatches(coordinator, lsid, txnNumber, expectedParticipantList);
- assert.commandWorked(coordinator.adminCommand({
- configureFailPoint: "hangBeforeWaitingForParticipantListWriteConcern",
- mode: "off",
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "participantReturnNetworkErrorForCommitAfterExecutingCommitLogic",
+ mode: {times: 5}
}));
+ });
+};
- // Check that the coordinator wrote the decision.
- waitForFailpoint("Hit hangBeforeWaitingForDecisionWriteConcern failpoint", txnNumber);
- checkParticipantListMatches(coordinator, lsid, txnNumber, expectedParticipantList);
- checkDecisionIs(coordinator, lsid, txnNumber, (shouldCommit ? "commit" : "abort"));
- assert.commandWorked(coordinator.adminCommand({
- configureFailPoint: "hangBeforeWaitingForDecisionWriteConcern",
+const stopSimulatingNetworkFailures = function(connArray) {
+ connArray.forEach(function(conn) {
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "failCommand",
mode: "off",
}));
-
- // Check that the coordinator deleted its persisted state.
- awaitResult();
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic",
+ mode: "off"
+ }));
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "participantReturnNetworkErrorForAbortAfterExecutingAbortLogic",
+ mode: "off"
+ }));
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "participantReturnNetworkErrorForCommitAfterExecutingCommitLogic",
+ mode: "off"
+ }));
+ });
+};
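+
+// Note: each failpoint armed by startSimulatingNetworkFailures() uses mode {times: N}, so it
+// disarms on its own after the Nth matching command; stopSimulatingNetworkFailures() just
+// guarantees nothing is left armed (mode: "off") before the next round of the test.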
+
+const setUp = function() {
+ // Create a sharded collection with a chunk on each shard:
+ // shard0: [-inf, 0)
+ // shard1: [0, 10)
+ // shard2: [10, +inf)
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: coordinator.shardName}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
+
+ // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
+ // from the shards starting, aborting, and restarting the transaction due to needing to
+ // refresh after the transaction has started.
+ assert.commandWorked(coordinator.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+ assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+ assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+
+ // Start a new transaction by inserting a document onto each shard.
+ assert.commandWorked(st.s.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false,
+ }));
+};
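+
+// With the split points above, the three documents inserted by setUp() land one per shard:
+// {_id: -5} on the coordinator (shard0), {_id: 5} on participant1 (shard1), and {_id: 15} on
+// participant2 (shard2), so all three shards participate in the transaction.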
+
+const testCommitProtocol = function(shouldCommit, simulateNetworkFailures) {
+ jsTest.log("Testing two-phase " + (shouldCommit ? "commit" : "abort") +
+ " protocol with simulateNetworkFailures: " + simulateNetworkFailures);
+
+ txnNumber++;
+ setUp();
+
+ if (!shouldCommit) {
+ // Manually abort the transaction on one of the participants, so that the participant
+ // fails to prepare.
+ assert.commandWorked(participant2.adminCommand({
+ abortTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ autocommit: false,
+ }));
+ }
+
+ if (simulateNetworkFailures) {
+ startSimulatingNetworkFailures([participant1, participant2]);
+ }
+
+    // Turn on failpoints that make the coordinator hang after each write it does, so that the
+ // test can check that the write happened correctly.
+ assert.commandWorked(coordinator.adminCommand({
+ configureFailPoint: "hangBeforeWaitingForParticipantListWriteConcern",
+ mode: "alwaysOn",
+ }));
+ assert.commandWorked(coordinator.adminCommand({
+ configureFailPoint: "hangBeforeWaitingForDecisionWriteConcern",
+ mode: "alwaysOn",
+ }));
+
+ // Run commitTransaction through a parallel shell.
+ let awaitResult;
+ if (shouldCommit) {
+ awaitResult = runCommitThroughMongosInParallelShellExpectSuccess();
+ } else {
+ awaitResult = runCommitThroughMongosInParallelShellExpectAbort();
+ }
+
+ // Check that the coordinator wrote the participant list.
+ waitForFailpoint("Hit hangBeforeWaitingForParticipantListWriteConcern failpoint", txnNumber);
+ checkParticipantListMatches(coordinator, lsid, txnNumber, expectedParticipantList);
+ assert.commandWorked(coordinator.adminCommand({
+ configureFailPoint: "hangBeforeWaitingForParticipantListWriteConcern",
+ mode: "off",
+ }));
+
+ // Check that the coordinator wrote the decision.
+ waitForFailpoint("Hit hangBeforeWaitingForDecisionWriteConcern failpoint", txnNumber);
+ checkParticipantListMatches(coordinator, lsid, txnNumber, expectedParticipantList);
+ checkDecisionIs(coordinator, lsid, txnNumber, (shouldCommit ? "commit" : "abort"));
+ assert.commandWorked(coordinator.adminCommand({
+ configureFailPoint: "hangBeforeWaitingForDecisionWriteConcern",
+ mode: "off",
+ }));
+
+ // Check that the coordinator deleted its persisted state.
+ awaitResult();
+ assert.soon(function() {
+ return checkDocumentDeleted(coordinator, lsid, txnNumber);
+ });
+
+ if (simulateNetworkFailures) {
+ stopSimulatingNetworkFailures([participant1, participant2]);
+ }
+
+ // Check that the transaction committed or aborted as expected.
+ if (!shouldCommit) {
+ jsTest.log("Verify that the transaction was aborted on all shards.");
+ assert.eq(0, st.s.getDB(dbName).getCollection(collName).find().itcount());
+ } else {
+ jsTest.log("Verify that the transaction was committed on all shards.");
+ // Use assert.soon(), because although coordinateCommitTransaction currently blocks
+ // until the commit process is fully complete, it will eventually be changed to only
+ // block until the decision is *written*, at which point the test can pass the
+ // operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in the
+ // read to ensure the read sees the transaction's writes (TODO SERVER-37165).
assert.soon(function() {
- return checkDocumentDeleted(coordinator, lsid, txnNumber);
+ return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
});
+ }
+
+ st.s.getDB(dbName).getCollection(collName).drop();
+};
- if (simulateNetworkFailures) {
- stopSimulatingNetworkFailures([participant1, participant2]);
- }
-
- // Check that the transaction committed or aborted as expected.
- if (!shouldCommit) {
- jsTest.log("Verify that the transaction was aborted on all shards.");
- assert.eq(0, st.s.getDB(dbName).getCollection(collName).find().itcount());
- } else {
- jsTest.log("Verify that the transaction was committed on all shards.");
- // Use assert.soon(), because although coordinateCommitTransaction currently blocks
- // until the commit process is fully complete, it will eventually be changed to only
- // block until the decision is *written*, at which point the test can pass the
- // operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in the
- // read to ensure the read sees the transaction's writes (TODO SERVER-37165).
- assert.soon(function() {
- return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
- });
- }
-
- st.s.getDB(dbName).getCollection(collName).drop();
- };
-
- testCommitProtocol(false /* test abort */, false /* no network failures */);
- testCommitProtocol(true /* test commit */, false /* no network failures */);
- testCommitProtocol(false /* test abort */, true /* with network failures */);
- testCommitProtocol(true /* test commit */, true /* with network failures */);
-
- st.stop();
+testCommitProtocol(false /* test abort */, false /* no network failures */);
+testCommitProtocol(true /* test commit */, false /* no network failures */);
+testCommitProtocol(false /* test abort */, true /* with network failures */);
+testCommitProtocol(true /* test commit */, true /* with network failures */);
+st.stop();
})();
diff --git a/jstests/sharding/txn_two_phase_commit_commands_basic_requirements.js b/jstests/sharding/txn_two_phase_commit_commands_basic_requirements.js
index 1e48f4f5ad5..b51d2c0c8ea 100644
--- a/jstests/sharding/txn_two_phase_commit_commands_basic_requirements.js
+++ b/jstests/sharding/txn_two_phase_commit_commands_basic_requirements.js
@@ -4,62 +4,63 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "foo";
+const dbName = "test";
+const collName = "foo";
- const txnNumber = 0;
- const lsid = {id: UUID()};
+const txnNumber = 0;
+const lsid = {
+ id: UUID()
+};
- const checkCoordinatorCommandsRejected = function(conn, expectedErrorCode) {
- assert.commandFailedWithCode(conn.adminCommand({
- coordinateCommitTransaction: 1,
- participants: [{shardId: "dummy1"}, {shardId: "dummy2"}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(1),
- autocommit: false
- }),
- expectedErrorCode);
- };
+const checkCoordinatorCommandsRejected = function(conn, expectedErrorCode) {
+ assert.commandFailedWithCode(conn.adminCommand({
+ coordinateCommitTransaction: 1,
+ participants: [{shardId: "dummy1"}, {shardId: "dummy2"}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(1),
+ autocommit: false
+ }),
+ expectedErrorCode);
+};
- const checkCoordinatorCommandsAgainstNonAdminDbRejected = function(conn) {
- const testDB = conn.getDB(dbName);
- assert.commandFailedWithCode(testDB.runCommand({
- coordinateCommitTransaction: 1,
- participants: [{shardId: "dummy1"}, {shardId: "dummy2"}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false
- }),
- ErrorCodes.Unauthorized);
- };
+const checkCoordinatorCommandsAgainstNonAdminDbRejected = function(conn) {
+ const testDB = conn.getDB(dbName);
+ assert.commandFailedWithCode(testDB.runCommand({
+ coordinateCommitTransaction: 1,
+ participants: [{shardId: "dummy1"}, {shardId: "dummy2"}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ autocommit: false
+ }),
+ ErrorCodes.Unauthorized);
+};
- const st = new ShardingTest({shards: 1});
+const st = new ShardingTest({shards: 1});
- jsTest.log("Verify that coordinator commands are only accepted against the admin database");
- checkCoordinatorCommandsAgainstNonAdminDbRejected(st.rs0.getPrimary());
- checkCoordinatorCommandsAgainstNonAdminDbRejected(st.configRS.getPrimary());
+jsTest.log("Verify that coordinator commands are only accepted against the admin database");
+checkCoordinatorCommandsAgainstNonAdminDbRejected(st.rs0.getPrimary());
+checkCoordinatorCommandsAgainstNonAdminDbRejected(st.configRS.getPrimary());
- st.stop();
+st.stop();
- jsTest.log(
- "Verify that a shard server that has not yet been added to a cluster does not accept coordinator commands");
- const shardsvrReplSet = new ReplSetTest({nodes: 1, nodeOptions: {shardsvr: ""}});
- shardsvrReplSet.startSet();
- shardsvrReplSet.initiate();
- checkCoordinatorCommandsRejected(shardsvrReplSet.getPrimary(),
- ErrorCodes.ShardingStateNotInitialized);
- shardsvrReplSet.stopSet();
-
- jsTest.log(
- "Verify that a non-config server, non-shard server does not accept coordinator commands");
- const standaloneReplSet = new ReplSetTest({nodes: 1});
- standaloneReplSet.startSet();
- standaloneReplSet.initiate();
- checkCoordinatorCommandsRejected(standaloneReplSet.getPrimary(), ErrorCodes.NoShardingEnabled);
- standaloneReplSet.stopSet();
+jsTest.log(
+ "Verify that a shard server that has not yet been added to a cluster does not accept coordinator commands");
+const shardsvrReplSet = new ReplSetTest({nodes: 1, nodeOptions: {shardsvr: ""}});
+shardsvrReplSet.startSet();
+shardsvrReplSet.initiate();
+checkCoordinatorCommandsRejected(shardsvrReplSet.getPrimary(),
+ ErrorCodes.ShardingStateNotInitialized);
+shardsvrReplSet.stopSet();
+jsTest.log(
+ "Verify that a non-config server, non-shard server does not accept coordinator commands");
+const standaloneReplSet = new ReplSetTest({nodes: 1});
+standaloneReplSet.startSet();
+standaloneReplSet.initiate();
+checkCoordinatorCommandsRejected(standaloneReplSet.getPrimary(), ErrorCodes.NoShardingEnabled);
+standaloneReplSet.stopSet();
})();
diff --git a/jstests/sharding/txn_two_phase_commit_coordinator_shutdown_and_restart.js b/jstests/sharding/txn_two_phase_commit_coordinator_shutdown_and_restart.js
index f976218f7d5..ef5c42665b6 100644
--- a/jstests/sharding/txn_two_phase_commit_coordinator_shutdown_and_restart.js
+++ b/jstests/sharding/txn_two_phase_commit_coordinator_shutdown_and_restart.js
@@ -16,130 +16,147 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
TestData.skipCheckDBHashes = true;
(function() {
- 'use strict';
-
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
- load('jstests/libs/write_concern_util.js');
-
- const rs0_opts = {nodes: [{}, {}]};
- // Start the participant replSet with one node as a priority 0 node to avoid flip flopping.
- const rs1_opts = {nodes: [{}, {rsConfig: {priority: 0}}]};
- const st = new ShardingTest(
- {shards: {rs0: rs0_opts, rs1: rs1_opts}, mongos: 1, causallyConsistent: true});
-
- // Create a sharded collection:
- // shard0: [-inf, 0)
- // shard1: [0, inf)
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.name);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: 0}}));
- assert.commandWorked(
- st.s0.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.name}));
-
- const testDB = st.s0.getDB('test');
- assert.commandWorked(testDB.runCommand({insert: 'user', documents: [{x: -10}, {x: 10}]}));
-
- const coordinatorReplSetTest = st.rs0;
- const participantReplSetTest = st.rs1;
-
- let coordinatorPrimaryConn = coordinatorReplSetTest.getPrimary();
- let participantPrimaryConn = participantReplSetTest.getPrimary();
-
- const lsid = {id: UUID()};
- let txnNumber = 0;
- const participantList = [{shardId: st.shard0.shardName}, {shardId: st.shard1.shardName}];
-
- // Build the following command as a string since we need to persist the lsid and the txnNumber
- // into the scope of the parallel shell.
- // assert.commandFailedWithCode(db.adminCommand({
- // commitTransaction: 1,
- // maxTimeMS: 2000 * 10,
- // lsid: lsid,
- // txnNumber: NumberLong(txnNumber),
- // stmtId: NumberInt(0),
- // autocommit: false,
- // }), ErrorCodes.MaxTimeMSExpired);
- const runCommitThroughMongosInParallelShellExpectTimeOut = function() {
- const runCommitExpectTimeOutCode = "assert.commandFailedWithCode(db.adminCommand({" +
- "commitTransaction: 1, maxTimeMS: 2000 * 10, " + "lsid: " + tojson(lsid) + "," +
- "txnNumber: NumberLong(" + txnNumber + ")," + "stmtId: NumberInt(0)," +
- "autocommit: false," + "})," + "ErrorCodes.MaxTimeMSExpired);";
- return startParallelShell(runCommitExpectTimeOutCode, st.s.port);
- };
-
- jsTest.log("Starting a cross-shard transaction");
- // Start a cross shard transaction through mongos.
- const updateDocumentOnShard0 = {q: {x: -1}, u: {"$set": {a: 1}}, upsert: true};
-
- const updateDocumentOnShard1 = {q: {x: 1}, u: {"$set": {a: 1}}, upsert: true};
-
- assert.commandWorked(testDB.runCommand({
- update: 'user',
- updates: [updateDocumentOnShard0, updateDocumentOnShard1],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }));
-
- jsTest.log("Turn on hangBeforeWritingDecision failpoint");
- // Make the commit coordination hang before writing the decision, and send commitTransaction.
- // The transaction on the participant will remain in prepare.
- assert.commandWorked(coordinatorPrimaryConn.adminCommand({
- configureFailPoint: "hangBeforeWritingDecision",
- mode: "alwaysOn",
- }));
-
- // Run commit through mongos in a parallel shell. This should timeout since we have set the
- // failpoint.
- runCommitThroughMongosInParallelShellExpectTimeOut();
- waitForFailpoint("Hit hangBeforeWritingDecision failpoint", 1 /* numTimes */);
-
- jsTest.log("Stopping coordinator shard");
- // Stop the mongods on the coordinator shard using the SIGTERM signal. We must skip validation
- // checks since we'll be shutting down a node with a prepared transaction.
- coordinatorReplSetTest.stopSet(15, true /* forRestart */, {skipValidation: true} /* opts */);
-
- // Once the coordinator has gone down, do a majority write on the participant while there is a
- // prepared transaction. This will ensure that the stable timestamp is able to advance since
- // this write must be in the committed snapshot.
- const session = participantPrimaryConn.startSession();
- const sessionDB = session.getDatabase("dummy");
- const sessionColl = sessionDB.getCollection("dummy");
- session.resetOperationTime_forTesting();
- assert.commandWorked(sessionColl.insert({dummy: 2}, {writeConcern: {w: "majority"}}));
- assert.neq(session.getOperationTime(), null);
- assert.neq(session.getClusterTime(), null);
- jsTest.log("Successfully completed majority write on participant");
-
- // Confirm that a majority read on the secondary includes the dummy write. This would mean that
- // the stable timestamp also advanced on the secondary.
- // In order to do this read with readConcern majority, we must use afterClusterTime with causal
- // consistency enabled.
- const participantSecondaryConn = participantReplSetTest.getSecondary();
- const secondaryDB = participantSecondaryConn.getDB("dummy");
- const res = secondaryDB.runCommand({
- find: "dummy",
- readConcern: {level: "majority", afterClusterTime: session.getOperationTime()},
- });
- assert.eq(res.cursor.firstBatch.length, 1);
-
- jsTest.log("Restarting coordinator");
- // Restarting the coordinator will reset the fail point.
- coordinatorReplSetTest.startSet({restart: true});
- coordinatorPrimaryConn = coordinatorReplSetTest.getPrimary();
-
- jsTest.log("Committing transaction");
- // Now, commitTransaction should succeed.
- assert.commandWorked(st.s.adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false
- }));
-
- st.stop();
-
+'use strict';
+
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+load('jstests/libs/write_concern_util.js');
+
+const rs0_opts = {
+ nodes: [{}, {}]
+};
+// Start the participant replSet with one node as a priority 0 node to avoid flip flopping.
+const rs1_opts = {
+ nodes: [{}, {rsConfig: {priority: 0}}]
+};
+const st =
+ new ShardingTest({shards: {rs0: rs0_opts, rs1: rs1_opts}, mongos: 1, causallyConsistent: true});
+
+// Create a sharded collection:
+// shard0: [-inf, 0)
+// shard1: [0, inf)
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.name);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: 0}}));
+assert.commandWorked(
+ st.s0.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.name}));
+
+const testDB = st.s0.getDB('test');
+assert.commandWorked(testDB.runCommand({insert: 'user', documents: [{x: -10}, {x: 10}]}));
+
+const coordinatorReplSetTest = st.rs0;
+const participantReplSetTest = st.rs1;
+
+let coordinatorPrimaryConn = coordinatorReplSetTest.getPrimary();
+let participantPrimaryConn = participantReplSetTest.getPrimary();
+
+const lsid = {
+ id: UUID()
+};
+let txnNumber = 0;
+const participantList = [{shardId: st.shard0.shardName}, {shardId: st.shard1.shardName}];
+
+// Build the following command as a string since we need to persist the lsid and the txnNumber
+// into the scope of the parallel shell.
+// assert.commandFailedWithCode(db.adminCommand({
+// commitTransaction: 1,
+// maxTimeMS: 2000 * 10,
+// lsid: lsid,
+// txnNumber: NumberLong(txnNumber),
+// stmtId: NumberInt(0),
+// autocommit: false,
+// }), ErrorCodes.MaxTimeMSExpired);
+const runCommitThroughMongosInParallelShellExpectTimeOut = function() {
+ const runCommitExpectTimeOutCode = "assert.commandFailedWithCode(db.adminCommand({" +
+ "commitTransaction: 1, maxTimeMS: 2000 * 10, " +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "})," +
+ "ErrorCodes.MaxTimeMSExpired);";
+ return startParallelShell(runCommitExpectTimeOutCode, st.s.port);
+};
+
+jsTest.log("Starting a cross-shard transaction");
+// Start a cross-shard transaction through mongos.
+const updateDocumentOnShard0 = {
+ q: {x: -1},
+ u: {"$set": {a: 1}},
+ upsert: true
+};
+
+const updateDocumentOnShard1 = {
+ q: {x: 1},
+ u: {"$set": {a: 1}},
+ upsert: true
+};
+
+assert.commandWorked(testDB.runCommand({
+ update: 'user',
+ updates: [updateDocumentOnShard0, updateDocumentOnShard1],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+}));
+
+jsTest.log("Turn on hangBeforeWritingDecision failpoint");
+// Make the commit coordination hang before writing the decision, and send commitTransaction.
+// The transaction on the participant will remain in prepare.
+assert.commandWorked(coordinatorPrimaryConn.adminCommand({
+ configureFailPoint: "hangBeforeWritingDecision",
+ mode: "alwaysOn",
+}));
+
+// Run commit through mongos in a parallel shell. This should time out since we have set
+// the failpoint.
+runCommitThroughMongosInParallelShellExpectTimeOut();
+waitForFailpoint("Hit hangBeforeWritingDecision failpoint", 1 /* numTimes */);
+
+jsTest.log("Stopping coordinator shard");
+// Stop the mongods on the coordinator shard using the SIGTERM signal. We must skip validation
+// checks since we'll be shutting down a node with a prepared transaction.
+coordinatorReplSetTest.stopSet(15, true /* forRestart */, {skipValidation: true} /* opts */);
+
+// Once the coordinator has gone down, do a majority write on the participant while there is a
+// prepared transaction. This will ensure that the stable timestamp is able to advance since
+// this write must be in the committed snapshot.
+const session = participantPrimaryConn.startSession();
+const sessionDB = session.getDatabase("dummy");
+const sessionColl = sessionDB.getCollection("dummy");
+session.resetOperationTime_forTesting();
+assert.commandWorked(sessionColl.insert({dummy: 2}, {writeConcern: {w: "majority"}}));
+assert.neq(session.getOperationTime(), null);
+assert.neq(session.getClusterTime(), null);
+jsTest.log("Successfully completed majority write on participant");
+
+// Confirm that a majority read on the secondary includes the dummy write. This would mean that
+// the stable timestamp also advanced on the secondary.
+// In order to do this read with readConcern majority, we must use afterClusterTime with causal
+// consistency enabled.
+const participantSecondaryConn = participantReplSetTest.getSecondary();
+const secondaryDB = participantSecondaryConn.getDB("dummy");
+const res = secondaryDB.runCommand({
+ find: "dummy",
+ readConcern: {level: "majority", afterClusterTime: session.getOperationTime()},
+});
+assert.eq(res.cursor.firstBatch.length, 1);
+
+jsTest.log("Restarting coordinator");
+// Restarting the coordinator will reset the fail point.
+coordinatorReplSetTest.startSet({restart: true});
+coordinatorPrimaryConn = coordinatorReplSetTest.getPrimary();
+
+jsTest.log("Committing transaction");
+// Now, commitTransaction should succeed.
+assert.commandWorked(st.s.adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ autocommit: false
+}));
+
+st.stop();
})();
diff --git a/jstests/sharding/txn_two_phase_commit_failover.js b/jstests/sharding/txn_two_phase_commit_failover.js
index 02fbe8bd88c..7ee5128d23d 100644
--- a/jstests/sharding/txn_two_phase_commit_failover.js
+++ b/jstests/sharding/txn_two_phase_commit_failover.js
@@ -11,200 +11,205 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
+
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+// Lower the transaction timeout for participants, since this test exercises the case where the
+// coordinator fails over before writing the participant list and then checks that the
+// transaction is aborted on all participants, and the participants will only abort on reaching
+// the transaction timeout.
+TestData.transactionLifetimeLimitSeconds = 30;
+
+let lsid = {id: UUID()};
+let txnNumber = 0;
+
+const runTest = function(sameNodeStepsUpAfterFailover) {
+ let stepDownSecs; // The amount of time the node has to wait before becoming primary again.
+ let numCoordinatorNodes;
+ if (sameNodeStepsUpAfterFailover) {
+ numCoordinatorNodes = 1;
+ stepDownSecs = 1;
+ } else {
+ numCoordinatorNodes = 3;
+ stepDownSecs = 3;
+ }
+
+ let st = new ShardingTest({
+ shards: 3,
+ rs0: {nodes: numCoordinatorNodes},
+ causallyConsistent: true,
+ other: {mongosOptions: {verbose: 3}}
+ });
+
+ let coordinatorReplSetTest = st.rs0;
+
+ let participant0 = st.shard0;
+ let participant1 = st.shard1;
+ let participant2 = st.shard2;
+
+ const runCommitThroughMongosInParallelShellExpectSuccess = function() {
+ const runCommitExpectSuccessCode = "assert.commandWorked(db.adminCommand({" +
+ "commitTransaction: 1," +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "}));";
+ return startParallelShell(runCommitExpectSuccessCode, st.s.port);
+ };
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
+ const runCommitThroughMongosInParallelShellExpectAbort = function() {
+ const runCommitExpectSuccessCode = "assert.commandFailedWithCode(db.adminCommand({" +
+ "commitTransaction: 1," +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "})," +
+ "ErrorCodes.NoSuchTransaction);";
+ return startParallelShell(runCommitExpectSuccessCode, st.s.port);
+ };
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
+ const setUp = function() {
+ // Create a sharded collection with a chunk on each shard:
+ // shard0: [-inf, 0)
+ // shard1: [0, 10)
+ // shard2: [10, +inf)
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: participant0.shardName}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
+
+ flushRoutersAndRefreshShardMetadata(st, {ns});
+
+ // Start a new transaction by inserting a document onto each shard.
+ assert.commandWorked(st.s.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false,
+ }));
+ };
- // Lower the transaction timeout for participants, since this test exercises the case where the
- // coordinator fails over before writing the participant list and then checks that the
- // transaction is aborted on all participants, and the participants will only abort on reaching
- // the transaction timeout.
- TestData.transactionLifetimeLimitSeconds = 30;
+ const testCommitProtocol = function(makeAParticipantAbort, failpointData, expectAbortResponse) {
+ jsTest.log("Testing commit protocol with sameNodeStepsUpAfterFailover: " +
+ sameNodeStepsUpAfterFailover + ", makeAParticipantAbort: " +
+ makeAParticipantAbort + ", expectAbortResponse: " + expectAbortResponse +
+ ", and failpointData: " + tojson(failpointData));
- let lsid = {id: UUID()};
- let txnNumber = 0;
+ txnNumber++;
+ setUp();
- const runTest = function(sameNodeStepsUpAfterFailover) {
- let stepDownSecs; // The amount of time the node has to wait before becoming primary again.
- let numCoordinatorNodes;
- if (sameNodeStepsUpAfterFailover) {
- numCoordinatorNodes = 1;
- stepDownSecs = 1;
- } else {
- numCoordinatorNodes = 3;
- stepDownSecs = 3;
- }
+ coordinatorReplSetTest.awaitNodesAgreeOnPrimary();
+ let coordPrimary = coordinatorReplSetTest.getPrimary();
- let st = new ShardingTest({
- shards: 3,
- rs0: {nodes: numCoordinatorNodes},
- causallyConsistent: true,
- other: {mongosOptions: {verbose: 3}}
- });
-
- let coordinatorReplSetTest = st.rs0;
-
- let participant0 = st.shard0;
- let participant1 = st.shard1;
- let participant2 = st.shard2;
-
- const runCommitThroughMongosInParallelShellExpectSuccess = function() {
- const runCommitExpectSuccessCode = "assert.commandWorked(db.adminCommand({" +
- "commitTransaction: 1," + "lsid: " + tojson(lsid) + "," + "txnNumber: NumberLong(" +
- txnNumber + ")," + "stmtId: NumberInt(0)," + "autocommit: false," + "}));";
- return startParallelShell(runCommitExpectSuccessCode, st.s.port);
- };
-
- const runCommitThroughMongosInParallelShellExpectAbort = function() {
- const runCommitExpectSuccessCode = "assert.commandFailedWithCode(db.adminCommand({" +
- "commitTransaction: 1," + "lsid: " + tojson(lsid) + "," + "txnNumber: NumberLong(" +
- txnNumber + ")," + "stmtId: NumberInt(0)," + "autocommit: false," + "})," +
- "ErrorCodes.NoSuchTransaction);";
- return startParallelShell(runCommitExpectSuccessCode, st.s.port);
- };
-
- const setUp = function() {
- // Create a sharded collection with a chunk on each shard:
- // shard0: [-inf, 0)
- // shard1: [0, 10)
- // shard2: [10, +inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(
- st.s.adminCommand({movePrimary: dbName, to: participant0.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
-
- flushRoutersAndRefreshShardMetadata(st, {ns});
-
- // Start a new transaction by inserting a document onto each shard.
- assert.commandWorked(st.s.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+ if (makeAParticipantAbort) {
+ // Manually abort the transaction on one of the participants, so that the
+ // participant fails to prepare.
+ assert.commandWorked(participant2.adminCommand({
+ abortTransaction: 1,
lsid: lsid,
txnNumber: NumberLong(txnNumber),
stmtId: NumberInt(0),
- startTransaction: true,
autocommit: false,
}));
- };
-
- const testCommitProtocol = function(
- makeAParticipantAbort, failpointData, expectAbortResponse) {
- jsTest.log("Testing commit protocol with sameNodeStepsUpAfterFailover: " +
- sameNodeStepsUpAfterFailover + ", makeAParticipantAbort: " +
- makeAParticipantAbort + ", expectAbortResponse: " + expectAbortResponse +
- ", and failpointData: " + tojson(failpointData));
-
- txnNumber++;
- setUp();
-
- coordinatorReplSetTest.awaitNodesAgreeOnPrimary();
- let coordPrimary = coordinatorReplSetTest.getPrimary();
-
- if (makeAParticipantAbort) {
- // Manually abort the transaction on one of the participants, so that the
- // participant fails to prepare.
- assert.commandWorked(participant2.adminCommand({
- abortTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false,
- }));
- }
-
- assert.commandWorked(coordPrimary.adminCommand({
- configureFailPoint: failpointData.failpoint,
- mode: {skip: (failpointData.skip ? failpointData.skip : 0)},
- }));
+ }
- // Run commitTransaction through a parallel shell.
- let awaitResult;
- if (expectAbortResponse) {
- awaitResult = runCommitThroughMongosInParallelShellExpectAbort();
- } else {
- awaitResult = runCommitThroughMongosInParallelShellExpectSuccess();
- }
-
- var numTimesShouldBeHit = failpointData.numTimesShouldBeHit;
- if ((failpointData.failpoint == "hangWhileTargetingLocalHost" &&
- !failpointData.skip) && // We are testing the prepare phase
- makeAParticipantAbort) { // A remote participant will vote abort
- // Wait for the abort to the local host to be scheduled as well.
- numTimesShouldBeHit++;
- }
-
- waitForFailpoint("Hit " + failpointData.failpoint + " failpoint", numTimesShouldBeHit);
-
- // Induce the coordinator primary to step down.
- assert.commandWorked(
- coordPrimary.adminCommand({replSetStepDown: stepDownSecs, force: true}));
- assert.commandWorked(coordPrimary.adminCommand({
- configureFailPoint: failpointData.failpoint,
- mode: "off",
- }));
+ assert.commandWorked(coordPrimary.adminCommand({
+ configureFailPoint: failpointData.failpoint,
+ mode: {skip: (failpointData.skip ? failpointData.skip : 0)},
+ }));
- // The router should retry commitTransaction against the new primary.
- awaitResult();
-
- // Check that the transaction committed or aborted as expected.
- if (expectAbortResponse) {
- jsTest.log("Verify that the transaction was aborted on all shards.");
- assert.eq(0, st.s.getDB(dbName).getCollection(collName).find().itcount());
- } else {
- jsTest.log("Verify that the transaction was committed on all shards.");
- // Use assert.soon(), because although coordinateCommitTransaction currently blocks
- // until the commit process is fully complete, it will eventually be changed to only
- // block until the decision is *written*, at which point the test can pass the
- // operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in
- // the read to ensure the read sees the transaction's writes (TODO SERVER-37165).
- assert.soon(function() {
- return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
- });
- }
-
- st.s.getDB(dbName).getCollection(collName).drop();
- clearRawMongoProgramOutput();
- };
-
- //
- // Run through all the failpoints when one participant responds to prepare with vote abort.
- //
-
- failpointDataArr.forEach(function(failpointData) {
- testCommitProtocol(true /* make a participant abort */,
- failpointData,
- true /* expect abort decision */);
- });
-
- //
- // Run through all the failpoints when all participants respond to prepare with vote commit.
- //
-
- failpointDataArr.forEach(function(failpointData) {
- // Note: If the coordinator fails over before making the participant list durable,
- // the transaction will abort even if all participants could have committed. This is
- // a property of the coordinator only, and would be true even if a participant's
- // in-progress transaction could survive failover.
- let expectAbort = (failpointData.failpoint == "hangBeforeWritingParticipantList") ||
- (failpointData.failpoint == "hangWhileTargetingLocalHost" && !failpointData.skip) ||
- false;
- testCommitProtocol(false /* make a participant abort */, failpointData, expectAbort);
- });
- st.stop();
- };
+ // Run commitTransaction through a parallel shell.
+ let awaitResult;
+ if (expectAbortResponse) {
+ awaitResult = runCommitThroughMongosInParallelShellExpectAbort();
+ } else {
+ awaitResult = runCommitThroughMongosInParallelShellExpectSuccess();
+ }
+
+ var numTimesShouldBeHit = failpointData.numTimesShouldBeHit;
+ if ((failpointData.failpoint == "hangWhileTargetingLocalHost" &&
+ !failpointData.skip) && // We are testing the prepare phase
+ makeAParticipantAbort) { // A remote participant will vote abort
+ // Wait for the abort to the local host to be scheduled as well.
+ numTimesShouldBeHit++;
+ }
+
+ waitForFailpoint("Hit " + failpointData.failpoint + " failpoint", numTimesShouldBeHit);
+
+ // Induce the coordinator primary to step down.
+ assert.commandWorked(
+ coordPrimary.adminCommand({replSetStepDown: stepDownSecs, force: true}));
+ assert.commandWorked(coordPrimary.adminCommand({
+ configureFailPoint: failpointData.failpoint,
+ mode: "off",
+ }));
- const failpointDataArr = getCoordinatorFailpoints();
+ // The router should retry commitTransaction against the new primary.
+ awaitResult();
+
+ // Check that the transaction committed or aborted as expected.
+ if (expectAbortResponse) {
+ jsTest.log("Verify that the transaction was aborted on all shards.");
+ assert.eq(0, st.s.getDB(dbName).getCollection(collName).find().itcount());
+ } else {
+ jsTest.log("Verify that the transaction was committed on all shards.");
+ // Use assert.soon(), because although coordinateCommitTransaction currently blocks
+ // until the commit process is fully complete, it will eventually be changed to only
+ // block until the decision is *written*, at which point the test can pass the
+ // operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in
+ // the read to ensure the read sees the transaction's writes (TODO SERVER-37165).
+ assert.soon(function() {
+ return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
+ });
+ }
+
+ st.s.getDB(dbName).getCollection(collName).drop();
+ clearRawMongoProgramOutput();
+ };
- runTest(true /* same node always steps up after stepping down */, false);
- runTest(false /* same node always steps up after stepping down */, false);
+ //
+ // Run through all the failpoints when one participant responds to prepare with vote abort.
+ //
+
+ failpointDataArr.forEach(function(failpointData) {
+ testCommitProtocol(
+ true /* make a participant abort */, failpointData, true /* expect abort decision */);
+ });
+
+ //
+ // Run through all the failpoints when all participants respond to prepare with vote commit.
+ //
+
+ failpointDataArr.forEach(function(failpointData) {
+ // Note: If the coordinator fails over before making the participant list durable,
+ // the transaction will abort even if all participants could have committed. This is
+ // a property of the coordinator only, and would be true even if a participant's
+ // in-progress transaction could survive failover.
+ let expectAbort = (failpointData.failpoint == "hangBeforeWritingParticipantList") ||
+ (failpointData.failpoint == "hangWhileTargetingLocalHost" && !failpointData.skip) ||
+ false;
+ testCommitProtocol(false /* make a participant abort */, failpointData, expectAbort);
+ });
+ st.stop();
+};
+
+const failpointDataArr = getCoordinatorFailpoints();
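+
+// getCoordinatorFailpoints() is defined in sharded_transactions_helpers.js; judging from how
+// failpointData is used above, each entry is expected to look roughly like
+// {failpoint: <name>, numTimesShouldBeHit: <n>, skip: <optional count>}.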
+
+runTest(true /* same node always steps up after stepping down */, false);
+runTest(false /* same node always steps up after stepping down */, false);
})();
diff --git a/jstests/sharding/txn_two_phase_commit_killop.js b/jstests/sharding/txn_two_phase_commit_killop.js
index c49123d3fb7..18a13d58dd9 100644
--- a/jstests/sharding/txn_two_phase_commit_killop.js
+++ b/jstests/sharding/txn_two_phase_commit_killop.js
@@ -7,176 +7,183 @@
*/
(function() {
- 'use strict';
-
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- let st = new ShardingTest({shards: 3, causallyConsistent: true});
-
- let coordinator = st.shard0;
- let participant1 = st.shard1;
- let participant2 = st.shard2;
-
- let lsid = {id: UUID()};
- let txnNumber = 0;
-
- const runCommitThroughMongosInParallelShellExpectSuccess = function() {
- const runCommitExpectSuccessCode = "assert.commandWorked(db.adminCommand({" +
- "commitTransaction: 1," + "lsid: " + tojson(lsid) + "," + "txnNumber: NumberLong(" +
- txnNumber + ")," + "stmtId: NumberInt(0)," + "autocommit: false," + "}));";
- return startParallelShell(runCommitExpectSuccessCode, st.s.port);
- };
-
- const runCommitThroughMongosInParallelShellExpectAbort = function() {
- const runCommitExpectSuccessCode = "assert.commandFailedWithCode(db.adminCommand({" +
- "commitTransaction: 1," + "lsid: " + tojson(lsid) + "," + "txnNumber: NumberLong(" +
- txnNumber + ")," + "stmtId: NumberInt(0)," + "autocommit: false," + "})," +
- "ErrorCodes.NoSuchTransaction);";
- return startParallelShell(runCommitExpectSuccessCode, st.s.port);
- };
-
- const setUp = function() {
- // Create a sharded collection with a chunk on each shard:
- // shard0: [-inf, 0)
- // shard1: [0, 10)
- // shard2: [10, +inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: coordinator.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
-
- // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
- // from the shards starting, aborting, and restarting the transaction due to needing to
- // refresh after the transaction has started.
- assert.commandWorked(coordinator.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
-
- // Start a new transaction by inserting a document onto each shard.
- assert.commandWorked(st.s.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+'use strict';
+
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+let st = new ShardingTest({shards: 3, causallyConsistent: true});
+
+let coordinator = st.shard0;
+let participant1 = st.shard1;
+let participant2 = st.shard2;
+
+let lsid = {id: UUID()};
+let txnNumber = 0;
+
+const runCommitThroughMongosInParallelShellExpectSuccess = function() {
+ const runCommitExpectSuccessCode = "assert.commandWorked(db.adminCommand({" +
+ "commitTransaction: 1," +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "}));";
+ return startParallelShell(runCommitExpectSuccessCode, st.s.port);
+};
+
+const runCommitThroughMongosInParallelShellExpectAbort = function() {
+ const runCommitExpectSuccessCode = "assert.commandFailedWithCode(db.adminCommand({" +
+ "commitTransaction: 1," +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "})," +
+ "ErrorCodes.NoSuchTransaction);";
+ return startParallelShell(runCommitExpectSuccessCode, st.s.port);
+};
+
+const setUp = function() {
+ // Create a sharded collection with a chunk on each shard:
+ // shard0: [-inf, 0)
+ // shard1: [0, 10)
+ // shard2: [10, +inf)
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: coordinator.shardName}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
+
+ // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
+ // from the shards starting, aborting, and restarting the transaction due to needing to
+ // refresh after the transaction has started.
+ assert.commandWorked(coordinator.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+ assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+ assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+
+ // Start a new transaction by inserting a document onto each shard.
+ assert.commandWorked(st.s.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false,
+ }));
+};
+
+const testCommitProtocol = function(shouldCommit, failpointData) {
+ jsTest.log("Testing two-phase " + (shouldCommit ? "commit" : "abort") +
+ " protocol with failpointData: " + tojson(failpointData));
+
+ txnNumber++;
+ setUp();
+
+ if (!shouldCommit) {
+ // Manually abort the transaction on one of the participants, so that the participant
+ // fails to prepare.
+ assert.commandWorked(participant2.adminCommand({
+ abortTransaction: 1,
lsid: lsid,
txnNumber: NumberLong(txnNumber),
stmtId: NumberInt(0),
- startTransaction: true,
autocommit: false,
}));
- };
-
- const testCommitProtocol = function(shouldCommit, failpointData) {
- jsTest.log("Testing two-phase " + (shouldCommit ? "commit" : "abort") +
- " protocol with failpointData: " + tojson(failpointData));
-
- txnNumber++;
- setUp();
-
- if (!shouldCommit) {
- // Manually abort the transaction on one of the participants, so that the participant
- // fails to prepare.
- assert.commandWorked(participant2.adminCommand({
- abortTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false,
- }));
- }
-
- // Turn on failpoint to make the coordinator hang at a the specified point.
- assert.commandWorked(coordinator.adminCommand({
- configureFailPoint: failpointData.failpoint,
- mode: {skip: (failpointData.skip ? failpointData.skip : 0)},
- }));
-
- // Run commitTransaction through a parallel shell.
- let awaitResult;
- if (shouldCommit) {
- awaitResult = runCommitThroughMongosInParallelShellExpectSuccess();
- } else {
- awaitResult = runCommitThroughMongosInParallelShellExpectAbort();
- }
-
- // Deliver killOp once the failpoint has been hit.
-
- waitForFailpoint("Hit " + failpointData.failpoint + " failpoint",
- failpointData.numTimesShouldBeHit);
-
- jsTest.log("Going to find coordinator opCtx ids");
- let coordinatorOps =
- coordinator.getDB("admin")
- .aggregate(
- [{$currentOp: {'allUsers': true}}, {$match: {desc: "TransactionCoordinator"}}])
- .toArray();
-
- // Use "greater than or equal to" since, for failpoints that pause the coordinator while
- // it's sending prepare or sending the decision, there might be one additional thread that's
- // doing the "send" to the local participant (or that thread might have already completed).
- assert.gte(coordinatorOps.length, failpointData.numTimesShouldBeHit);
-
- coordinatorOps.forEach(function(coordinatorOp) {
- coordinator.getDB("admin").killOp(coordinatorOp.opid);
+ }
+
+    // Turn on a failpoint to make the coordinator hang at the specified point.
+ assert.commandWorked(coordinator.adminCommand({
+ configureFailPoint: failpointData.failpoint,
+ mode: {skip: (failpointData.skip ? failpointData.skip : 0)},
+ }));
+
+ // Run commitTransaction through a parallel shell.
+ let awaitResult;
+ if (shouldCommit) {
+ awaitResult = runCommitThroughMongosInParallelShellExpectSuccess();
+ } else {
+ awaitResult = runCommitThroughMongosInParallelShellExpectAbort();
+ }
+
+ // Deliver killOp once the failpoint has been hit.
+
+ waitForFailpoint("Hit " + failpointData.failpoint + " failpoint",
+ failpointData.numTimesShouldBeHit);
+
+ jsTest.log("Going to find coordinator opCtx ids");
+ let coordinatorOps =
+ coordinator.getDB("admin")
+ .aggregate(
+ [{$currentOp: {'allUsers': true}}, {$match: {desc: "TransactionCoordinator"}}])
+ .toArray();
+
+ // Use "greater than or equal to" since, for failpoints that pause the coordinator while
+ // it's sending prepare or sending the decision, there might be one additional thread that's
+ // doing the "send" to the local participant (or that thread might have already completed).
+ assert.gte(coordinatorOps.length, failpointData.numTimesShouldBeHit);
+
+ coordinatorOps.forEach(function(coordinatorOp) {
+ coordinator.getDB("admin").killOp(coordinatorOp.opid);
+ });
+ assert.commandWorked(coordinator.adminCommand({
+ configureFailPoint: failpointData.failpoint,
+ mode: "off",
+ }));
+
+ // If the commit coordination was not robust to killOp, then commitTransaction would fail
+ // with an Interrupted error rather than fail with NoSuchTransaction or return success.
+ jsTest.log("Wait for the commit coordination to complete.");
+ awaitResult();
+
+ // If deleting the coordinator doc was not robust to killOp, the document would still exist.
+ assert.eq(0, coordinator.getDB("config").getCollection("transaction_coordinators").count());
+
+ // Check that the transaction committed or aborted as expected.
+ if (!shouldCommit) {
+ jsTest.log("Verify that the transaction was aborted on all shards.");
+ assert.eq(0, st.s.getDB(dbName).getCollection(collName).find().itcount());
+ } else {
+ jsTest.log("Verify that the transaction was committed on all shards.");
+ // Use assert.soon(), because although coordinateCommitTransaction currently blocks
+ // until the commit process is fully complete, it will eventually be changed to only
+ // block until the decision is *written*, at which point the test can pass the
+ // operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in the
+ // read to ensure the read sees the transaction's writes (TODO SERVER-37165).
+ assert.soon(function() {
+ return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
});
- assert.commandWorked(coordinator.adminCommand({
- configureFailPoint: failpointData.failpoint,
- mode: "off",
- }));
+ }
- // If the commit coordination was not robust to killOp, then commitTransaction would fail
- // with an Interrupted error rather than fail with NoSuchTransaction or return success.
- jsTest.log("Wait for the commit coordination to complete.");
- awaitResult();
-
- // If deleting the coordinator doc was not robust to killOp, the document would still exist.
- assert.eq(0, coordinator.getDB("config").getCollection("transaction_coordinators").count());
-
- // Check that the transaction committed or aborted as expected.
- if (!shouldCommit) {
- jsTest.log("Verify that the transaction was aborted on all shards.");
- assert.eq(0, st.s.getDB(dbName).getCollection(collName).find().itcount());
- } else {
- jsTest.log("Verify that the transaction was committed on all shards.");
- // Use assert.soon(), because although coordinateCommitTransaction currently blocks
- // until the commit process is fully complete, it will eventually be changed to only
- // block until the decision is *written*, at which point the test can pass the
- // operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in the
- // read to ensure the read sees the transaction's writes (TODO SERVER-37165).
- assert.soon(function() {
- return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
- });
- }
-
- st.s.getDB(dbName).getCollection(collName).drop();
- };
-
- const failpointDataArr = getCoordinatorFailpoints();
-
- // TODO(SERVER-39754): The abort path is unreliable, because depending on the stage at which the
- // transaction is aborted, the failpoints might be hit more than the specified number of times.
- //
- // // Test abort path.
-
- // failpointDataArr.forEach(function(failpointData) {
- // testCommitProtocol(false /* shouldCommit */, failpointData);
- // clearRawMongoProgramOutput();
- // });
-
- // Test commit path.
-
- failpointDataArr.forEach(function(failpointData) {
- testCommitProtocol(true /* shouldCommit */, failpointData);
- clearRawMongoProgramOutput();
- });
+ st.s.getDB(dbName).getCollection(collName).drop();
+};
+
+const failpointDataArr = getCoordinatorFailpoints();
+
+// TODO(SERVER-39754): The abort path is unreliable, because depending on the stage at which the
+// transaction is aborted, the failpoints might be hit more than the specified number of times.
+//
+// // Test abort path.
+
+// failpointDataArr.forEach(function(failpointData) {
+// testCommitProtocol(false /* shouldCommit */, failpointData);
+// clearRawMongoProgramOutput();
+// });
+
+// Test commit path.
- st.stop();
+failpointDataArr.forEach(function(failpointData) {
+ testCommitProtocol(true /* shouldCommit */, failpointData);
+ clearRawMongoProgramOutput();
+});
+st.stop();
})();
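
The hunk above drives each coordinator failpoint through the same lifecycle: enable it, wait for the configured number of hits, kill the paused operations, then turn it off before waiting for the commit decision. As a minimal sketch of the enable/disable half of that pattern, using the same configureFailPoint command shape that appears in the test (the failpoint name is whatever failpointData.failpoint supplies; the helper name is illustrative only):

// Illustrative helper, not part of the test. 'conn' is a shard primary connection and
// 'failpointName' is assumed to come from getCoordinatorFailpoints(), as in the test.
function setCoordinatorFailpoint(conn, failpointName, enable) {
    assert.commandWorked(conn.adminCommand({
        configureFailPoint: failpointName,
        mode: enable ? "alwaysOn" : "off",
    }));
}

// Example usage mirroring the flow above:
// setCoordinatorFailpoint(coordinator, failpointData.failpoint, true);
// ... kill the paused coordinator ops ...
// setCoordinatorFailpoint(coordinator, failpointData.failpoint, false);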
diff --git a/jstests/sharding/txn_two_phase_commit_server_status.js b/jstests/sharding/txn_two_phase_commit_server_status.js
index d4bd4b3c75e..407ee038d50 100644
--- a/jstests/sharding/txn_two_phase_commit_server_status.js
+++ b/jstests/sharding/txn_two_phase_commit_server_status.js
@@ -1,21 +1,21 @@
// Basic test that the two-phase commit coordinator metrics fields appear in serverStatus output.
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 1, config: 1});
+const st = new ShardingTest({shards: 1, config: 1});
- const res = assert.commandWorked(st.shard0.adminCommand({serverStatus: 1}));
- assert.neq(null, res.twoPhaseCommitCoordinator);
- assert.eq(0, res.twoPhaseCommitCoordinator.totalCreated);
- assert.eq(0, res.twoPhaseCommitCoordinator.totalStartedTwoPhaseCommit);
- assert.eq(0, res.twoPhaseCommitCoordinator.totalCommittedTwoPhaseCommit);
- assert.eq(0, res.twoPhaseCommitCoordinator.totalAbortedTwoPhaseCommit);
- assert.neq(null, res.twoPhaseCommitCoordinator.currentInSteps);
- assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.writingParticipantList);
- assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.waitingForVotes);
- assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.writingDecision);
- assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.waitingForDecisionAcks);
- assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.deletingCoordinatorDoc);
+const res = assert.commandWorked(st.shard0.adminCommand({serverStatus: 1}));
+assert.neq(null, res.twoPhaseCommitCoordinator);
+assert.eq(0, res.twoPhaseCommitCoordinator.totalCreated);
+assert.eq(0, res.twoPhaseCommitCoordinator.totalStartedTwoPhaseCommit);
+assert.eq(0, res.twoPhaseCommitCoordinator.totalCommittedTwoPhaseCommit);
+assert.eq(0, res.twoPhaseCommitCoordinator.totalAbortedTwoPhaseCommit);
+assert.neq(null, res.twoPhaseCommitCoordinator.currentInSteps);
+assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.writingParticipantList);
+assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.waitingForVotes);
+assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.writingDecision);
+assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.waitingForDecisionAcks);
+assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.deletingCoordinatorDoc);
- st.stop();
+st.stop();
})();
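
Since the reformatted block checks every currentInSteps counter with its own assertion, an equivalent and more compact check (a sketch only; the step names are taken verbatim from the assertions above) would loop over the field names:

// Sketch: assert that no coordinators are currently in any two-phase commit step.
const steps = [
    "writingParticipantList",
    "waitingForVotes",
    "writingDecision",
    "waitingForDecisionAcks",
    "deletingCoordinatorDoc",
];
steps.forEach(function(step) {
    assert.eq(0,
              res.twoPhaseCommitCoordinator.currentInSteps[step],
              "expected zero coordinators in step " + step);
});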
diff --git a/jstests/sharding/txn_two_phase_commit_wait_for_majority_commit_after_stepup.js b/jstests/sharding/txn_two_phase_commit_wait_for_majority_commit_after_stepup.js
index 847c1c64346..aaad8537e7f 100644
--- a/jstests/sharding/txn_two_phase_commit_wait_for_majority_commit_after_stepup.js
+++ b/jstests/sharding/txn_two_phase_commit_wait_for_majority_commit_after_stepup.js
@@ -10,119 +10,123 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
-
- load('jstests/sharding/libs/sharded_transactions_helpers.js'); // for waitForFailpoint
- load('jstests/libs/write_concern_util.js'); // for stopping/restarting replication
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- let st = new ShardingTest({
- shards: 3,
- rs0: {nodes: 2},
- causallyConsistent: true,
- other: {
- mongosOptions: {verbose: 3},
- }
- });
-
- let coordinatorReplSetTest = st.rs0;
- let participant0 = st.shard0;
- let participant1 = st.shard1;
- let participant2 = st.shard2;
-
- let lsid = {id: UUID()};
- let txnNumber = 0;
-
- const runCommitThroughMongosInParallelShellExpectTimeOut = function() {
- const runCommitExpectTimeOutCode = "assert.commandFailedWithCode(db.adminCommand({" +
- "commitTransaction: 1, maxTimeMS: 1000 * 10, " + "lsid: " + tojson(lsid) + "," +
- "txnNumber: NumberLong(" + txnNumber + ")," + "stmtId: NumberInt(0)," +
- "autocommit: false," + "})," + "ErrorCodes.MaxTimeMSExpired);";
- return startParallelShell(runCommitExpectTimeOutCode, st.s.port);
- };
-
- const setUp = function() {
- // Create a sharded collection with a chunk on each shard:
- // shard0: [-inf, 0)
- // shard1: [0, 10)
- // shard2: [10, +inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: participant0.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
-
- // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
- // from the shards starting, aborting, and restarting the transaction due to needing to
- // refresh after the transaction has started.
- assert.commandWorked(participant0.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
-
- // Start a new transaction by inserting a document onto each shard.
- assert.commandWorked(st.s.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}, {_id: 5}, {_id: 15}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false,
- }));
- };
- setUp();
-
- let coordPrimary = coordinatorReplSetTest.getPrimary();
- let coordSecondary = coordinatorReplSetTest.getSecondary();
-
- // Make the commit coordination hang before writing the decision, and send commitTransaction.
- assert.commandWorked(coordPrimary.adminCommand({
- configureFailPoint: "hangBeforeWritingDecision",
- mode: "alwaysOn",
- }));
- let awaitResult = runCommitThroughMongosInParallelShellExpectTimeOut();
- waitForFailpoint("Hit hangBeforeWritingDecision failpoint", 1);
-
- // Stop replication on all nodes in the coordinator replica set so that the write done on stepup
- // cannot become majority committed, regardless of which node steps up.
- stopServerReplication([coordPrimary, coordSecondary]);
-
- // Induce the coordinator primary to step down.
-
- // The amount of time the node has to wait before becoming primary again.
- const stepDownSecs = 1;
- assert.commandWorked(coordPrimary.adminCommand({replSetStepDown: stepDownSecs, force: true}));
-
- assert.commandWorked(coordPrimary.adminCommand({
- configureFailPoint: "hangBeforeWritingDecision",
- mode: "off",
- }));
-
- // The router should retry commitTransaction against the new primary and time out waiting to
- // access the coordinator catalog.
- awaitResult();
-
- // Re-enable replication, so that the write done on stepup can become majority committed.
- restartReplSetReplication(coordinatorReplSetTest);
-
- // Now, commitTransaction should succeed.
- assert.commandWorked(st.s.adminCommand({
- commitTransaction: 1,
+'use strict';
+
+load('jstests/sharding/libs/sharded_transactions_helpers.js'); // for waitForFailpoint
+load('jstests/libs/write_concern_util.js'); // for stopping/restarting replication
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+let st = new ShardingTest({
+ shards: 3,
+ rs0: {nodes: 2},
+ causallyConsistent: true,
+ other: {
+ mongosOptions: {verbose: 3},
+ }
+});
+
+let coordinatorReplSetTest = st.rs0;
+let participant0 = st.shard0;
+let participant1 = st.shard1;
+let participant2 = st.shard2;
+
+let lsid = {id: UUID()};
+let txnNumber = 0;
+
+const runCommitThroughMongosInParallelShellExpectTimeOut = function() {
+ const runCommitExpectTimeOutCode = "assert.commandFailedWithCode(db.adminCommand({" +
+ "commitTransaction: 1, maxTimeMS: 1000 * 10, " +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "})," +
+ "ErrorCodes.MaxTimeMSExpired);";
+ return startParallelShell(runCommitExpectTimeOutCode, st.s.port);
+};
+
+const setUp = function() {
+ // Create a sharded collection with a chunk on each shard:
+ // shard0: [-inf, 0)
+ // shard1: [0, 10)
+ // shard2: [10, +inf)
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: participant0.shardName}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
+
+ // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
+ // from the shards starting, aborting, and restarting the transaction due to needing to
+ // refresh after the transaction has started.
+ assert.commandWorked(participant0.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+ assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+ assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+
+ // Start a new transaction by inserting a document onto each shard.
+ assert.commandWorked(st.s.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}, {_id: 5}, {_id: 15}],
lsid: lsid,
txnNumber: NumberLong(txnNumber),
stmtId: NumberInt(0),
- autocommit: false
+ startTransaction: true,
+ autocommit: false,
}));
-
- jsTest.log("Verify that the transaction was committed on all shards.");
- assert.eq(3, st.s.getDB(dbName).getCollection(collName).find().itcount());
-
- st.stop();
+};
+setUp();
+
+let coordPrimary = coordinatorReplSetTest.getPrimary();
+let coordSecondary = coordinatorReplSetTest.getSecondary();
+
+// Make the commit coordination hang before writing the decision, and send commitTransaction.
+assert.commandWorked(coordPrimary.adminCommand({
+ configureFailPoint: "hangBeforeWritingDecision",
+ mode: "alwaysOn",
+}));
+let awaitResult = runCommitThroughMongosInParallelShellExpectTimeOut();
+waitForFailpoint("Hit hangBeforeWritingDecision failpoint", 1);
+
+// Stop replication on all nodes in the coordinator replica set so that the write done on stepup
+// cannot become majority committed, regardless of which node steps up.
+stopServerReplication([coordPrimary, coordSecondary]);
+
+// Induce the coordinator primary to step down.
+
+// The amount of time the node has to wait before becoming primary again.
+const stepDownSecs = 1;
+assert.commandWorked(coordPrimary.adminCommand({replSetStepDown: stepDownSecs, force: true}));
+
+assert.commandWorked(coordPrimary.adminCommand({
+ configureFailPoint: "hangBeforeWritingDecision",
+ mode: "off",
+}));
+
+// The router should retry commitTransaction against the new primary and time out waiting to
+// access the coordinator catalog.
+awaitResult();
+
+// Re-enable replication, so that the write done on stepup can become majority committed.
+restartReplSetReplication(coordinatorReplSetTest);
+
+// Now, commitTransaction should succeed.
+assert.commandWorked(st.s.adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ autocommit: false
+}));
+
+jsTest.log("Verify that the transaction was committed on all shards.");
+assert.eq(3, st.s.getDB(dbName).getCollection(collName).find().itcount());
+
+st.stop();
})();
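
The parallel-shell helper above assembles its commitTransaction command by string concatenation. For reference, the command object the parallel shell ends up running has the following shape (a sketch that mirrors the concatenated string; lsid and txnNumber are the test's own values):

// Shape of the command run inside the parallel shell (sketch, not an additional test step).
const commitCmd = {
    commitTransaction: 1,
    maxTimeMS: 1000 * 10,
    lsid: lsid,
    txnNumber: NumberLong(txnNumber),
    stmtId: NumberInt(0),
    autocommit: false,
};
assert.commandFailedWithCode(db.adminCommand(commitCmd), ErrorCodes.MaxTimeMSExpired);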
diff --git a/jstests/sharding/txn_with_several_routers.js b/jstests/sharding/txn_with_several_routers.js
index 89906937a3f..4dededd0cb0 100644
--- a/jstests/sharding/txn_with_several_routers.js
+++ b/jstests/sharding/txn_with_several_routers.js
@@ -6,198 +6,196 @@
*/
(function() {
- 'use strict';
-
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- function removeAllDocumentsFromTestCollection() {
- assert.commandWorked(router0.getDB(dbName).foo.deleteMany({}));
- }
-
- function runTest(testFn) {
- testFn();
- removeAllDocumentsFromTestCollection();
- }
-
- let st = new ShardingTest({shards: 3, mongos: 2, causallyConsistent: true});
- let router0 = st.s0;
- let router1 = st.s1;
-
- // Create a sharded collection with a chunk on each shard:
- // shard0: [-inf, 0)
- // shard1: [0, 10)
- // shard2: [10, +inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: st.shard2.shardName}));
-
- flushRoutersAndRefreshShardMetadata(st, {ns});
-
- // Test that trying to run start txn from two routers with the same transaction number fails
- // through the second router if they target overlapping shards.
- runTest(() => {
- let lsid = {id: UUID()};
- let txnNumber = 0;
-
- // Start a new transaction on router 0 by inserting a document onto each shard.
- assert.commandWorked(router0.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}, {_id: 5}, {_id: 15}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false,
- }));
-
- // Try to start a new transaction with the same transaction number on router 1 by inserting
- // a document onto each shard.
- assert.commandFailedWithCode(router1.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -50}, {_id: 4}, {_id: 150}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false,
- // Because ordered writes are done serially for different shard targets and abort early
- // on first error, this can leave the transaction on the other shards open.
- // To ensure this router implicitly aborts the transaction on all participants (so
- // that the next test case does not encounter an open transaction), make this
- // router do an *unordered* write that touches all the same participants as the
- // first router touched.
- ordered: false,
- }),
- 50911);
-
- // The automatic abort-on-error path will occur when the above
- // statement fails, so commit will fail.
- assert.commandFailedWithCode(router0.getDB(dbName).adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
- });
-
- // Test that trying to run start txn from one router and running an operation for that same
- // transaction from another router fails through the second router.
- runTest(() => {
- let lsid = {id: UUID()};
- let txnNumber = 0;
- let stmtId = 0;
-
- // Start a new transaction on router 0 by inserting a document onto each shard.
- assert.commandWorked(router0.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}, {_id: 5}, {_id: 15}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- startTransaction: true,
- autocommit: false,
- }));
-
- ++stmtId;
-
- // Try to continue the same transaction but through router 1. Should fail because no txn
- // with this number exists on that router.
- assert.commandFailed(router1.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -50}, {_id: 4}, {_id: 150}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- }));
-
- // Commit should succeed since the command from router 2 should never reach the shard.
- assert.commandWorked(router0.getDB(dbName).adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false,
- }));
- });
-
- // Test that trying to run start txn from one router, start txn on the second router with the
- // same transaction number, and running operations on overlapping shards will lead to failure.
- runTest(() => {
- let lsid = {id: UUID()};
- let txnNumber = 0;
- let stmtId = 0;
-
- // Start a new transaction on router 0 by inserting a document onto the first shard
- assert.commandWorked(router0.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- startTransaction: true,
- autocommit: false,
- }));
-
- // Start a new transaction on router 1 with the same transaction number, targeting the last
- // shard.
- assert.commandWorked(router1.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: 15}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- startTransaction: true,
- autocommit: false,
- }));
-
- ++stmtId;
-
- // Try to do an operation on the last shard through router 0. Fails because it sends
- // startTxn: true to the new participant, which has already seen an operation from router 1.
- // Implicitly aborts the transaction when the error is thrown.
- assert.commandFailedWithCode(router0.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: 50}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- }),
- 50911);
-
- // Commit through router 0 should fail.
- assert.commandFailedWithCode(router0.getDB(dbName).adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
-
- // Commit through router 1 should fail.
- assert.commandFailedWithCode(router1.getDB(dbName).adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
- });
-
- st.stop();
+'use strict';
+
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+function removeAllDocumentsFromTestCollection() {
+ assert.commandWorked(router0.getDB(dbName).foo.deleteMany({}));
+}
+
+function runTest(testFn) {
+ testFn();
+ removeAllDocumentsFromTestCollection();
+}
+
+let st = new ShardingTest({shards: 3, mongos: 2, causallyConsistent: true});
+let router0 = st.s0;
+let router1 = st.s1;
+
+// Create a sharded collection with a chunk on each shard:
+// shard0: [-inf, 0)
+// shard1: [0, 10)
+// shard2: [10, +inf)
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: st.shard2.shardName}));
+
+flushRoutersAndRefreshShardMetadata(st, {ns});
+
+// Test that trying to run start txn from two routers with the same transaction number fails
+// through the second router if they target overlapping shards.
+runTest(() => {
+ let lsid = {id: UUID()};
+ let txnNumber = 0;
+
+ // Start a new transaction on router 0 by inserting a document onto each shard.
+ assert.commandWorked(router0.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false,
+ }));
+
+ // Try to start a new transaction with the same transaction number on router 1 by inserting
+ // a document onto each shard.
+ assert.commandFailedWithCode(router1.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -50}, {_id: 4}, {_id: 150}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false,
+ // Because ordered writes are done serially for different shard targets and abort early
+ // on first error, this can leave the transaction on the other shards open.
+ // To ensure this router implicitly aborts the transaction on all participants (so
+ // that the next test case does not encounter an open transaction), make this
+ // router do an *unordered* write that touches all the same participants as the
+ // first router touched.
+ ordered: false,
+ }),
+ 50911);
+
+ // The automatic abort-on-error path will occur when the above
+ // statement fails, so commit will fail.
+ assert.commandFailedWithCode(router0.getDB(dbName).adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ autocommit: false,
+ }),
+ ErrorCodes.NoSuchTransaction);
+});
+
+// Test that trying to run start txn from one router and running an operation for that same
+// transaction from another router fails through the second router.
+runTest(() => {
+ let lsid = {id: UUID()};
+ let txnNumber = 0;
+ let stmtId = 0;
+
+ // Start a new transaction on router 0 by inserting a document onto each shard.
+ assert.commandWorked(router0.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ startTransaction: true,
+ autocommit: false,
+ }));
+
+ ++stmtId;
+
+ // Try to continue the same transaction but through router 1. Should fail because no txn
+ // with this number exists on that router.
+ assert.commandFailed(router1.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -50}, {_id: 4}, {_id: 150}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ }));
+
+    // Commit should succeed since the command from router 1 should never reach the shard.
+ assert.commandWorked(router0.getDB(dbName).adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ autocommit: false,
+ }));
+});
+
+// Test that trying to run start txn from one router, start txn on the second router with the
+// same transaction number, and running operations on overlapping shards will lead to failure.
+runTest(() => {
+ let lsid = {id: UUID()};
+ let txnNumber = 0;
+ let stmtId = 0;
+
+ // Start a new transaction on router 0 by inserting a document onto the first shard
+ assert.commandWorked(router0.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ startTransaction: true,
+ autocommit: false,
+ }));
+
+ // Start a new transaction on router 1 with the same transaction number, targeting the last
+ // shard.
+ assert.commandWorked(router1.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: 15}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ startTransaction: true,
+ autocommit: false,
+ }));
+
+ ++stmtId;
+
+ // Try to do an operation on the last shard through router 0. Fails because it sends
+ // startTxn: true to the new participant, which has already seen an operation from router 1.
+ // Implicitly aborts the transaction when the error is thrown.
+ assert.commandFailedWithCode(router0.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: 50}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ }),
+ 50911);
+
+ // Commit through router 0 should fail.
+ assert.commandFailedWithCode(router0.getDB(dbName).adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ }),
+ ErrorCodes.NoSuchTransaction);
+
+ // Commit through router 1 should fail.
+ assert.commandFailedWithCode(router1.getDB(dbName).adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ }),
+ ErrorCodes.NoSuchTransaction);
+});
+
+st.stop();
})();
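
All three cases above send the same session-stamped insert through one router or the other; only the documents, stmtId, and startTransaction flag vary. A helper along these lines (purely illustrative, not part of the test) captures the repeated shape:

// Sketch of a helper for the repeated transactional inserts above.
// 'router' is one of the mongos connections (router0 or router1).
function runTxnInsert(router, docs, lsid, txnNumber, stmtId, isFirstStatement) {
    const cmd = {
        insert: collName,
        documents: docs,
        lsid: lsid,
        txnNumber: NumberLong(txnNumber),
        stmtId: NumberInt(stmtId),
        autocommit: false,
    };
    if (isFirstStatement) {
        cmd.startTransaction = true;
    }
    return router.getDB(dbName).runCommand(cmd);
}

// Example: the first statement through router0, then an attempted continuation through router1.
// assert.commandWorked(runTxnInsert(router0, [{_id: -5}], lsid, txnNumber, 0, true));
// assert.commandFailed(runTxnInsert(router1, [{_id: -50}], lsid, txnNumber, 1, false));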
diff --git a/jstests/sharding/txn_writes_during_movechunk.js b/jstests/sharding/txn_writes_during_movechunk.js
index 47f1dc4a7ba..357ea22e14e 100644
--- a/jstests/sharding/txn_writes_during_movechunk.js
+++ b/jstests/sharding/txn_writes_during_movechunk.js
@@ -2,56 +2,56 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- 'use strict';
+'use strict';
- let staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+let staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- let st = new ShardingTest({shards: 2});
+let st = new ShardingTest({shards: 2});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
- let coll = st.s.getDB('test').user;
- assert.writeOK(coll.insert({_id: 'updateMe'}));
- assert.writeOK(coll.insert({_id: 'deleteMe'}));
+let coll = st.s.getDB('test').user;
+assert.writeOK(coll.insert({_id: 'updateMe'}));
+assert.writeOK(coll.insert({_id: 'deleteMe'}));
- pauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+pauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
- let joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {_id: 0}, null, 'test.user', st.shard1.shardName);
+let joinMoveChunk =
+ moveChunkParallel(staticMongod, st.s0.host, {_id: 0}, null, 'test.user', st.shard1.shardName);
- waitForMigrateStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+waitForMigrateStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
- let session = st.s.startSession();
- let sessionDB = session.getDatabase('test');
- let sessionColl = sessionDB.getCollection('user');
+let session = st.s.startSession();
+let sessionDB = session.getDatabase('test');
+let sessionColl = sessionDB.getCollection('user');
- session.startTransaction();
- sessionColl.insert({_id: 'insertMe'});
- sessionColl.update({_id: 'updateMe'}, {$inc: {y: 1}});
- sessionColl.remove({_id: 'deleteMe'});
+session.startTransaction();
+sessionColl.insert({_id: 'insertMe'});
+sessionColl.update({_id: 'updateMe'}, {$inc: {y: 1}});
+sessionColl.remove({_id: 'deleteMe'});
- pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
- unpauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
- waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+unpauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
- let recipientColl = st.rs1.getPrimary().getDB('test').user;
- assert.eq(null, recipientColl.findOne({_id: 'insertMe'}));
- assert.eq({_id: 'updateMe'}, recipientColl.findOne({_id: 'updateMe'}));
- assert.eq({_id: 'deleteMe'}, recipientColl.findOne({_id: 'deleteMe'}));
+let recipientColl = st.rs1.getPrimary().getDB('test').user;
+assert.eq(null, recipientColl.findOne({_id: 'insertMe'}));
+assert.eq({_id: 'updateMe'}, recipientColl.findOne({_id: 'updateMe'}));
+assert.eq({_id: 'deleteMe'}, recipientColl.findOne({_id: 'deleteMe'}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
- joinMoveChunk();
+unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+joinMoveChunk();
- assert.eq({_id: 'insertMe'}, recipientColl.findOne({_id: 'insertMe'}));
- assert.eq({_id: 'updateMe', y: 1}, recipientColl.findOne({_id: 'updateMe'}));
- assert.eq(null, recipientColl.findOne({_id: 'deleteMe'}));
+assert.eq({_id: 'insertMe'}, recipientColl.findOne({_id: 'insertMe'}));
+assert.eq({_id: 'updateMe', y: 1}, recipientColl.findOne({_id: 'updateMe'}));
+assert.eq(null, recipientColl.findOne({_id: 'deleteMe'}));
- assert.eq(null, recipientColl.findOne({x: 1}));
+assert.eq(null, recipientColl.findOne({x: 1}));
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/unique_index_on_shardservers.js b/jstests/sharding/unique_index_on_shardservers.js
index 1776aa44260..4ee9bb007d1 100644
--- a/jstests/sharding/unique_index_on_shardservers.js
+++ b/jstests/sharding/unique_index_on_shardservers.js
@@ -1,30 +1,30 @@
// SERVER-34954 This test ensures a node started with --shardsvr and added to a replica set has
// the correct version of unique indexes upon re-initiation.
(function() {
- "use strict";
- load("jstests/libs/check_unique_indexes.js");
+"use strict";
+load("jstests/libs/check_unique_indexes.js");
- let st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
- let mongos = st.s;
- let rs = st.rs0;
+let st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
+let mongos = st.s;
+let rs = st.rs0;
- // Create `test.coll` and add some indexes on it:
- // with index versions as default, v=1 and v=2; both unique and standard types
- assert.writeOK(mongos.getDB("test").coll.insert({_id: 1, a: 1, b: 1, c: 1, d: 1, e: 1, f: 1}));
- assert.commandWorked(mongos.getDB("test").coll.createIndex({a: 1}, {"v": 1}));
- assert.commandWorked(mongos.getDB("test").coll.createIndex({b: 1}, {"v": 1, "unique": true}));
- assert.commandWorked(mongos.getDB("test").coll.createIndex({c: 1}, {"v": 2}));
- assert.commandWorked(mongos.getDB("test").coll.createIndex({d: 1}, {"v": 2, "unique": true}));
- assert.commandWorked(mongos.getDB("test").coll.createIndex({e: 1}));
- assert.commandWorked(mongos.getDB("test").coll.createIndex({f: 1}, {"unique": true}));
+// Create `test.coll` and add some indexes to it, covering the default, v=1, and v=2 index
+// versions, in both unique and non-unique variants.
+assert.writeOK(mongos.getDB("test").coll.insert({_id: 1, a: 1, b: 1, c: 1, d: 1, e: 1, f: 1}));
+assert.commandWorked(mongos.getDB("test").coll.createIndex({a: 1}, {"v": 1}));
+assert.commandWorked(mongos.getDB("test").coll.createIndex({b: 1}, {"v": 1, "unique": true}));
+assert.commandWorked(mongos.getDB("test").coll.createIndex({c: 1}, {"v": 2}));
+assert.commandWorked(mongos.getDB("test").coll.createIndex({d: 1}, {"v": 2, "unique": true}));
+assert.commandWorked(mongos.getDB("test").coll.createIndex({e: 1}));
+assert.commandWorked(mongos.getDB("test").coll.createIndex({f: 1}, {"unique": true}));
- // Add a node with --shardsvr to the replica set.
- let newNode = rs.add({'shardsvr': '', rsConfig: {priority: 0, votes: 0}});
- rs.reInitiate();
- rs.awaitSecondaryNodes();
+// Add a node with --shardsvr to the replica set.
+let newNode = rs.add({'shardsvr': '', rsConfig: {priority: 0, votes: 0}});
+rs.reInitiate();
+rs.awaitSecondaryNodes();
- // After adding a new node as a ShardServer ensure the new node has unique indexes
- // in the correct version
- checkUniqueIndexFormatVersion(newNode.getDB("admin"));
- st.stop();
+// After adding a new node as a ShardServer ensure the new node has unique indexes
+// in the correct version
+checkUniqueIndexFormatVersion(newNode.getDB("admin"));
+st.stop();
})();
diff --git a/jstests/sharding/unowned_doc_filtering.js b/jstests/sharding/unowned_doc_filtering.js
index f1d6e96541d..5a337aaa454 100644
--- a/jstests/sharding/unowned_doc_filtering.js
+++ b/jstests/sharding/unowned_doc_filtering.js
@@ -11,44 +11,44 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2});
- var testDB = st.s.getDB('test');
+var testDB = st.s.getDB('test');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(testDB.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
+assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
+assert.commandWorked(testDB.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
- var inserts = [];
- for (var i = 0; i < 100; i++) {
- inserts.push({x: i});
- }
- assert.writeOK(testDB.foo.insert(inserts));
-
- assert.commandWorked(testDB.adminCommand({split: 'test.foo', find: {x: 50}}));
- assert.commandWorked(
- testDB.adminCommand({moveChunk: 'test.foo', find: {x: 100}, to: st.shard1.shardName}));
-
- // Insert some documents directly into the shards into chunks not owned by that shard.
- st.rs0.getPrimary().getDB('test').foo.insert({x: 100});
- st.rs1.getPrimary().getDB('test').foo.insert({x: 0});
-
- st.rs0.restart(0);
- st.rs1.restart(0);
-
- var fooCount;
- for (var retries = 0; retries <= 2; retries++) {
- try {
- fooCount = testDB.foo.find().itcount();
- break;
- } catch (e) {
- // expected for reestablishing connections broken by the mongod restart.
- assert.eq(ErrorCodes.HostUnreachable, e.code, tojson(e));
- }
+var inserts = [];
+for (var i = 0; i < 100; i++) {
+ inserts.push({x: i});
+}
+assert.writeOK(testDB.foo.insert(inserts));
+
+assert.commandWorked(testDB.adminCommand({split: 'test.foo', find: {x: 50}}));
+assert.commandWorked(
+ testDB.adminCommand({moveChunk: 'test.foo', find: {x: 100}, to: st.shard1.shardName}));
+
+// Insert some documents directly into each shard, into chunks not owned by that shard.
+st.rs0.getPrimary().getDB('test').foo.insert({x: 100});
+st.rs1.getPrimary().getDB('test').foo.insert({x: 0});
+
+st.rs0.restart(0);
+st.rs1.restart(0);
+
+var fooCount;
+for (var retries = 0; retries <= 2; retries++) {
+ try {
+ fooCount = testDB.foo.find().itcount();
+ break;
+ } catch (e) {
+ // expected for reestablishing connections broken by the mongod restart.
+ assert.eq(ErrorCodes.HostUnreachable, e.code, tojson(e));
}
- assert.eq(100, fooCount);
+}
+assert.eq(100, fooCount);
- st.stop();
+st.stop();
}());
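
The bounded retry loop above exists only to ride out connections broken by the two restarts. The same pattern, generalized into a helper (the names here are illustrative and not part of the test), looks like this:

// Sketch: retry a read a bounded number of times, tolerating only the HostUnreachable
// errors caused by the restarted shards.
function retryOnHostUnreachable(fn, maxRetries) {
    for (let attempt = 0; attempt <= maxRetries; attempt++) {
        try {
            return fn();
        } catch (e) {
            // Expected while re-establishing connections broken by the mongod restarts.
            assert.eq(ErrorCodes.HostUnreachable, e.code, tojson(e));
        }
    }
    throw new Error("read did not succeed within " + maxRetries + " retries");
}

// Example: var fooCount = retryOnHostUnreachable(() => testDB.foo.find().itcount(), 2);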
diff --git a/jstests/sharding/unsharded_collection_targetting.js b/jstests/sharding/unsharded_collection_targetting.js
index efe4c3c05c4..5393a212ae4 100644
--- a/jstests/sharding/unsharded_collection_targetting.js
+++ b/jstests/sharding/unsharded_collection_targetting.js
@@ -1,32 +1,32 @@
// Tests that a stale mongos would route writes correctly to the right shard after
// an unsharded collection was moved to another shard.
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({
- shards: 2,
- mongos: 2,
- rs: {
- nodes: 1,
- },
- });
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ rs: {
+ nodes: 1,
+ },
+});
- const testName = 'test';
- const mongosDB = st.s0.getDB(testName);
+const testName = 'test';
+const mongosDB = st.s0.getDB(testName);
- // Ensure that shard1 is the primary shard.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs1.getURL());
+// Ensure that shard1 is the primary shard.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs1.getURL());
- // Before moving the collection, issue a write through mongos2 to make it aware
- // about the location of the collection before the move.
- const mongos2DB = st.s1.getDB(testName);
- const mongos2Coll = mongos2DB[testName];
- assert.writeOK(mongos2Coll.insert({_id: 0, a: 0}));
+// Before moving the collection, issue a write through mongos2 to make it aware
+// of the collection's location before the move.
+const mongos2DB = st.s1.getDB(testName);
+const mongos2Coll = mongos2DB[testName];
+assert.writeOK(mongos2Coll.insert({_id: 0, a: 0}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- assert.writeOK(mongos2Coll.insert({_id: 1, a: 0}));
+assert.writeOK(mongos2Coll.insert({_id: 1, a: 0}));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/unsharded_lookup_in_txn.js b/jstests/sharding/unsharded_lookup_in_txn.js
index 32d0d21e105..ac1bcee7216 100644
--- a/jstests/sharding/unsharded_lookup_in_txn.js
+++ b/jstests/sharding/unsharded_lookup_in_txn.js
@@ -4,36 +4,36 @@
* uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
- const st = new ShardingTest({shards: 2, mongos: 1});
- const kDBName = "unsharded_lookup_in_txn";
+const st = new ShardingTest({shards: 2, mongos: 1});
+const kDBName = "unsharded_lookup_in_txn";
- let session = st.s.startSession();
- let sessionDB = session.getDatabase("unsharded_lookup_in_txn");
+let session = st.s.startSession();
+let sessionDB = session.getDatabase("unsharded_lookup_in_txn");
- const shardedColl = sessionDB.sharded;
- const unshardedColl = sessionDB.unsharded;
+const shardedColl = sessionDB.sharded;
+const unshardedColl = sessionDB.unsharded;
- assert.commandWorked(st.s.adminCommand({enableSharding: sessionDB.getName()}));
- st.ensurePrimaryShard(sessionDB.getName(), st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: sessionDB.getName()}));
+st.ensurePrimaryShard(sessionDB.getName(), st.shard0.shardName);
- assert.commandWorked(
- st.s.adminCommand({shardCollection: shardedColl.getFullName(), key: {_id: 1}}));
+assert.commandWorked(
+ st.s.adminCommand({shardCollection: shardedColl.getFullName(), key: {_id: 1}}));
- // Move all of the data to shard 1.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: shardedColl.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
- flushRoutersAndRefreshShardMetadata(st, {ns: shardedColl.getFullName()});
+// Move all of the data to shard 1.
+assert.commandWorked(st.s.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
+flushRoutersAndRefreshShardMetadata(st, {ns: shardedColl.getFullName()});
- // Insert a bunch of documents, all of which reside on the same chunk (on shard 1).
- for (let i = -10; i < 10; i++) {
- assert.commandWorked(shardedColl.insert({_id: i, local_always_one: 1}));
- }
+// Insert a bunch of documents, all of which reside on the same chunk (on shard 1).
+for (let i = -10; i < 10; i++) {
+ assert.commandWorked(shardedColl.insert({_id: i, local_always_one: 1}));
+}
- const pipeline = [{
+const pipeline = [{
$lookup: {
from: unshardedColl.getName(),
localField: "local_always_one",
@@ -41,57 +41,57 @@
as: "matches"
}
}];
- const kBatchSize = 2;
-
- const testLookupDoesNotSeeDocumentsOutsideSnapshot = function() {
- unshardedColl.drop();
- // Insert some stuff into the unsharded collection.
- const kUnshardedCollOriginalSize = 10;
- for (let i = 0; i < kUnshardedCollOriginalSize; i++) {
- assert.commandWorked(unshardedColl.insert({_id: i, foreign_always_one: 1}));
- }
+const kBatchSize = 2;
+
+const testLookupDoesNotSeeDocumentsOutsideSnapshot = function() {
+ unshardedColl.drop();
+ // Insert some stuff into the unsharded collection.
+ const kUnshardedCollOriginalSize = 10;
+ for (let i = 0; i < kUnshardedCollOriginalSize; i++) {
+ assert.commandWorked(unshardedColl.insert({_id: i, foreign_always_one: 1}));
+ }
- session.startTransaction();
+ session.startTransaction();
- const curs = shardedColl.aggregate(
- pipeline, {readConcern: {level: "snapshot"}, cursor: {batchSize: kBatchSize}});
+ const curs = shardedColl.aggregate(
+ pipeline, {readConcern: {level: "snapshot"}, cursor: {batchSize: kBatchSize}});
- for (let i = 0; i < kBatchSize; i++) {
- const doc = curs.next();
- assert.eq(doc.matches.length, kUnshardedCollOriginalSize);
- }
+ for (let i = 0; i < kBatchSize; i++) {
+ const doc = curs.next();
+ assert.eq(doc.matches.length, kUnshardedCollOriginalSize);
+ }
- // Do writes on the unsharded collection from outside the session.
- (function() {
- const unshardedCollOutsideSession =
- st.s.getDB(sessionDB.getName())[unshardedColl.getName()];
- assert.commandWorked(unshardedCollOutsideSession.insert({b: 1, xyz: 1}));
- assert.commandWorked(unshardedCollOutsideSession.insert({b: 1, xyz: 2}));
- })();
-
- // We shouldn't see those writes from the aggregation within the session.
- assert.eq(curs.hasNext(), true);
- while (curs.hasNext()) {
- const doc = curs.next();
- assert.eq(doc.matches.length, kUnshardedCollOriginalSize);
- }
+ // Do writes on the unsharded collection from outside the session.
+ (function() {
+ const unshardedCollOutsideSession =
+ st.s.getDB(sessionDB.getName())[unshardedColl.getName()];
+ assert.commandWorked(unshardedCollOutsideSession.insert({b: 1, xyz: 1}));
+ assert.commandWorked(unshardedCollOutsideSession.insert({b: 1, xyz: 2}));
+ })();
+
+ // We shouldn't see those writes from the aggregation within the session.
+ assert.eq(curs.hasNext(), true);
+ while (curs.hasNext()) {
+ const doc = curs.next();
+ assert.eq(doc.matches.length, kUnshardedCollOriginalSize);
+ }
- assert.commandWorked(session.abortTransaction_forTesting());
- };
+ assert.commandWorked(session.abortTransaction_forTesting());
+};
- // Run the test once, with all of the data on shard 1. This means that the merging shard (shard
- // 0) will not be targeted. This is interesting because in contrast to the case below, the
- // merging half of the pipeline will start the transaction on the merging shard.
- testLookupDoesNotSeeDocumentsOutsideSnapshot();
+// Run the test once, with all of the data on shard 1. This means that the merging shard (shard
+// 0) will not be targeted. This is interesting because in contrast to the case below, the
+// merging half of the pipeline will start the transaction on the merging shard.
+testLookupDoesNotSeeDocumentsOutsideSnapshot();
- // Move some data to shard 0, so that the merging shard will be targeted.
- assert.commandWorked(st.s.adminCommand({split: shardedColl.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: shardedColl.getFullName(), find: {_id: -1}, to: st.shard0.shardName}));
- flushRoutersAndRefreshShardMetadata(st, {ns: shardedColl.getFullName()});
+// Move some data to shard 0, so that the merging shard will be targeted.
+assert.commandWorked(st.s.adminCommand({split: shardedColl.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {_id: -1}, to: st.shard0.shardName}));
+flushRoutersAndRefreshShardMetadata(st, {ns: shardedColl.getFullName()});
- // Run the test again.
- testLookupDoesNotSeeDocumentsOutsideSnapshot();
+// Run the test again.
+testLookupDoesNotSeeDocumentsOutsideSnapshot();
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/update_compound_shard_key.js b/jstests/sharding/update_compound_shard_key.js
index c3b1f6b0f46..46e28ed597f 100644
--- a/jstests/sharding/update_compound_shard_key.js
+++ b/jstests/sharding/update_compound_shard_key.js
@@ -3,416 +3,414 @@
* @tags: [uses_transactions, uses_multi_shard_transaction]
*/
(function() {
- 'use strict';
-
- load("jstests/sharding/libs/update_shard_key_helpers.js");
-
- const st = new ShardingTest({mongos: 1, shards: 3});
- const kDbName = 'update_compound_sk';
- const ns = kDbName + '.coll';
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(kDbName);
-
- assert.commandWorked(st.s0.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, st.shard0.shardName);
-
- assert.commandWorked(
- st.s.getDB('config').adminCommand({shardCollection: ns, key: {x: 1, y: 1, z: 1}}));
-
- let docsToInsert = [
- {_id: 0, x: 4, y: 3, z: 3},
- {_id: 1, x: 100, y: 50, z: 3, a: 5},
- {_id: 2, x: 100, y: 500, z: 3, a: 5}
- ];
-
- // Make sure that shard0, shard1 and shard2 has _id 0,1 and 2 documents respectively.
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 100, y: 0, z: 3}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 100, y: 100, z: 3}}));
-
- for (let i = 0; i < docsToInsert.length; i++) {
- assert.commandWorked(st.s.getDB(kDbName).coll.insert(docsToInsert[i]));
- }
-
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {x: 100, y: 50, z: 3}, to: st.shard1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {x: 100, y: 500, z: 3}, to: st.shard2.shardName}));
- cleanupOrphanedDocs(st, ns);
-
- function assertUpdateWorked(query, update, isUpsert, _id) {
- const res = st.s.getDB(kDbName).coll.update(query, update, {upsert: isUpsert});
- assert.commandWorked(res);
- assert.eq(0, res.nUpserted);
- assert.eq(1, res.nMatched);
- assert.eq(1, res.nModified);
-
- // Skip find based validation for pipleline update.
- if (!Array.isArray(update)) {
- if (update["$set"] != undefined) {
- update = update["$set"];
- }
- update["_id"] = _id;
- // Make sure that the update modified the document with the given _id.
- assert.eq(1, st.s.getDB(kDbName).coll.find(update).itcount());
+'use strict';
+
+load("jstests/sharding/libs/update_shard_key_helpers.js");
+
+const st = new ShardingTest({mongos: 1, shards: 3});
+const kDbName = 'update_compound_sk';
+const ns = kDbName + '.coll';
+const session = st.s.startSession();
+const sessionDB = session.getDatabase(kDbName);
+
+assert.commandWorked(st.s0.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, st.shard0.shardName);
+
+assert.commandWorked(
+ st.s.getDB('config').adminCommand({shardCollection: ns, key: {x: 1, y: 1, z: 1}}));
+
+let docsToInsert = [
+ {_id: 0, x: 4, y: 3, z: 3},
+ {_id: 1, x: 100, y: 50, z: 3, a: 5},
+ {_id: 2, x: 100, y: 500, z: 3, a: 5}
+];
+
+// Make sure that shard0, shard1, and shard2 hold the documents with _id 0, 1, and 2, respectively.
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 100, y: 0, z: 3}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 100, y: 100, z: 3}}));
+
+for (let i = 0; i < docsToInsert.length; i++) {
+ assert.commandWorked(st.s.getDB(kDbName).coll.insert(docsToInsert[i]));
+}
+
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {x: 100, y: 50, z: 3}, to: st.shard1.shardName}));
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {x: 100, y: 500, z: 3}, to: st.shard2.shardName}));
+cleanupOrphanedDocs(st, ns);
+
+function assertUpdateWorked(query, update, isUpsert, _id) {
+ const res = st.s.getDB(kDbName).coll.update(query, update, {upsert: isUpsert});
+ assert.commandWorked(res);
+ assert.eq(0, res.nUpserted);
+ assert.eq(1, res.nMatched);
+ assert.eq(1, res.nModified);
+
+    // Skip find-based validation for pipeline update.
+ if (!Array.isArray(update)) {
+ if (update["$set"] != undefined) {
+ update = update["$set"];
}
+ update["_id"] = _id;
+ // Make sure that the update modified the document with the given _id.
+ assert.eq(1, st.s.getDB(kDbName).coll.find(update).itcount());
}
+}
- /**
- * For upserts this will insert a new document, for non-upserts it will be a no-op.
- */
- function assertUpdateWorkedWithNoMatchingDoc(query, update, isUpsert, inTransaction) {
- const res = sessionDB.coll.update(query, update, {upsert: isUpsert});
-
- assert.commandWorked(res);
- assert.eq(isUpsert ? 1 : 0, res.nUpserted);
- assert.eq(0, res.nMatched);
- assert.eq(0, res.nModified);
-
- // Skip find based validation for pipleline update or when inside a transaction.
- if (Array.isArray(update) || inTransaction)
- return;
-
- // Make sure that the upsert inserted the correct document or update did not insert
- // anything.
- assert.eq(
- isUpsert ? 1 : 0,
- st.s.getDB(kDbName).coll.find(update["$set"] ? update["$set"] : update).itcount());
- }
-
- //
- // Update Type Replacement-style.
- //
-
- // Test behaviours common to update and upsert.
- [false, true].forEach(function(isUpsert) {
- // Full shard key in query matches the update document.
- assertUpdateWorked({x: 4, y: 3, z: 3}, {x: 4, y: 3, z: 3, a: 0}, isUpsert, 0);
- assertUpdateWorked({x: 4, _id: 0, z: 3, y: 3}, {x: 4, y: 3, z: 3, a: 0}, isUpsert, 0);
-
- // Case when upsert needs to insert a new document and the new document should belong in the
- // same shard as the targeted shard. For non-upserts, it will be a no-op.
- assertUpdateWorkedWithNoMatchingDoc(
- {x: 4, y: 0, z: 0}, {x: 1, z: 3, y: 110, a: 90}, isUpsert);
- });
-
- //
- // Test behaviours specific to non-upsert updates.
- //
-
- // Partial shard key in query can target a single shard, and shard key of existing document is
- // the same as the replacement's.
- assertUpdateWorked({x: 4}, {x: 4, y: 3, z: 3, a: 1}, false, 0);
- assertUpdateWorked({x: 4, _id: 0, z: 3}, {y: 3, x: 4, z: 3, a: 3}, false, 0);
-
- // Parital shard key in the query, update succeeds with no op when there is no matching document
- // for the query.
- assertUpdateWorkedWithNoMatchingDoc({x: 10}, {x: 10, y: 3, z: 3, a: 5}, false);
- assertUpdateWorkedWithNoMatchingDoc({x: 100, y: 55, a: 15}, {x: 100, y: 55, z: 3, a: 6}, false);
- assertUpdateWorkedWithNoMatchingDoc({x: 11, _id: 3}, {x: 11, y: 3, z: 3, a: 7}, false);
-
- // Partial shard key in query can target a single shard, but fails while attempting to
- // modify shard key value.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {x: 100, y: 50, a: 5}, {x: 100, y: 55, z: 3, a: 1}, {upsert: false}),
- [31025]);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({x: 4, z: 3}, {x: 4, y: 3, z: 4, a: 1}, {upsert: false}),
- [31025]);
-
- // Full shard key in query, matches no document.
- assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0}, {x: 1110, y: 55, z: 3, a: 111}, false);
-
- // Partial shard key in query, but can still target a single shard.
- assertUpdateWorkedWithNoMatchingDoc({x: 100, y: 51, a: 5}, {x: 110, y: 55, z: 3, a: 8}, false);
-
- // Partial shard key in query cannot target a single shard, targeting happens using update
- // document.
-
- // When query doesn't match any doc.
- assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0}, {x: 110, y: 55, z: 3, a: 110}, false);
- assertUpdateWorkedWithNoMatchingDoc({_id: 1}, {x: 110, y: 55, z: 3, a: 110}, false);
-
- // When query matches a doc and updates sucessfully.
- assertUpdateWorked({_id: 0, y: 3}, {z: 3, x: 4, y: 3, a: 2}, false, 0);
- assertUpdateWorked({_id: 0}, {z: 3, x: 4, y: 3, replStyle: 2}, false, 0);
-
- // When query matches a doc and fails to update because shard key needs to be updated.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({}, {x: 110, y: 55, z: 3, a: 110}, false), 31025);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({_id: 2}, {x: 110, y: 55, z: 3, a: 110}, false), 31025);
-
- //
- // Test upsert-specific behaviours.
- //
-
- // Case when upsert needs to insert a new document and the new document should belong in a shard
- // other than the one targeted by the update. These upserts can only succeed in a
- // multi-statement transaction or with retryWrites: true.
- const updateDoc = {x: 1110, y: 55, z: 3, replStyleUpdate: true};
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({x: 4, y: 0, z: 0}, updateDoc, {upsert: true}),
- ErrorCodes.IllegalOperation);
-
- // The above upsert works with transactions.
- session.startTransaction();
- assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0}, updateDoc, true, true);
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(1, st.s.getDB(kDbName).coll.find(updateDoc).itcount());
-
- // Full shard key not specified in query.
-
- // Query on partial shard key.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {x: 100, y: 50, a: 5}, {x: 100, y: 55, z: 3, a: 1}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {x: 100, y: 50, nonExistingField: true}, {x: 100, y: 55, z: 3, a: 1}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- // Query on partial shard key with _id.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {x: 100, y: 50, a: 5, _id: 0}, {x: 100, y: 55, z: 3, a: 1}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({x: 100, y: 50, a: 5, _id: 0, nonExistingField: true},
- {x: 100, y: 55, z: 3, a: 1},
- {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- // Query on only _id.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({_id: 0}, {z: 3, x: 4, y: 3, a: 2}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {_id: "nonExisting"}, {z: 3, x: 4, y: 3, a: 2}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- //
- // Update Type Op-style.
- //
-
- // Test behaviours common to update and upsert.
- [false, true].forEach(function(isUpsert) {
- // Full shard key in query.
- assertUpdateWorked({x: 4, _id: 0, z: 3, y: 3}, {"$set": {opStyle: 1}}, isUpsert, 0);
- assertUpdateWorked({x: 4, z: 3, y: 3}, {"$set": {opStyle: 2}}, isUpsert, 0);
-
- // Case when upsert needs to insert a new document and the new document should belong in the
- // same shard as the targetted shard. For non-upserts, it will be a no op.
- assertUpdateWorkedWithNoMatchingDoc(
- {x: 4, y: 0, z: 0}, {"$set": {x: 1, z: 3, y: 111, a: 90}}, isUpsert);
- });
-
- // Test behaviours specific to non-upsert updates.
-
- // Full shard key in query, matches no document.
+/**
+ * For upserts this will insert a new document, for non-upserts it will be a no-op.
+ */
+function assertUpdateWorkedWithNoMatchingDoc(query, update, isUpsert, inTransaction) {
+ const res = sessionDB.coll.update(query, update, {upsert: isUpsert});
+
+ assert.commandWorked(res);
+ assert.eq(isUpsert ? 1 : 0, res.nUpserted);
+ assert.eq(0, res.nMatched);
+ assert.eq(0, res.nModified);
+
+    // Skip find-based validation for pipeline update or when inside a transaction.
+ if (Array.isArray(update) || inTransaction)
+ return;
+
+    // Make sure that the upsert inserted the correct document, or that the update did not
+    // insert anything.
+ assert.eq(isUpsert ? 1 : 0,
+ st.s.getDB(kDbName).coll.find(update["$set"] ? update["$set"] : update).itcount());
+}
+
+//
+// Update Type Replacement-style.
+//
+
+// Test behaviours common to update and upsert.
+[false, true].forEach(function(isUpsert) {
+ // Full shard key in query matches the update document.
+ assertUpdateWorked({x: 4, y: 3, z: 3}, {x: 4, y: 3, z: 3, a: 0}, isUpsert, 0);
+ assertUpdateWorked({x: 4, _id: 0, z: 3, y: 3}, {x: 4, y: 3, z: 3, a: 0}, isUpsert, 0);
+
+ // Case when upsert needs to insert a new document and the new document should belong in the
+ // same shard as the targeted shard. For non-upserts, it will be a no-op.
+ assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0}, {x: 1, z: 3, y: 110, a: 90}, isUpsert);
+});
+
+//
+// Test behaviours specific to non-upsert updates.
+//
+
+// Partial shard key in query can target a single shard, and shard key of existing document is
+// the same as the replacement's.
+assertUpdateWorked({x: 4}, {x: 4, y: 3, z: 3, a: 1}, false, 0);
+assertUpdateWorked({x: 4, _id: 0, z: 3}, {y: 3, x: 4, z: 3, a: 3}, false, 0);
+
+// Partial shard key in the query; the update succeeds as a no-op when there is no matching
+// document for the query.
+assertUpdateWorkedWithNoMatchingDoc({x: 10}, {x: 10, y: 3, z: 3, a: 5}, false);
+assertUpdateWorkedWithNoMatchingDoc({x: 100, y: 55, a: 15}, {x: 100, y: 55, z: 3, a: 6}, false);
+assertUpdateWorkedWithNoMatchingDoc({x: 11, _id: 3}, {x: 11, y: 3, z: 3, a: 7}, false);
+
+// Partial shard key in query can target a single shard, but fails while attempting to
+// modify shard key value.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update(
+ {x: 100, y: 50, a: 5}, {x: 100, y: 55, z: 3, a: 1}, {upsert: false}),
+ [31025]);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({x: 4, z: 3}, {x: 4, y: 3, z: 4, a: 1}, {upsert: false}),
+ [31025]);
+
+// Full shard key in query, matches no document.
+assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0}, {x: 1110, y: 55, z: 3, a: 111}, false);
+
+// Partial shard key in query, but can still target a single shard.
+assertUpdateWorkedWithNoMatchingDoc({x: 100, y: 51, a: 5}, {x: 110, y: 55, z: 3, a: 8}, false);
+
+// Partial shard key in query cannot target a single shard; targeting happens using the update
+// document.
+
+// When query doesn't match any doc.
+assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0}, {x: 110, y: 55, z: 3, a: 110}, false);
+assertUpdateWorkedWithNoMatchingDoc({_id: 1}, {x: 110, y: 55, z: 3, a: 110}, false);
+
+// When the query matches a doc and updates successfully.
+assertUpdateWorked({_id: 0, y: 3}, {z: 3, x: 4, y: 3, a: 2}, false, 0);
+assertUpdateWorked({_id: 0}, {z: 3, x: 4, y: 3, replStyle: 2}, false, 0);
+
+// When the query matches a doc but the update fails because it would change the shard key.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({}, {x: 110, y: 55, z: 3, a: 110}, false), 31025);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({_id: 2}, {x: 110, y: 55, z: 3, a: 110}, false), 31025);
+
+//
+// Test upsert-specific behaviours.
+//
+
+// Case when upsert needs to insert a new document and the new document should belong in a shard
+// other than the one targeted by the update. These upserts can only succeed in a
+// multi-statement transaction or with retryWrites: true.
+const updateDoc = {
+ x: 1110,
+ y: 55,
+ z: 3,
+ replStyleUpdate: true
+};
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({x: 4, y: 0, z: 0}, updateDoc, {upsert: true}),
+ ErrorCodes.IllegalOperation);
+
+// The above upsert works with transactions.
+session.startTransaction();
+assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0}, updateDoc, true, true);
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.eq(1, st.s.getDB(kDbName).coll.find(updateDoc).itcount());
+
+// Full shard key not specified in query.
+
+// Query on partial shard key.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update(
+ {x: 100, y: 50, a: 5}, {x: 100, y: 55, z: 3, a: 1}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update(
+ {x: 100, y: 50, nonExistingField: true}, {x: 100, y: 55, z: 3, a: 1}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+// Query on partial shard key with _id.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update(
+ {x: 100, y: 50, a: 5, _id: 0}, {x: 100, y: 55, z: 3, a: 1}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({x: 100, y: 50, a: 5, _id: 0, nonExistingField: true},
+ {x: 100, y: 55, z: 3, a: 1},
+ {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+// Query on only _id.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({_id: 0}, {z: 3, x: 4, y: 3, a: 2}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({_id: "nonExisting"}, {z: 3, x: 4, y: 3, a: 2}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+//
+// Update Type Op-style.
+//
+
+// Test behaviours common to update and upsert.
+[false, true].forEach(function(isUpsert) {
+ // Full shard key in query.
+ assertUpdateWorked({x: 4, _id: 0, z: 3, y: 3}, {"$set": {opStyle: 1}}, isUpsert, 0);
+ assertUpdateWorked({x: 4, z: 3, y: 3}, {"$set": {opStyle: 2}}, isUpsert, 0);
+
+    // Case when upsert needs to insert a new document and the new document should belong in the
+    // same shard as the targeted shard. For non-upserts, it will be a no-op.
assertUpdateWorkedWithNoMatchingDoc(
- {x: 4, y: 0, z: 0}, {"$set": {x: 2110, y: 55, z: 3, a: 111}}, false);
-
- // Partial shard key in query, but can still target a single shard.
+ {x: 4, y: 0, z: 0}, {"$set": {x: 1, z: 3, y: 111, a: 90}}, isUpsert);
+});
+
+// Test behaviours specific to non-upsert updates.
+
+// Full shard key in query, matches no document.
+assertUpdateWorkedWithNoMatchingDoc(
+ {x: 4, y: 0, z: 0}, {"$set": {x: 2110, y: 55, z: 3, a: 111}}, false);
+
+// Partial shard key in query, but can still target a single shard.
+assertUpdateWorkedWithNoMatchingDoc(
+ {x: 100, y: 51, a: 112}, {"$set": {x: 110, y: 55, z: 3, a: 8}}, false);
+
+// Query on _id works for update.
+assertUpdateWorked({_id: 0}, {"$set": {opStyle: 6}}, false, 0);
+assertUpdateWorked({_id: 0, y: 3}, {"$set": {opStyle: 8, y: 3, x: 4}}, false, 0);
+
+// Partial shard key in the query targets a single shard. The update succeeds as a no-op when
+// there is no matching document for the query.
+assertUpdateWorkedWithNoMatchingDoc({x: 14, _id: 0}, {"$set": {opStyle: 5}}, false);
+assertUpdateWorkedWithNoMatchingDoc({x: 14}, {"$set": {opStyle: 5}}, false);
+
+assertUpdateWorkedWithNoMatchingDoc({x: -1, y: 0}, {"$set": {z: 3, y: 110, a: 91}}, false);
+
+// Partial shard key in query can target a single shard and doesn't try to update shard key
+// value.
+assertUpdateWorked({x: 4, z: 3}, {"$set": {opStyle: 3}}, false, 0);
+assertUpdateWorked({x: 4, _id: 0, z: 3}, {"$set": {y: 3, x: 4, z: 3, opStyle: 4}}, false, 0);
+
+// Partial shard key in query can target a single shard, but fails while attempting to modify
+// shard key value.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update(
+ {_id: 1, x: 100, z: 3, a: 5}, {"$set": {y: 55, a: 11}}, {upsert: false}),
+ [31025]);
+assert.commandFailedWithCode(st.s.getDB(kDbName).coll.update(
+ {x: 4, z: 3}, {"$set": {x: 4, y: 3, z: 4, a: 1}}, {upsert: false}),
+ [31025]);
+
+// Test upsert-specific behaviours.
+
+// Case when upsert needs to insert a new document and the new document should belong in a shard
+// other than the one targeted by the update. These upserts can only succeed in a
+// multi-statement transaction or with retryWrites: true.
+const update = {
+ "$set": {x: 2110, y: 55, z: 3, opStyle: true}
+};
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({x: 4, y: 0, z: 0, opStyle: true}, update, {upsert: true}),
+ ErrorCodes.IllegalOperation);
+
+// The above upsert works with transactions.
+session.startTransaction();
+assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0, opStyle: true}, update, true, true);
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.eq(1, st.s.getDB(kDbName).coll.find(update["$set"]).itcount());
+
+// Full shard key not specified in query.
+
+// Query on _id doesn't work for upserts.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update(
+ {_id: 0}, {"$set": {x: 2, y: 11, z: 10, opStyle: 7}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+// Partial shard key can target a single shard. This style of update can work if SERVER-41243 is
+// implemented.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({x: 14}, {"$set": {opStyle: 5}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({x: 100, y: 51, nonExistingField: true},
+ {"$set": {x: 110, y: 55, z: 3, a: 8}},
+ {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+// Partial shard key cannot target single shard.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update(
+ {_id: 0, y: 3}, {"$set": {z: 3, x: 4, y: 3, a: 2}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({y: 3}, {"$set": {z: 3, x: 4, y: 3, a: 2}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+//
+// Update with pipeline.
+//
+
+// Test behaviours common to update and upsert.
+[false, true].forEach(function(isUpsert) {
+ // Full shard key in query.
+ assertUpdateWorked(
+ {_id: 0, x: 4, z: 3, y: 3}, [{$addFields: {pipelineUpdate: isUpsert}}], isUpsert, 0);
+ assert.eq(1,
+ st.s.getDB(kDbName)
+ .coll.find({_id: 0, x: 4, z: 3, y: 3, pipelineUpdate: isUpsert})
+ .itcount());
assertUpdateWorkedWithNoMatchingDoc(
- {x: 100, y: 51, a: 112}, {"$set": {x: 110, y: 55, z: 3, a: 8}}, false);
-
- // Query on _id works for update.
- assertUpdateWorked({_id: 0}, {"$set": {opStyle: 6}}, false, 0);
- assertUpdateWorked({_id: 0, y: 3}, {"$set": {opStyle: 8, y: 3, x: 4}}, false, 0);
-
- // Parital shard key in the query targets single shard. Update succeeds with no op when there is
- // no matching document for the query.
- assertUpdateWorkedWithNoMatchingDoc({x: 14, _id: 0}, {"$set": {opStyle: 5}}, false);
- assertUpdateWorkedWithNoMatchingDoc({x: 14}, {"$set": {opStyle: 5}}, false);
-
- assertUpdateWorkedWithNoMatchingDoc({x: -1, y: 0}, {"$set": {z: 3, y: 110, a: 91}}, false);
-
- // Partial shard key in query can target a single shard and doesn't try to update shard key
- // value.
- assertUpdateWorked({x: 4, z: 3}, {"$set": {opStyle: 3}}, false, 0);
- assertUpdateWorked({x: 4, _id: 0, z: 3}, {"$set": {y: 3, x: 4, z: 3, opStyle: 4}}, false, 0);
-
- // Partial shard key in query can target a single shard, but fails while attempting to modify
- // shard key value.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {_id: 1, x: 100, z: 3, a: 5}, {"$set": {y: 55, a: 11}}, {upsert: false}),
- [31025]);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {x: 4, z: 3}, {"$set": {x: 4, y: 3, z: 4, a: 1}}, {upsert: false}),
- [31025]);
-
- // Test upsert-specific behaviours.
-
- // Case when upsert needs to insert a new document and the new document should belong in a shard
- // other than the one targeted by the update. These upserts can only succeed in a
- // multi-statement transaction or with retryWrites: true.
- const update = {"$set": {x: 2110, y: 55, z: 3, opStyle: true}};
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({x: 4, y: 0, z: 0, opStyle: true}, update, {upsert: true}),
- ErrorCodes.IllegalOperation);
-
- // The above upsert works with transactions.
- session.startTransaction();
- assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0, opStyle: true}, update, true, true);
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(1, st.s.getDB(kDbName).coll.find(update["$set"]).itcount());
-
- // Full shard key not specified in query.
-
- // Query on _id doesn't work for upserts.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {_id: 0}, {"$set": {x: 2, y: 11, z: 10, opStyle: 7}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- // Partial shard key can target single shard. This style of update can work if SERVER-41243 is
- // implemented.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({x: 14}, {"$set": {opStyle: 5}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({x: 100, y: 51, nonExistingField: true},
- {"$set": {x: 110, y: 55, z: 3, a: 8}},
- {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- // Partial shard key cannot target single shard.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {_id: 0, y: 3}, {"$set": {z: 3, x: 4, y: 3, a: 2}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({y: 3}, {"$set": {z: 3, x: 4, y: 3, a: 2}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- //
- // Update with pipeline.
- //
-
- // Test behaviours common to update and upsert.
- [false, true].forEach(function(isUpsert) {
- // Full shard key in query.
- assertUpdateWorked(
- {_id: 0, x: 4, z: 3, y: 3}, [{$addFields: {pipelineUpdate: isUpsert}}], isUpsert, 0);
- assert.eq(1,
- st.s.getDB(kDbName)
- .coll.find({_id: 0, x: 4, z: 3, y: 3, pipelineUpdate: isUpsert})
- .itcount());
- assertUpdateWorkedWithNoMatchingDoc(
- {_id: 15, x: 44, z: 3, y: 3}, [{$addFields: {pipelineUpdate: true}}], isUpsert);
- assert.eq(isUpsert ? 1 : 0,
- st.s.getDB(kDbName)
- .coll.find({_id: 15, x: 44, z: 3, y: 3, pipelineUpdate: true})
- .itcount());
-
- assertUpdateWorkedWithNoMatchingDoc(
- {x: 45, z: 4, y: 3}, [{$addFields: {pipelineUpdate: true}}], isUpsert);
- assert.eq(
- isUpsert ? 1 : 0,
- st.s.getDB(kDbName).coll.find({x: 45, z: 4, y: 3, pipelineUpdate: true}).itcount());
-
- // Case when upsert needs to insert a new document and the new document should belong in the
- // same shard as the targeted shard.
- assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0},
- [{
- "$project": {
- x: {$literal: 3},
- y: {$literal: 33},
- z: {$literal: 3},
- pipelineUpdate: {$literal: true}
- }
- }],
- isUpsert);
- assert.eq(
- isUpsert ? 1 : 0,
- st.s.getDB(kDbName).coll.find({x: 3, z: 3, y: 33, pipelineUpdate: true}).itcount());
- });
-
- // Test behaviours specific to non-upsert updates.
-
- // Full shard key in query, matches no document.
- assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0},
- [{
- "$project": {
- x: {$literal: 2111},
- y: {$literal: 55},
- z: {$literal: 3},
- pipelineUpdate: {$literal: true}
- }
- }],
- false);
- assert.eq(
- 0, st.s.getDB(kDbName).coll.find({x: 2111, z: 3, y: 55, pipelineUpdate: true}).itcount());
-
- // Partial shard key in query targets single shard but doesn't match any document on that shard.
- assertUpdateWorkedWithNoMatchingDoc({_id: 14, z: 4, x: 3}, [{$addFields: {foo: 4}}], false);
+ {_id: 15, x: 44, z: 3, y: 3}, [{$addFields: {pipelineUpdate: true}}], isUpsert);
+ assert.eq(isUpsert ? 1 : 0,
+ st.s.getDB(kDbName)
+ .coll.find({_id: 15, x: 44, z: 3, y: 3, pipelineUpdate: true})
+ .itcount());
- // Partial shard key in query can target a single shard and doesn't try to update shard key
- // value.
assertUpdateWorkedWithNoMatchingDoc(
- {x: 46, z: 4}, [{$addFields: {y: 10, pipelineUpdateNoOp: false}}], false);
- assertUpdateWorked({x: 4, z: 3}, [{$addFields: {pipelineUpdateDoc: false}}], false, 0);
+ {x: 45, z: 4, y: 3}, [{$addFields: {pipelineUpdate: true}}], isUpsert);
+ assert.eq(isUpsert ? 1 : 0,
+ st.s.getDB(kDbName).coll.find({x: 45, z: 4, y: 3, pipelineUpdate: true}).itcount());
- // Partial shard key in query cannot target a single shard.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({z: 3, y: 3}, [{$addFields: {foo: 4}}], {upsert: false}),
- [72, ErrorCodes.InvalidOptions]);
-
- // Test upsert-specific behaviours.
-
- // Case when upsert needs to insert a new document and the new document should belong in a shard
- // other than the one targeted by the update. These upserts can only succeed in a
- // multi-statement transaction or with retryWrites: true.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({x: 4, y: 0, z: 0},
- [{
- "$project": {
- x: {$literal: 2111},
- y: {$literal: 55},
- z: {$literal: 3},
- pipelineUpdate: {$literal: true}
- }
- }],
- {upsert: true}),
- ErrorCodes.IllegalOperation);
-
- // The above upsert works with transactions.
- session.startTransaction();
- assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0, pipelineUpdate: true},
- [{
- "$project": {
- x: {$literal: 2111},
- y: {$literal: 55},
- z: {$literal: 3},
- pipelineUpdate: {$literal: true}
- }
- }],
- true);
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(
- 1, st.s.getDB(kDbName).coll.find({x: 2111, y: 55, z: 3, pipelineUpdate: true}).itcount());
-
- // Full shard key not specified in query.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {_id: 18, z: 4, x: 3}, [{$addFields: {foo: 4}}], {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({_id: 0},
+ // Case when upsert needs to insert a new document and the new document should belong in the
+ // same shard as the targeted shard.
+ assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0},
[{
- "$project": {
- x: {$literal: 2111},
- y: {$literal: 55},
- z: {$literal: 3},
- pipelineUpdate: {$literal: true}
- }
+ "$project": {
+ x: {$literal: 3},
+ y: {$literal: 33},
+ z: {$literal: 3},
+ pipelineUpdate: {$literal: true}
+ }
}],
- {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- st.stop();
+ isUpsert);
+ assert.eq(isUpsert ? 1 : 0,
+ st.s.getDB(kDbName).coll.find({x: 3, z: 3, y: 33, pipelineUpdate: true}).itcount());
+});
+
+// Test behaviours specific to non-upsert updates.
+
+// Full shard key in query, matches no document.
+assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0},
+ [{
+ "$project": {
+ x: {$literal: 2111},
+ y: {$literal: 55},
+ z: {$literal: 3},
+ pipelineUpdate: {$literal: true}
+ }
+ }],
+ false);
+assert.eq(0, st.s.getDB(kDbName).coll.find({x: 2111, z: 3, y: 55, pipelineUpdate: true}).itcount());
+
+// Partial shard key in query targets single shard but doesn't match any document on that shard.
+assertUpdateWorkedWithNoMatchingDoc({_id: 14, z: 4, x: 3}, [{$addFields: {foo: 4}}], false);
+
+// Partial shard key in query can target a single shard and doesn't try to update shard key
+// value.
+assertUpdateWorkedWithNoMatchingDoc(
+ {x: 46, z: 4}, [{$addFields: {y: 10, pipelineUpdateNoOp: false}}], false);
+assertUpdateWorked({x: 4, z: 3}, [{$addFields: {pipelineUpdateDoc: false}}], false, 0);
+
+// Partial shard key in query cannot target a single shard.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({z: 3, y: 3}, [{$addFields: {foo: 4}}], {upsert: false}),
+ [72, ErrorCodes.InvalidOptions]);
+
+// Test upsert-specific behaviours.
+
+// Case when upsert needs to insert a new document and the new document should belong in a shard
+// other than the one targeted by the update. These upserts can only succeed in a
+// multi-statement transaction or with retryWrites: true.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({x: 4, y: 0, z: 0},
+ [{
+ "$project": {
+ x: {$literal: 2111},
+ y: {$literal: 55},
+ z: {$literal: 3},
+ pipelineUpdate: {$literal: true}
+ }
+ }],
+ {upsert: true}),
+ ErrorCodes.IllegalOperation);
+
+// The above upsert works with transactions.
+session.startTransaction();
+assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0, pipelineUpdate: true},
+ [{
+ "$project": {
+ x: {$literal: 2111},
+ y: {$literal: 55},
+ z: {$literal: 3},
+ pipelineUpdate: {$literal: true}
+ }
+ }],
+ true);
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.eq(1, st.s.getDB(kDbName).coll.find({x: 2111, y: 55, z: 3, pipelineUpdate: true}).itcount());
+
+// Full shard key not specified in query.
+assert.commandFailedWithCode(st.s.getDB(kDbName).coll.update(
+ {_id: 18, z: 4, x: 3}, [{$addFields: {foo: 4}}], {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({_id: 0},
+ [{
+ "$project": {
+ x: {$literal: 2111},
+ y: {$literal: 55},
+ z: {$literal: 3},
+ pipelineUpdate: {$literal: true}
+ }
+ }],
+ {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+st.stop();
})();
diff --git a/jstests/sharding/update_immutable_fields.js b/jstests/sharding/update_immutable_fields.js
index 287e750c176..96bf4f454dc 100644
--- a/jstests/sharding/update_immutable_fields.js
+++ b/jstests/sharding/update_immutable_fields.js
@@ -1,85 +1,83 @@
// Tests that save style updates correctly change immutable fields
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 1});
+var st = new ShardingTest({shards: 2, mongos: 1});
- var mongos = st.s;
- var config = mongos.getDB("config");
- var coll = mongos.getCollection(jsTestName() + ".coll1");
- var shard0 = st.shard0;
+var mongos = st.s;
+var config = mongos.getDB("config");
+var coll = mongos.getCollection(jsTestName() + ".coll1");
+var shard0 = st.shard0;
- assert.commandWorked(config.adminCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
- assert.commandWorked(config.adminCommand({shardCollection: "" + coll, key: {a: 1}}));
+assert.commandWorked(config.adminCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
+assert.commandWorked(config.adminCommand({shardCollection: "" + coll, key: {a: 1}}));
- var getDirectShardedConn = function(st, collName) {
+var getDirectShardedConn = function(st, collName) {
+ var shardConnWithVersion = new Mongo(st.shard0.host);
- var shardConnWithVersion = new Mongo(st.shard0.host);
+ var configConnStr = st._configDB;
- var configConnStr = st._configDB;
+ var maxChunk =
+ st.s0.getCollection("config.chunks").find({ns: collName}).sort({lastmod: -1}).next();
- var maxChunk =
- st.s0.getCollection("config.chunks").find({ns: collName}).sort({lastmod: -1}).next();
-
- var ssvInitCmd = {
- setShardVersion: collName,
- authoritative: true,
- configdb: configConnStr,
- version: maxChunk.lastmod,
- shard: st.shard0.shardName,
- versionEpoch: maxChunk.lastmodEpoch
- };
-
- printjson(ssvInitCmd);
- assert.commandWorked(shardConnWithVersion.getDB("admin").runCommand(ssvInitCmd));
-
- return shardConnWithVersion;
+ var ssvInitCmd = {
+ setShardVersion: collName,
+ authoritative: true,
+ configdb: configConnStr,
+ version: maxChunk.lastmod,
+ shard: st.shard0.shardName,
+ versionEpoch: maxChunk.lastmodEpoch
};
- var shard0Coll = getDirectShardedConn(st, coll.getFullName()).getCollection(coll.getFullName());
+ printjson(ssvInitCmd);
+ assert.commandWorked(shardConnWithVersion.getDB("admin").runCommand(ssvInitCmd));
+
+ return shardConnWithVersion;
+};
- // No shard key
- shard0Coll.remove({});
- assert.writeError(shard0Coll.save({_id: 3}));
+var shard0Coll = getDirectShardedConn(st, coll.getFullName()).getCollection(coll.getFullName());
- // Full shard key in save
- assert.writeOK(shard0Coll.save({_id: 1, a: 1}));
+// No shard key
+shard0Coll.remove({});
+assert.writeError(shard0Coll.save({_id: 3}));
- // Full shard key on replacement (basically the same as above)
- shard0Coll.remove({});
- assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}, true));
+// Full shard key in save
+assert.writeOK(shard0Coll.save({_id: 1, a: 1}));
- // Full shard key after $set
- shard0Coll.remove({});
- assert.writeOK(shard0Coll.update({_id: 1}, {$set: {a: 1}}, true));
+// Full shard key on replacement (basically the same as above)
+shard0Coll.remove({});
+assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}, true));
- // Update existing doc (replacement), same shard key value
- assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}));
+// Full shard key after $set
+shard0Coll.remove({});
+assert.writeOK(shard0Coll.update({_id: 1}, {$set: {a: 1}}, true));
- // Update existing doc ($set), same shard key value
- assert.commandWorked(shard0Coll.update({_id: 1}, {$set: {a: 1}}));
+// Update existing doc (replacement), same shard key value
+assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}));
- // Error when trying to update a shard key outside of a transaction.
- assert.commandFailedWithCode(shard0Coll.update({_id: 1, a: 1}, {_id: 1, a: 2}),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(shard0Coll.update({_id: 1, a: 1}, {"$set": {a: 2}}),
- ErrorCodes.IllegalOperation);
+// Update existing doc ($set), same shard key value
+assert.commandWorked(shard0Coll.update({_id: 1}, {$set: {a: 1}}));
- // Error when unsetting shard key.
- assert.writeError(shard0Coll.update({_id: 1}, {b: 3}));
+// Error when trying to update a shard key outside of a transaction.
+assert.commandFailedWithCode(shard0Coll.update({_id: 1, a: 1}, {_id: 1, a: 2}),
+ ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(shard0Coll.update({_id: 1, a: 1}, {"$set": {a: 2}}),
+ ErrorCodes.IllegalOperation);
- // Error when unsetting shard key ($set).
- assert.writeError(shard0Coll.update({_id: 1}, {$unset: {a: 1}}));
+// Error when unsetting shard key.
+assert.writeError(shard0Coll.update({_id: 1}, {b: 3}));
- // Error due to removing all the embedded fields.
- shard0Coll.remove({});
+// Error when unsetting shard key ($set).
+assert.writeError(shard0Coll.update({_id: 1}, {$unset: {a: 1}}));
- assert.writeOK(shard0Coll.save({_id: 2, a: {c: 1, b: 1}}));
+// Error due to removing all the embedded fields.
+shard0Coll.remove({});
- assert.writeError(shard0Coll.update({}, {$unset: {"a.c": 1}}));
- assert.writeError(shard0Coll.update({}, {$unset: {"a.b": 1, "a.c": 1}}));
+assert.writeOK(shard0Coll.save({_id: 2, a: {c: 1, b: 1}}));
- st.stop();
+assert.writeError(shard0Coll.update({}, {$unset: {"a.c": 1}}));
+assert.writeError(shard0Coll.update({}, {$unset: {"a.b": 1, "a.c": 1}}));
+st.stop();
})();
diff --git a/jstests/sharding/update_replace_id.js b/jstests/sharding/update_replace_id.js
index db8c878674f..0cd19ef1d88 100644
--- a/jstests/sharding/update_replace_id.js
+++ b/jstests/sharding/update_replace_id.js
@@ -12,191 +12,186 @@
* filter.
*/
(function() {
- load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
+load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1, other: {enableBalancer: false}});
+const st = new ShardingTest({shards: 2, mongos: 1, config: 1, other: {enableBalancer: false}});
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB.test;
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB.test;
- const shard0DB = st.shard0.getDB(jsTestName());
- const shard1DB = st.shard1.getDB(jsTestName());
+const shard0DB = st.shard0.getDB(jsTestName());
+const shard1DB = st.shard1.getDB(jsTestName());
- assert.commandWorked(mongosDB.dropDatabase());
+assert.commandWorked(mongosDB.dropDatabase());
- // Enable sharding on the test DB and ensure its primary is shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+// Enable sharding on the test DB and ensure its primary is shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
- // Enables profiling on both shards so that we can verify the targeting behaviour.
- function restartProfiling() {
- for (let shardDB of[shard0DB, shard1DB]) {
- shardDB.setProfilingLevel(0);
- shardDB.system.profile.drop();
- shardDB.setProfilingLevel(2);
- }
- }
-
- function setUpData() {
- // Write a single document to shard0 and verify that it is present.
- mongosColl.insert({_id: -100, a: -100, msg: "not_updated"});
- assert.docEq(shard0DB.test.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "not_updated"}]);
-
- // Write a document with the same key directly to shard1. This simulates an orphaned
- // document, or the duplicate document which temporarily exists during a chunk migration.
- shard1DB.test.insert({_id: -100, a: -100, msg: "not_updated"});
-
- // Clear and restart the profiler on both shards.
- restartProfiling();
- }
-
- function runReplacementUpdateTestsForHashedShardKey() {
- setUpData();
-
- // Perform a replacement update whose query is an exact match on _id and whose replacement
- // document contains the remainder of the shard key. Despite the fact that the replacement
- // document does not contain the entire shard key, we expect that mongoS will extract the
- // _id from the query and combine it with the replacement doc to target a single shard.
- let writeRes = assert.commandWorked(
- mongosColl.update({_id: -100}, {a: -100, msg: "update_extracted_id_from_query"}));
-
- // Verify that the update did not modify the orphan document.
- assert.docEq(shard1DB.test.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "not_updated"}]);
- assert.eq(writeRes.nMatched, 1);
- assert.eq(writeRes.nModified, 1);
-
- // Verify that the update only targeted shard0 and that the resulting document appears as
- // expected.
- assert.docEq(mongosColl.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "update_extracted_id_from_query"}]);
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard0DB,
- filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
- });
-
- // Perform an upsert replacement whose query is an exact match on _id and whose replacement
- // doc contains the remainder of the shard key. The _id taken from the query should be used
- // both in targeting the update and in generating the new document.
- writeRes = assert.commandWorked(mongosColl.update(
- {_id: 101}, {a: 101, msg: "upsert_extracted_id_from_query"}, {upsert: true}));
- assert.eq(writeRes.nUpserted, 1);
-
- // Verify that the update only targeted shard1, and that the resulting document appears as
- // expected. At this point in the test we expect shard1 to be stale, because it was the
- // destination shard for the first moveChunk; we therefore explicitly check the profiler for
- // a successful update, i.e. one which did not report a stale config exception.
- assert.docEq(mongosColl.find({_id: 101}).toArray(),
- [{_id: 101, a: 101, msg: "upsert_extracted_id_from_query"}]);
- assert.docEq(shard1DB.test.find({_id: 101}).toArray(),
- [{_id: 101, a: 101, msg: "upsert_extracted_id_from_query"}]);
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard0DB,
- filter: {op: "update", "command.u.msg": "upsert_extracted_id_from_query"}
- });
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard1DB,
- filter: {
- op: "update",
- "command.u.msg": "upsert_extracted_id_from_query",
- errName: {$exists: false}
- }
- });
+// Enables profiling on both shards so that we can verify the targeting behaviour.
+function restartProfiling() {
+ for (let shardDB of [shard0DB, shard1DB]) {
+ shardDB.setProfilingLevel(0);
+ shardDB.system.profile.drop();
+ shardDB.setProfilingLevel(2);
}
-
- function runReplacementUpdateTestsForCompoundShardKey() {
- setUpData();
-
- // Perform a replacement update whose query is an exact match on _id and whose replacement
- // document contains the remainder of the shard key. Despite the fact that the replacement
- // document does not contain the entire shard key, we expect that mongoS will extract the
- // _id from the query and combine it with the replacement doc to target a single shard.
- let writeRes = assert.commandWorked(
- mongosColl.update({_id: -100}, {a: -100, msg: "update_extracted_id_from_query"}));
-
- // Verify that the update did not modify the orphan document.
- assert.docEq(shard1DB.test.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "not_updated"}]);
- assert.eq(writeRes.nMatched, 1);
- assert.eq(writeRes.nModified, 1);
-
- // Verify that the update only targeted shard0 and that the resulting document appears as
- // expected.
- assert.docEq(mongosColl.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "update_extracted_id_from_query"}]);
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard0DB,
- filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
- });
-
- // An upsert whose query doesn't have full shard key will fail.
- assert.commandFailedWithCode(
- mongosColl.update(
- {_id: 101}, {a: 101, msg: "upsert_extracted_id_from_query"}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- // Verify that the document did not perform any writes.
- assert.docEq(mongosColl.find({_id: 101}).itcount(), 0);
-
- // Verify that an update whose query contains an exact match on _id but whose replacement
- // doc does not contain all other shard key fields will be rejected by mongoS.
- writeRes = assert.commandFailedWithCode(
- mongosColl.update({_id: -100, a: -100}, {msg: "update_failed_missing_shard_key_field"}),
- ErrorCodes.ShardKeyNotFound);
-
- // Check that the existing document remains unchanged, and that the update did not reach
- // either shard per their respective profilers.
- assert.docEq(mongosColl.find({_id: -100, a: -100}).toArray(),
- [{_id: -100, a: -100, msg: "update_extracted_id_from_query"}]);
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard0DB,
- filter: {op: "update", "command.u.msg": "update_failed_missing_shard_key_field"}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {op: "update", "command.u.msg": "update_failed_missing_shard_key_field"}
- });
-
- // Verify that an upsert whose query contains an exact match on _id but whose replacement
- // document does not contain all other shard key fields will be rejected by mongoS, since it
- // does not contain an exact shard key match.
- writeRes = assert.commandFailedWithCode(
- mongosColl.update({_id: 200, a: 200}, {msg: "upsert_targeting_failed"}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard0DB,
- filter: {op: "update", "command.u.msg": "upsert_targeting_failed"}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {op: "update", "command.u.msg": "upsert_targeting_failed"}
- });
- assert.eq(mongosColl.find({_id: 200, a: 200}).itcount(), 0);
- }
-
- // Shard the test collection on {_id: 1, a: 1}, split it into two chunks, and migrate one of
- // these to the second shard.
- st.shardColl(
- mongosColl, {_id: 1, a: 1}, {_id: 0, a: 0}, {_id: 1, a: 1}, mongosDB.getName(), true);
-
- // Run the replacement behaviour tests that are relevant to a compound key that includes _id.
- runReplacementUpdateTestsForCompoundShardKey();
-
- // Drop and reshard the collection on {_id: "hashed"}, which will autosplit across both shards.
- assert(mongosColl.drop());
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: "hashed"}});
-
- // Run the replacement behaviour tests relevant to a collection sharded on {_id: "hashed"}.
- runReplacementUpdateTestsForHashedShardKey();
-
- st.stop();
+}
+
+function setUpData() {
+ // Write a single document to shard0 and verify that it is present.
+ mongosColl.insert({_id: -100, a: -100, msg: "not_updated"});
+ assert.docEq(shard0DB.test.find({_id: -100}).toArray(),
+ [{_id: -100, a: -100, msg: "not_updated"}]);
+
+ // Write a document with the same key directly to shard1. This simulates an orphaned
+ // document, or the duplicate document which temporarily exists during a chunk migration.
+ shard1DB.test.insert({_id: -100, a: -100, msg: "not_updated"});
+
+ // Clear and restart the profiler on both shards.
+ restartProfiling();
+}
+
+function runReplacementUpdateTestsForHashedShardKey() {
+ setUpData();
+
+ // Perform a replacement update whose query is an exact match on _id and whose replacement
+ // document contains the remainder of the shard key. Despite the fact that the replacement
+ // document does not contain the entire shard key, we expect that mongoS will extract the
+ // _id from the query and combine it with the replacement doc to target a single shard.
+ let writeRes = assert.commandWorked(
+ mongosColl.update({_id: -100}, {a: -100, msg: "update_extracted_id_from_query"}));
+
+ // Verify that the update did not modify the orphan document.
+ assert.docEq(shard1DB.test.find({_id: -100}).toArray(),
+ [{_id: -100, a: -100, msg: "not_updated"}]);
+ assert.eq(writeRes.nMatched, 1);
+ assert.eq(writeRes.nModified, 1);
+
+ // Verify that the update only targeted shard0 and that the resulting document appears as
+ // expected.
+ assert.docEq(mongosColl.find({_id: -100}).toArray(),
+ [{_id: -100, a: -100, msg: "update_extracted_id_from_query"}]);
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shard0DB,
+ filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
+ });
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard1DB,
+ filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
+ });
+
+ // Perform an upsert replacement whose query is an exact match on _id and whose replacement
+ // doc contains the remainder of the shard key. The _id taken from the query should be used
+ // both in targeting the update and in generating the new document.
+ writeRes = assert.commandWorked(mongosColl.update(
+ {_id: 101}, {a: 101, msg: "upsert_extracted_id_from_query"}, {upsert: true}));
+ assert.eq(writeRes.nUpserted, 1);
+
+ // Verify that the update only targeted shard1, and that the resulting document appears as
+ // expected. At this point in the test we expect shard1 to be stale, because it was the
+ // destination shard for the first moveChunk; we therefore explicitly check the profiler for
+ // a successful update, i.e. one which did not report a stale config exception.
+ assert.docEq(mongosColl.find({_id: 101}).toArray(),
+ [{_id: 101, a: 101, msg: "upsert_extracted_id_from_query"}]);
+ assert.docEq(shard1DB.test.find({_id: 101}).toArray(),
+ [{_id: 101, a: 101, msg: "upsert_extracted_id_from_query"}]);
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard0DB,
+ filter: {op: "update", "command.u.msg": "upsert_extracted_id_from_query"}
+ });
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shard1DB,
+ filter: {
+ op: "update",
+ "command.u.msg": "upsert_extracted_id_from_query",
+ errName: {$exists: false}
+ }
+ });
+}
+
+function runReplacementUpdateTestsForCompoundShardKey() {
+ setUpData();
+
+ // Perform a replacement update whose query is an exact match on _id and whose replacement
+ // document contains the remainder of the shard key. Despite the fact that the replacement
+ // document does not contain the entire shard key, we expect that mongoS will extract the
+ // _id from the query and combine it with the replacement doc to target a single shard.
+ let writeRes = assert.commandWorked(
+ mongosColl.update({_id: -100}, {a: -100, msg: "update_extracted_id_from_query"}));
+
+ // Verify that the update did not modify the orphan document.
+ assert.docEq(shard1DB.test.find({_id: -100}).toArray(),
+ [{_id: -100, a: -100, msg: "not_updated"}]);
+ assert.eq(writeRes.nMatched, 1);
+ assert.eq(writeRes.nModified, 1);
+
+ // Verify that the update only targeted shard0 and that the resulting document appears as
+ // expected.
+ assert.docEq(mongosColl.find({_id: -100}).toArray(),
+ [{_id: -100, a: -100, msg: "update_extracted_id_from_query"}]);
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shard0DB,
+ filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
+ });
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard1DB,
+ filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
+ });
+
+    // An upsert whose query doesn't have the full shard key will fail.
+ assert.commandFailedWithCode(
+ mongosColl.update(
+ {_id: 101}, {a: 101, msg: "upsert_extracted_id_from_query"}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+    // Verify that the failed upsert did not perform any writes.
+ assert.docEq(mongosColl.find({_id: 101}).itcount(), 0);
+
+ // Verify that an update whose query contains an exact match on _id but whose replacement
+ // doc does not contain all other shard key fields will be rejected by mongoS.
+ writeRes = assert.commandFailedWithCode(
+ mongosColl.update({_id: -100, a: -100}, {msg: "update_failed_missing_shard_key_field"}),
+ ErrorCodes.ShardKeyNotFound);
+
+ // Check that the existing document remains unchanged, and that the update did not reach
+ // either shard per their respective profilers.
+ assert.docEq(mongosColl.find({_id: -100, a: -100}).toArray(),
+ [{_id: -100, a: -100, msg: "update_extracted_id_from_query"}]);
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard0DB,
+ filter: {op: "update", "command.u.msg": "update_failed_missing_shard_key_field"}
+ });
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard1DB,
+ filter: {op: "update", "command.u.msg": "update_failed_missing_shard_key_field"}
+ });
+
+ // Verify that an upsert whose query contains an exact match on _id but whose replacement
+ // document does not contain all other shard key fields will be rejected by mongoS, since it
+ // does not contain an exact shard key match.
+ writeRes = assert.commandFailedWithCode(
+ mongosColl.update({_id: 200, a: 200}, {msg: "upsert_targeting_failed"}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: shard0DB, filter: {op: "update", "command.u.msg": "upsert_targeting_failed"}});
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: shard1DB, filter: {op: "update", "command.u.msg": "upsert_targeting_failed"}});
+ assert.eq(mongosColl.find({_id: 200, a: 200}).itcount(), 0);
+}
+
+// Shard the test collection on {_id: 1, a: 1}, split it into two chunks, and migrate one of
+// these to the second shard.
+st.shardColl(mongosColl, {_id: 1, a: 1}, {_id: 0, a: 0}, {_id: 1, a: 1}, mongosDB.getName(), true);
+
+// Run the replacement behaviour tests that are relevant to a compound key that includes _id.
+runReplacementUpdateTestsForCompoundShardKey();
+
+// Drop and reshard the collection on {_id: "hashed"}, which will autosplit across both shards.
+assert(mongosColl.drop());
+mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: "hashed"}});
+
+// Run the replacement behaviour tests relevant to a collection sharded on {_id: "hashed"}.
+runReplacementUpdateTestsForHashedShardKey();
+
+st.stop();
})(); \ No newline at end of file
diff --git a/jstests/sharding/update_shard_key_conflicting_writes.js b/jstests/sharding/update_shard_key_conflicting_writes.js
index 3fc3fb9f416..8f228dfb70a 100644
--- a/jstests/sharding/update_shard_key_conflicting_writes.js
+++ b/jstests/sharding/update_shard_key_conflicting_writes.js
@@ -7,151 +7,150 @@
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/parallelTester.js'); // for ScopedThread.
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
+load('jstests/libs/parallelTester.js'); // for ScopedThread.
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
- let st = new ShardingTest({mongos: 1, shards: 2});
- let kDbName = 'db';
- let mongos = st.s0;
- let ns = kDbName + '.foo';
- let db = mongos.getDB(kDbName);
+let st = new ShardingTest({mongos: 1, shards: 2});
+let kDbName = 'db';
+let mongos = st.s0;
+let ns = kDbName + '.foo';
+let db = mongos.getDB(kDbName);
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, st.shard0.shardName);
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, st.shard0.shardName);
- // Shards the collection "db.foo" on shard key {"x" : 1} such that negative "x" values are on
- // shard0 and positive on shard1
- assert.commandWorked(db.foo.createIndex({"x": 1}));
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {"x": 1}}));
- assert.commandWorked(mongos.adminCommand({split: ns, middle: {"x": 0}}));
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, find: {"x": 0}, to: st.shard1.shardName}));
+// Shards the collection "db.foo" on shard key {"x" : 1} such that negative "x" values are on
+// shard0 and positive on shard1
+assert.commandWorked(db.foo.createIndex({"x": 1}));
+assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {"x": 1}}));
+assert.commandWorked(mongos.adminCommand({split: ns, middle: {"x": 0}}));
+assert.commandWorked(mongos.adminCommand({moveChunk: ns, find: {"x": 0}, to: st.shard1.shardName}));
- assert.commandWorked(db.foo.insert({"x": -50, "a": 10}));
- assert.commandWorked(db.foo.insert({"x": -100, "a": 4}));
- assert.commandWorked(db.foo.insert({"x": -150, "a": 15}));
- assert.commandWorked(db.foo.insert({"x": 50, "a": 6}));
- assert.commandWorked(db.foo.insert({"x": 100, "a": 8}));
- assert.commandWorked(db.foo.insert({"x": 150, "a": 20}));
+assert.commandWorked(db.foo.insert({"x": -50, "a": 10}));
+assert.commandWorked(db.foo.insert({"x": -100, "a": 4}));
+assert.commandWorked(db.foo.insert({"x": -150, "a": 15}));
+assert.commandWorked(db.foo.insert({"x": 50, "a": 6}));
+assert.commandWorked(db.foo.insert({"x": 100, "a": 8}));
+assert.commandWorked(db.foo.insert({"x": 150, "a": 20}));
- assert.commandWorked(st.shard0.adminCommand({_flushDatabaseCacheUpdates: kDbName}));
- assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(st.shard1.adminCommand({_flushDatabaseCacheUpdates: kDbName}));
- assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+assert.commandWorked(st.shard0.adminCommand({_flushDatabaseCacheUpdates: kDbName}));
+assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+assert.commandWorked(st.shard1.adminCommand({_flushDatabaseCacheUpdates: kDbName}));
+assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- let session = mongos.startSession({retryWrites: false});
- let sessionDB = session.getDatabase(kDbName);
+let session = mongos.startSession({retryWrites: false});
+let sessionDB = session.getDatabase(kDbName);
- let session2 = mongos.startSession({retryWrites: true});
- let sessionDB2 = session2.getDatabase(kDbName);
+let session2 = mongos.startSession({retryWrites: true});
+let sessionDB2 = session2.getDatabase(kDbName);
- // Returns true if the command "cmdName" has started running on the server.
- function opStarted(cmdName) {
- return mongos.getDB(kDbName).currentOp().inprog.some(op => {
- return op.active && (op.ns === "db.foo") && (op.op === cmdName);
- });
- }
+// Returns true if the command "cmdName" has started running on the server.
+function opStarted(cmdName) {
+ return mongos.getDB(kDbName).currentOp().inprog.some(op => {
+ return op.active && (op.ns === "db.foo") && (op.op === cmdName);
+ });
+}
- // Send update that will change the shard key causing the document to move shards. Wait to hit
- // failpoint specified.
- function setFailPointAndSendUpdateToShardKeyInParallelShell(
- failpoint, failpointMode, shard, codeToRunInParallelShell) {
- assert.commandWorked(
- shard.adminCommand({configureFailPoint: failpoint, mode: failpointMode}));
- let awaitShell = startParallelShell(codeToRunInParallelShell, st.s.port);
- waitForFailpoint("Hit " + failpoint, 1);
- clearRawMongoProgramOutput();
- return awaitShell;
- }
+// Send an update that will change the shard key, causing the document to move shards. Wait to
+// hit the specified failpoint.
+function setFailPointAndSendUpdateToShardKeyInParallelShell(
+ failpoint, failpointMode, shard, codeToRunInParallelShell) {
+ assert.commandWorked(shard.adminCommand({configureFailPoint: failpoint, mode: failpointMode}));
+ let awaitShell = startParallelShell(codeToRunInParallelShell, st.s.port);
+ waitForFailpoint("Hit " + failpoint, 1);
+ clearRawMongoProgramOutput();
+ return awaitShell;
+}
- /**
- * Test that an in-transaction update to the shard key and a non-transactional update to the
- * same document will conflict and the non-transactional update will retry indefinitely. Once
- * the transaction will conflict and the non-transactional update will retry indefinitely. Once
- * the transaction commits, the non-transactional update should complete. When 'maxTimeMS' is
- * specified, the non-transactional write will timeout.
- */
- (() => {
- const originalShardKeyValue = 50;
- const updatedShardKeyValue = -10;
+/**
+ * Test that an in-transaction update to the shard key and a non-transactional update to the
+ * same document will conflict, and that the non-transactional update will retry
+ * indefinitely while the transaction is open. Once
+ * the transaction commits, the non-transactional update should complete. When 'maxTimeMS' is
+ * specified, the non-transactional write will time out.
+ */
+(() => {
+ const originalShardKeyValue = 50;
+ const updatedShardKeyValue = -10;
- session.startTransaction();
- assert.commandWorked(sessionDB.foo.update({"x": originalShardKeyValue},
- {$set: {"x": updatedShardKeyValue}}));
- // Attempt to update the same doc not in a transaction, this update should timeout.
- assert.commandFailedWithCode(db.runCommand({
- update: "foo",
- updates: [{q: {"x": originalShardKeyValue}, u: {$inc: {"a": 1}}}],
- maxTimeMS: 100
- }),
- ErrorCodes.MaxTimeMSExpired);
- // Run the non-transactional update again in a separate thread and wait for it to start.
- function conflictingUpdate(host, kDbName, query, update) {
- const mongosConn = new Mongo(host);
- return mongosConn.getDB(kDbName).foo.update(query, update);
- }
- let thread = new ScopedThread(
- conflictingUpdate, st.s.host, kDbName, {"x": originalShardKeyValue}, {$inc: {"a": 1}});
- thread.start();
- assert.soon(() => opStarted("update"));
- // Once we commit the transaction, the non-transaction update should finish, but it should
- // not actually modify any documents since the transaction commited first.
- assert.commandWorked(session.commitTransaction_forTesting());
- thread.join();
- assert.commandWorked(thread.returnData());
- assert.eq(1, db.foo.find({"x": updatedShardKeyValue, "a": 6}).itcount());
- assert.eq(0, db.foo.find({"x": originalShardKeyValue}).itcount());
- assert.eq(0, db.foo.find({"a": 7}).itcount());
- })();
+ session.startTransaction();
+ assert.commandWorked(
+ sessionDB.foo.update({"x": originalShardKeyValue}, {$set: {"x": updatedShardKeyValue}}));
+    // Attempt to update the same doc outside of a transaction; this update should time out.
+ assert.commandFailedWithCode(db.runCommand({
+ update: "foo",
+ updates: [{q: {"x": originalShardKeyValue}, u: {$inc: {"a": 1}}}],
+ maxTimeMS: 100
+ }),
+ ErrorCodes.MaxTimeMSExpired);
+ // Run the non-transactional update again in a separate thread and wait for it to start.
+ function conflictingUpdate(host, kDbName, query, update) {
+ const mongosConn = new Mongo(host);
+ return mongosConn.getDB(kDbName).foo.update(query, update);
+ }
+ let thread = new ScopedThread(
+ conflictingUpdate, st.s.host, kDbName, {"x": originalShardKeyValue}, {$inc: {"a": 1}});
+ thread.start();
+ assert.soon(() => opStarted("update"));
+    // Once we commit the transaction, the non-transactional update should finish, but it should
+    // not actually modify any documents since the transaction committed first.
+ assert.commandWorked(session.commitTransaction_forTesting());
+ thread.join();
+ assert.commandWorked(thread.returnData());
+ assert.eq(1, db.foo.find({"x": updatedShardKeyValue, "a": 6}).itcount());
+ assert.eq(0, db.foo.find({"x": originalShardKeyValue}).itcount());
+ assert.eq(0, db.foo.find({"a": 7}).itcount());
+})();
- /**
- * When the non-transactional update or delete runs before the transactional update to the shard
- * key, the update to the shard key should fail with WriteConflict.
- */
- (() => {
- const originalShardKeyValue = -10;
- let updatedShardKeyValue = 40;
+/**
+ * When the non-transactional update or delete runs before the transactional update to the shard
+ * key, the update to the shard key should fail with WriteConflict.
+ */
+(() => {
+ const originalShardKeyValue = -10;
+ let updatedShardKeyValue = 40;
- session.startTransaction();
- assert.commandWorked(sessionDB.runCommand({find: "foo"}));
- // Run a non-transactional update before updating the shard key.
- assert.commandWorked(db.foo.update({"x": originalShardKeyValue}, {$inc: {"a": 1}}));
- // Run transactional update to change the shard key for the same doc as updated above
- assert.commandFailedWithCode(
- sessionDB.foo.update({"x": originalShardKeyValue}, {$set: {"x": updatedShardKeyValue}}),
- ErrorCodes.WriteConflict);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.eq(1, db.foo.find({"x": originalShardKeyValue, "a": 7}).itcount());
- assert.eq(0, db.foo.find({"x": updatedShardKeyValue}).itcount());
+ session.startTransaction();
+ assert.commandWorked(sessionDB.runCommand({find: "foo"}));
+ // Run a non-transactional update before updating the shard key.
+ assert.commandWorked(db.foo.update({"x": originalShardKeyValue}, {$inc: {"a": 1}}));
+    // Run a transactional update to change the shard key for the same doc as updated above.
+ assert.commandFailedWithCode(
+ sessionDB.foo.update({"x": originalShardKeyValue}, {$set: {"x": updatedShardKeyValue}}),
+ ErrorCodes.WriteConflict);
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ assert.eq(1, db.foo.find({"x": originalShardKeyValue, "a": 7}).itcount());
+ assert.eq(0, db.foo.find({"x": updatedShardKeyValue}).itcount());
- // Run a non-transactional delete before updating the shard key.
- updatedShardKeyValue = 20;
- session.startTransaction();
- assert.commandWorked(sessionDB.runCommand({find: "foo"}));
- assert.commandWorked(db.foo.remove({"x": originalShardKeyValue}));
- // Run transactional update to change the shard key for the same doc as updated above
- assert.commandFailedWithCode(
- sessionDB.foo.update({"x": originalShardKeyValue}, {$set: {"x": updatedShardKeyValue}}),
- ErrorCodes.WriteConflict);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.eq(0, db.foo.find({"x": originalShardKeyValue}).itcount());
- assert.eq(0, db.foo.find({"x": updatedShardKeyValue}).itcount());
- })();
+ // Run a non-transactional delete before updating the shard key.
+ updatedShardKeyValue = 20;
+ session.startTransaction();
+ assert.commandWorked(sessionDB.runCommand({find: "foo"}));
+ assert.commandWorked(db.foo.remove({"x": originalShardKeyValue}));
+ // Run transactional update to change the shard key for the same doc as updated above
+ assert.commandFailedWithCode(
+ sessionDB.foo.update({"x": originalShardKeyValue}, {$set: {"x": updatedShardKeyValue}}),
+ ErrorCodes.WriteConflict);
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ assert.eq(0, db.foo.find({"x": originalShardKeyValue}).itcount());
+ assert.eq(0, db.foo.find({"x": updatedShardKeyValue}).itcount());
+})();
- /**
- * Test scenarios where a concurrent update/delete that mutates the same document that a user is
- * updating the shard key for completes just before the update to the shard key throws
- * WouldChangeOwningShard.
- */
+/**
+ * Test scenarios where a concurrent update/delete that mutates the same document whose shard key
+ * a user is updating completes just before the update to the shard key throws
+ * WouldChangeOwningShard.
+ */
- // Assert that if the concurrent update mutates the same document as the original update to the
- // shard key, we get a write conflict.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if the concurrent update mutates the same document as the original update to the
+// shard key, we get a write conflict.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession();
let sessionDB = session.getDatabase("db");
session.startTransaction();
@@ -160,28 +159,26 @@
assert.commandFailedWithCode(session.commitTransaction_forTesting(),
ErrorCodes.NoSuchTransaction);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangBeforeThrowWouldChangeOwningShard",
- "alwaysOn",
- st.shard0,
- codeToRunInParallelShell);
- // Send update that changes "a" so that the original update will no longer match this doc.
- // Turn off the failpoint so the server stops hanging.
- assert.commandWorked(sessionDB2.foo.update({"x": -50}, {$set: {"a": 300}}));
- assert.commandWorked(st.shard0.adminCommand({
- configureFailPoint: "hangBeforeThrowWouldChangeOwningShard",
- mode: "off",
- }));
- awaitShell();
- assert.eq(1, db.foo.find({"x": -50, "a": 300}).itcount());
- assert.eq(0, db.foo.find({"a": 10}).itcount());
- assert.eq(0, db.foo.find({"x": 10}).itcount());
- })();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangBeforeThrowWouldChangeOwningShard", "alwaysOn", st.shard0, codeToRunInParallelShell);
+ // Send update that changes "a" so that the original update will no longer match this doc.
+ // Turn off the failpoint so the server stops hanging.
+ assert.commandWorked(sessionDB2.foo.update({"x": -50}, {$set: {"a": 300}}));
+ assert.commandWorked(st.shard0.adminCommand({
+ configureFailPoint: "hangBeforeThrowWouldChangeOwningShard",
+ mode: "off",
+ }));
+ awaitShell();
+ assert.eq(1, db.foo.find({"x": -50, "a": 300}).itcount());
+ assert.eq(0, db.foo.find({"a": 10}).itcount());
+ assert.eq(0, db.foo.find({"x": 10}).itcount());
+})();
- // Assert that if a concurrent delete removes the same document that the original update
- // attempts to modify the shard key for, we get a write conflict.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if a concurrent delete removes the same document that the original update
+// attempts to modify the shard key for, we get a write conflict.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession();
let sessionDB = session.getDatabase("db");
session.startTransaction();
@@ -190,27 +187,25 @@
assert.commandFailedWithCode(session.commitTransaction_forTesting(),
ErrorCodes.NoSuchTransaction);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangBeforeThrowWouldChangeOwningShard",
- "alwaysOn",
- st.shard1,
- codeToRunInParallelShell);
- // Send update that changes "a" so that the original update will no longer match this doc.
- // Turn off the failpoint so the server stops hanging.
- assert.commandWorked(sessionDB2.foo.remove({"x": 100}));
- assert.commandWorked(st.shard1.adminCommand({
- configureFailPoint: "hangBeforeThrowWouldChangeOwningShard",
- mode: "off",
- }));
- awaitShell();
- assert.eq(0, db.foo.find({"x": 100}).itcount());
- assert.eq(0, db.foo.find({"x": -1}).itcount());
- })();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangBeforeThrowWouldChangeOwningShard", "alwaysOn", st.shard1, codeToRunInParallelShell);
+ // Send a delete that removes this doc so that the original update will no longer match any
+ // document. Turn off the failpoint so the server stops hanging.
+ assert.commandWorked(sessionDB2.foo.remove({"x": 100}));
+ assert.commandWorked(st.shard1.adminCommand({
+ configureFailPoint: "hangBeforeThrowWouldChangeOwningShard",
+ mode: "off",
+ }));
+ awaitShell();
+ assert.eq(0, db.foo.find({"x": 100}).itcount());
+ assert.eq(0, db.foo.find({"x": -1}).itcount());
+})();
- // Assert that if the concurrent update also mutates the shard key (and remains on the same
- // shard), the original update to the shard key will get a write conflict.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if the concurrent update also mutates the shard key (and remains on the same
+// shard), the original update to the shard key will get a write conflict.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession();
let sessionDB = session.getDatabase("db");
session.startTransaction();
@@ -219,36 +214,34 @@
assert.commandFailedWithCode(session.commitTransaction_forTesting(),
ErrorCodes.NoSuchTransaction);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangBeforeThrowWouldChangeOwningShard",
- "alwaysOn",
- st.shard0,
- codeToRunInParallelShell);
- // Send update that changes the shard key so that the original update will no longer match
- // this doc. This doc will still remain on its original shard. Turn off the failpoint so the
- // server stops hanging.
- assert.commandWorked(sessionDB2.foo.update({"x": -50}, {$set: {"x": -500}}));
- assert.commandWorked(st.shard0.adminCommand({
- configureFailPoint: "hangBeforeThrowWouldChangeOwningShard",
- mode: "off",
- }));
- awaitShell();
- assert.eq(0, db.foo.find({"x": -50}).itcount());
- assert.eq(1, db.foo.find({"x": -500}).itcount());
- assert.eq(0, db.foo.find({"x": 80}).itcount());
- })();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangBeforeThrowWouldChangeOwningShard", "alwaysOn", st.shard0, codeToRunInParallelShell);
+ // Send update that changes the shard key so that the original update will no longer match
+ // this doc. This doc will still remain on its original shard. Turn off the failpoint so the
+ // server stops hanging.
+ assert.commandWorked(sessionDB2.foo.update({"x": -50}, {$set: {"x": -500}}));
+ assert.commandWorked(st.shard0.adminCommand({
+ configureFailPoint: "hangBeforeThrowWouldChangeOwningShard",
+ mode: "off",
+ }));
+ awaitShell();
+ assert.eq(0, db.foo.find({"x": -50}).itcount());
+ assert.eq(1, db.foo.find({"x": -500}).itcount());
+ assert.eq(0, db.foo.find({"x": 80}).itcount());
+})();
- /**
- * Test scenario where a concurrent update/delete that mutates the same document that a user is
- * updating the shard key for is sent just after the update to the shard key has deleted the
- * original document but before it has inserted the new one. The second update should not match
- * any documents.
- */
+/**
+ * Test scenario where a concurrent update/delete that mutates the same document that a user is
+ * updating the shard key for is sent just after the update to the shard key has deleted the
+ * original document but before it has inserted the new one. The second update should not match
+ * any documents.
+ */
- // Assert that if the concurrent update mutates the same document as the original update to the
- // shard key, it does not match and documents.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if the concurrent update mutates the same document as the original update to the
+// shard key, it does not match any documents.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession();
let sessionDB = session.getDatabase("db");
session.startTransaction();
@@ -258,7 +251,8 @@
assert.eq(1, res.nModified);
assert.commandWorked(session.commitTransaction_forTesting());
}`;
- let codeToRunInParallelShell2 = `{
+ let codeToRunInParallelShell2 =
+ `{
let session = db.getMongo().startSession();
let sessionDB = session.getDatabase("db");
let res = sessionDB.foo.update({"x": -100}, {$inc: {"a": 1}});
@@ -266,26 +260,27 @@
assert.eq(0, res.nMatched);
assert.eq(0, res.nModified);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangBeforeInsertOnUpdateShardKey", "alwaysOn", st.s, codeToRunInParallelShell);
- let awaitShell2 = startParallelShell(codeToRunInParallelShell2, st.s.port);
- assert.soon(() => opStarted("update"));
- assert.commandWorked(st.s.adminCommand({
- configureFailPoint: "hangBeforeInsertOnUpdateShardKey",
- mode: "off",
- }));
- awaitShell();
- awaitShell2();
- assert.eq(1, db.foo.find({"x": 10}).itcount());
- assert.eq(1, db.foo.find({"a": 4}).itcount());
- assert.eq(0, db.foo.find({"x": -100}).itcount());
- assert.eq(0, db.foo.find({"a": 5}).itcount());
- })();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangBeforeInsertOnUpdateShardKey", "alwaysOn", st.s, codeToRunInParallelShell);
+ let awaitShell2 = startParallelShell(codeToRunInParallelShell2, st.s.port);
+ assert.soon(() => opStarted("update"));
+ assert.commandWorked(st.s.adminCommand({
+ configureFailPoint: "hangBeforeInsertOnUpdateShardKey",
+ mode: "off",
+ }));
+ awaitShell();
+ awaitShell2();
+ assert.eq(1, db.foo.find({"x": 10}).itcount());
+ assert.eq(1, db.foo.find({"a": 4}).itcount());
+ assert.eq(0, db.foo.find({"x": -100}).itcount());
+ assert.eq(0, db.foo.find({"a": 5}).itcount());
+})();
- // Assert that if a concurrent delete removes the same document that the original update
- // attempts to modify the shard key for, we get a write conflict.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if a concurrent delete attempts to remove the same document that the original
+// update is modifying the shard key for, the delete does not match any documents.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession();
let sessionDB = session.getDatabase("db");
session.startTransaction();
@@ -295,7 +290,8 @@
assert.eq(1, res.nModified);
assert.commandWorked(session.commitTransaction_forTesting());
}`;
- let codeToRunInParallelShell2 = `{
+ let codeToRunInParallelShell2 =
+ `{
let session = db.getMongo().startSession();
let sessionDB = session.getDatabase("db");
let res = sessionDB.foo.remove({"x": 10});
@@ -303,63 +299,64 @@
assert.eq(0, res.nMatched);
assert.eq(0, res.nModified);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangBeforeInsertOnUpdateShardKey", "alwaysOn", st.s, codeToRunInParallelShell);
- let awaitShell2 = startParallelShell(codeToRunInParallelShell2, st.s.port);
- assert.soon(() => opStarted("remove"));
- assert.commandWorked(st.s.adminCommand({
- configureFailPoint: "hangBeforeInsertOnUpdateShardKey",
- mode: "off",
- }));
- awaitShell();
- awaitShell2();
- assert.eq(0, db.foo.find({"x": 10}).itcount());
- assert.eq(1, db.foo.find({"x": -70}).itcount());
- })();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangBeforeInsertOnUpdateShardKey", "alwaysOn", st.s, codeToRunInParallelShell);
+ let awaitShell2 = startParallelShell(codeToRunInParallelShell2, st.s.port);
+ assert.soon(() => opStarted("remove"));
+ assert.commandWorked(st.s.adminCommand({
+ configureFailPoint: "hangBeforeInsertOnUpdateShardKey",
+ mode: "off",
+ }));
+ awaitShell();
+ awaitShell2();
+ assert.eq(0, db.foo.find({"x": 10}).itcount());
+ assert.eq(1, db.foo.find({"x": -70}).itcount());
+})();
- /**
- * Attempt to update the shard key in two different transactions. The second transaction should
- * fail with WriteConflict.
- */
- (() => {
- session2 = mongos.startSession();
- sessionDB2 = session2.getDatabase(kDbName);
- // Start transactions on both sessions and then run the two change shard key updates for the
- // same document
- session.startTransaction();
- assert.commandWorked(sessionDB.runCommand({find: "foo"}));
- session2.startTransaction();
- // The first update will complete and the second should get a write conflict
- assert.commandWorked(sessionDB2.foo.update({"x": -500}, {$set: {"x": 25}}));
- assert.commandFailedWithCode(sessionDB.foo.update({"x": -500}, {$set: {"x": 250}}),
- ErrorCodes.WriteConflict);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.commandWorked(session2.commitTransaction_forTesting());
- assert.eq(1, db.foo.find({"x": 25}).itcount());
- assert.eq(0, db.foo.find({"x": 250}).itcount());
- assert.eq(0, db.foo.find({"x": -500}).itcount());
- })();
+/**
+ * Attempt to update the shard key in two different transactions. The second transaction should
+ * fail with WriteConflict.
+ */
+(() => {
+ session2 = mongos.startSession();
+ sessionDB2 = session2.getDatabase(kDbName);
+ // Start transactions on both sessions and then run the two shard-key-changing updates for the
+ // same document
+ session.startTransaction();
+ assert.commandWorked(sessionDB.runCommand({find: "foo"}));
+ session2.startTransaction();
+ // The first update will complete and the second should get a write conflict
+ assert.commandWorked(sessionDB2.foo.update({"x": -500}, {$set: {"x": 25}}));
+ assert.commandFailedWithCode(sessionDB.foo.update({"x": -500}, {$set: {"x": 250}}),
+ ErrorCodes.WriteConflict);
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ assert.commandWorked(session2.commitTransaction_forTesting());
+ assert.eq(1, db.foo.find({"x": 25}).itcount());
+ assert.eq(0, db.foo.find({"x": 250}).itcount());
+ assert.eq(0, db.foo.find({"x": -500}).itcount());
+})();
- /**
- * Test scenarios where a user sends an update as a retryable write that changes the shard key
- * and there is a concurrent update/delete that mutates the same document which completes after
- * the change to the shard key throws WouldChangeOwningShard the first time, but before mongos
- * starts a transaction to change the shard key.
- *
- * The scenario looks like:
- * 1. user sends db.foo.update({shardKey: x}, {shardKey: new x})
- * 2. shard throws WCOS for this update
- * 3. user sends db.foo.update({shardKey: x}, {otherFieldInDoc: y}) on a different thread, this
- * write completes successfully
- * 4. mongos starts a transaction and resends the update on line 1
- * 5. mongos deletes the old doc, inserts a doc with the updated shard key, and commits the txn
- */
+/**
+ * Test scenarios where a user sends an update as a retryable write that changes the shard key
+ * and there is a concurrent update/delete that mutates the same document which completes after
+ * the change to the shard key throws WouldChangeOwningShard the first time, but before mongos
+ * starts a transaction to change the shard key.
+ *
+ * The scenario looks like:
+ * 1. user sends db.foo.update({shardKey: x}, {shardKey: new x})
+ * 2. shard throws WCOS for this update
+ * 3. user sends db.foo.update({shardKey: x}, {otherFieldInDoc: y}) on a different thread, this
+ * write completes successfully
+ * 4. mongos starts a transaction and resends the update on line 1
+ * 5. mongos deletes the old doc, inserts a doc with the updated shard key, and commits the txn
+ */
- // Assert that if the concurrent update modifies the document so that the update which changes
- // the shard key no longer matches the doc, it does not modify the doc.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if the concurrent update modifies the document so that the update which changes
+// the shard key no longer matches the doc, it does not modify the doc.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession({retryWrites : true});
let sessionDB = session.getDatabase("db");
let res = sessionDB.foo.update({"x": -150, "a" : 15}, {$set: {"x": 1000}});
@@ -367,28 +364,29 @@
assert.eq(0, res.nMatched);
assert.eq(0, res.nModified);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangAfterThrowWouldChangeOwningShardRetryableWrite",
- "alwaysOn",
- st.s,
- codeToRunInParallelShell);
- // Send update that changes "a" so that the original update will no longer match this doc.
- // Turn off the failpoint so the server stops hanging.
- assert.commandWorked(sessionDB2.foo.update({"x": -150}, {$set: {"a": 3000}}));
- assert.commandWorked(st.s.adminCommand({
- configureFailPoint: "hangAfterThrowWouldChangeOwningShardRetryableWrite",
- mode: "off",
- }));
- awaitShell();
- assert.eq(1, db.foo.find({"x": -150, "a": 3000}).itcount());
- assert.eq(0, db.foo.find({"a": 15}).itcount());
- assert.eq(0, db.foo.find({"x": 1000}).itcount());
- })();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangAfterThrowWouldChangeOwningShardRetryableWrite",
+ "alwaysOn",
+ st.s,
+ codeToRunInParallelShell);
+ // Send update that changes "a" so that the original update will no longer match this doc.
+ // Turn off the failpoint so the server stops hanging.
+ assert.commandWorked(sessionDB2.foo.update({"x": -150}, {$set: {"a": 3000}}));
+ assert.commandWorked(st.s.adminCommand({
+ configureFailPoint: "hangAfterThrowWouldChangeOwningShardRetryableWrite",
+ mode: "off",
+ }));
+ awaitShell();
+ assert.eq(1, db.foo.find({"x": -150, "a": 3000}).itcount());
+ assert.eq(0, db.foo.find({"a": 15}).itcount());
+ assert.eq(0, db.foo.find({"x": 1000}).itcount());
+})();
- // Assert that if the concurrent update modifies the document and the update which changes the
- // shard key still matches the doc, the final document reflects both updates.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if the concurrent update modifies the document and the update which changes the
+// shard key still matches the doc, the final document reflects both updates.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession({retryWrites : true});
let sessionDB = session.getDatabase("db");
let res = sessionDB.foo.update({"x": 150}, {$set: {"x": -1000}});
@@ -396,28 +394,29 @@
assert.eq(1, res.nMatched);
assert.eq(1, res.nModified);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangAfterThrowWouldChangeOwningShardRetryableWrite",
- "alwaysOn",
- st.s,
- codeToRunInParallelShell);
- // Send update that changes "a". The original update will still match this doc because it
- // queries only on the shard key. Turn off the failpoint so the server stops hanging.
- assert.commandWorked(sessionDB2.foo.update({"x": 150}, {$set: {"a": -200}}));
- assert.commandWorked(st.s.adminCommand({
- configureFailPoint: "hangAfterThrowWouldChangeOwningShardRetryableWrite",
- mode: "off",
- }));
- awaitShell();
- assert.eq(1, db.foo.find({"x": -1000, "a": -200}).itcount());
- assert.eq(0, db.foo.find({"a": 20}).itcount());
- assert.eq(0, db.foo.find({"x": 150}).itcount());
- })();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangAfterThrowWouldChangeOwningShardRetryableWrite",
+ "alwaysOn",
+ st.s,
+ codeToRunInParallelShell);
+ // Send update that changes "a". The original update will still match this doc because it
+ // queries only on the shard key. Turn off the failpoint so the server stops hanging.
+ assert.commandWorked(sessionDB2.foo.update({"x": 150}, {$set: {"a": -200}}));
+ assert.commandWorked(st.s.adminCommand({
+ configureFailPoint: "hangAfterThrowWouldChangeOwningShardRetryableWrite",
+ mode: "off",
+ }));
+ awaitShell();
+ assert.eq(1, db.foo.find({"x": -1000, "a": -200}).itcount());
+ assert.eq(0, db.foo.find({"a": 20}).itcount());
+ assert.eq(0, db.foo.find({"x": 150}).itcount());
+})();
- // Assert that if a concurrent delete removes the same document that the original update
- // attempts to modify the shard key for, we don't match any docs.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if a concurrent delete removes the same document that the original update
+// attempts to modify the shard key for, we don't match any docs.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession({retryWrites : true});
let sessionDB = session.getDatabase("db");
let res = sessionDB.foo.update({"x": -150}, {$set: {"x": 1000}});
@@ -425,24 +424,23 @@
assert.eq(0, res.nMatched);
assert.eq(0, res.nModified);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangAfterThrowWouldChangeOwningShardRetryableWrite",
- "alwaysOn",
- st.s,
- codeToRunInParallelShell);
- // Remove this doc so that the original update will no longer match any doc.
- // Turn off the failpoint so the server stops hanging.
- assert.commandWorked(sessionDB2.foo.remove({"x": -150}));
- assert.commandWorked(st.s.adminCommand({
- configureFailPoint: "hangAfterThrowWouldChangeOwningShardRetryableWrite",
- mode: "off",
- }));
- awaitShell();
- assert.eq(0, db.foo.find({"x": -150}).itcount());
- assert.eq(0, db.foo.find({"a": 3000}).itcount());
- assert.eq(0, db.foo.find({"x": 1000}).itcount());
- })();
-
- st.stop();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangAfterThrowWouldChangeOwningShardRetryableWrite",
+ "alwaysOn",
+ st.s,
+ codeToRunInParallelShell);
+ // Remove this doc so that the original update will no longer match any doc.
+ // Turn off the failpoint so the server stops hanging.
+ assert.commandWorked(sessionDB2.foo.remove({"x": -150}));
+ assert.commandWorked(st.s.adminCommand({
+ configureFailPoint: "hangAfterThrowWouldChangeOwningShardRetryableWrite",
+ mode: "off",
+ }));
+ awaitShell();
+ assert.eq(0, db.foo.find({"x": -150}).itcount());
+ assert.eq(0, db.foo.find({"a": 3000}).itcount());
+ assert.eq(0, db.foo.find({"x": 1000}).itcount());
+})();
+st.stop();
}());
diff --git a/jstests/sharding/update_shard_key_doc_moves_shards.js b/jstests/sharding/update_shard_key_doc_moves_shards.js
index ff673d91390..9567b807b1e 100644
--- a/jstests/sharding/update_shard_key_doc_moves_shards.js
+++ b/jstests/sharding/update_shard_key_doc_moves_shards.js
@@ -5,457 +5,454 @@
*/
(function() {
- 'use strict';
-
- load("jstests/sharding/libs/update_shard_key_helpers.js");
-
- const st = new ShardingTest({mongos: 1, shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
- const kDbName = 'db';
- const mongos = st.s0;
- const shard0 = st.shard0.shardName;
- const shard1 = st.shard1.shardName;
- const ns = kDbName + '.foo';
-
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
-
- function changeShardKeyWhenFailpointsSet(session, sessionDB, runInTxn, isFindAndModify) {
- let docsToInsert = [{"x": 4, "a": 3}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
- cleanupOrphanedDocs(st, ns);
-
- // Assert that the document is not updated when the delete fails
- assert.commandWorked(st.rs1.getPrimary().getDB(kDbName).adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- errorCode: ErrorCodes.WriteConflict,
- failCommands: ["delete"],
- failInternalCommands: true
- }
- }));
- if (isFindAndModify) {
- runFindAndModifyCmdFail(
- st, kDbName, session, sessionDB, runInTxn, {"x": 300}, {"$set": {"x": 30}}, false);
- } else {
- runUpdateCmdFail(st,
- kDbName,
- session,
- sessionDB,
- runInTxn,
- {"x": 300},
- {"$set": {"x": 30}},
- false,
- ErrorCodes.WriteConflict);
- }
- assert.commandWorked(st.rs1.getPrimary().getDB(kDbName).adminCommand({
- configureFailPoint: "failCommand",
- mode: "off",
- }));
-
- // Assert that the document is not updated when the insert fails
- assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- errorCode: ErrorCodes.NamespaceNotFound,
- failCommands: ["insert"],
- failInternalCommands: true
- }
- }));
- if (isFindAndModify) {
- runFindAndModifyCmdFail(
- st, kDbName, session, sessionDB, runInTxn, {"x": 300}, {"$set": {"x": 30}}, false);
- } else {
- runUpdateCmdFail(st,
- kDbName,
- session,
- sessionDB,
- runInTxn,
- {"x": 300},
- {"$set": {"x": 30}},
- false,
- ErrorCodes.NamespaceNotFound);
- }
- assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).adminCommand({
- configureFailPoint: "failCommand",
- mode: "off",
- }));
-
- // Assert that the shard key update is not committed when there are no write errors and the
- // transaction is explicity aborted.
- if (runInTxn) {
- session.startTransaction();
- if (isFindAndModify) {
- sessionDB.foo.findAndModify({query: {"x": 300}, update: {"$set": {"x": 30}}});
- } else {
- assert.commandWorked(sessionDB.foo.update({"x": 300}, {"$set": {"x": 30}}));
- }
- assert.commandWorked(session.abortTransaction_forTesting());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 300}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
- }
-
- mongos.getDB(kDbName).foo.drop();
- }
-
- //
- // Test that changing the shard key works correctly when either the update or findAndModify
- // command is used and when the command is run either as a retryable write or in a transaction.
- // Tuples represent [shouldRunCommandInTxn, runUpdateAsFindAndModifyCmd, isUpsert].
- //
-
- const changeShardKeyOptions = [
- [false, false, false],
- [true, false, false],
- [true, true, false],
- [false, true, false],
- [false, false, true],
- [true, false, true],
- [false, true, true],
- [true, true, true]
- ];
-
- //
- // Tests for op-style updates.
- //
-
- changeShardKeyOptions.forEach(function(updateConfig) {
- let runInTxn, isFindAndModify, upsert;
- [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
-
- jsTestLog("Testing changing the shard key using op style update and " +
- (isFindAndModify ? "findAndModify command " : "update command ") +
- (runInTxn ? "in transaction " : "as retryable write"));
-
- let session = st.s.startSession({retryWrites: runInTxn ? false : true});
- let sessionDB = session.getDatabase(kDbName);
-
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 30}}, {"$set": {"x": 600}}],
- upsert);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 30}}}, {"$set": {"x": {"a": 600}}}],
- upsert);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 30}}, {"$set": {"x": 600}}],
- upsert);
-
- // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
- // them for both upsert true and false.
- if (!upsert) {
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id": 300}, {
- "$set": {"_id": 30}
- });
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id.a": 300}, {
- "$set": {"_id": {"a": 30}}
- });
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {
- "$set": {"x": [30]}
- });
- assertCannotUnsetSKField(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {
- "$unset": {"x": 1}
- });
-
- if (!isFindAndModify) {
- assertCannotUpdateWithMultiTrue(
- st, kDbName, ns, session, sessionDB, runInTxn, {"x": 300}, {"$set": {"x": 30}});
- }
- changeShardKeyWhenFailpointsSet(session, sessionDB, runInTxn, isFindAndModify);
- }
- });
-
- //
- // Tests for replacement style updates.
- //
-
- changeShardKeyOptions.forEach(function(updateConfig) {
- let runInTxn, isFindAndModify, upsert;
- [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
-
- jsTestLog("Testing changing the shard key using replacement style update and " +
- (isFindAndModify ? "findAndModify command " : "update command ") +
- (runInTxn ? "in transaction " : "as retryable write"));
-
- let session = st.s.startSession({retryWrites: runInTxn ? false : true});
- let sessionDB = session.getDatabase(kDbName);
-
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300}, {"x": 4}],
- [{"x": 30}, {"x": 600}],
- upsert);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 30}}, {"x": {"a": 600}}],
- upsert);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 30, "y": 80}, {"x": 600, "y": 3}],
- upsert);
-
- // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
- // them for both upsert true and false.
- if (!upsert) {
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id": 300}, {
- "_id": 30
- });
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id.a": 300}, {
- "_id": {"a": 30}
- });
- if (!isFindAndModify) {
- assertCannotUpdateWithMultiTrue(
- st, kDbName, ns, session, sessionDB, runInTxn, {"x": 300}, {"x": 30});
- }
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {
- "x": [30]
- });
- assertCannotUnsetSKField(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {});
- }
- });
-
- let session = st.s.startSession({retryWrites: true});
- let sessionDB = session.getDatabase(kDbName);
-
- let docsToInsert =
- [{"x": 4, "a": 3}, {"x": 78}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
-
- // ----Assert correct behavior when collection is hash sharded----
+'use strict';
- // Non-upsert case
- assertCanUpdatePrimitiveShardKeyHashedChangeShards(st, kDbName, ns, session, sessionDB, false);
- assertCanUpdatePrimitiveShardKeyHashedChangeShards(st, kDbName, ns, session, sessionDB, true);
+load("jstests/sharding/libs/update_shard_key_helpers.js");
- // ----Assert correct error when changing a doc shard key conflicts with an orphan----
+const st = new ShardingTest({mongos: 1, shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
+const kDbName = 'db';
+const mongos = st.s0;
+const shard0 = st.shard0.shardName;
+const shard1 = st.shard1.shardName;
+const ns = kDbName + '.foo';
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
- mongos.getDB(kDbName).foo.insert({"x": 505});
-
- let _id = mongos.getDB(kDbName).foo.find({"x": 505}).toArray()[0]._id;
- assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).foo.insert({"x": 2, "_id": _id}));
-
- let res = sessionDB.foo.update({"x": 505}, {"$set": {"x": 20}});
- assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
- assert(res.getWriteError().errmsg.includes(
- "There is either an orphan for this document or _id for this collection is not globally unique."));
-
- session.startTransaction();
- res = sessionDB.foo.update({"x": 505}, {"$set": {"x": 20}});
- assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
- assert(res.errmsg.includes(
- "There is either an orphan for this document or _id for this collection is not globally unique."));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- mongos.getDB(kDbName).foo.drop();
-
- // ----Assert retryable write result has WCE when the internal commitTransaction fails----
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, shard0);
+function changeShardKeyWhenFailpointsSet(session, sessionDB, runInTxn, isFindAndModify) {
+ let docsToInsert = [{"x": 4, "a": 3}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
cleanupOrphanedDocs(st, ns);
- // Turn on failcommand fail point to fail CoordinateCommitTransaction
- assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).adminCommand({
+ // Assert that the document is not updated when the delete fails
+ assert.commandWorked(st.rs1.getPrimary().getDB(kDbName).adminCommand({
configureFailPoint: "failCommand",
mode: "alwaysOn",
data: {
- writeConcernError: {code: NumberInt(12345), errmsg: "dummy error"},
- failCommands: ["coordinateCommitTransaction"],
+ errorCode: ErrorCodes.WriteConflict,
+ failCommands: ["delete"],
failInternalCommands: true
}
}));
+ if (isFindAndModify) {
+ runFindAndModifyCmdFail(
+ st, kDbName, session, sessionDB, runInTxn, {"x": 300}, {"$set": {"x": 30}}, false);
+ } else {
+ runUpdateCmdFail(st,
+ kDbName,
+ session,
+ sessionDB,
+ runInTxn,
+ {"x": 300},
+ {"$set": {"x": 30}},
+ false,
+ ErrorCodes.WriteConflict);
+ }
+ assert.commandWorked(st.rs1.getPrimary().getDB(kDbName).adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "off",
+ }));
- res = sessionDB.foo.update({x: 4}, {$set: {x: 1000}});
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(12345, res.getWriteConcernError().code);
-
- let findAndModCmd = {
- findAndModify: 'foo',
- query: {x: 78},
- update: {$set: {x: 250}},
- lsid: {id: UUID()},
- txnNumber: NumberLong(1),
- };
- res = sessionDB.runCommand(findAndModCmd);
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.writeConcernError.code, 12345);
- assert(res.writeConcernError.errmsg.includes("dummy error"));
-
+ // Assert that the document is not updated when the insert fails
+ assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {
+ errorCode: ErrorCodes.NamespaceNotFound,
+ failCommands: ["insert"],
+ failInternalCommands: true
+ }
+ }));
+ if (isFindAndModify) {
+ runFindAndModifyCmdFail(
+ st, kDbName, session, sessionDB, runInTxn, {"x": 300}, {"$set": {"x": 30}}, false);
+ } else {
+ runUpdateCmdFail(st,
+ kDbName,
+ session,
+ sessionDB,
+ runInTxn,
+ {"x": 300},
+ {"$set": {"x": 30}},
+ false,
+ ErrorCodes.NamespaceNotFound);
+ }
assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).adminCommand({
configureFailPoint: "failCommand",
mode: "off",
}));
- mongos.getDB(kDbName).foo.drop();
+ // Assert that the shard key update is not committed when there are no write errors and the
+ // transaction is explicitly aborted.
+ if (runInTxn) {
+ session.startTransaction();
+ if (isFindAndModify) {
+ sessionDB.foo.findAndModify({query: {"x": 300}, update: {"$set": {"x": 30}}});
+ } else {
+ assert.commandWorked(sessionDB.foo.update({"x": 300}, {"$set": {"x": 30}}));
+ }
+ assert.commandWorked(session.abortTransaction_forTesting());
+ assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 300}).itcount());
+ assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
+ }
- // ----Assert that updating the shard key in a batch with size > 1 fails----
+ mongos.getDB(kDbName).foo.drop();
+}
+
+//
+// Test that changing the shard key works correctly when either the update or findAndModify
+// command is used and when the command is run either as a retryable write or in a transaction.
+// Tuples represent [shouldRunCommandInTxn, runUpdateAsFindAndModifyCmd, isUpsert].
+//
+
+const changeShardKeyOptions = [
+ [false, false, false],
+ [true, false, false],
+ [true, true, false],
+ [false, true, false],
+ [false, false, true],
+ [true, false, true],
+ [false, true, true],
+ [true, true, true]
+];
+
+//
+// Tests for op-style updates.
+//
+
+changeShardKeyOptions.forEach(function(updateConfig) {
+ let runInTxn, isFindAndModify, upsert;
+ [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
+
+ jsTestLog("Testing changing the shard key using op style update and " +
+ (isFindAndModify ? "findAndModify command " : "update command ") +
+ (runInTxn ? "in transaction " : "as retryable write"));
+
+ let session = st.s.startSession({retryWrites: runInTxn ? false : true});
+ let sessionDB = session.getDatabase(kDbName);
- assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, false, true);
- assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, false, false);
+ assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 30}}, {"$set": {"x": 600}}],
+ upsert);
+ assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 30}}}, {"$set": {"x": {"a": 600}}}],
+ upsert);
+ assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 30}}, {"$set": {"x": 600}}],
+ upsert);
+
+ // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
+ // them for both upsert true and false.
+ if (!upsert) {
+ assertCannotUpdate_id(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id": 300}, {
+ "$set": {"_id": 30}
+ });
+ assertCannotUpdate_idDottedPath(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id.a": 300}, {
+ "$set": {"_id": {"a": 30}}
+ });
+ assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {
+ "$set": {"x": [30]}
+ });
+ assertCannotUnsetSKField(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {
+ "$unset": {"x": 1}
+ });
+
+ if (!isFindAndModify) {
+ assertCannotUpdateWithMultiTrue(
+ st, kDbName, ns, session, sessionDB, runInTxn, {"x": 300}, {"$set": {"x": 30}});
+ }
+ changeShardKeyWhenFailpointsSet(session, sessionDB, runInTxn, isFindAndModify);
+ }
+});
- session = st.s.startSession({retryWrites: false});
- sessionDB = session.getDatabase(kDbName);
- assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, true, true);
- assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, true, false);
+//
+// Tests for replacement style updates.
+//
- // ----Multiple writes in txn-----
+changeShardKeyOptions.forEach(function(updateConfig) {
+ let runInTxn, isFindAndModify, upsert;
+ [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
- // Update two docs, updating one twice
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
- cleanupOrphanedDocs(st, ns);
+ jsTestLog("Testing changing the shard key using replacement style update and " +
+ (isFindAndModify ? "findAndModify command " : "update command ") +
+ (runInTxn ? "in transaction " : "as retryable write"));
- session.startTransaction();
- let id = mongos.getDB(kDbName).foo.find({"x": 500}).toArray()[0]._id;
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 30}}));
- assert.commandWorked(sessionDB.foo.update({"x": 30}, {"$set": {"x": 600}}));
- assert.commandWorked(sessionDB.foo.update({"x": 4}, {"$set": {"x": 50}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+ let session = st.s.startSession({retryWrites: runInTxn ? false : true});
+ let sessionDB = session.getDatabase(kDbName);
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
- assert.eq(id, mongos.getDB(kDbName).foo.find({"x": 600}).toArray()[0]._id);
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 4}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 50}).itcount());
+ assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 30}, {"x": 600}],
+ upsert);
+ assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 30}}, {"x": {"a": 600}}],
+ upsert);
+ assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 30, "y": 80}, {"x": 600, "y": 3}],
+ upsert);
+
+ // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
+ // them for both upsert true and false.
+ if (!upsert) {
+ assertCannotUpdate_id(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id": 300}, {
+ "_id": 30
+ });
+ assertCannotUpdate_idDottedPath(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id.a": 300}, {
+ "_id": {"a": 30}
+ });
+ if (!isFindAndModify) {
+ assertCannotUpdateWithMultiTrue(
+ st, kDbName, ns, session, sessionDB, runInTxn, {"x": 300}, {"x": 30});
+ }
+ assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {
+ "x": [30]
+ });
+ assertCannotUnsetSKField(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {});
+ }
+});
- mongos.getDB(kDbName).foo.drop();
+let session = st.s.startSession({retryWrites: true});
+let sessionDB = session.getDatabase(kDbName);
- // Check that doing $inc on doc A, then updating shard key for doc A, then $inc again only incs
- // once
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
- cleanupOrphanedDocs(st, ns);
+let docsToInsert =
+ [{"x": 4, "a": 3}, {"x": 78}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
- session.startTransaction();
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 30}}));
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+// ----Assert correct behavior when collection is hash sharded----
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 30, "a": 7}).itcount());
+// Non-upsert case
+assertCanUpdatePrimitiveShardKeyHashedChangeShards(st, kDbName, ns, session, sessionDB, false);
+assertCanUpdatePrimitiveShardKeyHashedChangeShards(st, kDbName, ns, session, sessionDB, true);
- mongos.getDB(kDbName).foo.drop();
+// ----Assert correct error when changing a doc's shard key conflicts with an orphan----
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
- cleanupOrphanedDocs(st, ns);
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+mongos.getDB(kDbName).foo.insert({"x": 505});
- // Insert and $inc before moving doc
- session.startTransaction();
- id = mongos.getDB(kDbName).foo.find({"x": 500}).toArray()[0]._id;
- assert.commandWorked(sessionDB.foo.insert({"x": 1, "a": 1}));
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
- sessionDB.foo.findAndModify({query: {"x": 500}, update: {$set: {"x": 20}}});
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).toArray().length);
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 20}).toArray().length);
- assert.eq(20, mongos.getDB(kDbName).foo.find({"_id": id}).toArray()[0].x);
- assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).toArray().length);
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 20, "a": 7}).toArray().length);
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 1}).toArray().length);
+let _id = mongos.getDB(kDbName).foo.find({"x": 505}).toArray()[0]._id;
+assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).foo.insert({"x": 2, "_id": _id}));
- mongos.getDB(kDbName).foo.drop();
+let res = sessionDB.foo.update({"x": 505}, {"$set": {"x": 20}});
+assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
+assert(res.getWriteError().errmsg.includes(
+ "There is either an orphan for this document or _id for this collection is not globally unique."));
- // ----Assert correct behavior when update is sent directly to a shard----
+session.startTransaction();
+res = sessionDB.foo.update({"x": 505}, {"$set": {"x": 20}});
+assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
+assert(res.errmsg.includes(
+ "There is either an orphan for this document or _id for this collection is not globally unique."));
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
- cleanupOrphanedDocs(st, ns);
+mongos.getDB(kDbName).foo.drop();
- //
- // For Op-style updates.
- //
-
- // An update sent directly to a shard cannot change the shard key.
- assert.commandFailedWithCode(
- st.rs1.getPrimary().getDB(kDbName).foo.update({"x": 500}, {$set: {"x": 2}}),
- ErrorCodes.ImmutableField);
- assert.commandFailedWithCode(st.rs1.getPrimary().getDB(kDbName).foo.update(
- {"x": 1000}, {$set: {"x": 2}}, {upsert: true}),
- ErrorCodes.ImmutableField);
- assert.commandFailedWithCode(st.rs0.getPrimary().getDB(kDbName).foo.update(
- {"x": 1000}, {$set: {"x": 2}}, {upsert: true}),
- ErrorCodes.ImmutableField);
-
- // The query will not match a doc and upsert is false, so this will not fail but will be a
- // no-op.
- res = assert.commandWorked(
- st.rs0.getPrimary().getDB(kDbName).foo.update({"x": 500}, {$set: {"x": 2}}));
- assert.eq(0, res.nMatched);
- assert.eq(0, res.nModified);
- assert.eq(0, res.nUpserted);
-
- //
- // For Replacement style updates.
- //
-
- // An update sent directly to a shard cannot change the shard key.
- assert.commandFailedWithCode(
- st.rs1.getPrimary().getDB(kDbName).foo.update({"x": 500}, {"x": 2}),
- ErrorCodes.ImmutableField);
- assert.commandFailedWithCode(
- st.rs1.getPrimary().getDB(kDbName).foo.update({"x": 1000}, {"x": 2}, {upsert: true}),
- ErrorCodes.ImmutableField);
- assert.commandFailedWithCode(
- st.rs0.getPrimary().getDB(kDbName).foo.update({"x": 1000}, {"x": 2}, {upsert: true}),
- ErrorCodes.ImmutableField);
-
- // The query will not match a doc and upsert is false, so this will not fail but will be a
- // no-op.
- res = assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).foo.update({"x": 500}, {"x": 2}));
- assert.eq(0, res.nMatched);
- assert.eq(0, res.nModified);
- assert.eq(0, res.nUpserted);
+// ----Assert retryable write result has WCE when the internal commitTransaction fails----
- mongos.getDB(kDbName).foo.drop();
-
- st.stop();
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+cleanupOrphanedDocs(st, ns);
+// Turn on the failCommand failpoint to fail coordinateCommitTransaction
+assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {
+ writeConcernError: {code: NumberInt(12345), errmsg: "dummy error"},
+ failCommands: ["coordinateCommitTransaction"],
+ failInternalCommands: true
+ }
+}));
+
+res = sessionDB.foo.update({x: 4}, {$set: {x: 1000}});
+assert.commandWorkedIgnoringWriteConcernErrors(res);
+assert.eq(12345, res.getWriteConcernError().code);
+
+let findAndModCmd = {
+ findAndModify: 'foo',
+ query: {x: 78},
+ update: {$set: {x: 250}},
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(1),
+};
+res = sessionDB.runCommand(findAndModCmd);
+assert.commandWorkedIgnoringWriteConcernErrors(res);
+assert.eq(res.writeConcernError.code, 12345);
+assert(res.writeConcernError.errmsg.includes("dummy error"));
+
+assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "off",
+}));
+
+mongos.getDB(kDbName).foo.drop();
+
+// ----Assert that updating the shard key in a batch with size > 1 fails----
+
+assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, false, true);
+assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, false, false);
+
+session = st.s.startSession({retryWrites: false});
+sessionDB = session.getDatabase(kDbName);
+assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, true, true);
+assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, true, false);
+
+// ----Multiple writes in txn-----
+
+// Update two docs, updating one twice
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+cleanupOrphanedDocs(st, ns);
+
+session.startTransaction();
+let id = mongos.getDB(kDbName).foo.find({"x": 500}).toArray()[0]._id;
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 30}}));
+assert.commandWorked(sessionDB.foo.update({"x": 30}, {"$set": {"x": 600}}));
+assert.commandWorked(sessionDB.foo.update({"x": 4}, {"$set": {"x": 50}}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
+assert.eq(id, mongos.getDB(kDbName).foo.find({"x": 600}).toArray()[0]._id);
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 4}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 50}).itcount());
+
+mongos.getDB(kDbName).foo.drop();
+
+// Check that doing a $inc on doc A, then updating the shard key for doc A, and then running the
+// $inc again only increments the field once (the second $inc no longer matches the doc)
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+cleanupOrphanedDocs(st, ns);
+
+session.startTransaction();
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 30}}));
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 30, "a": 7}).itcount());
+
+mongos.getDB(kDbName).foo.drop();
+
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+cleanupOrphanedDocs(st, ns);
+
+// Insert and $inc before moving doc
+session.startTransaction();
+id = mongos.getDB(kDbName).foo.find({"x": 500}).toArray()[0]._id;
+assert.commandWorked(sessionDB.foo.insert({"x": 1, "a": 1}));
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
+sessionDB.foo.findAndModify({query: {"x": 500}, update: {$set: {"x": 20}}});
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).toArray().length);
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 20}).toArray().length);
+assert.eq(20, mongos.getDB(kDbName).foo.find({"_id": id}).toArray()[0].x);
+assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).toArray().length);
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 20, "a": 7}).toArray().length);
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 1}).toArray().length);
+
+mongos.getDB(kDbName).foo.drop();
+
+// ----Assert correct behavior when update is sent directly to a shard----
+
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+cleanupOrphanedDocs(st, ns);
+
+//
+// For Op-style updates.
+//
+
+// An update sent directly to a shard cannot change the shard key.
+assert.commandFailedWithCode(
+ st.rs1.getPrimary().getDB(kDbName).foo.update({"x": 500}, {$set: {"x": 2}}),
+ ErrorCodes.ImmutableField);
+assert.commandFailedWithCode(
+ st.rs1.getPrimary().getDB(kDbName).foo.update({"x": 1000}, {$set: {"x": 2}}, {upsert: true}),
+ ErrorCodes.ImmutableField);
+assert.commandFailedWithCode(
+ st.rs0.getPrimary().getDB(kDbName).foo.update({"x": 1000}, {$set: {"x": 2}}, {upsert: true}),
+ ErrorCodes.ImmutableField);
+
+// The query will not match a doc and upsert is false, so this will not fail but will be a
+// no-op.
+res = assert.commandWorked(
+ st.rs0.getPrimary().getDB(kDbName).foo.update({"x": 500}, {$set: {"x": 2}}));
+assert.eq(0, res.nMatched);
+assert.eq(0, res.nModified);
+assert.eq(0, res.nUpserted);
+
+//
+// For Replacement style updates.
+//
+
+// An update sent directly to a shard cannot change the shard key.
+assert.commandFailedWithCode(st.rs1.getPrimary().getDB(kDbName).foo.update({"x": 500}, {"x": 2}),
+ ErrorCodes.ImmutableField);
+assert.commandFailedWithCode(
+ st.rs1.getPrimary().getDB(kDbName).foo.update({"x": 1000}, {"x": 2}, {upsert: true}),
+ ErrorCodes.ImmutableField);
+assert.commandFailedWithCode(
+ st.rs0.getPrimary().getDB(kDbName).foo.update({"x": 1000}, {"x": 2}, {upsert: true}),
+ ErrorCodes.ImmutableField);
+
+// The query will not match a doc and upsert is false, so this will not fail but will be a
+// no-op.
+res = assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).foo.update({"x": 500}, {"x": 2}));
+assert.eq(0, res.nMatched);
+assert.eq(0, res.nModified);
+assert.eq(0, res.nUpserted);
+
+mongos.getDB(kDbName).foo.drop();
+
+st.stop();
})();
diff --git a/jstests/sharding/update_shard_key_doc_on_same_shard.js b/jstests/sharding/update_shard_key_doc_on_same_shard.js
index 90133e49325..00f2aa23435 100644
--- a/jstests/sharding/update_shard_key_doc_on_same_shard.js
+++ b/jstests/sharding/update_shard_key_doc_on_same_shard.js
@@ -5,783 +5,768 @@
*/
(function() {
- 'use strict';
-
- load("jstests/sharding/libs/update_shard_key_helpers.js");
-
- const st = new ShardingTest({mongos: 1, shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
- const kDbName = 'db';
- const ns = kDbName + '.foo';
- const mongos = st.s0;
- const shard0 = st.shard0.shardName;
- const shard1 = st.shard1.shardName;
-
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
-
- // -----------------------------------------
- // Updates to the shard key are not allowed if write is not retryable and not in a multi-stmt
- // txn
- // -----------------------------------------
-
- let docsToInsert = [{"x": 4, "a": 3}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
-
- assert.writeError(mongos.getDB(kDbName).foo.update({"x": 300}, {"x": 600}));
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 300}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
-
- assert.throws(function() {
- mongos.getDB(kDbName).foo.findAndModify({query: {"x": 300}, update: {$set: {"x": 600}}});
- });
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 300}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
-
- mongos.getDB(kDbName).foo.drop();
-
- // ---------------------------------
- // Update shard key retryable write
- // ---------------------------------
-
- let session = st.s.startSession({retryWrites: true});
- let sessionDB = session.getDatabase(kDbName);
-
- // Modify updates
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
-
- // upsert : true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 900}, {"x": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, false, false, {"_id": 300}, {"$set": {"_id": 600}});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, false, false, {"_id.a": 300}, {
- "$set": {"_id": {"a": 600}}
- });
- assertCannotUpdateWithMultiTrue(
- st, kDbName, ns, session, sessionDB, false, {"x": 300}, {"$set": {"x": 600}});
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"$set": {"x": [300]}});
- assertCannotUnsetSKField(
- st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"$unset": {"x": 1}});
-
- // Replacement updates
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- false);
-
- // upsert : true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, false, false, {"_id": 300}, {"_id": 600});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, false, false, {"_id.a": 300}, {"_id": {"a": 600}});
- assertCannotUpdateWithMultiTrue(
- st, kDbName, ns, session, sessionDB, false, {"x": 300}, {"x": 600});
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, false, false, {"x": 300, "y": 80}, {"x": 600});
- // Shard key fields are missing in query.
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"x": 600, "y": 80, "a": 2});
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"x": [300]});
-
- // Modify style findAndModify
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
-
- // upsert : true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, false, true, {"_id": 300}, {"$set": {"_id": 600}});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, false, true, {"_id.a": 300}, {
- "$set": {"_id": {"a": 600}}
- });
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"$set": {"x": [300]}});
- assertCannotUnsetSKField(
- st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"$unset": {"x": 1}});
-
- // Replacement style findAndModify
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- false);
-
- // upsert: true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, false, true, {"_id": 300}, {"_id": 600});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, false, true, {"_id.a": 300}, {"_id": {"a": 600}});
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, false, true, {"x": 300, "y": 80}, {"x": 600});
- // Shard key fields are missing in query.
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"x": 600, "y": 80, "a": 2});
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"x": [300]});
-
- // Bulk writes retryable writes
- assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(
- st, kDbName, ns, session, sessionDB, false, false);
- assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(
- st, kDbName, ns, session, sessionDB, false, true);
-
- // ----Assert correct behavior when collection is hash sharded----
-
- assertCanUpdatePrimitiveShardKeyHashedSameShards(st, kDbName, ns, session, sessionDB, true);
-
- // ---------------------------------------
- // Update shard key in multi statement txn
- // ---------------------------------------
-
- session = st.s.startSession();
- sessionDB = session.getDatabase(kDbName);
-
- // ----Single writes in txn----
-
- // Modify updates
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
-
- // upsert : true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, true, false, {"_id": 300}, {"$set": {"_id": 600}});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, true, false, {"_id.a": 300}, {
- "$set": {"_id": {"a": 600}}
- });
- assertCannotUpdateWithMultiTrue(
- st, kDbName, ns, session, sessionDB, true, {"x": 300}, {"$set": {"x": 600}});
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"$set": {"x": [300]}});
- assertCannotUnsetSKField(
- st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"$unset": {"x": 1}});
-
- // Replacement updates
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- false);
-
- // upsert : true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, true, false, {"_id": 300}, {"_id": 600});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, true, false, {"_id.a": 300}, {"_id": {"a": 600}});
- assertCannotUpdateWithMultiTrue(
- st, kDbName, ns, session, sessionDB, true, {"x": 300}, {"x": 600});
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, true, false, {"x": 300, "y": 80}, {"x": 600});
- // Shard key fields are missing in query.
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"x": 600, "y": 80, "a": 2});
-
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"x": [300]});
-
- // Modify style findAndModify
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
-
- // upsert : true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, true, true, {"_id": 300}, {"$set": {"_id": 600}});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, true, true, {"_id.a": 300}, {
- "$set": {"_id": {"a": 600}}
- });
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"$set": {"x": [300]}});
- assertCannotUnsetSKField(
- st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"$unset": {"x": 1}});
-
- // Replacement style findAndModify
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- false);
-
- // upsert : true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, true, true, {"_id": 300}, {"_id": 600});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, true, true, {"_id.a": 300}, {"_id": {"a": 600}});
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, true, true, {"x": 300, "y": 80}, {"x": 600});
- // Shard key fields are missing in query.
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"x": 600, "y": 80, "a": 2});
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"x": [300]});
-
- // ----Assert correct behavior when collection is hash sharded----
-
- assertCanUpdatePrimitiveShardKeyHashedSameShards(st, kDbName, ns, session, sessionDB, true);
-
- // ----Multiple writes in txn-----
-
- // Bulk writes in txn
- assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(
- st, kDbName, ns, session, sessionDB, true, false);
- assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(
- st, kDbName, ns, session, sessionDB, true, true);
-
- // Update two docs, updating one twice
- docsToInsert = [{"x": 4, "a": 3}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
-
- session.startTransaction();
- let id = mongos.getDB(kDbName).foo.find({"x": 500}).toArray()[0]._id;
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 400}}));
- assert.commandWorked(sessionDB.foo.update({"x": 400}, {"x": 600, "_id": id}));
- assert.commandWorked(sessionDB.foo.update({"x": 4}, {"$set": {"x": 30}}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 400}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
- assert.eq(id, mongos.getDB(kDbName).foo.find({"x": 600}).toArray()[0]._id);
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 4}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
-
- mongos.getDB(kDbName).foo.drop();
-
-    // Check that doing $inc on doc A, then updating the shard key for doc A, then doing $inc again
-    // only increments the field once
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
-
- session.startTransaction();
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 400}}));
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 400}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 400, "a": 7}).itcount());
-
- mongos.getDB(kDbName).foo.drop();
-
- // Check that doing findAndModify to update shard key followed by $inc works correctly
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
-
- session.startTransaction();
- sessionDB.foo.findAndModify({query: {"x": 500}, update: {$set: {"x": 600}}});
- assert.commandWorked(sessionDB.foo.update({"x": 600}, {"$inc": {"a": 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600, "a": 7}).itcount());
-
- mongos.getDB(kDbName).foo.drop();
-
-    // Check that doing findAndModify followed by an update on the shard key works correctly
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
-
- id = mongos.getDB(kDbName).foo.find({"x": 4}).toArray()[0]._id;
- session.startTransaction();
- sessionDB.foo.findAndModify({query: {"x": 4}, update: {$set: {"x": 20}}});
- assert.commandWorked(sessionDB.foo.update({"x": 20}, {$set: {"x": 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 4}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 20}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 1}).itcount());
- assert.eq(id, mongos.getDB(kDbName).foo.find({"x": 1}).toArray()[0]._id);
-
- mongos.getDB(kDbName).foo.drop();
-
- st.stop();
-
+'use strict';
+
+load("jstests/sharding/libs/update_shard_key_helpers.js");
+
+const st = new ShardingTest({mongos: 1, shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
+const kDbName = 'db';
+const ns = kDbName + '.foo';
+const mongos = st.s0;
+const shard0 = st.shard0.shardName;
+const shard1 = st.shard1.shardName;
+
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, shard0);
+
+// -----------------------------------------
+// Updates to the shard key are not allowed if the write is not retryable and is not in a
+// multi-stmt txn
+// -----------------------------------------
+
+let docsToInsert = [{"x": 4, "a": 3}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
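+// Note: shardCollectionMoveChunks() comes from update_shard_key_helpers.js; as used here it is
+// expected to shard ns on {x: 1}, insert docsToInsert, split the chunk at {x: 100}, and move the
+// chunk containing {x: 300} to the other shard (see the helper for the exact behavior).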
+
+assert.writeError(mongos.getDB(kDbName).foo.update({"x": 300}, {"x": 600}));
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 300}).itcount());
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
+
+assert.throws(function() {
+ mongos.getDB(kDbName).foo.findAndModify({query: {"x": 300}, update: {$set: {"x": 600}}});
+});
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 300}).itcount());
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
+
+mongos.getDB(kDbName).foo.drop();
+
+// ---------------------------------
+// Update shard key retryable write
+// ---------------------------------
+
+let session = st.s.startSession({retryWrites: true});
+let sessionDB = session.getDatabase(kDbName);
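+// In the assertCan*/assertCannot* helper calls below (see update_shard_key_helpers.js), the two
+// booleans after sessionDB are runInTxn and isFindAndModify (assertCannotUpdateWithMultiTrue takes
+// only runInTxn), followed by the query doc(s), the update doc(s), and, where present, the upsert
+// flag. Here runInTxn is false, so each write runs as a retryable write.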
+
+// Modify updates
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 900}, {"x": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(
+ st, kDbName, ns, session, sessionDB, false, false, {"_id": 300}, {"$set": {"_id": 600}});
+assertCannotUpdate_idDottedPath(st, kDbName, ns, session, sessionDB, false, false, {"_id.a": 300}, {
+ "$set": {"_id": {"a": 600}}
+});
+assertCannotUpdateWithMultiTrue(
+ st, kDbName, ns, session, sessionDB, false, {"x": 300}, {"$set": {"x": 600}});
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"$set": {"x": [300]}});
+assertCannotUnsetSKField(
+ st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"$unset": {"x": 1}});
+
+// Replacement updates
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(
+ st, kDbName, ns, session, sessionDB, false, false, {"_id": 300}, {"_id": 600});
+assertCannotUpdate_idDottedPath(
+ st, kDbName, ns, session, sessionDB, false, false, {"_id.a": 300}, {"_id": {"a": 600}});
+assertCannotUpdateWithMultiTrue(st, kDbName, ns, session, sessionDB, false, {"x": 300}, {"x": 600});
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, false, false, {"x": 300, "y": 80}, {"x": 600});
+// Shard key fields are missing in query.
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"x": 600, "y": 80, "a": 2});
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"x": [300]});
+
+// Modify style findAndModify
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(
+ st, kDbName, ns, session, sessionDB, false, true, {"_id": 300}, {"$set": {"_id": 600}});
+assertCannotUpdate_idDottedPath(st, kDbName, ns, session, sessionDB, false, true, {"_id.a": 300}, {
+ "$set": {"_id": {"a": 600}}
+});
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"$set": {"x": [300]}});
+assertCannotUnsetSKField(
+ st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"$unset": {"x": 1}});
+
+// Replacement style findAndModify
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ false);
+
+// upsert: true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(st, kDbName, ns, session, sessionDB, false, true, {"_id": 300}, {"_id": 600});
+assertCannotUpdate_idDottedPath(
+ st, kDbName, ns, session, sessionDB, false, true, {"_id.a": 300}, {"_id": {"a": 600}});
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, false, true, {"x": 300, "y": 80}, {"x": 600});
+// Shard key fields are missing in query.
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"x": 600, "y": 80, "a": 2});
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"x": [300]});
+
+// Bulk writes retryable writes
+assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(st, kDbName, ns, session, sessionDB, false, false);
+assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(st, kDbName, ns, session, sessionDB, false, true);
+
+// ----Assert correct behavior when collection is hash sharded----
+
+assertCanUpdatePrimitiveShardKeyHashedSameShards(st, kDbName, ns, session, sessionDB, true);
+
+// ---------------------------------------
+// Update shard key in multi statement txn
+// ---------------------------------------
+
+session = st.s.startSession();
+sessionDB = session.getDatabase(kDbName);
+
+// ----Single writes in txn----
+
+// Modify updates
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(
+ st, kDbName, ns, session, sessionDB, true, false, {"_id": 300}, {"$set": {"_id": 600}});
+assertCannotUpdate_idDottedPath(st, kDbName, ns, session, sessionDB, true, false, {"_id.a": 300}, {
+ "$set": {"_id": {"a": 600}}
+});
+assertCannotUpdateWithMultiTrue(
+ st, kDbName, ns, session, sessionDB, true, {"x": 300}, {"$set": {"x": 600}});
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"$set": {"x": [300]}});
+assertCannotUnsetSKField(
+ st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"$unset": {"x": 1}});
+
+// Replacement updates
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(st, kDbName, ns, session, sessionDB, true, false, {"_id": 300}, {"_id": 600});
+assertCannotUpdate_idDottedPath(
+ st, kDbName, ns, session, sessionDB, true, false, {"_id.a": 300}, {"_id": {"a": 600}});
+assertCannotUpdateWithMultiTrue(st, kDbName, ns, session, sessionDB, true, {"x": 300}, {"x": 600});
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, true, false, {"x": 300, "y": 80}, {"x": 600});
+// Shard key fields are missing in query.
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"x": 600, "y": 80, "a": 2});
+
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"x": [300]});
+
+// Modify style findAndModify
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(
+ st, kDbName, ns, session, sessionDB, true, true, {"_id": 300}, {"$set": {"_id": 600}});
+assertCannotUpdate_idDottedPath(
+ st, kDbName, ns, session, sessionDB, true, true, {"_id.a": 300}, {"$set": {"_id": {"a": 600}}});
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"$set": {"x": [300]}});
+assertCannotUnsetSKField(
+ st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"$unset": {"x": 1}});
+
+// Replacement style findAndModify
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(st, kDbName, ns, session, sessionDB, true, true, {"_id": 300}, {"_id": 600});
+assertCannotUpdate_idDottedPath(
+ st, kDbName, ns, session, sessionDB, true, true, {"_id.a": 300}, {"_id": {"a": 600}});
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, true, true, {"x": 300, "y": 80}, {"x": 600});
+// Shard key fields are missing in query.
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"x": 600, "y": 80, "a": 2});
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"x": [300]});
+
+// ----Assert correct behavior when collection is hash sharded----
+
+assertCanUpdatePrimitiveShardKeyHashedSameShards(st, kDbName, ns, session, sessionDB, true);
+
+// ----Multiple writes in txn-----
+
+// Bulk writes in txn
+assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(st, kDbName, ns, session, sessionDB, true, false);
+assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(st, kDbName, ns, session, sessionDB, true, true);
+
+// Update two docs, updating one twice
+docsToInsert = [{"x": 4, "a": 3}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+
+session.startTransaction();
+let id = mongos.getDB(kDbName).foo.find({"x": 500}).toArray()[0]._id;
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 400}}));
+assert.commandWorked(sessionDB.foo.update({"x": 400}, {"x": 600, "_id": id}));
+assert.commandWorked(sessionDB.foo.update({"x": 4}, {"$set": {"x": 30}}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 400}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
+assert.eq(id, mongos.getDB(kDbName).foo.find({"x": 600}).toArray()[0]._id);
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 4}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
+
+mongos.getDB(kDbName).foo.drop();
+
+// Check that doing $inc on doc A, then updating the shard key for doc A, then doing $inc again
+// only increments the field once
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+
+session.startTransaction();
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 400}}));
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
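+// The doc began as {x: 500, a: 6}: the first $inc bumps a to 7, the next update changes the shard
+// key to x: 400, and the second $inc matches nothing (no doc has x: 500 anymore), so a stays 7.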
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 400}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 400, "a": 7}).itcount());
+
+mongos.getDB(kDbName).foo.drop();
+
+// Check that doing findAndModify to update shard key followed by $inc works correctly
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+
+session.startTransaction();
+sessionDB.foo.findAndModify({query: {"x": 500}, update: {$set: {"x": 600}}});
+assert.commandWorked(sessionDB.foo.update({"x": 600}, {"$inc": {"a": 1}}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
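+// Again starting from {x: 500, a: 6}: the findAndModify moves the doc to x: 600 and the follow-up
+// $inc on x: 600 still matches it, so a ends at 7.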
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600, "a": 7}).itcount());
+
+mongos.getDB(kDbName).foo.drop();
+
+// Check that doing findAndModify followed by an update on the shard key works correctly
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+
+id = mongos.getDB(kDbName).foo.find({"x": 4}).toArray()[0]._id;
+session.startTransaction();
+sessionDB.foo.findAndModify({query: {"x": 4}, update: {$set: {"x": 20}}});
+assert.commandWorked(sessionDB.foo.update({"x": 20}, {$set: {"x": 1}}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 4}).itcount());
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 20}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 1}).itcount());
+assert.eq(id, mongos.getDB(kDbName).foo.find({"x": 1}).toArray()[0]._id);
+
+mongos.getDB(kDbName).foo.drop();
+
+st.stop();
})();
diff --git a/jstests/sharding/update_shard_key_pipeline_update.js b/jstests/sharding/update_shard_key_pipeline_update.js
index b65aefb947b..9f1ff0082e0 100644
--- a/jstests/sharding/update_shard_key_pipeline_update.js
+++ b/jstests/sharding/update_shard_key_pipeline_update.js
@@ -4,236 +4,235 @@
*/
(function() {
- 'use strict';
-
- load("jstests/sharding/libs/update_shard_key_helpers.js");
-
- const st = new ShardingTest({mongos: 1, shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
- const kDbName = 'db';
- const mongos = st.s0;
- const shard0 = st.shard0.shardName;
- const shard1 = st.shard1.shardName;
- const ns = kDbName + '.foo';
-
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
-
- // Tuples represent [shouldRunCommandInTxn, runUpdateAsFindAndModifyCmd, isUpsert].
- const changeShardKeyOptions = [
- [false, false, false],
- [true, false, false],
- [true, true, false],
- [false, true, false],
- [false, false, true],
- [true, false, true],
- [false, true, true],
- [true, true, true]
- ];
-
- // Test pipeline updates where the document being updated remains on the same shard.
-
- changeShardKeyOptions.forEach(function(updateConfig) {
- let runInTxn, isFindAndModify, upsert;
- [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
-
- jsTestLog("Testing changing the shard key using pipeline style update and " +
- (isFindAndModify ? "findAndModify command " : "update command ") +
- (runInTxn ? "in transaction " : "as retryable write"));
-
- let session = st.s.startSession({retryWrites: runInTxn ? false : true});
- let sessionDB = session.getDatabase(kDbName);
-
- assertCanUpdatePrimitiveShardKey(
- st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300}, {"x": 4}],
- [
- [{$set: {"x": {$multiply: ["$x", 2]}}}, {$addFields: {"z": 1}}],
- [{$set: {"x": {$multiply: ["$x", -1]}}}, {$addFields: {"z": 1}}]
- ],
- upsert,
- [{"x": 600, "z": 1}, {"x": -4, "z": 1}]);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x.a": 300}, {"x.a": 4}],
- [
- [{$set: {"x": {"a": {$multiply: ["$x.a", 2]}, "y": 1}}}],
- [{$set: {"x": {"a": {$multiply: ["$x.a", -1]}, "y": 1}}}]
- ],
- upsert,
- [{"x": {"a": 600, "y": 1}}, {"x": {"a": -4, "y": 1}}]);
- assertCanUpdatePartialShardKey(
- st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [[{$set: {"x": {$multiply: ["$x", 2]}}}], [{$set: {"x": {$multiply: ["$x", -1]}}}]],
- upsert,
- [{"x": 600}, {"x": -4}]);
-
- // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
- // them for both upsert true and false.
- if (!upsert) {
- assertCannotUpdate_id(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- {"_id": 300},
- [{$set: {"_id": {$multiply: ["$_id", 2]}}}],
- {"_id": 600});
- assertCannotUpdate_idDottedPath(st,
+'use strict';
+
+load("jstests/sharding/libs/update_shard_key_helpers.js");
+
+const st = new ShardingTest({mongos: 1, shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
+const kDbName = 'db';
+const mongos = st.s0;
+const shard0 = st.shard0.shardName;
+const shard1 = st.shard1.shardName;
+const ns = kDbName + '.foo';
+
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, shard0);
+
+// Tuples represent [shouldRunCommandInTxn, runUpdateAsFindAndModifyCmd, isUpsert].
+const changeShardKeyOptions = [
+ [false, false, false],
+ [true, false, false],
+ [true, true, false],
+ [false, true, false],
+ [false, false, true],
+ [true, false, true],
+ [false, true, true],
+ [true, true, true]
+];
+
+// Test pipeline updates where the document being updated remains on the same shard.
+
+changeShardKeyOptions.forEach(function(updateConfig) {
+ let runInTxn, isFindAndModify, upsert;
+ [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
+
+ jsTestLog("Testing changing the shard key using pipeline style update and " +
+ (isFindAndModify ? "findAndModify command " : "update command ") +
+ (runInTxn ? "in transaction " : "as retryable write"));
+
+ let session = st.s.startSession({retryWrites: runInTxn ? false : true});
+ let sessionDB = session.getDatabase(kDbName);
+
+ assertCanUpdatePrimitiveShardKey(
+ st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300}, {"x": 4}],
+ [
+ [{$set: {"x": {$multiply: ["$x", 2]}}}, {$addFields: {"z": 1}}],
+ [{$set: {"x": {$multiply: ["$x", -1]}}}, {$addFields: {"z": 1}}]
+ ],
+ upsert,
+ [{"x": 600, "z": 1}, {"x": -4, "z": 1}]);
+ assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x.a": 300}, {"x.a": 4}],
+ [
+ [{$set: {"x": {"a": {$multiply: ["$x.a", 2]}, "y": 1}}}],
+ [{$set: {"x": {"a": {$multiply: ["$x.a", -1]}, "y": 1}}}]
+ ],
+ upsert,
+ [{"x": {"a": 600, "y": 1}}, {"x": {"a": -4, "y": 1}}]);
+ assertCanUpdatePartialShardKey(
+ st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [[{$set: {"x": {$multiply: ["$x", 2]}}}], [{$set: {"x": {$multiply: ["$x", -1]}}}]],
+ upsert,
+ [{"x": 600}, {"x": -4}]);
+
+ // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
+ // them for both upsert true and false.
+ if (!upsert) {
+ assertCannotUpdate_id(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ {"_id": 300},
+ [{$set: {"_id": {$multiply: ["$_id", 2]}}}],
+ {"_id": 600});
+ assertCannotUpdate_idDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ {"_id.a": 300},
+ [{$set: {"_id": {"a": {$multiply: ["$_id.a", 2]}}}}],
+ {"_id": {"a": 600}});
+ assertCannotUnsetSKFieldUsingPipeline(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ {"x": 300, "y": 80},
+ [{$project: {"y": 0}}],
+ {"x": 300, "y": 80});
+ if (!isFindAndModify) {
+ assertCannotUpdateWithMultiTrue(st,
kDbName,
ns,
session,
sessionDB,
runInTxn,
- isFindAndModify,
- {"_id.a": 300},
- [{$set: {"_id": {"a": {$multiply: ["$_id.a", 2]}}}}],
- {"_id": {"a": 600}});
- assertCannotUnsetSKFieldUsingPipeline(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- {"x": 300, "y": 80},
- [{$project: {"y": 0}}],
- {"x": 300, "y": 80});
- if (!isFindAndModify) {
- assertCannotUpdateWithMultiTrue(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- {"x": 300},
- [{$set: {"x": {$multiply: ["$x", 2]}}}],
- {"x": 600});
- }
+ {"x": 300},
+ [{$set: {"x": {$multiply: ["$x", 2]}}}],
+ {"x": 600});
}
- });
-
- // Test pipeline updates where the document being updated will move shards.
-
- changeShardKeyOptions.forEach(function(updateConfig) {
- let runInTxn, isFindAndModify, upsert;
- [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
-
- jsTestLog("Testing changing the shard key using pipeline style update and " +
- (isFindAndModify ? "findAndModify command " : "update command ") +
- (runInTxn ? "in transaction " : "as retryable write"));
-
- let session = st.s.startSession({retryWrites: runInTxn ? false : true});
- let sessionDB = session.getDatabase(kDbName);
-
- assertCanUpdatePrimitiveShardKey(
- st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300}, {"x": 4}],
- [
- [{$set: {"x": {$multiply: ["$x", -1]}}}, {$addFields: {"z": 1}}],
- [{$set: {"x": {$multiply: ["$x", 100]}}}, {$addFields: {"z": 1}}]
- ],
- upsert,
- [{"x": -300, "z": 1}, {"x": 400, "z": 1}]);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x.a": 300}, {"x.a": 4}],
- [
- [{$set: {"x": {"a": {$multiply: ["$x.a", -1]}, "y": 1}}}],
- [{$set: {"x": {"a": {$multiply: ["$x.a", 100]}, "y": 1}}}]
- ],
- upsert,
- [{"x": {"a": -300, "y": 1}}, {"x": {"a": 400, "y": 1}}]);
- assertCanUpdatePartialShardKey(
- st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [[{$set: {"x": {$multiply: ["$x", -1]}}}], [{$set: {"x": {$multiply: ["$x", 100]}}}]],
- upsert,
- [{"x": -300}, {"x": 400}]);
-
- // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
- // them for both upsert true and false.
- if (!upsert) {
- assertCannotUpdate_id(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- {"_id": 300},
- [{$set: {"_id": {$multiply: ["$_id", -1]}}}],
- {"_id": -300});
- assertCannotUpdate_idDottedPath(st,
+ }
+});
+
+// Test pipeline updates where the document being updated will move shards.
+
+changeShardKeyOptions.forEach(function(updateConfig) {
+ let runInTxn, isFindAndModify, upsert;
+ [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
+
+ jsTestLog("Testing changing the shard key using pipeline style update and " +
+ (isFindAndModify ? "findAndModify command " : "update command ") +
+ (runInTxn ? "in transaction " : "as retryable write"));
+
+ let session = st.s.startSession({retryWrites: runInTxn ? false : true});
+ let sessionDB = session.getDatabase(kDbName);
+
+ assertCanUpdatePrimitiveShardKey(
+ st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300}, {"x": 4}],
+ [
+ [{$set: {"x": {$multiply: ["$x", -1]}}}, {$addFields: {"z": 1}}],
+ [{$set: {"x": {$multiply: ["$x", 100]}}}, {$addFields: {"z": 1}}]
+ ],
+ upsert,
+ [{"x": -300, "z": 1}, {"x": 400, "z": 1}]);
+ assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x.a": 300}, {"x.a": 4}],
+ [
+ [{$set: {"x": {"a": {$multiply: ["$x.a", -1]}, "y": 1}}}],
+ [{$set: {"x": {"a": {$multiply: ["$x.a", 100]}, "y": 1}}}]
+ ],
+ upsert,
+ [{"x": {"a": -300, "y": 1}}, {"x": {"a": 400, "y": 1}}]);
+ assertCanUpdatePartialShardKey(
+ st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [[{$set: {"x": {$multiply: ["$x", -1]}}}], [{$set: {"x": {$multiply: ["$x", 100]}}}]],
+ upsert,
+ [{"x": -300}, {"x": 400}]);
+
+ // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
+ // them for both upsert true and false.
+ if (!upsert) {
+ assertCannotUpdate_id(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ {"_id": 300},
+ [{$set: {"_id": {$multiply: ["$_id", -1]}}}],
+ {"_id": -300});
+ assertCannotUpdate_idDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ {"_id.a": 300},
+ [{$set: {"_id": {"a": {$multiply: ["$_id.a", -1]}}}}],
+ {"_id": {"a": -300}});
+ assertCannotUnsetSKFieldUsingPipeline(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ {"x": 300, "y": 80},
+ [{$project: {"y": 0}}],
+ {"x": 300, "y": 80});
+ if (!isFindAndModify) {
+ assertCannotUpdateWithMultiTrue(st,
kDbName,
ns,
session,
sessionDB,
runInTxn,
- isFindAndModify,
- {"_id.a": 300},
- [{$set: {"_id": {"a": {$multiply: ["$_id.a", -1]}}}}],
- {"_id": {"a": -300}});
- assertCannotUnsetSKFieldUsingPipeline(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- {"x": 300, "y": 80},
- [{$project: {"y": 0}}],
- {"x": 300, "y": 80});
- if (!isFindAndModify) {
- assertCannotUpdateWithMultiTrue(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- {"x": 300},
- [{$set: {"x": {$multiply: ["$x", -1]}}}],
- {"x": -300});
- }
+ {"x": 300},
+ [{$set: {"x": {$multiply: ["$x", -1]}}}],
+ {"x": -300});
}
- });
-
- st.stop();
+ }
+});
+st.stop();
})();
\ No newline at end of file
diff --git a/jstests/sharding/update_sharded.js b/jstests/sharding/update_sharded.js
index 432329f7210..ea1939bfd72 100644
--- a/jstests/sharding/update_sharded.js
+++ b/jstests/sharding/update_sharded.js
@@ -2,110 +2,108 @@
// since shard key is immutable.
(function() {
- const s = new ShardingTest({name: "auto1", shards: 2, mongos: 1});
-
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- db = s.getDB("test");
-
- // Repeat same tests with hashed shard key, to ensure identical behavior.
-    // Repeat the same tests with a hashed shard key to ensure identical behavior.
- s.adminCommand({shardcollection: "test.update1", key: {key: "hashed"}});
-
- s.shard0.getDB("admin").setLogLevel(1);
- s.shard1.getDB("admin").setLogLevel(1);
-
- for (let i = 0; i < 2; i++) {
- const collName = "update" + i;
- const hashedKey = (collName == "update1");
-
- coll = db.getCollection(collName);
- coll.insert({_id: 1, key: 1});
-
-        // Replacement and op-style upserts.
- assert.commandWorked(coll.update({_id: 2, key: 2}, {key: 2, foo: 'bar'}, {upsert: true}));
- assert.commandWorked(coll.update({_id: 3, key: 3}, {$set: {foo: 'bar'}}, {upsert: true}));
-
- assert.eq(coll.count(), 3, "count A");
- assert.eq(coll.findOne({_id: 3}).key, 3, "findOne 3 key A");
- assert.eq(coll.findOne({_id: 3}).foo, 'bar', "findOne 3 foo A");
-
- // update existing using update()
- assert.commandWorked(coll.update({_id: 1}, {key: 1, other: 1}));
- assert.commandWorked(coll.update({_id: 2}, {key: 2, other: 2}));
- assert.commandWorked(coll.update({_id: 3}, {key: 3, other: 3}));
-
- // do a replacement-style update which queries the shard key and keeps it constant
- assert.commandWorked(coll.update({key: 4}, {_id: 4, key: 4}, {upsert: true}));
- assert.commandWorked(coll.update({key: 4}, {key: 4, other: 4}));
- assert.eq(coll.find({key: 4, other: 4}).count(), 1, 'replacement update error');
- coll.remove({_id: 4});
-
- assert.eq(coll.count(), 3, "count B");
- coll.find().forEach(function(x) {
- assert.eq(x._id, x.key, "_id == key");
- assert.eq(x._id, x.other, "_id == other");
- });
-
- assert.writeError(coll.update({_id: 1, key: 1}, {$set: {key: 2}}));
- assert.eq(coll.findOne({_id: 1}).key, 1, 'key unchanged');
-
- assert.writeOK(coll.update({_id: 1, key: 1}, {$set: {foo: 2}}));
-
- coll.update({key: 17}, {$inc: {x: 5}}, true);
- assert.eq(5, coll.findOne({key: 17}).x, "up1");
-
- coll.update({key: 18}, {$inc: {x: 5}}, true, true);
- assert.eq(5, coll.findOne({key: 18}).x, "up2");
-
- // Make sure we can extract exact _id from certain queries
- assert.writeOK(coll.update({_id: ObjectId()}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$or: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$and: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
-
- // Invalid extraction of exact _id from query
- assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
- assert.writeError(coll.update({_id: {$gt: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
- assert.writeError(coll.update(
- {$or: [{_id: ObjectId()}, {_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeError(coll.update(
- {$and: [{_id: ObjectId()}, {_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeError(coll.update({'_id.x': ObjectId()}, {$set: {x: 1}}, {multi: false}));
-
- // Make sure we can extract exact shard key from certain queries
- assert.writeOK(coll.update({key: ObjectId()}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({key: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({key: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({key: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$or: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$and: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
-
- // Invalid extraction of exact key from query
- assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
- assert.writeError(coll.update({'key.x': ObjectId()}, {$set: {x: 1}}, {multi: false}));
-
- // Inexact queries may target a single shard. Range queries may target a single shard as
- // long as the collection is not hashed.
- assert[hashedKey ? "writeError" : "writeOK"](
- coll.update({key: {$gt: 0}}, {$set: {x: 1}}, {multi: false}));
-        // Note: {key:-1} and {key:-2} fall on shard0 for both hashed and ascending shard keys.
- assert.writeOK(coll.update({$or: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$and: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
-
- // In cases where an inexact query does target multiple shards, single update is rejected.
- assert.writeError(coll.update({key: {$gt: MinKey}}, {$set: {x: 1}}, {multi: false}));
- assert.writeError(
- coll.update({$or: [{key: -10}, {key: 10}]}, {$set: {x: 1}}, {multi: false}));
-
- // Make sure failed shard key or _id extraction doesn't affect the other
- assert.writeOK(coll.update({'_id.x': ObjectId(), key: 1}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: ObjectId(), 'key.x': 1}, {$set: {x: 1}}, {multi: false}));
- }
-
- s.stop();
-
+const s = new ShardingTest({name: "auto1", shards: 2, mongos: 1});
+
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+db = s.getDB("test");
+
+// Repeat same tests with hashed shard key, to ensure identical behavior.
+s.shardColl("update0", {key: 1}, {key: 0}, {key: 1}, db.getName(), true);
+s.adminCommand({shardcollection: "test.update1", key: {key: "hashed"}});
+
+s.shard0.getDB("admin").setLogLevel(1);
+s.shard1.getDB("admin").setLogLevel(1);
+
+for (let i = 0; i < 2; i++) {
+ const collName = "update" + i;
+ const hashedKey = (collName == "update1");
+
+ coll = db.getCollection(collName);
+ coll.insert({_id: 1, key: 1});
+
+    // Replacement and op-style upserts.
+ assert.commandWorked(coll.update({_id: 2, key: 2}, {key: 2, foo: 'bar'}, {upsert: true}));
+ assert.commandWorked(coll.update({_id: 3, key: 3}, {$set: {foo: 'bar'}}, {upsert: true}));
+
+ assert.eq(coll.count(), 3, "count A");
+ assert.eq(coll.findOne({_id: 3}).key, 3, "findOne 3 key A");
+ assert.eq(coll.findOne({_id: 3}).foo, 'bar', "findOne 3 foo A");
+
+ // update existing using update()
+ assert.commandWorked(coll.update({_id: 1}, {key: 1, other: 1}));
+ assert.commandWorked(coll.update({_id: 2}, {key: 2, other: 2}));
+ assert.commandWorked(coll.update({_id: 3}, {key: 3, other: 3}));
+
+ // do a replacement-style update which queries the shard key and keeps it constant
+ assert.commandWorked(coll.update({key: 4}, {_id: 4, key: 4}, {upsert: true}));
+ assert.commandWorked(coll.update({key: 4}, {key: 4, other: 4}));
+ assert.eq(coll.find({key: 4, other: 4}).count(), 1, 'replacement update error');
+ coll.remove({_id: 4});
+
+ assert.eq(coll.count(), 3, "count B");
+ coll.find().forEach(function(x) {
+ assert.eq(x._id, x.key, "_id == key");
+ assert.eq(x._id, x.other, "_id == other");
+ });
+
+ assert.writeError(coll.update({_id: 1, key: 1}, {$set: {key: 2}}));
+ assert.eq(coll.findOne({_id: 1}).key, 1, 'key unchanged');
+
+ assert.writeOK(coll.update({_id: 1, key: 1}, {$set: {foo: 2}}));
+
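+    // (In the legacy update() signature the trailing positional booleans are upsert and multi.)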
+ coll.update({key: 17}, {$inc: {x: 5}}, true);
+ assert.eq(5, coll.findOne({key: 17}).x, "up1");
+
+ coll.update({key: 18}, {$inc: {x: 5}}, true, true);
+ assert.eq(5, coll.findOne({key: 18}).x, "up2");
+
+ // Make sure we can extract exact _id from certain queries
+ assert.writeOK(coll.update({_id: ObjectId()}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({_id: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({_id: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$or: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$and: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({_id: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+
+ // Invalid extraction of exact _id from query
+ assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({_id: {$gt: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(
+ coll.update({$or: [{_id: ObjectId()}, {_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update(
+ {$and: [{_id: ObjectId()}, {_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({'_id.x': ObjectId()}, {$set: {x: 1}}, {multi: false}));
+
+ // Make sure we can extract exact shard key from certain queries
+ assert.writeOK(coll.update({key: ObjectId()}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({key: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({key: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({key: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$or: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$and: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+
+ // Invalid extraction of exact key from query
+ assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({'key.x': ObjectId()}, {$set: {x: 1}}, {multi: false}));
+
+ // Inexact queries may target a single shard. Range queries may target a single shard as
+ // long as the collection is not hashed.
+ assert[hashedKey ? "writeError" : "writeOK"](
+ coll.update({key: {$gt: 0}}, {$set: {x: 1}}, {multi: false}));
+ // Note: {key:-1} and {key:-2} fall on shard0 for both hashed and ascending shard keys.
+ assert.writeOK(coll.update({$or: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$and: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
+
+ // In cases where an inexact query does target multiple shards, a single update is rejected.
+ assert.writeError(coll.update({key: {$gt: MinKey}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({$or: [{key: -10}, {key: 10}]}, {$set: {x: 1}}, {multi: false}));
+
+ // Make sure failed shard key or _id extraction doesn't affect the other
+ assert.writeOK(coll.update({'_id.x': ObjectId(), key: 1}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({_id: ObjectId(), 'key.x': 1}, {$set: {x: 1}}, {multi: false}));
+}
+
+s.stop();
})();
diff --git a/jstests/sharding/update_zone_key_range.js b/jstests/sharding/update_zone_key_range.js
index b4babb0f441..97826029b03 100644
--- a/jstests/sharding/update_zone_key_range.js
+++ b/jstests/sharding/update_zone_key_range.js
@@ -3,43 +3,42 @@
* in sharding_catalog_assign_key_range_to_zone_test.cpp.
*/
(function() {
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- var configDB = st.s.getDB('config');
- var shardName = configDB.shards.findOne()._id;
+var configDB = st.s.getDB('config');
+var shardName = configDB.shards.findOne()._id;
- assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: 'x'}));
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: 'x'}));
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- // Testing basic assign.
- assert.commandWorked(
- st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: 'x'}));
+// Testing basic assign.
+assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: 'x'}));
- var tagDoc = configDB.tags.findOne();
+var tagDoc = configDB.tags.findOne();
- assert.eq('test.user', tagDoc.ns);
- assert.eq({x: 0}, tagDoc.min);
- assert.eq({x: 10}, tagDoc.max);
- assert.eq('x', tagDoc.tag);
+assert.eq('test.user', tagDoc.ns);
+assert.eq({x: 0}, tagDoc.min);
+assert.eq({x: 10}, tagDoc.max);
+assert.eq('x', tagDoc.tag);
- // Cannot assign overlapping ranges
- assert.commandFailedWithCode(
- st.s.adminCommand(
- {updateZoneKeyRange: 'test.user', min: {x: -10}, max: {x: 20}, zone: 'x'}),
- ErrorCodes.RangeOverlapConflict);
+// Cannot assign overlapping ranges
+assert.commandFailedWithCode(
+ st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: -10}, max: {x: 20}, zone: 'x'}),
+ ErrorCodes.RangeOverlapConflict);
- tagDoc = configDB.tags.findOne();
- assert.eq('test.user', tagDoc.ns);
- assert.eq({x: 0}, tagDoc.min);
- assert.eq({x: 10}, tagDoc.max);
- assert.eq('x', tagDoc.tag);
+tagDoc = configDB.tags.findOne();
+assert.eq('test.user', tagDoc.ns);
+assert.eq({x: 0}, tagDoc.min);
+assert.eq({x: 10}, tagDoc.max);
+assert.eq('x', tagDoc.tag);
- // Testing basic remove.
- assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: null}));
+// Testing basic remove.
+assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: null}));
- assert.eq(null, configDB.tags.findOne());
+assert.eq(null, configDB.tags.findOne());
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/update_zone_key_range_not_sharded.js b/jstests/sharding/update_zone_key_range_not_sharded.js
index 4f9603a33a9..211dedc8588 100644
--- a/jstests/sharding/update_zone_key_range_not_sharded.js
+++ b/jstests/sharding/update_zone_key_range_not_sharded.js
@@ -3,42 +3,41 @@
* More detailed tests can be found in sharding_catalog_assign_key_range_to_zone_test.cpp.
*/
(function() {
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- var configDB = st.s.getDB('config');
- var shardName = configDB.shards.findOne()._id;
+var configDB = st.s.getDB('config');
+var shardName = configDB.shards.findOne()._id;
- assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: 'x'}));
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: 'x'}));
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- // Testing basic assign.
- assert.commandWorked(
- st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: 'x'}));
+// Testing basic assign.
+assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: 'x'}));
- var tagDoc = configDB.tags.findOne();
+var tagDoc = configDB.tags.findOne();
- assert.eq('test.user', tagDoc.ns);
- assert.eq({x: 0}, tagDoc.min);
- assert.eq({x: 10}, tagDoc.max);
- assert.eq('x', tagDoc.tag);
+assert.eq('test.user', tagDoc.ns);
+assert.eq({x: 0}, tagDoc.min);
+assert.eq({x: 10}, tagDoc.max);
+assert.eq('x', tagDoc.tag);
- // Cannot assign overlapping ranges
- assert.commandFailedWithCode(
- st.s.adminCommand(
- {updateZoneKeyRange: 'test.user', min: {x: -10}, max: {x: 20}, zone: 'x'}),
- ErrorCodes.RangeOverlapConflict);
+// Cannot assign overlapping ranges
+assert.commandFailedWithCode(
+ st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: -10}, max: {x: 20}, zone: 'x'}),
+ ErrorCodes.RangeOverlapConflict);
- tagDoc = configDB.tags.findOne();
- assert.eq('test.user', tagDoc.ns);
- assert.eq({x: 0}, tagDoc.min);
- assert.eq({x: 10}, tagDoc.max);
- assert.eq('x', tagDoc.tag);
+tagDoc = configDB.tags.findOne();
+assert.eq('test.user', tagDoc.ns);
+assert.eq({x: 0}, tagDoc.min);
+assert.eq({x: 10}, tagDoc.max);
+assert.eq('x', tagDoc.tag);
- // Testing basic remove.
- assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: null}));
+// Testing basic remove.
+assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: null}));
- assert.eq(null, configDB.tags.findOne());
+assert.eq(null, configDB.tags.findOne());
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/upsert_sharded.js b/jstests/sharding/upsert_sharded.js
index 317b3107e09..32a59b9a586 100644
--- a/jstests/sharding/upsert_sharded.js
+++ b/jstests/sharding/upsert_sharded.js
@@ -3,111 +3,107 @@
// NOTE: Generic upsert behavior tests belong in the core suite
//
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 2, mongos: 1});
-
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
-
- assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
-
- var upsertedResult = function(query, expr) {
- coll.remove({});
- return coll.update(query, expr, {upsert: true});
- };
-
- var upsertedField = function(query, expr, fieldName) {
- assert.writeOK(upsertedResult(query, expr));
- return coll.findOne()[fieldName];
- };
-
- var upsertedId = function(query, expr) {
- return upsertedField(query, expr, "_id");
- };
-
- var upsertedXVal = function(query, expr) {
- return upsertedField(query, expr, "x");
- };
-
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {x: 1}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 0}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: coll + "", find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
-
- st.printShardingStatus();
-
- // upserted update replacement would result in no shard key
- assert.writeError(upsertedResult({x: 1}, {}));
-
- // updates with upsert must contain shard key in query when $op style
- assert.eq(1, upsertedXVal({x: 1}, {$set: {a: 1}}));
- assert.eq(1, upsertedXVal({x: {$eq: 1}}, {$set: {a: 1}}));
- assert.eq(1, upsertedXVal({x: {$all: [1]}}, {$set: {a: 1}}));
- assert.eq(1, upsertedXVal({x: {$in: [1]}}, {$set: {a: 1}}));
- assert.eq(1, upsertedXVal({$and: [{x: {$eq: 1}}]}, {$set: {a: 1}}));
- assert.eq(1, upsertedXVal({$or: [{x: {$eq: 1}}]}, {$set: {a: 1}}));
-
- // Missing shard key in query.
- assert.commandFailedWithCode(upsertedResult({}, {$set: {a: 1, x: 1}}),
- ErrorCodes.ShardKeyNotFound);
-
- // Missing equality match on shard key in query.
- assert.commandFailedWithCode(upsertedResult({x: {$gt: 10}}, {$set: {a: 1, x: 5}}),
- ErrorCodes.ShardKeyNotFound);
-
- // Regex shard key value in query is ambigious and cannot be extracted for an equality match.
- assert.commandFailedWithCode(
- upsertedResult({x: {$eq: /abc*/}}, {$set: {a: 1, x: "regexValue"}}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(upsertedResult({x: {$eq: /abc/}}, {$set: {a: 1, x: /abc/}}),
- ErrorCodes.ShardKeyNotFound);
-
- // Shard key in query not extractable.
- assert.commandFailedWithCode(upsertedResult({x: undefined}, {$set: {a: 1}}),
- ErrorCodes.BadValue);
- assert.commandFailedWithCode(upsertedResult({x: [1, 2]}, {$set: {a: 1}}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(upsertedResult({x: {$eq: {$gt: 5}}}, {$set: {a: 1}}),
- ErrorCodes.ShardKeyNotFound);
-
- // nested field extraction always fails with non-nested key - like _id, we require setting the
- // elements directly
- assert.writeError(upsertedResult({"x.x": 1}, {$set: {a: 1}}));
- assert.writeError(upsertedResult({"x.x": {$eq: 1}}, {$set: {a: 1}}));
-
- coll.drop();
-
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {'x.x': 1}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {'x.x': 0}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: coll + "", find: {'x.x': 0}, to: st.shard1.shardName, _waitForDelete: true}));
-
- st.printShardingStatus();
-
- // nested field extraction with nested shard key
- assert.docEq({x: 1}, upsertedXVal({"x.x": 1}, {$set: {a: 1}}));
- assert.docEq({x: 1}, upsertedXVal({"x.x": {$eq: 1}}, {$set: {a: 1}}));
- assert.docEq({x: 1}, upsertedXVal({"x.x": {$all: [1]}}, {$set: {a: 1}}));
- assert.docEq({x: 1}, upsertedXVal({$and: [{"x.x": {$eq: 1}}]}, {$set: {a: 1}}));
- assert.docEq({x: 1}, upsertedXVal({$or: [{"x.x": {$eq: 1}}]}, {$set: {a: 1}}));
-
- // Can specify siblings of nested shard keys
- assert.docEq({x: 1, y: 1}, upsertedXVal({"x.x": 1, "x.y": 1}, {$set: {a: 1}}));
- assert.docEq({x: 1, y: {z: 1}}, upsertedXVal({"x.x": 1, "x.y.z": 1}, {$set: {a: 1}}));
-
- // No arrays at any level
- assert.writeError(upsertedResult({"x.x": []}, {$set: {a: 1}}));
- assert.writeError(upsertedResult({x: {x: []}}, {$set: {a: 1}}));
- assert.writeError(upsertedResult({x: [{x: 1}]}, {$set: {a: 1}}));
-
- // Can't set sub-fields of nested key
- assert.writeError(upsertedResult({"x.x.x": {$eq: 1}}, {$set: {a: 1}}));
-
- st.stop();
-
+'use strict';
+
+var st = new ShardingTest({shards: 2, mongos: 1});
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
+
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
+
+var upsertedResult = function(query, expr) {
+ coll.remove({});
+ return coll.update(query, expr, {upsert: true});
+};
+
+var upsertedField = function(query, expr, fieldName) {
+ assert.writeOK(upsertedResult(query, expr));
+ return coll.findOne()[fieldName];
+};
+
+var upsertedId = function(query, expr) {
+ return upsertedField(query, expr, "_id");
+};
+
+var upsertedXVal = function(query, expr) {
+ return upsertedField(query, expr, "x");
+};
+
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {x: 1}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 0}}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: coll + "", find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+
+st.printShardingStatus();
+
+// an upserted replacement update would result in a document with no shard key
+assert.writeError(upsertedResult({x: 1}, {}));
+
+// updates with upsert must contain the shard key in the query when using $op style
+assert.eq(1, upsertedXVal({x: 1}, {$set: {a: 1}}));
+assert.eq(1, upsertedXVal({x: {$eq: 1}}, {$set: {a: 1}}));
+assert.eq(1, upsertedXVal({x: {$all: [1]}}, {$set: {a: 1}}));
+assert.eq(1, upsertedXVal({x: {$in: [1]}}, {$set: {a: 1}}));
+assert.eq(1, upsertedXVal({$and: [{x: {$eq: 1}}]}, {$set: {a: 1}}));
+assert.eq(1, upsertedXVal({$or: [{x: {$eq: 1}}]}, {$set: {a: 1}}));
+
+// Missing shard key in query.
+assert.commandFailedWithCode(upsertedResult({}, {$set: {a: 1, x: 1}}), ErrorCodes.ShardKeyNotFound);
+
+// Missing equality match on shard key in query.
+assert.commandFailedWithCode(upsertedResult({x: {$gt: 10}}, {$set: {a: 1, x: 5}}),
+ ErrorCodes.ShardKeyNotFound);
+
+// Regex shard key value in query is ambiguous and cannot be extracted for an equality match.
+assert.commandFailedWithCode(upsertedResult({x: {$eq: /abc*/}}, {$set: {a: 1, x: "regexValue"}}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(upsertedResult({x: {$eq: /abc/}}, {$set: {a: 1, x: /abc/}}),
+ ErrorCodes.ShardKeyNotFound);
+
+// Shard key in query not extractable.
+assert.commandFailedWithCode(upsertedResult({x: undefined}, {$set: {a: 1}}), ErrorCodes.BadValue);
+assert.commandFailedWithCode(upsertedResult({x: [1, 2]}, {$set: {a: 1}}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(upsertedResult({x: {$eq: {$gt: 5}}}, {$set: {a: 1}}),
+ ErrorCodes.ShardKeyNotFound);
+
+// nested field extraction always fails with a non-nested key; like _id, we require setting the
+// elements directly.
+assert.writeError(upsertedResult({"x.x": 1}, {$set: {a: 1}}));
+assert.writeError(upsertedResult({"x.x": {$eq: 1}}, {$set: {a: 1}}));
+
+coll.drop();
+
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {'x.x': 1}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {'x.x': 0}}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: coll + "", find: {'x.x': 0}, to: st.shard1.shardName, _waitForDelete: true}));
+
+st.printShardingStatus();
+
+// nested field extraction with nested shard key
+assert.docEq({x: 1}, upsertedXVal({"x.x": 1}, {$set: {a: 1}}));
+assert.docEq({x: 1}, upsertedXVal({"x.x": {$eq: 1}}, {$set: {a: 1}}));
+assert.docEq({x: 1}, upsertedXVal({"x.x": {$all: [1]}}, {$set: {a: 1}}));
+assert.docEq({x: 1}, upsertedXVal({$and: [{"x.x": {$eq: 1}}]}, {$set: {a: 1}}));
+assert.docEq({x: 1}, upsertedXVal({$or: [{"x.x": {$eq: 1}}]}, {$set: {a: 1}}));
+
+// Can specify siblings of nested shard keys
+assert.docEq({x: 1, y: 1}, upsertedXVal({"x.x": 1, "x.y": 1}, {$set: {a: 1}}));
+assert.docEq({x: 1, y: {z: 1}}, upsertedXVal({"x.x": 1, "x.y.z": 1}, {$set: {a: 1}}));
+
+// No arrays at any level
+assert.writeError(upsertedResult({"x.x": []}, {$set: {a: 1}}));
+assert.writeError(upsertedResult({x: {x: []}}, {$set: {a: 1}}));
+assert.writeError(upsertedResult({x: [{x: 1}]}, {$set: {a: 1}}));
+
+// Can't set sub-fields of nested key
+assert.writeError(upsertedResult({"x.x.x": {$eq: 1}}, {$set: {a: 1}}));
+
+st.stop();
})();
diff --git a/jstests/sharding/use_rsm_data_for_cs.js b/jstests/sharding/use_rsm_data_for_cs.js
index c2fafec4889..7ae96385243 100644
--- a/jstests/sharding/use_rsm_data_for_cs.js
+++ b/jstests/sharding/use_rsm_data_for_cs.js
@@ -1,37 +1,37 @@
(function() {
- 'use strict';
+'use strict';
- // init with one shard with one node rs
- var st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
- var mongos = st.s;
- var rs = st.rs0;
+// init with one shard backed by a one-node replica set
+var st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
+var mongos = st.s;
+var rs = st.rs0;
- assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
+assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
- var db = mongos.getDB("test");
- db.foo.save({_id: 1, x: 1});
- assert.eq(db.foo.find({_id: 1}).next().x, 1);
+var db = mongos.getDB("test");
+db.foo.save({_id: 1, x: 1});
+assert.eq(db.foo.find({_id: 1}).next().x, 1);
- // prevent RSM on all nodes to update config shard
- mongos.adminCommand(
- {configureFailPoint: "failReplicaSetChangeConfigServerUpdateHook", mode: "alwaysOn"});
- rs.nodes.forEach(function(node) {
- node.adminCommand(
- {configureFailPoint: "failUpdateShardIdentityConfigString", mode: "alwaysOn"});
- });
+// prevent the RSM on all nodes from updating the config shard
+mongos.adminCommand(
+ {configureFailPoint: "failReplicaSetChangeConfigServerUpdateHook", mode: "alwaysOn"});
+rs.nodes.forEach(function(node) {
+ node.adminCommand(
+ {configureFailPoint: "failUpdateShardIdentityConfigString", mode: "alwaysOn"});
+});
- // add a node to shard rs
- rs.add({'shardsvr': ''});
- rs.reInitiate();
- rs.awaitSecondaryNodes();
+// add a node to shard rs
+rs.add({'shardsvr': ''});
+rs.reInitiate();
+rs.awaitSecondaryNodes();
- jsTest.log("Reload ShardRegistry");
- // force SR reload with flushRouterConfig
- mongos.getDB("admin").runCommand({flushRouterConfig: 1});
+jsTest.log("Reload ShardRegistry");
+// force SR reload with flushRouterConfig
+mongos.getDB("admin").runCommand({flushRouterConfig: 1});
- // issue a read from mongos with secondaryOnly read preference to force it use just added node
- jsTest.log("Issue find");
- assert.eq(db.foo.find({_id: 1}).readPref('secondary').next().x, 1);
+// issue a read from mongos with secondary read preference to force use of the just-added node
+jsTest.log("Issue find");
+assert.eq(db.foo.find({_id: 1}).readPref('secondary').next().x, 1);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js b/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js
index fb610a68925..2d6b4c57020 100644
--- a/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js
+++ b/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js
@@ -3,39 +3,39 @@
* persists it in config.collections.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/uuid_util.js");
+load("jstests/libs/uuid_util.js");
- let db = "test";
+let db = "test";
- let st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}, other: {config: 3}});
+let st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}, other: {config: 3}});
- assert.commandWorked(st.s.adminCommand({enableSharding: db}));
- st.ensurePrimaryShard(db, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: db}));
+st.ensurePrimaryShard(db, st.shard0.shardName);
- // Check that shardCollection propagates and persists UUIDs.
- for (let i = 0; i < 3; i++) {
- let coll = "bar" + i;
- let nss = db + "." + coll;
+// Check that shardCollection propagates and persists UUIDs.
+for (let i = 0; i < 3; i++) {
+ let coll = "bar" + i;
+ let nss = db + "." + coll;
- // It shouldn't matter whether the collection existed on the shard already or not; test
- // both cases.
- if (i === 0) {
- assert.writeOK(st.s.getDB(db).getCollection(coll).insert({x: 1}));
- }
+ // It shouldn't matter whether the collection existed on the shard already or not; test
+ // both cases.
+ if (i === 0) {
+ assert.writeOK(st.s.getDB(db).getCollection(coll).insert({x: 1}));
+ }
- assert.commandWorked(st.s.adminCommand({shardCollection: nss, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: nss, key: {_id: 1}}));
- // Check that the entry for the collection in config.collections has a uuid field.
- let collEntryUUID = getUUIDFromConfigCollections(st.s, nss);
- assert.neq(undefined, collEntryUUID);
+ // Check that the entry for the collection in config.collections has a uuid field.
+ let collEntryUUID = getUUIDFromConfigCollections(st.s, nss);
+ assert.neq(undefined, collEntryUUID);
- // Check that the uuid field in the config.collections entry matches the uuid on the shard.
- let listCollsUUID = getUUIDFromListCollections(st.shard0.getDB(db), coll);
- assert.neq(undefined, listCollsUUID);
- assert.eq(listCollsUUID, collEntryUUID);
- }
+ // Check that the uuid field in the config.collections entry matches the uuid on the shard.
+ let listCollsUUID = getUUIDFromListCollections(st.shard0.getDB(db), coll);
+ assert.neq(undefined, listCollsUUID);
+ assert.eq(listCollsUUID, collEntryUUID);
+}
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/uuid_propagated_to_recipient_shard_on_recvChunkStart.js b/jstests/sharding/uuid_propagated_to_recipient_shard_on_recvChunkStart.js
index 94ac86dbb97..e5dde50d8bb 100644
--- a/jstests/sharding/uuid_propagated_to_recipient_shard_on_recvChunkStart.js
+++ b/jstests/sharding/uuid_propagated_to_recipient_shard_on_recvChunkStart.js
@@ -3,44 +3,43 @@
* collection on itself as part of a migration.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/uuid_util.js");
+load("jstests/libs/uuid_util.js");
- let db = "test";
- let coll = "foo";
- let nss = db + "." + coll;
+let db = "test";
+let coll = "foo";
+let nss = db + "." + coll;
- let st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}, other: {config: 3}});
+let st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}, other: {config: 3}});
- let donor = st.shard0;
- let recipient = st.shard1;
+let donor = st.shard0;
+let recipient = st.shard1;
- let setUp = function() {
- assert.commandWorked(st.s.adminCommand({enableSharding: db}));
- st.ensurePrimaryShard(db, donor.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: nss, key: {_id: 1}}));
- };
+let setUp = function() {
+ assert.commandWorked(st.s.adminCommand({enableSharding: db}));
+ st.ensurePrimaryShard(db, donor.shardName);
+ assert.commandWorked(st.s.adminCommand({shardCollection: nss, key: {_id: 1}}));
+};
- // Check that the recipient accepts the chunk and uses the UUID from the recipient when creating
- // the collection.
+// Check that the recipient accepts the chunk and uses the UUID from the recipient when creating
+// the collection.
- setUp();
- assert.commandWorked(
- st.s.adminCommand({moveChunk: nss, find: {_id: 0}, to: recipient.shardName}));
+setUp();
+assert.commandWorked(st.s.adminCommand({moveChunk: nss, find: {_id: 0}, to: recipient.shardName}));
- let donorUUID = getUUIDFromListCollections(donor.getDB(db), coll);
- assert.neq(undefined, donorUUID);
+let donorUUID = getUUIDFromListCollections(donor.getDB(db), coll);
+assert.neq(undefined, donorUUID);
- let recipientUUID = getUUIDFromListCollections(recipient.getDB(db), coll);
- assert.neq(undefined, recipientUUID);
+let recipientUUID = getUUIDFromListCollections(recipient.getDB(db), coll);
+assert.neq(undefined, recipientUUID);
- assert.eq(donorUUID, recipientUUID);
+assert.eq(donorUUID, recipientUUID);
- // Sanity check that the UUID in config.collections matches the donor's and recipient's UUIDs.
- let collEntryUUID = getUUIDFromConfigCollections(st.s, nss);
- assert.neq(undefined, collEntryUUID);
- assert.eq(donorUUID, collEntryUUID);
+// Sanity check that the UUID in config.collections matches the donor's and recipient's UUIDs.
+let collEntryUUID = getUUIDFromConfigCollections(st.s, nss);
+assert.neq(undefined, collEntryUUID);
+assert.eq(donorUUID, collEntryUUID);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/validate_collection.js b/jstests/sharding/validate_collection.js
index b67a42df6ea..0584c2a8c63 100644
--- a/jstests/sharding/validate_collection.js
+++ b/jstests/sharding/validate_collection.js
@@ -11,76 +11,75 @@
// 4. The previous scenario, but with validation legitimately failing on one of the shards.
(function() {
- const NUM_SHARDS = 3;
- assert(NUM_SHARDS >= 3);
+const NUM_SHARDS = 3;
+assert(NUM_SHARDS >= 3);
- var st = new ShardingTest({shards: NUM_SHARDS});
- var s = st.s;
- var testDb = st.getDB('test');
+var st = new ShardingTest({shards: NUM_SHARDS});
+var s = st.s;
+var testDb = st.getDB('test');
- function setup() {
- assert.writeOK(testDb.test.insert({_id: 0}));
- assert.writeOK(testDb.test.insert({_id: 1}));
+function setup() {
+ assert.writeOK(testDb.test.insert({_id: 0}));
+ assert.writeOK(testDb.test.insert({_id: 1}));
- assert.writeOK(testDb.dummy.insert({_id: 0}));
- assert.writeOK(testDb.dummy.insert({_id: 1}));
- assert.writeOK(testDb.dummy.insert({_id: 2}));
- }
-
- function validate(valid) {
- var res = testDb.runCommand({validate: 'test'});
- assert.commandWorked(res);
- assert.eq(res.valid, valid, tojson(res));
- }
+ assert.writeOK(testDb.dummy.insert({_id: 0}));
+ assert.writeOK(testDb.dummy.insert({_id: 1}));
+ assert.writeOK(testDb.dummy.insert({_id: 2}));
+}
- function setFailValidateFailPointOnShard(enabled, shard) {
- var mode;
- if (enabled) {
- mode = 'alwaysOn';
- } else {
- mode = 'off';
- }
+function validate(valid) {
+ var res = testDb.runCommand({validate: 'test'});
+ assert.commandWorked(res);
+ assert.eq(res.valid, valid, tojson(res));
+}
- var res =
- shard.adminCommand({configureFailPoint: 'validateCmdCollectionNotValid', mode: mode});
- assert.commandWorked(res);
+function setFailValidateFailPointOnShard(enabled, shard) {
+ var mode;
+ if (enabled) {
+ mode = 'alwaysOn';
+ } else {
+ mode = 'off';
}
- setup();
+ var res = shard.adminCommand({configureFailPoint: 'validateCmdCollectionNotValid', mode: mode});
+ assert.commandWorked(res);
+}
- // 1. Collection in an unsharded DB.
- validate(true);
+setup();
+
+// 1. Collection in an unsharded DB.
+validate(true);
- // 2. Sharded collection in a DB.
- assert.commandWorked(s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(s.adminCommand({shardCollection: 'test.test', key: {_id: 1}}));
- assert.commandWorked(s.adminCommand({shardCollection: 'test.dummy', key: {_id: 1}}));
- validate(true);
+// 2. Sharded collection in a DB.
+assert.commandWorked(s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
+assert.commandWorked(s.adminCommand({shardCollection: 'test.test', key: {_id: 1}}));
+assert.commandWorked(s.adminCommand({shardCollection: 'test.dummy', key: {_id: 1}}));
+validate(true);
- // 3. Sharded collection with chunks on two shards.
- st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(s.adminCommand({split: 'test.test', middle: {_id: 1}}));
- assert.commandWorked(
- testDb.adminCommand({moveChunk: 'test.test', find: {_id: 1}, to: st.shard1.shardName}));
- // We move the dummy database to NUM_SHARDS shards so that testDb will exist on all NUM_SHARDS
- // shards but the testDb.test collection will only exist on the first two shards. Prior to
- // SERVER-22588, this scenario would cause validation to fail.
- assert.commandWorked(s.adminCommand({split: 'test.dummy', middle: {_id: 1}}));
- assert.commandWorked(s.adminCommand({split: 'test.dummy', middle: {_id: 2}}));
- assert.commandWorked(
- testDb.adminCommand({moveChunk: 'test.dummy', find: {_id: 1}, to: st.shard1.shardName}));
- assert.commandWorked(
- testDb.adminCommand({moveChunk: 'test.dummy', find: {_id: 2}, to: st.shard2.shardName}));
- assert.eq(st.onNumShards('test'), 2);
- assert.eq(st.onNumShards('dummy'), NUM_SHARDS);
- validate(true);
+// 3. Sharded collection with chunks on two shards.
+st.ensurePrimaryShard('test', st.shard0.shardName);
+assert.commandWorked(s.adminCommand({split: 'test.test', middle: {_id: 1}}));
+assert.commandWorked(
+ testDb.adminCommand({moveChunk: 'test.test', find: {_id: 1}, to: st.shard1.shardName}));
+// We spread the chunks of the dummy collection across all NUM_SHARDS shards so that testDb will
+// exist on all NUM_SHARDS shards but the testDb.test collection will only exist on the first two
+// shards. Prior to SERVER-22588, this scenario would cause validation to fail.
+assert.commandWorked(s.adminCommand({split: 'test.dummy', middle: {_id: 1}}));
+assert.commandWorked(s.adminCommand({split: 'test.dummy', middle: {_id: 2}}));
+assert.commandWorked(
+ testDb.adminCommand({moveChunk: 'test.dummy', find: {_id: 1}, to: st.shard1.shardName}));
+assert.commandWorked(
+ testDb.adminCommand({moveChunk: 'test.dummy', find: {_id: 2}, to: st.shard2.shardName}));
+assert.eq(st.onNumShards('test'), 2);
+assert.eq(st.onNumShards('dummy'), NUM_SHARDS);
+validate(true);
- // 4. Fail validation on one of the shards.
- var primaryShard = st.getPrimaryShard('test');
- setFailValidateFailPointOnShard(true, primaryShard);
- validate(false);
- setFailValidateFailPointOnShard(false, primaryShard);
+// 4. Fail validation on one of the shards.
+var primaryShard = st.getPrimaryShard('test');
+setFailValidateFailPointOnShard(true, primaryShard);
+validate(false);
+setFailValidateFailPointOnShard(false, primaryShard);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/verify_sessions_expiration_sharded.js b/jstests/sharding/verify_sessions_expiration_sharded.js
index fe743f147d6..96cd020aadd 100644
--- a/jstests/sharding/verify_sessions_expiration_sharded.js
+++ b/jstests/sharding/verify_sessions_expiration_sharded.js
@@ -16,135 +16,139 @@
// @tags: [requires_find_command]
(function() {
- "use strict";
-
- // This test makes assertions about the number of logical session records.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
-
- const refresh = {refreshLogicalSessionCacheNow: 1};
- const startSession = {startSession: 1};
- const failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
-
- function refreshSessionsAndVerifyCount(mongosConfig, shardConfig, expectedCount) {
- mongosConfig.runCommand(refresh);
- shardConfig.runCommand(refresh);
-
- assert.eq(mongosConfig.system.sessions.count(), expectedCount);
- }
-
- function verifyOpenCursorCount(db, expectedCount) {
- assert.eq(db.serverStatus().metrics.cursor.open.total, expectedCount);
- }
-
- function getSessions(config) {
- return config.system.sessions.aggregate([{'$listSessions': {allUsers: true}}]).toArray();
- }
-
- const dbName = "test";
- const testCollName = "verify_sessions_find_get_more";
-
- let shardingTest = new ShardingTest({
- shards: 1,
- });
-
- let mongos = shardingTest.s;
- let db = mongos.getDB(dbName);
- let mongosConfig = mongos.getDB("config");
- let shardConfig = shardingTest.rs0.getPrimary().getDB("config");
-
- // 1. Verify that sessions expire from config.system.sessions after the timeout has passed.
- for (let i = 0; i < 5; i++) {
- let res = db.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
- }
- refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 5);
-
- // Manually delete entries in config.system.sessions to simulate TTL expiration.
- assert.commandWorked(mongosConfig.system.sessions.remove({}));
- refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 0);
-
- // 2. Verify that getMores after finds will update the 'lastUse' field on documents in the
- // config.system.sessions collection.
- for (let i = 0; i < 10; i++) {
- db[testCollName].insert({_id: i, a: i, b: 1});
- }
-
- let cursors = [];
- for (let i = 0; i < 5; i++) {
- let session = mongos.startSession({});
- assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
- "initialize the session");
- cursors.push(session.getDatabase(dbName)[testCollName].find({b: 1}).batchSize(1));
- assert(cursors[i].hasNext());
+"use strict";
+
+// This test makes assertions about the number of logical session records.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
+
+const refresh = {
+ refreshLogicalSessionCacheNow: 1
+};
+const startSession = {
+ startSession: 1
+};
+const failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
+
+function refreshSessionsAndVerifyCount(mongosConfig, shardConfig, expectedCount) {
+ mongosConfig.runCommand(refresh);
+ shardConfig.runCommand(refresh);
+
+ assert.eq(mongosConfig.system.sessions.count(), expectedCount);
+}
+
+function verifyOpenCursorCount(db, expectedCount) {
+ assert.eq(db.serverStatus().metrics.cursor.open.total, expectedCount);
+}
+
+function getSessions(config) {
+ return config.system.sessions.aggregate([{'$listSessions': {allUsers: true}}]).toArray();
+}
+
+const dbName = "test";
+const testCollName = "verify_sessions_find_get_more";
+
+let shardingTest = new ShardingTest({
+ shards: 1,
+});
+
+let mongos = shardingTest.s;
+let db = mongos.getDB(dbName);
+let mongosConfig = mongos.getDB("config");
+let shardConfig = shardingTest.rs0.getPrimary().getDB("config");
+
+// 1. Verify that sessions expire from config.system.sessions after the timeout has passed.
+for (let i = 0; i < 5; i++) {
+ let res = db.runCommand(startSession);
+ assert.commandWorked(res, "unable to start session");
+}
+refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 5);
+
+// Manually delete entries in config.system.sessions to simulate TTL expiration.
+assert.commandWorked(mongosConfig.system.sessions.remove({}));
+refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 0);
+
+// 2. Verify that getMores after finds will update the 'lastUse' field on documents in the
+// config.system.sessions collection.
+for (let i = 0; i < 10; i++) {
+ db[testCollName].insert({_id: i, a: i, b: 1});
+}
+
+let cursors = [];
+for (let i = 0; i < 5; i++) {
+ let session = mongos.startSession({});
+ assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
+ "initialize the session");
+ cursors.push(session.getDatabase(dbName)[testCollName].find({b: 1}).batchSize(1));
+ assert(cursors[i].hasNext());
+}
+
+refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 5);
+verifyOpenCursorCount(mongosConfig, 5);
+
+let sessionsCollectionArray;
+let lastUseValues = [];
+for (let i = 0; i < 3; i++) {
+ for (let j = 0; j < cursors.length; j++) {
+ cursors[j].next();
}
refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 5);
verifyOpenCursorCount(mongosConfig, 5);
- let sessionsCollectionArray;
- let lastUseValues = [];
- for (let i = 0; i < 3; i++) {
- for (let j = 0; j < cursors.length; j++) {
- cursors[j].next();
- }
-
- refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 5);
- verifyOpenCursorCount(mongosConfig, 5);
-
- sessionsCollectionArray = getSessions(mongosConfig);
+ sessionsCollectionArray = getSessions(mongosConfig);
- if (i == 0) {
- for (let j = 0; j < sessionsCollectionArray.length; j++) {
- lastUseValues.push(sessionsCollectionArray[j].lastUse);
- }
- } else {
- for (let j = 0; j < sessionsCollectionArray.length; j++) {
- assert.gt(sessionsCollectionArray[j].lastUse, lastUseValues[j]);
- lastUseValues[j] = sessionsCollectionArray[j].lastUse;
- }
+ if (i == 0) {
+ for (let j = 0; j < sessionsCollectionArray.length; j++) {
+ lastUseValues.push(sessionsCollectionArray[j].lastUse);
+ }
+ } else {
+ for (let j = 0; j < sessionsCollectionArray.length; j++) {
+ assert.gt(sessionsCollectionArray[j].lastUse, lastUseValues[j]);
+ lastUseValues[j] = sessionsCollectionArray[j].lastUse;
}
}
-
- // 3. Verify that letting sessions expire (simulated by manual deletion) will kill their
- // cursors.
- assert.commandWorked(mongosConfig.system.sessions.remove({}));
-
- refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 0);
- verifyOpenCursorCount(mongosConfig, 0);
-
- for (let i = 0; i < cursors.length; i++) {
- assert.commandFailedWithCode(
- db.runCommand({getMore: cursors[i]._cursor._cursorid, collection: testCollName}),
- ErrorCodes.CursorNotFound,
- 'expected getMore to fail because the cursor was killed');
- }
-
- // 4. Verify that an expired session (simulated by manual deletion) that has a currently
- // running operation will be vivified during the logical session cache refresh.
- let pinnedCursorSession = mongos.startSession();
- let pinnedCursorDB = pinnedCursorSession.getDatabase(dbName);
-
- withPinnedCursor({
- conn: mongos,
- sessionId: pinnedCursorSession,
- db: pinnedCursorDB,
- assertFunction: (cursorId, coll) => {
- assert.commandWorked(mongosConfig.system.sessions.remove({}));
- verifyOpenCursorCount(mongosConfig, 1);
-
- refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 1);
-
- let db = coll.getDB();
- assert.commandWorked(db.runCommand({killCursors: coll.getName(), cursors: [cursorId]}));
- },
- runGetMoreFunc: () => {
- db.runCommand({getMore: cursorId, collection: collName, lsid: sessionId});
- },
- failPointName: failPointName
+}
+
+// 3. Verify that letting sessions expire (simulated by manual deletion) will kill their
+// cursors.
+assert.commandWorked(mongosConfig.system.sessions.remove({}));
+
+refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 0);
+verifyOpenCursorCount(mongosConfig, 0);
+
+for (let i = 0; i < cursors.length; i++) {
+ assert.commandFailedWithCode(
+ db.runCommand({getMore: cursors[i]._cursor._cursorid, collection: testCollName}),
+ ErrorCodes.CursorNotFound,
+ 'expected getMore to fail because the cursor was killed');
+}
+
+// 4. Verify that an expired session (simulated by manual deletion) that has a currently
+// running operation will be vivified during the logical session cache refresh.
+let pinnedCursorSession = mongos.startSession();
+let pinnedCursorDB = pinnedCursorSession.getDatabase(dbName);
+
+withPinnedCursor({
+ conn: mongos,
+ sessionId: pinnedCursorSession,
+ db: pinnedCursorDB,
+ assertFunction: (cursorId, coll) => {
+ assert.commandWorked(mongosConfig.system.sessions.remove({}));
+ verifyOpenCursorCount(mongosConfig, 1);
+
+ refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 1);
+
+ let db = coll.getDB();
+ assert.commandWorked(db.runCommand({killCursors: coll.getName(), cursors: [cursorId]}));
+ },
+ runGetMoreFunc: () => {
+ db.runCommand({getMore: cursorId, collection: collName, lsid: sessionId});
},
- /* assertEndCounts */ false);
+ failPointName: failPointName
+},
+ /* assertEndCounts */ false);
- shardingTest.stop();
+shardingTest.stop();
})();
diff --git a/jstests/sharding/version1.js b/jstests/sharding/version1.js
index c8a361f72ed..22314505a9a 100644
--- a/jstests/sharding/version1.js
+++ b/jstests/sharding/version1.js
@@ -1,93 +1,89 @@
(function() {
- var s = new ShardingTest({name: "version1", shards: 1});
-
- s.adminCommand({enablesharding: "alleyinsider"});
- s.adminCommand({shardcollection: "alleyinsider.foo", key: {num: 1}});
-
- // alleyinsider.foo is supposed to have one chunk, version 1|0, in shard000
- s.printShardingStatus();
-
- a = s._connections[0].getDB("admin");
-
- assert.commandFailed(
- a.runCommand({setShardVersion: "alleyinsider.foo", configdb: s._configDB}));
-
- assert.commandFailed(
- a.runCommand({setShardVersion: "alleyinsider.foo", configdb: s._configDB, version: "a"}));
-
- assert.commandFailed(a.runCommand(
- {setShardVersion: "alleyinsider.foo", configdb: s._configDB, authoritative: true}));
-
- assert.commandFailed(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(2, 0)
- }),
- "should have failed b/c no auth");
-
- assert.commandFailed(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(2, 0),
- authoritative: true
- }),
- "should have failed because first setShardVersion needs shard info");
-
- assert.commandFailed(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(2, 0),
- authoritative: true,
- shard: "s.shard0.shardName",
- shardHost: s.s.host
- }),
- "should have failed because version is config is 1|0");
-
- var epoch = s.getDB('config').chunks.findOne({"ns": "alleyinsider.foo"}).lastmodEpoch;
- assert.commandWorked(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(1, 0),
- versionEpoch: epoch,
- authoritative: true,
- shard: s.shard0.shardName,
- shardHost: s.s.host
- }),
- "should have worked");
-
- assert.commandFailed(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: "a",
- version: new Timestamp(0, 2),
- versionEpoch: epoch
- }));
-
- assert.commandFailed(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(0, 2),
- versionEpoch: epoch
- }));
-
- assert.commandFailed(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(0, 1),
- versionEpoch: epoch
- }));
-
- // the only way that setSharVersion passes is if the shard agrees with the version
- // the shard takes its version from config directly
- // TODO bump timestamps in config
- // assert.eq( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB ,
- // version : 3 } ).oldVersion.i , 2 , "oldVersion" );
-
- // assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).mine.i , 3 , "my get
- // version A" );
- // assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get
- // version B" );
-
- s.stop();
-
+var s = new ShardingTest({name: "version1", shards: 1});
+
+s.adminCommand({enablesharding: "alleyinsider"});
+s.adminCommand({shardcollection: "alleyinsider.foo", key: {num: 1}});
+
+// alleyinsider.foo is supposed to have one chunk, version 1|0, in shard000
+s.printShardingStatus();
+
+a = s._connections[0].getDB("admin");
+
+assert.commandFailed(a.runCommand({setShardVersion: "alleyinsider.foo", configdb: s._configDB}));
+
+assert.commandFailed(
+ a.runCommand({setShardVersion: "alleyinsider.foo", configdb: s._configDB, version: "a"}));
+
+assert.commandFailed(a.runCommand(
+ {setShardVersion: "alleyinsider.foo", configdb: s._configDB, authoritative: true}));
+
+assert.commandFailed(
+ a.runCommand(
+ {setShardVersion: "alleyinsider.foo", configdb: s._configDB, version: new Timestamp(2, 0)}),
+ "should have failed b/c no auth");
+
+assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(2, 0),
+ authoritative: true
+}),
+ "should have failed because first setShardVersion needs shard info");
+
+assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(2, 0),
+ authoritative: true,
+ shard: "s.shard0.shardName",
+ shardHost: s.s.host
+}),
+ "should have failed because version in config is 1|0");
+
+var epoch = s.getDB('config').chunks.findOne({"ns": "alleyinsider.foo"}).lastmodEpoch;
+assert.commandWorked(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(1, 0),
+ versionEpoch: epoch,
+ authoritative: true,
+ shard: s.shard0.shardName,
+ shardHost: s.s.host
+}),
+ "should have worked");
+
+assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: "a",
+ version: new Timestamp(0, 2),
+ versionEpoch: epoch
+}));
+
+assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(0, 2),
+ versionEpoch: epoch
+}));
+
+assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(0, 1),
+ versionEpoch: epoch
+}));
+
+// the only way that setShardVersion passes is if the shard agrees with the version
+// the shard takes its version from config directly
+// TODO bump timestamps in config
+// assert.eq( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB ,
+// version : 3 } ).oldVersion.i , 2 , "oldVersion" );
+
+// assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).mine.i , 3 , "my get
+// version A" );
+// assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get
+// version B" );
+
+s.stop();
})();
diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js
index 89b919f7ce4..28d22c5f861 100644
--- a/jstests/sharding/version2.js
+++ b/jstests/sharding/version2.js
@@ -1,79 +1,73 @@
(function() {
- 'use strict';
-
- var s = new ShardingTest({name: "version2", shards: 1});
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: "alleyinsider"}));
- assert.commandWorked(s.s0.adminCommand({shardcollection: "alleyinsider.foo", key: {num: 1}}));
- assert.commandWorked(s.s0.adminCommand({shardcollection: "alleyinsider.bar", key: {num: 1}}));
-
- var a = s._connections[0].getDB("admin");
-
- // Setup from one client
- assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.i,
- 0);
- assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.i,
- 0);
-
- var fooEpoch = s.getDB('config').chunks.findOne({ns: 'alleyinsider.foo'}).lastmodEpoch;
- assert.commandWorked(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- authoritative: true,
- version: new Timestamp(1, 0),
- versionEpoch: fooEpoch,
- shard: s.shard0.shardName,
- shardHost: s.s.host,
- }));
-
- printjson(s.config.chunks.findOne());
-
- assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.t,
- 1);
- assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.t,
- 1);
-
- // From a different client
- var a2 = connect(`mongodb://${s.rs0.getPrimary().name}/admin`);
-
- assert.eq(
- a2.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.t,
- 1,
- "a2 global 1");
- assert.eq(a2.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.i,
- 0,
- "a2 mine 1");
-
- function simpleFindOne() {
- return a2.getMongo().getDB("alleyinsider").foo.findOne();
- }
-
- var barEpoch = s.getDB('config').chunks.findOne({ns: 'alleyinsider.bar'}).lastmodEpoch;
- assert.commandWorked(a2.runCommand({
- setShardVersion: "alleyinsider.bar",
- configdb: s._configDB,
- version: new Timestamp(1, 0),
- versionEpoch: barEpoch,
- shard: s.shard0.shardName,
- authoritative: true
- }),
- "setShardVersion bar temp");
-
- assert.throws(simpleFindOne, [], "should complain about not in sharded mode 1");
-
- // the only way that setSharVersion passes is if the shard agrees with the version
- // the shard takes its version from config directly
- // TODO bump timestamps in config
- // assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version:
- // 2 }).ok == 1, "setShardVersion a2-1");
-
- // simpleFindOne(); // now should run ok
-
- // assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version:
- // 3 }).ok == 1, "setShardVersion a2-2");
-
- // simpleFindOne(); // newer version is ok
-
- s.stop();
+'use strict';
+var s = new ShardingTest({name: "version2", shards: 1});
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: "alleyinsider"}));
+assert.commandWorked(s.s0.adminCommand({shardcollection: "alleyinsider.foo", key: {num: 1}}));
+assert.commandWorked(s.s0.adminCommand({shardcollection: "alleyinsider.bar", key: {num: 1}}));
+
+var a = s._connections[0].getDB("admin");
+
+// Setup from one client
+assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.i, 0);
+assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.i, 0);
+
+var fooEpoch = s.getDB('config').chunks.findOne({ns: 'alleyinsider.foo'}).lastmodEpoch;
+assert.commandWorked(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ authoritative: true,
+ version: new Timestamp(1, 0),
+ versionEpoch: fooEpoch,
+ shard: s.shard0.shardName,
+ shardHost: s.s.host,
+}));
+
+printjson(s.config.chunks.findOne());
+
+assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.t, 1);
+assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.t, 1);
+
+// From a different client
+var a2 = connect(`mongodb://${s.rs0.getPrimary().name}/admin`);
+
+assert.eq(a2.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.t,
+ 1,
+ "a2 global 1");
+assert.eq(a2.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.i,
+ 0,
+ "a2 mine 1");
+
+function simpleFindOne() {
+ return a2.getMongo().getDB("alleyinsider").foo.findOne();
+}
+
+var barEpoch = s.getDB('config').chunks.findOne({ns: 'alleyinsider.bar'}).lastmodEpoch;
+assert.commandWorked(a2.runCommand({
+ setShardVersion: "alleyinsider.bar",
+ configdb: s._configDB,
+ version: new Timestamp(1, 0),
+ versionEpoch: barEpoch,
+ shard: s.shard0.shardName,
+ authoritative: true
+}),
+ "setShardVersion bar temp");
+
+assert.throws(simpleFindOne, [], "should complain about not in sharded mode 1");
+
+// the only way that setShardVersion passes is if the shard agrees with the version
+// the shard takes its version from config directly
+// TODO bump timestamps in config
+// assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version:
+// 2 }).ok == 1, "setShardVersion a2-1");
+
+// simpleFindOne(); // now should run ok
+
+// assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version:
+// 3 }).ok == 1, "setShardVersion a2-2");
+
+// simpleFindOne(); // newer version is ok
+
+s.stop();
})();
diff --git a/jstests/sharding/view_rewrite.js b/jstests/sharding/view_rewrite.js
index 652937ff113..e0177f84b80 100644
--- a/jstests/sharding/view_rewrite.js
+++ b/jstests/sharding/view_rewrite.js
@@ -3,238 +3,234 @@
* aggregation against the underlying collection.
*/
(function() {
- "use strict";
-
- load("jstests/libs/profiler.js"); // For profilerHasSingleMatchingEntryOrThrow.
-
- const st = new ShardingTest({
- name: "view_rewrite",
- shards: 2,
- other: {
- rs0: {
- nodes: [
- {rsConfig: {priority: 1}},
- {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}
- ]
- },
- rs1: {
- nodes: [
- {rsConfig: {priority: 1}},
- {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}
- ]
- },
- enableBalancer: false
+"use strict";
+
+load("jstests/libs/profiler.js"); // For profilerHasSingleMatchingEntryOrThrow.
+
+const st = new ShardingTest({
+ name: "view_rewrite",
+ shards: 2,
+ other: {
+ rs0: {
+ nodes:
+ [{rsConfig: {priority: 1}}, {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}]
+ },
+ rs1: {
+ nodes:
+ [{rsConfig: {priority: 1}}, {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}]
+ },
+ enableBalancer: false
+ }
+});
+
+const mongos = st.s0;
+const config = mongos.getDB("config");
+const mongosDB = mongos.getDB("view_rewrite");
+const coll = mongosDB.getCollection("coll");
+
+assert.commandWorked(config.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), "view_rewrite-rs0");
+
+const rs0Secondary = st.rs0.getSecondary();
+const rs1Primary = st.rs1.getPrimary();
+const rs1Secondary = st.rs1.getSecondary();
+
+assert.commandWorked(config.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
+assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {a: 5}}));
+assert.commandWorked(
+ mongosDB.adminCommand({moveChunk: coll.getFullName(), find: {a: 5}, to: "view_rewrite-rs1"}));
+
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+assert.commandWorked(mongosDB.createView("view", coll.getName(), []));
+const view = mongosDB.getCollection("view");
+
+//
+// Confirms that queries run against views on mongos result in execution of a rewritten
+// aggregation that contains all expected query options.
+//
+function confirmOptionsInProfiler(shardPrimary) {
+ assert.commandWorked(shardPrimary.setProfilingLevel(2));
+
+ // Aggregation
+ assert.commandWorked(mongosDB.runCommand({
+ aggregate: "view",
+ pipeline: [],
+ comment: "agg_rewrite",
+ maxTimeMS: 5 * 60 * 1000,
+ readConcern: {level: "linearizable"},
+ cursor: {}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardPrimary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "agg_rewrite",
+ "command.maxTimeMS": {"$exists": true},
+ "command.readConcern": {level: "linearizable"},
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
}
});
- const mongos = st.s0;
- const config = mongos.getDB("config");
- const mongosDB = mongos.getDB("view_rewrite");
- const coll = mongosDB.getCollection("coll");
+ // Find
+ assert.commandWorked(mongosDB.runCommand({
+ find: "view",
+ comment: "find_rewrite",
+ maxTimeMS: 5 * 60 * 1000,
+ readConcern: {level: "linearizable"}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardPrimary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "find_rewrite",
+ "command.maxTimeMS": {"$exists": true},
+ "command.readConcern": {level: "linearizable"},
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
+ }
+ });
- assert.commandWorked(config.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), "view_rewrite-rs0");
+ // Count
+ assert.commandWorked(mongosDB.runCommand({
+ count: "view",
+ comment: "count_rewrite",
+ maxTimeMS: 5 * 60 * 1000,
+ readConcern: {level: "linearizable"}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardPrimary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "count_rewrite",
+ "command.maxTimeMS": {"$exists": true},
+ "command.readConcern": {level: "linearizable"},
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
+ }
+ });
- const rs0Secondary = st.rs0.getSecondary();
- const rs1Primary = st.rs1.getPrimary();
- const rs1Secondary = st.rs1.getSecondary();
+ // Distinct
+ assert.commandWorked(mongosDB.runCommand({
+ distinct: "view",
+ key: "a",
+ comment: "distinct_rewrite",
+ maxTimeMS: 5 * 60 * 1000,
+ readConcern: {level: "linearizable"}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardPrimary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "distinct_rewrite",
+ "command.maxTimeMS": {"$exists": true},
+ "command.readConcern": {level: "linearizable"},
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
+ }
+ });
- assert.commandWorked(config.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
- assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {a: 5}}));
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {a: 5}, to: "view_rewrite-rs1"}));
+ assert.commandWorked(shardPrimary.setProfilingLevel(0));
+ shardPrimary.system.profile.drop();
+}
+
+//
+// Confirms that queries run against views on mongos are executed against a tagged secondary, as
+// per the readPreference setting.
+//
+function confirmReadPreference(shardSecondary) {
+ assert.commandWorked(shardSecondary.setProfilingLevel(2));
+
+ // Aggregation
+ assert.commandWorked(mongosDB.runCommand({
+ query: {aggregate: "view", pipeline: [], comment: "agg_readPref", cursor: {}},
+ $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
+ readConcern: {level: "local"}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardSecondary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "agg_readPref",
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
+ }
+ });
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
+ // Find
+ assert.commandWorked(mongosDB.runCommand({
+ query: {find: "view", comment: "find_readPref", maxTimeMS: 5 * 60 * 1000},
+ $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
+ readConcern: {level: "local"}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardSecondary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "find_readPref",
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
+ }
+ });
- assert.commandWorked(mongosDB.createView("view", coll.getName(), []));
- const view = mongosDB.getCollection("view");
-
- //
- // Confirms that queries run against views on mongos result in execution of a rewritten
- // aggregation that contains all expected query options.
- //
- function confirmOptionsInProfiler(shardPrimary) {
- assert.commandWorked(shardPrimary.setProfilingLevel(2));
-
- // Aggregation
- assert.commandWorked(mongosDB.runCommand({
- aggregate: "view",
- pipeline: [],
- comment: "agg_rewrite",
- maxTimeMS: 5 * 60 * 1000,
- readConcern: {level: "linearizable"},
- cursor: {}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardPrimary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "agg_rewrite",
- "command.maxTimeMS": {"$exists": true},
- "command.readConcern": {level: "linearizable"},
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- // Find
- assert.commandWorked(mongosDB.runCommand({
- find: "view",
- comment: "find_rewrite",
- maxTimeMS: 5 * 60 * 1000,
- readConcern: {level: "linearizable"}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardPrimary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "find_rewrite",
- "command.maxTimeMS": {"$exists": true},
- "command.readConcern": {level: "linearizable"},
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- // Count
- assert.commandWorked(mongosDB.runCommand({
- count: "view",
- comment: "count_rewrite",
- maxTimeMS: 5 * 60 * 1000,
- readConcern: {level: "linearizable"}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardPrimary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "count_rewrite",
- "command.maxTimeMS": {"$exists": true},
- "command.readConcern": {level: "linearizable"},
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- // Distinct
- assert.commandWorked(mongosDB.runCommand({
- distinct: "view",
- key: "a",
- comment: "distinct_rewrite",
- maxTimeMS: 5 * 60 * 1000,
- readConcern: {level: "linearizable"}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardPrimary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "distinct_rewrite",
- "command.maxTimeMS": {"$exists": true},
- "command.readConcern": {level: "linearizable"},
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- assert.commandWorked(shardPrimary.setProfilingLevel(0));
- shardPrimary.system.profile.drop();
- }
+ // Count
+ assert.commandWorked(mongosDB.runCommand({
+ query: {count: "view", comment: "count_readPref"},
+ $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
+ readConcern: {level: "local"}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardSecondary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "count_readPref",
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
+ }
+ });
- //
- // Confirms that queries run against views on mongos are executed against a tagged secondary, as
- // per readPreference setting.
- //
- function confirmReadPreference(shardSecondary) {
- assert.commandWorked(shardSecondary.setProfilingLevel(2));
-
- // Aggregation
- assert.commandWorked(mongosDB.runCommand({
- query: {aggregate: "view", pipeline: [], comment: "agg_readPref", cursor: {}},
- $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
- readConcern: {level: "local"}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardSecondary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "agg_readPref",
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- // Find
- assert.commandWorked(mongosDB.runCommand({
- query: {find: "view", comment: "find_readPref", maxTimeMS: 5 * 60 * 1000},
- $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
- readConcern: {level: "local"}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardSecondary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "find_readPref",
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- // Count
- assert.commandWorked(mongosDB.runCommand({
- query: {count: "view", comment: "count_readPref"},
- $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
- readConcern: {level: "local"}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardSecondary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "count_readPref",
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- // Distinct
- assert.commandWorked(mongosDB.runCommand({
- query: {distinct: "view", key: "a", comment: "distinct_readPref"},
- $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
- readConcern: {level: "local"}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardSecondary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "distinct_readPref",
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- assert.commandWorked(shardSecondary.setProfilingLevel(0));
- }
+ // Distinct
+ assert.commandWorked(mongosDB.runCommand({
+ query: {distinct: "view", key: "a", comment: "distinct_readPref"},
+ $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
+ readConcern: {level: "local"}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardSecondary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "distinct_readPref",
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
+ }
+ });
+
+ assert.commandWorked(shardSecondary.setProfilingLevel(0));
+}
- confirmOptionsInProfiler(st.rs1.getPrimary().getDB(mongosDB.getName()));
+confirmOptionsInProfiler(st.rs1.getPrimary().getDB(mongosDB.getName()));
- confirmReadPreference(st.rs0.getSecondary().getDB(mongosDB.getName()));
- confirmReadPreference(st.rs1.getSecondary().getDB(mongosDB.getName()));
+confirmReadPreference(st.rs0.getSecondary().getDB(mongosDB.getName()));
+confirmReadPreference(st.rs1.getSecondary().getDB(mongosDB.getName()));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/views.js b/jstests/sharding/views.js
index c1ea43d8e93..876406902a6 100644
--- a/jstests/sharding/views.js
+++ b/jstests/sharding/views.js
@@ -3,170 +3,165 @@
* @tags: [requires_find_command]
*/
(function() {
- "use strict";
-
- // For profilerHasSingleMatchingEntryOrThrow.
- load("jstests/libs/profiler.js");
-
- // Given sharded explain output in 'shardedExplain', verifies that the explain mode 'verbosity'
- // affected the output verbosity appropriately, and that the response has the expected format.
- // Set 'optimizedAwayPipeline' to true if the pipeline is expected to be optimized away.
- function verifyExplainResult(
- {shardedExplain = null, verbosity = "", optimizedAwayPipeline = false} = {}) {
- assert.commandWorked(shardedExplain);
- assert(shardedExplain.hasOwnProperty("shards"), tojson(shardedExplain));
- for (let elem in shardedExplain.shards) {
- let shard = shardedExplain.shards[elem];
- let root;
- if (optimizedAwayPipeline) {
- assert(shard.hasOwnProperty("queryPlanner"), tojson(shardedExplain));
- root = shard;
- } else {
- assert(shard.stages[0].hasOwnProperty("$cursor"), tojson(shardedExplain));
- assert(shard.stages[0].$cursor.hasOwnProperty("queryPlanner"),
- tojson(shardedExplain));
- root = shard.stages[0].$cursor;
- }
- if (verbosity === "queryPlanner") {
- assert(!root.hasOwnProperty("executionStats"), tojson(shardedExplain));
- } else if (verbosity === "executionStats") {
- assert(root.hasOwnProperty("executionStats"), tojson(shardedExplain));
- assert(!root.executionStats.hasOwnProperty("allPlansExecution"),
- tojson("shardedExplain"));
- } else {
- assert.eq(verbosity, "allPlansExecution", tojson(shardedExplain));
- assert(root.hasOwnProperty("executionStats"), tojson(shardedExplain));
- assert(root.executionStats.hasOwnProperty("allPlansExecution"),
- tojson(shardedExplain));
- }
+"use strict";
+
+// For profilerHasSingleMatchingEntryOrThrow.
+load("jstests/libs/profiler.js");
+
+// Given sharded explain output in 'shardedExplain', verifies that the explain mode 'verbosity'
+// affected the output verbosity appropriately, and that the response has the expected format.
+// Set 'optimizedAwayPipeline' to true if the pipeline is expected to be optimized away.
+function verifyExplainResult(
+ {shardedExplain = null, verbosity = "", optimizedAwayPipeline = false} = {}) {
+ assert.commandWorked(shardedExplain);
+ assert(shardedExplain.hasOwnProperty("shards"), tojson(shardedExplain));
+ for (let elem in shardedExplain.shards) {
+ let shard = shardedExplain.shards[elem];
+ let root;
+ if (optimizedAwayPipeline) {
+ assert(shard.hasOwnProperty("queryPlanner"), tojson(shardedExplain));
+ root = shard;
+ } else {
+ assert(shard.stages[0].hasOwnProperty("$cursor"), tojson(shardedExplain));
+ assert(shard.stages[0].$cursor.hasOwnProperty("queryPlanner"), tojson(shardedExplain));
+ root = shard.stages[0].$cursor;
+ }
+ if (verbosity === "queryPlanner") {
+ assert(!root.hasOwnProperty("executionStats"), tojson(shardedExplain));
+ } else if (verbosity === "executionStats") {
+ assert(root.hasOwnProperty("executionStats"), tojson(shardedExplain));
+        assert(!root.executionStats.hasOwnProperty("allPlansExecution"),
+               tojson(shardedExplain));
+ } else {
+ assert.eq(verbosity, "allPlansExecution", tojson(shardedExplain));
+ assert(root.hasOwnProperty("executionStats"), tojson(shardedExplain));
+ assert(root.executionStats.hasOwnProperty("allPlansExecution"), tojson(shardedExplain));
}
}
+}
- let st = new ShardingTest({name: "views_sharded", shards: 2, other: {enableBalancer: false}});
+let st = new ShardingTest({name: "views_sharded", shards: 2, other: {enableBalancer: false}});
- let mongos = st.s;
- let config = mongos.getDB("config");
- let db = mongos.getDB(jsTestName());
- db.dropDatabase();
+let mongos = st.s;
+let config = mongos.getDB("config");
+let db = mongos.getDB(jsTestName());
+db.dropDatabase();
- let coll = db.getCollection("coll");
+let coll = db.getCollection("coll");
- assert.commandWorked(config.adminCommand({enableSharding: db.getName()}));
- st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
- assert.commandWorked(config.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
+assert.commandWorked(config.adminCommand({enableSharding: db.getName()}));
+st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+assert.commandWorked(config.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
- assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {a: 6}}));
- assert.commandWorked(
- db.adminCommand({moveChunk: coll.getFullName(), find: {a: 25}, to: st.shard1.shardName}));
+assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {a: 6}}));
+assert.commandWorked(
+ db.adminCommand({moveChunk: coll.getFullName(), find: {a: 25}, to: st.shard1.shardName}));
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
- assert.commandWorked(db.createView("view", coll.getName(), [{$match: {a: {$gte: 4}}}]));
- let view = db.getCollection("view");
+assert.commandWorked(db.createView("view", coll.getName(), [{$match: {a: {$gte: 4}}}]));
+let view = db.getCollection("view");
- const explainVerbosities = ["queryPlanner", "executionStats", "allPlansExecution"];
+const explainVerbosities = ["queryPlanner", "executionStats", "allPlansExecution"];
- //
- // find
- //
- assert.eq(5, view.find({a: {$lte: 8}}).itcount());
+//
+// find
+//
+assert.eq(5, view.find({a: {$lte: 8}}).itcount());
- let result = db.runCommand({explain: {find: "view", filter: {a: {$lte: 7}}}});
+let result = db.runCommand({explain: {find: "view", filter: {a: {$lte: 7}}}});
+verifyExplainResult(
+ {shardedExplain: result, verbosity: "allPlansExecution", optimizedAwayPipeline: true});
+for (let verbosity of explainVerbosities) {
+ result = db.runCommand({explain: {find: "view", filter: {a: {$lte: 7}}}, verbosity: verbosity});
verifyExplainResult(
- {shardedExplain: result, verbosity: "allPlansExecution", optimizedAwayPipeline: true});
- for (let verbosity of explainVerbosities) {
- result =
- db.runCommand({explain: {find: "view", filter: {a: {$lte: 7}}}, verbosity: verbosity});
- verifyExplainResult(
- {shardedExplain: result, verbosity: verbosity, optimizedAwayPipeline: true});
- }
-
- //
- // aggregate
- //
- assert.eq(5, view.aggregate([{$match: {a: {$lte: 8}}}]).itcount());
-
- // Test that the explain:true flag for the aggregate command results in queryPlanner verbosity.
- result =
- db.runCommand({aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], explain: true});
+ {shardedExplain: result, verbosity: verbosity, optimizedAwayPipeline: true});
+}
+
+//
+// aggregate
+//
+assert.eq(5, view.aggregate([{$match: {a: {$lte: 8}}}]).itcount());
+
+// Test that the explain:true flag for the aggregate command results in queryPlanner verbosity.
+result = db.runCommand({aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], explain: true});
+verifyExplainResult(
+ {shardedExplain: result, verbosity: "queryPlanner", optimizedAwayPipeline: true});
+
+result =
+ db.runCommand({explain: {aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], cursor: {}}});
+verifyExplainResult(
+ {shardedExplain: result, verbosity: "allPlansExecution", optimizedAwayPipeline: true});
+for (let verbosity of explainVerbosities) {
+ result = db.runCommand({
+ explain: {aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], cursor: {}},
+ verbosity: verbosity
+ });
verifyExplainResult(
- {shardedExplain: result, verbosity: "queryPlanner", optimizedAwayPipeline: true});
-
+ {shardedExplain: result, verbosity: verbosity, optimizedAwayPipeline: true});
+}
+
+//
+// count
+//
+assert.eq(5, view.count({a: {$lte: 8}}));
+
+result = db.runCommand({explain: {count: "view", query: {a: {$lte: 8}}}});
+verifyExplainResult({shardedExplain: result, verbosity: "allPlansExecution"});
+for (let verbosity of explainVerbosities) {
+ result = db.runCommand({explain: {count: "view", query: {a: {$lte: 8}}}, verbosity: verbosity});
+ verifyExplainResult({shardedExplain: result, verbosity: verbosity});
+}
+
+//
+// distinct
+//
+result = db.runCommand({distinct: "view", key: "a", query: {a: {$lte: 8}}});
+assert.commandWorked(result);
+assert.eq([4, 5, 6, 7, 8], result.values.sort());
+
+result = db.runCommand({explain: {distinct: "view", key: "a", query: {a: {$lte: 8}}}});
+verifyExplainResult({shardedExplain: result, verbosity: "allPlansExecution"});
+for (let verbosity of explainVerbosities) {
result = db.runCommand(
- {explain: {aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], cursor: {}}});
- verifyExplainResult(
- {shardedExplain: result, verbosity: "allPlansExecution", optimizedAwayPipeline: true});
- for (let verbosity of explainVerbosities) {
- result = db.runCommand({
- explain: {aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], cursor: {}},
- verbosity: verbosity
- });
- verifyExplainResult(
- {shardedExplain: result, verbosity: verbosity, optimizedAwayPipeline: true});
+ {explain: {distinct: "view", key: "a", query: {a: {$lte: 8}}}, verbosity: verbosity});
+ verifyExplainResult({shardedExplain: result, verbosity: verbosity});
+}
+
+//
+// Confirm cleanupOrphaned command fails.
+//
+result = st.getPrimaryShard(db.getName()).getDB("admin").runCommand({
+ cleanupOrphaned: view.getFullName()
+});
+assert.commandFailedWithCode(result, ErrorCodes.CommandNotSupportedOnView);
+
+//
+// Confirm getShardVersion command fails.
+//
+assert.commandFailedWithCode(db.adminCommand({getShardVersion: view.getFullName()}),
+ ErrorCodes.NamespaceNotSharded);
+
+//
+// Confirm that the comment parameter on a find command is retained when rewritten as an
+// expanded aggregation on the view.
+//
+let sdb = st.shard0.getDB(jsTestName());
+assert.commandWorked(sdb.setProfilingLevel(2));
+
+assert.eq(5, view.find({a: {$lte: 8}}).comment("agg_comment").itcount());
+
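+// The shard-side profiler entry should show the rewritten aggregate carrying the find's comment;
+// 'needsMerge: true' indicates the shard ran only the shard portion of the split pipeline, and
+// the absence of $mergeCursors confirms this entry is not the merging pipeline.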
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: sdb,
+ filter: {
+ "command.aggregate": coll.getName(),
+ "command.comment": "agg_comment",
+ "command.needsMerge": true,
+ "command.pipeline.$mergeCursors": {$exists: false}
}
+});
- //
- // count
- //
- assert.eq(5, view.count({a: {$lte: 8}}));
-
- result = db.runCommand({explain: {count: "view", query: {a: {$lte: 8}}}});
- verifyExplainResult({shardedExplain: result, verbosity: "allPlansExecution"});
- for (let verbosity of explainVerbosities) {
- result =
- db.runCommand({explain: {count: "view", query: {a: {$lte: 8}}}, verbosity: verbosity});
- verifyExplainResult({shardedExplain: result, verbosity: verbosity});
- }
-
- //
- // distinct
- //
- result = db.runCommand({distinct: "view", key: "a", query: {a: {$lte: 8}}});
- assert.commandWorked(result);
- assert.eq([4, 5, 6, 7, 8], result.values.sort());
-
- result = db.runCommand({explain: {distinct: "view", key: "a", query: {a: {$lte: 8}}}});
- verifyExplainResult({shardedExplain: result, verbosity: "allPlansExecution"});
- for (let verbosity of explainVerbosities) {
- result = db.runCommand(
- {explain: {distinct: "view", key: "a", query: {a: {$lte: 8}}}, verbosity: verbosity});
- verifyExplainResult({shardedExplain: result, verbosity: verbosity});
- }
-
- //
- // Confirm cleanupOrphaned command fails.
- //
- result = st.getPrimaryShard(db.getName()).getDB("admin").runCommand({
- cleanupOrphaned: view.getFullName()
- });
- assert.commandFailedWithCode(result, ErrorCodes.CommandNotSupportedOnView);
-
- //
- // Confirm getShardVersion command fails.
- //
- assert.commandFailedWithCode(db.adminCommand({getShardVersion: view.getFullName()}),
- ErrorCodes.NamespaceNotSharded);
-
- //
- // Confirm that the comment parameter on a find command is retained when rewritten as an
- // expanded aggregation on the view.
- //
- let sdb = st.shard0.getDB(jsTestName());
- assert.commandWorked(sdb.setProfilingLevel(2));
-
- assert.eq(5, view.find({a: {$lte: 8}}).comment("agg_comment").itcount());
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: sdb,
- filter: {
- "command.aggregate": coll.getName(),
- "command.comment": "agg_comment",
- "command.needsMerge": true,
- "command.pipeline.$mergeCursors": {$exists: false}
- }
- });
-
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/wildcard_index_banned_for_shard_key.js b/jstests/sharding/wildcard_index_banned_for_shard_key.js
index 1b8f8bd4dea..17f905a77fd 100644
--- a/jstests/sharding/wildcard_index_banned_for_shard_key.js
+++ b/jstests/sharding/wildcard_index_banned_for_shard_key.js
@@ -3,38 +3,38 @@
//
(function() {
- 'use strict';
+'use strict';
- const st = new ShardingTest({mongos: 1, shards: 2});
- const kDbName = 'wildcard_index_banned_for_shard_key';
- const mongos = st.s0;
+const st = new ShardingTest({mongos: 1, shards: 2});
+const kDbName = 'wildcard_index_banned_for_shard_key';
+const mongos = st.s0;
- function assertCannotShardCollectionOnWildcardIndex(keyDoc) {
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+function assertCannotShardCollectionOnWildcardIndex(keyDoc) {
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- assert.commandFailedWithCode(
- mongos.adminCommand({shardCollection: `${kDbName}.foo`, key: keyDoc}),
- ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(
+ mongos.adminCommand({shardCollection: `${kDbName}.foo`, key: keyDoc}),
+ ErrorCodes.InvalidOptions);
- assert.eq(mongos.getDB('config').collections.count({_id: `${kDbName}.foo`}), 0);
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- }
+ assert.eq(mongos.getDB('config').collections.count({_id: `${kDbName}.foo`}), 0);
+ assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+}
- // Can't shard on a path supported by a general wildcard index.
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({"$**": 1}));
- assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1}));
- assertCannotShardCollectionOnWildcardIndex({a: 1});
+// Can't shard on a path supported by a general wildcard index.
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({"$**": 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1}));
+assertCannotShardCollectionOnWildcardIndex({a: 1});
- // Can't shard on a path supported by a targeted wildcard index.
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({"a.$**": 1}));
- assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1}));
- assertCannotShardCollectionOnWildcardIndex({a: 1});
+// Can't shard on a path supported by a targeted wildcard index.
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({"a.$**": 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1}));
+assertCannotShardCollectionOnWildcardIndex({a: 1});
- // Can't shard on a path supported by wildcard index with projection option.
- assert.commandWorked(
- mongos.getDB(kDbName).foo.createIndex({"$**": 1}, {wildcardProjection: {a: 1}}));
- assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1}));
- assertCannotShardCollectionOnWildcardIndex({a: 1});
+// Can't shard on a path supported by wildcard index with projection option.
+assert.commandWorked(
+ mongos.getDB(kDbName).foo.createIndex({"$**": 1}, {wildcardProjection: {a: 1}}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1}));
+assertCannotShardCollectionOnWildcardIndex({a: 1});
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js
index b88971e8c3d..0c808102bf3 100644
--- a/jstests/sharding/write_cmd_auto_split.js
+++ b/jstests/sharding/write_cmd_auto_split.js
@@ -2,164 +2,160 @@
 * Tests that the auto split will be triggered when using write commands.
*/
(function() {
- 'use strict';
- load('jstests/sharding/autosplit_include.js');
+'use strict';
+load('jstests/sharding/autosplit_include.js');
- var st = new ShardingTest({shards: 1, other: {chunkSize: 1, enableAutoSplit: true}});
+var st = new ShardingTest({shards: 1, other: {chunkSize: 1, enableAutoSplit: true}});
- var configDB = st.s.getDB('config');
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
+var configDB = st.s.getDB('config');
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
- var doc1k = (new Array(1024)).join('x');
- var testDB = st.s.getDB('test');
+var doc1k = (new Array(1024)).join('x');
+var testDB = st.s.getDB('test');
- jsTest.log('Test single batch insert should auto-split');
+jsTest.log('Test single batch insert should auto-split');
- assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());
+assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());
- // This should result in a little over 3MB inserted into the chunk, so with
- // a max chunk size of 1MB we'd expect the autosplitter to split this into
- // at least 3 chunks
- for (var x = 0; x < 3100; x++) {
- assert.writeOK(testDB.runCommand({
- insert: 'insert',
- documents: [{x: x, v: doc1k}],
- ordered: false,
- writeConcern: {w: 1}
- }));
- }
+// This should result in a little over 3MB inserted into the chunk, so with
+// a max chunk size of 1MB we'd expect the autosplitter to split this into
+// at least 3 chunks
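+// (3100 single-document inserts x ~1KB of payload each comes to roughly 3MB.)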
+for (var x = 0; x < 3100; x++) {
+ assert.writeOK(testDB.runCommand(
+ {insert: 'insert', documents: [{x: x, v: doc1k}], ordered: false, writeConcern: {w: 1}}));
+}
- waitForOngoingChunkSplits(st);
+waitForOngoingChunkSplits(st);
- // Inserted batch is a multiple of the chunkSize, expect the chunks to split into
- // more than 2.
- assert.gt(configDB.chunks.find({"ns": "test.insert"}).itcount(), 2);
- testDB.dropDatabase();
+// Inserted batch is a multiple of the chunkSize, expect the chunks to split into
+// more than 2.
+assert.gt(configDB.chunks.find({"ns": "test.insert"}).itcount(), 2);
+testDB.dropDatabase();
- jsTest.log('Test single batch update should auto-split');
+jsTest.log('Test single batch update should auto-split');
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
- assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
+assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
- for (var x = 0; x < 2100; x++) {
- assert.writeOK(testDB.runCommand({
- update: 'update',
- updates: [{q: {x: x}, u: {x: x, v: doc1k}, upsert: true}],
- ordered: false,
- writeConcern: {w: 1}
- }));
- }
+for (var x = 0; x < 2100; x++) {
+ assert.writeOK(testDB.runCommand({
+ update: 'update',
+ updates: [{q: {x: x}, u: {x: x, v: doc1k}, upsert: true}],
+ ordered: false,
+ writeConcern: {w: 1}
+ }));
+}
- waitForOngoingChunkSplits(st);
+waitForOngoingChunkSplits(st);
- assert.gt(configDB.chunks.find({"ns": "test.update"}).itcount(), 1);
- testDB.dropDatabase();
+assert.gt(configDB.chunks.find({"ns": "test.update"}).itcount(), 1);
+testDB.dropDatabase();
- jsTest.log('Test single delete should not auto-split');
+jsTest.log('Test single delete should not auto-split');
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
- assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
+assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
- for (var x = 0; x < 1100; x++) {
- assert.writeOK(testDB.runCommand({
- delete: 'delete',
- deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
- ordered: false,
- writeConcern: {w: 1}
- }));
- }
+for (var x = 0; x < 1100; x++) {
+ assert.writeOK(testDB.runCommand({
+ delete: 'delete',
+ deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
+ ordered: false,
+ writeConcern: {w: 1}
+ }));
+}
- // If we are autosplitting (which we shouldn't be), we want to wait until
- // it's finished, otherwise we could falsely think no autosplitting was
- // done when really it was just in progress.
- waitForOngoingChunkSplits(st);
+// If we are autosplitting (which we shouldn't be), we want to wait until
+// it's finished, otherwise we could falsely think no autosplitting was
+// done when really it was just in progress.
+waitForOngoingChunkSplits(st);
- assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
- testDB.dropDatabase();
+assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
+testDB.dropDatabase();
- jsTest.log('Test batched insert should auto-split');
+jsTest.log('Test batched insert should auto-split');
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
- assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());
+assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());
- // Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
- // we are going to be conservative.
- for (var x = 0; x < 2100; x += 400) {
- var docs = [];
+// Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
+// we are going to be conservative.
+for (var x = 0; x < 2100; x += 400) {
+ var docs = [];
- for (var y = 0; y < 400; y++) {
- docs.push({x: (x + y), v: doc1k});
- }
-
- assert.writeOK(testDB.runCommand(
- {insert: 'insert', documents: docs, ordered: false, writeConcern: {w: 1}}));
+ for (var y = 0; y < 400; y++) {
+ docs.push({x: (x + y), v: doc1k});
}
- waitForOngoingChunkSplits(st);
+ assert.writeOK(testDB.runCommand(
+ {insert: 'insert', documents: docs, ordered: false, writeConcern: {w: 1}}));
+}
- assert.gt(configDB.chunks.find({"ns": "test.insert"}).itcount(), 1);
- testDB.dropDatabase();
+waitForOngoingChunkSplits(st);
- jsTest.log('Test batched update should auto-split');
+assert.gt(configDB.chunks.find({"ns": "test.insert"}).itcount(), 1);
+testDB.dropDatabase();
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
+jsTest.log('Test batched update should auto-split');
- assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
- for (var x = 0; x < 2100; x += 400) {
- var docs = [];
+assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
- for (var y = 0; y < 400; y++) {
- var id = x + y;
- docs.push({q: {x: id}, u: {x: id, v: doc1k}, upsert: true});
- }
+for (var x = 0; x < 2100; x += 400) {
+ var docs = [];
- assert.writeOK(testDB.runCommand(
- {update: 'update', updates: docs, ordered: false, writeConcern: {w: 1}}));
+ for (var y = 0; y < 400; y++) {
+ var id = x + y;
+ docs.push({q: {x: id}, u: {x: id, v: doc1k}, upsert: true});
}
- waitForOngoingChunkSplits(st);
+ assert.writeOK(
+ testDB.runCommand({update: 'update', updates: docs, ordered: false, writeConcern: {w: 1}}));
+}
- assert.gt(configDB.chunks.find({"ns": "test.update"}).itcount(), 1);
- testDB.dropDatabase();
+waitForOngoingChunkSplits(st);
- jsTest.log('Test batched delete should not auto-split');
+assert.gt(configDB.chunks.find({"ns": "test.update"}).itcount(), 1);
+testDB.dropDatabase();
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
+jsTest.log('Test batched delete should not auto-split');
- assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
- for (var x = 0; x < 2100; x += 400) {
- var docs = [];
+assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
- for (var y = 0; y < 400; y++) {
- var id = x + y;
- docs.push({q: {x: id, v: doc1k}, top: 0});
- }
+for (var x = 0; x < 2100; x += 400) {
+ var docs = [];
- assert.writeOK(testDB.runCommand({
- delete: 'delete',
- deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
- ordered: false,
- writeConcern: {w: 1}
- }));
+ for (var y = 0; y < 400; y++) {
+ var id = x + y;
+ docs.push({q: {x: id, v: doc1k}, top: 0});
}
- // If we are autosplitting (which we shouldn't be), we want to wait until
- // it's finished, otherwise we could falsely think no autosplitting was
- // done when really it was just in progress.
- waitForOngoingChunkSplits(st);
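+    // Note: the 'docs' array built above is not passed to the delete command below; each outer
+    // iteration sends a single delete for {x: x, v: doc1k}.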
+ assert.writeOK(testDB.runCommand({
+ delete: 'delete',
+ deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
+ ordered: false,
+ writeConcern: {w: 1}
+ }));
+}
+
+// If we are autosplitting (which we shouldn't be), we want to wait until
+// it's finished, otherwise we could falsely think no autosplitting was
+// done when really it was just in progress.
+waitForOngoingChunkSplits(st);
- assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
+assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/write_commands_sharding_state.js b/jstests/sharding/write_commands_sharding_state.js
index 395d328a138..5d6595e5758 100644
--- a/jstests/sharding/write_commands_sharding_state.js
+++ b/jstests/sharding/write_commands_sharding_state.js
@@ -3,84 +3,82 @@
// @tags: [requires_persistence]
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({name: "write_commands", mongos: 2, shards: 2});
+var st = new ShardingTest({name: "write_commands", mongos: 2, shards: 2});
- var dbTestName = 'WriteCommandsTestDB';
- var collName = dbTestName + '.TestColl';
+var dbTestName = 'WriteCommandsTestDB';
+var collName = dbTestName + '.TestColl';
- assert.commandWorked(st.s0.adminCommand({enablesharding: dbTestName}));
- st.ensurePrimaryShard(dbTestName, st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({enablesharding: dbTestName}));
+st.ensurePrimaryShard(dbTestName, st.shard0.shardName);
- assert.commandWorked(
- st.s0.adminCommand({shardCollection: collName, key: {Key: 1}, unique: true}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: collName, key: {Key: 1}, unique: true}));
- // Split at keys 10 and 20
- assert.commandWorked(st.s0.adminCommand({split: collName, middle: {Key: 10}}));
- assert.commandWorked(st.s0.adminCommand({split: collName, middle: {Key: 20}}));
+// Split at keys 10 and 20
+assert.commandWorked(st.s0.adminCommand({split: collName, middle: {Key: 10}}));
+assert.commandWorked(st.s0.adminCommand({split: collName, middle: {Key: 20}}));
- printjson(st.config.getSiblingDB('config').chunks.find().toArray());
+printjson(st.config.getSiblingDB('config').chunks.find().toArray());
- // Move 10 and 20 to st.shard0.shardName1
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: collName, find: {Key: 19}, to: st.shard1.shardName, _waitForDelete: true}));
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: collName, find: {Key: 21}, to: st.shard1.shardName, _waitForDelete: true}));
+// Move the [10, 20) and [20, MaxKey) chunks to st.shard1.shardName
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: collName, find: {Key: 19}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: collName, find: {Key: 21}, to: st.shard1.shardName, _waitForDelete: true}));
- printjson(st.config.getSiblingDB('config').chunks.find().toArray());
+printjson(st.config.getSiblingDB('config').chunks.find().toArray());
- // Insert one document in each chunk, which we will use to change
- assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 1}));
- assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 11}));
- assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 21}));
+// Insert one document in each chunk, which we will later modify
+assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 1}));
+assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 11}));
+assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 21}));
- // Make sure the documents are correctly placed
- printjson(st.shard0.getDB(dbTestName).TestColl.find().toArray());
- printjson(st.shard1.getDB(dbTestName).TestColl.find().toArray());
+// Make sure the documents are correctly placed
+printjson(st.shard0.getDB(dbTestName).TestColl.find().toArray());
+printjson(st.shard1.getDB(dbTestName).TestColl.find().toArray());
- assert.eq(1, st.shard0.getDB(dbTestName).TestColl.count());
- assert.eq(2, st.shard1.getDB(dbTestName).TestColl.count());
+assert.eq(1, st.shard0.getDB(dbTestName).TestColl.count());
+assert.eq(2, st.shard1.getDB(dbTestName).TestColl.count());
- assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 1}).count());
- assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 11}).count());
- assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 21}).count());
+assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 1}).count());
+assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 11}).count());
+assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 21}).count());
- // Move chunk [0, 19] to st.shard0.shardName and make sure the documents are correctly placed
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: collName, find: {Key: 19}, _waitForDelete: true, to: st.shard0.shardName}));
+// Move the [10, 20) chunk back to st.shard0.shardName and make sure the documents are correctly
+// placed
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: collName, find: {Key: 19}, _waitForDelete: true, to: st.shard0.shardName}));
- printjson(st.config.getSiblingDB('config').chunks.find().toArray());
- printjson(st.shard0.getDB(dbTestName).TestColl.find({}).toArray());
- printjson(st.shard1.getDB(dbTestName).TestColl.find({}).toArray());
+printjson(st.config.getSiblingDB('config').chunks.find().toArray());
+printjson(st.shard0.getDB(dbTestName).TestColl.find({}).toArray());
+printjson(st.shard1.getDB(dbTestName).TestColl.find({}).toArray());
- // Now restart all mongod instances, so they don't know yet that they are sharded
- st.restartShardRS(0);
- st.restartShardRS(1);
+// Now restart all mongod instances, so they don't know yet that they are sharded
+st.restartShardRS(0);
+st.restartShardRS(1);
- // Now that both mongod shards are restarted, they don't know yet that they are part of a
- // sharded
- // cluster until they get a setShardVerion command. Mongos instance s1 has stale metadata and
- // doesn't know that chunk with key 19 has moved to st.shard0.shardName so it will send it to
- // st.shard1.shardName at
- // first.
- //
- // Shard0001 would only send back a stale config exception if it receives a setShardVersion
- // command. The bug that this test validates is that setShardVersion is indeed being sent (for
- // more
- // information, see SERVER-19395).
- st.s1.getDB(dbTestName).TestColl.update({Key: 11}, {$inc: {Counter: 1}}, {upsert: true});
+// Now that both mongod shards are restarted, they don't know yet that they are part of a sharded
+// cluster until they get a setShardVersion command. Mongos instance s1 has stale metadata and
+// doesn't know that the chunk with key 19 has moved to st.shard0.shardName, so it will send the
+// update to st.shard1.shardName at first.
+//
+// Shard0001 would only send back a stale config exception if it receives a setShardVersion
+// command. The bug that this test validates is that setShardVersion is indeed being sent (for
+// more information, see SERVER-19395).
+st.s1.getDB(dbTestName).TestColl.update({Key: 11}, {$inc: {Counter: 1}}, {upsert: true});
- printjson(st.shard0.getDB(dbTestName).TestColl.find({}).toArray());
- printjson(st.shard1.getDB(dbTestName).TestColl.find({}).toArray());
+printjson(st.shard0.getDB(dbTestName).TestColl.find({}).toArray());
+printjson(st.shard1.getDB(dbTestName).TestColl.find({}).toArray());
- assert.eq(2, st.shard0.getDB(dbTestName).TestColl.count());
- assert.eq(1, st.shard1.getDB(dbTestName).TestColl.count());
+assert.eq(2, st.shard0.getDB(dbTestName).TestColl.count());
+assert.eq(1, st.shard1.getDB(dbTestName).TestColl.count());
- assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 1}).count());
- assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 11}).count());
- assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 21}).count());
-
- st.stop();
+assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 1}).count());
+assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 11}).count());
+assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 21}).count());
+st.stop();
})();
diff --git a/jstests/sharding/write_transactions_during_migration.js b/jstests/sharding/write_transactions_during_migration.js
index d8d86bd4516..9b043eb0f1a 100644
--- a/jstests/sharding/write_transactions_during_migration.js
+++ b/jstests/sharding/write_transactions_during_migration.js
@@ -13,159 +13,158 @@ load('./jstests/libs/chunk_manipulation_util.js');
* 4. Retry writes and confirm that writes are not duplicated.
*/
(function() {
- "use strict";
-
- load("jstests/libs/retryable_writes_util.js");
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
- var st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}});
- st.adminCommand({enableSharding: 'test'});
- st.ensurePrimaryShard('test', st.shard0.shardName);
- st.adminCommand({shardCollection: 'test.user', key: {x: 1}});
- assert.commandWorked(st.s.adminCommand({split: 'test.user', middle: {x: 0}}));
-
- pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
- var joinMoveChunk =
- moveChunkParallel(staticMongod, st.s.host, {x: 0}, null, 'test.user', st.shard1.shardName);
-
- waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- const insertCmd = {
- insert: 'user',
- documents: [
- // For findAndModify not touching chunk being migrated.
- {x: -30},
- // For changing doc to become owned by chunk being migrated.
- {x: -20},
- {x: -20},
- // For basic insert.
- {x: 10},
- // For changing doc to become owned by another chunk not being migrated.
- {x: 20},
- {x: 20},
- // For basic findAndModify.
- {x: 30}
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(34),
- };
-
- var testDB = st.getDB('test');
- const insertResult = assert.commandWorked(testDB.runCommand(insertCmd));
-
- const findAndModCmd = {
- findAndModify: 'user',
- query: {x: 30},
- update: {$inc: {y: 1}},
- new: true,
- upsert: true,
- lsid: {id: UUID()},
- txnNumber: NumberLong(37),
- };
-
- const findAndModifyResult = assert.commandWorked(testDB.runCommand(findAndModCmd));
-
- const changeDocToChunkNotMigrated = {
- findAndModify: 'user',
- query: {x: 20},
- update: {$set: {x: -120}, $inc: {y: 1}},
- new: false,
- upsert: true,
- lsid: {id: UUID()},
- txnNumber: NumberLong(37),
- };
-
- const changeDocToNotMigratedResult =
- assert.commandWorked(testDB.runCommand(changeDocToChunkNotMigrated));
-
- const changeDocToChunkMigrated = {
- findAndModify: 'user',
- query: {x: -20},
- update: {$set: {x: 120}, $inc: {y: 1}},
- new: false,
- upsert: true,
- lsid: {id: UUID()},
- txnNumber: NumberLong(37),
- };
-
- const changeDocToMigratedResult =
- assert.commandWorked(testDB.runCommand(changeDocToChunkMigrated));
-
- const findAndModifyNotMigrated = {
- findAndModify: 'user',
- query: {x: -30},
- update: {$inc: {y: 1}},
- new: false,
- upsert: true,
- lsid: {id: UUID()},
- txnNumber: NumberLong(37),
- };
-
- const findAndModifyNotMigratedResult =
- assert.commandWorked(testDB.runCommand(findAndModifyNotMigrated));
-
- unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
- joinMoveChunk();
-
- ///////////////////////////////////////////////////////////////////////////////////////////////
- // Retry phase
-
- var insertRetryResult = assert.commandWorked(testDB.runCommand(insertCmd));
-
- assert.eq(insertResult.ok, insertRetryResult.ok);
- assert.eq(insertResult.n, insertRetryResult.n);
- assert.eq(insertResult.writeErrors, insertRetryResult.writeErrors);
- assert.eq(insertResult.writeConcernErrors, insertRetryResult.writeConcernErrors);
-
- assert.eq(1, testDB.user.find({x: 10}).itcount());
- assert.eq(1, testDB.user.find({x: 30}).itcount());
-
- var findAndModifyRetryResult = assert.commandWorked(testDB.runCommand(findAndModCmd));
-
- assert.eq(findAndModifyResult.ok, findAndModifyRetryResult.ok);
- assert.eq(findAndModifyResult.value, findAndModifyRetryResult.value);
- assert.eq(findAndModifyResult.lastErrorObject, findAndModifyRetryResult.lastErrorObject);
-
- assert.eq(1, testDB.user.findOne({x: 30}).y);
-
- let changeDocToNotMigratedRetryResult =
- assert.commandWorked(testDB.runCommand(changeDocToChunkNotMigrated));
-
- assert.eq(changeDocToNotMigratedResult.ok, changeDocToNotMigratedRetryResult.ok);
- assert.eq(changeDocToNotMigratedResult.value, changeDocToNotMigratedRetryResult.value);
- assert.eq(changeDocToNotMigratedResult.lastErrorObject,
- changeDocToNotMigratedRetryResult.lastErrorObject);
-
- assert.eq(1, testDB.user.find({x: -120}).itcount());
-
- let changeDocToMigratedRetryResult =
- assert.commandWorked(testDB.runCommand(changeDocToChunkMigrated));
-
- assert.eq(changeDocToMigratedResult.ok, changeDocToMigratedRetryResult.ok);
- assert.eq(changeDocToMigratedResult.value, changeDocToMigratedRetryResult.value);
- assert.eq(changeDocToMigratedResult.lastErrorObject,
- changeDocToMigratedRetryResult.lastErrorObject);
-
- assert.eq(1, testDB.user.find({x: 120}).itcount());
+"use strict";
+
+load("jstests/libs/retryable_writes_util.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+var st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}});
+st.adminCommand({enableSharding: 'test'});
+st.ensurePrimaryShard('test', st.shard0.shardName);
+st.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+assert.commandWorked(st.s.adminCommand({split: 'test.user', middle: {x: 0}}));
+
+pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+var joinMoveChunk =
+ moveChunkParallel(staticMongod, st.s.host, {x: 0}, null, 'test.user', st.shard1.shardName);
+
+waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+const insertCmd = {
+ insert: 'user',
+ documents: [
+ // For findAndModify not touching chunk being migrated.
+ {x: -30},
+ // For changing doc to become owned by chunk being migrated.
+ {x: -20},
+ {x: -20},
+ // For basic insert.
+ {x: 10},
+ // For changing doc to become owned by another chunk not being migrated.
+ {x: 20},
+ {x: 20},
+ // For basic findAndModify.
+ {x: 30}
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(34),
+};
+
+var testDB = st.getDB('test');
+const insertResult = assert.commandWorked(testDB.runCommand(insertCmd));
+
+const findAndModCmd = {
+ findAndModify: 'user',
+ query: {x: 30},
+ update: {$inc: {y: 1}},
+ new: true,
+ upsert: true,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(37),
+};
+
+const findAndModifyResult = assert.commandWorked(testDB.runCommand(findAndModCmd));
+
+const changeDocToChunkNotMigrated = {
+ findAndModify: 'user',
+ query: {x: 20},
+ update: {$set: {x: -120}, $inc: {y: 1}},
+ new: false,
+ upsert: true,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(37),
+};
+
+const changeDocToNotMigratedResult =
+ assert.commandWorked(testDB.runCommand(changeDocToChunkNotMigrated));
+
+const changeDocToChunkMigrated = {
+ findAndModify: 'user',
+ query: {x: -20},
+ update: {$set: {x: 120}, $inc: {y: 1}},
+ new: false,
+ upsert: true,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(37),
+};
+
+const changeDocToMigratedResult = assert.commandWorked(testDB.runCommand(changeDocToChunkMigrated));
+
+const findAndModifyNotMigrated = {
+ findAndModify: 'user',
+ query: {x: -30},
+ update: {$inc: {y: 1}},
+ new: false,
+ upsert: true,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(37),
+};
+
+const findAndModifyNotMigratedResult =
+ assert.commandWorked(testDB.runCommand(findAndModifyNotMigrated));
+
+unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+joinMoveChunk();
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+// Retry phase
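+// Each command is re-sent with the same lsid and txnNumber, so the server should return the
+// result recorded for the original execution rather than applying the write again, even though
+// the chunk has since migrated.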
+
+var insertRetryResult = assert.commandWorked(testDB.runCommand(insertCmd));
+
+assert.eq(insertResult.ok, insertRetryResult.ok);
+assert.eq(insertResult.n, insertRetryResult.n);
+assert.eq(insertResult.writeErrors, insertRetryResult.writeErrors);
+assert.eq(insertResult.writeConcernErrors, insertRetryResult.writeConcernErrors);
+
+assert.eq(1, testDB.user.find({x: 10}).itcount());
+assert.eq(1, testDB.user.find({x: 30}).itcount());
+
+var findAndModifyRetryResult = assert.commandWorked(testDB.runCommand(findAndModCmd));
+
+assert.eq(findAndModifyResult.ok, findAndModifyRetryResult.ok);
+assert.eq(findAndModifyResult.value, findAndModifyRetryResult.value);
+assert.eq(findAndModifyResult.lastErrorObject, findAndModifyRetryResult.lastErrorObject);
+
+assert.eq(1, testDB.user.findOne({x: 30}).y);
+
+let changeDocToNotMigratedRetryResult =
+ assert.commandWorked(testDB.runCommand(changeDocToChunkNotMigrated));
+
+assert.eq(changeDocToNotMigratedResult.ok, changeDocToNotMigratedRetryResult.ok);
+assert.eq(changeDocToNotMigratedResult.value, changeDocToNotMigratedRetryResult.value);
+assert.eq(changeDocToNotMigratedResult.lastErrorObject,
+ changeDocToNotMigratedRetryResult.lastErrorObject);
+
+assert.eq(1, testDB.user.find({x: -120}).itcount());
+
+let changeDocToMigratedRetryResult =
+ assert.commandWorked(testDB.runCommand(changeDocToChunkMigrated));
+
+assert.eq(changeDocToMigratedResult.ok, changeDocToMigratedRetryResult.ok);
+assert.eq(changeDocToMigratedResult.value, changeDocToMigratedRetryResult.value);
+assert.eq(changeDocToMigratedResult.lastErrorObject,
+ changeDocToMigratedRetryResult.lastErrorObject);
+
+assert.eq(1, testDB.user.find({x: 120}).itcount());
- let findAndModifyNotMigratedRetryResult =
- assert.commandWorked(testDB.runCommand(findAndModifyNotMigrated));
+let findAndModifyNotMigratedRetryResult =
+ assert.commandWorked(testDB.runCommand(findAndModifyNotMigrated));
- assert.eq(findAndModifyNotMigratedResult.ok, findAndModifyNotMigratedRetryResult.ok);
- assert.eq(findAndModifyNotMigratedResult.value, findAndModifyNotMigratedRetryResult.value);
- assert.eq(findAndModifyNotMigratedResult.lastErrorObject,
- findAndModifyNotMigratedRetryResult.lastErrorObject);
+assert.eq(findAndModifyNotMigratedResult.ok, findAndModifyNotMigratedRetryResult.ok);
+assert.eq(findAndModifyNotMigratedResult.value, findAndModifyNotMigratedRetryResult.value);
+assert.eq(findAndModifyNotMigratedResult.lastErrorObject,
+ findAndModifyNotMigratedRetryResult.lastErrorObject);
- assert.eq(1, testDB.user.findOne({x: -30}).y);
+assert.eq(1, testDB.user.findOne({x: -30}).y);
- st.stop();
+st.stop();
- MongoRunner.stopMongod(staticMongod);
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js
index baf96845f62..dcd3b93fb71 100644
--- a/jstests/sharding/zbigMapReduce.js
+++ b/jstests/sharding/zbigMapReduce.js
@@ -4,227 +4,225 @@
* @tags: [resource_intensive]
*/
(function() {
- 'use strict';
-
- let s = new ShardingTest({
- shards: 2,
- mongos: 1,
- other: {
- rs: true,
- numReplicas: 2,
- chunkSize: 1,
- rsOptions: {oplogSize: 50},
- enableAutoSplit: true,
- }
- });
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard0.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {"_id": 1}}));
-
- let testDb = s.getDB("test");
-
- jsTest.log("Inserting a lot of documents into test.foo");
+'use strict';
- // Make each document data to be 5K so that the total size is ~250MB
- const str = "#".repeat(5 * 1024);
-
- var idInc = 0;
- var valInc = 0;
-
- var bulk = testDb.foo.initializeUnorderedBulkOp();
- for (var j = 0; j < 100; j++) {
- for (var i = 0; i < 512; i++) {
- bulk.insert({i: idInc++, val: valInc++, y: str});
- }
+let s = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ other: {
+ rs: true,
+ numReplicas: 2,
+ chunkSize: 1,
+ rsOptions: {oplogSize: 50},
+ enableAutoSplit: true,
}
- assert.writeOK(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
+});
- jsTest.log("Documents inserted, doing double-checks of insert...");
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard0.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {"_id": 1}}));
- // Collect some useful stats to figure out what happened
- if (testDb.foo.find().itcount() != 51200) {
- s.printShardingStatus(true);
+let testDb = s.getDB("test");
- print("Shard 0: " + s.shard0.getCollection(testDb.foo + "").find().itcount());
- print("Shard 1: " + s.shard1.getCollection(testDb.foo + "").find().itcount());
+jsTest.log("Inserting a lot of documents into test.foo");
- for (var i = 0; i < 51200; i++) {
- if (!testDb.foo.findOne({i: i}, {i: 1})) {
- print("Could not find: " + i);
- }
+// Make each document's payload ~5KB so that the total size is ~250MB
+const str = "#".repeat(5 * 1024);
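+// (100 batches x 512 docs x ~5KB per doc comes to roughly 250MB.)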
- if (i % 100 == 0)
- print("Checked " + i);
- }
+var idInc = 0;
+var valInc = 0;
- assert(false, 'Incorect number of chunks found!');
+var bulk = testDb.foo.initializeUnorderedBulkOp();
+for (var j = 0; j < 100; j++) {
+ for (var i = 0; i < 512; i++) {
+ bulk.insert({i: idInc++, val: valInc++, y: str});
}
+}
+assert.writeOK(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
- s.printChunks(testDb.foo.getFullName());
- s.printChangeLog();
+jsTest.log("Documents inserted, doing double-checks of insert...");
- function map() {
- emit('count', 1);
- }
- function reduce(key, values) {
- return Array.sum(values);
- }
+// Collect some useful stats to figure out what happened
+if (testDb.foo.find().itcount() != 51200) {
+ s.printShardingStatus(true);
- // Let chunks move around while map reduce is running
- s.startBalancer();
+ print("Shard 0: " + s.shard0.getCollection(testDb.foo + "").find().itcount());
+ print("Shard 1: " + s.shard1.getCollection(testDb.foo + "").find().itcount());
- jsTest.log("Test basic mapreduce...");
+ for (var i = 0; i < 51200; i++) {
+ if (!testDb.foo.findOne({i: i}, {i: 1})) {
+ print("Could not find: " + i);
+ }
- // Test basic mapReduce
- for (var iter = 0; iter < 5; iter++) {
- print("Test #" + iter);
- testDb.foo.mapReduce(map, reduce, "big_out");
+ if (i % 100 == 0)
+ print("Checked " + i);
}
- print("Testing output to different db...");
+    assert(false, 'Incorrect number of documents found!');
+}
- // Test output to a different DB - do it multiple times so that the merging shard changes
- for (var iter = 0; iter < 5; iter++) {
- print("Test #" + iter);
+s.printChunks(testDb.foo.getFullName());
+s.printChangeLog();
- assert.eq(51200, testDb.foo.find().itcount(), "Not all data was found!");
+function map() {
+ emit('count', 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
- let outCollStr = "mr_replace_col_" + iter;
- let outDbStr = "mr_db_" + iter;
+// Let chunks move around while map reduce is running
+s.startBalancer();
- print("Testing mr replace into DB " + iter);
+jsTest.log("Test basic mapreduce...");
- var res = testDb.foo.mapReduce(map, reduce, {out: {replace: outCollStr, db: outDbStr}});
- printjson(res);
+// Test basic mapReduce
+for (var iter = 0; iter < 5; iter++) {
+ print("Test #" + iter);
+ testDb.foo.mapReduce(map, reduce, "big_out");
+}
- var outDb = s.getDB(outDbStr);
- var outColl = outDb[outCollStr];
+print("Testing output to different db...");
- var obj = outColl.convertToSingleObject("value");
- assert.eq(51200, obj.count, "Received wrong result " + obj.count);
+// Test output to a different DB - do it multiple times so that the merging shard changes
+for (var iter = 0; iter < 5; iter++) {
+ print("Test #" + iter);
- print("Checking result field");
- assert.eq(res.result.collection, outCollStr, "Wrong collection " + res.result.collection);
- assert.eq(res.result.db, outDbStr, "Wrong db " + res.result.db);
- }
+ assert.eq(51200, testDb.foo.find().itcount(), "Not all data was found!");
- jsTest.log("Verifying nonatomic M/R throws...");
+ let outCollStr = "mr_replace_col_" + iter;
+ let outDbStr = "mr_db_" + iter;
- // Check nonAtomic output
- assert.throws(function() {
- testDb.foo.mapReduce(map, reduce, {out: {replace: "big_out", nonAtomic: true}});
- });
+ print("Testing mr replace into DB " + iter);
- jsTest.log("Adding documents");
+ var res = testDb.foo.mapReduce(map, reduce, {out: {replace: outCollStr, db: outDbStr}});
+ printjson(res);
- // Add docs with dup "i"
- valInc = 0;
- for (var j = 0; j < 100; j++) {
- print("Inserted document: " + (j * 100));
- var bulk = testDb.foo.initializeUnorderedBulkOp();
- for (i = 0; i < 512; i++) {
- bulk.insert({i: idInc++, val: valInc++, y: str});
- }
- assert.writeOK(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
- }
+ var outDb = s.getDB(outDbStr);
+ var outColl = outDb[outCollStr];
- jsTest.log("No errors...");
+ var obj = outColl.convertToSingleObject("value");
+ assert.eq(51200, obj.count, "Received wrong result " + obj.count);
- function map2() {
- emit(this.val, 1);
- }
- function reduce2(key, values) {
- return Array.sum(values);
- }
-
- // Test merge
- let outColMerge = 'big_out_merge';
-
- // M/R quarter of the docs
- {
- jsTestLog("Test A");
- var out = testDb.foo.mapReduce(
- map2, reduce2, {query: {i: {$lt: 25600}}, out: {merge: outColMerge}});
- printjson(out);
- assert.eq(25600, out.counts.emit, "Received wrong result");
- assert.eq(25600, out.counts.output, "Received wrong result");
- }
+ print("Checking result field");
+ assert.eq(res.result.collection, outCollStr, "Wrong collection " + res.result.collection);
+ assert.eq(res.result.db, outDbStr, "Wrong db " + res.result.db);
+}
- // M/R further docs
- {
- jsTestLog("Test B");
- var out = testDb.foo.mapReduce(
- map2, reduce2, {query: {i: {$gte: 25600, $lt: 51200}}, out: {merge: outColMerge}});
- printjson(out);
- assert.eq(25600, out.counts.emit, "Received wrong result");
- assert.eq(51200, out.counts.output, "Received wrong result");
- }
+jsTest.log("Verifying nonatomic M/R throws...");
- // M/R do 2nd half of docs
- {
- jsTestLog("Test C");
- var out = testDb.foo.mapReduce(
- map2, reduce2, {query: {i: {$gte: 51200}}, out: {merge: outColMerge, nonAtomic: true}});
- printjson(out);
- assert.eq(51200, out.counts.emit, "Received wrong result");
- assert.eq(51200, out.counts.output, "Received wrong result");
- assert.eq(1, testDb[outColMerge].findOne().value, "Received wrong result");
- }
+// Check nonAtomic output
+assert.throws(function() {
+ testDb.foo.mapReduce(map, reduce, {out: {replace: "big_out", nonAtomic: true}});
+});
- // Test reduce
- let outColReduce = "big_out_reduce";
-
- // M/R quarter of the docs
- {
- jsTestLog("Test D");
- var out = testDb.foo.mapReduce(
- map2, reduce2, {query: {i: {$lt: 25600}}, out: {reduce: outColReduce}});
- printjson(out);
- assert.eq(25600, out.counts.emit, "Received wrong result");
- assert.eq(25600, out.counts.output, "Received wrong result");
- }
+jsTest.log("Adding documents");
- // M/R further docs
- {
- jsTestLog("Test E");
- var out = testDb.foo.mapReduce(
- map2, reduce2, {query: {i: {$gte: 25600, $lt: 51200}}, out: {reduce: outColReduce}});
- printjson(out);
- assert.eq(25600, out.counts.emit, "Received wrong result");
- assert.eq(51200, out.counts.output, "Received wrong result");
- }
-
- // M/R do 2nd half of docs
- {
- jsTestLog("Test F");
- var out = testDb.foo.mapReduce(
- map2,
- reduce2,
- {query: {i: {$gte: 51200}}, out: {reduce: outColReduce, nonAtomic: true}});
- printjson(out);
- assert.eq(51200, out.counts.emit, "Received wrong result");
- assert.eq(51200, out.counts.output, "Received wrong result");
- assert.eq(2, testDb[outColReduce].findOne().value, "Received wrong result");
- }
-
- // Verify that data is also on secondary
- {
- jsTestLog("Test G");
- var primary = s.rs0._master;
- var secondaries = s.rs0._slaves;
-
- // Stop the balancer to prevent new writes from happening and make sure that replication can
- // keep up even on slow machines
- s.stopBalancer();
- s.rs0.awaitReplication();
- assert.eq(51200, primary.getDB("test")[outColReduce].find().itcount(), "Wrong count");
-
- for (var i = 0; i < secondaries.length; ++i) {
- assert.eq(
- 51200, secondaries[i].getDB("test")[outColReduce].find().itcount(), "Wrong count");
- }
+// Add docs with dup "i"
+valInc = 0;
+for (var j = 0; j < 100; j++) {
+ print("Inserted document: " + (j * 100));
+ var bulk = testDb.foo.initializeUnorderedBulkOp();
+ for (i = 0; i < 512; i++) {
+ bulk.insert({i: idInc++, val: valInc++, y: str});
}
-
- s.stop();
+ assert.writeOK(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
+}
+
+jsTest.log("No errors...");
+
+function map2() {
+ emit(this.val, 1);
+}
+function reduce2(key, values) {
+ return Array.sum(values);
+}
+
+// Test merge
+let outColMerge = 'big_out_merge';
+
+// M/R quarter of the docs
+{
+ jsTestLog("Test A");
+ var out =
+ testDb.foo.mapReduce(map2, reduce2, {query: {i: {$lt: 25600}}, out: {merge: outColMerge}});
+ printjson(out);
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(25600, out.counts.output, "Received wrong result");
+}
+
+// M/R further docs
+{
+ jsTestLog("Test B");
+ var out = testDb.foo.mapReduce(
+ map2, reduce2, {query: {i: {$gte: 25600, $lt: 51200}}, out: {merge: outColMerge}});
+ printjson(out);
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
+}
+
+// M/R do 2nd half of docs
+{
+ jsTestLog("Test C");
+ var out = testDb.foo.mapReduce(
+ map2, reduce2, {query: {i: {$gte: 51200}}, out: {merge: outColMerge, nonAtomic: true}});
+ printjson(out);
+ assert.eq(51200, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
+ assert.eq(1, testDb[outColMerge].findOne().value, "Received wrong result");
+}
+
+// Test reduce
+let outColReduce = "big_out_reduce";
+
+// M/R quarter of the docs
+{
+ jsTestLog("Test D");
+ var out = testDb.foo.mapReduce(
+ map2, reduce2, {query: {i: {$lt: 25600}}, out: {reduce: outColReduce}});
+ printjson(out);
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(25600, out.counts.output, "Received wrong result");
+}
+
+// M/R further docs
+{
+ jsTestLog("Test E");
+ var out = testDb.foo.mapReduce(
+ map2, reduce2, {query: {i: {$gte: 25600, $lt: 51200}}, out: {reduce: outColReduce}});
+ printjson(out);
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
+}
+
+// M/R do 2nd half of docs
+{
+ jsTestLog("Test F");
+ var out = testDb.foo.mapReduce(
+ map2, reduce2, {query: {i: {$gte: 51200}}, out: {reduce: outColReduce, nonAtomic: true}});
+ printjson(out);
+ assert.eq(51200, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
+ assert.eq(2, testDb[outColReduce].findOne().value, "Received wrong result");
+}
+
+// Verify that data is also on secondary
+{
+ jsTestLog("Test G");
+ var primary = s.rs0._master;
+ var secondaries = s.rs0._slaves;
+
+ // Stop the balancer to prevent new writes from happening and make sure that replication can
+ // keep up even on slow machines
+ s.stopBalancer();
+ s.rs0.awaitReplication();
+ assert.eq(51200, primary.getDB("test")[outColReduce].find().itcount(), "Wrong count");
+
+ for (var i = 0; i < secondaries.length; ++i) {
+ assert.eq(
+ 51200, secondaries[i].getDB("test")[outColReduce].find().itcount(), "Wrong count");
+ }
+}
+
+s.stop();
})();
diff --git a/jstests/sharding/zero_shard_version.js b/jstests/sharding/zero_shard_version.js
index 1b29f50c459..7d08bf34d36 100644
--- a/jstests/sharding/zero_shard_version.js
+++ b/jstests/sharding/zero_shard_version.js
@@ -3,181 +3,180 @@
* against a major version of zero or incompatible epochs.
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 4});
+var st = new ShardingTest({shards: 2, mongos: 4});
- var testDB_s0 = st.s.getDB('test');
- assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
- assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+var testDB_s0 = st.s.getDB('test');
+assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
+assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- var checkShardMajorVersion = function(conn, expectedVersion) {
- var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
- assert.eq(expectedVersion, shardVersionInfo.global.getTime());
- };
+var checkShardMajorVersion = function(conn, expectedVersion) {
+ var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
+ assert.eq(expectedVersion, shardVersionInfo.global.getTime());
+};
- ///////////////////////////////////////////////////////
- // Test shard with empty chunk
+///////////////////////////////////////////////////////
+// Test shard with empty chunk
- // shard0: 0|0|a
- // shard1: 1|0|a, [-inf, inf)
- // mongos0: 1|0|a
+// shard0: 0|0|a
+// shard1: 1|0|a, [-inf, inf)
+// mongos0: 1|0|a
- var testDB_s1 = st.s1.getDB('test');
- assert.writeOK(testDB_s1.user.insert({x: 1}));
- assert.commandWorked(
- testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
+var testDB_s1 = st.s1.getDB('test');
+assert.writeOK(testDB_s1.user.insert({x: 1}));
+assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
- st.configRS.awaitLastOpCommitted();
-
- // Official config:
- // shard0: 2|0|a, [-inf, inf)
- // shard1: 0|0|a
- //
- // Shard metadata:
- // shard0: 0|0|a
- // shard1: 0|0|a
- // mongos0: 1|0|a
+st.configRS.awaitLastOpCommitted();
+
+// Official config:
+// shard0: 2|0|a, [-inf, inf)
+// shard1: 0|0|a
+//
+// Shard metadata:
+// shard0: 0|0|a
+// shard1: 0|0|a
+// mongos0: 1|0|a
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
+checkShardMajorVersion(st.rs0.getPrimary(), 0);
+checkShardMajorVersion(st.rs1.getPrimary(), 0);
- // mongos0 still thinks that { x: 1 } belong to st.shard1.shardName, but should be able to
- // refresh it's metadata correctly.
- assert.neq(null, testDB_s0.user.findOne({x: 1}));
+// mongos0 still thinks that { x: 1 } belongs to st.shard1.shardName, but should be able to
+// refresh its metadata correctly.
+assert.neq(null, testDB_s0.user.findOne({x: 1}));
- checkShardMajorVersion(st.rs0.getPrimary(), 2);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
+checkShardMajorVersion(st.rs0.getPrimary(), 2);
+checkShardMajorVersion(st.rs1.getPrimary(), 0);
- // Set mongos2 & mongos3 to version 2|0|a
- var testDB_s2 = st.s2.getDB('test');
- assert.neq(null, testDB_s2.user.findOne({x: 1}));
+// Set mongos2 & mongos3 to version 2|0|a
+var testDB_s2 = st.s2.getDB('test');
+assert.neq(null, testDB_s2.user.findOne({x: 1}));
- var testDB_s3 = st.s3.getDB('test');
- assert.neq(null, testDB_s3.user.findOne({x: 1}));
+var testDB_s3 = st.s3.getDB('test');
+assert.neq(null, testDB_s3.user.findOne({x: 1}));
- ///////////////////////////////////////////////////////
- // Test unsharded collection
- // mongos versions: s0, s2, s3: 2|0|a
+///////////////////////////////////////////////////////
+// Test unsharded collection
+// mongos versions: s0, s2, s3: 2|0|a
- testDB_s1.user.drop();
- assert.writeOK(testDB_s1.user.insert({x: 10}));
+testDB_s1.user.drop();
+assert.writeOK(testDB_s1.user.insert({x: 10}));
- // shard0: 0|0|0
- // shard1: 0|0|0
- // mongos0: 2|0|a
-
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
-
- // mongos0 still thinks { x: 10 } belong to st.shard0.shardName, but since coll is dropped,
- // query should be routed to primary shard.
- assert.neq(null, testDB_s0.user.findOne({x: 10}));
+// shard0: 0|0|0
+// shard1: 0|0|0
+// mongos0: 2|0|a
+
+checkShardMajorVersion(st.rs0.getPrimary(), 0);
+checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+// mongos0 still thinks { x: 10 } belongs to st.shard0.shardName, but since the collection is
+// dropped, the query should be routed to the primary shard.
+assert.neq(null, testDB_s0.user.findOne({x: 10}));
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
+checkShardMajorVersion(st.rs0.getPrimary(), 0);
+checkShardMajorVersion(st.rs1.getPrimary(), 0);
- ///////////////////////////////////////////////////////
- // Test 2 shards with 1 chunk
- // mongos versions: s0: 0|0|0, s2, s3: 2|0|a
+///////////////////////////////////////////////////////
+// Test 2 shards with 1 chunk
+// mongos versions: s0: 0|0|0, s2, s3: 2|0|a
- testDB_s1.user.drop();
- testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
- testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
+testDB_s1.user.drop();
+testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
- // shard0: 0|0|b,
- // shard1: 1|1|b, [-inf, 0), [0, inf)
+// shard0: 0|0|b,
+// shard1: 1|1|b, [-inf, 0), [0, inf)
- testDB_s1.user.insert({x: 1});
- testDB_s1.user.insert({x: -11});
- assert.commandWorked(
- testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: st.shard0.shardName}));
+testDB_s1.user.insert({x: 1});
+testDB_s1.user.insert({x: -11});
+assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: st.shard0.shardName}));
- st.configRS.awaitLastOpCommitted();
+st.configRS.awaitLastOpCommitted();
- // Official config:
- // shard0: 2|0|b, [-inf, 0)
- // shard1: 2|1|b, [0, inf)
- //
- // Shard metadata:
- // shard0: 0|0|b
- // shard1: 2|1|b
- //
- // mongos2: 2|0|a
+// Official config:
+// shard0: 2|0|b, [-inf, 0)
+// shard1: 2|1|b, [0, inf)
+//
+// Shard metadata:
+// shard0: 0|0|b
+// shard1: 2|1|b
+//
+// mongos2: 2|0|a
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 2);
+checkShardMajorVersion(st.rs0.getPrimary(), 0);
+checkShardMajorVersion(st.rs1.getPrimary(), 2);
- // mongos2 still thinks that { x: 1 } belong to st.shard0.shardName, but should be able to
- // refresh it's metadata correctly.
- assert.neq(null, testDB_s2.user.findOne({x: 1}));
+// mongos2 still thinks that { x: 1 } belongs to st.shard0.shardName, but should be able to
+// refresh its metadata correctly.
+assert.neq(null, testDB_s2.user.findOne({x: 1}));
- checkShardMajorVersion(st.rs0.getPrimary(), 2);
- checkShardMajorVersion(st.rs1.getPrimary(), 2);
+checkShardMajorVersion(st.rs0.getPrimary(), 2);
+checkShardMajorVersion(st.rs1.getPrimary(), 2);
- // Set shard metadata to 2|0|b
- assert.neq(null, testDB_s2.user.findOne({x: -11}));
+// Set shard metadata to 2|0|b
+assert.neq(null, testDB_s2.user.findOne({x: -11}));
- checkShardMajorVersion(st.rs0.getPrimary(), 2);
- checkShardMajorVersion(st.rs1.getPrimary(), 2);
+checkShardMajorVersion(st.rs0.getPrimary(), 2);
+checkShardMajorVersion(st.rs1.getPrimary(), 2);
- // Official config:
- // shard0: 2|0|b, [-inf, 0)
- // shard1: 2|1|b, [0, inf)
- //
- // Shard metadata:
- // shard0: 2|0|b
- // shard1: 2|1|b
- //
- // mongos3: 2|0|a
+// Official config:
+// shard0: 2|0|b, [-inf, 0)
+// shard1: 2|1|b, [0, inf)
+//
+// Shard metadata:
+// shard0: 2|0|b
+// shard1: 2|1|b
+//
+// mongos3: 2|0|a
+
+// 4th mongos still thinks that { x: 1 } belongs to st.shard0.shardName, but should be able to
+// refresh its metadata correctly.
+assert.neq(null, testDB_s3.user.findOne({x: 1}));
- // 4th mongos still thinks that { x: 1 } belong to st.shard0.shardName, but should be able to
- // refresh it's metadata correctly.
- assert.neq(null, testDB_s3.user.findOne({x: 1}));
+///////////////////////////////////////////////////////
+// Test mongos thinks unsharded when it's actually sharded
+// mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
- ///////////////////////////////////////////////////////
- // Test mongos thinks unsharded when it's actually sharded
- // mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
+// Set mongos0 to version 0|0|0
+testDB_s0.user.drop();
- // Set mongos0 to version 0|0|0
- testDB_s0.user.drop();
+checkShardMajorVersion(st.rs0.getPrimary(), 0);
+checkShardMajorVersion(st.rs1.getPrimary(), 0);
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
+assert.eq(null, testDB_s0.user.findOne({x: 1}));
- assert.eq(null, testDB_s0.user.findOne({x: 1}));
+// mongos1 also needs to be set to version 0|0|0; otherwise it'll complain that the collection is
+// already sharded.
+assert.eq(null, testDB_s1.user.findOne({x: 1}));
+assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+testDB_s1.user.insert({x: 1});
- // Needs to also set mongos1 to version 0|0|0, otherwise it'll complain that collection is
- // already sharded.
- assert.eq(null, testDB_s1.user.findOne({x: 1}));
- assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- testDB_s1.user.insert({x: 1});
+assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
- assert.commandWorked(
- testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
+st.configRS.awaitLastOpCommitted();
- st.configRS.awaitLastOpCommitted();
+// Official config:
+// shard0: 2|0|c, [-inf, inf)
+// shard1: 0|0|c
+//
+// Shard metadata:
+// shard0: 0|0|c
+// shard1: 0|0|c
+//
+// mongos0: 0|0|0
- // Official config:
- // shard0: 2|0|c, [-inf, inf)
- // shard1: 0|0|c
- //
- // Shard metadata:
- // shard0: 0|0|c
- // shard1: 0|0|c
- //
- // mongos0: 0|0|0
+checkShardMajorVersion(st.rs0.getPrimary(), 0);
+checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+// 1st mongos thinks the collection is unsharded and will attempt to query the primary shard.
+assert.neq(null, testDB_s0.user.findOne({x: 1}));
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
-
- // 1st mongos thinks that collection is unshareded and will attempt to query primary shard.
- assert.neq(null, testDB_s0.user.findOne({x: 1}));
-
- checkShardMajorVersion(st.rs0.getPrimary(), 2);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
-
- st.stop();
+checkShardMajorVersion(st.rs0.getPrimary(), 2);
+checkShardMajorVersion(st.rs1.getPrimary(), 0);
+st.stop();
})();