Diffstat (limited to 'jstests')
-rw-r--r--  jstests/aggregation/bugs/cond.js | 12
-rw-r--r--  jstests/aggregation/bugs/cursor_timeout.js | 4
-rw-r--r--  jstests/aggregation/bugs/explain_options_helper.js | 2
-rw-r--r--  jstests/aggregation/bugs/firstlast.js | 60
-rw-r--r--  jstests/aggregation/bugs/lookup_unwind_getmore.js | 4
-rw-r--r--  jstests/aggregation/bugs/lookup_unwind_killcursor.js | 4
-rw-r--r--  jstests/aggregation/bugs/match.js | 72
-rw-r--r--  jstests/aggregation/bugs/match_swap_limit.js | 8
-rw-r--r--  jstests/aggregation/bugs/server10176.js | 38
-rw-r--r--  jstests/aggregation/bugs/server11118.js | 6
-rw-r--r--  jstests/aggregation/bugs/server11675.js | 10
-rw-r--r--  jstests/aggregation/bugs/server12015.js | 8
-rw-r--r--  jstests/aggregation/bugs/server14421.js | 2
-rw-r--r--  jstests/aggregation/bugs/server14691.js | 14
-rw-r--r--  jstests/aggregation/bugs/server14872.js | 2
-rw-r--r--  jstests/aggregation/bugs/server14969.js | 2
-rw-r--r--  jstests/aggregation/bugs/server17943.js | 16
-rw-r--r--  jstests/aggregation/bugs/server18222.js | 22
-rw-r--r--  jstests/aggregation/bugs/server18427.js | 2
-rw-r--r--  jstests/aggregation/bugs/server20168.js | 2
-rw-r--r--  jstests/aggregation/bugs/server21632.js | 2
-rw-r--r--  jstests/aggregation/bugs/server25590.js | 2
-rw-r--r--  jstests/aggregation/bugs/server26462.js | 8
-rw-r--r--  jstests/aggregation/bugs/server4588.js | 10
-rw-r--r--  jstests/aggregation/bugs/server4589.js | 2
-rw-r--r--  jstests/aggregation/bugs/server5044.js | 4
-rw-r--r--  jstests/aggregation/bugs/server533.js | 2
-rw-r--r--  jstests/aggregation/bugs/server6074.js | 2
-rw-r--r--  jstests/aggregation/bugs/server6127.js | 6
-rw-r--r--  jstests/aggregation/bugs/server6147.js | 4
-rw-r--r--  jstests/aggregation/bugs/server6185.js | 10
-rw-r--r--  jstests/aggregation/bugs/server6530.js | 4
-rw-r--r--  jstests/aggregation/bugs/server6779.js | 4
-rw-r--r--  jstests/aggregation/bugs/server7695_isodates.js | 2
-rw-r--r--  jstests/aggregation/bugs/server7781.js | 2
-rw-r--r--  jstests/aggregation/bugs/server8141.js | 4
-rw-r--r--  jstests/aggregation/bugs/server8568.js | 2
-rw-r--r--  jstests/aggregation/bugs/server9625.js | 4
-rw-r--r--  jstests/aggregation/bugs/sort_arrays.js | 2
-rw-r--r--  jstests/aggregation/explain_limit.js | 2
-rw-r--r--  jstests/aggregation/explain_writing_aggs.js | 2
-rw-r--r--  jstests/aggregation/expressions/arrayToObject.js | 2
-rw-r--r--  jstests/aggregation/expressions/collation_expressions.js | 18
-rw-r--r--  jstests/aggregation/expressions/convert.js | 2
-rw-r--r--  jstests/aggregation/expressions/date_expressions_with_timezones.js | 2
-rw-r--r--  jstests/aggregation/expressions/date_from_string.js | 32
-rw-r--r--  jstests/aggregation/expressions/date_from_string_on_error.js | 2
-rw-r--r--  jstests/aggregation/expressions/date_from_string_on_null.js | 2
-rw-r--r--  jstests/aggregation/expressions/date_to_parts.js | 10
-rw-r--r--  jstests/aggregation/expressions/date_to_string.js | 14
-rw-r--r--  jstests/aggregation/expressions/date_to_string_on_null.js | 2
-rw-r--r--  jstests/aggregation/expressions/in.js | 2
-rw-r--r--  jstests/aggregation/expressions/indexof_array.js | 2
-rw-r--r--  jstests/aggregation/expressions/indexof_bytes.js | 2
-rw-r--r--  jstests/aggregation/expressions/indexof_codepoints.js | 2
-rw-r--r--  jstests/aggregation/expressions/merge_objects.js | 36
-rw-r--r--  jstests/aggregation/expressions/objectToArray.js | 34
-rw-r--r--  jstests/aggregation/expressions/object_ids_for_date_expressions.js | 2
-rw-r--r--  jstests/aggregation/expressions/round_trunc.js | 2
-rw-r--r--  jstests/aggregation/expressions/size.js | 12
-rw-r--r--  jstests/aggregation/expressions/split.js | 2
-rw-r--r--  jstests/aggregation/expressions/trim.js | 6
-rw-r--r--  jstests/aggregation/match_swapping_renamed_fields.js | 10
-rw-r--r--  jstests/aggregation/mongos_merge.js | 4
-rw-r--r--  jstests/aggregation/optimize_away_pipeline.js | 10
-rw-r--r--  jstests/aggregation/shard_targeting.js | 8
-rw-r--r--  jstests/aggregation/sharded_agg_cleanup_on_error.js | 2
-rw-r--r--  jstests/aggregation/sources/addFields/use_cases.js | 2
-rw-r--r--  jstests/aggregation/sources/addFields/weather.js | 2
-rw-r--r--  jstests/aggregation/sources/bucket/collation_bucket.js | 18
-rw-r--r--  jstests/aggregation/sources/bucketauto/collation_bucketauto.js | 18
-rw-r--r--  jstests/aggregation/sources/collStats/count.js | 4
-rw-r--r--  jstests/aggregation/sources/facet/inner_graphlookup.js | 8
-rw-r--r--  jstests/aggregation/sources/facet/inner_lookup.js | 10
-rw-r--r--  jstests/aggregation/sources/facet/use_cases.js | 2
-rw-r--r--  jstests/aggregation/sources/geonear/collation_geonear.js | 4
-rw-r--r--  jstests/aggregation/sources/geonear/distancefield_and_includelocs.js | 6
-rw-r--r--  jstests/aggregation/sources/graphLookup/airports.js | 2
-rw-r--r--  jstests/aggregation/sources/graphLookup/basic.js | 22
-rw-r--r--  jstests/aggregation/sources/graphLookup/collation_graphlookup.js | 20
-rw-r--r--  jstests/aggregation/sources/graphLookup/error.js | 10
-rw-r--r--  jstests/aggregation/sources/graphLookup/filter.js | 10
-rw-r--r--  jstests/aggregation/sources/graphLookup/nested_objects.js | 8
-rw-r--r--  jstests/aggregation/sources/graphLookup/socialite.js | 4
-rw-r--r--  jstests/aggregation/sources/group/collation_group.js | 12
-rw-r--r--  jstests/aggregation/sources/group/numeric_grouping.js | 12
-rw-r--r--  jstests/aggregation/sources/group/text_score_grouping.js | 4
-rw-r--r--  jstests/aggregation/sources/lookup/lookup_absorb_match.js | 8
-rw-r--r--  jstests/aggregation/sources/lookup/lookup_non_correlated.js | 12
-rw-r--r--  jstests/aggregation/sources/lookup/lookup_subpipeline.js | 40
-rw-r--r--  jstests/aggregation/sources/match/collation_match.js | 6
-rw-r--r--  jstests/aggregation/sources/match/expr_match.js | 8
-rw-r--r--  jstests/aggregation/sources/project/remove_redundant_projects.js | 2
-rw-r--r--  jstests/aggregation/sources/redact/collation_redact.js | 4
-rw-r--r--  jstests/aggregation/sources/replaceRoot/address.js | 2
-rw-r--r--  jstests/aggregation/sources/sort/collation_sort.js | 18
-rw-r--r--  jstests/aggregation/sources/sort/collation_sort_japanese.js | 2
-rw-r--r--  jstests/aggregation/sources/sort/explain_sort.js | 2
-rw-r--r--  jstests/aggregation/sources/sort/sort_with_metadata.js | 8
-rw-r--r--  jstests/aggregation/testall.js | 2
-rw-r--r--  jstests/aggregation/use_query_project_and_sort.js | 2
-rw-r--r--  jstests/aggregation/use_query_projection.js | 4
-rw-r--r--  jstests/aggregation/use_query_sort.js | 2
-rw-r--r--  jstests/aggregation/variables/layered_variables.js | 2
-rw-r--r--  jstests/aggregation/variables/remove_system_variable.js | 6
-rw-r--r--  jstests/auth/db_multiple_login.js | 2
-rw-r--r--  jstests/auth/deleted_recreated_user.js | 2
-rw-r--r--  jstests/auth/explain_auth.js | 2
-rw-r--r--  jstests/auth/getMore.js | 8
-rw-r--r--  jstests/auth/keyfile_rollover.js | 6
-rw-r--r--  jstests/auth/kill_cursors.js | 4
-rw-r--r--  jstests/auth/localhostAuthBypass.js | 6
-rw-r--r--  jstests/auth/mongos_cache_invalidation.js | 18
-rw-r--r--  jstests/auth/renameRestrictedCollections.js | 4
-rw-r--r--  jstests/auth/repl_auth.js | 4
-rw-r--r--  jstests/auth/role_management_commands_lib.js | 18
-rw-r--r--  jstests/auth/server-4892.js | 2
-rw-r--r--  jstests/auth/user_defined_roles.js | 10
-rw-r--r--  jstests/auth/user_defined_roles_on_secondaries.js | 2
-rw-r--r--  jstests/auth/user_management_commands_lib.js | 6
-rw-r--r--  jstests/auth/user_special_chars.js | 4
-rw-r--r--  jstests/auth/views_authz.js | 2
-rw-r--r--  jstests/change_streams/ban_from_lookup.js | 2
-rw-r--r--  jstests/change_streams/ban_from_views.js | 2
-rw-r--r--  jstests/change_streams/change_stream.js | 46
-rw-r--r--  jstests/change_streams/collation.js | 34
-rw-r--r--  jstests/change_streams/lookup_post_image.js | 38
-rw-r--r--  jstests/change_streams/metadata_notifications.js | 20
-rw-r--r--  jstests/change_streams/only_wake_getmore_for_relevant_changes.js | 8
-rw-r--r--  jstests/change_streams/shell_helper.js | 8
-rw-r--r--  jstests/change_streams/start_at_cluster_time.js | 8
-rw-r--r--  jstests/change_streams/whole_cluster.js | 12
-rw-r--r--  jstests/change_streams/whole_cluster_metadata_notifications.js | 32
-rw-r--r--  jstests/change_streams/whole_cluster_resumability.js | 14
-rw-r--r--  jstests/change_streams/whole_db.js | 8
-rw-r--r--  jstests/change_streams/whole_db_metadata_notifications.js | 30
-rw-r--r--  jstests/change_streams/whole_db_resumability.js | 14
-rw-r--r--  jstests/client_encrypt/fle_auto_decrypt.js | 5
-rw-r--r--  jstests/client_encrypt/fle_aws_faults.js | 6
-rw-r--r--  jstests/client_encrypt/fle_encrypt_decrypt_shell.js | 2
-rw-r--r--  jstests/client_encrypt/fle_key_faults.js | 2
-rw-r--r--  jstests/client_encrypt/fle_keys.js | 6
-rw-r--r--  jstests/client_encrypt/lib/fle_command_line_explicit_encryption.js | 3
-rw-r--r--  jstests/concurrency/fsm_workload_helpers/snapshot_read_utils.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_base.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_graph_lookup.js | 4
-rw-r--r--  jstests/concurrency/fsm_workloads/collmod.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/compact.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/convert_to_capped_collection.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/count.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/create_capped_collection.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/create_database.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/create_index_background.js | 10
-rw-r--r--  jstests/concurrency/fsm_workloads/create_index_background_unique.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/distinct.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/distinct_noindex.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_remove.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js | 4
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update_grow.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_base.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_text.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_ttl.js | 4
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_where.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/invalidated_cursors.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_drop.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_inline.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js | 11
-rw-r--r--  jstests/concurrency/fsm_workloads/plan_cache_drop_database.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/random_moveChunk_base.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js | 7
-rw-r--r--  jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/reindex.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/remove_single_document.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_collection_droptarget.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/secondary_reads.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/sharded_base_partitioned.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/sharded_moveChunk_drop_shard_key_index.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js | 4
-rw-r--r--  jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_and_bulk_insert.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_array.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_check_index.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_multifield.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_rename.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_replace.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_simple.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_where.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js | 8
-rw-r--r--  jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/yield.js | 8
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_geo_near.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js | 4
-rw-r--r--  jstests/core/SERVER-23626.js | 6
-rw-r--r--  jstests/core/add_skip_stage_before_fetch.js | 2
-rw-r--r--  jstests/core/agg_hint.js | 14
-rw-r--r--  jstests/core/aggregation_getmore_batchsize.js | 2
-rw-r--r--  jstests/core/apply_ops_dups.js | 2
-rw-r--r--  jstests/core/apply_ops_invalid_index_spec.js | 2
-rw-r--r--  jstests/core/awaitdata_getmore_cmd.js | 10
-rw-r--r--  jstests/core/background_index_multikey.js | 14
-rw-r--r--  jstests/core/batch_write_collation_estsize.js | 4
-rw-r--r--  jstests/core/batch_write_command_delete.js | 4
-rw-r--r--  jstests/core/bindata_indexonly.js | 8
-rw-r--r--  jstests/core/bittest.js | 24
-rw-r--r--  jstests/core/bulk_legacy_enforce_gle.js | 4
-rw-r--r--  jstests/core/bypass_doc_validation.js | 8
-rw-r--r--  jstests/core/capped6.js | 2
-rw-r--r--  jstests/core/capped_queries_and_id_index.js | 6
-rw-r--r--  jstests/core/capped_update.js | 6
-rw-r--r--  jstests/core/collation.js | 282
-rw-r--r--  jstests/core/collation_convert_to_capped.js | 4
-rw-r--r--  jstests/core/collation_plan_cache.js | 2
-rw-r--r--  jstests/core/collation_update.js | 93
-rw-r--r--  jstests/core/collmod_without_uuid.js | 2
-rw-r--r--  jstests/core/commands_namespace_parsing.js | 2
-rw-r--r--  jstests/core/compare_timestamps.js | 4
-rw-r--r--  jstests/core/constructors.js | 2
-rw-r--r--  jstests/core/convert_to_capped.js | 2
-rw-r--r--  jstests/core/count_hint.js | 4
-rw-r--r--  jstests/core/coveredIndex1.js | 12
-rw-r--r--  jstests/core/covered_multikey.js | 22
-rw-r--r--  jstests/core/currentop.js | 2
-rw-r--r--  jstests/core/cursora.js | 2
-rw-r--r--  jstests/core/dbcase.js | 2
-rw-r--r--  jstests/core/dbstats.js | 2
-rw-r--r--  jstests/core/distinct1.js | 36
-rw-r--r--  jstests/core/distinct3.js | 4
-rw-r--r--  jstests/core/distinct_compound_index.js | 8
-rw-r--r--  jstests/core/distinct_multikey.js | 34
-rw-r--r--  jstests/core/doc_validation.js | 82
-rw-r--r--  jstests/core/dotted_path_in_null.js | 10
-rw-r--r--  jstests/core/drop_index.js | 2
-rw-r--r--  jstests/core/elemMatchProjection.js | 6
-rw-r--r--  jstests/core/elemmatch_or_pushdown.js | 28
-rw-r--r--  jstests/core/ensure_sorted.js | 10
-rw-r--r--  jstests/core/exhaust.js | 2
-rw-r--r--  jstests/core/existsa.js | 14
-rw-r--r--  jstests/core/explain_agg_write_concern.js | 2
-rw-r--r--  jstests/core/explain_distinct.js | 4
-rw-r--r--  jstests/core/explain_execution_error.js | 2
-rw-r--r--  jstests/core/explain_find_and_modify.js | 2
-rw-r--r--  jstests/core/explain_multikey.js | 2
-rw-r--r--  jstests/core/expr.js | 48
-rw-r--r--  jstests/core/expr_index_use.js | 50
-rw-r--r--  jstests/core/field_name_validation.js | 26
-rw-r--r--  jstests/core/filemd5.js | 6
-rw-r--r--  jstests/core/find4.js | 6
-rw-r--r--  jstests/core/find5.js | 8
-rw-r--r--  jstests/core/find_and_modify_concurrent_update.js | 2
-rw-r--r--  jstests/core/find_and_modify_empty_update.js | 8
-rw-r--r--  jstests/core/find_dedup.js | 14
-rw-r--r--  jstests/core/find_getmore_bsonsize.js | 6
-rw-r--r--  jstests/core/find_getmore_cmd.js | 2
-rw-r--r--  jstests/core/fsync.js | 4
-rw-r--r--  jstests/core/fts1.js | 8
-rw-r--r--  jstests/core/fts_array.js | 4
-rw-r--r--  jstests/core/fts_casesensitive.js | 2
-rw-r--r--  jstests/core/fts_diacritic_and_caseinsensitive.js | 2
-rw-r--r--  jstests/core/fts_diacritic_and_casesensitive.js | 2
-rw-r--r--  jstests/core/fts_diacriticsensitive.js | 2
-rw-r--r--  jstests/core/fts_dotted_prefix_fields.js | 6
-rw-r--r--  jstests/core/fts_explain.js | 2
-rw-r--r--  jstests/core/fts_index.js | 2
-rw-r--r--  jstests/core/fts_index2.js | 2
-rw-r--r--  jstests/core/fts_index3.js | 48
-rw-r--r--  jstests/core/fts_index_version1.js | 6
-rw-r--r--  jstests/core/fts_index_version2.js | 2
-rw-r--r--  jstests/core/fts_partition_no_multikey.js | 2
-rw-r--r--  jstests/core/fts_score_sort.js | 6
-rw-r--r--  jstests/core/fts_spanish.js | 9
-rw-r--r--  jstests/core/fts_trailing_fields.js | 4
-rw-r--r--  jstests/core/function_string_representations.js | 2
-rw-r--r--  jstests/core/geo1.js | 4
-rw-r--r--  jstests/core/geo10.js | 6
-rw-r--r--  jstests/core/geo_2d_trailing_fields.js | 6
-rw-r--r--  jstests/core/geo_allowedcomparisons.js | 2
-rw-r--r--  jstests/core/geo_array0.js | 4
-rw-r--r--  jstests/core/geo_big_polygon.js | 14
-rw-r--r--  jstests/core/geo_big_polygon2.js | 2
-rw-r--r--  jstests/core/geo_big_polygon3.js | 4
-rw-r--r--  jstests/core/geo_center_sphere1.js | 2
-rw-r--r--  jstests/core/geo_center_sphere2.js | 2
-rw-r--r--  jstests/core/geo_distinct.js | 10
-rw-r--r--  jstests/core/geo_mindistance.js | 6
-rw-r--r--  jstests/core/geo_multinest0.js | 6
-rw-r--r--  jstests/core/geo_multinest1.js | 4
-rw-r--r--  jstests/core/geo_operator_crs.js | 6
-rw-r--r--  jstests/core/geo_polygon1_noindex.js | 2
-rw-r--r--  jstests/core/geo_polygon3.js | 11
-rw-r--r--  jstests/core/geo_s2cursorlimitskip.js | 2
-rw-r--r--  jstests/core/geo_s2disjoint_holes.js | 4
-rw-r--r--  jstests/core/geo_s2dupe_points.js | 2
-rw-r--r--  jstests/core/geo_s2explain.js | 4
-rw-r--r--  jstests/core/geo_s2index.js | 6
-rw-r--r--  jstests/core/geo_s2indexversion1.js | 20
-rw-r--r--  jstests/core/geo_s2meridian.js | 2
-rw-r--r--  jstests/core/geo_s2multi.js | 8
-rw-r--r--  jstests/core/geo_s2nongeoarray.js | 2
-rw-r--r--  jstests/core/geo_s2ordering.js | 2
-rw-r--r--  jstests/core/geo_s2sparse.js | 2
-rw-r--r--  jstests/core/geo_s2twofields.js | 2
-rw-r--r--  jstests/core/geo_s2within_line_polygon_sphere.js | 21
-rw-r--r--  jstests/core/geo_update1.js | 6
-rw-r--r--  jstests/core/geo_update2.js | 6
-rw-r--r--  jstests/core/geo_validate.js | 2
-rw-r--r--  jstests/core/geonear_key.js | 12
-rw-r--r--  jstests/core/getmore_cmd_maxtimems.js | 4
-rw-r--r--  jstests/core/getmore_invalidated_cursors.js | 2
-rw-r--r--  jstests/core/getmore_invalidated_documents.js | 60
-rw-r--r--  jstests/core/grow_hash_table.js | 2
-rw-r--r--  jstests/core/idhack.js | 18
-rw-r--r--  jstests/core/index_bounds_code.js | 8
-rw-r--r--  jstests/core/index_bounds_maxkey.js | 8
-rw-r--r--  jstests/core/index_bounds_minkey.js | 8
-rw-r--r--  jstests/core/index_bounds_object.js | 10
-rw-r--r--  jstests/core/index_bounds_pipe.js | 14
-rw-r--r--  jstests/core/index_bounds_timestamp.js | 2
-rw-r--r--  jstests/core/index_decimal.js | 10
-rw-r--r--  jstests/core/index_elemmatch2.js | 8
-rw-r--r--  jstests/core/index_filter_commands.js | 6
-rw-r--r--  jstests/core/index_multiple_compatibility.js | 28
-rw-r--r--  jstests/core/index_partial_2dsphere.js | 12
-rw-r--r--  jstests/core/index_partial_create_drop.js | 2
-rw-r--r--  jstests/core/index_partial_read_ops.js | 4
-rw-r--r--  jstests/core/index_partial_validate.js | 2
-rw-r--r--  jstests/core/index_partial_write_ops.js | 26
-rw-r--r--  jstests/core/index_stats.js | 20
-rw-r--r--  jstests/core/index_type_change.js | 4
-rw-r--r--  jstests/core/indexes_multiple_commands.js | 2
-rw-r--r--  jstests/core/indexu.js | 16
-rw-r--r--  jstests/core/insert1.js | 2
-rw-r--r--  jstests/core/insert_illegal_doc.js | 2
-rw-r--r--  jstests/core/json_schema/json_schema.js | 42
-rw-r--r--  jstests/core/json_schema/misc_validation.js | 58
-rw-r--r--  jstests/core/kill_cursors.js | 2
-rw-r--r--  jstests/core/killop_drop_collection.js | 4
-rw-r--r--  jstests/core/min_max_bounds.js | 6
-rw-r--r--  jstests/core/min_max_key.js | 2
-rw-r--r--  jstests/core/minmax_edge.js | 18
-rw-r--r--  jstests/core/mr1.js | 2
-rw-r--r--  jstests/core/mr5.js | 12
-rw-r--r--  jstests/core/mr_bigobject_replace.js | 2
-rw-r--r--  jstests/core/mr_optim.js | 2
-rw-r--r--  jstests/core/mr_tolerates_js_exception.js | 2
-rw-r--r--  jstests/core/nan.js | 24
-rw-r--r--  jstests/core/natural.js | 6
-rw-r--r--  jstests/core/no_db_created.js | 6
-rw-r--r--  jstests/core/not2.js | 20
-rw-r--r--  jstests/core/null_query_semantics.js | 4
-rw-r--r--  jstests/core/opcounters_write_cmd.js | 8
-rw-r--r--  jstests/core/operation_latency_histogram.js | 8
-rw-r--r--  jstests/core/optimized_match_explain.js | 8
-rw-r--r--  jstests/core/or4.js | 26
-rw-r--r--  jstests/core/or_always_false.js | 2
-rw-r--r--  jstests/core/profile2.js | 4
-rw-r--r--  jstests/core/profile_agg.js | 6
-rw-r--r--  jstests/core/profile_count.js | 8
-rw-r--r--  jstests/core/profile_delete.js | 18
-rw-r--r--  jstests/core/profile_distinct.js | 4
-rw-r--r--  jstests/core/profile_find.js | 12
-rw-r--r--  jstests/core/profile_findandmodify.js | 16
-rw-r--r--  jstests/core/profile_getmore.js | 10
-rw-r--r--  jstests/core/profile_insert.js | 6
-rw-r--r--  jstests/core/profile_mapreduce.js | 8
-rw-r--r--  jstests/core/profile_query_hash.js | 8
-rw-r--r--  jstests/core/profile_repair_cursor.js | 2
-rw-r--r--  jstests/core/profile_sampling.js | 8
-rw-r--r--  jstests/core/profile_update.js | 20
-rw-r--r--  jstests/core/projection_dotted_paths.js | 6
-rw-r--r--  jstests/core/push.js | 14
-rw-r--r--  jstests/core/push_sort.js | 13
-rw-r--r--  jstests/core/record_store_count.js | 4
-rw-r--r--  jstests/core/regex.js | 12
-rw-r--r--  jstests/core/regex_not_id.js | 2
-rw-r--r--  jstests/core/remove2.js | 2
-rw-r--r--  jstests/core/remove7.js | 4
-rw-r--r--  jstests/core/remove9.js | 2
-rw-r--r--  jstests/core/remove_undefined.js | 8
-rw-r--r--  jstests/core/removea.js | 4
-rw-r--r--  jstests/core/removeb.js | 4
-rw-r--r--  jstests/core/rename4.js | 4
-rw-r--r--  jstests/core/rename_change_target_type.js | 4
-rw-r--r--  jstests/core/return_key.js | 6
-rw-r--r--  jstests/core/set7.js | 6
-rw-r--r--  jstests/core/set_type_change.js | 2
-rw-r--r--  jstests/core/shell_writeconcern.js | 14
-rw-r--r--  jstests/core/single_batch.js | 2
-rw-r--r--  jstests/core/sort1.js | 10
-rw-r--r--  jstests/core/sort3.js | 6
-rw-r--r--  jstests/core/sort4.js | 12
-rw-r--r--  jstests/core/sort_array.js | 40
-rw-r--r--  jstests/core/sorta.js | 2
-rw-r--r--  jstests/core/sortc.js | 4
-rw-r--r--  jstests/core/sortl.js | 2
-rw-r--r--  jstests/core/splitvector.js | 4
-rw-r--r--  jstests/core/stages_delete.js | 12
-rw-r--r--  jstests/core/stages_ixscan.js | 2
-rw-r--r--  jstests/core/system_profile.js | 4
-rw-r--r--  jstests/core/tailable_cursor_invalidation.js | 2
-rw-r--r--  jstests/core/tailable_getmore_batch_size.js | 2
-rw-r--r--  jstests/core/tailable_skip_limit.js | 16
-rw-r--r--  jstests/core/text_covered_matching.js | 12
-rw-r--r--  jstests/core/top.js | 8
-rw-r--r--  jstests/core/ts1.js | 4
-rw-r--r--  jstests/core/txns/abort_expired_transaction.js | 2
-rw-r--r--  jstests/core/txns/find_and_modify_in_transaction.js | 4
-rw-r--r--  jstests/core/txns/multi_delete_in_transaction.js | 4
-rw-r--r--  jstests/core/txns/multi_update_in_transaction.js | 4
-rw-r--r--  jstests/core/txns/read_concerns.js | 2
-rw-r--r--  jstests/core/txns/read_own_multikey_writes.js | 4
-rw-r--r--  jstests/core/txns/repeatable_reads_in_transaction.js | 5
-rw-r--r--  jstests/core/txns/start_transaction_with_read.js | 2
-rw-r--r--  jstests/core/txns/statement_ids_accepted.js | 10
-rw-r--r--  jstests/core/type_array.js | 34
-rw-r--r--  jstests/core/uniqueness.js | 4
-rw-r--r--  jstests/core/update_addToSet.js | 26
-rw-r--r--  jstests/core/update_affects_indexes.js | 44
-rw-r--r--  jstests/core/update_arrayFilters.js | 194
-rw-r--r--  jstests/core/update_array_offset_positional.js | 32
-rw-r--r--  jstests/core/update_arraymatch6.js | 2
-rw-r--r--  jstests/core/update_bit_examples.js | 6
-rw-r--r--  jstests/core/update_currentdate_examples.js | 6
-rw-r--r--  jstests/core/update_min_max_examples.js | 14
-rw-r--r--  jstests/core/update_modifier_pop.js | 61
-rw-r--r--  jstests/core/update_mul_examples.js | 10
-rw-r--r--  jstests/core/update_multi5.js | 7
-rw-r--r--  jstests/core/update_server-12848.js | 4
-rw-r--r--  jstests/core/updatea.js | 20
-rw-r--r--  jstests/core/updateh.js | 10
-rw-r--r--  jstests/core/updatel.js | 4
-rw-r--r--  jstests/core/upsert_and.js | 12
-rw-r--r--  jstests/core/upsert_fields.js | 2
-rw-r--r--  jstests/core/verify_update_mods.js | 24
-rw-r--r--  jstests/core/views/duplicate_ns.js | 4
-rw-r--r--  jstests/core/views/invalid_system_views.js | 16
-rw-r--r--  jstests/core/views/views_aggregation.js | 6
-rw-r--r--  jstests/core/views/views_all_commands.js | 10
-rw-r--r--  jstests/core/views/views_basic.js | 2
-rw-r--r--  jstests/core/views/views_change.js | 16
-rw-r--r--  jstests/core/views/views_collation.js | 6
-rw-r--r--  jstests/core/views/views_count.js | 2
-rw-r--r--  jstests/core/views/views_distinct.js | 4
-rw-r--r--  jstests/core/views/views_drop.js | 2
-rw-r--r--  jstests/core/views/views_find.js | 4
-rw-r--r--  jstests/core/views/views_rename.js | 2
-rw-r--r--  jstests/core/views/views_stats.js | 6
-rw-r--r--  jstests/core/where4.js | 16
-rw-r--r--  jstests/decimal/decimal_constructors.js | 4
-rw-r--r--  jstests/decimal/decimal_find_basic.js | 6
-rw-r--r--  jstests/decimal/decimal_find_mixed.js | 8
-rw-r--r--  jstests/decimal/decimal_find_query.js | 8
-rw-r--r--  jstests/decimal/decimal_roundtrip_basic.js | 4
-rw-r--r--  jstests/decimal/decimal_update.js | 24
-rw-r--r--  jstests/disk/directoryperdb.js | 8
-rw-r--r--  jstests/disk/killall.js | 2
-rw-r--r--  jstests/disk/too_many_fds.js | 2
-rw-r--r--  jstests/gle/get_last_error.js | 4
-rw-r--r--  jstests/libs/assert_schema_match.js | 8
-rw-r--r--  jstests/libs/geo_near_random.js | 2
-rw-r--r--  jstests/libs/override_methods/mongos_manual_intervention_actions.js | 2
-rw-r--r--  jstests/libs/pin_getmore_cursor.js | 2
-rw-r--r--  jstests/multiVersion/clone_helper.js | 2
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js | 2
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js | 5
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js | 2
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js | 6
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js | 8
-rw-r--r--  jstests/multiVersion/libs/multiversion_rollback.js | 18
-rw-r--r--  jstests/multiVersion/skip_level_upgrade.js | 2
-rw-r--r--  jstests/multiVersion/upgrade_downgrade_cluster.js | 12
-rw-r--r--  jstests/noPassthrough/aggregation_cursor_invalidations.js | 8
-rw-r--r--  jstests/noPassthrough/aggregation_zero_batchsize.js | 2
-rw-r--r--  jstests/noPassthrough/apply_ops_mode.js | 4
-rw-r--r--  jstests/noPassthrough/atomic_rename_collection.js | 4
-rw-r--r--  jstests/noPassthrough/auto_retry_on_network_error.js | 2
-rw-r--r--  jstests/noPassthrough/change_stream_failover.js | 6
-rw-r--r--  jstests/noPassthrough/change_streams_require_majority_read_concern.js | 4
-rw-r--r--  jstests/noPassthrough/change_streams_update_lookup_collation.js | 12
-rw-r--r--  jstests/noPassthrough/characterize_index_builds_on_restart.js | 2
-rw-r--r--  jstests/noPassthrough/client_metadata_log.js | 2
-rw-r--r--  jstests/noPassthrough/client_metadata_slowlog.js | 2
-rw-r--r--  jstests/noPassthrough/commands_handle_kill.js | 2
-rw-r--r--  jstests/noPassthrough/commands_preserve_exec_error_code.js | 2
-rw-r--r--  jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js | 2
-rw-r--r--  jstests/noPassthrough/currentop_includes_await_time.js | 4
-rw-r--r--  jstests/noPassthrough/currentop_query.js | 12
-rw-r--r--  jstests/noPassthrough/durable_view_catalog.js | 4
-rw-r--r--  jstests/noPassthrough/feature_compatibility_version.js | 10
-rw-r--r--  jstests/noPassthrough/filemd5_kill_during_yield.js | 4
-rw-r--r--  jstests/noPassthrough/geo_full.js | 2
-rw-r--r--  jstests/noPassthrough/geo_mnypts_plus_fields.js | 2
-rw-r--r--  jstests/noPassthrough/global_operation_latency_histogram.js | 8
-rw-r--r--  jstests/noPassthrough/ignore_notablescan.js | 4
-rw-r--r--  jstests/noPassthrough/implicit_sessions.js | 40
-rw-r--r--  jstests/noPassthrough/index_killop_standalone.js | 2
-rw-r--r--  jstests/noPassthrough/index_partial_no_explain_cmds.js | 4
-rw-r--r--  jstests/noPassthrough/index_stepdown_after_init.js | 2
-rw-r--r--  jstests/noPassthrough/index_stepdown_during_scan.js | 2
-rw-r--r--  jstests/noPassthrough/index_version_v2.js | 4
-rw-r--r--  jstests/noPassthrough/indexbg1.js | 12
-rw-r--r--  jstests/noPassthrough/indexbg2.js | 6
-rw-r--r--  jstests/noPassthrough/indexbg_drop.js | 2
-rw-r--r--  jstests/noPassthrough/indexbg_killop_apply_ops.js | 2
-rw-r--r--  jstests/noPassthrough/indexbg_killop_primary.js | 2
-rw-r--r--  jstests/noPassthrough/indexbg_killop_primary_after_init.js | 2
-rw-r--r--  jstests/noPassthrough/indexbg_killop_secondary.js | 2
-rw-r--r--  jstests/noPassthrough/indexbg_shutdown.js | 2
-rw-r--r--  jstests/noPassthrough/initial_sync_wt_cache_full.js | 4
-rw-r--r--  jstests/noPassthrough/js_protection.js | 2
-rw-r--r--  jstests/noPassthrough/js_protection_roundtrip.js | 2
-rw-r--r--  jstests/noPassthrough/killop.js | 2
-rw-r--r--  jstests/noPassthrough/latency_includes_lock_acquisition_time.js | 10
-rw-r--r--  jstests/noPassthrough/libs/backup_restore.js | 12
-rw-r--r--  jstests/noPassthrough/libs/concurrent_rename.js | 4
-rw-r--r--  jstests/noPassthrough/list_indexes_with_build_uuids.js | 2
-rw-r--r--  jstests/noPassthrough/log_find_getmore.js | 2
-rw-r--r--  jstests/noPassthrough/logical_session_cache_find_getmore.js | 4
-rw-r--r--  jstests/noPassthrough/logical_session_cursor_checks.js | 4
-rw-r--r--  jstests/noPassthrough/max_bson_depth_parameter.js | 4
-rw-r--r--  jstests/noPassthrough/minvalid.js | 2
-rw-r--r--  jstests/noPassthrough/minvalid2.js | 2
-rw-r--r--  jstests/noPassthrough/noncapped_oplog_creation.js | 2
-rw-r--r--  jstests/noPassthrough/ns1.js | 2
-rw-r--r--  jstests/noPassthrough/predictive_connpool.js | 6
-rw-r--r--  jstests/noPassthrough/profile_agg_multiple_batches.js | 2
-rw-r--r--  jstests/noPassthrough/query_yield_reset_timer.js | 2
-rw-r--r--  jstests/noPassthrough/readConcern_snapshot_mongos.js | 2
-rw-r--r--  jstests/noPassthrough/read_majority.js | 6
-rw-r--r--  jstests/noPassthrough/read_majority_reads.js | 12
-rw-r--r--  jstests/noPassthrough/recovery_wt_cache_full.js | 4
-rw-r--r--  jstests/noPassthrough/replica_set_connection_getmore.js | 2
-rw-r--r--  jstests/noPassthrough/rollback_wt_cache_full.js | 4
-rw-r--r--  jstests/noPassthrough/set_step_params.js | 6
-rw-r--r--  jstests/noPassthrough/shell_can_use_read_concern.js | 2
-rw-r--r--  jstests/noPassthrough/shell_cmd_assertions.js | 2
-rw-r--r--  jstests/noPassthrough/shell_gossip_cluster_time.js | 10
-rw-r--r--  jstests/noPassthrough/shell_retry_writes_uri.js | 20
-rw-r--r--  jstests/noPassthrough/snapshot_reads.js | 2
-rw-r--r--  jstests/noPassthrough/socket_disconnect_kills.js | 6
-rw-r--r--  jstests/noPassthrough/step_down_during_drop_database.js | 2
-rw-r--r--  jstests/noPassthrough/stepdown_query.js | 2
-rw-r--r--  jstests/noPassthrough/sync_write.js | 2
-rw-r--r--  jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js | 2
-rw-r--r--  jstests/noPassthrough/transaction_reaper.js | 2
-rw-r--r--  jstests/noPassthrough/ttl_capped.js | 3
-rw-r--r--  jstests/noPassthrough/ttl_partial_index.js | 4
-rw-r--r--  jstests/noPassthrough/txn_override_causal_consistency.js | 2
-rw-r--r--  jstests/noPassthrough/unsupported_change_stream_deployments.js | 2
-rw-r--r--  jstests/noPassthrough/update_post_image_validation.js | 4
-rw-r--r--  jstests/noPassthrough/update_server-5552.js | 2
-rw-r--r--  jstests/noPassthrough/use_disk.js | 4
-rw-r--r--  jstests/noPassthrough/utf8_paths.js | 2
-rw-r--r--  jstests/noPassthrough/views_legacy.js | 2
-rw-r--r--  jstests/noPassthrough/wt_cache_full.js | 4
-rw-r--r--  jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js | 4
-rw-r--r--  jstests/noPassthrough/wt_cache_full_restart.js | 4
-rw-r--r--  jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js | 2
-rw-r--r--  jstests/noPassthrough/wt_nojournal_skip_recovery.js | 4
-rw-r--r--  jstests/noPassthrough/wt_nojournal_toggle.js | 10
-rw-r--r--  jstests/noPassthrough/wt_operation_stats.js | 2
-rw-r--r--  jstests/noPassthrough/yield_during_writes.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/background.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/bench_test_crud_commands.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/benchrun_substitution.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/btreedel.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/capped_truncate.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/cursor_server_status_metrics.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/find_and_modify_server16469.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/find_cmd.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/geo_mnypts.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/geo_polygon.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/index_boundary_values_validate.js | 16
-rw-r--r--  jstests/noPassthroughWithMongod/index_check10.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/index_check9.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/index_hammer1.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_interrupts.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_restart_secondary.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_updates.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/mr_writeconflict.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/no_balance_collection.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/remove9.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js | 14
-rw-r--r--  jstests/noPassthroughWithMongod/skip_shell_cursor_finalize.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/top_drop.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_repl.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_repl_maintenance.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_sharded.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/validate_interrupt.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/views_invalid.js | 22
-rw-r--r--  jstests/readonly/count.js | 2
-rw-r--r--  jstests/readonly/distinct.js | 2
-rw-r--r--  jstests/readonly/find.js | 2
-rw-r--r--  jstests/readonly/get_more.js | 2
-rw-r--r--  jstests/readonly/temp_collection.js | 2
-rw-r--r--  jstests/readonly/write_ops.js | 2
-rw-r--r--  jstests/replsets/apply_batch_only_goes_forward.js | 8
-rw-r--r--  jstests/replsets/apply_batches_totalMillis.js | 4
-rw-r--r--  jstests/replsets/apply_ops_create_view.js | 2
-rw-r--r--  jstests/replsets/apply_ops_idempotency.js | 50
-rw-r--r--  jstests/replsets/apply_ops_lastop.js | 2
-rw-r--r--  jstests/replsets/auth1.js | 6
-rw-r--r--  jstests/replsets/auth_no_pri.js | 2
-rw-r--r--  jstests/replsets/await_replication_timeout.js | 2
-rw-r--r--  jstests/replsets/background_index.js | 2
-rw-r--r--  jstests/replsets/capped_insert_order.js | 2
-rw-r--r--  jstests/replsets/catchup.js | 2
-rw-r--r--  jstests/replsets/catchup_takeover_one_high_priority.js | 4
-rw-r--r--  jstests/replsets/catchup_takeover_two_nodes_ahead.js | 4
-rw-r--r--  jstests/replsets/chaining_removal.js | 4
-rw-r--r--  jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js | 6
-rw-r--r--  jstests/replsets/clean_shutdown_oplog_state.js | 4
-rw-r--r--  jstests/replsets/collate_id.js | 4
-rw-r--r--  jstests/replsets/dbhash_system_collections.js | 12
-rw-r--r--  jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js | 2
-rw-r--r--  jstests/replsets/drain.js | 6
-rw-r--r--  jstests/replsets/drop_collections_two_phase_rename_drop_target.js | 4
-rw-r--r--  jstests/replsets/drop_collections_two_phase_write_concern.js | 4
-rw-r--r--  jstests/replsets/drop_databases_two_phase.js | 2
-rw-r--r--  jstests/replsets/drop_oplog.js | 2
-rw-r--r--  jstests/replsets/emptycapped.js | 6
-rw-r--r--  jstests/replsets/index_delete.js | 2
-rw-r--r--  jstests/replsets/index_restart_secondary.js | 2
-rw-r--r--  jstests/replsets/initial_sync1.js | 4
-rw-r--r--  jstests/replsets/initial_sync4.js | 2
-rw-r--r--  jstests/replsets/initial_sync_applier_error.js | 2
-rw-r--r--  jstests/replsets/initial_sync_capped_index.js | 4
-rw-r--r--  jstests/replsets/initial_sync_document_validation.js | 2
-rw-r--r--  jstests/replsets/initial_sync_drop_collection.js | 4
-rw-r--r--  jstests/replsets/initial_sync_during_stepdown.js | 4
-rw-r--r--  jstests/replsets/initial_sync_fail_insert_once.js | 2
-rw-r--r--  jstests/replsets/initial_sync_fcv.js | 2
-rw-r--r--  jstests/replsets/initial_sync_invalid_views.js | 4
-rw-r--r--  jstests/replsets/initial_sync_move_forward.js | 14
-rw-r--r--  jstests/replsets/initial_sync_oplog_hole.js | 4
-rw-r--r--  jstests/replsets/initial_sync_oplog_rollover.js | 4
-rw-r--r--  jstests/replsets/initial_sync_rename_collection.js | 4
-rw-r--r--  jstests/replsets/initial_sync_replSetGetStatus.js | 8
-rw-r--r--  jstests/replsets/initial_sync_unsupported_auth_schema.js | 4
-rw-r--r--  jstests/replsets/initial_sync_uuid_not_found.js | 4
-rw-r--r--  jstests/replsets/initial_sync_views.js | 2
-rw-r--r--  jstests/replsets/initial_sync_with_write_load.js | 4
-rw-r--r--  jstests/replsets/interrupted_batch_insert.js | 2
-rw-r--r--  jstests/replsets/last_vote.js | 2
-rw-r--r--  jstests/replsets/lastop.js | 30
-rw-r--r--  jstests/replsets/libs/election_handoff.js | 2
-rw-r--r--  jstests/replsets/libs/rename_across_dbs.js | 4
-rw-r--r--  jstests/replsets/libs/rollback_test.js | 2
-rw-r--r--  jstests/replsets/libs/secondary_reads_test.js | 2
-rw-r--r--  jstests/replsets/libs/tags.js | 34
-rw-r--r--  jstests/replsets/linearizable_read_concern.js | 4
-rw-r--r--  jstests/replsets/localhostAuthBypass.js | 6
-rw-r--r--  jstests/replsets/maintenance_non-blocking.js | 4
-rw-r--r--  jstests/replsets/mr_nonrepl_coll_in_local_db.js | 2
-rw-r--r--  jstests/replsets/noop_writes_wait_for_write_concern.js | 26
-rw-r--r--  jstests/replsets/noop_writes_wait_for_write_concern_fcv.js | 2
-rw-r--r--  jstests/replsets/opcounters_repl.js | 6
-rw-r--r--  jstests/replsets/oplog_format.js | 35
-rw-r--r--  jstests/replsets/oplog_replay_on_startup_with_bad_op.js | 6
-rw-r--r--  jstests/replsets/oplog_term.js | 2
-rw-r--r--  jstests/replsets/oplog_wallclock.js | 6
-rw-r--r--  jstests/replsets/optime.js | 2
-rw-r--r--  jstests/replsets/prepare_transaction_index_build.js | 2
-rw-r--r--  jstests/replsets/read_after_optime.js | 2
-rw-r--r--  jstests/replsets/read_committed.js | 12
-rw-r--r--  jstests/replsets/read_committed_after_rollback.js | 12
-rw-r--r--  jstests/replsets/read_committed_stale_history.js | 6
-rw-r--r--  jstests/replsets/read_committed_with_catalog_changes.js | 56
-rw-r--r--  jstests/replsets/read_majority_two_arbs.js | 3
-rw-r--r--  jstests/replsets/read_operations_during_step_down.js | 2
-rw-r--r--  jstests/replsets/replset2.js | 6
-rw-r--r--  jstests/replsets/replset8.js | 8
-rw-r--r--  jstests/replsets/replsetprio1.js | 4
-rw-r--r--  jstests/replsets/restore_term.js | 2
-rw-r--r--  jstests/replsets/retryable_writes_direct_write_to_config_transactions.js | 10
-rw-r--r--  jstests/replsets/retryable_writes_failover.js | 4
-rw-r--r--  jstests/replsets/rollback_all_op_types.js | 10
-rw-r--r--  jstests/replsets/rollback_collmods.js | 8
-rw-r--r--  jstests/replsets/rollback_creates_rollback_directory.js | 6
-rw-r--r--  jstests/replsets/rollback_crud_op_sequences.js | 40
-rw-r--r--  jstests/replsets/rollback_ddl_op_sequences.js | 52
-rw-r--r--  jstests/replsets/rollback_drop_database.js | 4
-rw-r--r--  jstests/replsets/rollback_rename_collection_on_sync_source.js | 6
-rw-r--r--  jstests/replsets/rollback_views.js | 18
-rw-r--r--  jstests/replsets/rollback_waits_for_bgindex_completion.js | 2
-rw-r--r--  jstests/replsets/rollback_with_socket_error_then_steady_state.js | 8
-rw-r--r--  jstests/replsets/rslib.js | 2
-rw-r--r--  jstests/replsets/secondary_as_sync_source.js | 2
-rw-r--r--  jstests/replsets/server_status_metrics.js | 12
-rw-r--r--  jstests/replsets/server_status_repl.js | 2
-rw-r--r--  jstests/replsets/shutdown_primary.js | 6
-rw-r--r--  jstests/replsets/single_server_majority.js | 2
-rw-r--r--  jstests/replsets/slave_delay_clean_shutdown.js | 4
-rw-r--r--  jstests/replsets/slavedelay1.js | 4
-rw-r--r--  jstests/replsets/step_down_during_draining.js | 6
-rw-r--r--  jstests/replsets/step_down_during_draining2.js | 6
-rw-r--r--  jstests/replsets/step_down_during_draining3.js | 4
-rw-r--r--  jstests/replsets/stepdown.js | 4
-rw-r--r--  jstests/replsets/stepdown3.js | 2
-rw-r--r--  jstests/replsets/stepdown_catch_up_opt.js | 2
-rw-r--r--  jstests/replsets/stepdown_kill_other_ops.js | 2
-rw-r--r--  jstests/replsets/stepdown_killop.js | 4
-rw-r--r--  jstests/replsets/stepdown_long_wait_time.js | 2
-rw-r--r--  jstests/replsets/stepdown_needs_electable_secondary.js | 12
-rw-r--r--  jstests/replsets/stepdown_needs_majority.js | 6
-rw-r--r--  jstests/replsets/stepup.js | 4
-rw-r--r--  jstests/replsets/sync2.js | 4
-rw-r--r--  jstests/replsets/system_profile.js | 6
-rw-r--r--  jstests/replsets/tags2.js | 6
-rw-r--r--  jstests/replsets/tags_with_reconfig.js | 8
-rw-r--r--  jstests/replsets/temp_namespace.js | 2
-rw-r--r--  jstests/replsets/temp_namespace_restart_as_standalone.js | 2
-rw-r--r--  jstests/replsets/too_stale_secondary.js | 5
-rw-r--r--  jstests/replsets/transaction_table_multi_statement_txn.js | 4
-rw-r--r--  jstests/replsets/transactions_during_step_down.js | 4
-rw-r--r--  jstests/replsets/write_concern_after_stepdown.js | 4
-rw-r--r--  jstests/replsets/write_concern_after_stepdown_and_stepup.js | 4
-rw-r--r--  jstests/serial_run/index_multi.js | 4
-rw-r--r--  jstests/sharding/accurate_count_with_predicate.js | 4
-rw-r--r--  jstests/sharding/addshard1.js | 6
-rw-r--r--  jstests/sharding/addshard2.js | 2
-rw-r--r--  jstests/sharding/addshard5.js | 2
-rw-r--r--  jstests/sharding/agg_project_limit_pipe_split.js | 2
-rw-r--r--  jstests/sharding/agg_sort.js | 4
-rw-r--r--  jstests/sharding/aggregates_during_balancing.js | 4
-rw-r--r--  jstests/sharding/aggregation_currentop.js | 2
-rw-r--r--  jstests/sharding/aggregations_in_session.js | 2
-rw-r--r--  jstests/sharding/all_config_servers_blackholed_from_mongos.js | 2
-rw-r--r--  jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js | 6
-rw-r--r--  jstests/sharding/allow_partial_results.js | 2
-rw-r--r--  jstests/sharding/array_shard_key.js | 20
-rw-r--r--  jstests/sharding/auth.js | 6
-rw-r--r--  jstests/sharding/authCommands.js | 5
-rw-r--r--  jstests/sharding/auth_repl.js | 2
-rw-r--r--  jstests/sharding/auth_slaveok_routing.js | 2
-rw-r--r--  jstests/sharding/authmr.js | 4
-rw-r--r--  jstests/sharding/authwhere.js | 4
-rw-r--r--  jstests/sharding/auto_rebalance_parallel.js | 8
-rw-r--r--  jstests/sharding/auto_rebalance_parallel_replica_sets.js | 8
-rw-r--r--  jstests/sharding/autodiscover_config_rs_from_secondary.js | 2
-rw-r--r--  jstests/sharding/autosplit.js | 2
-rw-r--r--  jstests/sharding/autosplit_heuristics.js | 2
-rw-r--r--  jstests/sharding/autosplit_with_balancer.js | 2
-rw-r--r--  jstests/sharding/balance_repl.js | 2
-rw-r--r--  jstests/sharding/balancer_window.js | 23
-rw-r--r--  jstests/sharding/basic_drop_coll.js | 6
-rw-r--r--  jstests/sharding/basic_split.js | 4
-rw-r--r--  jstests/sharding/batch_write_command_sharded.js | 6
-rw-r--r--  jstests/sharding/bulk_insert.js | 22
-rw-r--r--  jstests/sharding/bulk_shard_insert.js | 2
-rw-r--r--  jstests/sharding/change_stream_chunk_migration.js | 40
-rw-r--r--  jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js | 2
-rw-r--r--  jstests/sharding/change_stream_lookup_single_shard_cluster.js | 2
-rw-r--r--  jstests/sharding/change_stream_metadata_notifications.js | 10
-rw-r--r--  jstests/sharding/change_stream_read_preference.js | 12
-rw-r--r--  jstests/sharding/change_stream_show_migration_events.js | 40
-rw-r--r--  jstests/sharding/change_stream_update_lookup_collation.js | 16
-rw-r--r--  jstests/sharding/change_stream_update_lookup_read_concern.js | 6
-rw-r--r--  jstests/sharding/change_streams.js | 32
-rw-r--r--  jstests/sharding/change_streams_establishment_finds_new_shards.js | 4
-rw-r--r--  jstests/sharding/change_streams_primary_shard_unaware.js | 10
-rw-r--r--  jstests/sharding/change_streams_shards_start_in_sync.js | 6
-rw-r--r--  jstests/sharding/change_streams_unsharded_becomes_sharded.js | 20
-rw-r--r--  jstests/sharding/change_streams_whole_db.js | 22
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_hashed.js | 2
-rw-r--r--  jstests/sharding/clone_catalog_data.js | 4
-rw-r--r--  jstests/sharding/coll_epoch_test1.js | 6
-rw-r--r--  jstests/sharding/coll_epoch_test2.js | 11
-rw-r--r--  jstests/sharding/collation_lookup.js | 8
-rw-r--r--  jstests/sharding/collation_targeting.js | 74
-rw-r--r--  jstests/sharding/collation_targeting_inherited.js | 70
-rw-r--r--  jstests/sharding/config_rs_no_primary.js | 2
-rw-r--r--  jstests/sharding/convert_to_and_from_sharded.js | 12
-rw-r--r--  jstests/sharding/count_config_servers.js | 2
-rw-r--r--  jstests/sharding/count_slaveok.js | 2
-rw-r--r--  jstests/sharding/covered_shard_key_indexes.js | 10
-rw-r--r--  jstests/sharding/create_idx_empty_primary.js | 2
-rw-r--r--  jstests/sharding/cursor1.js | 2
-rw-r--r--  jstests/sharding/cursor_cleanup.js | 4
-rw-r--r--  jstests/sharding/cursor_timeout.js | 2
-rw-r--r--  jstests/sharding/cursor_valid_after_shard_stepdown.js | 4
-rw-r--r--  jstests/sharding/delete_during_migrate.js | 2
-rw-r--r--  jstests/sharding/diffservers1.js | 6
-rw-r--r--  jstests/sharding/drop_sharded_db.js | 2
-rw-r--r--  jstests/sharding/empty_doc_results.js | 2
-rw-r--r--  jstests/sharding/enable_sharding_basic.js | 6
-rw-r--r--  jstests/sharding/enforce_zone_policy.js | 2
-rw-r--r--  jstests/sharding/error_during_agg_getmore.js | 4
-rw-r--r--  jstests/sharding/error_propagation.js | 4
-rw-r--r--  jstests/sharding/exact_shard_key_target.js | 20
-rw-r--r--  jstests/sharding/explainFind_stale_mongos.js | 2
-rw-r--r--  jstests/sharding/explain_agg_read_pref.js | 2
-rw-r--r--  jstests/sharding/features1.js | 14
-rw-r--r--  jstests/sharding/features3.js | 2
-rw-r--r--  jstests/sharding/find_getmore_cmd.js | 12
-rw-r--r--  jstests/sharding/findandmodify1.js | 2
-rw-r--r--  jstests/sharding/forget_mr_temp_ns.js | 2
-rw-r--r--  jstests/sharding/fts_score_sort_sharded.js | 8
-rw-r--r--  jstests/sharding/geo_near_sharded.js | 2
-rw-r--r--  jstests/sharding/geo_near_sort.js | 8
-rw-r--r--  jstests/sharding/graph_lookup.js | 4
-rw-r--r--  jstests/sharding/idhack_sharded.js | 6
-rw-r--r--  jstests/sharding/implicit_db_creation.js | 4
-rw-r--r--  jstests/sharding/in_memory_sort_limit.js | 2
-rw-r--r--  jstests/sharding/index1.js | 2
-rw-r--r--  jstests/sharding/inserts_consistent.js | 6
-rw-r--r--  jstests/sharding/invalid_system_views_sharded_collection.js | 14
-rw-r--r--  jstests/sharding/json_schema.js | 12
-rw-r--r--  jstests/sharding/jumbo1.js | 2
-rw-r--r--  jstests/sharding/key_many.js | 3
-rw-r--r--  jstests/sharding/kill_pinned_cursor.js | 2
-rw-r--r--  jstests/sharding/killop.js | 2
-rw-r--r--  jstests/sharding/lagged_config_secondary.js | 4
-rw-r--r--  jstests/sharding/large_chunk.js | 2
-rw-r--r--  jstests/sharding/large_skip_one_shard.js | 2
-rw-r--r--  jstests/sharding/linearizable_read_concern.js | 2
-rw-r--r--  jstests/sharding/listDatabases.js | 10
-rw-r--r--  jstests/sharding/localhostAuthBypass.js | 6
-rw-r--r--  jstests/sharding/lookup.js | 76
-rw-r--r--  jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js | 6
-rw-r--r--  jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js | 4
-rw-r--r--  jstests/sharding/lookup_change_stream_post_image_id_shard_key.js | 16
-rw-r--r--  jstests/sharding/lookup_mongod_unaware.js | 18
-rw-r--r--  jstests/sharding/lookup_stale_mongos.js | 24
-rw-r--r--  jstests/sharding/mapReduce_inSharded.js | 6
-rw-r--r--  jstests/sharding/mapReduce_inSharded_outSharded.js | 2
-rw-r--r--  jstests/sharding/mapReduce_nonSharded.js | 2
-rw-r--r--  jstests/sharding/mapReduce_outSharded.js | 2
-rw-r--r--  jstests/sharding/mapReduce_outSharded_checkUUID.js | 6
-rw-r--r--  jstests/sharding/max_time_ms_sharded.js | 2
-rw-r--r--  jstests/sharding/merge_chunks_compound_shard_key.js | 10
-rw-r--r--  jstests/sharding/merge_chunks_test.js | 8
-rw-r--r--  jstests/sharding/migrateBig.js | 7
-rw-r--r--  jstests/sharding/migrateBig_balancer.js | 2
-rw-r--r--  jstests/sharding/migrate_overwrite_id.js | 4
-rw-r--r--  jstests/sharding/migration_critical_section_concurrency.js | 14
-rw-r--r--  jstests/sharding/migration_ignore_interrupts_1.js | 6
-rw-r--r--  jstests/sharding/migration_ignore_interrupts_2.js | 2
-rw-r--r--  jstests/sharding/migration_ignore_interrupts_3.js | 4
-rw-r--r--  jstests/sharding/migration_ignore_interrupts_4.js | 8
-rw-r--r--  jstests/sharding/migration_move_chunk_after_receive.js | 8
-rw-r--r--  jstests/sharding/migration_sets_fromMigrate_flag.js | 14
-rw-r--r--  jstests/sharding/migration_with_source_ops.js | 14
-rw-r--r--  jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js | 2
-rw-r--r--  jstests/sharding/missing_key.js | 4
-rw-r--r--  jstests/sharding/mongos_no_detect_sharding.js | 4
-rw-r--r--  jstests/sharding/mongos_query_comment.js | 2
-rw-r--r--  jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js | 40
-rw-r--r--  jstests/sharding/mongos_rs_shard_failure_tolerance.js | 40
-rw-r--r--  jstests/sharding/mongos_shard_failure_tolerance.js | 40
-rw-r--r--  jstests/sharding/mongos_validate_writes.js | 12
-rw-r--r--  jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js | 4
-rw-r--r--  jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js | 2
-rw-r--r--  jstests/sharding/move_chunk_open_cursors.js | 2
-rw-r--r--  jstests/sharding/move_chunk_remove_with_write_retryability.js | 2
-rw-r--r--  jstests/sharding/move_primary_clone_test.js | 4
-rw-r--r--  jstests/sharding/movechunk_include.js | 2
-rw-r--r--  jstests/sharding/movechunk_interrupt_at_primary_stepdown.js | 2
-rw-r--r--  jstests/sharding/movechunk_parallel.js | 8
-rw-r--r--  jstests/sharding/mrShardedOutput.js | 4
-rw-r--r--  jstests/sharding/mr_and_agg_versioning.js | 2
-rw-r--r--  jstests/sharding/mr_shard_version.js | 2
-rw-r--r--  jstests/sharding/multi_mongos2.js | 8
-rw-r--r--  jstests/sharding/multi_mongos2a.js | 2
-rw-r--r--  jstests/sharding/multi_shard_transaction_without_majority_reads.js | 12
-rw-r--r--  jstests/sharding/multi_write_target.js | 20
-rw-r--r--  jstests/sharding/oplog_document_key.js | 56
-rw-r--r--  jstests/sharding/parallel.js | 2
-rw-r--r--  jstests/sharding/prefix_shard_key.js | 8
-rw-r--r--  jstests/sharding/presplit.js | 2
-rw-r--r--  jstests/sharding/primary_config_server_blackholed_from_mongos.js | 4
-rw-r--r--  jstests/sharding/printShardingStatus.js | 10
-rw-r--r--  jstests/sharding/query_config.js | 4
-rw-r--r--  jstests/sharding/query_sharded.js | 4
-rw-r--r--  jstests/sharding/read_pref_multi_mongos_stale_config.js | 4
-rw-r--r--  jstests/sharding/recovering_slaveok.js | 8
-rw-r--r--  jstests/sharding/refine_collection_shard_key_basic.js | 30
-rw-r--r--  jstests/sharding/refine_collection_shard_key_jumbo.js | 2
-rw-r--r--  jstests/sharding/regex_targeting.js | 133
-rw-r--r--  jstests/sharding/remove2.js | 2
-rw-r--r--  jstests/sharding/rename.js | 8
-rw-r--r--  jstests/sharding/rename_across_mongos.js | 2
-rw-r--r--  jstests/sharding/replication_with_undefined_shard_key.js | 6
-rw-r--r--  jstests/sharding/replmonitor_bad_seed.js | 2
-rw-r--r--  jstests/sharding/resume_change_stream.js | 36
-rw-r--r--  jstests/sharding/resume_change_stream_from_stale_mongos.js | 12
-rw-r--r--  jstests/sharding/resume_change_stream_on_subset_of_shards.js | 8
-rw-r--r--  jstests/sharding/retryable_writes.js | 8
-rw-r--r--  jstests/sharding/return_partial_shards_down.js | 2
-rw-r--r--  jstests/sharding/safe_secondary_reads_drop_recreate.js | 14
-rw-r--r--  jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js | 14
-rw-r--r--  jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js | 14
-rw-r--r--  jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js | 2
-rw-r--r--  jstests/sharding/secondary_shard_versioning.js | 2
-rw-r--r--  jstests/sharding/shard1.js | 6
-rw-r--r--  jstests/sharding/shard2.js | 14
-rw-r--r--  jstests/sharding/shard3.js | 2
-rw-r--r--  jstests/sharding/shard7.js | 4
-rw-r--r--  jstests/sharding/shard_aware_init.js | 2
-rw-r--r--  jstests/sharding/shard_aware_init_secondaries.js | 2
-rw-r--r--  jstests/sharding/shard_aware_primary_failover.js | 2
-rw-r--r--  jstests/sharding/shard_collection_basic.js | 28
-rw-r--r--  jstests/sharding/shard_collection_existing_zones.js | 2
-rw-r--r--  jstests/sharding/shard_collection_verify_initial_chunks.js | 2
-rw-r--r--  jstests/sharding/shard_existing.js | 2
-rw-r--r--  jstests/sharding/shard_existing_coll_chunk_count.js | 4
-rw-r--r--  jstests/sharding/shard_identity_rollback.js | 4
-rw-r--r--  jstests/sharding/shard_insert_getlasterror_w2.js | 6
-rw-r--r--  jstests/sharding/shard_keycount.js | 4
-rw-r--r--  jstests/sharding/shard_kill_and_pooling.js | 2
-rw-r--r--  jstests/sharding/sharded_limit_batchsize.js | 8
-rw-r--r--  jstests/sharding/sharded_profile.js | 2
-rw-r--r--  jstests/sharding/sharding_balance1.js | 2
-rw-r--r--  jstests/sharding/sharding_balance2.js | 4
-rw-r--r--  jstests/sharding/sharding_balance3.js | 2
-rw-r--r--  jstests/sharding/sharding_balance4.js | 4
-rw-r--r--  jstests/sharding/sharding_migrate_cursor1.js | 2
-rw-r--r--  jstests/sharding/sharding_multiple_ns_rs.js | 4
-rw-r--r--  jstests/sharding/sharding_rs1.js | 2
-rw-r--r--  jstests/sharding/sharding_rs2.js | 6
-rw-r--r--  jstests/sharding/sharding_statistics_server_status.js | 2
-rw-r--r--  jstests/sharding/shards_and_config_return_last_committed_optime.js | 4
-rw-r--r--  jstests/sharding/snapshot_cursor_commands_mongos.js | 4
-rw-r--r--  jstests/sharding/split_with_force.js | 4
-rw-r--r--  jstests/sharding/split_with_force_small.js | 4
-rw-r--r--  jstests/sharding/stale_mongos_updates_and_removes.js | 8
-rw-r--r--  jstests/sharding/stale_version_write.js | 6
-rw-r--r--  jstests/sharding/startup_with_all_configs_down.js | 2
-rw-r--r--  jstests/sharding/stats.js | 2
-rw-r--r--  jstests/sharding/test_stacked_migration_cleanup.js | 4
-rw-r--r--  jstests/sharding/time_zone_info_mongos.js | 4
-rw-r--r--  jstests/sharding/top_chunk_autosplit.js | 4
-rw-r--r--  jstests/sharding/trace_missing_docs_test.js | 6
-rw-r--r--  jstests/sharding/transactions_causal_consistency.js | 8
-rw-r--r--  jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js | 4
-rw-r--r--  jstests/sharding/transactions_implicit_abort.js | 6
-rw-r--r--  jstests/sharding/transactions_multi_writes.js | 17
-rw-r--r--  jstests/sharding/transactions_read_concerns.js | 8
-rw-r--r--  jstests/sharding/transactions_reject_writes_for_moved_chunks.js | 10
-rw-r--r--  jstests/sharding/transactions_snapshot_errors_first_statement.js | 10
-rw-r--r--  jstests/sharding/transactions_snapshot_errors_subsequent_statements.js | 6
-rw-r--r--  jstests/sharding/transactions_stale_database_version_errors.js | 10
-rw-r--r--  jstests/sharding/transactions_stale_shard_version_errors.js | 11
-rw-r--r--  jstests/sharding/transactions_target_at_point_in_time.js | 6
-rw-r--r--  jstests/sharding/transactions_view_resolution.js | 8
-rw-r--r--  jstests/sharding/transactions_writes_not_retryable.js | 3
-rw-r--r--  jstests/sharding/txn_recover_decision_using_recovery_router.js | 3
-rw-r--r--  jstests/sharding/txn_writes_during_movechunk.js | 4
-rw-r--r--  jstests/sharding/unique_index_on_shardservers.js | 3
-rw-r--r--  jstests/sharding/unowned_doc_filtering.js | 2
-rw-r--r--  jstests/sharding/unsharded_collection_targetting.js | 4
-rw-r--r--  jstests/sharding/update_immutable_fields.js | 10
-rw-r--r--  jstests/sharding/update_sharded.js | 38
-rw-r--r--  jstests/sharding/upsert_sharded.js | 2
-rw-r--r--  jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js | 2
-rw-r--r--  jstests/sharding/validate_collection.js | 10
-rw-r--r--  jstests/sharding/view_rewrite.js | 2
-rw-r--r--  jstests/sharding/views.js | 2
-rw-r--r--  jstests/sharding/write_cmd_auto_split.js | 12
-rw-r--r--  jstests/sharding/zbigMapReduce.js | 4
-rw-r--r--  jstests/sharding/zero_shard_version.js | 4
-rw-r--r--  jstests/slow1/conc_update.js | 6
-rw-r--r--  jstests/slow1/initial_sync_many_dbs.js | 2
-rw-r--r--  jstests/slow1/mr_during_migrate.js | 2
-rw-r--r--  jstests/slow1/sharding_multiple_collections.js | 4
-rw-r--r--  jstests/ssl/initial_sync1_x509.js | 4
-rw-r--r--  jstests/ssl/libs/ssl_helpers.js | 6
-rw-r--r--  jstests/ssl/sharding_with_x509.js | 6
-rw-r--r--  jstests/ssl/ssl_fragment.js | 2
-rw-r--r--  jstests/ssl/upgrade_noauth_to_x509_ssl.js | 6
-rw-r--r--  jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js | 4
-rw-r--r--  jstests/sslSpecial/x509_cluster_auth_rollover.js | 2
-rw-r--r--  jstests/tool/dumprestoreWithNoOptions.js | 6
-rw-r--r--  jstests/tool/exportimport_bigarray.js | 2
-rw-r--r--  jstests/tool/tool_replset.js | 2
989 files changed, 4036 insertions, 3960 deletions
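
Every hunk that follows applies the same mechanical substitution: the legacy shell helper assert.writeOK is replaced by assert.commandWorked. A minimal sketch of the pattern, assuming the standard mongo shell assertion helpers (assert.writeOK only understood write results, while assert.commandWorked also validates generic command replies for {ok: 1}):

    // Before: the legacy helper, limited to WriteResult/BulkWriteResult.
    assert.writeOK(coll.insert({a: 1}));
    // After: the preferred helper; it accepts the same write results and
    // additionally validates command replies such as runCommand() output.
    assert.commandWorked(coll.insert({a: 1}));

Both calls throw when the insert reports an error, so the rename does not change what the tests verify.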
diff --git a/jstests/aggregation/bugs/cond.js b/jstests/aggregation/bugs/cond.js
index 84831ca11a7..73820227a6a 100644
--- a/jstests/aggregation/bugs/cond.js
+++ b/jstests/aggregation/bugs/cond.js
@@ -48,14 +48,14 @@ assertResult(1, [{$and: []}, {$add: [1]}, {$add: [1, 1]}]);
assertResult(2, [{$or: []}, {$add: [1]}, {$add: [1, 1]}]);
assert(coll.drop());
-assert.writeOK(coll.insert({t: true, f: false, x: 'foo', y: 'bar'}));
+assert.commandWorked(coll.insert({t: true, f: false, x: 'foo', y: 'bar'}));
// Field path expressions.
assertResult('foo', ['$t', '$x', '$y']);
assertResult('bar', ['$f', '$x', '$y']);
assert(coll.drop());
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
// Coerce to bool.
assertResult('a', [1, 'a', 'b']);
@@ -64,10 +64,10 @@ assertResult('b', [0, 'a', 'b']);
// Nested.
assert(coll.drop());
-assert.writeOK(coll.insert({noonSense: 'am', mealCombined: 'no'}));
-assert.writeOK(coll.insert({noonSense: 'am', mealCombined: 'yes'}));
-assert.writeOK(coll.insert({noonSense: 'pm', mealCombined: 'yes'}));
-assert.writeOK(coll.insert({noonSense: 'pm', mealCombined: 'no'}));
+assert.commandWorked(coll.insert({noonSense: 'am', mealCombined: 'no'}));
+assert.commandWorked(coll.insert({noonSense: 'am', mealCombined: 'yes'}));
+assert.commandWorked(coll.insert({noonSense: 'pm', mealCombined: 'yes'}));
+assert.commandWorked(coll.insert({noonSense: 'pm', mealCombined: 'no'}));
assert.eq(['breakfast', 'brunch', 'dinner', 'linner'],
coll.aggregate([
{
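
For context on what cond.js exercises: the array form of $cond is [ifExpr, thenExpr, elseExpr], and the test's assertResult helper presumably evaluates such an expression and compares the output. A short sketch grounded in the documents inserted above:

    // With {t: true, f: false, x: 'foo', y: 'bar'} inserted:
    // '$t' is truthy, so $cond selects the 'then' branch '$x' -> 'foo'.
    coll.aggregate([{$project: {v: {$cond: ['$t', '$x', '$y']}}}]);
    // '$f' is falsy, so $cond selects the 'else' branch '$y' -> 'bar'.
    coll.aggregate([{$project: {v: {$cond: ['$f', '$x', '$y']}}}]);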
diff --git a/jstests/aggregation/bugs/cursor_timeout.js b/jstests/aggregation/bugs/cursor_timeout.js
index 21260074d26..e9b80b0597f 100644
--- a/jstests/aggregation/bugs/cursor_timeout.js
+++ b/jstests/aggregation/bugs/cursor_timeout.js
@@ -73,9 +73,9 @@ function assertCursorTimesOut(collName, pipeline) {
assert.eq(ErrorCodes.CursorNotFound, err.code, tojson(err));
}
-assert.writeOK(testDB.source.insert({local: 1}));
+assert.commandWorked(testDB.source.insert({local: 1}));
for (let i = 0; i < numMatches; ++i) {
- assert.writeOK(testDB.dest.insert({foreign: 1}));
+ assert.commandWorked(testDB.dest.insert({foreign: 1}));
}
// Test that a regular aggregation cursor is killed when the timeout is reached.
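
The cursor_timeout.js hunk only changes how the seed writes are asserted; the surrounding test opens aggregation cursors over these documents and expects an idle cursor to be reaped by the server. A hedged sketch of the failure mode being asserted, reusing the assertCursorTimesOut helper visible in the hunk header (the 'source' collection name and empty pipeline here are illustrative assumptions, not the test's literal call):

    // Once the cursor timeout elapses, a getMore on the idle cursor must
    // fail with the code checked in the context line above:
    //     assert.eq(ErrorCodes.CursorNotFound, err.code, tojson(err));
    assertCursorTimesOut('source', []);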
diff --git a/jstests/aggregation/bugs/explain_options_helper.js b/jstests/aggregation/bugs/explain_options_helper.js
index 0834d56e469..22c61efc6b5 100644
--- a/jstests/aggregation/bugs/explain_options_helper.js
+++ b/jstests/aggregation/bugs/explain_options_helper.js
@@ -8,7 +8,7 @@ const coll = db.explain_options;
coll.drop();
for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
}
const collation = {
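
explain_options_helper.js (judging by its name and the setup shown here) guards against shell explain helpers mutating the options object a caller passes in. A hedged sketch of that property; the deep-copy and comparison lines are illustrative assumptions, since the test's actual body is truncated in this diff:

    const options = {collation: {locale: 'simple'}};   // assumed option shape
    const before = Object.extend({}, options, true);   // deep copy for comparison
    coll.explain().aggregate([], options);             // run an explain through the helper
    assert.docEq(before, options);                     // caller's object must be unchanged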
diff --git a/jstests/aggregation/bugs/firstlast.js b/jstests/aggregation/bugs/firstlast.js
index 8ab83fe30b7..ebf8dbe2946 100644
--- a/jstests/aggregation/bugs/firstlast.js
+++ b/jstests/aggregation/bugs/firstlast.js
@@ -29,24 +29,24 @@ function assertFirstLast(expectedFirst, expectedLast, stages, expression) {
}
// One document.
-assert.writeOK(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
assertFirstLast(1, 1);
// Two documents.
-assert.writeOK(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
assertFirstLast(1, 2);
// Three documents.
-assert.writeOK(coll.insert({a: 1, b: 3}));
+assert.commandWorked(coll.insert({a: 1, b: 3}));
assertFirstLast(1, 3);
// Another 'a' key value does not affect outcome.
assert(coll.drop());
-assert.writeOK(coll.insert({a: 3, b: 0}));
-assert.writeOK(coll.insert({a: 1, b: 1}));
-assert.writeOK(coll.insert({a: 1, b: 2}));
-assert.writeOK(coll.insert({a: 1, b: 3}));
-assert.writeOK(coll.insert({a: 2, b: 0}));
+assert.commandWorked(coll.insert({a: 3, b: 0}));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 3}));
+assert.commandWorked(coll.insert({a: 2, b: 0}));
assertFirstLast(1, 3);
// Additional pipeline stages do not affect outcome if order is maintained.
@@ -57,64 +57,64 @@ assertFirstLast(3, 1, [{$sort: {b: -1}}]);
// Skip and limit affect the results seen.
assert(coll.drop());
-assert.writeOK(coll.insert({a: 1, b: 1}));
-assert.writeOK(coll.insert({a: 1, b: 2}));
-assert.writeOK(coll.insert({a: 1, b: 3}));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 3}));
assertFirstLast(1, 2, [{$limit: 2}]);
assertFirstLast(2, 3, [{$skip: 1}, {$limit: 2}]);
assertFirstLast(2, 2, [{$skip: 1}, {$limit: 1}]);
// Mixed type values.
-assert.writeOK(coll.insert({a: 1, b: 'foo'}));
+assert.commandWorked(coll.insert({a: 1, b: 'foo'}));
assertFirstLast(1, 'foo');
assert(coll.drop());
-assert.writeOK(coll.insert({a: 1, b: 'bar'}));
-assert.writeOK(coll.insert({a: 1, b: true}));
+assert.commandWorked(coll.insert({a: 1, b: 'bar'}));
+assert.commandWorked(coll.insert({a: 1, b: true}));
assertFirstLast('bar', true);
// Value null.
assert(coll.drop());
-assert.writeOK(coll.insert({a: 1, b: null}));
-assert.writeOK(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: null}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
assertFirstLast(null, 2);
assert(coll.drop());
-assert.writeOK(coll.insert({a: 1, b: 2}));
-assert.writeOK(coll.insert({a: 1, b: null}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: null}));
assertFirstLast(2, null);
assert(coll.drop());
-assert.writeOK(coll.insert({a: 1, b: null}));
-assert.writeOK(coll.insert({a: 1, b: null}));
+assert.commandWorked(coll.insert({a: 1, b: null}));
+assert.commandWorked(coll.insert({a: 1, b: null}));
assertFirstLast(null, null);
// Value missing.
assert(coll.drop());
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
assertFirstLast(undefined, 2);
assert(coll.drop());
-assert.writeOK(coll.insert({a: 1, b: 2}));
-assert.writeOK(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 1}));
assertFirstLast(2, undefined);
assert(coll.drop());
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1}));
assertFirstLast(undefined, undefined);
// Dotted field.
assert(coll.drop());
-assert.writeOK(coll.insert({a: 1, b: [{c: 1}, {c: 2}]}));
-assert.writeOK(coll.insert({a: 1, b: [{c: 6}, {}]}));
+assert.commandWorked(coll.insert({a: 1, b: [{c: 1}, {c: 2}]}));
+assert.commandWorked(coll.insert({a: 1, b: [{c: 6}, {}]}));
assertFirstLast([1, 2], [6], [], '$b.c');
// Computed expressions.
assert(coll.drop());
-assert.writeOK(coll.insert({a: 1, b: 1}));
-assert.writeOK(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
assertFirstLast(1, 0, [], {$mod: ['$b', 2]});
assertFirstLast(0, 1, [], {$mod: [{$add: ['$b', 1]}, 2]});
}());
diff --git a/jstests/aggregation/bugs/lookup_unwind_getmore.js b/jstests/aggregation/bugs/lookup_unwind_getmore.js
index 67b970de820..c184b5e56d6 100644
--- a/jstests/aggregation/bugs/lookup_unwind_getmore.js
+++ b/jstests/aggregation/bugs/lookup_unwind_getmore.js
@@ -29,11 +29,11 @@ function runTest(options) {
const batchSize = 2;
testDB.source.drop();
- assert.writeOK(testDB.source.insert({x: 1}));
+ assert.commandWorked(testDB.source.insert({x: 1}));
testDB.dest.drop();
for (let i = 0; i < 5; ++i) {
- assert.writeOK(testDB.dest.insert({x: 1}));
+ assert.commandWorked(testDB.dest.insert({x: 1}));
}
const res = assert.commandWorked(testDB.runCommand({
diff --git a/jstests/aggregation/bugs/lookup_unwind_killcursor.js b/jstests/aggregation/bugs/lookup_unwind_killcursor.js
index eab9d05c591..829f3b52089 100644
--- a/jstests/aggregation/bugs/lookup_unwind_killcursor.js
+++ b/jstests/aggregation/bugs/lookup_unwind_killcursor.js
@@ -24,11 +24,11 @@ function runTest(pipeline) {
const batchSize = 2;
testDB.source.drop();
- assert.writeOK(testDB.source.insert({x: 1}));
+ assert.commandWorked(testDB.source.insert({x: 1}));
testDB.dest.drop();
for (let i = 0; i < 5; ++i) {
- assert.writeOK(testDB.dest.insert({x: 1}));
+ assert.commandWorked(testDB.dest.insert({x: 1}));
}
const res = assert.commandWorked(testDB.runCommand({
diff --git a/jstests/aggregation/bugs/match.js b/jstests/aggregation/bugs/match.js
index 6a545ed60c1..f34d09bcfc6 100644
--- a/jstests/aggregation/bugs/match.js
+++ b/jstests/aggregation/bugs/match.js
@@ -64,9 +64,9 @@ function checkMatchResults(indexed) {
coll.remove({});
assertResults([], {});
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: 2}));
- assert.writeOK(coll.insert({_id: 2, a: 3}));
+ assert.commandWorked(coll.insert({_id: 0, a: 1}));
+ assert.commandWorked(coll.insert({_id: 1, a: 2}));
+ assert.commandWorked(coll.insert({_id: 2, a: 3}));
// Empty query.
assertResults([{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}], {});
@@ -80,76 +80,76 @@ function checkMatchResults(indexed) {
// Regular expression.
coll.remove({});
- assert.writeOK(coll.insert({_id: 0, a: 'x'}));
- assert.writeOK(coll.insert({_id: 1, a: 'yx'}));
+ assert.commandWorked(coll.insert({_id: 0, a: 'x'}));
+ assert.commandWorked(coll.insert({_id: 1, a: 'yx'}));
assertResults([{_id: 0, a: 'x'}], {a: /^x/});
assertResults([{_id: 0, a: 'x'}, {_id: 1, a: 'yx'}], {a: /x/});
// Dotted field.
coll.remove({});
- assert.writeOK(coll.insert({_id: 0, a: {b: 4}}));
- assert.writeOK(coll.insert({_id: 1, a: 2}));
+ assert.commandWorked(coll.insert({_id: 0, a: {b: 4}}));
+ assert.commandWorked(coll.insert({_id: 1, a: 2}));
assertResults([{_id: 0, a: {b: 4}}], {'a.b': 4});
// Value within an array.
coll.remove({});
- assert.writeOK(coll.insert({_id: 0, a: [1, 2, 3]}));
- assert.writeOK(coll.insert({_id: 1, a: [2, 2, 3]}));
- assert.writeOK(coll.insert({_id: 2, a: [2, 2, 2]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [1, 2, 3]}));
+ assert.commandWorked(coll.insert({_id: 1, a: [2, 2, 3]}));
+ assert.commandWorked(coll.insert({_id: 2, a: [2, 2, 2]}));
assertResults([{_id: 0, a: [1, 2, 3]}, {_id: 1, a: [2, 2, 3]}], {a: 3});
// Missing, null, $exists matching.
coll.remove({});
- assert.writeOK(coll.insert({_id: 0}));
- assert.writeOK(coll.insert({_id: 1, a: null}));
- assert.writeOK(coll.insert({_id: 3, a: 0}));
+ assert.commandWorked(coll.insert({_id: 0}));
+ assert.commandWorked(coll.insert({_id: 1, a: null}));
+ assert.commandWorked(coll.insert({_id: 3, a: 0}));
assertResults([{_id: 0}, {_id: 1, a: null}], {a: null});
assertResults(null, {a: {$exists: true}});
assertResults(null, {a: {$exists: false}});
// $elemMatch
coll.remove({});
- assert.writeOK(coll.insert({_id: 0, a: [1, 2]}));
- assert.writeOK(coll.insert({_id: 1, a: [1, 2, 3]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [1, 2]}));
+ assert.commandWorked(coll.insert({_id: 1, a: [1, 2, 3]}));
assertResults([{_id: 1, a: [1, 2, 3]}], {a: {$elemMatch: {$gt: 1, $mod: [2, 1]}}});
coll.remove({});
- assert.writeOK(coll.insert({_id: 0, a: [{b: 1}, {c: 2}]}));
- assert.writeOK(coll.insert({_id: 1, a: [{b: 1, c: 2}]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [{b: 1}, {c: 2}]}));
+ assert.commandWorked(coll.insert({_id: 1, a: [{b: 1, c: 2}]}));
assertResults([{_id: 1, a: [{b: 1, c: 2}]}], {a: {$elemMatch: {b: 1, c: 2}}});
// $size
coll.remove({});
- assert.writeOK(coll.insert({}));
- assert.writeOK(coll.insert({a: null}));
- assert.writeOK(coll.insert({a: []}));
- assert.writeOK(coll.insert({a: [1]}));
- assert.writeOK(coll.insert({a: [1, 2]}));
+ assert.commandWorked(coll.insert({}));
+ assert.commandWorked(coll.insert({a: null}));
+ assert.commandWorked(coll.insert({a: []}));
+ assert.commandWorked(coll.insert({a: [1]}));
+ assert.commandWorked(coll.insert({a: [1, 2]}));
assertResults(null, {a: {$size: 0}});
assertResults(null, {a: {$size: 1}});
assertResults(null, {a: {$size: 2}});
// $type
coll.remove({});
- assert.writeOK(coll.insert({}));
- assert.writeOK(coll.insert({a: null}));
- assert.writeOK(coll.insert({a: NumberInt(1)}));
- assert.writeOK(coll.insert({a: NumberLong(2)}));
- assert.writeOK(coll.insert({a: 66.6}));
- assert.writeOK(coll.insert({a: 'abc'}));
- assert.writeOK(coll.insert({a: /xyz/}));
- assert.writeOK(coll.insert({a: {q: 1}}));
- assert.writeOK(coll.insert({a: true}));
- assert.writeOK(coll.insert({a: new Date()}));
- assert.writeOK(coll.insert({a: new ObjectId()}));
+ assert.commandWorked(coll.insert({}));
+ assert.commandWorked(coll.insert({a: null}));
+ assert.commandWorked(coll.insert({a: NumberInt(1)}));
+ assert.commandWorked(coll.insert({a: NumberLong(2)}));
+ assert.commandWorked(coll.insert({a: 66.6}));
+ assert.commandWorked(coll.insert({a: 'abc'}));
+ assert.commandWorked(coll.insert({a: /xyz/}));
+ assert.commandWorked(coll.insert({a: {q: 1}}));
+ assert.commandWorked(coll.insert({a: true}));
+ assert.commandWorked(coll.insert({a: new Date()}));
+ assert.commandWorked(coll.insert({a: new ObjectId()}));
for (let type = 1; type <= 18; ++type) {
assertResults(null, {a: {$type: type}});
}
coll.remove({});
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: 2}));
- assert.writeOK(coll.insert({_id: 2, a: 3}));
+ assert.commandWorked(coll.insert({_id: 0, a: 1}));
+ assert.commandWorked(coll.insert({_id: 1, a: 2}));
+ assert.commandWorked(coll.insert({_id: 2, a: 3}));
// $and
assertResults([{_id: 1, a: 2}], {$and: [{a: 2}, {_id: 1}]});
diff --git a/jstests/aggregation/bugs/match_swap_limit.js b/jstests/aggregation/bugs/match_swap_limit.js
index 7dabc7130ca..2fb38339cf5 100644
--- a/jstests/aggregation/bugs/match_swap_limit.js
+++ b/jstests/aggregation/bugs/match_swap_limit.js
@@ -7,14 +7,14 @@
let coll = db.jstests_match_swap_limit;
coll.drop();
-assert.writeOK(coll.insert({_id: 0, x: 1, y: 3}));
-assert.writeOK(coll.insert({_id: 1, x: 2, y: 2}));
-assert.writeOK(coll.insert({_id: 2, x: 3, y: 1}));
+assert.commandWorked(coll.insert({_id: 0, x: 1, y: 3}));
+assert.commandWorked(coll.insert({_id: 1, x: 2, y: 2}));
+assert.commandWorked(coll.insert({_id: 2, x: 3, y: 1}));
assert.eq([{_id: 1, x: 2, y: 2}],
coll.aggregate([{$sort: {x: -1}}, {$limit: 2}, {$match: {y: {$gte: 2}}}]).toArray());
-assert.writeOK(coll.createIndex({x: 1}));
+assert.commandWorked(coll.createIndex({x: 1}));
assert.eq([{_id: 1, x: 2, y: 2}],
coll.aggregate([{$sort: {x: -1}}, {$limit: 2}, {$match: {y: {$gte: 2}}}]).toArray());
}());
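Note the createIndex() change in the hunk above: unlike insert(), createIndex() returns a command response document rather than a WriteResult, so assert.commandWorked() is the natural check there. A minimal sketch, assuming the standard shell helper:

    // createIndex() wraps the createIndexes command and hands back its
    // response ({ok: 1, ...}), which is exactly the shape that
    // commandWorked() validates.
    assert.commandWorked(coll.createIndex({x: 1}));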
diff --git a/jstests/aggregation/bugs/server10176.js b/jstests/aggregation/bugs/server10176.js
index 9283c819342..fef92f56e06 100644
--- a/jstests/aggregation/bugs/server10176.js
+++ b/jstests/aggregation/bugs/server10176.js
@@ -8,27 +8,27 @@ var coll = db.abs_expr;
coll.drop();
// valid types (numeric and null)
-assert.writeOK(coll.insert({_id: 0, a: 5}));
-assert.writeOK(coll.insert({_id: 1, a: -5}));
-assert.writeOK(coll.insert({_id: 2, a: 5.5}));
-assert.writeOK(coll.insert({_id: 3, a: -5.5}));
-assert.writeOK(coll.insert({_id: 4, a: NumberInt("5")}));
-assert.writeOK(coll.insert({_id: 5, a: NumberInt("-5")}));
-assert.writeOK(coll.insert({_id: 6, a: NumberLong("5")}));
-assert.writeOK(coll.insert({_id: 7, a: NumberLong("-5")}));
-assert.writeOK(coll.insert({_id: 8, a: 0.0}));
-assert.writeOK(coll.insert({_id: 9, a: -0.0}));
-assert.writeOK(coll.insert({_id: 10, a: NumberInt("0")}));
+assert.commandWorked(coll.insert({_id: 0, a: 5}));
+assert.commandWorked(coll.insert({_id: 1, a: -5}));
+assert.commandWorked(coll.insert({_id: 2, a: 5.5}));
+assert.commandWorked(coll.insert({_id: 3, a: -5.5}));
+assert.commandWorked(coll.insert({_id: 4, a: NumberInt("5")}));
+assert.commandWorked(coll.insert({_id: 5, a: NumberInt("-5")}));
+assert.commandWorked(coll.insert({_id: 6, a: NumberLong("5")}));
+assert.commandWorked(coll.insert({_id: 7, a: NumberLong("-5")}));
+assert.commandWorked(coll.insert({_id: 8, a: 0.0}));
+assert.commandWorked(coll.insert({_id: 9, a: -0.0}));
+assert.commandWorked(coll.insert({_id: 10, a: NumberInt("0")}));
// INT_MIN is -(2 ^ 31)
-assert.writeOK(coll.insert({_id: 11, a: NumberInt(-Math.pow(2, 31))}));
-assert.writeOK(coll.insert({_id: 12, a: -Math.pow(2, 31)}));
+assert.commandWorked(coll.insert({_id: 11, a: NumberInt(-Math.pow(2, 31))}));
+assert.commandWorked(coll.insert({_id: 12, a: -Math.pow(2, 31)}));
// 1152921504606846977 is 2^60 + 1, an integer that can't be represented precisely as a double
-assert.writeOK(coll.insert({_id: 13, a: NumberLong("1152921504606846977")}));
-assert.writeOK(coll.insert({_id: 14, a: NumberLong("-1152921504606846977")}));
-assert.writeOK(coll.insert({_id: 15, a: null}));
-assert.writeOK(coll.insert({_id: 16, a: undefined}));
-assert.writeOK(coll.insert({_id: 17, a: NaN}));
-assert.writeOK(coll.insert({_id: 18}));
+assert.commandWorked(coll.insert({_id: 13, a: NumberLong("1152921504606846977")}));
+assert.commandWorked(coll.insert({_id: 14, a: NumberLong("-1152921504606846977")}));
+assert.commandWorked(coll.insert({_id: 15, a: null}));
+assert.commandWorked(coll.insert({_id: 16, a: undefined}));
+assert.commandWorked(coll.insert({_id: 17, a: NaN}));
+assert.commandWorked(coll.insert({_id: 18}));
// valid use of $abs: numbers become positive, null/undefined/nonexistent become null
diff --git a/jstests/aggregation/bugs/server11118.js b/jstests/aggregation/bugs/server11118.js
index 46e79c3a7cc..560389f21e1 100644
--- a/jstests/aggregation/bugs/server11118.js
+++ b/jstests/aggregation/bugs/server11118.js
@@ -9,7 +9,7 @@ const coll = db.server11118;
// Used to verify expected output format
function testFormat(date, formatStr, expectedStr) {
coll.drop();
- assert.writeOK(coll.insert({date: date}));
+ assert.commandWorked(coll.insert({date: date}));
const res =
coll.aggregate([
@@ -23,7 +23,7 @@ function testFormat(date, formatStr, expectedStr) {
// Used to verify that server recognizes bad formats
function testFormatError(formatObj, errCode) {
coll.drop();
- assert.writeOK(coll.insert({date: ISODate()}));
+ assert.commandWorked(coll.insert({date: ISODate()}));
assertErrorCode(coll, {$project: {_id: 0, formatted: {$dateToString: formatObj}}}, errCode);
}
@@ -31,7 +31,7 @@ function testFormatError(formatObj, errCode) {
// Used to verify that only date values are accepted for date parameter
function testDateValueError(dateVal, errCode) {
coll.drop();
- assert.writeOK(coll.insert({date: dateVal}));
+ assert.commandWorked(coll.insert({date: dateVal}));
assertErrorCode(
coll, {$project: {formatted: {$dateToString: {format: "%Y", date: "$date"}}}}, errCode);
diff --git a/jstests/aggregation/bugs/server11675.js b/jstests/aggregation/bugs/server11675.js
index 2d02a1ff53e..b9214934163 100644
--- a/jstests/aggregation/bugs/server11675.js
+++ b/jstests/aggregation/bugs/server11675.js
@@ -6,10 +6,10 @@ load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers'
const coll = db.server11675;
coll.drop();
-assert.writeOK(coll.insert({_id: 1, text: "apple", words: 1}));
-assert.writeOK(coll.insert({_id: 2, text: "banana", words: 1}));
-assert.writeOK(coll.insert({_id: 3, text: "apple banana", words: 2}));
-assert.writeOK(coll.insert({_id: 4, text: "cantaloupe", words: 1}));
+assert.commandWorked(coll.insert({_id: 1, text: "apple", words: 1}));
+assert.commandWorked(coll.insert({_id: 2, text: "banana", words: 1}));
+assert.commandWorked(coll.insert({_id: 3, text: "apple banana", words: 2}));
+assert.commandWorked(coll.insert({_id: 4, text: "cantaloupe", words: 1}));
assert.commandWorked(coll.createIndex({text: "text"}));
@@ -202,7 +202,7 @@ res = coll.aggregate([
assert(!("scoreAgain" in res[0]));
// Make sure metadata works after a $unwind
-assert.writeOK(coll.insert({_id: 5, text: 'mango', words: [1, 2, 3]}));
+assert.commandWorked(coll.insert({_id: 5, text: 'mango', words: [1, 2, 3]}));
res = coll.aggregate([
{$match: {$text: {$search: 'mango'}}},
{$project: {score: {$meta: "textScore"}, _id: 1, words: 1}},
diff --git a/jstests/aggregation/bugs/server12015.js b/jstests/aggregation/bugs/server12015.js
index 2c2b34d126c..6bccef29144 100644
--- a/jstests/aggregation/bugs/server12015.js
+++ b/jstests/aggregation/bugs/server12015.js
@@ -17,10 +17,10 @@ const indexSpec = {
b: 1
};
-assert.writeOK(coll.insert({_id: 0, a: 0, b: 0}));
-assert.writeOK(coll.insert({_id: 1, a: 0, b: 1}));
-assert.writeOK(coll.insert({_id: 2, a: 1, b: 0}));
-assert.writeOK(coll.insert({_id: 3, a: 1, b: 1}));
+assert.commandWorked(coll.insert({_id: 0, a: 0, b: 0}));
+assert.commandWorked(coll.insert({_id: 1, a: 0, b: 1}));
+assert.commandWorked(coll.insert({_id: 2, a: 1, b: 0}));
+assert.commandWorked(coll.insert({_id: 3, a: 1, b: 1}));
/**
* Helper to test that for a given pipeline, the same results are returned whether or not an
diff --git a/jstests/aggregation/bugs/server14421.js b/jstests/aggregation/bugs/server14421.js
index b6701546e3d..3390b6d4b20 100644
--- a/jstests/aggregation/bugs/server14421.js
+++ b/jstests/aggregation/bugs/server14421.js
@@ -3,7 +3,7 @@
'use strict';
var coll = db.mindistance;
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, loc: {type: "Point", coordinates: [0, 0]}},
{_id: 1, loc: {type: "Point", coordinates: [0, 0.01]}}
]));
diff --git a/jstests/aggregation/bugs/server14691.js b/jstests/aggregation/bugs/server14691.js
index 2703f2dead9..7e948db19e6 100644
--- a/jstests/aggregation/bugs/server14691.js
+++ b/jstests/aggregation/bugs/server14691.js
@@ -8,7 +8,7 @@ var coll = db.accumulate_avg_sum_null;
coll.drop();
// Null cases.
-assert.writeOK(coll.insert({a: 1, b: 2, c: 'string', d: null}));
+assert.commandWorked(coll.insert({a: 1, b: 2, c: 'string', d: null}));
// Missing field.
var pipeline = [{$group: {_id: '$a', avg: {$avg: '$missing'}}}];
@@ -30,23 +30,23 @@ assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: null}]);
// Non-null cases.
coll.drop();
-assert.writeOK(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
pipeline = [{$group: {_id: '$a', avg: {$avg: '$b'}}}];
// One field.
assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: 2}]);
// Two fields.
-assert.writeOK(coll.insert({a: 1, b: 4}));
+assert.commandWorked(coll.insert({a: 1, b: 4}));
assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: 3}]);
// Average of zero should still work.
-assert.writeOK(coll.insert({a: 1, b: -6}));
+assert.commandWorked(coll.insert({a: 1, b: -6}));
assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: 0}]);
// Missing, null, or non-numeric fields should not error or affect the average.
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({a: 1, b: 'string'}));
-assert.writeOK(coll.insert({a: 1, b: null}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 'string'}));
+assert.commandWorked(coll.insert({a: 1, b: null}));
assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: 0}]);
}());
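For reference, the arithmetic this fixture depends on: $avg ignores missing, null, and non-numeric values, so only the numeric b values {2, 4, -6} contribute and (2 + 4 + (-6)) / 3 = 0. A one-line sanity check of that expectation, as a sketch:

    // Only the numeric b values feed the accumulator; the later inserts of
    // {a: 1}, {a: 1, b: 'string'}, and {a: 1, b: null} change neither the
    // running sum nor the numeric count.
    assert.eq(0, (2 + 4 + -6) / 3);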
diff --git a/jstests/aggregation/bugs/server14872.js b/jstests/aggregation/bugs/server14872.js
index 4787df5259b..610346f163d 100644
--- a/jstests/aggregation/bugs/server14872.js
+++ b/jstests/aggregation/bugs/server14872.js
@@ -9,7 +9,7 @@ load('jstests/aggregation/extras/utils.js');
var coll = db.agg_concat_arrays_expr;
coll.drop();
-assert.writeOK(coll.insert({a: [1, 2], b: ['three'], c: [], d: [[3], 4], e: null, str: 'x'}));
+assert.commandWorked(coll.insert({a: [1, 2], b: ['three'], c: [], d: [[3], 4], e: null, str: 'x'}));
// Basic concatenation.
var pipeline = [{$project: {_id: 0, all: {$concatArrays: ['$a', '$b', '$c']}}}];
diff --git a/jstests/aggregation/bugs/server14969.js b/jstests/aggregation/bugs/server14969.js
index 629e54505fc..8c4deee6686 100644
--- a/jstests/aggregation/bugs/server14969.js
+++ b/jstests/aggregation/bugs/server14969.js
@@ -6,7 +6,7 @@ coll.drop();
// Initialize collection with 100 documents of roughly 1MB each, and an index on field "a".
var longString = new Array(1024 * 1024).join('x');
for (var i = 0; i < 100; ++i) {
- assert.writeOK(coll.insert({a: 1, bigField: longString}));
+ assert.commandWorked(coll.insert({a: 1, bigField: longString}));
}
assert.commandWorked(coll.ensureIndex({a: 1}));
diff --git a/jstests/aggregation/bugs/server17943.js b/jstests/aggregation/bugs/server17943.js
index 6b510e2ddbc..70c22cc6164 100644
--- a/jstests/aggregation/bugs/server17943.js
+++ b/jstests/aggregation/bugs/server17943.js
@@ -9,13 +9,13 @@ load('jstests/aggregation/extras/utils.js');
var coll = db.agg_filter_expr;
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [1, 2, 3, 4, 5]}));
-assert.writeOK(coll.insert({_id: 1, a: [2, 4]}));
-assert.writeOK(coll.insert({_id: 2, a: []}));
-assert.writeOK(coll.insert({_id: 3, a: [1]}));
-assert.writeOK(coll.insert({_id: 4, a: null}));
-assert.writeOK(coll.insert({_id: 5, a: undefined}));
-assert.writeOK(coll.insert({_id: 6}));
+assert.commandWorked(coll.insert({_id: 0, a: [1, 2, 3, 4, 5]}));
+assert.commandWorked(coll.insert({_id: 1, a: [2, 4]}));
+assert.commandWorked(coll.insert({_id: 2, a: []}));
+assert.commandWorked(coll.insert({_id: 3, a: [1]}));
+assert.commandWorked(coll.insert({_id: 4, a: null}));
+assert.commandWorked(coll.insert({_id: 5, a: undefined}));
+assert.commandWorked(coll.insert({_id: 6}));
// Create filter to only accept odd numbers.
filterDoc = {input: '$a', as: 'x', cond: {$eq: [1, {$mod: ['$$x', 2]}]}};
@@ -85,7 +85,7 @@ filterDoc = {
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 17276);
assert(coll.drop());
-assert.writeOK(coll.insert({a: 'string'}));
+assert.commandWorked(coll.insert({a: 'string'}));
filterDoc = {input: '$a', as: 'x', cond: true};
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28651);
}());
diff --git a/jstests/aggregation/bugs/server18222.js b/jstests/aggregation/bugs/server18222.js
index d27188bbb2b..3a05eb99f61 100644
--- a/jstests/aggregation/bugs/server18222.js
+++ b/jstests/aggregation/bugs/server18222.js
@@ -5,19 +5,19 @@ var coll = db.is_array_expr;
coll.drop();
// Non-array types.
-assert.writeOK(coll.insert({_id: 0, x: 0}));
-assert.writeOK(coll.insert({_id: 1, x: '0'}));
-assert.writeOK(coll.insert({_id: 2, x: new ObjectId()}));
-assert.writeOK(coll.insert({_id: 3, x: new NumberLong(0)}));
-assert.writeOK(coll.insert({_id: 4, x: {y: []}}));
-assert.writeOK(coll.insert({_id: 5, x: null}));
-assert.writeOK(coll.insert({_id: 6, x: NaN}));
-assert.writeOK(coll.insert({_id: 7, x: undefined}));
+assert.commandWorked(coll.insert({_id: 0, x: 0}));
+assert.commandWorked(coll.insert({_id: 1, x: '0'}));
+assert.commandWorked(coll.insert({_id: 2, x: new ObjectId()}));
+assert.commandWorked(coll.insert({_id: 3, x: new NumberLong(0)}));
+assert.commandWorked(coll.insert({_id: 4, x: {y: []}}));
+assert.commandWorked(coll.insert({_id: 5, x: null}));
+assert.commandWorked(coll.insert({_id: 6, x: NaN}));
+assert.commandWorked(coll.insert({_id: 7, x: undefined}));
// Array types.
-assert.writeOK(coll.insert({_id: 8, x: []}));
-assert.writeOK(coll.insert({_id: 9, x: [0]}));
-assert.writeOK(coll.insert({_id: 10, x: ['0']}));
+assert.commandWorked(coll.insert({_id: 8, x: []}));
+assert.commandWorked(coll.insert({_id: 9, x: [0]}));
+assert.commandWorked(coll.insert({_id: 10, x: ['0']}));
// Project field is_array to represent whether the field x was an array.
var results = coll.aggregate([
diff --git a/jstests/aggregation/bugs/server18427.js b/jstests/aggregation/bugs/server18427.js
index fffbc51ef64..44246a578fb 100644
--- a/jstests/aggregation/bugs/server18427.js
+++ b/jstests/aggregation/bugs/server18427.js
@@ -7,7 +7,7 @@ load('jstests/aggregation/extras/utils.js');
'use strict';
var coll = db.log_exponential_expressions;
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
var decimalE = NumberDecimal("2.718281828459045235360287471352662");
var decimal1overE = NumberDecimal("0.3678794411714423215955237701614609");
diff --git a/jstests/aggregation/bugs/server20168.js b/jstests/aggregation/bugs/server20168.js
index 9a886bbc279..25c7f0e24b0 100644
--- a/jstests/aggregation/bugs/server20168.js
+++ b/jstests/aggregation/bugs/server20168.js
@@ -16,7 +16,7 @@ assert.eq(0, results.length, "$unwind returned the wrong number of results");
*/
function testPreserveNullAndEmptyArraysParam(inputDoc, unwindPath, outputDoc) {
coll.drop();
- assert.writeOK(coll.insert(inputDoc));
+ assert.commandWorked(coll.insert(inputDoc));
// If preserveNullAndEmptyArrays is passed, we should get an output document.
var preservedResults =
diff --git a/jstests/aggregation/bugs/server21632.js b/jstests/aggregation/bugs/server21632.js
index c23d8836bea..d0b64d88437 100644
--- a/jstests/aggregation/bugs/server21632.js
+++ b/jstests/aggregation/bugs/server21632.js
@@ -41,7 +41,7 @@ assert.eq([], coll.aggregate([{$sample: {size: 10}}]).toArray());
// If there is only one document, we should get that document.
var paddingStr = "abcdefghijklmnopqrstuvwxyz";
var firstDoc = {_id: 0, paddingStr: paddingStr};
-assert.writeOK(coll.insert(firstDoc));
+assert.commandWorked(coll.insert(firstDoc));
assert.eq([firstDoc], coll.aggregate([{$sample: {size: 1}}]).toArray());
assert.eq([firstDoc], coll.aggregate([{$sample: {size: 10}}]).toArray());
diff --git a/jstests/aggregation/bugs/server25590.js b/jstests/aggregation/bugs/server25590.js
index b478f806029..89dd16ac9a3 100644
--- a/jstests/aggregation/bugs/server25590.js
+++ b/jstests/aggregation/bugs/server25590.js
@@ -6,7 +6,7 @@
const coll = db.server25590;
coll.drop();
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
assert.commandFailedWithCode(db.runCommand({aggregate: coll.getName(), pipeline: 1}),
ErrorCodes.TypeMismatch);
diff --git a/jstests/aggregation/bugs/server26462.js b/jstests/aggregation/bugs/server26462.js
index 08225e54ce3..16aacb905ab 100644
--- a/jstests/aggregation/bugs/server26462.js
+++ b/jstests/aggregation/bugs/server26462.js
@@ -7,10 +7,10 @@
db.server26462.drop();
// Insert some test documents into the collection.
-assert.writeOK(db.server26462.insert({"_id": 1, "title": "cakes and ale"}));
-assert.writeOK(db.server26462.insert({"_id": 2, "title": "more cakes"}));
-assert.writeOK(db.server26462.insert({"_id": 3, "title": "bread"}));
-assert.writeOK(db.server26462.insert({"_id": 4, "title": "some cakes"}));
+assert.commandWorked(db.server26462.insert({"_id": 1, "title": "cakes and ale"}));
+assert.commandWorked(db.server26462.insert({"_id": 2, "title": "more cakes"}));
+assert.commandWorked(db.server26462.insert({"_id": 3, "title": "bread"}));
+assert.commandWorked(db.server26462.insert({"_id": 4, "title": "some cakes"}));
// Create a text index on the documents.
assert.commandWorked(db.server26462.createIndex({title: "text"}));
diff --git a/jstests/aggregation/bugs/server4588.js b/jstests/aggregation/bugs/server4588.js
index be04773c0ff..df1b28df2e0 100644
--- a/jstests/aggregation/bugs/server4588.js
+++ b/jstests/aggregation/bugs/server4588.js
@@ -5,11 +5,11 @@
const coll = db.server4588;
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
-assert.writeOK(coll.insert({_id: 1, x: null}));
-assert.writeOK(coll.insert({_id: 2, x: []}));
-assert.writeOK(coll.insert({_id: 3, x: [1, 2, 3]}));
-assert.writeOK(coll.insert({_id: 4, x: 5}));
+assert.commandWorked(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 1, x: null}));
+assert.commandWorked(coll.insert({_id: 2, x: []}));
+assert.commandWorked(coll.insert({_id: 3, x: [1, 2, 3]}));
+assert.commandWorked(coll.insert({_id: 4, x: 5}));
// Without includeArrayIndex.
let actualResults = coll.aggregate([{$unwind: {path: "$x"}}, {$sort: {_id: 1, x: 1}}]).toArray();
diff --git a/jstests/aggregation/bugs/server4589.js b/jstests/aggregation/bugs/server4589.js
index efa7254e4d9..3d880204b00 100644
--- a/jstests/aggregation/bugs/server4589.js
+++ b/jstests/aggregation/bugs/server4589.js
@@ -9,7 +9,7 @@ load('jstests/aggregation/extras/utils.js');
var coll = db.agg_array_elem_at_expr;
coll.drop();
-assert.writeOK(coll.insert({a: [1, 2, 3, 4, 5]}));
+assert.commandWorked(coll.insert({a: [1, 2, 3, 4, 5]}));
// Normal indexing.
var pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', 2]}}}];
diff --git a/jstests/aggregation/bugs/server5044.js b/jstests/aggregation/bugs/server5044.js
index f1f77a1991d..f2a6a82962d 100644
--- a/jstests/aggregation/bugs/server5044.js
+++ b/jstests/aggregation/bugs/server5044.js
@@ -4,10 +4,10 @@ var t = db.server5044;
function test(data, popExpected, sampExpected) {
t.drop();
- assert.writeOK(t.insert({})); // need one document to ensure we get output
+ assert.commandWorked(t.insert({})); // need one document to ensure we get output
for (var i = 0; i < data.length; i++)
- assert.writeOK(t.insert({num: data[i]}));
+ assert.commandWorked(t.insert({num: data[i]}));
var res = t.aggregate({
$group: {
diff --git a/jstests/aggregation/bugs/server533.js b/jstests/aggregation/bugs/server533.js
index d66c5d27ad8..e9be67f635a 100644
--- a/jstests/aggregation/bugs/server533.js
+++ b/jstests/aggregation/bugs/server533.js
@@ -14,7 +14,7 @@ assert.eq(coll.aggregate([{$sample: {size: 10}}]).toArray(), []);
var nItems = 3;
for (var i = 0; i < nItems; i++) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
}
[0, 1, nItems, nItems + 1].forEach(function(size) {
diff --git a/jstests/aggregation/bugs/server6074.js b/jstests/aggregation/bugs/server6074.js
index 8e53459ba9e..96a4227e999 100644
--- a/jstests/aggregation/bugs/server6074.js
+++ b/jstests/aggregation/bugs/server6074.js
@@ -10,7 +10,7 @@ var coll = db.agg_slice_expr;
coll.drop();
// Need to have at least one document to ensure the pipeline executes.
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
function testSlice(sliceArgs, expArray) {
var pipeline = [{$project: {_id: 0, slice: {$slice: sliceArgs}}}];
diff --git a/jstests/aggregation/bugs/server6127.js b/jstests/aggregation/bugs/server6127.js
index 1f11d858c83..92bcf9c442d 100644
--- a/jstests/aggregation/bugs/server6127.js
+++ b/jstests/aggregation/bugs/server6127.js
@@ -10,9 +10,9 @@
"use strict";
db.s6127.drop();
-assert.writeOK(db.s6127.insert({_id: 0, a: 1}));
-assert.writeOK(db.s6127.insert({_id: 1, foo: 2}));
-assert.writeOK(db.s6127.insert({_id: 2, foo: {bar: 3}}));
+assert.commandWorked(db.s6127.insert({_id: 0, a: 1}));
+assert.commandWorked(db.s6127.insert({_id: 1, foo: 2}));
+assert.commandWorked(db.s6127.insert({_id: 2, foo: {bar: 3}}));
// Aggregate checking the field foo and the path foo.bar.
const cursor = db.s6127.aggregate(
diff --git a/jstests/aggregation/bugs/server6147.js b/jstests/aggregation/bugs/server6147.js
index c74e1848512..f1911a88d57 100644
--- a/jstests/aggregation/bugs/server6147.js
+++ b/jstests/aggregation/bugs/server6147.js
@@ -9,8 +9,8 @@
"use strict";
db.s6147.drop();
-assert.writeOK(db.s6147.insert({a: 1}));
-assert.writeOK(db.s6147.insert({a: 2}));
+assert.commandWorked(db.s6147.insert({a: 1}));
+assert.commandWorked(db.s6147.insert({a: 2}));
// Aggregate checking various combinations of the constant and the field.
const cursor = db.s6147.aggregate([
diff --git a/jstests/aggregation/bugs/server6185.js b/jstests/aggregation/bugs/server6185.js
index 06eacdf791d..156dc6ce6fe 100644
--- a/jstests/aggregation/bugs/server6185.js
+++ b/jstests/aggregation/bugs/server6185.js
@@ -6,11 +6,11 @@
const coll = db.c;
coll.drop();
-assert.writeOK(coll.insert({a: [1]}));
-assert.writeOK(coll.insert({a: {c: 1}}));
-assert.writeOK(coll.insert({a: [{c: 1}, {b: 1, c: 1}, {c: 1}]}));
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({b: 1}));
+assert.commandWorked(coll.insert({a: [1]}));
+assert.commandWorked(coll.insert({a: {c: 1}}));
+assert.commandWorked(coll.insert({a: [{c: 1}, {b: 1, c: 1}, {c: 1}]}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({b: 1}));
assert.eq(coll.aggregate([{$project: {'a.b': 1}}, {$sort: {_id: 1}}]).toArray(),
coll.find({}, {'a.b': 1}).sort({_id: 1}).toArray());
diff --git a/jstests/aggregation/bugs/server6530.js b/jstests/aggregation/bugs/server6530.js
index 77dfcd703fb..8b0c914648a 100644
--- a/jstests/aggregation/bugs/server6530.js
+++ b/jstests/aggregation/bugs/server6530.js
@@ -13,8 +13,8 @@ assert.commandWorked(coll.createIndex({point2d: "2d"}));
assert.commandWorked(coll.createIndex({point2dsphere: "2dsphere"}));
// Populate the collection so that successful queries can return at least one result.
-assert.writeOK(coll.insert({point2d: [0.25, 0.35]}));
-assert.writeOK(coll.insert({point2dsphere: [0.25, 0.35]}));
+assert.commandWorked(coll.insert({point2d: [0.25, 0.35]}));
+assert.commandWorked(coll.insert({point2dsphere: [0.25, 0.35]}));
const nearQuery = {
point2d: {$near: [0, 0]}
diff --git a/jstests/aggregation/bugs/server6779.js b/jstests/aggregation/bugs/server6779.js
index d9d48898068..d6529732c62 100644
--- a/jstests/aggregation/bugs/server6779.js
+++ b/jstests/aggregation/bugs/server6779.js
@@ -6,8 +6,8 @@
function test(op, val) {
const coll = db.server6779;
coll.drop();
- assert.writeOK(coll.insert({a: true}));
- assert.writeOK(coll.insert({a: false}));
+ assert.commandWorked(coll.insert({a: true}));
+ assert.commandWorked(coll.insert({a: false}));
const obj = {};
obj[op] = ['$a', val];
diff --git a/jstests/aggregation/bugs/server7695_isodates.js b/jstests/aggregation/bugs/server7695_isodates.js
index ca90c47f0fe..de52997c2ba 100644
--- a/jstests/aggregation/bugs/server7695_isodates.js
+++ b/jstests/aggregation/bugs/server7695_isodates.js
@@ -10,7 +10,7 @@ load('jstests/libs/dateutil.js');
coll.drop();
// Seed collection so that the pipeline will execute.
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
/**
* Helper for testing that 'op' returns 'expResult'.
diff --git a/jstests/aggregation/bugs/server7781.js b/jstests/aggregation/bugs/server7781.js
index 19700cc2202..d70662a9d25 100644
--- a/jstests/aggregation/bugs/server7781.js
+++ b/jstests/aggregation/bugs/server7781.js
@@ -87,7 +87,7 @@ function test(db, sharded, indexType) {
for (var i = 0; i < numPts; i++) {
bulk.insert({rand: Math.random(), loc: pointMaker.mkPt()});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
assert.eq(db[coll].count(), numPts);
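The bulk path above is the other result shape this migration covers: bulk.execute() returns a BulkWriteResult rather than a per-call WriteResult, and the updated assertion accepts it. A minimal sketch, assuming the standard shell bulk API:

    // A hypothetical standalone version of the loop above: execute() yields
    // a BulkWriteResult, which commandWorked() checks like any other
    // successful result.
    var bulk = db.c.initializeUnorderedBulkOp();
    for (var i = 0; i < 10; i++) {
        bulk.insert({rand: Math.random()});
    }
    assert.commandWorked(bulk.execute());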
diff --git a/jstests/aggregation/bugs/server8141.js b/jstests/aggregation/bugs/server8141.js
index 908fd952059..3b1477e9ac4 100644
--- a/jstests/aggregation/bugs/server8141.js
+++ b/jstests/aggregation/bugs/server8141.js
@@ -4,7 +4,7 @@
var coll = db.exprs_in_arrays;
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: ['foo', 'bar', 'baz'], b: 'bar', c: 'Baz'}));
+assert.commandWorked(coll.insert({_id: 0, a: ['foo', 'bar', 'baz'], b: 'bar', c: 'Baz'}));
// An array of constants should still evaluate to an array of constants.
var pipeline = [{$project: {_id: 0, d: ['constant', 1]}}];
@@ -43,7 +43,7 @@ assert.eq(coll.aggregate(pipeline).toArray(), [{d: [[1, 'foo', 'bar']]}]);
coll.drop();
// Should replace missing values with NULL to preserve indices.
-assert.writeOK(coll.insert({_id: 1, x: 1, z: 2}));
+assert.commandWorked(coll.insert({_id: 1, x: 1, z: 2}));
pipeline = [{$project: {_id: 0, coordinate: ['$x', '$y', '$z']}}];
assert.eq(coll.aggregate(pipeline).toArray(), [{coordinate: [1, null, 2]}]);
diff --git a/jstests/aggregation/bugs/server8568.js b/jstests/aggregation/bugs/server8568.js
index 71793f5696b..9216e1dbffd 100644
--- a/jstests/aggregation/bugs/server8568.js
+++ b/jstests/aggregation/bugs/server8568.js
@@ -7,7 +7,7 @@ load('jstests/aggregation/extras/utils.js');
'use strict';
var coll = db.sqrt;
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
// Helper for testing that op returns expResult.
function testOp(op, expResult) {
diff --git a/jstests/aggregation/bugs/server9625.js b/jstests/aggregation/bugs/server9625.js
index 4cbf487b5e0..2cf1353f53d 100644
--- a/jstests/aggregation/bugs/server9625.js
+++ b/jstests/aggregation/bugs/server9625.js
@@ -8,7 +8,7 @@ load('jstests/aggregation/extras/utils.js');
'use strict';
var coll = db.server9625;
coll.drop();
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
// Helper for testing that op returns expResult.
function testOp(op, expResult) {
@@ -51,7 +51,7 @@ testOp({$stdDevSamp: [1, 2, 3, NaN]}, NaN);
// optimization will evaluate them all into one, without calling isAssociative() or
// isCommutative().
coll.drop();
-assert.writeOK(coll.insert({"a": 1, "b": 6}));
+assert.commandWorked(coll.insert({"a": 1, "b": 6}));
// These expressions are associative and commutative so inner expression can be combined with
// outer.
diff --git a/jstests/aggregation/bugs/sort_arrays.js b/jstests/aggregation/bugs/sort_arrays.js
index e83b4466cc6..a82837dfb70 100644
--- a/jstests/aggregation/bugs/sort_arrays.js
+++ b/jstests/aggregation/bugs/sort_arrays.js
@@ -5,7 +5,7 @@
const coll = db.foo;
coll.drop();
-assert.writeOK(coll.insert([{_id: 2, a: [2, 3]}, {_id: 3, a: [2, 4]}, {_id: 4, a: [2, 1]}]));
+assert.commandWorked(coll.insert([{_id: 2, a: [2, 3]}, {_id: 3, a: [2, 4]}, {_id: 4, a: [2, 1]}]));
const expectedOrder = [{_id: 4, a: [2, 1]}, {_id: 2, a: [2, 3]}, {_id: 3, a: [2, 4]}];
assert.eq(coll.aggregate([{$sort: {a: 1, _id: 1}}]).toArray(), expectedOrder);
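The expected order above follows from how an ascending sort keys array values: each document sorts by the smallest element of a (1 for [2, 1], and 2 for both [2, 3] and [2, 4], per the server's array comparison behavior), with the remaining tie falling to the secondary _id: 1 key. A sketch with a hypothetical helper that mimics the assumed key extraction:

    // ascendingKey is illustrative only; the real key extraction happens
    // server-side during the $sort.
    const ascendingKey = (arr) => Math.min(...arr);
    assert.eq([1, 2, 2], [[2, 1], [2, 3], [2, 4]].map(ascendingKey));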
diff --git a/jstests/aggregation/explain_limit.js b/jstests/aggregation/explain_limit.js
index a0dabdc1b02..7b725548c96 100644
--- a/jstests/aggregation/explain_limit.js
+++ b/jstests/aggregation/explain_limit.js
@@ -50,7 +50,7 @@ coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
for (let i = 0; i < kCollSize; i++) {
- assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorked(coll.insert({a: 1}));
}
const pipeline = [{$match: {a: 1}}, {$limit: kLimit}];
diff --git a/jstests/aggregation/explain_writing_aggs.js b/jstests/aggregation/explain_writing_aggs.js
index 412060bcfa6..a1ec8e82e30 100644
--- a/jstests/aggregation/explain_writing_aggs.js
+++ b/jstests/aggregation/explain_writing_aggs.js
@@ -17,7 +17,7 @@ let targetColl = db.explain_writing_aggs_target;
sourceColl.drop();
targetColl.drop();
-assert.writeOK(sourceColl.insert({_id: 1}));
+assert.commandWorked(sourceColl.insert({_id: 1}));
// Test that $out can be explained with 'queryPlanner' explain verbosity and does not perform
// any writes.
diff --git a/jstests/aggregation/expressions/arrayToObject.js b/jstests/aggregation/expressions/arrayToObject.js
index df78b9f1aaf..409bd8aee1f 100644
--- a/jstests/aggregation/expressions/arrayToObject.js
+++ b/jstests/aggregation/expressions/arrayToObject.js
@@ -9,7 +9,7 @@ let coll = db.array_to_object_expr;
coll.drop();
// Write one document so that the aggregations which use $const produce a result.
-assert.writeOK(coll.insert({_id: "sentinel", a: 1}));
+assert.commandWorked(coll.insert({_id: "sentinel", a: 1}));
/*
* Check that the collapsed, object form of 'expanded' (which is computed using $arrayToObject)
diff --git a/jstests/aggregation/expressions/collation_expressions.js b/jstests/aggregation/expressions/collation_expressions.js
index dc959791f2f..c8445eb5b60 100644
--- a/jstests/aggregation/expressions/collation_expressions.js
+++ b/jstests/aggregation/expressions/collation_expressions.js
@@ -96,7 +96,7 @@ testExpressionWithCollation(coll, {$split: ["abc", "B"]}, ["abc"], caseInsensiti
// Test that an $and which can be optimized out respects the collation.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "A"}));
+assert.commandWorked(coll.insert({_id: 1, str: "A"}));
results = coll.aggregate([{$project: {out: {$and: [{$eq: ["$str", "a"]}, {$eq: ["b", "B"]}]}}}],
{collation: caseInsensitive})
.toArray();
@@ -105,7 +105,7 @@ assert.eq(true, results[0].out);
// Test that an $and which cannot be optimized out respects the collation.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "A", str2: "B"}));
+assert.commandWorked(coll.insert({_id: 1, str: "A", str2: "B"}));
results = coll.aggregate([{$project: {out: {$and: [{$eq: ["$str", "a"]}, {$eq: ["$str2", "b"]}]}}}],
{collation: caseInsensitive})
.toArray();
@@ -114,7 +114,7 @@ assert.eq(true, results[0].out);
// Test that an $or which can be optimized out respects the collation.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "A"}));
+assert.commandWorked(coll.insert({_id: 1, str: "A"}));
results = coll.aggregate([{$project: {out: {$or: [{$eq: ["$str", "a"]}, {$eq: ["b", "c"]}]}}}],
{collation: caseInsensitive})
.toArray();
@@ -123,7 +123,7 @@ assert.eq(true, results[0].out);
// Test that an $or which cannot be optimized out respects the collation.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "A", str2: "B"}));
+assert.commandWorked(coll.insert({_id: 1, str: "A", str2: "B"}));
results = coll.aggregate([{$project: {out: {$or: [{$eq: ["$str", "c"]}, {$eq: ["$str2", "b"]}]}}}],
{collation: caseInsensitive})
.toArray();
@@ -174,7 +174,7 @@ testExpressionWithCollation(
// Test that $group stage's _id expressions respect the collation.
coll.drop();
-assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 1}));
results = coll.aggregate([{$group: {_id: {a: {$eq: ["a", "A"]}, b: {$eq: ["b", "B"]}}}}],
{collation: caseInsensitive})
.toArray();
@@ -199,9 +199,9 @@ testExpressionWithCollation(
// Test that $switch's subexpressions respect the collation.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, a: "A"}));
-assert.writeOK(coll.insert({_id: 2, b: "B"}));
-assert.writeOK(coll.insert({_id: 3, c: "C"}));
+assert.commandWorked(coll.insert({_id: 1, a: "A"}));
+assert.commandWorked(coll.insert({_id: 2, b: "B"}));
+assert.commandWorked(coll.insert({_id: 3, c: "C"}));
results = coll.aggregate([{
$project: {
out: {
@@ -224,7 +224,7 @@ assert.eq("baz", results[2].out);
// Test that a $zip's subexpressions respect the collation.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, evens: [0, 2, 4], odds: [1, 3]}));
+assert.commandWorked(coll.insert({_id: 0, evens: [0, 2, 4], odds: [1, 3]}));
results = coll.aggregate([{
$project: {
out: {
diff --git a/jstests/aggregation/expressions/convert.js b/jstests/aggregation/expressions/convert.js
index 4e56bf16265..395ab5d9e93 100644
--- a/jstests/aggregation/expressions/convert.js
+++ b/jstests/aggregation/expressions/convert.js
@@ -9,7 +9,7 @@ function populateCollection(documentList) {
coll.drop();
var bulk = coll.initializeOrderedBulkOp();
documentList.forEach(doc => bulk.insert(doc));
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}
//
diff --git a/jstests/aggregation/expressions/date_expressions_with_timezones.js b/jstests/aggregation/expressions/date_expressions_with_timezones.js
index 076800a5384..3d8e3a06ac3 100644
--- a/jstests/aggregation/expressions/date_expressions_with_timezones.js
+++ b/jstests/aggregation/expressions/date_expressions_with_timezones.js
@@ -5,7 +5,7 @@
const coll = db.date_expressions_with_time_zones;
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
// Three sales on 2017-06-16 in UTC.
{_id: 0, date: new ISODate("2017-06-16T00:00:00.000Z"), sales: 1},
{_id: 1, date: new ISODate("2017-06-16T12:02:21.013Z"), sales: 2},
diff --git a/jstests/aggregation/expressions/date_from_string.js b/jstests/aggregation/expressions/date_from_string.js
index 56c52de0f7e..2703fde98b6 100644
--- a/jstests/aggregation/expressions/date_from_string.js
+++ b/jstests/aggregation/expressions/date_from_string.js
@@ -9,7 +9,7 @@ const coll = db.date_from_string;
/* Normal format tests. */
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
let testCases = [
{
@@ -76,7 +76,7 @@ testCases.forEach(function(testCase) {
/* Normal format tests with timezone. */
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
testCases = [
{
@@ -132,7 +132,7 @@ testCases.forEach(function(testCase) {
/* Normal format tests with UTC offset. */
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
testCases = [
{
@@ -186,7 +186,7 @@ testCases.forEach(function(testCase) {
/* Normal format tests from data. */
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, dateString: "2017-07-06T12:35:37Z", format: "%Y-%m-%dT%H:%M:%SZ"},
{_id: 1, dateString: "2017-07-06T12:35:37.513Z", format: "%Y-%m-%dT%H:%M:%S.%LZ"},
{_id: 2, dateString: "2017-07-06T12:35:37", format: "%Y-%m-%dT%H:%M:%S"},
@@ -252,7 +252,7 @@ assert.eq(
/* Normal format tests from data, with time zone. */
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, dateString: "2017-07-06T12:35:37.513", timezone: "GMT"},
{_id: 1, dateString: "2017-07-06T12:35:37.513", timezone: "UTC"},
{_id: 2, dateString: "1960-07-10T12:35:37.513", timezone: "America/New_York"},
@@ -305,7 +305,7 @@ assert.eq(expectedResults,
/* dateString from data with timezone as constant */
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, dateString: "2017-07-06T12:35:37"},
]));
@@ -326,7 +326,7 @@ assert.eq(
/* dateString from constant with timezone from data */
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, timezone: "Europe/London"},
{_id: 1, timezone: "America/New_York"},
{_id: 2, timezone: "-05:00"},
@@ -355,7 +355,7 @@ assert.eq(
/* BI format tests. */
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
let pipelines = [
{
@@ -397,7 +397,7 @@ pipelines.forEach(function(pipeline) {
/* Tests with additional timezone information. */
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
testCases = [
// GMT based variants
@@ -445,7 +445,7 @@ testCases.forEach(function(testCase) {
/* BI format tests from data. */
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, dateString: "2017-01-01 00:00:00"},
{_id: 1, dateString: "2017-07-01 00:00:00"},
{_id: 2, dateString: "2017-07-06"},
@@ -477,7 +477,7 @@ assert.eq(
/* Wacky format tests from data. */
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, dateString: "July 4th, 2017"},
{_id: 1, dateString: "July 4th, 2017 12:39:30 BST"},
{_id: 2, dateString: "July 4th, 2017 11am"},
@@ -513,7 +513,7 @@ assert.eq(
/* Tests formats that aren't supported with the normal $dateFromString parser. */
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
testCases = [
{inputString: "05 12 1988", format: "%d %m %Y", expect: "1988-12-05T00:00:00Z"},
@@ -573,7 +573,7 @@ testCases.forEach(function(testCase) {
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0},
]));
@@ -594,7 +594,7 @@ pipelines.forEach(function(pipeline) {
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0},
]));
@@ -612,7 +612,7 @@ pipelines.forEach(function(pipeline) {
/* NULL returns. */
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, date: new ISODate("2017-06-19T15:13:25.713Z")},
{_id: 1, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: null},
{_id: 2, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: undefined},
@@ -635,7 +635,7 @@ pipelines.forEach(function(pipeline) {
});
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0},
{_id: 1, format: null},
{_id: 2, format: undefined},
diff --git a/jstests/aggregation/expressions/date_from_string_on_error.js b/jstests/aggregation/expressions/date_from_string_on_error.js
index c944074657e..51936da86ea 100644
--- a/jstests/aggregation/expressions/date_from_string_on_error.js
+++ b/jstests/aggregation/expressions/date_from_string_on_error.js
@@ -10,7 +10,7 @@ const onErrorValue = ISODate("2017-07-04T11:56:02Z");
const coll = db.date_from_string_on_error;
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
// Test that the 'onError' value is returned when 'dateString' is not a valid date/time.
for (let inputDate of ["July 4th",
diff --git a/jstests/aggregation/expressions/date_from_string_on_null.js b/jstests/aggregation/expressions/date_from_string_on_null.js
index caf7cf1216d..be99897d5cb 100644
--- a/jstests/aggregation/expressions/date_from_string_on_null.js
+++ b/jstests/aggregation/expressions/date_from_string_on_null.js
@@ -8,7 +8,7 @@ const onNullValue = ISODate("2017-07-04T11:56:02Z");
const coll = db.date_from_string_on_null;
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
// Test that the 'onNull' value is returned when the 'dateString' is nullish.
for (let inputDate of [null, undefined, "$missing"]) {
diff --git a/jstests/aggregation/expressions/date_to_parts.js b/jstests/aggregation/expressions/date_to_parts.js
index 47344f5deda..30416f28610 100644
--- a/jstests/aggregation/expressions/date_to_parts.js
+++ b/jstests/aggregation/expressions/date_to_parts.js
@@ -7,7 +7,7 @@ const coll = db.dateToParts;
coll.drop();
/* --------------------------------------------------------------------------------------- */
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: "UTC"},
{_id: 1, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: "Europe/London"},
{_id: 2, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: "America/New_York", iso: true},
@@ -195,7 +195,7 @@ assert.eq(
/* Tests with timestamp */
assert(coll.drop());
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{
_id: ObjectId("58c7cba47bbadf523cf2c313"),
date: new ISODate("2017-06-19T15:13:25.713Z"),
@@ -272,7 +272,7 @@ assert.eq(
/* --------------------------------------------------------------------------------------- */
assert(coll.drop());
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, date: ISODate("2017-06-27T12:00:20Z")},
]));
@@ -286,7 +286,7 @@ assert.eq(
/* --------------------------------------------------------------------------------------- */
assert(coll.drop());
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, date: ISODate("2017-06-27T12:00:20Z")},
]));
@@ -300,7 +300,7 @@ assert.eq(
/* --------------------------------------------------------------------------------------- */
assert(coll.drop());
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, tz: "Europe/London"},
]));
diff --git a/jstests/aggregation/expressions/date_to_string.js b/jstests/aggregation/expressions/date_to_string.js
index b1cc145b627..b618c81e383 100644
--- a/jstests/aggregation/expressions/date_to_string.js
+++ b/jstests/aggregation/expressions/date_to_string.js
@@ -8,7 +8,7 @@ coll.drop();
/* --------------------------------------------------------------------------------------- */
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "UTC"},
{_id: 1, date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "Europe/London"},
{_id: 2, date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "America/New_York"},
@@ -47,7 +47,7 @@ assert.eq(
/* --------------------------------------------------------------------------------------- */
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, date: new ISODate("2017-01-04T15:08:51.911Z")},
{_id: 1, date: new ISODate("2017-07-04T15:09:12.911Z")},
{_id: 2, date: new ISODate("2017-12-04T15:09:14.911Z")},
@@ -78,7 +78,7 @@ assert.eq(
/* --------------------------------------------------------------------------------------- */
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, date: new ISODate("2017-01-04T15:08:51.911Z")},
{_id: 1, date: new ISODate("2017-07-04T15:09:12.911Z")},
{_id: 2, date: new ISODate("2017-12-04T15:09:14.911Z")},
@@ -105,7 +105,7 @@ assert.eq(
/* --------------------------------------------------------------------------------------- */
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, date: new ISODate("2017-01-01T15:08:51.911Z")},
{_id: 1, date: new ISODate("2017-07-04T15:09:12.911Z")},
{_id: 2, date: new ISODate("2017-12-04T15:09:14.911Z")},
@@ -133,7 +133,7 @@ assert.eq(
/* Test that missing expressions turn into BSON null values. */
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, date: new ISODate("2017-01-04T15:08:51.911Z")},
{_id: 1, date: new ISODate("2017-01-04T15:08:51.911Z"), timezone: null},
{_id: 2, date: new ISODate("2017-01-04T15:08:51.911Z"), timezone: undefined},
@@ -171,7 +171,7 @@ assert.eq(
/* Test that the default format is "%Y-%m-%dT%H:%M:%S.%LZ" if none specified. */
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, date: new ISODate("2017-01-04T15:08:51.911Z")},
{_id: 1, date: new ISODate("2017-07-04T15:09:12.911Z")},
{_id: 2, date: new ISODate("2017-12-04T15:09:14.911Z")},
@@ -192,7 +192,7 @@ assert.eq(
/* --------------------------------------------------------------------------------------- */
/* Test that null is returned when 'format' evaluates to nullish. */
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
assert.eq([{_id: 0, date: null}],
coll.aggregate({
diff --git a/jstests/aggregation/expressions/date_to_string_on_null.js b/jstests/aggregation/expressions/date_to_string_on_null.js
index 7b3bdc07538..15db75bee75 100644
--- a/jstests/aggregation/expressions/date_to_string_on_null.js
+++ b/jstests/aggregation/expressions/date_to_string_on_null.js
@@ -8,7 +8,7 @@ const onNullValue = ISODate("2017-07-04T11:56:02Z");
const coll = db.date_to_string_on_null;
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
for (let nullishValue of [null, undefined, "$missing"]) {
// Test that the 'onNull' value is returned when the 'date' is nullish.
diff --git a/jstests/aggregation/expressions/in.js b/jstests/aggregation/expressions/in.js
index 63ba02f1b4e..698a6212b42 100644
--- a/jstests/aggregation/expressions/in.js
+++ b/jstests/aggregation/expressions/in.js
@@ -32,7 +32,7 @@ function testExpressionCollectionCollation(options, collationSpec) {
function testExpressionInternal(options) {
var pipeline = {$project: {included: {$in: ["$elementField", {$literal: options.array}]}}};
- assert.writeOK(coll.insert({elementField: options.element}));
+ assert.commandWorked(coll.insert({elementField: options.element}));
var res = coll.aggregate(pipeline).toArray();
assert.eq(res.length, 1);
assert.eq(res[0].included, options.elementIsIncluded);
diff --git a/jstests/aggregation/expressions/indexof_array.js b/jstests/aggregation/expressions/indexof_array.js
index a32376b1f9d..450e450fa16 100644
--- a/jstests/aggregation/expressions/indexof_array.js
+++ b/jstests/aggregation/expressions/indexof_array.js
@@ -9,7 +9,7 @@ var coll = db.indexofarray;
coll.drop();
// Insert a dummy document to ensure something flows through the pipeline.
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
testExpression(coll, {$indexOfArray: [[1, 2, 3], 2]}, 1);
diff --git a/jstests/aggregation/expressions/indexof_bytes.js b/jstests/aggregation/expressions/indexof_bytes.js
index 14bcead5293..7632d0177c0 100644
--- a/jstests/aggregation/expressions/indexof_bytes.js
+++ b/jstests/aggregation/expressions/indexof_bytes.js
@@ -36,7 +36,7 @@ var coll = db.indexofbytes;
coll.drop();
// Insert a dummy document so something flows through the pipeline.
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
testExpressionBytes(coll, {$indexOfBytes: ["abc", "b"]}, 1);
diff --git a/jstests/aggregation/expressions/indexof_codepoints.js b/jstests/aggregation/expressions/indexof_codepoints.js
index acc4a3b072d..56d43547564 100644
--- a/jstests/aggregation/expressions/indexof_codepoints.js
+++ b/jstests/aggregation/expressions/indexof_codepoints.js
@@ -34,7 +34,7 @@ var coll = db.indexofcp;
coll.drop();
// Insert a dummy document so something flows through the pipeline.
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
testExpressionCodePoints(coll, {$indexOfCP: ["∫aƒ", "ƒ"]}, 2);
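$indexOfCP counts offsets in Unicode code points, while $indexOfBytes (exercised in the sibling test above) counts UTF-8 bytes, so the same search reports different positions. A sketch of the divergence, since "∫" (U+222B) encodes as three bytes:

    // {$indexOfCP:    ["∫aƒ", "ƒ"]}  ->  2  (code points: ∫, a, ƒ)
    // {$indexOfBytes: ["∫aƒ", "ƒ"]}  ->  4  (∫ fills bytes 0-2, "a" is byte 3)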
diff --git a/jstests/aggregation/expressions/merge_objects.js b/jstests/aggregation/expressions/merge_objects.js
index e6d38ccc6a4..995299f3e6c 100644
--- a/jstests/aggregation/expressions/merge_objects.js
+++ b/jstests/aggregation/expressions/merge_objects.js
@@ -9,7 +9,7 @@ let coll = db.merge_object_expr;
coll.drop();
// Test merging two objects together.
-assert.writeOK(coll.insert({_id: 0, subObject: {b: 1, c: 1}}));
+assert.commandWorked(coll.insert({_id: 0, subObject: {b: 1, c: 1}}));
let result = coll.aggregate([
{$match: {_id: 0}},
{$project: {mergedDocument: {$mergeObjects: ["$subObject", {d: 1}]}}}
@@ -18,7 +18,7 @@ let result = coll.aggregate([
assert.eq(result, [{_id: 0, mergedDocument: {b: 1, c: 1, d: 1}}]);
// Test merging the root document with a new field.
-assert.writeOK(coll.insert({_id: 1, a: 0, b: 1}));
+assert.commandWorked(coll.insert({_id: 1, a: 0, b: 1}));
result = coll.aggregate([
{$match: {_id: 1}},
{$project: {mergedDocument: {$mergeObjects: ["$$ROOT", {newField: "newValue"}]}}}
@@ -27,7 +27,7 @@ result = coll.aggregate([
assert.eq(result, [{_id: 1, mergedDocument: {_id: 1, a: 0, b: 1, newField: "newValue"}}]);
// Test replacing a field in the root.
-assert.writeOK(coll.insert({_id: 2, a: 0, b: 1}));
+assert.commandWorked(coll.insert({_id: 2, a: 0, b: 1}));
result = coll.aggregate([
{$match: {_id: 2}},
{$project: {mergedDocument: {$mergeObjects: ["$$ROOT", {a: "newValue"}]}}}
@@ -36,7 +36,7 @@ result = coll.aggregate([
assert.eq(result, [{_id: 2, mergedDocument: {_id: 2, a: "newValue", b: 1}}]);
// Test overriding a document with root.
-assert.writeOK(coll.insert({_id: 3, a: 0, b: 1}));
+assert.commandWorked(coll.insert({_id: 3, a: 0, b: 1}));
result = coll.aggregate([
{$match: {_id: 3}},
{$project: {mergedDocument: {$mergeObjects: [{a: "defaultValue"}, "$$ROOT"]}}}
@@ -45,7 +45,7 @@ result = coll.aggregate([
assert.eq(result, [{_id: 3, mergedDocument: {a: 0, _id: 3, b: 1}}]);
// Test replacing root with merged document.
-assert.writeOK(coll.insert({_id: 4, a: 0, subObject: {b: 1, c: 2}}));
+assert.commandWorked(coll.insert({_id: 4, a: 0, subObject: {b: 1, c: 2}}));
result = coll.aggregate([
{$match: {_id: 4}},
{$replaceRoot: {newRoot: {$mergeObjects: ["$$ROOT", "$subObject"]}}}
@@ -54,7 +54,7 @@ result = coll.aggregate([
assert.eq(result, [{_id: 4, a: 0, subObject: {b: 1, c: 2}, b: 1, c: 2}]);
// Test merging with an embedded object.
-assert.writeOK(coll.insert({_id: 5, subObject: {b: 1, c: 1}}));
+assert.commandWorked(coll.insert({_id: 5, subObject: {b: 1, c: 1}}));
result =
coll.aggregate([
{$match: {_id: 5}},
@@ -67,18 +67,18 @@ result =
assert.eq(result, [{_id: 5, mergedDocument: {b: 1, c: 1, subObject1: {d: 1}, e: 1}}]);
// Test for errors on non-document types.
-assert.writeOK(coll.insert({_id: 6, a: "string"}));
+assert.commandWorked(coll.insert({_id: 6, a: "string"}));
assertErrorCode(
coll,
[{$match: {_id: 6}}, {$project: {mergedDocument: {$mergeObjects: ["$a", {a: "newString"}]}}}],
40400);
-assert.writeOK(coll.insert({_id: 7, a: {b: 1}, c: 1}));
+assert.commandWorked(coll.insert({_id: 7, a: {b: 1}, c: 1}));
assertErrorCode(
coll, [{$match: {_id: 7}}, {$project: {mergedDocument: {$mergeObjects: ["$a", "$c"]}}}], 40400);
// Test outputs with null values.
-assert.writeOK(coll.insert({_id: 8, a: {b: 1}}));
+assert.commandWorked(coll.insert({_id: 8, a: {b: 1}}));
result =
coll.aggregate(
[{$match: {_id: 8}}, {$project: {mergedDocument: {$mergeObjects: ["$a", {b: null}]}}}])
@@ -86,7 +86,7 @@ result =
assert.eq(result, [{_id: 8, mergedDocument: {b: null}}]);
// Test output with undefined values.
-assert.writeOK(coll.insert({_id: 9, a: {b: 1}}));
+assert.commandWorked(coll.insert({_id: 9, a: {b: 1}}));
result = coll.aggregate([
{$match: {_id: 9}},
{$project: {mergedDocument: {$mergeObjects: ["$a", {b: undefined}]}}}
@@ -95,7 +95,7 @@ result = coll.aggregate([
assert.eq(result, [{_id: 9, mergedDocument: {b: undefined}}]);
// Test output with missing values.
-assert.writeOK(coll.insert({_id: 10, a: {b: 1}}));
+assert.commandWorked(coll.insert({_id: 10, a: {b: 1}}));
result = coll.aggregate([
{$match: {_id: 10}},
{$project: {mergedDocument: {$mergeObjects: ["$a", {b: "$nonExistentField"}]}}}
@@ -103,7 +103,7 @@ result = coll.aggregate([
.toArray();
assert.eq(result, [{_id: 10, mergedDocument: {b: 1}}]);
-assert.writeOK(coll.insert({_id: 11, a: {b: 1}}));
+assert.commandWorked(coll.insert({_id: 11, a: {b: 1}}));
result =
coll.aggregate(
[{$match: {_id: 11}}, {$project: {mergedDocument: {$mergeObjects: ["$a", {b: ""}]}}}])
@@ -111,7 +111,7 @@ result =
assert.eq(result, [{_id: 11, mergedDocument: {b: ""}}]);
// Test outputs with empty values.
-assert.writeOK(coll.insert({_id: 12, b: 1, c: 1}));
+assert.commandWorked(coll.insert({_id: 12, b: 1, c: 1}));
result = coll.aggregate([{$match: {_id: 12}}, {$project: {mergedDocument: {$mergeObjects: [{}]}}}])
.toArray();
assert.eq(result, [{_id: 12, mergedDocument: {}}]);
@@ -122,10 +122,10 @@ result =
assert.eq(result, [{_id: 12, mergedDocument: {}}]);
// Test merge within a $group stage.
-assert.writeOK(coll.insert({_id: 13, group: 1, obj: {}}));
-assert.writeOK(coll.insert({_id: 14, group: 1, obj: {a: 2, b: 2}}));
-assert.writeOK(coll.insert({_id: 15, group: 1, obj: {a: 1, c: 3}}));
-assert.writeOK(coll.insert({_id: 16, group: 2, obj: {a: 1, b: 1}}));
+assert.commandWorked(coll.insert({_id: 13, group: 1, obj: {}}));
+assert.commandWorked(coll.insert({_id: 14, group: 1, obj: {a: 2, b: 2}}));
+assert.commandWorked(coll.insert({_id: 15, group: 1, obj: {a: 1, c: 3}}));
+assert.commandWorked(coll.insert({_id: 16, group: 2, obj: {a: 1, b: 1}}));
result = coll.aggregate([
{$match: {_id: {$in: [13, 14, 15, 16]}}},
{$sort: {_id: 1}},
@@ -137,7 +137,7 @@ assert.eq(result,
[{_id: 1, mergedDocument: {a: 1, b: 2, c: 3}}, {_id: 2, mergedDocument: {a: 1, b: 1}}]);
// Test merge with $$REMOVE operator.
-assert.writeOK(coll.insert({_id: 17, a: {b: 2}}));
+assert.commandWorked(coll.insert({_id: 17, a: {b: 2}}));
result = coll.aggregate([
{$match: {_id: 17}},
{$project: {mergedDocument: {$mergeObjects: ["$a", {b: "$$REMOVE"}]}}}
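$$REMOVE evaluates to a missing value, and as the $nonExistentField case above establishes, a missing value never overwrites an existing key, so the expected result here is presumably {b: 2}. In general a later $mergeObjects argument wins only when it supplies a concrete value; a sketch with illustrative values:

    // {$mergeObjects: [{a: 1, b: 2}, {b: 3, c: "$missingField"}]}  ->  {a: 1, b: 3}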
diff --git a/jstests/aggregation/expressions/objectToArray.js b/jstests/aggregation/expressions/objectToArray.js
index 0ec4a40c2c0..704aacd62a6 100644
--- a/jstests/aggregation/expressions/objectToArray.js
+++ b/jstests/aggregation/expressions/objectToArray.js
@@ -11,16 +11,16 @@ coll.drop();
let object_to_array_expr = {$project: {expanded: {$objectToArray: "$subDoc"}}};
// $objectToArray correctly converts a document to an array of key-value pairs.
-assert.writeOK(coll.insert({_id: 0, subDoc: {"a": 1, "b": 2, "c": "foo"}}));
+assert.commandWorked(coll.insert({_id: 0, subDoc: {"a": 1, "b": 2, "c": "foo"}}));
let result = coll.aggregate([{$match: {_id: 0}}, object_to_array_expr]).toArray();
assert.eq(result,
[{_id: 0, expanded: [{"k": "a", "v": 1}, {"k": "b", "v": 2}, {"k": "c", "v": "foo"}]}]);
-assert.writeOK(coll.insert({_id: 1, subDoc: {"y": []}}));
+assert.commandWorked(coll.insert({_id: 1, subDoc: {"y": []}}));
result = coll.aggregate([{$match: {_id: 1}}, object_to_array_expr]).toArray();
assert.eq(result, [{_id: 1, expanded: [{"k": "y", "v": []}]}]);
-assert.writeOK(coll.insert({_id: 2, subDoc: {"a": 1, "b": {"d": "string"}, "c": [1, 2]}}));
+assert.commandWorked(coll.insert({_id: 2, subDoc: {"a": 1, "b": {"d": "string"}, "c": [1, 2]}}));
result = coll.aggregate([{$match: {_id: 2}}, object_to_array_expr]).toArray();
assert.eq(
result, [{
@@ -28,12 +28,12 @@ assert.eq(
expanded: [{"k": "a", "v": 1}, {"k": "b", "v": {"d": "string"}}, {"k": "c", "v": [1, 2]}]
}]);
-assert.writeOK(coll.insert({_id: 3, subDoc: {}}));
+assert.commandWorked(coll.insert({_id: 3, subDoc: {}}));
result = coll.aggregate([{$match: {_id: 3}}, object_to_array_expr]).toArray();
assert.eq(result, [{_id: 3, expanded: []}]);
// Converts the root of the document to an array.
-assert.writeOK(coll.insert({_id: 4, "a": 1, "b": 2, "c": 3}));
+assert.commandWorked(coll.insert({_id: 4, "a": 1, "b": 2, "c": 3}));
result = coll.aggregate([{$match: {_id: 4}}, {$project: {document: {$objectToArray: "$$ROOT"}}}])
.toArray();
assert.eq(
@@ -42,7 +42,7 @@ assert.eq(
document: [{"k": "_id", "v": 4}, {"k": "a", "v": 1}, {"k": "b", "v": 2}, {"k": "c", "v": 3}]
}]);
-assert.writeOK(coll.insert({_id: 5, "date": ISODate("2017-01-24T00:00:00")}));
+assert.commandWorked(coll.insert({_id: 5, "date": ISODate("2017-01-24T00:00:00")}));
result = coll.aggregate([
{$match: {_id: 5}},
{$project: {document: {$objectToArray: {dayOfWeek: {$dayOfWeek: "$date"}}}}}
@@ -51,40 +51,40 @@ result = coll.aggregate([
assert.eq(result, [{_id: 5, document: [{"k": "dayOfWeek", "v": 3}]}]);
// $objectToArray errors on non-document types.
-assert.writeOK(coll.insert({_id: 6, subDoc: "string"}));
+assert.commandWorked(coll.insert({_id: 6, subDoc: "string"}));
assertErrorCode(coll, [{$match: {_id: 6}}, object_to_array_expr], 40390);
-assert.writeOK(coll.insert({_id: 7, subDoc: ObjectId()}));
+assert.commandWorked(coll.insert({_id: 7, subDoc: ObjectId()}));
assertErrorCode(coll, [{$match: {_id: 7}}, object_to_array_expr], 40390);
-assert.writeOK(coll.insert({_id: 8, subDoc: NumberLong(0)}));
+assert.commandWorked(coll.insert({_id: 8, subDoc: NumberLong(0)}));
assertErrorCode(coll, [{$match: {_id: 8}}, object_to_array_expr], 40390);
-assert.writeOK(coll.insert({_id: 9, subDoc: []}));
+assert.commandWorked(coll.insert({_id: 9, subDoc: []}));
assertErrorCode(coll, [{$match: {_id: 9}}, object_to_array_expr], 40390);
-assert.writeOK(coll.insert({_id: 10, subDoc: [0]}));
+assert.commandWorked(coll.insert({_id: 10, subDoc: [0]}));
assertErrorCode(coll, [{$match: {_id: 10}}, object_to_array_expr], 40390);
-assert.writeOK(coll.insert({_id: 11, subDoc: ["string"]}));
+assert.commandWorked(coll.insert({_id: 11, subDoc: ["string"]}));
assertErrorCode(coll, [{$match: {_id: 11}}, object_to_array_expr], 40390);
-assert.writeOK(coll.insert({_id: 12, subDoc: [{"a": "b"}]}));
+assert.commandWorked(coll.insert({_id: 12, subDoc: [{"a": "b"}]}));
assertErrorCode(coll, [{$match: {_id: 12}}, object_to_array_expr], 40390);
-assert.writeOK(coll.insert({_id: 13, subDoc: NaN}));
+assert.commandWorked(coll.insert({_id: 13, subDoc: NaN}));
assertErrorCode(coll, [{$match: {_id: 13}}, object_to_array_expr], 40390);
// $objectToArray outputs null on null-ish types.
-assert.writeOK(coll.insert({_id: 14, subDoc: null}));
+assert.commandWorked(coll.insert({_id: 14, subDoc: null}));
result = coll.aggregate([{$match: {_id: 14}}, object_to_array_expr]).toArray();
assert.eq(result, [{_id: 14, expanded: null}]);
-assert.writeOK(coll.insert({_id: 15, subDoc: undefined}));
+assert.commandWorked(coll.insert({_id: 15, subDoc: undefined}));
result = coll.aggregate([{$match: {_id: 15}}, object_to_array_expr]).toArray();
assert.eq(result, [{_id: 15, expanded: null}]);
-assert.writeOK(coll.insert({_id: 16}));
+assert.commandWorked(coll.insert({_id: 16}));
result = coll.aggregate([{$match: {_id: 16}}, object_to_array_expr]).toArray();
assert.eq(result, [{_id: 16, expanded: null}]);
}());
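$objectToArray has an inverse, $arrayToObject, and composing the two round-trips any document-valued field; the pair is a common idiom for renaming or filtering keys. A sketch against the same 'subDoc' shape used above:

    // {$project: {roundTripped: {$arrayToObject: {$objectToArray: "$subDoc"}}}}
    // leaves subDoc unchanged whenever subDoc is a document.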
diff --git a/jstests/aggregation/expressions/object_ids_for_date_expressions.js b/jstests/aggregation/expressions/object_ids_for_date_expressions.js
index bff8ab587b3..63bada5d893 100644
--- a/jstests/aggregation/expressions/object_ids_for_date_expressions.js
+++ b/jstests/aggregation/expressions/object_ids_for_date_expressions.js
@@ -11,7 +11,7 @@ let testOpCount = 0;
coll.drop();
// Seed collection so that the pipeline will execute.
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
function makeObjectIdFromDate(dt) {
try {
diff --git a/jstests/aggregation/expressions/round_trunc.js b/jstests/aggregation/expressions/round_trunc.js
index 735c2b54477..1e03eb47e68 100644
--- a/jstests/aggregation/expressions/round_trunc.js
+++ b/jstests/aggregation/expressions/round_trunc.js
@@ -9,7 +9,7 @@ load("jstests/aggregation/extras/utils.js");
var coll = db.server19548;
coll.drop();
// Seed collection so that the pipeline will execute.
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
// Helper for testing that op returns expResult.
function testOp(op, expResult) {
diff --git a/jstests/aggregation/expressions/size.js b/jstests/aggregation/expressions/size.js
index 4e21c71bf4e..3aa1c5d61eb 100644
--- a/jstests/aggregation/expressions/size.js
+++ b/jstests/aggregation/expressions/size.js
@@ -8,15 +8,15 @@ load("jstests/aggregation/extras/utils.js");
const coll = db.expression_size;
coll.drop();
-assert.writeOK(coll.insert({_id: 0, arr: []}));
-assert.writeOK(coll.insert({_id: 1, arr: [1]}));
-assert.writeOK(coll.insert({_id: 2, arr: ["asdf", "asdfasdf"]}));
-assert.writeOK(coll.insert({_id: 3, arr: [1, "asdf", 1234, 4.3, {key: 23}]}));
-assert.writeOK(coll.insert({_id: 4, arr: [3, [31, 31, 13, 13]]}));
+assert.commandWorked(coll.insert({_id: 0, arr: []}));
+assert.commandWorked(coll.insert({_id: 1, arr: [1]}));
+assert.commandWorked(coll.insert({_id: 2, arr: ["asdf", "asdfasdf"]}));
+assert.commandWorked(coll.insert({_id: 3, arr: [1, "asdf", 1234, 4.3, {key: 23}]}));
+assert.commandWorked(coll.insert({_id: 4, arr: [3, [31, 31, 13, 13]]}));
const result = coll.aggregate([{$sort: {_id: 1}}, {$project: {_id: 0, length: {$size: "$arr"}}}]);
assert.eq(result.toArray(), [{length: 0}, {length: 1}, {length: 2}, {length: 5}, {length: 2}]);
-assert.writeOK(coll.insert({arr: 231}));
+assert.commandWorked(coll.insert({arr: 231}));
assertErrorCode(coll, {$project: {_id: 0, length: {$size: "$arr"}}}, 17124);
}());
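$size demands an array operand (hence error 17124 above) rather than returning null for scalars; a pipeline that must tolerate mixed shapes can guard with $isArray. A sketch, not part of this test:

    // {$project: {length: {$cond: [{$isArray: "$arr"}, {$size: "$arr"}, null]}}}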
diff --git a/jstests/aggregation/expressions/split.js b/jstests/aggregation/expressions/split.js
index 86200334395..bdece4ae583 100644
--- a/jstests/aggregation/expressions/split.js
+++ b/jstests/aggregation/expressions/split.js
@@ -7,7 +7,7 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
var coll = db.split;
coll.drop();
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
testExpression(coll, {$split: ["abc", "b"]}, ["a", "c"]);
testExpression(coll, {$split: ["aaa", "b"]}, ["aaa"]);
diff --git a/jstests/aggregation/expressions/trim.js b/jstests/aggregation/expressions/trim.js
index af197adca5a..821e15ea4a3 100644
--- a/jstests/aggregation/expressions/trim.js
+++ b/jstests/aggregation/expressions/trim.js
@@ -28,7 +28,7 @@ testExpressionWithCollation(coll, {$ltrim: {input: "xXx", chars: "x"}}, "Xx", ca
// Test using inputs from documents.
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, name: ", Charlie"},
{_id: 1, name: "Obama\t, Barack"},
{_id: 2, name: " Ride, Sally "}
@@ -43,7 +43,7 @@ assert.eq(
[{_id: 0, firstName: "Charlie"}, {_id: 1, firstName: "Barack"}, {_id: 2, firstName: "Sally"}]);
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, poorlyParsedWebTitle: "The title of my document"},
{_id: 1, poorlyParsedWebTitle: "\u2001\u2002 Odd unicode indentation"},
{_id: 2, poorlyParsedWebTitle: "\u2001\u2002 Odd unicode indentation\u200A"},
@@ -59,7 +59,7 @@ assert.eq(
]);
coll.drop();
-assert.writeOK(coll.insert([
+assert.commandWorked(coll.insert([
{_id: 0, proof: "Left as an exercise for the reader∎"},
{_id: 1, proof: "∎∃ proof∎"},
{_id: 2, proof: "Just view the problem as a continuous DAG whose elements are taylor series∎"},
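By default $trim and its one-sided variants strip Unicode whitespace, which is why the \u2001/\u2002/\u200A titles above lose their indentation with no 'chars' argument; supplying 'chars' replaces that default, which is presumably how these "∎"-terminated proofs get cleaned:

    // {$project: {trimmed: {$trim: {input: "$proof", chars: "∎"}}}}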
diff --git a/jstests/aggregation/match_swapping_renamed_fields.js b/jstests/aggregation/match_swapping_renamed_fields.js
index e537f249454..26886181f3a 100644
--- a/jstests/aggregation/match_swapping_renamed_fields.js
+++ b/jstests/aggregation/match_swapping_renamed_fields.js
@@ -11,7 +11,7 @@ load("jstests/libs/analyze_plan.js");
let coll = db.match_swapping_renamed_fields;
coll.drop();
-assert.writeOK(coll.insert([{a: 1, b: 1, c: 1}, {a: 2, b: 2, c: 2}, {a: 3, b: 3, c: 3}]));
+assert.commandWorked(coll.insert([{a: 1, b: 1, c: 1}, {a: 2, b: 2, c: 2}, {a: 3, b: 3, c: 3}]));
assert.commandWorked(coll.createIndex({a: 1}));
// Test that a $match can result in index usage after moving past a field renamed by $project.
@@ -52,8 +52,8 @@ explain = coll.explain().aggregate(pipeline);
assert.neq(null, getAggPlanStage(explain, "IXSCAN"), tojson(explain));
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [{b: 1, c: 1}, {b: 2, c: 2}]}));
-assert.writeOK(coll.insert({_id: 1, a: [{b: 3, c: 3}, {b: 4, c: 4}]}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: 1, c: 1}, {b: 2, c: 2}]}));
+assert.commandWorked(coll.insert({_id: 1, a: [{b: 3, c: 3}, {b: 4, c: 4}]}));
assert.commandWorked(coll.createIndex({"a.b": 1, "a.c": 1}));
// Test that a $match can result in index usage after moving past a dotted array path renamed by
@@ -96,8 +96,8 @@ assert.neq(null, ixscan, tojson(explain));
assert.eq({"a.b": 1, "a.c": 1}, ixscan.keyPattern, tojson(ixscan));
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [{b: [{c: 1}, {c: 2}]}, {b: [{c: 3}, {c: 4}]}]}));
-assert.writeOK(coll.insert({_id: 1, a: [{b: [{c: 5}, {c: 6}]}, {b: [{c: 7}, {c: 8}]}]}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: [{c: 1}, {c: 2}]}, {b: [{c: 3}, {c: 4}]}]}));
+assert.commandWorked(coll.insert({_id: 1, a: [{b: [{c: 5}, {c: 6}]}, {b: [{c: 7}, {c: 8}]}]}));
assert.commandWorked(coll.createIndex({"a.b.c": 1}));
// Test that a $match can result in index usage by moving past a rename of a field inside
diff --git a/jstests/aggregation/mongos_merge.js b/jstests/aggregation/mongos_merge.js
index f6bbeea0122..8662f203af3 100644
--- a/jstests/aggregation/mongos_merge.js
+++ b/jstests/aggregation/mongos_merge.js
@@ -69,9 +69,9 @@ var georng = new GeoNearRandomTest(mongosColl);
// Write 400 documents across the 4 chunks.
for (let i = -200; i < 200; i++) {
- assert.writeOK(mongosColl.insert(
+ assert.commandWorked(mongosColl.insert(
{_id: i, a: [i], b: {redactThisDoc: true}, c: true, geo: georng.mkPt(), text: "txt"}));
- assert.writeOK(unshardedColl.insert({_id: i, x: i}));
+ assert.commandWorked(unshardedColl.insert({_id: i, x: i}));
}
let testNameHistory = new Set();
diff --git a/jstests/aggregation/optimize_away_pipeline.js b/jstests/aggregation/optimize_away_pipeline.js
index 8ca82dd3ed6..686414b7035 100644
--- a/jstests/aggregation/optimize_away_pipeline.js
+++ b/jstests/aggregation/optimize_away_pipeline.js
@@ -18,9 +18,9 @@ load("jstests/libs/fixture_helpers.js"); // For 'isMongos' and 'isSharded'.
const coll = db.optimize_away_pipeline;
coll.drop();
-assert.writeOK(coll.insert({_id: 1, x: 10}));
-assert.writeOK(coll.insert({_id: 2, x: 20}));
-assert.writeOK(coll.insert({_id: 3, x: 30}));
+assert.commandWorked(coll.insert({_id: 1, x: 10}));
+assert.commandWorked(coll.insert({_id: 2, x: 20}));
+assert.commandWorked(coll.insert({_id: 3, x: 30}));
// Asserts that the given pipeline has *not* been optimized away and the request is answered
// using the aggregation module. There should be pipeline stages present in the explain output.
@@ -144,7 +144,7 @@ assertPipelineDoesNotUseAggregation(
// Pipelines with a collation.
// Test a simple pipeline with a case-insensitive collation.
-assert.writeOK(coll.insert({_id: 4, x: 40, b: "abc"}));
+assert.commandWorked(coll.insert({_id: 4, x: 40, b: "abc"}));
assertPipelineDoesNotUseAggregation({
pipeline: [{$match: {b: "ABC"}}],
pipelineOptions: {collation: {locale: "en_US", strength: 2}},
@@ -182,7 +182,7 @@ assert.commandWorked(coll.dropIndexes());
// Pipelines which cannot be optimized away.
// TODO SERVER-40254: Uncovered queries.
-assert.writeOK(coll.insert({_id: 4, x: 40, a: {b: "ab1"}}));
+assert.commandWorked(coll.insert({_id: 4, x: 40, a: {b: "ab1"}}));
assertPipelineUsesAggregation({
pipeline: [{$project: {x: 1, _id: 0}}],
expectedStage: "COLLSCAN",
diff --git a/jstests/aggregation/shard_targeting.js b/jstests/aggregation/shard_targeting.js
index 1654c17760d..0512d80090b 100644
--- a/jstests/aggregation/shard_targeting.js
+++ b/jstests/aggregation/shard_targeting.js
@@ -70,10 +70,10 @@ assert.commandWorked(mongosDB.adminCommand(
{moveChunk: mongosColl.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
// Write one document into each of the chunks.
-assert.writeOK(mongosColl.insert({_id: -150}));
-assert.writeOK(mongosColl.insert({_id: -50}));
-assert.writeOK(mongosColl.insert({_id: 50}));
-assert.writeOK(mongosColl.insert({_id: 150}));
+assert.commandWorked(mongosColl.insert({_id: -150}));
+assert.commandWorked(mongosColl.insert({_id: -50}));
+assert.commandWorked(mongosColl.insert({_id: 50}));
+assert.commandWorked(mongosColl.insert({_id: 150}));
const shardExceptions =
[ErrorCodes.StaleConfig, ErrorCodes.StaleShardVersion, ErrorCodes.StaleEpoch];
diff --git a/jstests/aggregation/sharded_agg_cleanup_on_error.js b/jstests/aggregation/sharded_agg_cleanup_on_error.js
index cbcb1f02e53..5fba3e477a9 100644
--- a/jstests/aggregation/sharded_agg_cleanup_on_error.js
+++ b/jstests/aggregation/sharded_agg_cleanup_on_error.js
@@ -29,7 +29,7 @@ const shard1DB = st.shard1.getDB(kDBName);
let coll = mongosDB.sharded_agg_cleanup_on_error;
for (let i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
}
st.shardColl(coll, {_id: 1}, {_id: 5}, {_id: 6}, kDBName, false);
diff --git a/jstests/aggregation/sources/addFields/use_cases.js b/jstests/aggregation/sources/addFields/use_cases.js
index 2f6f454ba5a..34703a75907 100644
--- a/jstests/aggregation/sources/addFields/use_cases.js
+++ b/jstests/aggregation/sources/addFields/use_cases.js
@@ -24,7 +24,7 @@ function doExecutionTest(conn) {
// Insert a bunch of documents of the form above.
const nDocs = 10;
for (let i = 0; i < nDocs; i++) {
- assert.writeOK(coll.insert({"_id": i, "2i": i * 2, "3i": i * 3}));
+ assert.commandWorked(coll.insert({"_id": i, "2i": i * 2, "3i": i * 3}));
}
// Add the minimum, maximum, and average temperatures, and make sure that doing the same
diff --git a/jstests/aggregation/sources/addFields/weather.js b/jstests/aggregation/sources/addFields/weather.js
index 16e570b843c..feb57c9a8a6 100644
--- a/jstests/aggregation/sources/addFields/weather.js
+++ b/jstests/aggregation/sources/addFields/weather.js
@@ -56,7 +56,7 @@ function doExecutionTest(conn) {
// Insert a bunch of documents of the form above.
const nDocs = 10;
for (let i = 0; i < nDocs; i++) {
- assert.writeOK(coll.insert(generateRandomDocument()));
+ assert.commandWorked(coll.insert(generateRandomDocument()));
}
// Add the minimum, maximum, and average temperatures, and make sure that doing the same
diff --git a/jstests/aggregation/sources/bucket/collation_bucket.js b/jstests/aggregation/sources/bucket/collation_bucket.js
index 617bf8085f2..182d9d8b17c 100644
--- a/jstests/aggregation/sources/bucket/collation_bucket.js
+++ b/jstests/aggregation/sources/bucket/collation_bucket.js
@@ -14,15 +14,15 @@ var coll = db.collation_bucket;
coll.drop();
function insertData() {
- assert.writeOK(coll.insert({num: "1"}));
- assert.writeOK(coll.insert({num: "2"}));
- assert.writeOK(coll.insert({num: "5"}));
- assert.writeOK(coll.insert({num: "10"}));
- assert.writeOK(coll.insert({num: "20"}));
- assert.writeOK(coll.insert({num: "50"}));
- assert.writeOK(coll.insert({num: "100"}));
- assert.writeOK(coll.insert({num: "200"}));
- assert.writeOK(coll.insert({num: "500"}));
+ assert.commandWorked(coll.insert({num: "1"}));
+ assert.commandWorked(coll.insert({num: "2"}));
+ assert.commandWorked(coll.insert({num: "5"}));
+ assert.commandWorked(coll.insert({num: "10"}));
+ assert.commandWorked(coll.insert({num: "20"}));
+ assert.commandWorked(coll.insert({num: "50"}));
+ assert.commandWorked(coll.insert({num: "100"}));
+ assert.commandWorked(coll.insert({num: "200"}));
+ assert.commandWorked(coll.insert({num: "500"}));
}
insertData();
diff --git a/jstests/aggregation/sources/bucketauto/collation_bucketauto.js b/jstests/aggregation/sources/bucketauto/collation_bucketauto.js
index 26b48951ab7..3ae8586b81b 100644
--- a/jstests/aggregation/sources/bucketauto/collation_bucketauto.js
+++ b/jstests/aggregation/sources/bucketauto/collation_bucketauto.js
@@ -14,15 +14,15 @@ var coll = db.collation_bucket;
coll.drop();
function insertData() {
- assert.writeOK(coll.insert({num: "1"}));
- assert.writeOK(coll.insert({num: "2"}));
- assert.writeOK(coll.insert({num: "5"}));
- assert.writeOK(coll.insert({num: "10"}));
- assert.writeOK(coll.insert({num: "20"}));
- assert.writeOK(coll.insert({num: "50"}));
- assert.writeOK(coll.insert({num: "100"}));
- assert.writeOK(coll.insert({num: "200"}));
- assert.writeOK(coll.insert({num: "500"}));
+ assert.commandWorked(coll.insert({num: "1"}));
+ assert.commandWorked(coll.insert({num: "2"}));
+ assert.commandWorked(coll.insert({num: "5"}));
+ assert.commandWorked(coll.insert({num: "10"}));
+ assert.commandWorked(coll.insert({num: "20"}));
+ assert.commandWorked(coll.insert({num: "50"}));
+ assert.commandWorked(coll.insert({num: "100"}));
+ assert.commandWorked(coll.insert({num: "200"}));
+ assert.commandWorked(coll.insert({num: "500"}));
}
insertData();
diff --git a/jstests/aggregation/sources/collStats/count.js b/jstests/aggregation/sources/collStats/count.js
index 5eb96cd7146..6a840caf9cd 100644
--- a/jstests/aggregation/sources/collStats/count.js
+++ b/jstests/aggregation/sources/collStats/count.js
@@ -12,7 +12,7 @@ coll.drop();
let nDocs = 1000;
for (var i = 0; i < nDocs; i++) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
// Test that $collStats must be first stage.
@@ -50,7 +50,7 @@ assert(result.hasOwnProperty("storageStats"));
assert.eq(nDocs, result.storageStats.count);
// Test the record count against an empty collection.
-assert.writeOK(coll.remove({}));
+assert.commandWorked(coll.remove({}));
pipeline = [{$collStats: {count: {}}}];
result = coll.aggregate(pipeline).next();
assert.eq(0, result.count);
diff --git a/jstests/aggregation/sources/facet/inner_graphlookup.js b/jstests/aggregation/sources/facet/inner_graphlookup.js
index 9631b8878ef..a58736c4ec4 100644
--- a/jstests/aggregation/sources/facet/inner_graphlookup.js
+++ b/jstests/aggregation/sources/facet/inner_graphlookup.js
@@ -14,10 +14,10 @@ var graphColl = db.facetGraphLookup;
// The graph in ASCII form: 0 --- 1 --- 2    3 (node 3 is disconnected).
graphColl.drop();
-assert.writeOK(graphColl.insert({_id: 0, edges: [1]}));
-assert.writeOK(graphColl.insert({_id: 1, edges: [0, 2]}));
-assert.writeOK(graphColl.insert({_id: 2, edges: [1]}));
-assert.writeOK(graphColl.insert({_id: 3}));
+assert.commandWorked(graphColl.insert({_id: 0, edges: [1]}));
+assert.commandWorked(graphColl.insert({_id: 1, edges: [0, 2]}));
+assert.commandWorked(graphColl.insert({_id: 2, edges: [1]}));
+assert.commandWorked(graphColl.insert({_id: 3}));
// For each document in the collection, this will compute all the other documents that are
// reachable from this one.
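Nodes 0, 1, and 2 form one connected component and node 3 is isolated, so the first three documents each reach the other two while 3 reaches nothing. A sketch of the $graphLookup presumably nested inside each $facet branch:

    // {$graphLookup: {from: "facetGraphLookup", startWith: "$_id",
    //                 connectFromField: "edges", connectToField: "_id", as: "connected"}}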
diff --git a/jstests/aggregation/sources/facet/inner_lookup.js b/jstests/aggregation/sources/facet/inner_lookup.js
index 0852f820869..f9cb88cbb91 100644
--- a/jstests/aggregation/sources/facet/inner_lookup.js
+++ b/jstests/aggregation/sources/facet/inner_lookup.js
@@ -13,13 +13,13 @@ var local = db.facetLookupLocal;
var foreign = db.facetLookupForeign;
local.drop();
-assert.writeOK(local.insert({_id: 0}));
-assert.writeOK(local.insert({_id: 1}));
+assert.commandWorked(local.insert({_id: 0}));
+assert.commandWorked(local.insert({_id: 1}));
foreign.drop();
-assert.writeOK(foreign.insert({_id: 0, foreignKey: 0}));
-assert.writeOK(foreign.insert({_id: 1, foreignKey: 1}));
-assert.writeOK(foreign.insert({_id: 2, foreignKey: 2}));
+assert.commandWorked(foreign.insert({_id: 0, foreignKey: 0}));
+assert.commandWorked(foreign.insert({_id: 1, foreignKey: 1}));
+assert.commandWorked(foreign.insert({_id: 2, foreignKey: 2}));
function runTest(lookupStage) {
const lookupResults = local.aggregate([lookupStage]).toArray();
diff --git a/jstests/aggregation/sources/facet/use_cases.js b/jstests/aggregation/sources/facet/use_cases.js
index 83f5d58d4d8..acf79f16f32 100644
--- a/jstests/aggregation/sources/facet/use_cases.js
+++ b/jstests/aggregation/sources/facet/use_cases.js
@@ -61,7 +61,7 @@ function populateData(conn, nDocs) {
const doc = generateRandomDocument(i);
bulk.insert(doc);
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}
function doExecutionTest(conn) {
diff --git a/jstests/aggregation/sources/geonear/collation_geonear.js b/jstests/aggregation/sources/geonear/collation_geonear.js
index d4c47c1aec0..5fb756c64da 100644
--- a/jstests/aggregation/sources/geonear/collation_geonear.js
+++ b/jstests/aggregation/sources/geonear/collation_geonear.js
@@ -12,7 +12,7 @@ const caseInsensitive = {
var coll = db.collation_geonear;
coll.drop();
assert.commandWorked(coll.createIndex({loc: "2dsphere"}));
-assert.writeOK(coll.insert({loc: [0, 0], str: "A"}));
+assert.commandWorked(coll.insert({loc: [0, 0], str: "A"}));
// Test that the $geoNear agg stage respects an explicit collation.
assert.eq(0,
@@ -54,7 +54,7 @@ assert.throws(function() {
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
assert.commandWorked(coll.createIndex({loc: "2dsphere"}));
-assert.writeOK(coll.insert({loc: [0, 0], str: "A"}));
+assert.commandWorked(coll.insert({loc: [0, 0], str: "A"}));
// Test that the $geoNear agg stage respects an inherited collation.
assert.eq(1,
diff --git a/jstests/aggregation/sources/geonear/distancefield_and_includelocs.js b/jstests/aggregation/sources/geonear/distancefield_and_includelocs.js
index 1ed2364ccb3..c2406299a4b 100644
--- a/jstests/aggregation/sources/geonear/distancefield_and_includelocs.js
+++ b/jstests/aggregation/sources/geonear/distancefield_and_includelocs.js
@@ -54,9 +54,9 @@ const docWithGeoLine = {
assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
// Populate the collection.
-assert.writeOK(coll.insert(docWithLegacyPoint));
-assert.writeOK(coll.insert(docWithGeoPoint));
-assert.writeOK(coll.insert(docWithGeoLine));
+assert.commandWorked(coll.insert(docWithLegacyPoint));
+assert.commandWorked(coll.insert(docWithGeoPoint));
+assert.commandWorked(coll.insert(docWithGeoLine));
// Define a custom way to compare documents since the results here might differ by insignificant
// amounts.
diff --git a/jstests/aggregation/sources/graphLookup/airports.js b/jstests/aggregation/sources/graphLookup/airports.js
index 779678b07da..801e15c3bbb 100644
--- a/jstests/aggregation/sources/graphLookup/airports.js
+++ b/jstests/aggregation/sources/graphLookup/airports.js
@@ -28,7 +28,7 @@ var bulk = foreign.initializeUnorderedBulkOp();
airports.forEach(function(a) {
bulk.insert(a);
});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Insert a dummy document so that something will flow through the pipeline.
local.insert({});
diff --git a/jstests/aggregation/sources/graphLookup/basic.js b/jstests/aggregation/sources/graphLookup/basic.js
index ef44b9b60bb..e217be2d26d 100644
--- a/jstests/aggregation/sources/graphLookup/basic.js
+++ b/jstests/aggregation/sources/graphLookup/basic.js
@@ -30,7 +30,7 @@ assert.eq(local.aggregate([basicGraphLookup]).toArray().length,
"expected an empty result set for a $graphLookup with non-existent local and foreign " +
"collections");
-assert.writeOK(foreign.insert({}));
+assert.commandWorked(foreign.insert({}));
assert.eq(local.aggregate([basicGraphLookup]).toArray().length,
0,
@@ -39,7 +39,7 @@ assert.eq(local.aggregate([basicGraphLookup]).toArray().length,
local.drop();
foreign.drop();
-assert.writeOK(local.insert({_id: 0}));
+assert.commandWorked(local.insert({_id: 0}));
assert.eq(local.aggregate([basicGraphLookup]).toArray(),
[{_id: 0, results: []}],
@@ -52,9 +52,9 @@ var bulk = foreign.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({_id: i, neighbors: [i - 1, i + 1]});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
-assert.writeOK(local.insert({starting: 50}));
+assert.commandWorked(local.insert({starting: 50}));
// Perform a simple $graphLookup and ensure it retrieves every result.
var res = local
@@ -125,9 +125,9 @@ assert.eq(res.integers.length, 3);
// mistakenly does, then it would look for a 'connectToField' value of null. In order to prevent
// regressions, we insert a document with a 'connectToField' value of null, then perform a
// $graphLookup, and ensure that we do not find the erroneous document.
-assert.writeOK(foreign.remove({_id: 51}));
-assert.writeOK(foreign.insert({_id: 51}));
-assert.writeOK(foreign.insert({_id: null, neighbors: [50, 52]}));
+assert.commandWorked(foreign.remove({_id: 51}));
+assert.commandWorked(foreign.insert({_id: 51}));
+assert.commandWorked(foreign.insert({_id: null, neighbors: [50, 52]}));
res = local
.aggregate({
@@ -145,11 +145,11 @@ res = local
assert.eq(res.integers.length, 52);
// Perform a $graphLookup and ensure we don't go into an infinite loop when our graph is cyclic.
-assert.writeOK(foreign.remove({_id: {$in: [null, 51]}}));
-assert.writeOK(foreign.insert({_id: 51, neighbors: [50, 52]}));
+assert.commandWorked(foreign.remove({_id: {$in: [null, 51]}}));
+assert.commandWorked(foreign.insert({_id: 51, neighbors: [50, 52]}));
-assert.writeOK(foreign.update({_id: 99}, {$set: {neighbors: [98, 0]}}));
-assert.writeOK(foreign.update({_id: 0}, {$set: {neighbors: [99, 1]}}));
+assert.commandWorked(foreign.update({_id: 99}, {$set: {neighbors: [98, 0]}}));
+assert.commandWorked(foreign.update({_id: 0}, {$set: {neighbors: [99, 1]}}));
res = local
.aggregate({
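$graphLookup remembers visited _id values, so linking 99 back to 0 creates a cycle that still terminates instead of looping. A maxDepth bound can additionally cap the traversal; a sketch over the same {_id, neighbors} schema:

    // {$graphLookup: {from: "foreign", startWith: "$starting", connectFromField: "neighbors",
    //                 connectToField: "_id", maxDepth: 2, as: "integers"}}
    // Starting from 50, maxDepth: 2 would presumably reach _ids 48 through 52.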
diff --git a/jstests/aggregation/sources/graphLookup/collation_graphlookup.js b/jstests/aggregation/sources/graphLookup/collation_graphlookup.js
index f3fbcf2ee34..459a7e70704 100644
--- a/jstests/aggregation/sources/graphLookup/collation_graphlookup.js
+++ b/jstests/aggregation/sources/graphLookup/collation_graphlookup.js
@@ -24,8 +24,8 @@ var foreignColl = db.collation_graphlookup_foreign;
// Test that $graphLookup respects the collation set on the aggregation pipeline. Case
// insensitivity should mean that we find both "jeremy" and "jimmy" as friends.
coll.drop();
-assert.writeOK(coll.insert({username: "erica", friends: ["jeremy", "jimmy"]}));
-assert.writeOK(coll.insert([{username: "JEREMY"}, {username: "JIMMY"}]));
+assert.commandWorked(coll.insert({username: "erica", friends: ["jeremy", "jimmy"]}));
+assert.commandWorked(coll.insert([{username: "JEREMY"}, {username: "JIMMY"}]));
res = coll.aggregate(
[
@@ -66,10 +66,10 @@ assert.eq(0, res[0].friendUsers.length);
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), caseInsensitiveUS));
-assert.writeOK(coll.insert({username: "erica", friends: ["jeremy", "jimmy"]}));
+assert.commandWorked(coll.insert({username: "erica", friends: ["jeremy", "jimmy"]}));
foreignColl.drop();
assert.commandWorked(db.createCollection(foreignColl.getName(), caseSensitiveUS));
-assert.writeOK(foreignColl.insert([{username: "JEREMY"}, {username: "JIMMY"}]));
+assert.commandWorked(foreignColl.insert([{username: "JEREMY"}, {username: "JIMMY"}]));
// Test that $graphLookup inherits the default collation of the collection on which it is run,
// and that this collation is used instead of the default collation of the foreign collection.
@@ -93,9 +93,9 @@ assert.eq(2, res[0].friendUsers.length);
// Test that we don't use the collation to dedup string _id values. This would cause us to miss
// nodes in the graph that have distinct _id values which compare equal under the collation.
coll.drop();
-assert.writeOK(coll.insert({username: "erica", friends: ["jeremy"]}));
-assert.writeOK(coll.insert({_id: "foo", username: "JEREMY", friends: ["jimmy"]}));
-assert.writeOK(coll.insert({_id: "FOO", username: "jimmy", friends: []}));
+assert.commandWorked(coll.insert({username: "erica", friends: ["jeremy"]}));
+assert.commandWorked(coll.insert({_id: "foo", username: "JEREMY", friends: ["jimmy"]}));
+assert.commandWorked(coll.insert({_id: "FOO", username: "jimmy", friends: []}));
res = coll.aggregate(
[
@@ -119,9 +119,9 @@ assert.eq(2, res[0].friendUsers.length);
// Test that the result set is not deduplicated under the collation. If two documents are
// entirely equal under the collation, they should still both get returned in the "as" field.
coll.drop();
-assert.writeOK(coll.insert({username: "erica", friends: ["jeremy"]}));
-assert.writeOK(coll.insert({_id: "foo", username: "jeremy"}));
-assert.writeOK(coll.insert({_id: "FOO", username: "JEREMY"}));
+assert.commandWorked(coll.insert({username: "erica", friends: ["jeremy"]}));
+assert.commandWorked(coll.insert({_id: "foo", username: "jeremy"}));
+assert.commandWorked(coll.insert({_id: "FOO", username: "JEREMY"}));
res = coll.aggregate(
[
diff --git a/jstests/aggregation/sources/graphLookup/error.js b/jstests/aggregation/sources/graphLookup/error.js
index b7360f3e9e8..cb8424b48b6 100644
--- a/jstests/aggregation/sources/graphLookup/error.js
+++ b/jstests/aggregation/sources/graphLookup/error.js
@@ -11,7 +11,7 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
var local = db.local;
local.drop();
-assert.writeOK(local.insert({b: 0}));
+assert.commandWorked(local.insert({b: 0}));
var pipeline = {$graphLookup: 4};
assertErrorCode(local, pipeline, ErrorCodes.FailedToParse, "$graphLookup spec must be an object");
@@ -291,7 +291,7 @@ assert.throws(
let foreign = db.foreign;
foreign.drop();
-assert.writeOK(foreign.insert({a: 0, x: 0}));
+assert.commandWorked(foreign.insert({a: 0, x: 0}));
// Test a restrictSearchWithMatch expression that fails to parse.
pipeline = {
@@ -333,7 +333,7 @@ for (var i = 0; i < 8; i++) {
initial.push(i);
bulk.insert(obj);
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
pipeline = {
$graphLookup: {
@@ -356,7 +356,7 @@ for (var i = 0; i < 14; i++) {
obj['s'] = new Array(7 * 1024 * 1024).join(' ');
bulk.insert(obj);
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
pipeline = {
$graphLookup: {
@@ -379,7 +379,7 @@ for (var i = 0; i < 13; i++) {
obj['s'] = new Array(7 * 1024 * 1024).join(' ');
bulk.insert(obj);
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
var res = local
.aggregate({
diff --git a/jstests/aggregation/sources/graphLookup/filter.js b/jstests/aggregation/sources/graphLookup/filter.js
index 4b46c843d9a..407d422883a 100644
--- a/jstests/aggregation/sources/graphLookup/filter.js
+++ b/jstests/aggregation/sources/graphLookup/filter.js
@@ -18,8 +18,8 @@ var bulk = foreign.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({_id: i, neighbors: [i - 1, i + 1]});
}
-assert.writeOK(bulk.execute());
-assert.writeOK(local.insert({starting: 0}));
+assert.commandWorked(bulk.execute());
+assert.commandWorked(local.insert({starting: 0}));
// Assert that the graphLookup only retrieves ten documents, with _id from 0 to 9.
var res = local
@@ -55,9 +55,9 @@ res = local
assert.eq(res.integers.length, 0);
foreign.drop();
-assert.writeOK(foreign.insert({from: 0, to: 1, shouldBeIncluded: true}));
-assert.writeOK(foreign.insert({from: 1, to: 2, shouldBeIncluded: false}));
-assert.writeOK(foreign.insert({from: 2, to: 3, shouldBeIncluded: true}));
+assert.commandWorked(foreign.insert({from: 0, to: 1, shouldBeIncluded: true}));
+assert.commandWorked(foreign.insert({from: 1, to: 2, shouldBeIncluded: false}));
+assert.commandWorked(foreign.insert({from: 2, to: 3, shouldBeIncluded: true}));
// Assert that the $graphLookup stops exploring when it finds a document that doesn't match the
// filter.
diff --git a/jstests/aggregation/sources/graphLookup/nested_objects.js b/jstests/aggregation/sources/graphLookup/nested_objects.js
index 43c81302ae4..90d60f1681f 100644
--- a/jstests/aggregation/sources/graphLookup/nested_objects.js
+++ b/jstests/aggregation/sources/graphLookup/nested_objects.js
@@ -19,9 +19,9 @@ var bulk = foreign.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({_id: i, neighbors: [{id: i + 1}, {id: i + 2}]});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
-assert.writeOK(local.insert({starting: 0}));
+assert.commandWorked(local.insert({starting: 0}));
var res = local
.aggregate({
@@ -43,7 +43,7 @@ var bulk = foreign.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({previous: [{neighbor: i}, {neighbor: i - 1}], value: i + 1});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
var res = local
.aggregate({
@@ -68,7 +68,7 @@ for (var i = 0; i < 100; i++) {
values: [{neighbor: i + 1}, {neighbor: i + 2}]
});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
var res = local
.aggregate({
diff --git a/jstests/aggregation/sources/graphLookup/socialite.js b/jstests/aggregation/sources/graphLookup/socialite.js
index f38f6c2ffc0..051be6e2289 100644
--- a/jstests/aggregation/sources/graphLookup/socialite.js
+++ b/jstests/aggregation/sources/graphLookup/socialite.js
@@ -22,13 +22,13 @@ var userDocs = [
];
userDocs.forEach(function(userDoc) {
- assert.writeOK(users.insert(userDoc));
+ assert.commandWorked(users.insert(userDoc));
});
var followers = [{_f: "djw", _t: "jsr"}, {_f: "jsr", _t: "bmw"}, {_f: "ftr", _t: "bmw"}];
followers.forEach(function(f) {
- assert.writeOK(follower.insert(f));
+ assert.commandWorked(follower.insert(f));
});
// Find the social network of "Darren", that is, people Darren follows, and people who are
diff --git a/jstests/aggregation/sources/group/collation_group.js b/jstests/aggregation/sources/group/collation_group.js
index 94db6f15ed1..9b12f96a1bf 100644
--- a/jstests/aggregation/sources/group/collation_group.js
+++ b/jstests/aggregation/sources/group/collation_group.js
@@ -16,10 +16,10 @@ var caseAndDiacriticInsensitive = {collation: {locale: "en_US", strength: 1}};
assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
-assert.writeOK(coll.insert({_id: 0, str: "A", str2: "á"}));
-assert.writeOK(coll.insert({_id: 1, str: "a", str2: "a"}));
-assert.writeOK(coll.insert({_id: 2, str: "B", str2: "é"}));
-assert.writeOK(coll.insert({_id: 3, str: "b", str2: "e"}));
+assert.commandWorked(coll.insert({_id: 0, str: "A", str2: "á"}));
+assert.commandWorked(coll.insert({_id: 1, str: "a", str2: "a"}));
+assert.commandWorked(coll.insert({_id: 2, str: "B", str2: "é"}));
+assert.commandWorked(coll.insert({_id: 3, str: "b", str2: "e"}));
// Ensure that equality of groups respects the collation inherited from the collection default.
assert.eq(2, coll.aggregate([{$group: {_id: "$str"}}]).itcount());
@@ -71,8 +71,8 @@ assert.eq(true, results[0].areEqual[0]);
// Test that the $min and $max accumulators respect the inherited collation.
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
-assert.writeOK(coll.insert({num: "100"}));
-assert.writeOK(coll.insert({num: "2"}));
+assert.commandWorked(coll.insert({num: "100"}));
+assert.commandWorked(coll.insert({num: "2"}));
results = coll.aggregate([{$group: {_id: null, min: {$min: "$num"}}}]).toArray();
assert.eq(1, results.length);
assert.eq("2", results[0].min);
diff --git a/jstests/aggregation/sources/group/numeric_grouping.js b/jstests/aggregation/sources/group/numeric_grouping.js
index a7b9f22d979..a79bde14747 100644
--- a/jstests/aggregation/sources/group/numeric_grouping.js
+++ b/jstests/aggregation/sources/group/numeric_grouping.js
@@ -7,13 +7,13 @@ const coll = db.numeric_grouping;
coll.drop();
-assert.writeOK(coll.insert({key: new NumberInt(24), value: 17}));
-assert.writeOK(coll.insert({key: new NumberLong(24), value: 8}));
-assert.writeOK(coll.insert({key: 24, value: 5}));
+assert.commandWorked(coll.insert({key: new NumberInt(24), value: 17}));
+assert.commandWorked(coll.insert({key: new NumberLong(24), value: 8}));
+assert.commandWorked(coll.insert({key: 24, value: 5}));
-assert.writeOK(coll.insert({key: new NumberInt(42), value: 11}));
-assert.writeOK(coll.insert({key: new NumberLong(42), value: 13}));
-assert.writeOK(coll.insert({key: 42, value: 6}));
+assert.commandWorked(coll.insert({key: new NumberInt(42), value: 11}));
+assert.commandWorked(coll.insert({key: new NumberLong(42), value: 13}));
+assert.commandWorked(coll.insert({key: 42, value: 6}));
const results = coll.aggregate({$group: {_id: "$key", s: {$sum: "$value"}}}).toArray();
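NumberInt, NumberLong, and double keys that are numerically equal fall into the same group, so the six inserts collapse into two groups whose sums work out to 30 each (17 + 8 + 5 and 11 + 13 + 6). The expected shape, in some order:

    // [{_id: 24, s: 30}, {_id: 42, s: 30}]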
diff --git a/jstests/aggregation/sources/group/text_score_grouping.js b/jstests/aggregation/sources/group/text_score_grouping.js
index 2952602ee46..1fa328a196e 100644
--- a/jstests/aggregation/sources/group/text_score_grouping.js
+++ b/jstests/aggregation/sources/group/text_score_grouping.js
@@ -7,8 +7,8 @@ const coll = db.text_score_grouping;
coll.drop();
-assert.writeOK(coll.insert({"_id": 1, "title": "cakes"}));
-assert.writeOK(coll.insert({"_id": 2, "title": "cookies and cakes"}));
+assert.commandWorked(coll.insert({"_id": 1, "title": "cakes"}));
+assert.commandWorked(coll.insert({"_id": 2, "title": "cookies and cakes"}));
assert.commandWorked(coll.createIndex({title: "text"}));
diff --git a/jstests/aggregation/sources/lookup/lookup_absorb_match.js b/jstests/aggregation/sources/lookup/lookup_absorb_match.js
index 1d85817970f..c39b8defff2 100644
--- a/jstests/aggregation/sources/lookup/lookup_absorb_match.js
+++ b/jstests/aggregation/sources/lookup/lookup_absorb_match.js
@@ -13,12 +13,12 @@ let testDB = db.getSiblingDB("lookup_absorb_match");
testDB.dropDatabase();
let locations = testDB.getCollection("locations");
-assert.writeOK(locations.insert({_id: "doghouse", coordinates: [25.0, 60.0]}));
-assert.writeOK(locations.insert({_id: "bullpen", coordinates: [-25.0, -60.0]}));
+assert.commandWorked(locations.insert({_id: "doghouse", coordinates: [25.0, 60.0]}));
+assert.commandWorked(locations.insert({_id: "bullpen", coordinates: [-25.0, -60.0]}));
let animals = testDB.getCollection("animals");
-assert.writeOK(animals.insert({_id: "dog", locationId: "doghouse"}));
-assert.writeOK(animals.insert({_id: "bull", locationId: "bullpen"}));
+assert.commandWorked(animals.insert({_id: "dog", locationId: "doghouse"}));
+assert.commandWorked(animals.insert({_id: "bull", locationId: "bullpen"}));
// Test that a $match with $geoWithin works properly when performed directly on an absorbed
// lookup field.
diff --git a/jstests/aggregation/sources/lookup/lookup_non_correlated.js b/jstests/aggregation/sources/lookup/lookup_non_correlated.js
index d7323d861c1..2e7546686fc 100644
--- a/jstests/aggregation/sources/lookup/lookup_non_correlated.js
+++ b/jstests/aggregation/sources/lookup/lookup_non_correlated.js
@@ -16,13 +16,13 @@ const foreignName = "foreign";
const foreignColl = testDB.getCollection(foreignName);
foreignColl.drop();
-assert.writeOK(localColl.insert({_id: "A"}));
-assert.writeOK(localColl.insert({_id: "B"}));
-assert.writeOK(localColl.insert({_id: "C"}));
+assert.commandWorked(localColl.insert({_id: "A"}));
+assert.commandWorked(localColl.insert({_id: "B"}));
+assert.commandWorked(localColl.insert({_id: "C"}));
-assert.writeOK(foreignColl.insert({_id: 1}));
-assert.writeOK(foreignColl.insert({_id: 2}));
-assert.writeOK(foreignColl.insert({_id: 3}));
+assert.commandWorked(foreignColl.insert({_id: 1}));
+assert.commandWorked(foreignColl.insert({_id: 2}));
+assert.commandWorked(foreignColl.insert({_id: 3}));
// Basic non-correlated lookup returns expected results.
let cursor = localColl.aggregate([
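A non-correlated $lookup subpipeline references nothing from the outer document (no 'let' bindings or local fields), so the server may run it once and cache the result for every local document. A sketch of the stage shape this test presumably exercises:

    // {$lookup: {from: "foreign", as: "joined", pipeline: [{$match: {_id: {$gte: 2}}}]}}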
diff --git a/jstests/aggregation/sources/lookup/lookup_subpipeline.js b/jstests/aggregation/sources/lookup/lookup_subpipeline.js
index d9933c869cf..59a1ddd552b 100644
--- a/jstests/aggregation/sources/lookup/lookup_subpipeline.js
+++ b/jstests/aggregation/sources/lookup/lookup_subpipeline.js
@@ -42,14 +42,14 @@ function testPipeline(pipeline, expectedResult, collection) {
// Pipeline syntax using 'let' variables.
//
coll.drop();
-assert.writeOK(coll.insert({_id: 1, x: 1}));
-assert.writeOK(coll.insert({_id: 2, x: 2}));
-assert.writeOK(coll.insert({_id: 3, x: 3}));
+assert.commandWorked(coll.insert({_id: 1, x: 1}));
+assert.commandWorked(coll.insert({_id: 2, x: 2}));
+assert.commandWorked(coll.insert({_id: 3, x: 3}));
from.drop();
-assert.writeOK(from.insert({_id: 1}));
-assert.writeOK(from.insert({_id: 2}));
-assert.writeOK(from.insert({_id: 3}));
+assert.commandWorked(from.insert({_id: 1}));
+assert.commandWorked(from.insert({_id: 2}));
+assert.commandWorked(from.insert({_id: 3}));
// Basic non-equi theta join via $project.
let pipeline = [
@@ -425,7 +425,7 @@ testPipeline(pipeline, expectedResults, coll);
// Comparison where a 'let' variable references an array.
coll.drop();
-assert.writeOK(coll.insert({x: [1, 2, 3]}));
+assert.commandWorked(coll.insert({x: [1, 2, 3]}));
pipeline = [
{
@@ -448,7 +448,7 @@ testPipeline(pipeline, expectedResults, coll);
// Pipeline syntax with nested object.
//
coll.drop();
-assert.writeOK(coll.insert({x: {y: {z: 10}}}));
+assert.commandWorked(coll.insert({x: {y: {z: 10}}}));
// Subfields of 'let' variables can be referenced via dotted path.
pipeline = [
@@ -508,24 +508,24 @@ testPipeline(pipeline, expectedResults, coll);
// Pipeline syntax with nested $lookup.
//
coll.drop();
-assert.writeOK(coll.insert({_id: 1, w: 1}));
-assert.writeOK(coll.insert({_id: 2, w: 2}));
-assert.writeOK(coll.insert({_id: 3, w: 3}));
+assert.commandWorked(coll.insert({_id: 1, w: 1}));
+assert.commandWorked(coll.insert({_id: 2, w: 2}));
+assert.commandWorked(coll.insert({_id: 3, w: 3}));
from.drop();
-assert.writeOK(from.insert({_id: 1, x: 1}));
-assert.writeOK(from.insert({_id: 2, x: 2}));
-assert.writeOK(from.insert({_id: 3, x: 3}));
+assert.commandWorked(from.insert({_id: 1, x: 1}));
+assert.commandWorked(from.insert({_id: 2, x: 2}));
+assert.commandWorked(from.insert({_id: 3, x: 3}));
thirdColl.drop();
-assert.writeOK(thirdColl.insert({_id: 1, y: 1}));
-assert.writeOK(thirdColl.insert({_id: 2, y: 2}));
-assert.writeOK(thirdColl.insert({_id: 3, y: 3}));
+assert.commandWorked(thirdColl.insert({_id: 1, y: 1}));
+assert.commandWorked(thirdColl.insert({_id: 2, y: 2}));
+assert.commandWorked(thirdColl.insert({_id: 3, y: 3}));
fourthColl.drop();
-assert.writeOK(fourthColl.insert({_id: 1, z: 1}));
-assert.writeOK(fourthColl.insert({_id: 2, z: 2}));
-assert.writeOK(fourthColl.insert({_id: 3, z: 3}));
+assert.commandWorked(fourthColl.insert({_id: 1, z: 1}));
+assert.commandWorked(fourthColl.insert({_id: 2, z: 2}));
+assert.commandWorked(fourthColl.insert({_id: 3, z: 3}));
// Nested $lookup pipeline.
pipeline = [
diff --git a/jstests/aggregation/sources/match/collation_match.js b/jstests/aggregation/sources/match/collation_match.js
index 8c8c225f66d..3962db8f52b 100644
--- a/jstests/aggregation/sources/match/collation_match.js
+++ b/jstests/aggregation/sources/match/collation_match.js
@@ -9,7 +9,7 @@ var caseInsensitive = {collation: {locale: "en_US", strength: 2}};
var coll = db.collation_match;
coll.drop();
-assert.writeOK(coll.insert({a: "a"}));
+assert.commandWorked(coll.insert({a: "a"}));
// Test that the $match respects an explicit collation when it can be pushed down into the query
// layer.
@@ -28,7 +28,7 @@ assert.eq(1,
// Test that when a $match can be split into a part before the $unwind and a part after, both
// pieces of the split respect the collation.
coll.drop();
-assert.writeOK(coll.insert({a: "foo", b: ["bar"]}));
+assert.commandWorked(coll.insert({a: "foo", b: ["bar"]}));
assert.eq(
1,
coll.aggregate([{$limit: 1}, {$unwind: "$b"}, {$match: {a: "FOO", b: "BAR"}}], caseInsensitive)
@@ -36,7 +36,7 @@ assert.eq(
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
-assert.writeOK(coll.insert({a: "a"}));
+assert.commandWorked(coll.insert({a: "a"}));
// Test that the $match respects the inherited collation when it can be pushed down into the
// query layer.
diff --git a/jstests/aggregation/sources/match/expr_match.js b/jstests/aggregation/sources/match/expr_match.js
index 8e2541958db..6f780883955 100644
--- a/jstests/aggregation/sources/match/expr_match.js
+++ b/jstests/aggregation/sources/match/expr_match.js
@@ -4,10 +4,10 @@
const coll = db.expr_match;
coll.drop();
-assert.writeOK(coll.insert({x: 0}));
-assert.writeOK(coll.insert({x: 1, y: 1}));
-assert.writeOK(coll.insert({x: 2, y: 4}));
-assert.writeOK(coll.insert({x: 3, y: 9}));
+assert.commandWorked(coll.insert({x: 0}));
+assert.commandWorked(coll.insert({x: 1, y: 1}));
+assert.commandWorked(coll.insert({x: 2, y: 4}));
+assert.commandWorked(coll.insert({x: 3, y: 9}));
// $match with $expr representing local document field path reference.
assert.eq(1, coll.aggregate([{$match: {$expr: {$eq: ["$x", 2]}}}]).itcount());
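Unlike an ordinary $match predicate, $expr can compare one field of a document against another. For instance, of the four documents above, exactly the three with y equal to x squared would match:

    // assert.eq(3, coll.aggregate([{$match: {$expr: {$eq: ["$y", {$multiply: ["$x", "$x"]}]}}}]).itcount());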
diff --git a/jstests/aggregation/sources/project/remove_redundant_projects.js b/jstests/aggregation/sources/project/remove_redundant_projects.js
index f1a21264c7e..e4584df4f65 100644
--- a/jstests/aggregation/sources/project/remove_redundant_projects.js
+++ b/jstests/aggregation/sources/project/remove_redundant_projects.js
@@ -10,7 +10,7 @@ load('jstests/libs/analyze_plan.js'); // For planHasStage().
let coll = db.remove_redundant_projects;
coll.drop();
-assert.writeOK(coll.insert({_id: {a: 1, b: 1}, a: 1, c: {d: 1}, e: ['elem1']}));
+assert.commandWorked(coll.insert({_id: {a: 1, b: 1}, a: 1, c: {d: 1}, e: ['elem1']}));
let indexSpec = {a: 1, 'c.d': 1, 'e.0': 1};
diff --git a/jstests/aggregation/sources/redact/collation_redact.js b/jstests/aggregation/sources/redact/collation_redact.js
index 7ff1e1ad4f1..df67b98d31b 100644
--- a/jstests/aggregation/sources/redact/collation_redact.js
+++ b/jstests/aggregation/sources/redact/collation_redact.js
@@ -9,7 +9,7 @@ var caseInsensitive = {collation: {locale: "en_US", strength: 2}};
var coll = db.collation_redact;
coll.drop();
-assert.writeOK(coll.insert({a: "a"}));
+assert.commandWorked(coll.insert({a: "a"}));
// Test that $redact respects an explicit collation. Since the top-level of the document gets
// pruned, we end up redacting the entire document and returning no results.
@@ -20,7 +20,7 @@ assert.eq(
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
-assert.writeOK(coll.insert({a: "a"}));
+assert.commandWorked(coll.insert({a: "a"}));
// Test that $redact respects the inherited collation. Since the top-level of the document gets
// pruned, we end up redacting the entire document and returning no results.
diff --git a/jstests/aggregation/sources/replaceRoot/address.js b/jstests/aggregation/sources/replaceRoot/address.js
index 537ec7d50ac..051c693110f 100644
--- a/jstests/aggregation/sources/replaceRoot/address.js
+++ b/jstests/aggregation/sources/replaceRoot/address.js
@@ -61,7 +61,7 @@ function doExecutionTest(conn) {
for (let i = 0; i < nDocs; i++) {
bulk.insert(generateRandomDocument());
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
// Extract the contents of the address field, and make sure that doing the same
// with replaceRoot yields the correct answer.
diff --git a/jstests/aggregation/sources/sort/collation_sort.js b/jstests/aggregation/sources/sort/collation_sort.js
index 6d8b20f9ab2..231930d0772 100644
--- a/jstests/aggregation/sources/sort/collation_sort.js
+++ b/jstests/aggregation/sources/sort/collation_sort.js
@@ -13,15 +13,15 @@ var frenchAccentOrdering = {collation: {locale: "fr", backwards: true}};
var coll = db.collation_sort;
coll.drop();
-assert.writeOK(coll.insert({_id: 1, word1: "pêche", word2: "côté"}));
-assert.writeOK(coll.insert({_id: 2, word1: "pêche", word2: "coté"}));
-assert.writeOK(coll.insert({_id: 3, word1: "pêche", word2: "côte"}));
-assert.writeOK(coll.insert({_id: 4, word1: "pèché", word2: "côté"}));
-assert.writeOK(coll.insert({_id: 5, word1: "pèché", word2: "coté"}));
-assert.writeOK(coll.insert({_id: 6, word1: "pèché", word2: "côte"}));
-assert.writeOK(coll.insert({_id: 7, word1: "pêché", word2: "côté"}));
-assert.writeOK(coll.insert({_id: 8, word1: "pêché", word2: "coté"}));
-assert.writeOK(coll.insert({_id: 9, word1: "pêché", word2: "côte"}));
+assert.commandWorked(coll.insert({_id: 1, word1: "pêche", word2: "côté"}));
+assert.commandWorked(coll.insert({_id: 2, word1: "pêche", word2: "coté"}));
+assert.commandWorked(coll.insert({_id: 3, word1: "pêche", word2: "côte"}));
+assert.commandWorked(coll.insert({_id: 4, word1: "pèché", word2: "côté"}));
+assert.commandWorked(coll.insert({_id: 5, word1: "pèché", word2: "coté"}));
+assert.commandWorked(coll.insert({_id: 6, word1: "pèché", word2: "côte"}));
+assert.commandWorked(coll.insert({_id: 7, word1: "pêché", word2: "côté"}));
+assert.commandWorked(coll.insert({_id: 8, word1: "pêché", word2: "coté"}));
+assert.commandWorked(coll.insert({_id: 9, word1: "pêché", word2: "côte"}));
// Test that ascending sort respects the collation.
assert.eq([{_id: "pèché"}, {_id: "pêche"}, {_id: "pêché"}],
diff --git a/jstests/aggregation/sources/sort/collation_sort_japanese.js b/jstests/aggregation/sources/sort/collation_sort_japanese.js
index 9051ed45aa7..88cdf18185a 100644
--- a/jstests/aggregation/sources/sort/collation_sort_japanese.js
+++ b/jstests/aggregation/sources/sort/collation_sort_japanese.js
@@ -47,7 +47,7 @@ function runTests(docs) {
for (let doc of docs) {
bulk.insert(doc);
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
let sortOrder;
diff --git a/jstests/aggregation/sources/sort/explain_sort.js b/jstests/aggregation/sources/sort/explain_sort.js
index d519ea323c7..ac93acc11ab 100644
--- a/jstests/aggregation/sources/sort/explain_sort.js
+++ b/jstests/aggregation/sources/sort/explain_sort.js
@@ -34,7 +34,7 @@ function checkResults(results, verbosity) {
}
for (let i = 0; i < kNumDocs; i++) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
// Execute several aggregations with a sort stage combined with various single document
diff --git a/jstests/aggregation/sources/sort/sort_with_metadata.js b/jstests/aggregation/sources/sort/sort_with_metadata.js
index ab4d7ce5b83..f312bf1fd27 100644
--- a/jstests/aggregation/sources/sort/sort_with_metadata.js
+++ b/jstests/aggregation/sources/sort/sort_with_metadata.js
@@ -4,10 +4,10 @@
var coll = db.sort_with_metadata;
coll.drop();
-assert.writeOK(coll.insert({_id: 1, text: "apple", words: 1}));
-assert.writeOK(coll.insert({_id: 2, text: "banana", words: 1}));
-assert.writeOK(coll.insert({_id: 3, text: "apple banana", words: 2}));
-assert.writeOK(coll.insert({_id: 4, text: "cantaloupe", words: 1}));
+assert.commandWorked(coll.insert({_id: 1, text: "apple", words: 1}));
+assert.commandWorked(coll.insert({_id: 2, text: "banana", words: 1}));
+assert.commandWorked(coll.insert({_id: 3, text: "apple banana", words: 2}));
+assert.commandWorked(coll.insert({_id: 4, text: "cantaloupe", words: 1}));
assert.commandWorked(coll.createIndex({text: "text"}));
diff --git a/jstests/aggregation/testall.js b/jstests/aggregation/testall.js
index a58a1bb00f2..94966fb2d62 100644
--- a/jstests/aggregation/testall.js
+++ b/jstests/aggregation/testall.js
@@ -118,7 +118,7 @@ assert(arrayEq(firstBatch, u1result), tojson({got: firstBatch, expected: u1resul
// unwind an array at the end of a dotted path
testDB.ut.drop();
-assert.writeOK(testDB.ut.insert({_id: 4, a: 1, b: {e: 7, f: [4, 3, 2, 1]}, c: 12, d: 17}));
+assert.commandWorked(testDB.ut.insert({_id: 4, a: 1, b: {e: 7, f: [4, 3, 2, 1]}, c: 12, d: 17}));
let u2 = testDB.runCommand(
{aggregate: "ut", pipeline: [{$unwind: "$b.f"}, {$sort: {"b.f": -1}}], cursor: {}});
diff --git a/jstests/aggregation/use_query_project_and_sort.js b/jstests/aggregation/use_query_project_and_sort.js
index 191b4d78d3f..de16c125183 100644
--- a/jstests/aggregation/use_query_project_and_sort.js
+++ b/jstests/aggregation/use_query_project_and_sort.js
@@ -17,7 +17,7 @@ const bulk = coll.initializeUnorderedBulkOp();
for (let i = 0; i < 100; ++i) {
bulk.insert({_id: i, x: "string", a: -i, y: i % 2});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function assertQueryCoversProjectionAndSort(pipeline) {
const explainOutput = coll.explain().aggregate(pipeline);
diff --git a/jstests/aggregation/use_query_projection.js b/jstests/aggregation/use_query_projection.js
index dccc24f58b9..ad83f06acb1 100644
--- a/jstests/aggregation/use_query_projection.js
+++ b/jstests/aggregation/use_query_projection.js
@@ -17,7 +17,7 @@ const bulk = coll.initializeUnorderedBulkOp();
for (let i = 0; i < 100; ++i) {
bulk.insert({_id: i, x: "string", a: -i, y: i % 2});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function assertQueryCoversProjection({pipeline = [], pipelineOptimizedAway = true} = {}) {
const explainOutput = coll.explain().aggregate(pipeline);
@@ -107,7 +107,7 @@ assertQueryCoversProjection({
// Test that a multikey index will prevent a covered plan.
assert.commandWorked(coll.dropIndex({x: 1})); // Make sure there is only one plan considered.
-assert.writeOK(coll.insert({x: ["an", "array!"]}));
+assert.commandWorked(coll.insert({x: ["an", "array!"]}));
assertQueryDoesNotCoverProjection({
pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1}}],
pipelineOptimizedAway: false
diff --git a/jstests/aggregation/use_query_sort.js b/jstests/aggregation/use_query_sort.js
index af9338be79e..8dbbc0c41ec 100644
--- a/jstests/aggregation/use_query_sort.js
+++ b/jstests/aggregation/use_query_sort.js
@@ -16,7 +16,7 @@ const bulk = coll.initializeUnorderedBulkOp();
for (let i = 0; i < 100; ++i) {
bulk.insert({_id: i, x: "string", a: -i, y: i % 2});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function assertHasNonBlockingQuerySort(pipeline) {
const explainOutput = coll.explain().aggregate(pipeline);
diff --git a/jstests/aggregation/variables/layered_variables.js b/jstests/aggregation/variables/layered_variables.js
index 95e2d535402..1518d91c721 100644
--- a/jstests/aggregation/variables/layered_variables.js
+++ b/jstests/aggregation/variables/layered_variables.js
@@ -6,7 +6,7 @@ const testDB = db.getSiblingDB("layered_variables");
assert.commandWorked(testDB.dropDatabase());
const coll = testDB.getCollection("test");
-assert.writeOK(coll.insert({_id: 1, has_permissions: 1, my_array: [2, 3]}));
+assert.commandWorked(coll.insert({_id: 1, has_permissions: 1, my_array: [2, 3]}));
const res = assert.commandWorked(testDB.runCommand({
aggregate: "test",
diff --git a/jstests/aggregation/variables/remove_system_variable.js b/jstests/aggregation/variables/remove_system_variable.js
index 5dd0cda9525..2ac85f1e213 100644
--- a/jstests/aggregation/variables/remove_system_variable.js
+++ b/jstests/aggregation/variables/remove_system_variable.js
@@ -7,9 +7,9 @@
let coll = db[jsTest.name()];
coll.drop();
-assert.writeOK(coll.insert({_id: 1, a: 2, b: 3}));
-assert.writeOK(coll.insert({_id: 2, a: 3, b: 4}));
-assert.writeOK(coll.insert({_id: 3, a: {b: 98, c: 99}}));
+assert.commandWorked(coll.insert({_id: 1, a: 2, b: 3}));
+assert.commandWorked(coll.insert({_id: 2, a: 3, b: 4}));
+assert.commandWorked(coll.insert({_id: 3, a: {b: 98, c: 99}}));
let projectStage = {
$project: {_id: 0, a: 1, b: {$cond: {if: {$eq: ["$b", 4]}, then: "$$REMOVE", else: "$b"}}}
diff --git a/jstests/auth/db_multiple_login.js b/jstests/auth/db_multiple_login.js
index 45675c10865..b4d46aeb9e9 100644
--- a/jstests/auth/db_multiple_login.js
+++ b/jstests/auth/db_multiple_login.js
@@ -21,7 +21,7 @@ assert.throws(function() {
// Writer logged in, can read and write.
test.auth('writer', 'a');
-assert.writeOK(test.docs.insert({value: 1}));
+assert.commandWorked(test.docs.insert({value: 1}));
test.foo.findOne();
// Reader logged in, replacing writer, can only read.
diff --git a/jstests/auth/deleted_recreated_user.js b/jstests/auth/deleted_recreated_user.js
index 704710107c0..d668b47fced 100644
--- a/jstests/auth/deleted_recreated_user.js
+++ b/jstests/auth/deleted_recreated_user.js
@@ -30,7 +30,7 @@ function runTest(s0, s1) {
// Connect as basic user and create a session.
assert(admin.auth('user', 'pass'));
- assert.writeOK(admin.mycoll.insert({_id: "foo", data: "bar"}));
+ assert.commandWorked(admin.mycoll.insert({_id: "foo", data: "bar"}));
// Perform administrative commands via separate shell.
function evalCmd(cmd) {
diff --git a/jstests/auth/explain_auth.js b/jstests/auth/explain_auth.js
index 29b00035b6e..b440fc3dd8d 100644
--- a/jstests/auth/explain_auth.js
+++ b/jstests/auth/explain_auth.js
@@ -10,7 +10,7 @@ admin.auth({user: "adminUser", pwd: "pwd"});
var db = conn.getDB("explain_auth_db");
var coll = db.explain_auth_coll;
-assert.writeOK(coll.insert({_id: 1, a: 1}));
+assert.commandWorked(coll.insert({_id: 1, a: 1}));
/**
* Runs explains of find, count, remove, and update. Checks that they either succeed or fail with
diff --git a/jstests/auth/getMore.js b/jstests/auth/getMore.js
index 4495d61200b..c593a35957d 100644
--- a/jstests/auth/getMore.js
+++ b/jstests/auth/getMore.js
@@ -20,9 +20,9 @@ function runTest(conn) {
const testDBName = "auth_getMore";
let testDB = adminDB.getSiblingDB(testDBName);
testDB.dropDatabase();
- assert.writeOK(testDB.foo.insert({_id: 0}));
- assert.writeOK(testDB.foo.insert({_id: 1}));
- assert.writeOK(testDB.foo.insert({_id: 2}));
+ assert.commandWorked(testDB.foo.insert({_id: 0}));
+ assert.commandWorked(testDB.foo.insert({_id: 1}));
+ assert.commandWorked(testDB.foo.insert({_id: 2}));
//
// Test that a user can only run a getMore on a cursor that they created.
@@ -241,7 +241,7 @@ function runTest(conn) {
//
assert.eq(1, adminDB.auth("admin", "admin"));
- assert.writeOK(testDB.bar.insert({_id: 0}));
+ assert.commandWorked(testDB.bar.insert({_id: 0}));
// Create a user "fooUser" on the test database that can read the "foo" collection.
assert.commandWorked(testDB.runCommand({
diff --git a/jstests/auth/keyfile_rollover.js b/jstests/auth/keyfile_rollover.js
index ea66397d8c8..8e16c15b623 100644
--- a/jstests/auth/keyfile_rollover.js
+++ b/jstests/auth/keyfile_rollover.js
@@ -32,7 +32,7 @@ const runPrimaryTest = function(fn) {
rst.getPrimary().getDB('admin').createUser({user: 'root', pwd: 'root', roles: ['root']});
runPrimaryTest((curPrimary) => {
- assert.writeOK(curPrimary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
+ assert.commandWorked(curPrimary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
assert.eq(1, curPrimary.getDB('test').a.count(), 'Error interacting with replSet');
});
@@ -76,7 +76,7 @@ const rolloverKey = function(keyFileForServers, keyFileForAuth) {
rolloverKey("jstests/libs/keyForRollover", "jstests/libs/key1");
runPrimaryTest((curPrimary) => {
- assert.writeOK(curPrimary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
+ assert.commandWorked(curPrimary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
assert.eq(2, curPrimary.getDB('test').a.count(), 'Error interacting with replSet');
});
@@ -84,7 +84,7 @@ jsTestLog("Upgrading set to use key2");
rolloverKey("jstests/libs/key2", "jstests/libs/key2");
runPrimaryTest((curPrimary) => {
- assert.writeOK(curPrimary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
+ assert.commandWorked(curPrimary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
assert.eq(3, curPrimary.getDB('test').a.count(), 'Error interacting with replSet');
});
diff --git a/jstests/auth/kill_cursors.js b/jstests/auth/kill_cursors.js
index 3d9a535311b..893c7139c79 100644
--- a/jstests/auth/kill_cursors.js
+++ b/jstests/auth/kill_cursors.js
@@ -107,8 +107,8 @@ function runTest(mongod) {
assert(testA.auth('user1', 'pass'));
assert(testB.auth('user3', 'pass'));
for (var i = 0; i < 101; ++i) {
- assert.writeOK(testA.coll.insert({_id: i}));
- assert.writeOK(testB.coll.insert({_id: i}));
+ assert.commandWorked(testA.coll.insert({_id: i}));
+ assert.commandWorked(testB.coll.insert({_id: i}));
}
testA.logout();
testB.logout();
diff --git a/jstests/auth/localhostAuthBypass.js b/jstests/auth/localhostAuthBypass.js
index f68ed6dfba4..2880dda230a 100644
--- a/jstests/auth/localhostAuthBypass.js
+++ b/jstests/auth/localhostAuthBypass.js
@@ -96,9 +96,9 @@ var assertCanRunCommands = function(mongo) {
// will throw on failure
test.system.users.findOne();
- assert.writeOK(test.foo.save({_id: 0}));
- assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}}));
- assert.writeOK(test.foo.remove({_id: 0}));
+ assert.commandWorked(test.foo.save({_id: 0}));
+ assert.commandWorked(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.commandWorked(test.foo.remove({_id: 0}));
test.foo.mapReduce(
function() {
diff --git a/jstests/auth/mongos_cache_invalidation.js b/jstests/auth/mongos_cache_invalidation.js
index 0917cb68f36..8e8e706d76a 100644
--- a/jstests/auth/mongos_cache_invalidation.js
+++ b/jstests/auth/mongos_cache_invalidation.js
@@ -35,8 +35,8 @@ assert.commandFailed(res, "Setting the invalidation interval to a disallowed va
res = st.s1.getDB('admin').runCommand({getParameter: 1, userCacheInvalidationIntervalSecs: 1});
assert.eq(5, res.userCacheInvalidationIntervalSecs);
-assert.writeOK(st.s1.getDB('test').foo.insert({a: 1})); // initial data
-assert.writeOK(st.s1.getDB('test').bar.insert({a: 1})); // initial data
+assert.commandWorked(st.s1.getDB('test').foo.insert({a: 1})); // initial data
+assert.commandWorked(st.s1.getDB('test').bar.insert({a: 1})); // initial data
st.s1.getDB('admin').createUser({user: 'admin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
st.s1.getDB('admin').logout();
@@ -104,7 +104,7 @@ db3.auth('spencer', 'pwd');
"myRole", [{resource: {db: 'test', collection: ''}, actions: ['update']}]);
// s0/db1 should update its cache instantly
- assert.writeOK(db1.foo.update({}, {$inc: {a: 1}}));
+ assert.commandWorked(db1.foo.update({}, {$inc: {a: 1}}));
assert.eq(2, db1.foo.findOne().a);
// s1/db2 should update its cache in 10 seconds.
@@ -118,7 +118,7 @@ db3.auth('spencer', 'pwd');
// We manually invalidate the cache on s2/db3.
db3.adminCommand("invalidateUserCache");
- assert.writeOK(db3.foo.update({}, {$inc: {a: 1}}));
+ assert.commandWorked(db3.foo.update({}, {$inc: {a: 1}}));
assert.eq(4, db3.foo.findOne().a);
})();
@@ -153,7 +153,7 @@ db3.auth('spencer', 'pwd');
db1.getSiblingDB('test').grantRolesToUser("spencer", ['readWrite']);
// s0/db1 should update its cache instantly
- assert.writeOK(db1.foo.update({}, {$inc: {a: 1}}));
+ assert.commandWorked(db1.foo.update({}, {$inc: {a: 1}}));
// s1/db2 should update its cache in 10 seconds.
assert.soon(function() {
@@ -162,21 +162,21 @@ db3.auth('spencer', 'pwd');
// We manually invalidate the cache on s1/db3.
db3.adminCommand("invalidateUserCache");
- assert.writeOK(db3.foo.update({}, {$inc: {a: 1}}));
+ assert.commandWorked(db3.foo.update({}, {$inc: {a: 1}}));
})();
(function testConcurrentUserModification() {
jsTestLog("Testing having 2 mongoses modify the same user at the same time"); // SERVER-13850
- assert.writeOK(db1.foo.update({}, {$inc: {a: 1}}));
- assert.writeOK(db3.foo.update({}, {$inc: {a: 1}}));
+ assert.commandWorked(db1.foo.update({}, {$inc: {a: 1}}));
+ assert.commandWorked(db3.foo.update({}, {$inc: {a: 1}}));
db1.getSiblingDB('test').revokeRolesFromUser("spencer", ['readWrite']);
// At this point db3 still thinks "spencer" has readWrite. Use it to add a different role
// and make sure it doesn't add back readWrite
hasAuthzError(db1.foo.update({}, {$inc: {a: 1}}));
- assert.writeOK(db3.foo.update({}, {$inc: {a: 1}}));
+ assert.commandWorked(db3.foo.update({}, {$inc: {a: 1}}));
db3.getSiblingDB('test').grantRolesToUser("spencer", ['dbAdmin']);
diff --git a/jstests/auth/renameRestrictedCollections.js b/jstests/auth/renameRestrictedCollections.js
index 40169bef2d6..793f5fb5b07 100644
--- a/jstests/auth/renameRestrictedCollections.js
+++ b/jstests/auth/renameRestrictedCollections.js
@@ -85,10 +85,10 @@ assert.eq(1, res.ok, tojson(res));
// Start with test against inserting to and renaming collections in config and local
// as userAdminAnyDatabase.
-assert.writeOK(configDB.test.insert({'a': 1}));
+assert.commandWorked(configDB.test.insert({'a': 1}));
assert.commandWorked(configDB.test.renameCollection('test2'));
-assert.writeOK(localDB.test.insert({'a': 1}));
+assert.commandWorked(localDB.test.insert({'a': 1}));
assert.commandWorked(localDB.test.renameCollection('test2'));
adminDB.logout();
diff --git a/jstests/auth/repl_auth.js b/jstests/auth/repl_auth.js
index fea8f227366..70857a5caf4 100644
--- a/jstests/auth/repl_auth.js
+++ b/jstests/auth/repl_auth.js
@@ -34,11 +34,11 @@ var barDB1 = replConn1.getDB('bar');
fooDB0.auth('foo', 'foopwd');
barDB1.auth('bar', 'barpwd');
-assert.writeOK(fooDB0.user.insert({x: 1}, {writeConcern: {w: NUM_NODES}}));
+assert.commandWorked(fooDB0.user.insert({x: 1}, {writeConcern: {w: NUM_NODES}}));
assert.writeError(barDB0.user.insert({x: 1}, {writeConcern: {w: NUM_NODES}}));
assert.writeError(fooDB1.user.insert({x: 2}, {writeConcern: {w: NUM_NODES}}));
-assert.writeOK(barDB1.user.insert({x: 2}, {writeConcern: {w: NUM_NODES}}));
+assert.commandWorked(barDB1.user.insert({x: 2}, {writeConcern: {w: NUM_NODES}}));
// Make sure replica set connection in the shell is ready.
_awaitRSHostViaRSMonitor(rsTest.getPrimary().name, {ok: true, ismaster: true}, rsTest.name);
diff --git a/jstests/auth/role_management_commands_lib.js b/jstests/auth/role_management_commands_lib.js
index a706899c6e7..5dcc14ab121 100644
--- a/jstests/auth/role_management_commands_lib.js
+++ b/jstests/auth/role_management_commands_lib.js
@@ -75,7 +75,7 @@ function runAllRoleManagementCommandsTests(conn, writeConcern) {
assert.throws(function() {
db.foo.findOne();
});
- assert.writeOK(db.foo.insert({a: 1}));
+ assert.commandWorked(db.foo.insert({a: 1}));
hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
assert.commandFailedWithCode(db.adminCommand('connPoolSync'), ErrorCodes.Unauthorized);
@@ -84,7 +84,7 @@ function runAllRoleManagementCommandsTests(conn, writeConcern) {
db.foo.findOne();
});
assert.eq(1, db.foo.count());
- assert.writeOK(db.foo.insert({a: 1}));
+ assert.commandWorked(db.foo.insert({a: 1}));
assert.eq(2, db.foo.count());
hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
assert.eq(1, db.foo.findOne().a);
@@ -108,7 +108,7 @@ function runAllRoleManagementCommandsTests(conn, writeConcern) {
assert.throws(function() {
db.foo.findOne();
});
- assert.writeOK(db.foo.insert({a: 1}));
+ assert.commandWorked(db.foo.insert({a: 1}));
hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
assert.commandFailedWithCode(db.adminCommand('connPoolSync'), ErrorCodes.Unauthorized);
@@ -120,7 +120,7 @@ function runAllRoleManagementCommandsTests(conn, writeConcern) {
db.foo.findOne();
});
assert.eq(3, db.foo.count());
- assert.writeOK(db.foo.insert({a: 1}));
+ assert.commandWorked(db.foo.insert({a: 1}));
assert.eq(4, db.foo.count());
hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
assert.eq(1, db.foo.findOne().a);
@@ -164,7 +164,7 @@ function runAllRoleManagementCommandsTests(conn, writeConcern) {
db.foo.findOne();
});
assert.eq(4, db.foo.count());
- assert.writeOK(db.foo.insert({a: 1}));
+ assert.commandWorked(db.foo.insert({a: 1}));
assert.eq(5, db.foo.count());
hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
assert.eq(1, db.foo.findOne().a);
@@ -219,9 +219,9 @@ function runAllRoleManagementCommandsTests(conn, writeConcern) {
assert.doesNotThrow(function() {
db.foo.findOne();
});
- assert.writeOK(db.foo.insert({a: 1}));
+ assert.commandWorked(db.foo.insert({a: 1}));
assert.eq(6, db.foo.count());
- assert.writeOK(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.commandWorked(db.foo.update({}, {$inc: {a: 1}}, false, true));
assert.eq(2, db.foo.findOne().a);
assert.commandFailedWithCode(db.adminCommand('connPoolSync'), ErrorCodes.Unauthorized);
assert.commandFailedWithCode(db.adminCommand('serverStatus'), ErrorCodes.Unauthorized);
@@ -237,7 +237,7 @@ function runAllRoleManagementCommandsTests(conn, writeConcern) {
assert.doesNotThrow(function() {
db.foo.findOne();
});
- assert.writeOK(db.foo.insert({a: 1}));
+ assert.commandWorked(db.foo.insert({a: 1}));
assert.eq(7, db.foo.count());
hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
assert.eq(2, db.foo.findOne().a);
@@ -309,7 +309,7 @@ function runAllRoleManagementCommandsTests(conn, writeConcern) {
assert.doesNotThrow(function() {
db.foo.findOne();
});
- assert.writeOK(db.foo.insert({a: 1}));
+ assert.commandWorked(db.foo.insert({a: 1}));
assert.eq(8, db.foo.count());
testUserAdmin.dropRole('testRole2', writeConcern);
diff --git a/jstests/auth/server-4892.js b/jstests/auth/server-4892.js
index bfa51347fe1..b256b42b822 100644
--- a/jstests/auth/server-4892.js
+++ b/jstests/auth/server-4892.js
@@ -56,7 +56,7 @@ withMongod({noauth: ""}, function setupTest(mongod) {
somedb.createUser({user: 'frim', pwd: 'fram', roles: jsTest.basicUserRoles});
somedb.data.drop();
for (var i = 0; i < 10; ++i) {
- assert.writeOK(somedb.data.insert({val: i}));
+ assert.commandWorked(somedb.data.insert({val: i}));
}
admin.logout();
});
diff --git a/jstests/auth/user_defined_roles.js b/jstests/auth/user_defined_roles.js
index a58d4ea52b6..ba9d2aba87f 100644
--- a/jstests/auth/user_defined_roles.js
+++ b/jstests/auth/user_defined_roles.js
@@ -57,7 +57,7 @@ function runTest(conn) {
testUserAdmin.grantPrivilegesToRole(
'testRole1', [{resource: {db: 'test', collection: 'foo'}, actions: ['insert']}]);
- assert.writeOK(testDB.foo.insert({a: 1}));
+ assert.commandWorked(testDB.foo.insert({a: 1}));
assert.eq(1, testDB.foo.findOne().a);
assert.eq(1, testDB.foo.count());
assert.eq(1, testDB.foo.find().itcount());
@@ -69,9 +69,9 @@ function runTest(conn) {
adminUserAdmin.grantPrivilegesToRole(
'adminRole', [{resource: {db: '', collection: 'foo'}, actions: ['update']}]);
- assert.writeOK(testDB.foo.update({a: 1}, {$inc: {a: 1}}));
+ assert.commandWorked(testDB.foo.update({a: 1}, {$inc: {a: 1}}));
assert.eq(2, testDB.foo.findOne().a);
- assert.writeOK(testDB.foo.update({b: 1}, {$inc: {b: 1}}, true)); // upsert
+ assert.commandWorked(testDB.foo.update({b: 1}, {$inc: {b: 1}}, true)); // upsert
assert.eq(2, testDB.foo.count());
assert.eq(2, testDB.foo.findOne({b: {$exists: true}}).b);
hasAuthzError(testDB.foo.remove({b: 2}));
@@ -79,7 +79,7 @@ function runTest(conn) {
adminUserAdmin.grantPrivilegesToRole(
'adminRole', [{resource: {db: '', collection: ''}, actions: ['remove']}]);
- assert.writeOK(testDB.foo.remove({b: 2}));
+ assert.commandWorked(testDB.foo.remove({b: 2}));
assert.eq(1, testDB.foo.count());
// Test revoking privileges
@@ -87,7 +87,7 @@ function runTest(conn) {
'testRole1', [{resource: {db: 'test', collection: 'foo'}, actions: ['insert']}]);
hasAuthzError(testDB.foo.insert({a: 1}));
assert.eq(1, testDB.foo.count());
- assert.writeOK(testDB.foo.update({a: 2}, {$inc: {a: 1}}));
+ assert.commandWorked(testDB.foo.update({a: 2}, {$inc: {a: 1}}));
assert.eq(3, testDB.foo.findOne({a: {$exists: true}}).a);
hasAuthzError(testDB.foo.update({c: 1}, {$inc: {c: 1}}, true)); // upsert should fail
assert.eq(1, testDB.foo.count());
diff --git a/jstests/auth/user_defined_roles_on_secondaries.js b/jstests/auth/user_defined_roles_on_secondaries.js
index 47746e9cd56..50c1bb0d4ed 100644
--- a/jstests/auth/user_defined_roles_on_secondaries.js
+++ b/jstests/auth/user_defined_roles_on_secondaries.js
@@ -90,7 +90,7 @@ rstest.reInitiate();
// This write will have to wait on the initial sync to complete before progressing.
assert.soonNoExcept(() => {
- assert.writeOK(rstest.getPrimary().getDB("db1")["aCollection"].insert(
+ assert.commandWorked(rstest.getPrimary().getDB("db1")["aCollection"].insert(
{a: "afterSecondNodeAdded"}, {writeConcern: {w: 2, wtimeout: 60 * 1000}}));
return true;
});
diff --git a/jstests/auth/user_management_commands_lib.js b/jstests/auth/user_management_commands_lib.js
index 3bea79ab955..a04cce68f80 100644
--- a/jstests/auth/user_management_commands_lib.js
+++ b/jstests/auth/user_management_commands_lib.js
@@ -61,7 +61,7 @@ function runAllUserManagementCommandsTests(conn, writeConcern) {
var user = testUserAdmin.getUser('spencer');
assert.eq(10028, user.customData.zipCode);
assert(db.auth('spencer', 'pwd'));
- assert.writeOK(db.foo.insert({a: 1}));
+ assert.commandWorked(db.foo.insert({a: 1}));
assert.eq(1, db.foo.findOne().a);
assert.doesNotThrow(function() {
db.getRole('testRole');
@@ -102,7 +102,7 @@ function runAllUserManagementCommandsTests(conn, writeConcern) {
testUserAdmin.updateUser(
'spencer', {roles: ["readWrite", {role: 'adminRole', db: 'admin'}]}, writeConcern);
- assert.writeOK(db.foo.update({}, {$inc: {a: 1}}));
+ assert.commandWorked(db.foo.update({}, {$inc: {a: 1}}));
assert.eq(2, db.foo.findOne().a);
assert.eq(1, db.foo.count());
assert.throws(function() {
@@ -127,7 +127,7 @@ function runAllUserManagementCommandsTests(conn, writeConcern) {
writeConcern);
assert.commandWorked(db.runCommand({collMod: 'foo'}));
- assert.writeOK(db.foo.update({}, {$inc: {a: 1}}));
+ assert.commandWorked(db.foo.update({}, {$inc: {a: 1}}));
assert.eq(3, db.foo.findOne().a);
assert.eq(1, db.foo.count());
assert.doesNotThrow(function() {
diff --git a/jstests/auth/user_special_chars.js b/jstests/auth/user_special_chars.js
index 85ef75b48af..6ee9371860c 100644
--- a/jstests/auth/user_special_chars.js
+++ b/jstests/auth/user_special_chars.js
@@ -40,12 +40,12 @@ var testUserAndDatabaseAtSymbolConflation = function() {
// Ensure that they can both successfully authenticate to their correct database.
assert(cDB.auth('a@b', 'pass1'));
- assert.writeOK(cDB.col.insert({data: 1}));
+ assert.commandWorked(cDB.col.insert({data: 1}));
assert.writeError(bcDB.col.insert({data: 2}));
assert(cDB.logout());
assert(bcDB.auth('a', 'pass2'));
- assert.writeOK(bcDB.col.insert({data: 3}));
+ assert.commandWorked(bcDB.col.insert({data: 3}));
assert.writeError(cDB.col.insert({data: 4}));
assert(bcDB.logout());
diff --git a/jstests/auth/views_authz.js b/jstests/auth/views_authz.js
index 6223312249c..f510912f3fe 100644
--- a/jstests/auth/views_authz.js
+++ b/jstests/auth/views_authz.js
@@ -109,7 +109,7 @@ function runTest(conn) {
assert.eq(1, adminDB.auth("admin", "admin"));
assert.commandWorked(viewsDB.createView("view2", "forbidden", []));
for (let i = 0; i < 10; i++) {
- assert.writeOK(viewsDB.forbidden.insert({x: 1}));
+ assert.commandWorked(viewsDB.forbidden.insert({x: 1}));
}
adminDB.logout();
diff --git a/jstests/change_streams/ban_from_lookup.js b/jstests/change_streams/ban_from_lookup.js
index 45d3c692eea..9e2f6ee8c1b 100644
--- a/jstests/change_streams/ban_from_lookup.js
+++ b/jstests/change_streams/ban_from_lookup.js
@@ -10,7 +10,7 @@ load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Col
const coll = assertDropAndRecreateCollection(db, "change_stream_ban_from_lookup");
const foreignColl = "unsharded";
-assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 1}));
// Verify that we cannot create a $lookup using a pipeline which begins with $changeStream.
assertErrorCode(
diff --git a/jstests/change_streams/ban_from_views.js b/jstests/change_streams/ban_from_views.js
index 29f78710544..f3a7185b6ea 100644
--- a/jstests/change_streams/ban_from_views.js
+++ b/jstests/change_streams/ban_from_views.js
@@ -7,7 +7,7 @@
load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
const coll = assertDropAndRecreateCollection(db, "change_stream_ban_from_views");
-assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 1}));
const normalViewName = "nonChangeStreamView";
const csViewName = "changeStreamView";
diff --git a/jstests/change_streams/change_stream.js b/jstests/change_streams/change_stream.js
index 6c03864cedd..faa6a816077 100644
--- a/jstests/change_streams/change_stream.js
+++ b/jstests/change_streams/change_stream.js
@@ -47,13 +47,13 @@ assertValidChangeStreamNss(db.getName(), "systemindexes");
assertValidChangeStreamNss(db.getName(), "system_users");
// Similar test but for DB names that are not considered internal.
-assert.writeOK(db.getSiblingDB("admincustomDB")["test"].insert({}));
+assert.commandWorked(db.getSiblingDB("admincustomDB")["test"].insert({}));
assertValidChangeStreamNss("admincustomDB");
-assert.writeOK(db.getSiblingDB("local_")["test"].insert({}));
+assert.commandWorked(db.getSiblingDB("local_")["test"].insert({}));
assertValidChangeStreamNss("local_");
-assert.writeOK(db.getSiblingDB("_config_")["test"].insert({}));
+assert.commandWorked(db.getSiblingDB("_config_")["test"].insert({}));
assertValidChangeStreamNss("_config_");
let cst = new ChangeStreamTest(db);
@@ -63,7 +63,7 @@ jsTestLog("Testing single insert");
// Test that if there are no changes, we return an empty batch.
assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
-assert.writeOK(db.t1.insert({_id: 0, a: 1}));
+assert.commandWorked(db.t1.insert({_id: 0, a: 1}));
let expected = {
documentKey: {_id: 0},
fullDocument: {_id: 0, a: 1},
@@ -78,7 +78,7 @@ assert.eq(0, cursor.nextBatch.length, "Cursor had changes: " + tojson(cursor));
jsTestLog("Testing second insert");
cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
-assert.writeOK(db.t1.insert({_id: 1, a: 2}));
+assert.commandWorked(db.t1.insert({_id: 1, a: 2}));
expected = {
documentKey: {_id: 1},
fullDocument: {_id: 1, a: 2},
@@ -89,7 +89,7 @@ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
jsTestLog("Testing update");
cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
-assert.writeOK(db.t1.update({_id: 0}, {_id: 0, a: 3}));
+assert.commandWorked(db.t1.update({_id: 0}, {_id: 0, a: 3}));
expected = {
documentKey: {_id: 0},
fullDocument: {_id: 0, a: 3},
@@ -100,7 +100,7 @@ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
jsTestLog("Testing update of another field");
cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
-assert.writeOK(db.t1.update({_id: 0}, {_id: 0, b: 3}));
+assert.commandWorked(db.t1.update({_id: 0}, {_id: 0, b: 3}));
expected = {
documentKey: {_id: 0},
fullDocument: {_id: 0, b: 3},
@@ -111,7 +111,7 @@ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
jsTestLog("Testing upsert");
cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
-assert.writeOK(db.t1.update({_id: 2}, {_id: 2, a: 4}, {upsert: true}));
+assert.commandWorked(db.t1.update({_id: 2}, {_id: 2, a: 4}, {upsert: true}));
expected = {
documentKey: {_id: 2},
fullDocument: {_id: 2, a: 4},
@@ -121,9 +121,9 @@ expected = {
cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
jsTestLog("Testing partial update with $inc");
-assert.writeOK(db.t1.insert({_id: 3, a: 5, b: 1}));
+assert.commandWorked(db.t1.insert({_id: 3, a: 5, b: 1}));
cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
-assert.writeOK(db.t1.update({_id: 3}, {$inc: {b: 2}}));
+assert.commandWorked(db.t1.update({_id: 3}, {$inc: {b: 2}}));
expected = {
documentKey: {_id: 3},
ns: {db: "test", coll: "t1"},
@@ -133,10 +133,10 @@ expected = {
cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
jsTestLog("Testing multi:true update");
-assert.writeOK(db.t1.insert({_id: 4, a: 0, b: 1}));
-assert.writeOK(db.t1.insert({_id: 5, a: 0, b: 1}));
+assert.commandWorked(db.t1.insert({_id: 4, a: 0, b: 1}));
+assert.commandWorked(db.t1.insert({_id: 5, a: 0, b: 1}));
cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
-assert.writeOK(db.t1.update({a: 0}, {$set: {b: 2}}, {multi: true}));
+assert.commandWorked(db.t1.update({a: 0}, {$set: {b: 2}}, {multi: true}));
expected = [
{
documentKey: {_id: 4},
@@ -155,7 +155,7 @@ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
jsTestLog("Testing delete");
cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
-assert.writeOK(db.t1.remove({_id: 1}));
+assert.commandWorked(db.t1.remove({_id: 1}));
expected = {
documentKey: {_id: 1},
ns: {db: "test", coll: "t1"},
@@ -164,10 +164,10 @@ expected = {
cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
jsTestLog("Testing justOne:false delete");
-assert.writeOK(db.t1.insert({_id: 6, a: 1, b: 1}));
-assert.writeOK(db.t1.insert({_id: 7, a: 1, b: 1}));
+assert.commandWorked(db.t1.insert({_id: 6, a: 1, b: 1}));
+assert.commandWorked(db.t1.insert({_id: 7, a: 1, b: 1}));
cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
-assert.writeOK(db.t1.remove({a: 1}, {justOne: false}));
+assert.commandWorked(db.t1.remove({a: 1}, {justOne: false}));
expected = [
{
documentKey: {_id: 6},
@@ -185,7 +185,7 @@ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
jsTestLog("Testing intervening write on another collection");
cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
let t2cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t2});
-assert.writeOK(db.t2.insert({_id: 100, c: 1}));
+assert.commandWorked(db.t2.insert({_id: 100, c: 1}));
cst.assertNoChange(cursor);
expected = {
documentKey: {_id: 100},
@@ -196,7 +196,7 @@ expected = {
cst.assertNextChangesEqual({cursor: t2cursor, expectedChanges: [expected]});
jsTestLog("Testing drop of unrelated collection");
-assert.writeOK(db.dropping.insert({}));
+assert.commandWorked(db.dropping.insert({}));
assertDropCollection(db, db.dropping.getName());
// Should still see the previous change from t2, shouldn't see anything about 'dropping'.
@@ -205,7 +205,7 @@ assertDropCollection(db, "dne1");
assertDropCollection(db, "dne2");
const dne1cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.dne1});
const dne2cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.dne2});
-assert.writeOK(db.t2.insert({_id: 101, renameCollection: "test.dne1", to: "test.dne2"}));
+assert.commandWorked(db.t2.insert({_id: 101, renameCollection: "test.dne1", to: "test.dne2"}));
cst.assertNoChange(dne1cursor);
cst.assertNoChange(dne2cursor);
@@ -227,7 +227,7 @@ let resumeCursor =
cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.resume1});
// Insert a document and save the resulting change stream.
-assert.writeOK(db.resume1.insert({_id: 1}));
+assert.commandWorked(db.resume1.insert({_id: 1}));
const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
@@ -239,10 +239,10 @@ resumeCursor = cst.startWatchingChanges({
});
jsTestLog("Inserting additional documents.");
-assert.writeOK(db.resume1.insert({_id: 2}));
+assert.commandWorked(db.resume1.insert({_id: 2}));
const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
-assert.writeOK(db.resume1.insert({_id: 3}));
+assert.commandWorked(db.resume1.insert({_id: 3}));
const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
diff --git a/jstests/change_streams/collation.js b/jstests/change_streams/collation.js
index 3d50b564711..4abc2f06ffb 100644
--- a/jstests/change_streams/collation.js
+++ b/jstests/change_streams/collation.js
@@ -63,8 +63,8 @@ let explicitCaseInsensitiveStream = cst.startWatchingChanges({
aggregateOptions: {collation: caseInsensitive}
});
-assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "aBc"}));
-assert.writeOK(caseInsensitiveCollection.insert({_id: 1, text: "abc"}));
+assert.commandWorked(caseInsensitiveCollection.insert({_id: 0, text: "aBc"}));
+assert.commandWorked(caseInsensitiveCollection.insert({_id: 1, text: "abc"}));
// 'didNotInheritCollationStream' should not have inherited the collection's case-insensitive
// default collation, and should only see the second insert. 'explicitCaseInsensitiveStream'
@@ -90,8 +90,8 @@ explicitCaseInsensitiveStream = cst.startWatchingChanges({
doNotModifyInPassthroughs: true
});
-assert.writeOK(similarNameCollection.insert({_id: 0, text: "aBc"}));
-assert.writeOK(caseInsensitiveCollection.insert({_id: 2, text: "ABC"}));
+assert.commandWorked(similarNameCollection.insert({_id: 0, text: "aBc"}));
+assert.commandWorked(caseInsensitiveCollection.insert({_id: 2, text: "ABC"}));
// The case-insensitive stream should not see the first insert (to the other collection), only
// the second. We do not expect to see the insert in 'didNotInheritCollationStream'.
@@ -109,7 +109,7 @@ const streamCreatedBeforeNoCollationCollection = cst.startWatchingChanges({
});
noCollationCollection = assertCreateCollection(db, noCollationCollection);
-assert.writeOK(noCollationCollection.insert({_id: 0}));
+assert.commandWorked(noCollationCollection.insert({_id: 0}));
cst.assertNextChangesEqual(
{cursor: streamCreatedBeforeNoCollationCollection, expectedChanges: [{docId: 0}]});
@@ -128,7 +128,7 @@ const streamCreatedBeforeSimpleCollationCollection = cst.startWatchingChanges({
simpleCollationCollection =
assertCreateCollection(db, simpleCollationCollection, {collation: {locale: "simple"}});
-assert.writeOK(simpleCollationCollection.insert({_id: 0}));
+assert.commandWorked(simpleCollationCollection.insert({_id: 0}));
cst.assertNextChangesEqual(
{cursor: streamCreatedBeforeSimpleCollationCollection, expectedChanges: [{docId: 0}]});
@@ -147,7 +147,7 @@ const frenchChangeStream = cst.startWatchingChanges({
});
frenchCollection = assertCreateCollection(db, frenchCollection, {collation: {locale: "fr"}});
-assert.writeOK(frenchCollection.insert({_id: 0}));
+assert.commandWorked(frenchCollection.insert({_id: 0}));
cst.assertNextChangesEqual({cursor: frenchChangeStream, expectedChanges: [{docId: 0}]});
}());
@@ -169,7 +169,7 @@ const englishCaseInsensitiveStream = cst.startWatchingChanges({
});
germanCollection = assertCreateCollection(db, germanCollection, {collation: {locale: "de"}});
-assert.writeOK(germanCollection.insert({_id: 0, text: "aBc"}));
+assert.commandWorked(germanCollection.insert({_id: 0, text: "aBc"}));
cst.assertNextChangesEqual({cursor: englishCaseInsensitiveStream, expectedChanges: [{docId: 0}]});
}());
@@ -190,8 +190,8 @@ const englishCaseSensitiveStream = cst.startWatchingChanges({
collection: caseInsensitiveCollection
});
-assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "aBc"}));
-assert.writeOK(caseInsensitiveCollection.insert({_id: 1, text: "abc"}));
+assert.commandWorked(caseInsensitiveCollection.insert({_id: 0, text: "aBc"}));
+assert.commandWorked(caseInsensitiveCollection.insert({_id: 1, text: "abc"}));
cst.assertNextChangesEqual({cursor: englishCaseSensitiveStream, expectedChanges: [{docId: 1}]});
}());
@@ -206,8 +206,8 @@ const cursor = noCollationCollection.watch(
[{$match: {"fullDocument.text": "abc"}}, {$project: {docId: "$documentKey._id"}}],
{collation: caseInsensitive});
assert(!cursor.hasNext());
-assert.writeOK(noCollationCollection.insert({_id: 0, text: "aBc"}));
-assert.writeOK(noCollationCollection.insert({_id: 1, text: "abc"}));
+assert.commandWorked(noCollationCollection.insert({_id: 0, text: "aBc"}));
+assert.commandWorked(noCollationCollection.insert({_id: 1, text: "abc"}));
assert.soon(() => cursor.hasNext());
assertChangeStreamEventEq(cursor.next(), {docId: 0});
assert.soon(() => cursor.hasNext());
@@ -225,7 +225,7 @@ let caseInsensitiveCollection =
let changeStream = caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "abc"}}],
{collation: caseInsensitive});
-assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "abc"}));
+assert.commandWorked(caseInsensitiveCollection.insert({_id: 0, text: "abc"}));
assert.soon(() => changeStream.hasNext());
const next = changeStream.next();
@@ -233,7 +233,7 @@ assert.docEq(next.documentKey, {_id: 0});
const resumeToken = next._id;
// Insert a second document to see after resuming.
-assert.writeOK(caseInsensitiveCollection.insert({_id: "dropped_coll", text: "ABC"}));
+assert.commandWorked(caseInsensitiveCollection.insert({_id: "dropped_coll", text: "ABC"}));
// Drop the collection to invalidate the stream.
assertDropCollection(db, collName);
@@ -276,7 +276,7 @@ let caseInsensitiveCollection =
let changeStream = caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "abc"}}],
{collation: caseInsensitive});
-assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "abc"}));
+assert.commandWorked(caseInsensitiveCollection.insert({_id: 0, text: "abc"}));
assert.soon(() => changeStream.hasNext());
const next = changeStream.next();
@@ -284,12 +284,12 @@ assert.docEq(next.documentKey, {_id: 0});
const resumeToken = next._id;
// Insert a second document to see after resuming.
-assert.writeOK(caseInsensitiveCollection.insert({_id: "dropped_coll", text: "ABC"}));
+assert.commandWorked(caseInsensitiveCollection.insert({_id: "dropped_coll", text: "ABC"}));
// Recreate the collection with a different collation.
caseInsensitiveCollection = assertDropAndRecreateCollection(
db, caseInsensitiveCollection.getName(), {collation: {locale: "simple"}});
-assert.writeOK(caseInsensitiveCollection.insert({_id: "new collection", text: "abc"}));
+assert.commandWorked(caseInsensitiveCollection.insert({_id: "new collection", text: "abc"}));
// Verify that the stream sees the insert before the drop and then is exhausted. We won't
// see the invalidate because the pipeline has a $match stage after the $changeStream.
diff --git a/jstests/change_streams/lookup_post_image.js b/jstests/change_streams/lookup_post_image.js
index fa2658ed6f8..c918fd22110 100644
--- a/jstests/change_streams/lookup_post_image.js
+++ b/jstests/change_streams/lookup_post_image.js
@@ -20,22 +20,22 @@ jsTestLog("Testing change streams without 'fullDocument' specified");
// Test that not specifying 'fullDocument' does include a 'fullDocument' in the result for
// an insert.
let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: coll});
-assert.writeOK(coll.insert({_id: "fullDocument not specified"}));
+assert.commandWorked(coll.insert({_id: "fullDocument not specified"}));
let latestChange = cst.getOneChange(cursor);
assert.eq(latestChange.operationType, "insert");
assert.eq(latestChange.fullDocument, {_id: "fullDocument not specified"});
// Test that not specifying 'fullDocument' does include a 'fullDocument' in the result for a
// replacement-style update.
-assert.writeOK(coll.update({_id: "fullDocument not specified"},
- {_id: "fullDocument not specified", replaced: true}));
+assert.commandWorked(coll.update({_id: "fullDocument not specified"},
+ {_id: "fullDocument not specified", replaced: true}));
latestChange = cst.getOneChange(cursor);
assert.eq(latestChange.operationType, "replace");
assert.eq(latestChange.fullDocument, {_id: "fullDocument not specified", replaced: true});
// Test that not specifying 'fullDocument' does not include a 'fullDocument' in the result
// for a non-replacement update.
-assert.writeOK(coll.update({_id: "fullDocument not specified"}, {$set: {updated: true}}));
+assert.commandWorked(coll.update({_id: "fullDocument not specified"}, {$set: {updated: true}}));
latestChange = cst.getOneChange(cursor);
assert.eq(latestChange.operationType, "update");
assert(!latestChange.hasOwnProperty("fullDocument"));
@@ -46,22 +46,22 @@ jsTestLog("Testing change streams with 'fullDocument' specified as 'default'");
// result for an insert.
cursor = cst.startWatchingChanges(
{collection: coll, pipeline: [{$changeStream: {fullDocument: "default"}}]});
-assert.writeOK(coll.insert({_id: "fullDocument is default"}));
+assert.commandWorked(coll.insert({_id: "fullDocument is default"}));
latestChange = cst.getOneChange(cursor);
assert.eq(latestChange.operationType, "insert");
assert.eq(latestChange.fullDocument, {_id: "fullDocument is default"});
// Test that specifying 'fullDocument' as 'default' does include a 'fullDocument' in the
// result for a replacement-style update.
-assert.writeOK(coll.update({_id: "fullDocument is default"},
- {_id: "fullDocument is default", replaced: true}));
+assert.commandWorked(coll.update({_id: "fullDocument is default"},
+ {_id: "fullDocument is default", replaced: true}));
latestChange = cst.getOneChange(cursor);
assert.eq(latestChange.operationType, "replace");
assert.eq(latestChange.fullDocument, {_id: "fullDocument is default", replaced: true});
// Test that specifying 'fullDocument' as 'default' does not include a 'fullDocument' in the
// result for a non-replacement update.
-assert.writeOK(coll.update({_id: "fullDocument is default"}, {$set: {updated: true}}));
+assert.commandWorked(coll.update({_id: "fullDocument is default"}, {$set: {updated: true}}));
latestChange = cst.getOneChange(cursor);
assert.eq(latestChange.operationType, "update");
assert(!latestChange.hasOwnProperty("fullDocument"));
@@ -72,14 +72,14 @@ jsTestLog("Testing change streams with 'fullDocument' specified as 'updateLookup
// the result for an insert.
cursor = cst.startWatchingChanges(
{collection: coll, pipeline: [{$changeStream: {fullDocument: "updateLookup"}}]});
-assert.writeOK(coll.insert({_id: "fullDocument is lookup"}));
+assert.commandWorked(coll.insert({_id: "fullDocument is lookup"}));
latestChange = cst.getOneChange(cursor);
assert.eq(latestChange.operationType, "insert");
assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup"});
// Test that specifying 'fullDocument' as 'updateLookup' does include a 'fullDocument' in
// the result for a replacement-style update.
-assert.writeOK(
+assert.commandWorked(
coll.update({_id: "fullDocument is lookup"}, {_id: "fullDocument is lookup", replaced: true}));
latestChange = cst.getOneChange(cursor);
assert.eq(latestChange.operationType, "replace");
@@ -87,7 +87,7 @@ assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup", replaced: t
// Test that specifying 'fullDocument' as 'updateLookup' does include a 'fullDocument' in
// the result for a non-replacement update.
-assert.writeOK(coll.update({_id: "fullDocument is lookup"}, {$set: {updated: true}}));
+assert.commandWorked(coll.update({_id: "fullDocument is lookup"}, {$set: {updated: true}}));
latestChange = cst.getOneChange(cursor);
assert.eq(latestChange.operationType, "update");
assert.eq(latestChange.fullDocument,
@@ -99,8 +99,8 @@ cursor = cst.startWatchingChanges({
collection: coll,
pipeline: [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {operationType: "update"}}]
});
-assert.writeOK(coll.update({_id: "fullDocument is lookup"}, {$set: {updatedAgain: true}}));
-assert.writeOK(coll.remove({_id: "fullDocument is lookup"}));
+assert.commandWorked(coll.update({_id: "fullDocument is lookup"}, {$set: {updatedAgain: true}}));
+assert.commandWorked(coll.remove({_id: "fullDocument is lookup"}));
// If this test is running with secondary read preference, it's necessary for the remove
// to propagate to all secondary nodes and be available for majority reads before we can
// assume looking up the document will fail.
@@ -115,8 +115,8 @@ const deleteDocResumePoint = latestChange._id;
// Test that looking up the post image of an update after the collection has been dropped
// will result in 'fullDocument' with a value of null. This must be done using getMore
// because new cursors cannot be established after a collection drop.
-assert.writeOK(coll.insert({_id: "fullDocument is lookup 2"}));
-assert.writeOK(coll.update({_id: "fullDocument is lookup 2"}, {$set: {updated: true}}));
+assert.commandWorked(coll.insert({_id: "fullDocument is lookup 2"}));
+assert.commandWorked(coll.update({_id: "fullDocument is lookup 2"}, {$set: {updated: true}}));
// Open a $changeStream cursor with batchSize 0, so that no oplog entries are retrieved yet.
cursor = cst.startWatchingChanges({
@@ -200,7 +200,7 @@ assert.eq(latestChange.fullDocument, null);
// Insert a document with the same _id, verify the change stream won't return it due to
// different UUID.
assertCreateCollection(db, coll.getName());
-assert.writeOK(coll.insert({_id: "fullDocument is lookup 2"}));
+assert.commandWorked(coll.insert({_id: "fullDocument is lookup 2"}));
// Confirm that the next entry's post-image is null since new collection has a different
// UUID.
@@ -210,13 +210,13 @@ assert(latestChange.hasOwnProperty("fullDocument"));
assert.eq(latestChange.fullDocument, null);
jsTestLog("Testing full document lookup with a real getMore");
-assert.writeOK(coll.insert({_id: "getMoreEnabled"}));
+assert.commandWorked(coll.insert({_id: "getMoreEnabled"}));
cursor = cst.startWatchingChanges({
collection: coll,
pipeline: [{$changeStream: {fullDocument: "updateLookup"}}],
});
-assert.writeOK(coll.update({_id: "getMoreEnabled"}, {$set: {updated: true}}));
+assert.commandWorked(coll.update({_id: "getMoreEnabled"}, {$set: {updated: true}}));
const doc = cst.getOneChange(cursor);
assert.docEq(doc["fullDocument"], {_id: "getMoreEnabled", updated: true});
@@ -228,7 +228,7 @@ cursor = cst.startWatchingChanges({
pipeline: [{$changeStream: {fullDocument: "updateLookup"}}],
aggregateOptions: {cursor: {batchSize: 0}}
});
-assert.writeOK(coll.insert({_id: "testing invalidate"}));
+assert.commandWorked(coll.insert({_id: "testing invalidate"}));
assertDropCollection(db, coll.getName());
// Wait until two-phase drop finishes.
assert.soon(function() {
diff --git a/jstests/change_streams/metadata_notifications.js b/jstests/change_streams/metadata_notifications.js
index 8b3aae094fe..a6d33b0335d 100644
--- a/jstests/change_streams/metadata_notifications.js
+++ b/jstests/change_streams/metadata_notifications.js
@@ -51,7 +51,7 @@ assert.eq(change.nextBatch.length, 0, tojson(change.nextBatch));
// After collection creation, we expect to see oplog entries for each subsequent operation.
let coll = assertCreateCollection(db, collName);
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
// Determine the number of shards that the collection is distributed across.
const numShards = FixtureHelpers.numberOfShardsForCollection(coll);
@@ -60,9 +60,9 @@ change = cst.getOneChange(cursor);
assert.eq(change.operationType, "insert", tojson(change));
// Create oplog entries of type insert, update, delete, and drop.
-assert.writeOK(coll.insert({_id: 1}));
-assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
-assert.writeOK(coll.remove({_id: 1}));
+assert.commandWorked(coll.insert({_id: 1}));
+assert.commandWorked(coll.update({_id: 1}, {$set: {a: 1}}));
+assert.commandWorked(coll.remove({_id: 1}));
assertDropCollection(db, coll.getName());
// We should get oplog entries of type insert, update, delete, drop, and invalidate. The cursor
@@ -100,7 +100,7 @@ assert.commandWorked(db.runCommand({
// Recreate the collection.
coll = assertCreateCollection(db, collName);
-assert.writeOK(coll.insert({_id: "after recreate"}));
+assert.commandWorked(coll.insert({_id: "after recreate"}));
// Test resuming the change stream from the collection drop using 'resumeAfter'. If running in a
// sharded passthrough suite, resuming from the drop will first return the drop from the other
@@ -156,7 +156,7 @@ cst.consumeDropUpTo({
if (!FixtureHelpers.isSharded(coll)) {
cursor = cst.startWatchingChanges({collection: collName, pipeline: [{$changeStream: {}}]});
assertDropCollection(db, "renamed_coll");
- assert.writeOK(coll.renameCollection("renamed_coll"));
+ assert.commandWorked(coll.renameCollection("renamed_coll"));
expectedChanges = [
{
operationType: "rename",
@@ -172,7 +172,7 @@ if (!FixtureHelpers.isSharded(coll)) {
// Repeat the test, this time with a change stream open on the target.
cursor = cst.startWatchingChanges({collection: collName, pipeline: [{$changeStream: {}}]});
- assert.writeOK(coll.renameCollection(collName));
+ assert.commandWorked(coll.renameCollection(collName));
expectedChanges = [
{
operationType: "rename",
@@ -186,7 +186,7 @@ if (!FixtureHelpers.isSharded(coll)) {
const resumeTokenInvalidate = changes[1]._id;
coll = db[collName];
- assert.writeOK(coll.insert({_id: "after rename"}));
+ assert.commandWorked(coll.insert({_id: "after rename"}));
// Test resuming the change stream from the collection rename using 'resumeAfter'.
assertResumeExpected({
@@ -224,13 +224,13 @@ if (!FixtureHelpers.isSharded(coll)) {
});
assertDropAndRecreateCollection(db, "renamed_coll");
- assert.writeOK(db.renamed_coll.insert({_id: 0}));
+ assert.commandWorked(db.renamed_coll.insert({_id: 0}));
// Repeat the test again, this time using the 'dropTarget' option with an existing target
// collection.
cursor =
cst.startWatchingChanges({collection: "renamed_coll", pipeline: [{$changeStream: {}}]});
- assert.writeOK(coll.renameCollection("renamed_coll", true /* dropTarget */));
+ assert.commandWorked(coll.renameCollection("renamed_coll", true /* dropTarget */));
expectedChanges = [
{
operationType: "rename",
diff --git a/jstests/change_streams/only_wake_getmore_for_relevant_changes.js b/jstests/change_streams/only_wake_getmore_for_relevant_changes.js
index 16400360d55..bfe84bcbd32 100644
--- a/jstests/change_streams/only_wake_getmore_for_relevant_changes.js
+++ b/jstests/change_streams/only_wake_getmore_for_relevant_changes.js
@@ -32,7 +32,7 @@ function runGetMoreInParallelWithEvent(
const awaitShellDoingEventDuringGetMore = startParallelShell(`
// Signal that the parallel shell has started.
-assert.writeOK(db.getCollection("${ shellSentinelCollection.getName() }").insert({}));
+assert.commandWorked(db.getCollection("${ shellSentinelCollection.getName() }").insert({}));
// Wait for the getMore to appear in currentOp.
assert.soon(function() {
@@ -132,7 +132,7 @@ const getMoreResponse = assertEventWakesCursor({
collection: changesCollection,
awaitDataCursorId: changeCursorId,
identifyingComment: wholeCollectionStreamComment,
- event: () => assert.writeOK(db.changes.insert({_id: "wake up"}))
+ event: () => assert.commandWorked(db.changes.insert({_id: "wake up"}))
});
assert.eq(getMoreResponse.cursor.nextBatch.length, 1);
assert.eq(getMoreResponse.cursor.nextBatch[0].operationType,
@@ -148,7 +148,7 @@ assertEventDoesNotWakeCursor({
collection: changesCollection,
awaitDataCursorId: changeCursorId,
identifyingComment: wholeCollectionStreamComment,
- event: () => assert.writeOK(db.unrelated_collection.insert({_id: "unrelated change"}))
+ event: () => assert.commandWorked(db.unrelated_collection.insert({_id: "unrelated change"}))
});
assert.commandWorked(
db.runCommand({killCursors: changesCollection.getName(), cursors: [changeCursorId]}));
@@ -171,7 +171,7 @@ assertEventDoesNotWakeCursor({
collection: changesCollection,
awaitDataCursorId: res.cursor.id,
identifyingComment: noInvalidatesComment,
- event: () => assert.writeOK(db.changes.insert({_id: "should not appear"}))
+ event: () => assert.commandWorked(db.changes.insert({_id: "should not appear"}))
});
assert.commandWorked(
db.runCommand({killCursors: changesCollection.getName(), cursors: [res.cursor.id]}));
diff --git a/jstests/change_streams/shell_helper.js b/jstests/change_streams/shell_helper.js
index b4e8aae00b3..f57d929e5c3 100644
--- a/jstests/change_streams/shell_helper.js
+++ b/jstests/change_streams/shell_helper.js
@@ -54,7 +54,7 @@ let changeStreamCursor = coll.watch();
assert(!changeStreamCursor.hasNext());
// Write the first document into the collection. We will save the resume token from this change.
-assert.writeOK(coll.insert({_id: 0, x: 1}));
+assert.commandWorked(coll.insert({_id: 0, x: 1}));
let resumeToken;
// Test that each of the change stream cursors picks up the change.
@@ -110,7 +110,7 @@ checkNextChange(changeStreamCursor, {docId: 1});
jsTestLog("Testing watch() with updateLookup");
changeStreamCursor = coll.watch([], {fullDocument: "updateLookup"});
-assert.writeOK(coll.update({_id: 0}, {$set: {x: 10}}));
+assert.commandWorked(coll.update({_id: 0}, {$set: {x: 10}}));
expected = {
documentKey: {_id: 0},
fullDocument: {_id: 0, x: 10},
@@ -127,7 +127,7 @@ const isMongos = FixtureHelpers.isMongos(db);
if (!isMongos) {
// Increase a field by 5 times and verify the batch size is respected.
for (let i = 0; i < 5; i++) {
- assert.writeOK(coll.update({_id: 1}, {$inc: {x: 1}}));
+ assert.commandWorked(coll.update({_id: 1}, {$inc: {x: 1}}));
}
// Only watch the "update" changes of the specific doc since the beginning.
@@ -166,7 +166,7 @@ testCommandIsCalled(() => assert(!changeStreamCursor.hasNext()), (cmdObj) => {
jsTestLog("Testing the cursor gets closed when the collection gets dropped");
changeStreamCursor = coll.watch([{$project: {clusterTime: 0}}]);
-assert.writeOK(coll.insert({_id: 2, x: 1}));
+assert.commandWorked(coll.insert({_id: 2, x: 1}));
expected = {
documentKey: {_id: 2},
fullDocument: {_id: 2, x: 1},
diff --git a/jstests/change_streams/start_at_cluster_time.js b/jstests/change_streams/start_at_cluster_time.js
index 2edcb530e20..9b289f26bc9 100644
--- a/jstests/change_streams/start_at_cluster_time.js
+++ b/jstests/change_streams/start_at_cluster_time.js
@@ -8,14 +8,14 @@ const coll = assertDropAndRecreateCollection(db, jsTestName());
const testStartTime = db.runCommand({isMaster: 1}).$clusterTime.clusterTime;
// Write a document to each chunk, and wait for replication.
-assert.writeOK(coll.insert({_id: -1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(coll.insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
// Perform two updates, then use a change stream to capture the cluster time of the first update
// to be resumed from.
const streamToFindClusterTime = coll.watch();
-assert.writeOK(coll.update({_id: -1}, {$set: {updated: true}}));
-assert.writeOK(coll.update({_id: 1}, {$set: {updated: true}}));
+assert.commandWorked(coll.update({_id: -1}, {$set: {updated: true}}));
+assert.commandWorked(coll.update({_id: 1}, {$set: {updated: true}}));
assert.soon(() => streamToFindClusterTime.hasNext());
let next = streamToFindClusterTime.next();
assert.eq(next.operationType, "update");
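
The cluster time captured above seeds a new stream via startAtOperationTime; a hedged sketch, assuming a replica-set connection (the collection name is illustrative):

    const tsColl = db.start_at_demo;  // hypothetical collection
    // $clusterTime is only present on deployments with a logical clock (replica set / sharded).
    const startTime = db.runCommand({isMaster: 1}).$clusterTime.clusterTime;
    assert.commandWorked(tsColl.insert({_id: "after startTime"}));
    // Streams opened with startAtOperationTime replay changes at or after that timestamp,
    // including the insert performed before the stream was opened.
    const replay = tsColl.watch([], {startAtOperationTime: startTime});
    assert.soon(() => replay.hasNext());
    assert.eq(replay.next().documentKey, {_id: "after startTime"});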
diff --git a/jstests/change_streams/whole_cluster.js b/jstests/change_streams/whole_cluster.js
index 7d2d3f22dbb..a1cc114cd9a 100644
--- a/jstests/change_streams/whole_cluster.js
+++ b/jstests/change_streams/whole_cluster.js
@@ -32,7 +32,7 @@ let cursor = cst.startWatchingAllChangesForCluster();
assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
// Test that the change stream returns an inserted doc.
-assert.writeOK(db.t1.insert({_id: 0, a: 1}));
+assert.commandWorked(db.t1.insert({_id: 0, a: 1}));
let expected = {
documentKey: {_id: 0},
fullDocument: {_id: 0, a: 1},
@@ -42,7 +42,7 @@ let expected = {
cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
// Test that the change stream returns another inserted doc in a different database.
-assert.writeOK(otherDB.t2.insert({_id: 0, a: 2}));
+assert.commandWorked(otherDB.t2.insert({_id: 0, a: 2}));
expected = {
documentKey: {_id: 0},
fullDocument: {_id: 0, a: 2},
@@ -65,7 +65,7 @@ const validUserDBs = [
"_config_"
];
validUserDBs.forEach(dbName => {
- assert.writeOK(db.getSiblingDB(dbName).test.insert({_id: 0, a: 1}));
+ assert.commandWorked(db.getSiblingDB(dbName).test.insert({_id: 0, a: 1}));
expected = [
{
documentKey: {_id: 0},
@@ -81,7 +81,7 @@ validUserDBs.forEach(dbName => {
// includes "system" but is not considered an internal collection.
const validSystemColls = ["system", "systems.views", "ssystem.views", "test.system"];
validSystemColls.forEach(collName => {
- assert.writeOK(db.getCollection(collName).insert({_id: 0, a: 1}));
+ assert.commandWorked(db.getCollection(collName).insert({_id: 0, a: 1}));
expected = [
{
documentKey: {_id: 0},
@@ -101,10 +101,10 @@ filteredDBs.forEach(dbName => {
if (FixtureHelpers.isMongos(db) && dbName == "local")
return;
- assert.writeOK(db.getSiblingDB(dbName).test.insert({_id: 0, a: 1}));
+ assert.commandWorked(db.getSiblingDB(dbName).test.insert({_id: 0, a: 1}));
// Insert to the test collection to ensure that the change stream has something to
// return.
- assert.writeOK(db.t1.insert({_id: dbName}));
+ assert.commandWorked(db.t1.insert({_id: dbName}));
expected = [
{
documentKey: {_id: dbName},
diff --git a/jstests/change_streams/whole_cluster_metadata_notifications.js b/jstests/change_streams/whole_cluster_metadata_notifications.js
index 0ac9c660ea6..8c72df1ba44 100644
--- a/jstests/change_streams/whole_cluster_metadata_notifications.js
+++ b/jstests/change_streams/whole_cluster_metadata_notifications.js
@@ -29,9 +29,9 @@ let aggCursor = cst.startWatchingAllChangesForCluster();
// Generate oplog entries of type insert, update, and delete across both databases.
for (let coll of [db1Coll, db2Coll]) {
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
- assert.writeOK(coll.remove({_id: 1}));
+ assert.commandWorked(coll.insert({_id: 1}));
+ assert.commandWorked(coll.update({_id: 1}, {$set: {a: 1}}));
+ assert.commandWorked(coll.remove({_id: 1}));
}
// Drop the second database, which should generate a 'drop' entry for the collection followed
@@ -64,7 +64,7 @@ db1Coll = assertDropAndRecreateCollection(testDB1, db1Coll.getName());
// Get a valid resume token that the next change stream can use.
aggCursor = cst.startWatchingAllChangesForCluster();
-assert.writeOK(db1Coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(db1Coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
let change = cst.getOneChange(aggCursor, false);
const resumeToken = change._id;
@@ -96,7 +96,7 @@ for (let collToInvalidate of [db1Coll, db2Coll]) {
// Insert into the collections on both databases, and verify the change stream is able to
// pick them up.
for (let collToWrite of [db1Coll, db2Coll]) {
- assert.writeOK(collToWrite.insert({_id: _idForTest}));
+ assert.commandWorked(collToWrite.insert({_id: _idForTest}));
change = cst.getOneChange(aggCursor);
assert.eq(change.operationType, "insert", tojson(change));
assert.eq(change.documentKey._id, _idForTest);
@@ -112,7 +112,7 @@ for (let collToInvalidate of [db1Coll, db2Coll]) {
// Start watching all changes in the cluster.
aggCursor = cst.startWatchingAllChangesForCluster();
- assert.writeOK(collToInvalidate.renameCollection("renamed_coll"));
+ assert.commandWorked(collToInvalidate.renameCollection("renamed_coll"));
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [
@@ -128,8 +128,8 @@ for (let collToInvalidate of [db1Coll, db2Coll]) {
// collection.
collToInvalidate = testDB.getCollection("renamed_coll");
assertDropAndRecreateCollection(testDB, collName);
- assert.writeOK(testDB[collName].insert({_id: 0}));
- assert.writeOK(collToInvalidate.renameCollection(collName, true /* dropTarget */));
+ assert.commandWorked(testDB[collName].insert({_id: 0}));
+ assert.commandWorked(collToInvalidate.renameCollection(collName, true /* dropTarget */));
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [
@@ -188,14 +188,14 @@ for (let collToInvalidate of [db1Coll, db2Coll]) {
// The change stream should not be invalidated by the rename(s).
assert.eq(0, cst.getNextBatch(aggCursor).nextBatch.length);
- assert.writeOK(collToInvalidate.insert({_id: 2}));
+ assert.commandWorked(collToInvalidate.insert({_id: 2}));
assert.eq(cst.getOneChange(aggCursor).operationType, "insert");
// Test that renaming a "system" collection to a user collection *does* return a rename
// notification.
assert.commandWorked(
testDB.runCommand({create: "view1", viewOn: collToInvalidate.getName(), pipeline: []}));
- assert.writeOK(testDB.system.views.renameCollection("non_system_collection"));
+ assert.commandWorked(testDB.system.views.renameCollection("non_system_collection"));
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [{
@@ -211,17 +211,17 @@ for (let collToInvalidate of [db1Coll, db2Coll]) {
assert.commandWorked(
testDB.runCommand({create: "view1", viewOn: collToInvalidate.getName(), pipeline: []}));
// Note that the target of the rename must be a valid "system" collection.
- assert.writeOK(testDB.system.views.renameCollection("system.users"));
+ assert.commandWorked(testDB.system.views.renameCollection("system.users"));
// Verify that the change stream filters out the rename above, instead returning the
// next insert to the test collection.
- assert.writeOK(collToInvalidate.insert({_id: 1}));
+ assert.commandWorked(collToInvalidate.insert({_id: 1}));
change = cst.getOneChange(aggCursor);
assert.eq(change.operationType, "insert", tojson(change));
assert.eq(change.ns, {db: testDB.getName(), coll: collToInvalidate.getName()});
// Test that renaming a user collection to a "system" collection *does* return a rename
// notification.
- assert.writeOK(collToInvalidate.renameCollection("system.views"));
+ assert.commandWorked(collToInvalidate.renameCollection("system.views"));
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [{
@@ -235,7 +235,7 @@ for (let collToInvalidate of [db1Coll, db2Coll]) {
assertDropCollection(testDB, "system.views");
// Recreate the test collection for the remainder of the test.
- assert.writeOK(collToInvalidate.insert({_id: 0}));
+ assert.commandWorked(collToInvalidate.insert({_id: 0}));
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [{
@@ -252,7 +252,7 @@ for (let collToInvalidate of [db1Coll, db2Coll]) {
// Insert to the test collection to queue up another change after the drop. This is needed
// since the number of 'drop' notifications is not deterministic in the sharded passthrough
// suites.
- assert.writeOK(collToInvalidate.insert({_id: 0}));
+ assert.commandWorked(collToInvalidate.insert({_id: 0}));
cst.consumeDropUpTo({
cursor: aggCursor,
dropType: "drop",
@@ -275,7 +275,7 @@ for (let collToInvalidate of [db1Coll, db2Coll]) {
// Verify that the change stream does not report the insertion into "system.views", and is
// not invalidated by dropping the system collection. Instead, it correctly reports the next
// write to the test collection.
- assert.writeOK(collToInvalidate.insert({_id: 1}));
+ assert.commandWorked(collToInvalidate.insert({_id: 1}));
change = cst.getOneChange(aggCursor);
assert.eq(change.operationType, "insert", tojson(change));
assert.eq(change.ns, {db: testDB.getName(), coll: collToInvalidate.getName()});
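
The whole-cluster streams these tests drive through ChangeStreamTest map onto the Mongo.watch() shell helper; a minimal sketch with illustrative database names:

    // A whole-cluster change stream observes writes in any non-internal database.
    const clusterStream = db.getMongo().watch();
    assert.commandWorked(db.getSiblingDB("dbA").t.insert({_id: 1}));
    assert.commandWorked(db.getSiblingDB("dbB").t.insert({_id: 1}));
    assert.soon(() => clusterStream.hasNext());
    assert.eq(clusterStream.next().ns.db, "dbA");
    assert.soon(() => clusterStream.hasNext());
    assert.eq(clusterStream.next().ns.db, "dbB");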
diff --git a/jstests/change_streams/whole_cluster_resumability.js b/jstests/change_streams/whole_cluster_resumability.js
index 270f6c465db..8564d01e770 100644
--- a/jstests/change_streams/whole_cluster_resumability.js
+++ b/jstests/change_streams/whole_cluster_resumability.js
@@ -15,7 +15,7 @@ let cst = new ChangeStreamTest(adminDB);
let resumeCursor = cst.startWatchingAllChangesForCluster();
// Insert a document in the first database and save the resulting change stream.
-assert.writeOK(db1Coll.insert({_id: 1}));
+assert.commandWorked(db1Coll.insert({_id: 1}));
const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
@@ -28,12 +28,12 @@ resumeCursor = cst.startWatchingChanges({
});
// Write the next document into the second database.
-assert.writeOK(db2Coll.insert({_id: 2}));
+assert.commandWorked(db2Coll.insert({_id: 2}));
const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
// Write the third document into the first database again.
-assert.writeOK(db1Coll.insert({_id: 3}));
+assert.commandWorked(db1Coll.insert({_id: 3}));
const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
@@ -68,7 +68,7 @@ if (!FixtureHelpers.isSharded(db1Coll)) {
pipeline: [{$changeStream: {allChangesForCluster: true}}],
aggregateOptions: {cursor: {batchSize: 0}}
});
- assert.writeOK(db1Coll.renameCollection(renameColl.getName()));
+ assert.commandWorked(db1Coll.renameCollection(renameColl.getName()));
const renameChanges = cst.assertNextChangesEqual({
cursor: resumeCursor,
@@ -83,7 +83,7 @@ if (!FixtureHelpers.isSharded(db1Coll)) {
const resumeTokenRename = renameChanges[0]._id;
// Insert into the renamed collection.
- assert.writeOK(renameColl.insert({_id: "after rename"}));
+ assert.commandWorked(renameColl.insert({_id: "after rename"}));
// Resume from the rename notification using 'resumeAfter' and verify that the change stream
// returns the next insert.
@@ -117,7 +117,7 @@ if (!FixtureHelpers.isSharded(db1Coll)) {
// Rename back to the original collection for reliability of the collection drops when
// dropping the database.
- assert.writeOK(renameColl.renameCollection(db1Coll.getName()));
+ assert.commandWorked(renameColl.renameCollection(db1Coll.getName()));
}
// Dropping a database should generate a 'drop' notification for the collection followed by a
@@ -128,7 +128,7 @@ const dropDbChanges = cst.assertDatabaseDrop({cursor: resumeCursor, db: testDBs[
const resumeTokenDbDrop = dropDbChanges[dropDbChanges.length - 1]._id;
// Recreate the collection and insert a document.
-assert.writeOK(db1Coll.insert({_id: "after recreate"}));
+assert.commandWorked(db1Coll.insert({_id: "after recreate"}));
let expectedInsert = {
operationType: "insert",
diff --git a/jstests/change_streams/whole_db.js b/jstests/change_streams/whole_db.js
index aaa6fd0a29f..099bf388525 100644
--- a/jstests/change_streams/whole_db.js
+++ b/jstests/change_streams/whole_db.js
@@ -27,7 +27,7 @@ let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collecti
assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
// Test that the change stream returns an inserted doc.
-assert.writeOK(db.t1.insert({_id: 0, a: 1}));
+assert.commandWorked(db.t1.insert({_id: 0, a: 1}));
let expected = {
documentKey: {_id: 0},
fullDocument: {_id: 0, a: 1},
@@ -38,7 +38,7 @@ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
// Test that the change stream returns another inserted doc in a different collection but still
// in the target db.
-assert.writeOK(db.t2.insert({_id: 0, a: 2}));
+assert.commandWorked(db.t2.insert({_id: 0, a: 2}));
expected = {
documentKey: {_id: 0},
fullDocument: {_id: 0, a: 2},
@@ -53,7 +53,7 @@ const validSystemColls = ["system", "systems.views", "ssystem.views", "test.syst
validSystemColls.forEach(collName => {
cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
const coll = db.getCollection(collName);
- assert.writeOK(coll.insert({_id: 0, a: 1}));
+ assert.commandWorked(coll.insert({_id: 0, a: 1}));
expected = [
{
documentKey: {_id: 0},
@@ -69,7 +69,7 @@ validSystemColls.forEach(collName => {
// Insert to the test collection to queue up another change after the drop. This is needed
// since the number of 'drop' notifications is not deterministic in the sharded passthrough
// suites.
- assert.writeOK(coll.insert({_id: 0}));
+ assert.commandWorked(coll.insert({_id: 0}));
cst.consumeDropUpTo({
cursor: cursor,
dropType: "drop",
diff --git a/jstests/change_streams/whole_db_metadata_notifications.js b/jstests/change_streams/whole_db_metadata_notifications.js
index 7b659ff4e12..1500402bc1c 100644
--- a/jstests/change_streams/whole_db_metadata_notifications.js
+++ b/jstests/change_streams/whole_db_metadata_notifications.js
@@ -22,9 +22,9 @@ let coll = assertDropAndRecreateCollection(testDB, collName);
let aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
// Create oplog entries of type insert, update, and delete.
-assert.writeOK(coll.insert({_id: 1}));
-assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
-assert.writeOK(coll.remove({_id: 1}));
+assert.commandWorked(coll.insert({_id: 1}));
+assert.commandWorked(coll.update({_id: 1}, {$set: {a: 1}}));
+assert.commandWorked(coll.remove({_id: 1}));
// Drop and recreate the collection.
const collAgg = assertDropAndRecreateCollection(testDB, collName);
@@ -40,7 +40,7 @@ change = cst.getOneChange(aggCursor);
assert.eq(change.operationType, "drop", tojson(change));
// Get a valid resume token that the next change stream can use.
-assert.writeOK(collAgg.insert({_id: 1}));
+assert.commandWorked(collAgg.insert({_id: 1}));
change = cst.getOneChange(aggCursor, false);
const resumeToken = change._id;
@@ -57,7 +57,7 @@ assert.commandWorked(testDB.runCommand(
// Test that invalidation entries for other databases are filtered out.
const otherDB = testDB.getSiblingDB(jsTestName() + "other");
const otherDBColl = otherDB[collName + "_other"];
-assert.writeOK(otherDBColl.insert({_id: 0}));
+assert.commandWorked(otherDBColl.insert({_id: 0}));
// Create collection on the database being watched.
coll = assertDropAndRecreateCollection(testDB, collName);
@@ -73,7 +73,7 @@ assertDropCollection(otherDB, otherDBColl.getName());
// Insert into the collection in the watched database, and verify the change stream is able to
// pick it up.
-assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 1}));
change = cst.getOneChange(aggCursor);
assert.eq(change.operationType, "insert", tojson(change));
assert.eq(change.documentKey._id, 1);
@@ -85,7 +85,7 @@ if (!FixtureHelpers.isSharded(coll)) {
assertDropAndRecreateCollection(testDB, coll.getName());
assertDropCollection(testDB, "renamed_coll");
aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
- assert.writeOK(coll.renameCollection("renamed_coll"));
+ assert.commandWorked(coll.renameCollection("renamed_coll"));
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [{
@@ -99,8 +99,8 @@ if (!FixtureHelpers.isSharded(coll)) {
// collection.
coll = testDB["renamed_coll"];
assertCreateCollection(testDB, collName);
- assert.writeOK(testDB[collName].insert({_id: 0}));
- assert.writeOK(coll.renameCollection(collName, true /* dropTarget */));
+ assert.commandWorked(testDB[collName].insert({_id: 0}));
+ assert.commandWorked(coll.renameCollection(collName, true /* dropTarget */));
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [
@@ -159,7 +159,7 @@ if (!FixtureHelpers.isSharded(coll)) {
// The change stream should not be invalidated by the rename(s).
assert.eq(0, cst.getNextBatch(aggCursor).nextBatch.length);
- assert.writeOK(coll.insert({_id: 2}));
+ assert.commandWorked(coll.insert({_id: 2}));
assert.eq(cst.getOneChange(aggCursor).operationType, "insert");
// Drop the new collection to avoid an additional 'drop' notification when the database is
@@ -189,7 +189,7 @@ assertDropCollection(testDB, "system.views");
// Verify that the change stream does not report the insertion into "system.views", and is
// not invalidated by dropping the system collection. Instead, it correctly reports the next
// write to the test collection.
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
change = cst.getOneChange(aggCursor);
assert.eq(change.operationType, "insert", tojson(change));
assert.eq(change.ns, {db: testDB.getName(), coll: coll.getName()});
@@ -197,7 +197,7 @@ assert.eq(change.ns, {db: testDB.getName(), coll: coll.getName()});
// Test that renaming a "system" collection *does* return a notification if the target of
// the rename is a non-system collection.
assert.commandWorked(testDB.runCommand({create: "view1", viewOn: coll.getName(), pipeline: []}));
-assert.writeOK(testDB.system.views.renameCollection("non_system_collection"));
+assert.commandWorked(testDB.system.views.renameCollection("non_system_collection"));
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [{
@@ -212,17 +212,17 @@ cst.assertNextChangesEqual({
aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
assert.commandWorked(testDB.runCommand({create: "view1", viewOn: coll.getName(), pipeline: []}));
// Note that the target of the rename must be a valid "system" collection.
-assert.writeOK(testDB.system.views.renameCollection("system.users"));
+assert.commandWorked(testDB.system.views.renameCollection("system.users"));
// Verify that the change stream filters out the rename above, instead returning the next insert
// to the test collection.
-assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 1}));
change = cst.getOneChange(aggCursor);
assert.eq(change.operationType, "insert", tojson(change));
assert.eq(change.ns, {db: testDB.getName(), coll: coll.getName()});
// Test that renaming a user collection to a "system" collection *is* returned in the change
// stream.
-assert.writeOK(coll.renameCollection("system.views"));
+assert.commandWorked(coll.renameCollection("system.views"));
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [{
diff --git a/jstests/change_streams/whole_db_resumability.js b/jstests/change_streams/whole_db_resumability.js
index 697f72ddcf9..2e2c0e183ec 100644
--- a/jstests/change_streams/whole_db_resumability.js
+++ b/jstests/change_streams/whole_db_resumability.js
@@ -18,8 +18,8 @@ let cst = new ChangeStreamTest(testDB);
let resumeCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
// Insert a single document to each collection and save the resume token from the first insert.
-assert.writeOK(coll.insert({_id: 1}));
-assert.writeOK(otherColl.insert({_id: 2}));
+assert.commandWorked(coll.insert({_id: 1}));
+assert.commandWorked(otherColl.insert({_id: 2}));
const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
assert.eq(firstInsertChangeDoc.ns, {db: testDB.getName(), coll: coll.getName()});
@@ -37,7 +37,7 @@ assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
assert.eq(secondInsertChangeDoc.ns, {db: testDB.getName(), coll: otherColl.getName()});
// Insert a third document to the first collection and test that the change stream picks it up.
-assert.writeOK(coll.insert({_id: 3}));
+assert.commandWorked(coll.insert({_id: 3}));
const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
assert.eq(thirdInsertChangeDoc.ns, {db: testDB.getName(), coll: coll.getName()});
@@ -67,7 +67,7 @@ if (!FixtureHelpers.isSharded(coll)) {
assertDropCollection(renameColl.getDB(), renameColl.getName());
resumeCursor = cst.startWatchingChanges({collection: 1, pipeline: [{$changeStream: {}}]});
- assert.writeOK(coll.renameCollection(renameColl.getName()));
+ assert.commandWorked(coll.renameCollection(renameColl.getName()));
const renameChanges = cst.assertNextChangesEqual({
cursor: resumeCursor,
@@ -82,7 +82,7 @@ if (!FixtureHelpers.isSharded(coll)) {
const resumeTokenRename = renameChanges[0]._id;
// Insert into the renamed collection.
- assert.writeOK(renameColl.insert({_id: "after rename"}));
+ assert.commandWorked(renameColl.insert({_id: "after rename"}));
// Resume from the rename notification using 'resumeAfter' and verify that the change stream
// returns the next insert.
@@ -110,7 +110,7 @@ if (!FixtureHelpers.isSharded(coll)) {
// Rename back to the original collection for reliability of the collection drops when
// dropping the database.
- assert.writeOK(renameColl.renameCollection(coll.getName()));
+ assert.commandWorked(renameColl.renameCollection(coll.getName()));
}
// Explicitly drop one collection to ensure reliability of the order of notifications from the
@@ -149,7 +149,7 @@ const resumeTokenInvalidate =
});
// Recreate the test collection.
-assert.writeOK(coll.insert({_id: "after recreate"}));
+assert.commandWorked(coll.insert({_id: "after recreate"}));
// Test resuming from the 'dropDatabase' entry using 'resumeAfter'.
resumeCursor = cst.startWatchingChanges({
diff --git a/jstests/client_encrypt/fle_auto_decrypt.js b/jstests/client_encrypt/fle_auto_decrypt.js
index 182c8af3ac4..1b6d1335ad2 100644
--- a/jstests/client_encrypt/fle_auto_decrypt.js
+++ b/jstests/client_encrypt/fle_auto_decrypt.js
@@ -34,7 +34,8 @@ const deterministicAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic";
const shell = Mongo(conn.host, clientSideFLEOptions);
const keyVault = shell.getKeyVault();
-assert.writeOK(keyVault.createKey("local", "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']));
+assert.commandWorked(
+ keyVault.createKey("local", "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']));
const clientEncrypt = shell.getClientEncryption();
const keyId = keyVault.getKeyByAltName("mongoKey").toArray()[0]._id;
@@ -45,7 +46,7 @@ const encryptedStr = clientEncrypt.encrypt(keyId, "mongodb", deterministicAlgori
const collection = conn.getDB("test").getCollection("collection");
for (var i = 0; i < 150; i++) {
- assert.writeOK(collection.insert({string: encryptedStr, id: 1}));
+ assert.commandWorked(collection.insert({string: encryptedStr, id: 1}));
}
// Ensure string is auto decrypted
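
A condensed sketch of the key-vault flow this test relies on, assuming 'shell' is a Mongo() connection configured with clientSideFLEOptions (the KMS ARN is a fake placeholder, as in the test itself):

    const kv = shell.getKeyVault();  // 'shell' assumed FLE-enabled
    // createKey writes to the key vault collection, so commandWorked applies to its result.
    assert.commandWorked(kv.createKey("local", "arn:aws:kms:us-east-1:fake:fake:fake", ["demoKey"]));
    const demoKeyId = kv.getKeyByAltName("demoKey").toArray()[0]._id;
    const encrypted = shell.getClientEncryption().encrypt(
        demoKeyId, "mongodb", "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic");
    // Reads through an FLE-enabled connection transparently decrypt the stored BinData.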
diff --git a/jstests/client_encrypt/fle_aws_faults.js b/jstests/client_encrypt/fle_aws_faults.js
index 1d9a621b42a..1f295310cbc 100644
--- a/jstests/client_encrypt/fle_aws_faults.js
+++ b/jstests/client_encrypt/fle_aws_faults.js
@@ -84,7 +84,7 @@ function testBadDecryptResult(fault) {
runKMS(mock_kms, (shell) => {
const keyVault = shell.getKeyVault();
- assert.writeOK(
+ assert.commandWorked(
keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ["mongoKey"]));
const keyId = keyVault.getKeys("mongoKey").toArray()[0]._id;
const str = "mongo";
@@ -102,7 +102,7 @@ function testBadDecryptKeyResult(fault) {
runKMS(mock_kms, (shell, cleanCacheShell) => {
const keyVault = shell.getKeyVault();
- assert.writeOK(
+ assert.commandWorked(
keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ["mongoKey"]));
const keyId = keyVault.getKeys("mongoKey").toArray()[0]._id;
const str = "mongo";
@@ -123,7 +123,7 @@ function testBadDecryptError() {
runKMS(mock_kms, (shell) => {
const keyVault = shell.getKeyVault();
- assert.writeOK(
+ assert.commandWorked(
keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ["mongoKey"]));
const keyId = keyVault.getKeys("mongoKey").toArray()[0]._id;
const str = "mongo";
diff --git a/jstests/client_encrypt/fle_encrypt_decrypt_shell.js b/jstests/client_encrypt/fle_encrypt_decrypt_shell.js
index f67bc72dccc..db0f8285a07 100644
--- a/jstests/client_encrypt/fle_encrypt_decrypt_shell.js
+++ b/jstests/client_encrypt/fle_encrypt_decrypt_shell.js
@@ -87,7 +87,7 @@ for (const kmsType of kmsTypes) {
for (const encryptionAlgorithm of encryptionAlgorithms) {
collection.drop();
- assert.writeOK(
+ assert.commandWorked(
keyVault.createKey(kmsType, "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']));
const keyId = keyVault.getKeyByAltName("mongoKey").toArray()[0]._id;
diff --git a/jstests/client_encrypt/fle_key_faults.js b/jstests/client_encrypt/fle_key_faults.js
index 41b2505c8ba..4df4d7eb6b6 100644
--- a/jstests/client_encrypt/fle_key_faults.js
+++ b/jstests/client_encrypt/fle_key_faults.js
@@ -48,7 +48,7 @@ function testFault(kmsType, func) {
const shell = Mongo(conn.host, clientSideFLEOptions);
const keyVault = shell.getKeyVault();
- assert.writeOK(
+ assert.commandWorked(
keyVault.createKey(kmsType, "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']));
const keyId = keyVault.getKeyByAltName("mongoKey").toArray()[0]._id;
diff --git a/jstests/client_encrypt/fle_keys.js b/jstests/client_encrypt/fle_keys.js
index 646b95141ad..526fac471da 100644
--- a/jstests/client_encrypt/fle_keys.js
+++ b/jstests/client_encrypt/fle_keys.js
@@ -68,9 +68,9 @@ result = keyVault.deleteKey(keyId);
assert.eq(0, keyVault.getKey(keyId).itcount());
assert.eq(0, keyVault.getKeys().itcount());
-assert.writeOK(keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake1"));
-assert.writeOK(keyVault.createKey("aws", "arn:aws:kms:us-east-2:fake:fake:fake2"));
-assert.writeOK(keyVault.createKey("aws", "arn:aws:kms:us-east-3:fake:fake:fake3"));
+assert.commandWorked(keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake1"));
+assert.commandWorked(keyVault.createKey("aws", "arn:aws:kms:us-east-2:fake:fake:fake2"));
+assert.commandWorked(keyVault.createKey("aws", "arn:aws:kms:us-east-3:fake:fake:fake3"));
assert.eq(3, keyVault.getKeys().itcount());
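
The key-lifecycle calls above, as one hedged sketch ('shell' again assumed to be an FLE-enabled connection):

    const vault = shell.getKeyVault();
    assert.commandWorked(vault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake1"));
    const firstKey = vault.getKeys().toArray()[0];
    vault.deleteKey(firstKey._id);
    // After deletion the key is no longer retrievable by id.
    assert.eq(0, vault.getKey(firstKey._id).itcount());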
diff --git a/jstests/client_encrypt/lib/fle_command_line_explicit_encryption.js b/jstests/client_encrypt/lib/fle_command_line_explicit_encryption.js
index da83d69c87b..dc2e954281f 100644
--- a/jstests/client_encrypt/lib/fle_command_line_explicit_encryption.js
+++ b/jstests/client_encrypt/lib/fle_command_line_explicit_encryption.js
@@ -51,7 +51,8 @@ const failTestCases = [null, undefined, MinKey(), MaxKey(), DBRef("test", "test"
for (const encryptionAlgorithm of encryptionAlgorithms) {
collection.drop();
- assert.writeOK(keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']));
+ assert.commandWorked(
+ keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']));
const keyId = keyVault.getKeyByAltName("mongoKey").toArray()[0]._id;
let pass;
diff --git a/jstests/concurrency/fsm_workload_helpers/snapshot_read_utils.js b/jstests/concurrency/fsm_workload_helpers/snapshot_read_utils.js
index bebbb083d2f..8c02b494af8 100644
--- a/jstests/concurrency/fsm_workload_helpers/snapshot_read_utils.js
+++ b/jstests/concurrency/fsm_workload_helpers/snapshot_read_utils.js
@@ -102,7 +102,7 @@ function doSnapshotGetMore(collName, data, getMoreErrorCodes, commitTransactionE
function insertSessionDoc(db, collName, tid, sessionId) {
const sessionDoc = {"_id": "sessionDoc" + tid, "id": sessionId};
const res = db[collName].insert(sessionDoc);
- assert.writeOK(res);
+ assert.commandWorked(res);
assert.eq(1, res.nInserted);
}
diff --git a/jstests/concurrency/fsm_workloads/agg_base.js b/jstests/concurrency/fsm_workloads/agg_base.js
index b38c8771d74..0b3ad432418 100644
--- a/jstests/concurrency/fsm_workloads/agg_base.js
+++ b/jstests/concurrency/fsm_workloads/agg_base.js
@@ -60,7 +60,7 @@ var $config = (function() {
this.docSize));
}
var res = bulk.execute();
- assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.commandWorked(res);
assertWhenOwnColl.eq(this.numDocs, res.nInserted);
assertWhenOwnColl.eq(this.numDocs, db[collName].find().itcount());
assertWhenOwnColl.eq(this.numDocs / 2, db[collName].find({flag: false}).itcount());
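
The bulk-write pattern repeated across these workloads, as a standalone sketch (collection name illustrative):

    const bulkColl = db.bulk_demo;  // hypothetical collection
    const bulk = bulkColl.initializeUnorderedBulkOp();
    for (let i = 0; i < 100; ++i) {
        bulk.insert({i: i, flag: i % 2 === 0});
    }
    // bulk.execute() returns a BulkWriteResult, which commandWorked also accepts.
    const bulkRes = bulk.execute();
    assert.commandWorked(bulkRes);
    assert.eq(100, bulkRes.nInserted);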
diff --git a/jstests/concurrency/fsm_workloads/agg_graph_lookup.js b/jstests/concurrency/fsm_workloads/agg_graph_lookup.js
index 5dbdd784ae0..27861eaa8e5 100644
--- a/jstests/concurrency/fsm_workloads/agg_graph_lookup.js
+++ b/jstests/concurrency/fsm_workloads/agg_graph_lookup.js
@@ -35,7 +35,7 @@ var $config = (function() {
var index = Random.randInt(this.numDocs + 1);
var update = Random.randInt(this.numDocs + 1);
var res = db[collName].update({_id: index}, {$set: {to: update}});
- assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.commandWorked(res);
}
};
@@ -48,7 +48,7 @@ var $config = (function() {
bulk.insert({_id: i, to: i + 1});
}
var res = bulk.execute();
- assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.commandWorked(res);
assertWhenOwnColl.eq(this.numDocs, res.nInserted);
assertWhenOwnColl.eq(this.numDocs, db[collName].find().itcount());
}
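
A note on the leveled assertions in these workloads (defined in jstests/concurrency/fsm_libs/assert.js): assertAlways must hold in every suite, while assertWhenOwnColl only fires when the workload has the collection to itself. A sketch of a state function using both; the state name is illustrative:

    const touchState = function touch(db, collName) {
        const res = db[collName].update({_id: 0}, {$set: {touched: true}});
        // A write error is never acceptable, regardless of which suite runs the workload.
        assertAlways.commandWorked(res);
        // Exact counts only hold when no other workload shares the collection.
        assertWhenOwnColl.contains(res.nModified, [0, 1], tojson(res));
    };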
diff --git a/jstests/concurrency/fsm_workloads/collmod.js b/jstests/concurrency/fsm_workloads/collmod.js
index 15ea365c08e..095602b43af 100644
--- a/jstests/concurrency/fsm_workloads/collmod.js
+++ b/jstests/concurrency/fsm_workloads/collmod.js
@@ -44,7 +44,7 @@ var $config = (function() {
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(this.numDocs, res.nInserted);
// create TTL index
diff --git a/jstests/concurrency/fsm_workloads/compact.js b/jstests/concurrency/fsm_workloads/compact.js
index bf38ec947c2..1ef611e7761 100644
--- a/jstests/concurrency/fsm_workloads/compact.js
+++ b/jstests/concurrency/fsm_workloads/compact.js
@@ -30,7 +30,7 @@ var $config = (function() {
bulk.insert({a: Random.randInt(10), b: Random.randInt(10), c: Random.randInt(10)});
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(this.nDocumentsToInsert, res.nInserted);
}
diff --git a/jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js b/jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js
index 9fd7846fb76..179fe97d064 100644
--- a/jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js
+++ b/jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js
@@ -24,7 +24,7 @@ var $config = (function() {
bulk.insert({x: i});
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(nDocumentsToInsert, res.nInserted);
}
diff --git a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
index 6dddc40b49e..ddde8bfcc38 100644
--- a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
+++ b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
@@ -34,7 +34,7 @@ var $config = (function() {
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq((this.tid + 1) * 200, res.nInserted);
assertWhenOwnDB(!db[this.threadCollName].isCapped());
diff --git a/jstests/concurrency/fsm_workloads/count.js b/jstests/concurrency/fsm_workloads/count.js
index 1b10482a383..517941e9747 100644
--- a/jstests/concurrency/fsm_workloads/count.js
+++ b/jstests/concurrency/fsm_workloads/count.js
@@ -42,7 +42,7 @@ var $config = (function() {
bulk.insert({i: i % this.modulus, tid: this.tid});
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(this.getNumDocs(), res.nInserted);
}
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection.js b/jstests/concurrency/fsm_workloads/create_capped_collection.js
index 5a74c060919..942af8a8231 100644
--- a/jstests/concurrency/fsm_workloads/create_capped_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection.js
@@ -32,7 +32,7 @@ var $config = (function() {
var doc = makeDocWithSize(targetSize);
var res = db[collName].insert(doc);
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(1, res.nInserted);
return doc._id;
diff --git a/jstests/concurrency/fsm_workloads/create_database.js b/jstests/concurrency/fsm_workloads/create_database.js
index 573764a98ef..da54db62323 100644
--- a/jstests/concurrency/fsm_workloads/create_database.js
+++ b/jstests/concurrency/fsm_workloads/create_database.js
@@ -28,7 +28,7 @@ var $config = (function() {
if (mayFailWithDatabaseDifferCase && res.hasWriteError())
assertAlways.writeErrorWithCode(res, ErrorCodes.DatabaseDifferCase);
else
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
return res;
}
};
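
Where a write may legitimately fail, these workloads branch on hasWriteError() rather than asserting success outright; a minimal sketch using an intentional duplicate key (collection name illustrative):

    const dupColl = db.dup_demo;  // hypothetical collection
    assert.commandWorked(dupColl.insert({_id: 1}));
    const dupRes = dupColl.insert({_id: 1});  // duplicate _id, expected to fail
    if (dupRes.hasWriteError()) {
        // writeErrorWithCode asserts the failure carries the anticipated error code.
        assert.writeErrorWithCode(dupRes, ErrorCodes.DuplicateKey);
    } else {
        assert.commandWorked(dupRes);
    }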
diff --git a/jstests/concurrency/fsm_workloads/create_index_background.js b/jstests/concurrency/fsm_workloads/create_index_background.js
index 9fec0d40a95..e6e7aa38880 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background.js
@@ -53,7 +53,7 @@ var $config = (function() {
bulk.insert(this.extendDocument(doc));
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(this.nDocumentsToSeed, res.nInserted, tojson(res));
// In the first thread create the background index.
@@ -86,7 +86,7 @@ var $config = (function() {
for (var i = 0; i < this.nDocumentsToCreate; ++i) {
const doc = {x: i + highest + 1, tid: this.tid, crud: 1};
res = coll.insert(this.extendDocument(doc));
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(res.nInserted, 1, tojson(res));
}
assertWhenOwnColl.eq(coll.find({tid: this.tid}).itcount(),
@@ -133,7 +133,7 @@ var $config = (function() {
updateExpr = this.extendUpdateExpr(updateExpr);
res = coll.update({x: Random.randInt(highest), tid: this.tid}, updateExpr);
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
if (db.getMongo().writeMode() === 'commands') {
assertWhenOwnColl.contains(res.nModified, [0, 1], tojson(res));
}
@@ -169,7 +169,7 @@ var $config = (function() {
// Do randomized deletes on index x. A document is not guaranteed
// to match the randomized 'x' predicate.
res = coll.remove({x: Random.randInt(highest), tid: this.tid});
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertWhenOwnColl.contains(res.nRemoved, [0, 1], tojson(res));
nActualDeletes += res.nRemoved;
}
@@ -210,7 +210,7 @@ var $config = (function() {
bulk.insert({x: i});
}
res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(nSetupDocs, res.nInserted, tojson(res));
// Increase the following parameters to reduce the number of yields.
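
The primitive this workload races against, sketched minimally (collection name illustrative):

    const bgColl = db.bg_index_demo;  // hypothetical collection
    for (let i = 0; i < 100; ++i) {
        assert.commandWorked(bgColl.insert({x: i}));
    }
    // A background build yields periodically, letting concurrent CRUD interleave with it.
    assert.commandWorked(bgColl.createIndex({x: 1}, {background: true}));
    assert.commandWorked(bgColl.update({x: 50}, {$set: {crud: 1}}));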
diff --git a/jstests/concurrency/fsm_workloads/create_index_background_unique.js b/jstests/concurrency/fsm_workloads/create_index_background_unique.js
index 3f43e2ce52e..906d2245fe1 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background_unique.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background_unique.js
@@ -75,7 +75,7 @@ var $config = (function() {
const uniqueValuePrefix = i.toString() + "_";
bulk.insert(this.buildvariableSizedDoc(uniqueValuePrefix));
}
- assertAlways.writeOK(bulk.execute());
+ assertAlways.commandWorked(bulk.execute());
assertAlways.eq(this.numDocsToLoad, db[collectionName].find({}).itcount());
}
}
diff --git a/jstests/concurrency/fsm_workloads/distinct.js b/jstests/concurrency/fsm_workloads/distinct.js
index a400d18b29d..2832ea215d3 100644
--- a/jstests/concurrency/fsm_workloads/distinct.js
+++ b/jstests/concurrency/fsm_workloads/distinct.js
@@ -19,7 +19,7 @@ var $config = (function() {
bulk.insert({i: i});
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(this.numDocs, res.nInserted);
assertAlways.commandWorked(db[this.threadCollName].ensureIndex({i: 1}));
}
diff --git a/jstests/concurrency/fsm_workloads/distinct_noindex.js b/jstests/concurrency/fsm_workloads/distinct_noindex.js
index 1848fe78901..b2f2f69bcdd 100644
--- a/jstests/concurrency/fsm_workloads/distinct_noindex.js
+++ b/jstests/concurrency/fsm_workloads/distinct_noindex.js
@@ -25,7 +25,7 @@ var $config = (function() {
bulk.insert({i: i % this.modulus, tid: this.tid});
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(this.numDocs, res.nInserted);
}
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove.js b/jstests/concurrency/fsm_workloads/findAndModify_remove.js
index bcca3834c52..3c20a7ae61f 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove.js
@@ -16,7 +16,7 @@ var $config = (function() {
function insertAndRemove(db, collName) {
var res = db[collName].insert({tid: this.tid, value: this.iter});
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(1, res.nInserted);
res = db.runCommand({
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
index c97ac6eb10a..8066810489f 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
@@ -38,7 +38,7 @@ var $config = (function() {
updateDoc.$push[this.opName] = id;
var res = ownedDB[collName].update({_id: this.tid}, updateDoc, {upsert: true});
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.contains(res.nMatched, [0, 1], tojson(res));
if (res.nMatched === 0) {
@@ -96,7 +96,7 @@ var $config = (function() {
bulk.insert(doc);
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(this.numDocs, res.nInserted);
this.getIndexSpecs().forEach(function ensureIndex(indexSpec) {
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update.js b/jstests/concurrency/fsm_workloads/findAndModify_update.js
index 03e391409ae..9035e6f472f 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update.js
@@ -22,7 +22,7 @@ var $config = (function() {
function init(db, collName) {
for (var i = 0; i < this.numDocsPerThread; ++i) {
var res = db[collName].insert(makeDoc(this.tid));
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(1, res.nInserted);
}
}
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
index 68de0be1cc1..19ba3f27ab9 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
@@ -41,7 +41,7 @@ var $config = (function() {
this.bsonsize = Object.bsonsize(doc);
var res = db[collName].insert(doc);
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(1, res.nInserted);
}
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base.js b/jstests/concurrency/fsm_workloads/indexed_insert_base.js
index 6b44042e59f..e098d5b9cfd 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_base.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_base.js
@@ -35,7 +35,7 @@ var $config = (function() {
insert: function insert(db, collName) {
var res = db[collName].insert(this.getDoc());
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(1, res.nInserted, tojson(res));
this.nInserted += this.docsPerInsert;
},
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js b/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
index 4fc72a6cd0c..148c77edbfb 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
@@ -25,7 +25,7 @@ var $config = extendWorkload($config, function($config, $super) {
bulk.insert(doc);
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(this.docsPerInsert, res.nInserted, tojson(res));
this.nInserted += this.docsPerInsert;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text.js b/jstests/concurrency/fsm_workloads/indexed_insert_text.js
index 7967bd30811..beea5501dd7 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_text.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_text.js
@@ -17,7 +17,7 @@ var $config = (function() {
var snippet = this.getRandomTextSnippet();
doc[this.indexedField] = snippet;
var res = db[collName].insert(doc);
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(1, res.nInserted, tojson(res));
// TODO: what else can we assert? should that go in a read test?
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
index 4cb203311ca..e9b949f02c6 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
@@ -12,13 +12,13 @@ var $config = (function() {
var states = {
init: function init(db, collName) {
var res = db[collName].insert({indexed_insert_ttl: new ISODate(), first: true});
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertWhenOwnColl.eq(1, res.nInserted, tojson(res));
},
insert: function insert(db, collName) {
var res = db[collName].insert({indexed_insert_ttl: new ISODate()});
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertWhenOwnColl.eq(1, res.nInserted, tojson(res));
}
};
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js b/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
index 233f630a8b4..e8fe91483d4 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
@@ -25,7 +25,7 @@ var $config = extendWorkload($config, function($config, $super) {
bulk.insert(doc);
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(this.docsPerInsert, res.nInserted, tojson(res));
this.nInserted += this.docsPerInsert;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_where.js b/jstests/concurrency/fsm_workloads/indexed_insert_where.js
index b44967d3550..88c69594378 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_where.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_where.js
@@ -26,7 +26,7 @@ var $config = (function() {
bulk.insert(this.generateDocumentToInsert());
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(this.documentsToInsert, res.nInserted);
this.insertedDocuments += this.documentsToInsert;
},
diff --git a/jstests/concurrency/fsm_workloads/invalidated_cursors.js b/jstests/concurrency/fsm_workloads/invalidated_cursors.js
index 58a3f007aaa..341e1185499 100644
--- a/jstests/concurrency/fsm_workloads/invalidated_cursors.js
+++ b/jstests/concurrency/fsm_workloads/invalidated_cursors.js
@@ -38,7 +38,7 @@ var $config = (function() {
bulk.insert({});
}
let res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(this.numDocs, res.nInserted, tojson(res));
this.indexSpecs.forEach(indexSpec => {
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_drop.js b/jstests/concurrency/fsm_workloads/map_reduce_drop.js
index 9066bca8375..64de629acb7 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_drop.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_drop.js
@@ -58,7 +58,7 @@ var $config = (function() {
bulk.insert({key: Random.randInt(10000)});
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
var options = {
finalize: function finalize(key, reducedValue) {
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_inline.js b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
index 203fecbe6a8..94382b1fe70 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_inline.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
@@ -78,7 +78,7 @@ var $config = (function() {
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(this.numDocs, res.nInserted);
}
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js b/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
index 59c6870b7b5..1bd48dce230 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
@@ -23,7 +23,7 @@ var $config = extendWorkload($config, function($config, $super) {
for (var i = 0; i < 20; ++i) {
var res = db[collName].remove({value: {$gte: Random.randInt(this.numDocs / 10)}},
{justOne: true});
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.lte(0, res.nRemoved, tojson(res));
}
};
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js
index 0c097015c46..ed01e10ad88 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js
@@ -23,7 +23,7 @@ var $config = extendWorkload($config, function($config, $super) {
$config.states.init = function init(db, collName) {
$super.states.init.apply(this, arguments);
- assertWhenOwnColl.writeOK(db[this.majorityWriteCollName].insert(
+ assertWhenOwnColl.commandWorked(db[this.majorityWriteCollName].insert(
{_id: this.tid, counter: this.counter}, {writeConcern: {w: 'majority'}}));
};
@@ -34,7 +34,7 @@ var $config = extendWorkload($config, function($config, $super) {
*/
$config.states.majorityWriteUnrelatedDoc = function majorityWriteUnrelatedDoc(db, collName) {
this.counter += 1;
- assertWhenOwnColl.writeOK(db[this.majorityWriteCollName].update(
+ assertWhenOwnColl.commandWorked(db[this.majorityWriteCollName].update(
{_id: this.tid}, {$set: {counter: this.counter}}, {writeConcern: {w: 'majority'}}));
// As soon as the write returns, its effects should be visible in the majority snapshot.
@@ -56,9 +56,10 @@ var $config = extendWorkload($config, function($config, $super) {
// based on the thread's id, since threads may concurrently write to the same document.
const transactionDocId = Random.randInt(this.numAccounts);
const threadUniqueField = 'thread' + this.tid;
- assertWhenOwnColl.writeOK(db[collName].update({_id: transactionDocId},
- {$set: {[threadUniqueField]: this.counter}},
- {writeConcern: {w: 'majority'}}));
+ assertWhenOwnColl.commandWorked(
+ db[collName].update({_id: transactionDocId},
+ {$set: {[threadUniqueField]: this.counter}},
+ {writeConcern: {w: 'majority'}}));
// As soon as the write returns, its effects should be visible in the majority snapshot.
const doc = db[collName].findOne({_id: transactionDocId});
diff --git a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
index 1ab8a48212c..58cc84f9582 100644
--- a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
+++ b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
@@ -17,7 +17,7 @@ var $config = (function() {
bulk.insert({a: 1, b: Random.rand()});
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
// Create two indexes to force plan caching: The {a: 1} index is
// cached by the query planner because we query on a single value
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_base.js b/jstests/concurrency/fsm_workloads/random_moveChunk_base.js
index 866d21a6b72..f150829f61b 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_base.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_base.js
@@ -117,7 +117,7 @@ var $config = extendWorkload($config, function($config, $super) {
// Give each document the same shard key and _id value, but a different tid.
bulk.insert({_id: i, skey: i, tid: chosenThread});
}
- assertAlways.writeOK(bulk.execute());
+ assertAlways.commandWorked(bulk.execute());
// Create a chunk with boundaries matching the partition's. The low chunk's lower bound
// is minKey, so a split is not necessary.
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js
index 6f62354216f..c458c32c730 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js
@@ -80,7 +80,7 @@ var $config = extendWorkload($config, function($config, $super) {
const collection = this.session.getDatabase(db.getName()).getCollection(collName);
withTxnAndAutoRetry(this.session, () => {
- assertWhenOwnColl.writeOK(collection.remove({_id: idToDelete}, {multi: false}));
+ assertWhenOwnColl.commandWorked(collection.remove({_id: idToDelete}, {multi: false}));
});
// Remove the deleted document from the in-memory representation.
@@ -105,7 +105,7 @@ var $config = extendWorkload($config, function($config, $super) {
const collection = this.session.getDatabase(db.getName()).getCollection(collName);
withTxnAndAutoRetry(this.session, () => {
- assertWhenOwnColl.writeOK(
+ assertWhenOwnColl.commandWorked(
collection.remove({tid: this.tid, groupId: groupIdToDelete}, {multi: true}));
});
@@ -153,7 +153,8 @@ var $config = extendWorkload($config, function($config, $super) {
// deleted by group later.
let nextGroupId = 0;
db[collName].find({tid: this.tid}).forEach(doc => {
- assert.writeOK(db[collName].update({_id: doc._id}, {$set: {groupId: nextGroupId}}));
+ assert.commandWorked(
+ db[collName].update({_id: doc._id}, {$set: {groupId: nextGroupId}}));
nextGroupId = (nextGroupId + 1) % this.numGroupsWithinThread;
});
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js
index 706657b9631..74180f5c378 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js
@@ -50,7 +50,7 @@ var $config = extendWorkload($config, function($config, $super) {
const collection = this.session.getDatabase(db.getName()).getCollection(collName);
withTxnAndAutoRetry(this.session, () => {
- assertWhenOwnColl.writeOK(
+ assertWhenOwnColl.commandWorked(
collection.update({_id: idToUpdate}, {$inc: {counter: 1}}, {multi: false}));
});
@@ -65,7 +65,7 @@ var $config = extendWorkload($config, function($config, $super) {
$config.states.multiUpdate = function multiUpdate(db, collName, connCache) {
const collection = this.session.getDatabase(db.getName()).getCollection(collName);
withTxnAndAutoRetry(this.session, () => {
- assertWhenOwnColl.writeOK(
+ assertWhenOwnColl.commandWorked(
collection.update({tid: this.tid}, {$inc: {counter: 1}}, {multi: true}));
});
@@ -100,7 +100,7 @@ var $config = extendWorkload($config, function($config, $super) {
// Assign a default counter value to each document owned by this thread.
db[collName].find({tid: this.tid}).forEach(doc => {
this.expectedCounters[doc._id] = 0;
- assert.writeOK(db[collName].update({_id: doc._id}, {$set: {counter: 0}}));
+ assert.commandWorked(db[collName].update({_id: doc._id}, {$set: {counter: 0}}));
});
};
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js b/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js
index 0b22a7909a9..02c1c357345 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js
@@ -243,7 +243,7 @@ var $config = extendWorkload($config, function($config, $super) {
this.generateRandomUpdateStyle(idToUpdate, newShardKey, counterForId),
{multi: false});
try {
- assertWhenOwnColl.writeOK(updateResult);
+ assertWhenOwnColl.commandWorked(updateResult);
this.expectedCounters[idToUpdate] = counterForId + 1;
} catch (e) {
const err = updateResult instanceof WriteResult ? updateResult.getWriteError()
@@ -331,7 +331,7 @@ var $config = extendWorkload($config, function($config, $super) {
// Assign a default counter value to each document owned by this thread.
db[collName].find({tid: this.tid}).forEach(doc => {
this.expectedCounters[doc._id] = 0;
- assert.writeOK(db[collName].update({_id: doc._id}, {$set: {counter: 0}}));
+ assert.commandWorked(db[collName].update({_id: doc._id}, {$set: {counter: 0}}));
});
};
@@ -354,7 +354,7 @@ var $config = extendWorkload($config, function($config, $super) {
bulk.insert({_id: i, skey: i, tid: tid});
}
- assertAlways.writeOK(bulk.execute());
+ assertAlways.commandWorked(bulk.execute());
// Create a chunk with boundaries matching the partition's. The low chunk's lower bound
// is minKey, so a split is not necessary.
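
The try/catch above distinguishes write errors surfaced on a WriteResult from command-level failures; that branching, as a hedged standalone sketch:

    const res = db.shard_key_demo.update({_id: 0}, {$set: {counter: 1}});  // illustrative
    try {
        assert.commandWorked(res);
    } catch (e) {
        // A WriteResult carries per-write errors; other result shapes surface the
        // error on the result object itself.
        const err = res instanceof WriteResult ? res.getWriteError() : res;
        jsTestLog("update failed with code: " + tojson(err.code));
        throw e;
    }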
diff --git a/jstests/concurrency/fsm_workloads/reindex.js b/jstests/concurrency/fsm_workloads/reindex.js
index f46a57d5243..890d52e78cc 100644
--- a/jstests/concurrency/fsm_workloads/reindex.js
+++ b/jstests/concurrency/fsm_workloads/reindex.js
@@ -29,7 +29,7 @@ var $config = (function() {
});
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(this.nDocumentsToInsert, res.nInserted);
}
diff --git a/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js b/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
index 0401126e907..8ef93b538c9 100644
--- a/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
+++ b/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
@@ -16,7 +16,7 @@ var $config = (function() {
for (var i = 0; i < 100; ++i) {
bulk.insert({});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
},
remove: function remove(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document.js b/jstests/concurrency/fsm_workloads/remove_single_document.js
index c2d3831f223..d6191da48f6 100644
--- a/jstests/concurrency/fsm_workloads/remove_single_document.js
+++ b/jstests/concurrency/fsm_workloads/remove_single_document.js
@@ -44,7 +44,7 @@ var $config = (function() {
return db[collName].remove(query, options);
},
assertResult: function assertResult(res) {
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
// when running on its own collection,
// this iteration should remove exactly one document
assertWhenOwnColl.eq(1, res.nRemoved, tojson(res));
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
index 72d966bb7a3..b2659b50dd4 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
@@ -27,7 +27,7 @@ var $config = (function() {
function insert(db, collName, numDocs) {
for (var i = 0; i < numDocs; ++i) {
var res = db[collName].insert({});
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(1, res.nInserted);
}
}
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
index 9d2f0b2ac45..90adf57a0e6 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
@@ -27,7 +27,7 @@ var $config = (function() {
function insert(db, collName, numDocs) {
for (var i = 0; i < numDocs; ++i) {
var res = db[collName].insert({});
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(1, res.nInserted);
}
}
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
index e9063730aea..7a7c133b1fc 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
@@ -23,7 +23,7 @@ var $config = (function() {
function insert(db, collName, numDocs) {
for (var i = 0; i < numDocs; ++i) {
var res = db[collName].insert({});
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(1, res.nInserted);
}
}
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
index 91a810015ae..140b8dc5dde 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
@@ -23,7 +23,7 @@ var $config = (function() {
function insert(db, collName, numDocs) {
for (var i = 0; i < numDocs; ++i) {
var res = db[collName].insert({});
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(1, res.nInserted);
}
}
diff --git a/jstests/concurrency/fsm_workloads/secondary_reads.js b/jstests/concurrency/fsm_workloads/secondary_reads.js
index f04ec6e75dd..2c02c530849 100644
--- a/jstests/concurrency/fsm_workloads/secondary_reads.js
+++ b/jstests/concurrency/fsm_workloads/secondary_reads.js
@@ -36,7 +36,7 @@ var $config = (function() {
bulk.insert({_id: i, x: i});
}
let res = bulk.execute(writeConcern);
- assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.commandWorked(res);
assertWhenOwnColl.eq(this.nDocumentsToInsert, res.nInserted);
this.nDocumentsInTotal += this.nDocumentsToInsert;
}
diff --git a/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
index 7d16f61dd8d..5272629c710 100644
--- a/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
+++ b/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
@@ -170,7 +170,7 @@ var $config = (function() {
for (var i = partition.lower; i < partition.upper; ++i) {
bulk.insert({_id: i});
}
- assertAlways.writeOK(bulk.execute());
+ assertAlways.commandWorked(bulk.execute());
// Add split point for lower end of this thread's partition.
// Since a split point will be created at the low end of each partition,
diff --git a/jstests/concurrency/fsm_workloads/sharded_moveChunk_drop_shard_key_index.js b/jstests/concurrency/fsm_workloads/sharded_moveChunk_drop_shard_key_index.js
index 70515b7eaa9..0976f70c720 100644
--- a/jstests/concurrency/fsm_workloads/sharded_moveChunk_drop_shard_key_index.js
+++ b/jstests/concurrency/fsm_workloads/sharded_moveChunk_drop_shard_key_index.js
@@ -61,7 +61,7 @@ var $config = (function() {
}
var res = bulk.execute();
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(this.numSplitPoints, res.nInserted, tojson(res));
for (i = 0; i < this.numSplitPoints; ++i) {
diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js b/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js
index 008b0fe05f0..87a0c2f0515 100644
--- a/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js
+++ b/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js
@@ -46,7 +46,7 @@ var $config = (function() {
insertDocs: function insertDocs(db, collName) {
for (let i = 0; i < this.numDocsToInsertPerThread; ++i) {
const res = db[collName].insert({value: this.valueToBeInserted});
- assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.commandWorked(res);
assertWhenOwnColl.eq(1, res.nInserted);
}
},
@@ -146,7 +146,7 @@ var $config = (function() {
assertWhenOwnColl.commandWorked(db.runCommand({create: collName}));
for (let i = 0; i < this.numIds; ++i) {
const res = db[collName].insert({_id: i, value: this.valueToBeInserted});
- assert.writeOK(res);
+ assert.commandWorked(res);
assert.eq(1, res.nInserted);
}
}
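The snapshot_read hunks above show the tiers of the FSM assertion library side by side: plain assert for single-threaded setup, assertWhenOwnColl for checks that only hold when no other workload shares the collection, and assertAlways (used throughout the neighbouring files) for invariants that must hold regardless. A sketch of the tiering, assuming the concurrency framework's helpers are in scope and the document count is illustrative:

    insertDocs: function insertDocs(db, collName) {
        for (let i = 0; i < 10; ++i) {
            const res = db[collName].insert({value: i});
            // Enforced only when this workload owns the collection; a
            // workload sharing it could drop the collection mid-insert.
            assertWhenOwnColl.commandWorked(res);
            assertWhenOwnColl.eq(1, res.nInserted);
        }
    },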
diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js b/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js
index 70db1a7c44b..ee33ab8978a 100644
--- a/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js
+++ b/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js
@@ -131,7 +131,7 @@ var $config = (function() {
assertWhenOwnColl.commandWorked(db.runCommand({create: collName}));
for (let i = 0; i < this.numIds; ++i) {
const res = db[collName].insert({_id: i, value: i});
- assert.writeOK(res);
+ assert.commandWorked(res);
assert.eq(1, res.nInserted);
}
}
diff --git a/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js b/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
index ed27a6bbc63..56d57513831 100644
--- a/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
+++ b/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
@@ -17,7 +17,7 @@ var $config = (function() {
for (var i = 0; i < 100; ++i) {
bulk.insert({});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
},
update: function update(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/update_array.js b/jstests/concurrency/fsm_workloads/update_array.js
index d4923b485bb..661656e829d 100644
--- a/jstests/concurrency/fsm_workloads/update_array.js
+++ b/jstests/concurrency/fsm_workloads/update_array.js
@@ -114,7 +114,7 @@ var $config = (function() {
assertAlways.commandWorked(db[collName].ensureIndex({arr: 1}));
for (var i = 0; i < this.numDocs; ++i) {
var res = db[collName].insert({_id: i, arr: []});
- assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.commandWorked(res);
assertWhenOwnColl.eq(1, res.nInserted);
}
}
diff --git a/jstests/concurrency/fsm_workloads/update_check_index.js b/jstests/concurrency/fsm_workloads/update_check_index.js
index 8a94c568d6f..8cfa98f6c84 100644
--- a/jstests/concurrency/fsm_workloads/update_check_index.js
+++ b/jstests/concurrency/fsm_workloads/update_check_index.js
@@ -25,7 +25,7 @@ var $config = (function() {
assertAlways.commandWorked(db[collName].ensureIndex({c: 1}));
for (var i = 0; i < 10; i++) {
- assertAlways.writeOK(db[collName].insert({a: 1, b: 1, c: 1}));
+ assertAlways.commandWorked(db[collName].insert({a: 1, b: 1, c: 1}));
}
}
diff --git a/jstests/concurrency/fsm_workloads/update_multifield.js b/jstests/concurrency/fsm_workloads/update_multifield.js
index da980099b70..cd1d0dd172e 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield.js
@@ -63,7 +63,7 @@ var $config = (function() {
for (var i = 0; i < this.numDocs; ++i) {
var res = db[collName].insert({_id: i});
- assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.commandWorked(res);
assertWhenOwnColl.eq(1, res.nInserted);
}
}
diff --git a/jstests/concurrency/fsm_workloads/update_rename.js b/jstests/concurrency/fsm_workloads/update_rename.js
index 675c469848e..c542382fc89 100644
--- a/jstests/concurrency/fsm_workloads/update_rename.js
+++ b/jstests/concurrency/fsm_workloads/update_rename.js
@@ -55,7 +55,7 @@ var $config = (function() {
var doc = {};
doc[fieldName] = i;
var res = db[collName].insert(doc);
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
assertAlways.eq(1, res.nInserted);
}
}
diff --git a/jstests/concurrency/fsm_workloads/update_replace.js b/jstests/concurrency/fsm_workloads/update_replace.js
index 12b6c8026f4..882e5faaa4a 100644
--- a/jstests/concurrency/fsm_workloads/update_replace.js
+++ b/jstests/concurrency/fsm_workloads/update_replace.js
@@ -72,7 +72,7 @@ var $config = (function() {
for (var i = 0; i < this.numDocs; ++i) {
var res = db[collName].insert({_id: i});
- assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.commandWorked(res);
assertWhenOwnColl.eq(1, res.nInserted);
}
diff --git a/jstests/concurrency/fsm_workloads/update_simple.js b/jstests/concurrency/fsm_workloads/update_simple.js
index 65178eafafb..75614cc4ddd 100644
--- a/jstests/concurrency/fsm_workloads/update_simple.js
+++ b/jstests/concurrency/fsm_workloads/update_simple.js
@@ -37,7 +37,7 @@ var $config = (function() {
// make sure the inserted docs have a 'value' field, so they won't need
// to grow when this workload runs against a capped collection
var res = db[collName].insert({_id: i, value: 0});
- assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.commandWorked(res);
assertWhenOwnColl.eq(1, res.nInserted);
}
}
diff --git a/jstests/concurrency/fsm_workloads/update_where.js b/jstests/concurrency/fsm_workloads/update_where.js
index b9723c86c07..5e5b2d11574 100644
--- a/jstests/concurrency/fsm_workloads/update_where.js
+++ b/jstests/concurrency/fsm_workloads/update_where.js
@@ -25,7 +25,7 @@ var $config = extendWorkload($config, function($config, $super) {
},
{$set: {x: Random.randInt(this.randomBound)}},
{multi: true});
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
if (db.getMongo().writeMode() === 'commands') {
assertWhenOwnColl.gte(res.nModified, 0);
diff --git a/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js b/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
index 98ccaa7e3d0..aa7f463a766 100644
--- a/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
+++ b/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
@@ -126,10 +126,10 @@ var $config = (function() {
function setup(db, collName, cluster) {
const coll = db[collName];
- assertAlways.writeOK(coll.insert({a: 1, b: 2}));
- assertAlways.writeOK(coll.insert({a: 2, b: 3}));
- assertAlways.writeOK(coll.insert({a: 3, b: 4}));
- assertAlways.writeOK(coll.insert({a: 4, b: 1}));
+ assertAlways.commandWorked(coll.insert({a: 1, b: 2}));
+ assertAlways.commandWorked(coll.insert({a: 2, b: 3}));
+ assertAlways.commandWorked(coll.insert({a: 3, b: 4}));
+ assertAlways.commandWorked(coll.insert({a: 4, b: 1}));
for (let viewName of this.viewList) {
assertAlways.commandWorked(db.createView(viewName, collName, []));
diff --git a/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js b/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js
index 51eee139928..d974376d10c 100644
--- a/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js
+++ b/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js
@@ -87,7 +87,7 @@ var $config = (function() {
function setup(db, collName, cluster) {
let coll = db[collName];
- assertAlways.writeOK(coll.insert({x: 1}));
+ assertAlways.commandWorked(coll.insert({x: 1}));
for (let viewName of this.viewList) {
assertAlways.commandWorked(db.createView(viewName, collName, []));
diff --git a/jstests/concurrency/fsm_workloads/yield.js b/jstests/concurrency/fsm_workloads/yield.js
index 748c912352c..5202e5e4ee8 100644
--- a/jstests/concurrency/fsm_workloads/yield.js
+++ b/jstests/concurrency/fsm_workloads/yield.js
@@ -68,7 +68,7 @@ var $config = (function() {
return;
}
var updateDoc = this.genUpdateDoc();
- assertAlways.writeOK(db[collName].update(randDoc, updateDoc));
+ assertAlways.commandWorked(db[collName].update(randDoc, updateDoc));
},
/*
@@ -80,9 +80,9 @@ var $config = (function() {
var doc = db[collName].findOne({_id: id});
if (doc !== null) {
var res = db[collName].remove({_id: id});
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
if (res.nRemoved > 0) {
- assertAlways.writeOK(db[collName].insert(doc));
+ assertAlways.commandWorked(db[collName].insert(doc));
}
}
},
@@ -144,7 +144,7 @@ var $config = (function() {
bulk.find({_id: i}).upsert().updateOne(
{$set: {a: i, b: N - i, c: i, d: N - i, yield_text: word}});
}
- assertAlways.writeOK(bulk.execute());
+ assertAlways.commandWorked(bulk.execute());
}
/*
diff --git a/jstests/concurrency/fsm_workloads/yield_geo_near.js b/jstests/concurrency/fsm_workloads/yield_geo_near.js
index b025b7ec23b..e6509bb7503 100644
--- a/jstests/concurrency/fsm_workloads/yield_geo_near.js
+++ b/jstests/concurrency/fsm_workloads/yield_geo_near.js
@@ -73,7 +73,7 @@ var $config = extendWorkload($config, function($config, $super) {
i++;
}
}
- assertAlways.writeOK(bulk.execute());
+ assertAlways.commandWorked(bulk.execute());
assertAlways.commandWorked(db[collName].ensureIndex(this.getIndexSpec()));
};
diff --git a/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js b/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
index 1e6da602641..7d3af9d2c24 100644
--- a/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
+++ b/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
@@ -13,13 +13,13 @@ var $config = extendWorkload($config, function($config, $super) {
var doc = db[collName].findOne({_id: id});
if (doc !== null) {
var res = db[collName].remove({_id: id});
- assertAlways.writeOK(res);
+ assertAlways.commandWorked(res);
if (res.nRemoved > 0) {
// Re-insert the document with the same '_id', but an incremented
// 'timesInserted' to
// distinguish it from the deleted document.
doc.timesInserted++;
- assertAlways.writeOK(db[collName].insert(doc));
+ assertAlways.commandWorked(db[collName].insert(doc));
}
}
};
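The nRemoved guard in the hunk above is load-bearing: under concurrency another thread can delete the same _id first, and re-inserting unconditionally would resurrect a document this iteration never removed. Condensed, assuming doc was fetched by _id just beforehand:

    var res = db[collName].remove({_id: id});
    assertAlways.commandWorked(res);
    if (res.nRemoved > 0) {
        // Bump the counter so the re-inserted copy is distinguishable from
        // the deleted one when geoNear results are checked for duplicates.
        doc.timesInserted++;
        assertAlways.commandWorked(db[collName].insert(doc));
    }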
diff --git a/jstests/core/SERVER-23626.js b/jstests/core/SERVER-23626.js
index 9a25bda2291..31f963bd313 100644
--- a/jstests/core/SERVER-23626.js
+++ b/jstests/core/SERVER-23626.js
@@ -4,14 +4,14 @@
var t = db.jstests_server23626;
t.mycoll.drop();
-assert.writeOK(t.mycoll.insert({_id: 0, a: Date.prototype}));
+assert.commandWorked(t.mycoll.insert({_id: 0, a: Date.prototype}));
assert.eq(1, t.mycoll.find({a: {$type: 'date'}}).itcount());
t.mycoll.drop();
-assert.writeOK(t.mycoll.insert({_id: 0, a: Function.prototype}));
+assert.commandWorked(t.mycoll.insert({_id: 0, a: Function.prototype}));
assert.eq(1, t.mycoll.find({a: {$type: 'javascript'}}).itcount());
t.mycoll.drop();
-assert.writeOK(t.mycoll.insert({_id: 0, a: RegExp.prototype}));
+assert.commandWorked(t.mycoll.insert({_id: 0, a: RegExp.prototype}));
assert.eq(1, t.mycoll.find({a: {$type: 'regex'}}).itcount());
}());
\ No newline at end of file
diff --git a/jstests/core/add_skip_stage_before_fetch.js b/jstests/core/add_skip_stage_before_fetch.js
index aaad7bb5db3..846cab46e1f 100644
--- a/jstests/core/add_skip_stage_before_fetch.js
+++ b/jstests/core/add_skip_stage_before_fetch.js
@@ -29,7 +29,7 @@ for (let i = 0; i < 10000; i++) {
d: Math.floor(Math.random() * 1000)
});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// The {a: 0, b: 2} query will match exactly one quarter of the documents in the collection:
// 2500 in total. In the test queries below, we skip the first 2400, returning exactly 100
diff --git a/jstests/core/agg_hint.js b/jstests/core/agg_hint.js
index 899bbd2217a..10b3c6192d8 100644
--- a/jstests/core/agg_hint.js
+++ b/jstests/core/agg_hint.js
@@ -72,7 +72,7 @@ function confirmAggUsesIndex({
// Specify hint as a string, representing index name.
assert.commandWorked(coll.createIndex({x: 1}));
for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({x: i}));
+ assert.commandWorked(coll.insert({x: i}));
}
confirmAggUsesIndex({
@@ -94,7 +94,7 @@ confirmAggUsesIndex({
coll.drop();
assert.commandWorked(coll.createIndex({x: 1}));
for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({x: i}));
+ assert.commandWorked(coll.insert({x: i}));
}
confirmAggUsesIndex({
@@ -125,7 +125,7 @@ coll.drop();
assert.commandWorked(coll.createIndex({x: 1}));
assert.commandWorked(coll.createIndex({y: 1}));
for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({x: i, y: i}));
+ assert.commandWorked(coll.insert({x: i, y: i}));
}
confirmAggUsesIndex({
@@ -155,7 +155,7 @@ coll.drop();
assert.commandWorked(coll.createIndex({x: 1}));
assert.commandWorked(coll.createIndex({x: 1, y: 1}));
for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({x: i, y: i}));
+ assert.commandWorked(coll.insert({x: i, y: i}));
}
confirmAggUsesIndex({
@@ -183,7 +183,7 @@ coll.drop();
view.drop();
assert.commandWorked(coll.createIndex({x: 1}));
for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({x: i}));
+ assert.commandWorked(coll.insert({x: i}));
}
assert.commandWorked(testDB.createView("view", "test", [{$match: {x: {$gte: 0}}}]));
@@ -213,7 +213,7 @@ coll.drop();
view.drop();
assert.commandWorked(coll.createIndex({x: 1}));
for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({x: i}));
+ assert.commandWorked(coll.insert({x: i}));
}
assert.commandWorked(testDB.createView("view", "test", []));
@@ -240,7 +240,7 @@ coll.drop();
view.drop();
assert.commandWorked(coll.createIndex({x: 1}));
for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({x: i}));
+ assert.commandWorked(coll.insert({x: i}));
}
assert.commandWorked(testDB.createView("view", "test", []));
diff --git a/jstests/core/aggregation_getmore_batchsize.js b/jstests/core/aggregation_getmore_batchsize.js
index c723d2ca45d..7cef863979d 100644
--- a/jstests/core/aggregation_getmore_batchsize.js
+++ b/jstests/core/aggregation_getmore_batchsize.js
@@ -10,7 +10,7 @@ db.getMongo().forceReadMode("commands");
var coll = db["aggregation_getmore_batchsize"];
// Insert some data to query for
-assert.writeOK(coll.insert([{a: 1}, {a: 1}, {a: 1}, {a: 1}, {a: 1}, {a: 1}]));
+assert.commandWorked(coll.insert([{a: 1}, {a: 1}, {a: 1}, {a: 1}, {a: 1}, {a: 1}]));
// Create a cursor with a batch size of 2 (should require three full batches to return all
// documents).
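The comment above does the arithmetic implicitly: six matching documents at a batch size of 2 means the initial find reply carries two documents and two getMore round trips fetch the rest, i.e. three full batches. A sketch, assuming coll is the collection populated above:

    const cursor = coll.find().batchSize(2);
    // itcount() drains the cursor, issuing getMores until exhaustion;
    // with 6 documents and batches of 2 that is three batches in total.
    assert.eq(6, cursor.itcount());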
diff --git a/jstests/core/apply_ops_dups.js b/jstests/core/apply_ops_dups.js
index 85bc04437a6..63659c8082c 100644
--- a/jstests/core/apply_ops_dups.js
+++ b/jstests/core/apply_ops_dups.js
@@ -12,7 +12,7 @@ var t = db.apply_ops_dups;
t.drop();
// Check that duplicate _id fields don't cause an error
-assert.writeOK(t.insert({_id: 0, x: 1}));
+assert.commandWorked(t.insert({_id: 0, x: 1}));
assert.commandWorked(t.createIndex({x: 1}, {unique: true}));
var a = assert.commandWorked(db.adminCommand({
applyOps: [
diff --git a/jstests/core/apply_ops_invalid_index_spec.js b/jstests/core/apply_ops_invalid_index_spec.js
index d602cae29c1..6d1024d14d3 100644
--- a/jstests/core/apply_ops_invalid_index_spec.js
+++ b/jstests/core/apply_ops_invalid_index_spec.js
@@ -25,7 +25,7 @@ const cmdNs = db.getName() + '.$cmd';
const systemIndexesNs = db.getCollection('system.indexes').getFullName();
assert.commandWorked(db.createCollection(t.getName()));
-assert.writeOK(t.save({_id: 100, a: 100}));
+assert.commandWorked(t.save({_id: 100, a: 100}));
// Tests that db.collection.createIndex() fails when given an index spec containing an unknown
// field.
diff --git a/jstests/core/awaitdata_getmore_cmd.js b/jstests/core/awaitdata_getmore_cmd.js
index eb1fb194a32..d142615fa94 100644
--- a/jstests/core/awaitdata_getmore_cmd.js
+++ b/jstests/core/awaitdata_getmore_cmd.js
@@ -24,7 +24,7 @@ var coll = db[collName];
// Create a non-capped collection with 10 documents.
coll.drop();
for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
// Find with tailable flag set should fail for a non-capped collection.
@@ -44,7 +44,7 @@ assert.eq(cmdRes.cursor.firstBatch.length, 0);
// Create a capped collection with 10 documents.
assert.commandWorked(db.createCollection(collName, {capped: true, size: 2048}));
for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
// GetMore should succeed if query has awaitData but no maxTimeMS is supplied.
@@ -161,7 +161,7 @@ assert.eq(cmdRes.cursor.firstBatch.length, 0);
// the user if a document was inserted, but it did not match the filter.
let insertshell = startParallelShell(() => {
// Signal to the original shell that the parallel shell has successfully started.
- assert.writeOK(db.await_data.insert({_id: "signal parent shell"}));
+ assert.commandWorked(db.await_data.insert({_id: "signal parent shell"}));
// Wait for the parent shell to start watching for the next document.
assert.soon(() => db.currentOp({
@@ -171,7 +171,7 @@ let insertshell = startParallelShell(() => {
() => tojson(db.currentOp().inprog));
// Now write a non-matching document to the collection.
- assert.writeOK(db.await_data.insert({_id: "no match", x: 0}));
+ assert.commandWorked(db.await_data.insert({_id: "no match", x: 0}));
// Make sure the getMore has not ended after a while.
sleep(2000);
@@ -182,7 +182,7 @@ let insertshell = startParallelShell(() => {
tojson(db.currentOp().inprog));
// Now write a matching document to wake it up.
- assert.writeOK(db.await_data.insert({_id: "match", x: 1}));
+ assert.commandWorked(db.await_data.insert({_id: "match", x: 1}));
});
// Wait until we receive confirmation that the parallel shell has started.
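The inserts in this parallel shell double as a handshake: the first write signals that the shell is alive, the non-matching write proves the awaitData getMore stays blocked on documents that fail the filter, and the matching write finally wakes it. A stripped-down sketch of the idiom (the real test interleaves currentOp polling and sleeps between the steps, omitted here):

    const joinShell = startParallelShell(() => {
        // 1. Tell the parent shell we are alive.
        assert.commandWorked(db.await_data.insert({_id: "signal parent shell"}));
        // 2. A document the tailable cursor's filter rejects; the blocked
        //    getMore must not return because of it.
        assert.commandWorked(db.await_data.insert({_id: "no match", x: 0}));
        // 3. A matching document; the getMore should now complete.
        assert.commandWorked(db.await_data.insert({_id: "match", x: 1}));
    });
    joinShell();  // wait for the parallel shell to exit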
diff --git a/jstests/core/background_index_multikey.js b/jstests/core/background_index_multikey.js
index 3db0c2d81f1..e9d0f0aa9ed 100644
--- a/jstests/core/background_index_multikey.js
+++ b/jstests/core/background_index_multikey.js
@@ -15,7 +15,7 @@ function testIndexBuilds(isBackground) {
// Build index after multikey document is in the collection.
let doc = {_id: 0, a: [1, 2]};
- assert.writeOK(coll.insert(doc));
+ assert.commandWorked(coll.insert(doc));
assert.commandWorked(coll.createIndex({a: 1}, {background: isBackground}));
assert.eq(1, coll.count({a: 1}));
assert.eq(doc, coll.findOne({a: 1}));
@@ -24,7 +24,7 @@ function testIndexBuilds(isBackground) {
// Build index where multikey is in an embedded document.
doc = {_id: 1, b: {c: [1, 2]}};
- assert.writeOK(coll.insert(doc));
+ assert.commandWorked(coll.insert(doc));
assert.commandWorked(coll.createIndex({'b.c': 1}, {background: isBackground}));
assert.eq(1, coll.count({'b.c': 1}));
assert.eq(doc, coll.findOne({'b.c': 1}));
@@ -33,7 +33,7 @@ function testIndexBuilds(isBackground) {
// Add new multikey path to embedded path.
doc = {_id: 2, b: [1, 2]};
- assert.writeOK(coll.insert(doc));
+ assert.commandWorked(coll.insert(doc));
assert.eq(1, coll.count({b: 1}));
assert.eq(doc, coll.findOne({b: 1}));
assert.eq(1, coll.count({b: 2}));
@@ -41,11 +41,11 @@ function testIndexBuilds(isBackground) {
// Build index on a large collection that is not multikey, and then make it multikey.
for (let i = 100; i < 1100; i++) {
- assert.writeOK(coll.insert({_id: i, d: i}));
+ assert.commandWorked(coll.insert({_id: i, d: i}));
}
assert.commandWorked(coll.createIndex({d: 1}, {background: isBackground}));
doc = {_id: 3, d: [1, 2]};
- assert.writeOK(coll.insert(doc));
+ assert.commandWorked(coll.insert(doc));
assert.eq(1, coll.count({d: 1}));
assert.eq(doc, coll.findOne({d: 1}));
assert.eq(1, coll.count({d: 2}));
@@ -53,7 +53,7 @@ function testIndexBuilds(isBackground) {
// Build compound multikey index.
doc = {_id: 4, e: [1, 2]};
- assert.writeOK(coll.insert(doc));
+ assert.commandWorked(coll.insert(doc));
assert.commandWorked(coll.createIndex({'e': 1, 'f': 1}, {background: isBackground}));
assert.eq(1, coll.count({e: 1}));
assert.eq(doc, coll.findOne({e: 1}));
@@ -62,7 +62,7 @@ function testIndexBuilds(isBackground) {
// Add new multikey path to compound index.
doc = {_id: 5, f: [1, 2]};
- assert.writeOK(coll.insert(doc));
+ assert.commandWorked(coll.insert(doc));
assert.eq(1, coll.count({f: 1}));
assert.eq(doc, coll.findOne({f: 1}));
assert.eq(1, coll.count({f: 2}));
diff --git a/jstests/core/batch_write_collation_estsize.js b/jstests/core/batch_write_collation_estsize.js
index d0e4254d6b2..dd8612ef357 100644
--- a/jstests/core/batch_write_collation_estsize.js
+++ b/jstests/core/batch_write_collation_estsize.js
@@ -11,7 +11,7 @@
// Setup the test collection.
db.batch_write_collation_estsize.drop();
-assert.writeOK(db.batch_write_collation_estsize.insert({str: "FOO"}));
+assert.commandWorked(db.batch_write_collation_estsize.insert({str: "FOO"}));
if (db.getMongo().writeMode() !== "commands") {
// Cannot use the bulk API to set a collation when using legacy write ops.
@@ -160,7 +160,7 @@ if (db.getMongo().writeMode() !== "commands") {
assert.eq(1, res.deletedCount);
// Reinsert a document to test deleteMany bulk write operation.
- assert.writeOK(db.batch_write_collation_estsize.insert({str: "FOO"}));
+ assert.commandWorked(db.batch_write_collation_estsize.insert({str: "FOO"}));
// Test deleteMany bulk write operation with collation specification.
res = db.batch_write_collation_estsize.bulkWrite([{
diff --git a/jstests/core/batch_write_command_delete.js b/jstests/core/batch_write_command_delete.js
index 4004a519412..2d7feb7fe23 100644
--- a/jstests/core/batch_write_command_delete.js
+++ b/jstests/core/batch_write_command_delete.js
@@ -123,7 +123,7 @@ for (var i = 0; i < maxWriteBatchSize; ++i) {
insertBatch.insert({_id: i});
batch.push({q: {_id: i}, limit: 0});
}
-assert.writeOK(insertBatch.execute());
+assert.commandWorked(insertBatch.execute());
request = {
delete: coll.getName(),
deletes: batch,
@@ -144,7 +144,7 @@ for (var i = 0; i < maxWriteBatchSize + 1; ++i) {
insertBatch.insert({_id: i});
batch.push({q: {_id: i}, limit: 0});
}
-assert.writeOK(insertBatch.execute());
+assert.commandWorked(insertBatch.execute());
request = {
delete: coll.getName(),
deletes: batch,
diff --git a/jstests/core/bindata_indexonly.js b/jstests/core/bindata_indexonly.js
index f215a17e882..f0d645db7e7 100644
--- a/jstests/core/bindata_indexonly.js
+++ b/jstests/core/bindata_indexonly.js
@@ -11,10 +11,10 @@ load("jstests/libs/analyze_plan.js");
var coll = db.jstests_bindata_indexonly;
coll.drop();
-assert.writeOK(coll.insert({_id: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA"), a: 1}));
-assert.writeOK(coll.insert({_id: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhv"), a: 2}));
-assert.writeOK(coll.insert({_id: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhz"), a: 3}));
-assert.writeOK(coll.insert({_id: BinData(0, "////////////////////////////"), a: 4}));
+assert.commandWorked(coll.insert({_id: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA"), a: 1}));
+assert.commandWorked(coll.insert({_id: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhv"), a: 2}));
+assert.commandWorked(coll.insert({_id: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhz"), a: 3}));
+assert.commandWorked(coll.insert({_id: BinData(0, "////////////////////////////"), a: 4}));
assert.commandWorked(coll.createIndex({_id: 1, a: 1}));
assert.throws(function() {
diff --git a/jstests/core/bittest.js b/jstests/core/bittest.js
index 00785a5efcf..1ac5ddbe59b 100644
--- a/jstests/core/bittest.js
+++ b/jstests/core/bittest.js
@@ -19,11 +19,11 @@ function assertQueryCorrect(query, count) {
// Tests on numbers.
coll.drop();
-assert.writeOK(coll.insert({a: 0}));
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({a: 54}));
-assert.writeOK(coll.insert({a: 88}));
-assert.writeOK(coll.insert({a: 255}));
+assert.commandWorked(coll.insert({a: 0}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 54}));
+assert.commandWorked(coll.insert({a: 88}));
+assert.commandWorked(coll.insert({a: 255}));
assert.commandWorked(coll.createIndex({a: 1}));
// Tests with bitmask.
@@ -74,9 +74,9 @@ assertQueryCorrect({a: {$bitsAllSet: 54, $bitsAllClear: 201}}, 1);
// Tests on negative numbers.
coll.drop();
-assert.writeOK(coll.insert({a: -0}));
-assert.writeOK(coll.insert({a: -1}));
-assert.writeOK(coll.insert({a: -54}));
+assert.commandWorked(coll.insert({a: -0}));
+assert.commandWorked(coll.insert({a: -1}));
+assert.commandWorked(coll.insert({a: -54}));
// Tests with bitmask.
assertQueryCorrect({a: {$bitsAllSet: 0}}, 3);
@@ -118,10 +118,10 @@ assertQueryCorrect({a: {$bitsAllSet: 74, $bitsAllClear: 53}}, 1);
// Tests on BinData.
coll.drop();
-assert.writeOK(coll.insert({a: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}));
-assert.writeOK(coll.insert({a: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA")}));
-assert.writeOK(coll.insert({a: BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq")}));
-assert.writeOK(coll.insert({a: BinData(0, "////////////////////////////")}));
+assert.commandWorked(coll.insert({a: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}));
+assert.commandWorked(coll.insert({a: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA")}));
+assert.commandWorked(coll.insert({a: BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq")}));
+assert.commandWorked(coll.insert({a: BinData(0, "////////////////////////////")}));
assert.commandWorked(coll.createIndex({a: 1}));
// Tests with binary string bitmask.
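The combined bit-test case earlier in this file is easiest to read in binary: 54 is 0b00110110 and 201 is 0b11001001, so the two masks are disjoint and together cover the whole low byte. Of the numeric fixture {0, 1, 54, 88, 255}, only 54 has every bit of the first mask set and every bit of the second clear (255 passes $bitsAllSet but fails $bitsAllClear), which is why the expected count is 1:

    // Run against the numeric fixture above:
    // 54 = 0b00110110, 201 = 0b11001001; the masks partition the low byte.
    assert.eq(1, coll.find({a: {$bitsAllSet: 54, $bitsAllClear: 201}}).itcount());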
diff --git a/jstests/core/bulk_legacy_enforce_gle.js b/jstests/core/bulk_legacy_enforce_gle.js
index 6359b277c0b..e9def78d241 100644
--- a/jstests/core/bulk_legacy_enforce_gle.js
+++ b/jstests/core/bulk_legacy_enforce_gle.js
@@ -16,7 +16,7 @@ const coll = db.bulk_legacy_enforce_gle;
function insertDocument(doc) {
let res = coll.insert(doc);
if (res) {
- assert.writeOK(res);
+ assert.commandWorked(res);
} else {
assert.gleOK(db.runCommand({getLastError: 1}));
}
@@ -25,7 +25,7 @@ function insertDocument(doc) {
coll.drop();
let bulk = coll.initializeUnorderedBulkOp();
bulk.find({_id: 1}).upsert().updateOne({_id: 1});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
let gle = assert.gleOK(db.runCommand({getLastError: 1}));
assert.eq(1, gle.n, tojson(gle));
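The insertDocument helper above hedges for both shell write modes: with write commands the insert returns a WriteResult that commandWorked can inspect, while under legacy OP_INSERT the return value is falsy and the test falls back to polling getLastError. Restated with the reasoning inlined as comments:

    function insertDocument(doc) {
        const res = coll.insert(doc);
        if (res) {
            // Write-command path: a WriteResult we can check directly.
            assert.commandWorked(res);
        } else {
            // Legacy path: no result object, so consult getLastError.
            assert.gleOK(db.runCommand({getLastError: 1}));
        }
    }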
diff --git a/jstests/core/bypass_doc_validation.js b/jstests/core/bypass_doc_validation.js
index dcf1a0d28dc..8c9fd3e8419 100644
--- a/jstests/core/bypass_doc_validation.js
+++ b/jstests/core/bypass_doc_validation.js
@@ -47,8 +47,8 @@ function runBypassDocumentValidationTest(validator) {
coll.drop({writeConcern: {w: "majority"}});
// Insert documents into the collection that would not be valid before setting 'validator'.
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.insert({_id: 2}));
+ assert.commandWorked(coll.insert({_id: 1}));
+ assert.commandWorked(coll.insert({_id: 2}));
assert.commandWorked(myDb.runCommand({collMod: collName, validator: validator}));
const isMongos = db.runCommand({isdbgrid: 1}).isdbgrid;
@@ -117,7 +117,7 @@ function runBypassDocumentValidationTest(validator) {
assertFailsValidation(BulkWriteResult(res));
res = myDb.runCommand(
{insert: collName, documents: [{}, {_id: 6}], bypassDocumentValidation: true});
- assert.writeOK(res);
+ assert.commandWorked(res);
// Test the update command.
res = myDb.runCommand({
@@ -132,7 +132,7 @@ function runBypassDocumentValidationTest(validator) {
updates: [{q: {}, u: {$set: {update: 1}}}],
bypassDocumentValidation: true
});
- assert.writeOK(res);
+ assert.commandWorked(res);
assert.eq(1, coll.count({update: 1}));
// Pipeline-style update is only supported for commands and not for OP_UPDATE which cannot
diff --git a/jstests/core/capped6.js b/jstests/core/capped6.js
index 393d8589a60..c9d8867c3f0 100644
--- a/jstests/core/capped6.js
+++ b/jstests/core/capped6.js
@@ -70,7 +70,7 @@ function runCapTrunc(valueArray, valueArrayCurIndex, n, inc) {
assert.gt(n, 0);
assert.gte(valueArray.length, maxDocuments);
for (var i = valueArrayCurIndex; i < maxDocuments; ++i) {
- assert.writeOK(coll.insert(valueArray[i]));
+ assert.commandWorked(coll.insert(valueArray[i]));
}
count = coll.count();
diff --git a/jstests/core/capped_queries_and_id_index.js b/jstests/core/capped_queries_and_id_index.js
index 1bf463f05ed..3fdecfa1e2f 100644
--- a/jstests/core/capped_queries_and_id_index.js
+++ b/jstests/core/capped_queries_and_id_index.js
@@ -7,18 +7,18 @@ coll.drop();
assert.commandWorked(db.createCollection("capped9", {capped: true, size: 1024 * 50}));
-assert.writeOK(coll.insert({_id: 1, x: 2, y: 3}));
+assert.commandWorked(coll.insert({_id: 1, x: 2, y: 3}));
assert.eq(1, coll.find({x: 2}).itcount());
assert.eq(1, coll.find({y: 3}).itcount());
// SERVER-3064 proposes making the following queries/updates by _id result in an error.
assert.eq(1, coll.find({_id: 1}).itcount());
-assert.writeOK(coll.update({_id: 1}, {$set: {y: 4}}));
+assert.commandWorked(coll.update({_id: 1}, {$set: {y: 4}}));
assert.eq(4, coll.findOne().y);
assert.commandWorked(coll.createIndex({_id: 1}));
assert.eq(1, coll.find({_id: 1}).itcount());
-assert.writeOK(coll.update({_id: 1}, {$set: {y: 5}}));
+assert.commandWorked(coll.update({_id: 1}, {$set: {y: 5}}));
assert.eq(5, coll.findOne().y);
}());
diff --git a/jstests/core/capped_update.js b/jstests/core/capped_update.js
index f11502b45fe..d15c6939fcd 100644
--- a/jstests/core/capped_update.js
+++ b/jstests/core/capped_update.js
@@ -16,10 +16,10 @@ assert.commandWorked(
assert.eq(0, t.getIndexes().length, "the capped collection has indexes");
for (var j = 1; j <= 10; j++) {
- assert.writeOK(t.insert({_id: j, s: "Hello, World!"}));
+ assert.commandWorked(t.insert({_id: j, s: "Hello, World!"}));
}
-assert.writeOK(t.update({_id: 3}, {s: "Hello, Mongo!"})); // Mongo is same length as World
+assert.commandWorked(t.update({_id: 3}, {s: "Hello, Mongo!"})); // Mongo is same length as World
assert.writeError(t.update({_id: 3}, {$set: {s: "Hello!"}}));
assert.writeError(t.update({_id: 10}, {}));
assert.writeError(t.update({_id: 10}, {s: "Hello, World!!!"}));
@@ -27,7 +27,7 @@ assert.writeError(t.update({_id: 10}, {s: "Hello, World!!!"}));
assert.commandWorked(t.getDB().runCommand({godinsert: t.getName(), obj: {a: 2}}));
var doc = t.findOne({a: 2});
assert.eq(undefined, doc["_id"], "now has _id after godinsert");
-assert.writeOK(t.update({a: 2}, {$inc: {a: 1}}));
+assert.commandWorked(t.update({a: 2}, {$inc: {a: 1}}));
doc = t.findOne({a: 3});
assert.eq(undefined, doc["_id"], "now has _id after update");
})();
diff --git a/jstests/core/collation.js b/jstests/core/collation.js
index a3a186349e7..e96a67c3754 100644
--- a/jstests/core/collation.js
+++ b/jstests/core/collation.js
@@ -249,7 +249,7 @@ if (db.getMongo().useReadCommands()) {
coll.drop();
assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}}));
assert.commandWorked(coll.createIndex({b: 1}));
- assert.writeOK(coll.insert({a: "foo", b: "foo"}));
+ assert.commandWorked(coll.insert({a: "foo", b: "foo"}));
assert.eq(1, coll.find().collation({locale: "fr_CA"}).hint({a: 1}).returnKey().itcount());
assert.neq("foo", coll.find().collation({locale: "fr_CA"}).hint({a: 1}).returnKey().next().a);
assert.eq(1, coll.find().collation({locale: "fr_CA"}).hint({b: 1}).returnKey().itcount());
@@ -304,8 +304,8 @@ assert.eq(0, coll.aggregate([], {collation: {locale: "fr"}}).itcount());
// Aggregation should return correct results when collation specified and collection does exist.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "bar"}));
assert.eq(0, coll.aggregate([{$match: {str: "FOO"}}]).itcount());
assert.eq(1,
coll.aggregate([{$match: {str: "FOO"}}], {collation: {locale: "en_US", strength: 2}})
@@ -316,7 +316,7 @@ assert.eq(1,
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({str: "foo"}));
+assert.commandWorked(coll.insert({str: "foo"}));
assert.eq(1, coll.aggregate([{$match: {str: "FOO"}}]).itcount());
// Aggregation should return correct results when "simple" collation specified and collection
@@ -324,7 +324,7 @@ assert.eq(1, coll.aggregate([{$match: {str: "FOO"}}]).itcount());
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({str: "foo"}));
+assert.commandWorked(coll.insert({str: "foo"}));
assert.eq(0, coll.aggregate([{$match: {str: "FOO"}}], {collation: {locale: "simple"}}).itcount());
// Aggregation should select compatible index when no collation specified and collection has a
@@ -356,8 +356,8 @@ assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).count());
// Count should return correct results when collation specified and collection does exist.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "bar"}));
assert.eq(0, coll.find({str: "FOO"}).count());
assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).count());
assert.eq(1, coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).count());
@@ -370,7 +370,7 @@ assert.eq(1, coll.count({str: "FOO"}, {collation: {locale: "en_US", strength: 2}
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({str: "foo"}));
+assert.commandWorked(coll.insert({str: "foo"}));
assert.eq(1, coll.find({str: "FOO"}).count());
// Count should return correct results when "simple" collation specified and collection has a
@@ -378,13 +378,13 @@ assert.eq(1, coll.find({str: "FOO"}).count());
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({str: "foo"}));
+assert.commandWorked(coll.insert({str: "foo"}));
assert.eq(0, coll.find({str: "FOO"}).collation({locale: "simple"}).count());
// Count should return correct results when collation specified and when run with explain.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "bar"}));
explainRes = coll.explain("executionStats").find({str: "FOO"}).collation({locale: "en_US"}).count();
assert.commandWorked(explainRes);
planStage = getPlanStage(explainRes.executionStats.executionStages, "COLLSCAN");
@@ -461,8 +461,8 @@ assert.eq(0, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}
// Distinct should return correct results when collation specified and no indexes exist.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "FOO"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "FOO"}));
var res = coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}});
assert.eq(1, res.length);
assert.eq("foo", res[0].toLowerCase());
@@ -482,8 +482,8 @@ assert.eq(2, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 3}
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({str: "foo"}));
-assert.writeOK(coll.insert({str: "FOO"}));
+assert.commandWorked(coll.insert({str: "foo"}));
+assert.commandWorked(coll.insert({str: "FOO"}));
assert.eq(1, coll.distinct("str").length);
assert.eq(2, coll.distinct("_id", {str: "foo"}).length);
@@ -492,8 +492,8 @@ assert.eq(2, coll.distinct("_id", {str: "foo"}).length);
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({str: "foo"}));
-assert.writeOK(coll.insert({str: "FOO"}));
+assert.commandWorked(coll.insert({str: "foo"}));
+assert.commandWorked(coll.insert({str: "FOO"}));
assert.eq(2, coll.distinct("str", {}, {collation: {locale: "simple"}}).length);
assert.eq(1, coll.distinct("_id", {str: "foo"}, {collation: {locale: "simple"}}).length);
@@ -588,13 +588,13 @@ if (db.getMongo().useReadCommands()) {
// Find should return correct results when collation specified and filter is a match on _id.
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "bar"}));
- assert.writeOK(coll.insert({_id: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "bar"}));
+ assert.commandWorked(coll.insert({_id: "foo"}));
assert.eq(0, coll.find({_id: "FOO"}).itcount());
assert.eq(0, coll.find({_id: "FOO"}).collation({locale: "en_US"}).itcount());
assert.eq(1, coll.find({_id: "FOO"}).collation({locale: "en_US", strength: 2}).itcount());
- assert.writeOK(coll.remove({_id: "foo"}));
+ assert.commandWorked(coll.remove({_id: "foo"}));
// Find should return correct results when collation specified and no indexes exist.
assert.eq(0, coll.find({str: "FOO"}).itcount());
@@ -626,17 +626,17 @@ if (db.getMongo().useReadCommands()) {
assert.eq(
1,
coll.find({str: "foo"}).collation({locale: "en_US", strength: 2}).hint({str: 1}).itcount());
- assert.writeOK(coll.insert({_id: 3, str: "goo"}));
+ assert.commandWorked(coll.insert({_id: 3, str: "goo"}));
assert.eq(
0,
coll.find({str: "goo"}).collation({locale: "en_US", strength: 2}).hint({str: 1}).itcount());
- assert.writeOK(coll.remove({_id: 3}));
+ assert.commandWorked(coll.remove({_id: 3}));
assert.commandWorked(coll.dropIndexes());
    // Queries that use an index with a non-matching collation should add a sort
    // stage if needed.
coll.drop();
- assert.writeOK(coll.insert([{a: "A"}, {a: "B"}, {a: "b"}, {a: "a"}]));
+ assert.commandWorked(coll.insert([{a: "A"}, {a: "B"}, {a: "b"}, {a: "a"}]));
// Ensure results from an index that doesn't match the query collation are sorted to match
// the requested collation.
@@ -649,7 +649,7 @@ if (db.getMongo().useReadCommands()) {
// Find should return correct results when collation specified and query contains $expr.
coll.drop();
- assert.writeOK(coll.insert([{a: "A"}, {a: "B"}]));
+ assert.commandWorked(coll.insert([{a: "A"}, {a: "B"}]));
assert.eq(
1,
coll.find({$expr: {$eq: ["$a", "a"]}}).collation({locale: "en_US", strength: 2}).itcount());
@@ -660,9 +660,9 @@ if (db.getMongo().useReadCommands()) {
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({str: "foo"}));
-assert.writeOK(coll.insert({str: "FOO"}));
-assert.writeOK(coll.insert({str: "bar"}));
+assert.commandWorked(coll.insert({str: "foo"}));
+assert.commandWorked(coll.insert({str: "FOO"}));
+assert.commandWorked(coll.insert({str: "bar"}));
assert.eq(3, coll.find({str: {$in: ["foo", "bar"]}}).itcount());
assert.eq(2, coll.find({str: "foo"}).itcount());
assert.eq(1, coll.find({str: {$ne: "foo"}}).itcount());
@@ -674,7 +674,7 @@ assert.eq([{str: "bar"}, {str: "foo"}, {str: "FOO"}],
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({_id: "foo"}));
+assert.commandWorked(coll.insert({_id: "foo"}));
assert.eq(1, coll.find({_id: "FOO"}).itcount());
// Find on _id should use idhack stage when query inherits collection default collation.
@@ -690,7 +690,7 @@ assert.neq(null, planStage);
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert([{a: "A"}, {a: "B"}]));
+assert.commandWorked(coll.insert([{a: "A"}, {a: "B"}]));
assert.eq(1, coll.find({$expr: {$eq: ["$a", "a"]}}).itcount());
if (db.getMongo().useReadCommands()) {
@@ -699,9 +699,9 @@ if (db.getMongo().useReadCommands()) {
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({str: "foo"}));
- assert.writeOK(coll.insert({str: "FOO"}));
- assert.writeOK(coll.insert({str: "bar"}));
+ assert.commandWorked(coll.insert({str: "foo"}));
+ assert.commandWorked(coll.insert({str: "FOO"}));
+ assert.commandWorked(coll.insert({str: "bar"}));
assert.eq(2, coll.find({str: {$in: ["foo", "bar"]}}).collation({locale: "simple"}).itcount());
assert.eq(1, coll.find({str: "foo"}).collation({locale: "simple"}).itcount());
assert.eq(
@@ -713,8 +713,8 @@ if (db.getMongo().useReadCommands()) {
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 3}}));
- assert.writeOK(coll.insert({_id: "foo"}));
- assert.writeOK(coll.insert({_id: "FOO"}));
+ assert.commandWorked(coll.insert({_id: "foo"}));
+ assert.commandWorked(coll.insert({_id: "FOO"}));
assert.eq(2, coll.find({_id: "foo"}).collation({locale: "en_US", strength: 2}).itcount());
// Find on _id should use idhack stage when explicitly given query collation matches
@@ -772,7 +772,7 @@ assert(isIxscan(db, explain.queryPlanner.winningPlan));
// Find should return correct results when collation specified and run with explain.
coll.drop();
-assert.writeOK(coll.insert({str: "foo"}));
+assert.commandWorked(coll.insert({str: "foo"}));
explainRes =
coll.explain("executionStats").find({str: "FOO"}).collation({locale: "en_US"}).finish();
assert.commandWorked(explainRes);
@@ -867,8 +867,8 @@ if (!db.getMongo().useReadCommands()) {
// find() shell helper should error if a collation is specified and the shell is not using
// read commands.
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "bar"}));
assert.throws(function() {
coll.find().collation({locale: "fr"}).itcount();
});
@@ -888,8 +888,8 @@ assert.eq(
// Update-findAndModify should return correct results when collation specified.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "bar"}));
assert.eq({_id: 1, str: "baz"}, coll.findAndModify({
query: {str: "FOO"},
update: {$set: {str: "baz"}},
@@ -911,8 +911,8 @@ assert.eq(1, planStage.nWouldModify);
// Delete-findAndModify should return correct results when collation specified.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "bar"}));
assert.eq({_id: 1, str: "foo"},
coll.findAndModify(
{query: {str: "FOO"}, remove: true, collation: {locale: "en_US", strength: 2}}));
@@ -933,7 +933,7 @@ assert.eq(1, planStage.nWouldDelete);
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.eq({_id: 1, str: "foo"}, coll.findAndModify({query: {str: "FOO"}, update: {$set: {x: 1}}}));
assert.eq({_id: 1, str: "foo", x: 1}, coll.findAndModify({query: {str: "FOO"}, remove: true}));
@@ -942,7 +942,7 @@ assert.eq({_id: 1, str: "foo", x: 1}, coll.findAndModify({query: {str: "FOO"}, r
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.eq(null,
coll.findAndModify(
{query: {str: "FOO"}, update: {$set: {x: 1}}, collation: {locale: "simple"}}));
@@ -969,8 +969,8 @@ assert.throws(function() {
// mapReduce should return correct results when collation specified and no indexes exist.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "bar"}));
var mapReduceOut = coll.mapReduce(
function() {
emit(this.str, 1);
@@ -987,7 +987,7 @@ assert.eq(mapReduceOut.results.length, 1);
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
var mapReduceOut = coll.mapReduce(
function() {
emit(this.str, 1);
@@ -1004,7 +1004,7 @@ assert.eq(mapReduceOut.results.length, 1);
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
var mapReduceOut = coll.mapReduce(
function() {
emit(this.str, 1);
@@ -1023,21 +1023,21 @@ assert.eq(mapReduceOut.results.length, 0);
if (db.getMongo().writeMode() === "commands") {
// Remove should succeed when collation specified and collection does not exist.
coll.drop();
- assert.writeOK(coll.remove({str: "foo"}, {justOne: true, collation: {locale: "fr"}}));
+ assert.commandWorked(coll.remove({str: "foo"}, {justOne: true, collation: {locale: "fr"}}));
// Remove should return correct results when collation specified.
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
writeRes =
coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "en_US", strength: 2}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
// Explain of remove should return correct results when collation specified.
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
explainRes = coll.explain("executionStats").remove({str: "FOO"}, {
justOne: true,
collation: {locale: "en_US", strength: 2}
@@ -1053,9 +1053,9 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
writeRes = coll.remove({str: "FOO"}, {justOne: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
// Remove with idhack should return correct results when no collation specified and collection
@@ -1063,9 +1063,9 @@ assert.eq(1, writeRes.nRemoved);
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({_id: "foo"}));
+assert.commandWorked(coll.insert({_id: "foo"}));
writeRes = coll.remove({_id: "FOO"}, {justOne: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
// Remove on _id should use idhack stage when query inherits collection default collation.
@@ -1082,9 +1082,9 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
writeRes = coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(0, writeRes.nRemoved);
// Remove on _id should return correct results when "simple" collation specified and
@@ -1092,9 +1092,9 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({_id: "foo"}));
+ assert.commandWorked(coll.insert({_id: "foo"}));
writeRes = coll.remove({_id: "FOO"}, {justOne: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(0, writeRes.nRemoved);
// Remove on _id should use idhack stage when explicit query collation matches collection
@@ -1122,8 +1122,8 @@ if (db.getMongo().writeMode() !== "commands") {
// remove() shell helper should error if a collation is specified and the shell is not using
// write commands.
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
assert.throws(function() {
coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "en_US", strength: 2}});
});
@@ -1140,13 +1140,13 @@ if (db.getMongo().writeMode() !== "commands") {
if (db.getMongo().writeMode() === "commands") {
// Update should succeed when collation specified and collection does not exist.
coll.drop();
- assert.writeOK(
+ assert.commandWorked(
coll.update({str: "foo"}, {$set: {other: 99}}, {multi: true, collation: {locale: "fr"}}));
// Update should return correct results when collation specified.
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
writeRes = coll.update({str: "FOO"},
{$set: {other: 99}},
{multi: true, collation: {locale: "en_US", strength: 2}});
@@ -1154,8 +1154,8 @@ if (db.getMongo().writeMode() === "commands") {
// Explain of update should return correct results when collation specified.
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
explainRes = coll.explain("executionStats").update({str: "FOO"}, {$set: {other: 99}}, {
multi: true,
collation: {locale: "en_US", strength: 2}
@@ -1171,9 +1171,9 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
writeRes = coll.update({str: "FOO"}, {$set: {other: 99}});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
// Update with idhack should return correct results when no collation specified and collection
@@ -1181,9 +1181,9 @@ assert.eq(1, writeRes.nMatched);
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert({_id: "foo"}));
+assert.commandWorked(coll.insert({_id: "foo"}));
writeRes = coll.update({_id: "FOO"}, {$set: {other: 99}});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
// Update on _id should use idhack stage when query inherits collection default collation.
@@ -1200,9 +1200,9 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
writeRes = coll.update({str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(0, writeRes.nModified);
// Update on _id should return correct results when "simple" collation specified and
@@ -1210,9 +1210,9 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({_id: "foo"}));
+ assert.commandWorked(coll.insert({_id: "foo"}));
writeRes = coll.update({_id: "FOO"}, {$set: {other: 99}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(0, writeRes.nModified);
// Update on _id should use idhack stage when explicitly given query collation matches
@@ -1242,8 +1242,8 @@ if (db.getMongo().writeMode() !== "commands") {
// update() shell helper should error if a collation is specified and the shell is not using
// write commands.
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
assert.throws(function() {
coll.update({str: "FOO"},
{$set: {other: 99}},
@@ -1277,7 +1277,7 @@ assert.commandFailedWithCode(db.runCommand({
// $geoNear rejects the now-deprecated "collation" option.
coll.drop();
-assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
+assert.commandWorked(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
assert.commandFailedWithCode(db.runCommand({
aggregate: coll.getName(),
cursor: {},
@@ -1335,7 +1335,7 @@ coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
-assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
+assert.commandWorked(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
assert.eq(1, coll.aggregate([geoNearStage]).itcount());
// $geoNear should return correct results when "simple" collation specified and collection has
@@ -1344,7 +1344,7 @@ coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
-assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
+assert.commandWorked(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
assert.eq(0, coll.aggregate([geoNearStage], {collation: {locale: "simple"}}).itcount());
//
@@ -1365,7 +1365,7 @@ if (db.getMongo().useReadCommands()) {
// Find with $nearSphere should return correct results when collation specified and string
// predicate not indexed.
coll.drop();
- assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
+ assert.commandWorked(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
assert.eq(
0,
@@ -1438,8 +1438,8 @@ var bulk;
if (db.getMongo().writeMode() !== "commands") {
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
// Can't use the bulk API to set a collation when using legacy write ops.
bulk = coll.initializeUnorderedBulkOp();
@@ -1454,44 +1454,44 @@ if (db.getMongo().writeMode() !== "commands") {
} else {
// update().
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
bulk = coll.initializeUnorderedBulkOp();
bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({$set: {other: 99}});
writeRes = bulk.execute();
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(2, writeRes.nModified);
// updateOne().
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
bulk = coll.initializeUnorderedBulkOp();
bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({
$set: {other: 99}
});
writeRes = bulk.execute();
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nModified);
// replaceOne().
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
bulk = coll.initializeUnorderedBulkOp();
bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({str: "oof"});
writeRes = bulk.execute();
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nModified);
// replaceOne() with upsert().
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
bulk = coll.initializeUnorderedBulkOp();
bulk.find({str: "FOO"}).collation({locale: "en_US"}).upsert().replaceOne({str: "foo"});
writeRes = bulk.execute();
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nUpserted);
assert.eq(0, writeRes.nModified);
@@ -1500,28 +1500,28 @@ if (db.getMongo().writeMode() !== "commands") {
str: "foo"
});
writeRes = bulk.execute();
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(0, writeRes.nUpserted);
assert.eq(1, writeRes.nModified);
// removeOne().
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
bulk = coll.initializeUnorderedBulkOp();
bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne();
writeRes = bulk.execute();
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
// remove().
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+ assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
bulk = coll.initializeUnorderedBulkOp();
bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove();
writeRes = bulk.execute();
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(2, writeRes.nRemoved);
}
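
All of the bulk paths above share one shape: the collation is attached to the find() portion of the bulk op and then governs matching for whichever update/replace/remove is chained onto it. A minimal standalone sketch of that pattern, assuming a mongo shell issuing write commands (the collection name is illustrative):

    var c = db.bulk_collation_sketch;
    c.drop();
    assert.commandWorked(c.insert({_id: 1, str: "foo"}));
    assert.commandWorked(c.insert({_id: 2, str: "foo"}));
    var bulk = c.initializeUnorderedBulkOp();
    // The collation rides on find() and drives the match: with strength 2,
    // "FOO" compares equal to "foo", so both documents are updated.
    bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({$set: {other: 99}});
    var res = bulk.execute();
    assert.commandWorked(res);
    assert.eq(2, res.nModified);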
@@ -1531,8 +1531,8 @@ if (db.getMongo().writeMode() !== "commands") {
// deleteOne().
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
if (db.getMongo().writeMode() === "commands") {
var res = coll.deleteOne({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
assert.eq(1, res.deletedCount);
@@ -1544,8 +1544,8 @@ if (db.getMongo().writeMode() === "commands") {
// deleteMany().
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
if (db.getMongo().writeMode() === "commands") {
var res = coll.deleteMany({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
assert.eq(2, res.deletedCount);
@@ -1557,14 +1557,14 @@ if (db.getMongo().writeMode() === "commands") {
// findOneAndDelete().
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.eq({_id: 1, str: "foo"},
coll.findOneAndDelete({str: "FOO"}, {collation: {locale: "en_US", strength: 2}}));
assert.eq(null, coll.findOne({_id: 1}));
// findOneAndReplace().
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.eq({_id: 1, str: "foo"},
coll.findOneAndReplace(
{str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}}));
@@ -1572,7 +1572,7 @@ assert.neq(null, coll.findOne({str: "bar"}));
// findOneAndUpdate().
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.eq({_id: 1, str: "foo"},
coll.findOneAndUpdate(
{str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}}));
@@ -1580,8 +1580,8 @@ assert.neq(null, coll.findOne({other: 99}));
// replaceOne().
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
if (db.getMongo().writeMode() === "commands") {
var res =
coll.replaceOne({str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}});
@@ -1594,8 +1594,8 @@ if (db.getMongo().writeMode() === "commands") {
// updateOne().
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
if (db.getMongo().writeMode() === "commands") {
var res = coll.updateOne(
{str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
@@ -1609,8 +1609,8 @@ if (db.getMongo().writeMode() === "commands") {
// updateMany().
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
if (db.getMongo().writeMode() === "commands") {
var res = coll.updateMany(
{str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
@@ -1624,8 +1624,8 @@ if (db.getMongo().writeMode() === "commands") {
// updateOne with bulkWrite().
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
if (db.getMongo().writeMode() === "commands") {
var res = coll.bulkWrite([{
updateOne: {
@@ -1649,8 +1649,8 @@ if (db.getMongo().writeMode() === "commands") {
// updateMany with bulkWrite().
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
if (db.getMongo().writeMode() === "commands") {
var res = coll.bulkWrite([{
updateMany: {
@@ -1674,8 +1674,8 @@ if (db.getMongo().writeMode() === "commands") {
// replaceOne with bulkWrite().
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
if (db.getMongo().writeMode() === "commands") {
var res = coll.bulkWrite([{
replaceOne: {
@@ -1699,8 +1699,8 @@ if (db.getMongo().writeMode() === "commands") {
// deleteOne with bulkWrite().
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
if (db.getMongo().writeMode() === "commands") {
var res = coll.bulkWrite(
[{deleteOne: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]);
@@ -1714,8 +1714,8 @@ if (db.getMongo().writeMode() === "commands") {
// deleteMany with bulkWrite().
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
if (db.getMongo().writeMode() === "commands") {
var res = coll.bulkWrite(
[{deleteMany: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]);
@@ -1729,8 +1729,8 @@ if (db.getMongo().writeMode() === "commands") {
// Two deleteOne ops with bulkWrite using different collations.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, str: "foo"}));
-assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "bar"}));
if (db.getMongo().writeMode() === "commands") {
var res = coll.bulkWrite([
{deleteOne: {filter: {str: "FOO"}, collation: {locale: "fr", strength: 2}}},
@@ -1751,7 +1751,7 @@ if (!isMongos) {
coll.drop();
assert.commandWorked(
db.createCollection("collation", {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({_id: "foo", x: 5, str: "bar"}));
+ assert.commandWorked(coll.insert({_id: "foo", x: 5, str: "bar"}));
// preCondition.q respects collection default collation.
assert.commandFailed(db.runCommand({
@@ -1798,8 +1798,8 @@ if (!isMongos) {
const originalCollectionInfos = db.getCollectionInfos({name: coll.getName()});
assert.eq(originalCollectionInfos.length, 1, tojson(originalCollectionInfos));
- assert.writeOK(coll.insert({_id: "FOO"}));
- assert.writeOK(coll.insert({_id: "bar"}));
+ assert.commandWorked(coll.insert({_id: "FOO"}));
+ assert.commandWorked(coll.insert({_id: "bar"}));
assert.eq([{_id: "FOO"}],
coll.find({_id: "foo"}).toArray(),
"query should have performed a case-insensitive match");
@@ -1822,14 +1822,14 @@ if (!isMongos) {
// Test that the find command's min/max options respect the collation.
if (db.getMongo().useReadCommands()) {
coll.drop();
- assert.writeOK(coll.insert({str: "a"}));
- assert.writeOK(coll.insert({str: "A"}));
- assert.writeOK(coll.insert({str: "b"}));
- assert.writeOK(coll.insert({str: "B"}));
- assert.writeOK(coll.insert({str: "c"}));
- assert.writeOK(coll.insert({str: "C"}));
- assert.writeOK(coll.insert({str: "d"}));
- assert.writeOK(coll.insert({str: "D"}));
+ assert.commandWorked(coll.insert({str: "a"}));
+ assert.commandWorked(coll.insert({str: "A"}));
+ assert.commandWorked(coll.insert({str: "b"}));
+ assert.commandWorked(coll.insert({str: "B"}));
+ assert.commandWorked(coll.insert({str: "c"}));
+ assert.commandWorked(coll.insert({str: "C"}));
+ assert.commandWorked(coll.insert({str: "d"}));
+ assert.commandWorked(coll.insert({str: "D"}));
// This query should fail, since there is no index to support the min/max.
let err = assert.throws(() => coll.find()
@@ -1880,7 +1880,7 @@ if (db.getMongo().useReadCommands()) {
// Ensure results from index with min/max query are sorted to match requested collation.
coll.drop();
assert.commandWorked(coll.ensureIndex({a: 1, b: 1}));
- assert.writeOK(
+ assert.commandWorked(
coll.insert([{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}]));
var expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}];
res = coll.find({}, {_id: 0})
diff --git a/jstests/core/collation_convert_to_capped.js b/jstests/core/collation_convert_to_capped.js
index 237156e86d7..811d2849bc7 100644
--- a/jstests/core/collation_convert_to_capped.js
+++ b/jstests/core/collation_convert_to_capped.js
@@ -21,8 +21,8 @@ assert.commandWorked(
const originalCollectionInfos = testDb.getCollectionInfos({name: coll.getName()});
assert.eq(originalCollectionInfos.length, 1, tojson(originalCollectionInfos));
-assert.writeOK(coll.insert({_id: "FOO"}));
-assert.writeOK(coll.insert({_id: "bar"}));
+assert.commandWorked(coll.insert({_id: "FOO"}));
+assert.commandWorked(coll.insert({_id: "bar"}));
assert.eq([{_id: "FOO"}],
coll.find({_id: "foo"}).toArray(),
"query should have performed a case-insensitive match");
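
The assertion above depends on the collection-default collation: once a collection is created with a strength-2 (case-insensitive) collation, every query that does not supply its own collation inherits it. A minimal sketch, assuming a mongo shell session (collection name illustrative):

    var c = db.default_collation_sketch;
    c.drop();
    assert.commandWorked(
        db.createCollection(c.getName(), {collation: {locale: "en_US", strength: 2}}));
    assert.commandWorked(c.insert({_id: "FOO"}));
    // strength 2 ignores case, so a lookup by "foo" finds "FOO".
    assert.eq([{_id: "FOO"}], c.find({_id: "foo"}).toArray());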
diff --git a/jstests/core/collation_plan_cache.js b/jstests/core/collation_plan_cache.js
index 07507938cc3..97d7f220f54 100644
--- a/jstests/core/collation_plan_cache.js
+++ b/jstests/core/collation_plan_cache.js
@@ -13,7 +13,7 @@
var coll = db.collation_plan_cache;
coll.drop();
-assert.writeOK(coll.insert({a: 'foo', b: 5}));
+assert.commandWorked(coll.insert({a: 'foo', b: 5}));
// We need two indexes that each query can use so that a plan cache entry is created.
assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: 'en_US'}}));
diff --git a/jstests/core/collation_update.js b/jstests/core/collation_update.js
index 32538e15653..1337bfcd271 100644
--- a/jstests/core/collation_update.js
+++ b/jstests/core/collation_update.js
@@ -20,8 +20,8 @@ const numericOrdering = {
// Update modifiers respect collection default collation on simple _id query.
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
-assert.writeOK(coll.insert({_id: 1, a: "124"}));
-assert.writeOK(coll.update({_id: 1}, {$min: {a: "1234"}}));
+assert.commandWorked(coll.insert({_id: 1, a: "124"}));
+assert.commandWorked(coll.update({_id: 1}, {$min: {a: "1234"}}));
assert.eq(coll.find({a: "124"}).count(), 1);
// $min respects query collation.
@@ -29,20 +29,20 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
// 1234 > 124, so no change should occur.
- assert.writeOK(coll.insert({a: "124"}));
- assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}}, numericOrdering));
+ assert.commandWorked(coll.insert({a: "124"}));
+ assert.commandWorked(coll.update({a: "124"}, {$min: {a: "1234"}}, numericOrdering));
assert.eq(coll.find({a: "124"}).count(), 1);
// "1234" < "124" (non-numeric ordering), so an update should occur.
- assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}}, caseSensitive));
+ assert.commandWorked(coll.update({a: "124"}, {$min: {a: "1234"}}, caseSensitive));
assert.eq(coll.find({a: "1234"}).count(), 1);
}
// $min respects collection default collation.
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
-assert.writeOK(coll.insert({a: "124"}));
-assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}}));
+assert.commandWorked(coll.insert({a: "124"}));
+assert.commandWorked(coll.update({a: "124"}, {$min: {a: "1234"}}));
assert.eq(coll.find({a: "124"}).count(), 1);
// $max respects query collation.
@@ -50,20 +50,20 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
// "1234" < "124", so an update should not occur.
- assert.writeOK(coll.insert({a: "124"}));
- assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}}, caseSensitive));
+ assert.commandWorked(coll.insert({a: "124"}));
+ assert.commandWorked(coll.update({a: "124"}, {$max: {a: "1234"}}, caseSensitive));
assert.eq(coll.find({a: "124"}).count(), 1);
// 1234 > 124, so an update should occur.
- assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}}, numericOrdering));
+ assert.commandWorked(coll.update({a: "124"}, {$max: {a: "1234"}}, numericOrdering));
assert.eq(coll.find({a: "1234"}).count(), 1);
}
// $max respects collection default collation.
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
-assert.writeOK(coll.insert({a: "124"}));
-assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}}));
+assert.commandWorked(coll.insert({a: "124"}));
+assert.commandWorked(coll.update({a: "124"}, {$max: {a: "1234"}}));
assert.eq(coll.find({a: "1234"}).count(), 1);
// $addToSet respects query collation.
@@ -71,21 +71,21 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
// "foo" == "FOO" (case-insensitive), so set isn't extended.
- assert.writeOK(coll.insert({a: ["foo"]}));
- assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}}, caseInsensitive));
+ assert.commandWorked(coll.insert({a: ["foo"]}));
+ assert.commandWorked(coll.update({}, {$addToSet: {a: "FOO"}}, caseInsensitive));
var set = coll.findOne().a;
assert.eq(set.length, 1);
// "foo" != "FOO" (case-sensitive), so set is extended.
- assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}}, caseSensitive));
+ assert.commandWorked(coll.update({}, {$addToSet: {a: "FOO"}}, caseSensitive));
set = coll.findOne().a;
assert.eq(set.length, 2);
coll.drop();
// $each and $addToSet respect collation
- assert.writeOK(coll.insert({a: ["foo", "bar", "FOO"]}));
- assert.writeOK(
+ assert.commandWorked(coll.insert({a: ["foo", "bar", "FOO"]}));
+ assert.commandWorked(
coll.update({}, {$addToSet: {a: {$each: ["FOO", "BAR", "str"]}}}, caseInsensitive));
set = coll.findOne().a;
assert.eq(set.length, 4);
@@ -98,8 +98,8 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
// "foo" == "FOO" (case-insensitive), so set isn't extended.
-assert.writeOK(coll.insert({a: ["foo"]}));
-assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}}));
+assert.commandWorked(coll.insert({a: ["foo"]}));
+assert.commandWorked(coll.update({}, {$addToSet: {a: "FOO"}}));
var set = coll.findOne().a;
assert.eq(set.length, 1);
@@ -108,21 +108,21 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
// "foo" != "FOO" (case-sensitive), so it is not pulled.
- assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
- assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseSensitive));
+ assert.commandWorked(coll.insert({a: ["foo", "FOO"]}));
+ assert.commandWorked(coll.update({}, {$pull: {a: "foo"}}, caseSensitive));
var arr = coll.findOne().a;
assert.eq(arr.length, 1);
assert(arr.includes("FOO"));
// "foo" == "FOO" (case-insensitive), so "FOO" is pulled.
- assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseInsensitive));
+ assert.commandWorked(coll.update({}, {$pull: {a: "foo"}}, caseInsensitive));
arr = coll.findOne().a;
assert.eq(arr.length, 0);
// collation-aware $pull removes all instances that match.
coll.drop();
- assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
- assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseInsensitive));
+ assert.commandWorked(coll.insert({a: ["foo", "FOO"]}));
+ assert.commandWorked(coll.update({}, {$pull: {a: "foo"}}, caseInsensitive));
arr = coll.findOne().a;
assert.eq(arr.length, 0);
@@ -130,13 +130,13 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
// "124" > "1234" (case-sensitive), so it is not removed.
- assert.writeOK(coll.insert({a: ["124", "1234"]}));
- assert.writeOK(coll.update({}, {$pull: {a: {$lt: "1234"}}}, caseSensitive));
+ assert.commandWorked(coll.insert({a: ["124", "1234"]}));
+ assert.commandWorked(coll.update({}, {$pull: {a: {$lt: "1234"}}}, caseSensitive));
arr = coll.findOne().a;
assert.eq(arr.length, 2);
// 124 < 1234 (numeric ordering), so it is removed.
- assert.writeOK(coll.update({}, {$pull: {a: {$lt: "1234"}}}, numericOrdering));
+ assert.commandWorked(coll.update({}, {$pull: {a: {$lt: "1234"}}}, numericOrdering));
arr = coll.findOne().a;
assert.eq(arr.length, 1);
assert(arr.includes("1234"));
@@ -145,8 +145,8 @@ if (db.getMongo().writeMode() === "commands") {
// $pull respects collection default collation.
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
-assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
-assert.writeOK(coll.update({}, {$pull: {a: "foo"}}));
+assert.commandWorked(coll.insert({a: ["foo", "FOO"]}));
+assert.commandWorked(coll.update({}, {$pull: {a: "foo"}}));
var arr = coll.findOne().a;
assert.eq(arr.length, 0);
@@ -155,13 +155,13 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
// "foo" != "FOO" (case-sensitive), so no changes are made.
- assert.writeOK(coll.insert({a: ["foo", "bar"]}));
- assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}, caseSensitive));
+ assert.commandWorked(coll.insert({a: ["foo", "bar"]}));
+ assert.commandWorked(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}, caseSensitive));
var arr = coll.findOne().a;
assert.eq(arr.length, 2);
// "foo" == "FOO", "bar" == "BAR" (case-insensitive), so both are removed.
- assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}, caseInsensitive));
+ assert.commandWorked(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}, caseInsensitive));
arr = coll.findOne().a;
assert.eq(arr.length, 0);
}
@@ -169,8 +169,8 @@ if (db.getMongo().writeMode() === "commands") {
// $pullAll respects collection default collation.
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
-assert.writeOK(coll.insert({a: ["foo", "bar"]}));
-assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}));
+assert.commandWorked(coll.insert({a: ["foo", "bar"]}));
+assert.commandWorked(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}));
var arr = coll.findOne().a;
assert.eq(arr.length, 0);
@@ -179,8 +179,8 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
// "1230" < "1234" < "124" (case-sensitive)
- assert.writeOK(coll.insert({a: ["1234", "124"]}));
- assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}, caseSensitive));
+ assert.commandWorked(coll.insert({a: ["1234", "124"]}));
+ assert.commandWorked(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}, caseSensitive));
var arr = coll.findOne().a;
assert.eq(arr.length, 3);
assert.eq(arr[0], "1230");
@@ -189,8 +189,9 @@ if (db.getMongo().writeMode() === "commands") {
// "124" < "1230" < "1234" (numeric ordering)
coll.drop();
- assert.writeOK(coll.insert({a: ["1234", "124"]}));
- assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}, numericOrdering));
+ assert.commandWorked(coll.insert({a: ["1234", "124"]}));
+ assert.commandWorked(
+ coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}, numericOrdering));
arr = coll.findOne().a;
assert.eq(arr.length, 3);
assert.eq(arr[0], "124");
@@ -201,8 +202,8 @@ if (db.getMongo().writeMode() === "commands") {
// $push with $sort respects collection default collation.
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
-assert.writeOK(coll.insert({a: ["1234", "124"]}));
-assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}));
+assert.commandWorked(coll.insert({a: ["1234", "124"]}));
+assert.commandWorked(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}));
var arr = coll.findOne().a;
assert.eq(arr.length, 3);
assert.eq(arr[0], "124");
@@ -214,16 +215,16 @@ if (db.getMongo().writeMode() === "commands") {
coll.drop();
// "foo" != "FOO" (case-sensitive) so no update occurs.
- assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
- assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}, caseSensitive));
+ assert.commandWorked(coll.insert({a: ["foo", "FOO"]}));
+ assert.commandWorked(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}, caseSensitive));
var arr = coll.findOne().a;
assert.eq(arr.length, 2);
assert.eq(arr[0], "foo");
assert.eq(arr[1], "FOO");
// "foo" == "FOO" (case-insensitive) so no update occurs.
- assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
- assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}, caseInsensitive));
+ assert.commandWorked(coll.insert({a: ["foo", "FOO"]}));
+ assert.commandWorked(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}, caseInsensitive));
var arr = coll.findOne().a;
assert.eq(arr.length, 2);
assert.eq(arr[0], "FOO");
@@ -233,8 +234,8 @@ if (db.getMongo().writeMode() === "commands") {
// $ positional operator respects collection default collation on $set.
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
-assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
-assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}));
+assert.commandWorked(coll.insert({a: ["foo", "FOO"]}));
+assert.commandWorked(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}));
var arr = coll.findOne().a;
assert.eq(arr.length, 2);
assert.eq(arr[0], "FOO");
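
These update tests all turn on the same mechanism: the collation supplied with the update (or inherited from the collection) decides every string comparison the modifier performs, so the same $min can be a no-op under one collation and an update under another. A minimal sketch, assuming a mongo shell issuing write commands (collection name illustrative):

    var c = db.collation_update_sketch;
    c.drop();
    assert.commandWorked(c.insert({a: "124"}));
    // Numeric ordering: 1234 > 124, so $min leaves the document unchanged.
    assert.commandWorked(c.update(
        {a: "124"}, {$min: {a: "1234"}}, {collation: {locale: "en_US", numericOrdering: true}}));
    assert.eq(1, c.find({a: "124"}).count());
    // Simple binary ordering: "1234" < "124", so the same $min now updates.
    assert.commandWorked(c.update({a: "124"}, {$min: {a: "1234"}}));
    assert.eq(1, c.find({a: "1234"}).count());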
diff --git a/jstests/core/collmod_without_uuid.js b/jstests/core/collmod_without_uuid.js
index d3b2ca5a287..354d587b637 100644
--- a/jstests/core/collmod_without_uuid.js
+++ b/jstests/core/collmod_without_uuid.js
@@ -20,7 +20,7 @@ function checkUUIDs() {
}
db[collName].drop();
-assert.writeOK(db[collName].insert({}));
+assert.commandWorked(db[collName].insert({}));
checkUUIDs();
let cmd = {applyOps: [{ns: "test.$cmd", op: "c", o: {collMod: collName}}]};
let res = db.runCommand(cmd);
diff --git a/jstests/core/commands_namespace_parsing.js b/jstests/core/commands_namespace_parsing.js
index 88431388eca..e17ab50ea50 100644
--- a/jstests/core/commands_namespace_parsing.js
+++ b/jstests/core/commands_namespace_parsing.js
@@ -56,7 +56,7 @@ assert.commandWorked(isMaster);
const isMongos = (isMaster.msg === "isdbgrid");
db.commands_namespace_parsing.drop();
-assert.writeOK(db.commands_namespace_parsing.insert({a: 1}));
+assert.commandWorked(db.commands_namespace_parsing.insert({a: 1}));
// Test aggregate fails with an invalid collection name.
assertFailsWithInvalidNamespacesForField(
diff --git a/jstests/core/compare_timestamps.js b/jstests/core/compare_timestamps.js
index b88bb003483..54db4b0fd36 100644
--- a/jstests/core/compare_timestamps.js
+++ b/jstests/core/compare_timestamps.js
@@ -3,7 +3,7 @@
'use strict';
var t = db.compare_timestamps;
t.drop();
-assert.writeOK(t.insert({a: new Timestamp(0xffffffff, 3), b: "non-zero"}));
-assert.writeOK(t.insert({a: new Timestamp(0, 0), b: "zero"}));
+assert.commandWorked(t.insert({a: new Timestamp(0xffffffff, 3), b: "non-zero"}));
+assert.commandWorked(t.insert({a: new Timestamp(0, 0), b: "zero"}));
assert.eq(t.find().sort({a: 1}).limit(1).next().b, "zero", "timestamp must compare unsigned");
}());
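
The point of this test is that BSON timestamps compare as unsigned values: if the seconds field 0xffffffff were treated as a signed 32-bit integer it would sort below zero. A minimal sketch of the same check, assuming a mongo shell session (collection name illustrative):

    var t = db.ts_compare_sketch;
    t.drop();
    assert.commandWorked(t.insert({a: new Timestamp(0xffffffff, 3), b: "non-zero"}));
    assert.commandWorked(t.insert({a: new Timestamp(0, 0), b: "zero"}));
    // Unsigned comparison puts the zero timestamp first in an ascending sort.
    assert.eq("zero", t.find().sort({a: 1}).limit(1).next().b);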
diff --git a/jstests/core/constructors.js b/jstests/core/constructors.js
index 0e28150e701..7657f3b4b1e 100644
--- a/jstests/core/constructors.js
+++ b/jstests/core/constructors.js
@@ -77,7 +77,7 @@ function whereConstructorTest(constructorList) {
constructorList = addConstructorsWithNew(constructorList);
t = db.where_constructors;
t.drop();
- assert.writeOK(t.insert({x: 1}));
+ assert.commandWorked(t.insert({x: 1}));
constructorList.valid.forEach(function(constructor) {
try {
diff --git a/jstests/core/convert_to_capped.js b/jstests/core/convert_to_capped.js
index 58731299dae..73694bc52d0 100644
--- a/jstests/core/convert_to_capped.js
+++ b/jstests/core/convert_to_capped.js
@@ -17,7 +17,7 @@ testDb.dropDatabase();
// Create a collection with some data.
let num = 10;
for (let i = 0; i < num; ++i) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
}
// Ensure we do not allow overflowing the size long long on the server (SERVER-33078).
diff --git a/jstests/core/count_hint.js b/jstests/core/count_hint.js
index 9bb485410ef..21c7a37b87d 100644
--- a/jstests/core/count_hint.js
+++ b/jstests/core/count_hint.js
@@ -13,8 +13,8 @@
var coll = db.jstests_count_hint;
coll.drop();
-assert.writeOK(coll.insert({i: 1}));
-assert.writeOK(coll.insert({i: 2}));
+assert.commandWorked(coll.insert({i: 1}));
+assert.commandWorked(coll.insert({i: 2}));
assert.eq(2, coll.find().count());
diff --git a/jstests/core/coveredIndex1.js b/jstests/core/coveredIndex1.js
index 2be0cae4bf9..a4c614a2b1c 100644
--- a/jstests/core/coveredIndex1.js
+++ b/jstests/core/coveredIndex1.js
@@ -15,12 +15,12 @@ coll.drop();
// Include helpers for analyzing explain output.
load("jstests/libs/analyze_plan.js");
-assert.writeOK(coll.insert({order: 0, fn: "john", ln: "doe"}));
-assert.writeOK(coll.insert({order: 1, fn: "jack", ln: "doe"}));
-assert.writeOK(coll.insert({order: 2, fn: "john", ln: "smith"}));
-assert.writeOK(coll.insert({order: 3, fn: "jack", ln: "black"}));
-assert.writeOK(coll.insert({order: 4, fn: "bob", ln: "murray"}));
-assert.writeOK(coll.insert({order: 5, fn: "aaa", ln: "bbb", obj: {a: 1, b: "blah"}}));
+assert.commandWorked(coll.insert({order: 0, fn: "john", ln: "doe"}));
+assert.commandWorked(coll.insert({order: 1, fn: "jack", ln: "doe"}));
+assert.commandWorked(coll.insert({order: 2, fn: "john", ln: "smith"}));
+assert.commandWorked(coll.insert({order: 3, fn: "jack", ln: "black"}));
+assert.commandWorked(coll.insert({order: 4, fn: "bob", ln: "murray"}));
+assert.commandWorked(coll.insert({order: 5, fn: "aaa", ln: "bbb", obj: {a: 1, b: "blah"}}));
/**
* Asserts that running the find command with query 'query' and projection 'projection' is
diff --git a/jstests/core/covered_multikey.js b/jstests/core/covered_multikey.js
index cb5e97d8dbb..297728545ab 100644
--- a/jstests/core/covered_multikey.js
+++ b/jstests/core/covered_multikey.js
@@ -15,7 +15,7 @@ load("jstests/libs/analyze_plan.js");
let coll = db.covered_multikey;
coll.drop();
-assert.writeOK(coll.insert({a: 1, b: [2, 3, 4]}));
+assert.commandWorked(coll.insert({a: 1, b: [2, 3, 4]}));
assert.commandWorked(coll.createIndex({a: 1, b: 1}));
assert.eq(1, coll.find({a: 1, b: 2}, {_id: 0, a: 1}).itcount());
@@ -25,8 +25,8 @@ assert(isIxscan(db, explainRes.queryPlanner.winningPlan));
assert(!planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH"));
coll.drop();
-assert.writeOK(coll.insert({a: 1, b: [1, 2, 3], c: 3, d: 5}));
-assert.writeOK(coll.insert({a: [1, 2, 3], b: 1, c: 4, d: 6}));
+assert.commandWorked(coll.insert({a: 1, b: [1, 2, 3], c: 3, d: 5}));
+assert.commandWorked(coll.insert({a: [1, 2, 3], b: 1, c: 4, d: 6}));
assert.commandWorked(coll.createIndex({a: 1, b: 1, c: -1, d: -1}));
let cursor = coll.find({a: 1, b: 1}, {_id: 0, c: 1, d: 1}).sort({c: -1, d: -1});
@@ -41,7 +41,7 @@ assert(!planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH"));
// Verify that a query cannot be covered over a path which is multikey due to an empty array.
coll.drop();
-assert.writeOK(coll.insert({a: []}));
+assert.commandWorked(coll.insert({a: []}));
assert.commandWorked(coll.createIndex({a: 1}));
assert.eq({a: []}, coll.findOne({a: []}, {_id: 0, a: 1}));
explainRes = coll.explain("queryPlanner").find({a: []}, {_id: 0, a: 1}).finish();
@@ -53,7 +53,7 @@ assert.eq(true, ixscanStage.isMultiKey);
// Verify that a query cannot be covered over a path which is multikey due to a single-element
// array.
coll.drop();
-assert.writeOK(coll.insert({a: [2]}));
+assert.commandWorked(coll.insert({a: [2]}));
assert.commandWorked(coll.createIndex({a: 1}));
assert.eq({a: [2]}, coll.findOne({a: 2}, {_id: 0, a: 1}));
explainRes = coll.explain("queryPlanner").find({a: 2}, {_id: 0, a: 1}).finish();
@@ -65,9 +65,9 @@ assert.eq(true, ixscanStage.isMultiKey);
// Verify that a query cannot be covered over a path which is multikey due to a single-element
// array, where the path is made multikey by an update rather than an insert.
coll.drop();
-assert.writeOK(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({a: 2}));
assert.commandWorked(coll.createIndex({a: 1}));
-assert.writeOK(coll.update({}, {$set: {a: [2]}}));
+assert.commandWorked(coll.update({}, {$set: {a: [2]}}));
assert.eq({a: [2]}, coll.findOne({a: 2}, {_id: 0, a: 1}));
explainRes = coll.explain("queryPlanner").find({a: 2}, {_id: 0, a: 1}).finish();
assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN"));
@@ -78,12 +78,12 @@ assert.eq(true, ixscanStage.isMultiKey);
// Verify that a trailing empty array makes a 2dsphere index multikey.
coll.drop();
assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"}));
-assert.writeOK(coll.insert({a: {b: 1}, c: {type: "Point", coordinates: [0, 0]}}));
+assert.commandWorked(coll.insert({a: {b: 1}, c: {type: "Point", coordinates: [0, 0]}}));
explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish();
ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
assert.neq(null, ixscanStage);
assert.eq(false, ixscanStage.isMultiKey);
-assert.writeOK(coll.insert({a: {b: []}, c: {type: "Point", coordinates: [0, 0]}}));
+assert.commandWorked(coll.insert({a: {b: []}, c: {type: "Point", coordinates: [0, 0]}}));
explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish();
ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
assert.neq(null, ixscanStage);
@@ -92,7 +92,7 @@ assert.eq(true, ixscanStage.isMultiKey);
// Verify that a mid-path empty array makes a 2dsphere index multikey.
coll.drop();
assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"}));
-assert.writeOK(coll.insert({a: [], c: {type: "Point", coordinates: [0, 0]}}));
+assert.commandWorked(coll.insert({a: [], c: {type: "Point", coordinates: [0, 0]}}));
explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish();
ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
assert.neq(null, ixscanStage);
@@ -101,7 +101,7 @@ assert.eq(true, ixscanStage.isMultiKey);
// Verify that a single-element array makes a 2dsphere index multikey.
coll.drop();
assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"}));
-assert.writeOK(coll.insert({a: {b: [3]}, c: {type: "Point", coordinates: [0, 0]}}));
+assert.commandWorked(coll.insert({a: {b: [3]}, c: {type: "Point", coordinates: [0, 0]}}));
explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish();
ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
assert.neq(null, ixscanStage);
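
A recurring point in covered_multikey.js: once any indexed path has ever held an array, even a one-element one, the index is marked multikey, and a query projecting that field can no longer be covered by the index alone. A minimal sketch, assuming a mongo shell session with the explain helpers from jstests/libs/analyze_plan.js (collection name illustrative):

    load("jstests/libs/analyze_plan.js");  // for getPlanStage()

    var c = db.covered_multikey_sketch;
    c.drop();
    assert.commandWorked(c.createIndex({a: 1}));
    assert.commandWorked(c.insert({a: [2]}));  // one-element array still marks the index multikey
    var explainRes = c.explain("queryPlanner").find({a: 2}, {_id: 0, a: 1}).finish();
    var ixscan = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
    assert.eq(true, ixscan.isMultiKey);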
diff --git a/jstests/core/currentop.js b/jstests/core/currentop.js
index 636fdee2cb3..577ec6b159e 100644
--- a/jstests/core/currentop.js
+++ b/jstests/core/currentop.js
@@ -17,7 +17,7 @@ coll.drop();
assert.commandWorked(db.fsyncLock());
const awaitInsertShell = startParallelShell(function() {
- assert.writeOK(db.jstests_currentop.insert({}));
+ assert.commandWorked(db.jstests_currentop.insert({}));
});
// Wait until the write appears in the currentOp output reporting that it is waiting for a lock.
diff --git a/jstests/core/cursora.js b/jstests/core/cursora.js
index 3def8c6162f..a4c6e94595b 100644
--- a/jstests/core/cursora.js
+++ b/jstests/core/cursora.js
@@ -21,7 +21,7 @@ function run(n) {
let bulk = t.initializeUnorderedBulkOp();
for (let i = 0; i < n; i++)
bulk.insert({_id: i});
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
const join = startParallelShell("sleep(50);" +
"db.cursora.remove({});");
diff --git a/jstests/core/dbcase.js b/jstests/core/dbcase.js
index 033608a3f6a..bcbb103d873 100644
--- a/jstests/core/dbcase.js
+++ b/jstests/core/dbcase.js
@@ -6,7 +6,7 @@ b = db.getSisterDB("dbcasetest_dbnameA");
a.dropDatabase();
b.dropDatabase();
-assert.writeOK(a.foo.save({x: 1}));
+assert.commandWorked(a.foo.save({x: 1}));
res = b.foo.save({x: 1});
assert.writeError(res);
diff --git a/jstests/core/dbstats.js b/jstests/core/dbstats.js
index aa413f905fc..ea424295ebf 100644
--- a/jstests/core/dbstats.js
+++ b/jstests/core/dbstats.js
@@ -29,7 +29,7 @@ const doc = {
_id: 1,
x: 1
};
-assert.writeOK(coll.insert(doc));
+assert.commandWorked(coll.insert(doc));
let dbStats = testDB.runCommand({dbStats: 1});
assert.commandWorked(dbStats);
diff --git a/jstests/core/distinct1.js b/jstests/core/distinct1.js
index 1d4ccaab16c..c35e4179879 100644
--- a/jstests/core/distinct1.js
+++ b/jstests/core/distinct1.js
@@ -6,11 +6,11 @@ coll.drop();
assert.eq(0, coll.distinct("a").length, "test empty");
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({a: 2}));
-assert.writeOK(coll.insert({a: 2}));
-assert.writeOK(coll.insert({a: 2}));
-assert.writeOK(coll.insert({a: 3}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({a: 3}));
// Test that distinct returns all the distinct values.
assert.eq([1, 2, 3], coll.distinct("a").sort(), "distinct returned unexpected results");
@@ -22,10 +22,10 @@ assert.eq([1, 2],
assert(coll.drop());
-assert.writeOK(coll.insert({a: {b: "a"}, c: 12}));
-assert.writeOK(coll.insert({a: {b: "b"}, c: 12}));
-assert.writeOK(coll.insert({a: {b: "c"}, c: 12}));
-assert.writeOK(coll.insert({a: {b: "c"}, c: 12}));
+assert.commandWorked(coll.insert({a: {b: "a"}, c: 12}));
+assert.commandWorked(coll.insert({a: {b: "b"}, c: 12}));
+assert.commandWorked(coll.insert({a: {b: "c"}, c: 12}));
+assert.commandWorked(coll.insert({a: {b: "c"}, c: 12}));
// Test that distinct works on fields in embedded documents.
assert.eq(["a", "b", "c"],
@@ -34,8 +34,8 @@ assert.eq(["a", "b", "c"],
assert(coll.drop());
-assert.writeOK(coll.insert({_id: 1, a: 1}));
-assert.writeOK(coll.insert({_id: 2, a: 2}));
+assert.commandWorked(coll.insert({_id: 1, a: 1}));
+assert.commandWorked(coll.insert({_id: 2, a: 2}));
// Test that distinct works on the _id field.
assert.eq([1, 2], coll.distinct("_id").sort(), "distinct on _id returned unexpected results");
@@ -46,13 +46,13 @@ assert.eq(
assert(coll.drop());
-assert.writeOK(coll.insert({a: 1, b: 2}));
-assert.writeOK(coll.insert({a: 2, b: 2}));
-assert.writeOK(coll.insert({a: 2, b: 1}));
-assert.writeOK(coll.insert({a: 2, b: 2}));
-assert.writeOK(coll.insert({a: 3, b: 2}));
-assert.writeOK(coll.insert({a: 4, b: 1}));
-assert.writeOK(coll.insert({a: 4, b: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 2, b: 2}));
+assert.commandWorked(coll.insert({a: 2, b: 1}));
+assert.commandWorked(coll.insert({a: 2, b: 2}));
+assert.commandWorked(coll.insert({a: 3, b: 2}));
+assert.commandWorked(coll.insert({a: 4, b: 1}));
+assert.commandWorked(coll.insert({a: 4, b: 1}));
// Test running the distinct command directly, rather than via shell helper.
let res = assert.commandWorked(db.runCommand({distinct: collName, key: "a"}));
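
The last line above exercises the raw command form: the distinct command replies with a document whose "values" array holds the distinct values, and the shell's distinct() helper is a thin wrapper around it. A minimal sketch, assuming a mongo shell session (collection name illustrative):

    var c = db.distinct_cmd_sketch;
    c.drop();
    assert.commandWorked(c.insert({a: 1}));
    assert.commandWorked(c.insert({a: 2}));
    assert.commandWorked(c.insert({a: 2}));
    // The helper and the raw command agree on the distinct values.
    var res = assert.commandWorked(db.runCommand({distinct: c.getName(), key: "a"}));
    assert.eq([1, 2], res.values.sort());
    assert.eq(c.distinct("a").sort(), res.values.sort());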
diff --git a/jstests/core/distinct3.js b/jstests/core/distinct3.js
index 23557820bec..ba1b3182542 100644
--- a/jstests/core/distinct3.js
+++ b/jstests/core/distinct3.js
@@ -20,7 +20,7 @@ for (i = 0; i < 50; ++i) {
for (i = 0; i < 100; ++i) {
bulk.insert({b: i, c: i + 50});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Attempt to remove the last match for the {a:1} index scan while distinct is yielding.
p = startParallelShell('for( i = 0; i < 100; ++i ) { ' +
@@ -29,7 +29,7 @@ p = startParallelShell('for( i = 0; i < 100; ++i ) {
' for( j = 0; j < 20; ++j ) { ' +
' bulk.insert( { a:49, c:49, d:j } ); ' +
' } ' +
- ' assert.writeOK(bulk.execute()); ' +
+ ' assert.commandWorked(bulk.execute()); ' +
'} ');
for (i = 0; i < 100; ++i) {
diff --git a/jstests/core/distinct_compound_index.js b/jstests/core/distinct_compound_index.js
index 6182267ea51..91debcdeeb0 100644
--- a/jstests/core/distinct_compound_index.js
+++ b/jstests/core/distinct_compound_index.js
@@ -9,10 +9,10 @@ var coll = db.distinct_multikey_index;
coll.drop();
for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.save({a: 1, b: 1}));
- assert.writeOK(coll.save({a: 1, b: 2}));
- assert.writeOK(coll.save({a: 2, b: 1}));
- assert.writeOK(coll.save({a: 2, b: 3}));
+ assert.commandWorked(coll.save({a: 1, b: 1}));
+ assert.commandWorked(coll.save({a: 1, b: 2}));
+ assert.commandWorked(coll.save({a: 2, b: 1}));
+ assert.commandWorked(coll.save({a: 2, b: 3}));
}
coll.createIndex({a: 1, b: 1});
diff --git a/jstests/core/distinct_multikey.js b/jstests/core/distinct_multikey.js
index 72acd2c342c..7208b67fb64 100644
--- a/jstests/core/distinct_multikey.js
+++ b/jstests/core/distinct_multikey.js
@@ -9,9 +9,9 @@ load("jstests/libs/analyze_plan.js");
let coll = db.jstest_distinct_multikey;
coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
-assert.writeOK(coll.insert({a: [1, 2, 3]}));
-assert.writeOK(coll.insert({a: [2, 3, 4]}));
-assert.writeOK(coll.insert({a: [5, 6, 7]}));
+assert.commandWorked(coll.insert({a: [1, 2, 3]}));
+assert.commandWorked(coll.insert({a: [2, 3, 4]}));
+assert.commandWorked(coll.insert({a: [5, 6, 7]}));
// Test that distinct can correctly use a multikey index when there is no predicate.
let result = coll.distinct("a");
@@ -31,8 +31,8 @@ assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN"));
// Test distinct over a dotted multikey field, with a predicate.
coll.drop();
assert.commandWorked(coll.createIndex({"a.b": 1}));
-assert.writeOK(coll.insert({a: {b: [1, 2, 3]}}));
-assert.writeOK(coll.insert({a: {b: [2, 3, 4]}}));
+assert.commandWorked(coll.insert({a: {b: [1, 2, 3]}}));
+assert.commandWorked(coll.insert({a: {b: [2, 3, 4]}}));
result = coll.distinct("a.b", {"a.b": 3});
assert.eq([1, 2, 3, 4], result.sort());
@@ -44,9 +44,9 @@ assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN"));
// multikey.
coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({a: 2}));
-assert.writeOK(coll.insert({a: 3}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({a: 3}));
result = coll.distinct("a", {a: {$gte: 2}});
assert.eq([2, 3], result.sort());
@@ -58,9 +58,9 @@ assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
// multikey.
coll.drop();
assert.commandWorked(coll.createIndex({a: 1, b: 1}));
-assert.writeOK(coll.insert({a: 1, b: [2, 3]}));
-assert.writeOK(coll.insert({a: 8, b: [3, 4]}));
-assert.writeOK(coll.insert({a: 7, b: [4, 5]}));
+assert.commandWorked(coll.insert({a: 1, b: [2, 3]}));
+assert.commandWorked(coll.insert({a: 8, b: [3, 4]}));
+assert.commandWorked(coll.insert({a: 7, b: [4, 5]}));
result = coll.distinct("a", {a: {$gte: 2}});
assert.eq([7, 8], result.sort());
@@ -78,9 +78,9 @@ assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN"));
// Test distinct over a trailing non-multikey field, where the leading field is multikey.
coll.drop();
assert.commandWorked(coll.createIndex({a: 1, b: 1}));
-assert.writeOK(coll.insert({a: [2, 3], b: 1}));
-assert.writeOK(coll.insert({a: [3, 4], b: 8}));
-assert.writeOK(coll.insert({a: [3, 5], b: 7}));
+assert.commandWorked(coll.insert({a: [2, 3], b: 1}));
+assert.commandWorked(coll.insert({a: [3, 4], b: 8}));
+assert.commandWorked(coll.insert({a: [3, 5], b: 7}));
result = coll.distinct("b", {a: 3});
assert.eq([1, 7, 8], result.sort());
@@ -91,9 +91,9 @@ assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
// Test distinct over a trailing non-multikey dotted path where the leading field is multikey.
coll.drop();
assert.commandWorked(coll.createIndex({a: 1, "b.c": 1}));
-assert.writeOK(coll.insert({a: [2, 3], b: {c: 1}}));
-assert.writeOK(coll.insert({a: [3, 4], b: {c: 8}}));
-assert.writeOK(coll.insert({a: [3, 5], b: {c: 7}}));
+assert.commandWorked(coll.insert({a: [2, 3], b: {c: 1}}));
+assert.commandWorked(coll.insert({a: [3, 4], b: {c: 8}}));
+assert.commandWorked(coll.insert({a: [3, 5], b: {c: 7}}));
result = coll.distinct("b.c", {a: 3});
assert.eq([1, 7, 8], result.sort());
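
The plan assertions in this file distinguish DISTINCT_SCAN, which skips directly between unique key values and is only legal when the distinct field itself is not multikey, from a plain IXSCAN, which must visit every entry. A minimal sketch of checking the chosen plan, assuming a mongo shell session and jstests/libs/analyze_plan.js (collection name illustrative):

    load("jstests/libs/analyze_plan.js");  // for planHasStage()

    var c = db.distinct_scan_sketch;
    c.drop();
    assert.commandWorked(c.createIndex({a: 1}));
    assert.commandWorked(c.insert({a: 1}));
    assert.commandWorked(c.insert({a: 2}));
    assert.commandWorked(c.insert({a: 3}));
    assert.eq([2, 3], c.distinct("a", {a: {$gte: 2}}).sort());
    // "a" is never an array here, so the optimizer can use a DISTINCT_SCAN.
    var explain = c.explain("queryPlanner").distinct("a", {a: {$gte: 2}});
    assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));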
diff --git a/jstests/core/doc_validation.js b/jstests/core/doc_validation.js
index 57f99adf48c..6d6af5dfe5d 100644
--- a/jstests/core/doc_validation.js
+++ b/jstests/core/doc_validation.js
@@ -38,10 +38,10 @@ function runInsertUpdateValidationTest(validator) {
assert.commandWorked(db.createCollection(collName, {validator: validator}));
// Insert and upsert documents that will pass validation.
- assert.writeOK(coll.insert({_id: "valid1", a: 1}));
- assert.writeOK(coll.update({_id: "valid2"}, {_id: "valid2", a: 2}, {upsert: true}));
- assert.writeOK(coll.runCommand("findAndModify",
- {query: {_id: "valid3"}, update: {$set: {a: 3}}, upsert: true}));
+ assert.commandWorked(coll.insert({_id: "valid1", a: 1}));
+ assert.commandWorked(coll.update({_id: "valid2"}, {_id: "valid2", a: 2}, {upsert: true}));
+ assert.commandWorked(coll.runCommand(
+ "findAndModify", {query: {_id: "valid3"}, update: {$set: {a: 3}}, upsert: true}));
// Insert and upsert documents that will not pass validation.
assertFailsValidation(coll.insert({_id: "invalid3", b: 1}));
@@ -50,32 +50,32 @@ function runInsertUpdateValidationTest(validator) {
"findAndModify", {query: {_id: "invalid4"}, update: {$set: {b: 3}}, upsert: true}));
// Assert that we can remove the document that passed validation.
- assert.writeOK(coll.remove({_id: "valid1"}));
+ assert.commandWorked(coll.remove({_id: "valid1"}));
// Check that we can only update documents that pass validation. We insert a valid and an
// invalid document, then set the validator.
coll.drop();
- assert.writeOK(coll.insert({_id: "valid1", a: 1}));
- assert.writeOK(coll.insert({_id: "invalid2", b: 1}));
+ assert.commandWorked(coll.insert({_id: "valid1", a: 1}));
+ assert.commandWorked(coll.insert({_id: "invalid2", b: 1}));
assert.commandWorked(coll.runCommand("collMod", {validator: validator}));
// Assert that updates on a conforming document succeed when they affect fields not involved
// in validator.
// Add a new field.
- assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: 1}}));
- assert.writeOK(
+ assert.commandWorked(coll.update({_id: "valid1"}, {$set: {z: 1}}));
+ assert.commandWorked(
coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {y: 2}}}));
// In-place update.
- assert.writeOK(coll.update({_id: "valid1"}, {$inc: {z: 1}}));
- assert.writeOK(
+ assert.commandWorked(coll.update({_id: "valid1"}, {$inc: {z: 1}}));
+ assert.commandWorked(
coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$inc: {y: 1}}}));
// Out-of-place update.
- assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: array}}));
- assert.writeOK(
+ assert.commandWorked(coll.update({_id: "valid1"}, {$set: {z: array}}));
+ assert.commandWorked(
coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {y: array}}}));
// No-op update.
- assert.writeOK(coll.update({_id: "valid1"}, {a: 1}));
- assert.writeOK(
+ assert.commandWorked(coll.update({_id: "valid1"}, {a: 1}));
+ assert.commandWorked(
coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {a: 1}}}));
// Verify those same updates will fail on non-conforming document.
@@ -90,40 +90,40 @@ function runInsertUpdateValidationTest(validator) {
coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {y: array}}}));
// A no-op update of an invalid doc will succeed.
- assert.writeOK(coll.update({_id: "invalid2"}, {$set: {b: 1}}));
- assert.writeOK(
+ assert.commandWorked(coll.update({_id: "invalid2"}, {$set: {b: 1}}));
+ assert.commandWorked(
coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {b: 1}}}));
// Verify that we can't make a conforming document fail validation, but can update a
// non-conforming document to pass validation.
coll.drop();
- assert.writeOK(coll.insert({_id: "valid1", a: 1}));
- assert.writeOK(coll.insert({_id: "invalid2", b: 1}));
- assert.writeOK(coll.insert({_id: "invalid3", b: 1}));
+ assert.commandWorked(coll.insert({_id: "valid1", a: 1}));
+ assert.commandWorked(coll.insert({_id: "invalid2", b: 1}));
+ assert.commandWorked(coll.insert({_id: "invalid3", b: 1}));
assert.commandWorked(coll.runCommand("collMod", {validator: validator}));
assertFailsValidation(coll.update({_id: "valid1"}, {$unset: {a: 1}}));
- assert.writeOK(coll.update({_id: "invalid2"}, {$set: {a: 1}}));
+ assert.commandWorked(coll.update({_id: "invalid2"}, {$set: {a: 1}}));
assertFailsValidation(
coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$unset: {a: 1}}}));
- assert.writeOK(
+ assert.commandWorked(
coll.runCommand("findAndModify", {query: {_id: "invalid3"}, update: {$set: {a: 1}}}));
// Modify the collection to remove the document validator.
assert.commandWorked(coll.runCommand("collMod", {validator: {}}));
// Verify that no validation is applied to updates.
- assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: 1}}));
- assert.writeOK(coll.update({_id: "invalid2"}, {$set: {z: 1}}));
- assert.writeOK(coll.update({_id: "valid1"}, {$unset: {a: 1}}));
- assert.writeOK(coll.update({_id: "invalid2"}, {$set: {a: 1}}));
- assert.writeOK(
+ assert.commandWorked(coll.update({_id: "valid1"}, {$set: {z: 1}}));
+ assert.commandWorked(coll.update({_id: "invalid2"}, {$set: {z: 1}}));
+ assert.commandWorked(coll.update({_id: "valid1"}, {$unset: {a: 1}}));
+ assert.commandWorked(coll.update({_id: "invalid2"}, {$set: {a: 1}}));
+ assert.commandWorked(
coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {z: 2}}}));
- assert.writeOK(
+ assert.commandWorked(
coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {z: 2}}}));
- assert.writeOK(
+ assert.commandWorked(
coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$unset: {a: 1}}}));
- assert.writeOK(
+ assert.commandWorked(
coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {a: 1}}}));
}
@@ -143,14 +143,14 @@ function runCollationValidationTest(validator) {
collName, {validator: validator, collation: {locale: "en_US", strength: 2}}));
// An insert that matches the validator should succeed.
- assert.writeOK(coll.insert({_id: 0, a: "xyz", b: "foo"}));
+ assert.commandWorked(coll.insert({_id: 0, a: "xyz", b: "foo"}));
const isJSONSchema = validator.hasOwnProperty("$jsonSchema");
// A normal validator should respect the collation and the inserts should succeed. A JSON
// Schema validator ignores the collation and the inserts should fail.
const assertCorrectResult =
- isJSONSchema ? res => assertFailsValidation(res) : res => assert.writeOK(res);
+ isJSONSchema ? res => assertFailsValidation(res) : res => assert.commandWorked(res);
assertCorrectResult(coll.insert({a: "XYZ"}));
assertCorrectResult(coll.insert({a: "XyZ", b: "foo"}));
assertCorrectResult(coll.update({_id: 0}, {a: "xyZ", b: "foo"}));
@@ -180,10 +180,10 @@ runCollationValidationTest({$jsonSchema: {properties: {a: {enum: ["xyz"]}}}});
// The validator is allowed to contain $expr.
coll.drop();
assert.commandWorked(db.createCollection(collName, {validator: {$expr: {$eq: ["$a", 5]}}}));
-assert.writeOK(coll.insert({a: 5}));
+assert.commandWorked(coll.insert({a: 5}));
assertFailsValidation(coll.insert({a: 4}));
assert.commandWorked(db.runCommand({"collMod": collName, "validator": {$expr: {$eq: ["$a", 4]}}}));
-assert.writeOK(coll.insert({a: 4}));
+assert.commandWorked(coll.insert({a: 4}));
assertFailsValidation(coll.insert({a: 5}));
// The validator supports $expr with the date extraction expressions (with a timezone
@@ -192,7 +192,7 @@ coll.drop();
assert.commandWorked(db.createCollection(
collName,
{validator: {$expr: {$eq: [1, {$dayOfMonth: {date: "$a", timezone: "America/New_York"}}]}}}));
-assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")}));
+assert.commandWorked(coll.insert({a: ISODate("2017-10-01T22:00:00")}));
assertFailsValidation(coll.insert({a: ISODate("2017-10-01T00:00:00")}));
// The validator supports $expr with a $dateToParts expression.
@@ -215,7 +215,7 @@ assert.commandWorked(db.createCollection(collName, {
}
}
}));
-assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")}));
+assert.commandWorked(coll.insert({a: ISODate("2017-10-01T22:00:00")}));
assertFailsValidation(coll.insert({a: ISODate("2017-10-01T00:00:00")}));
// The validator supports $expr with $dateToString expression.
@@ -236,7 +236,7 @@ assert.commandWorked(db.createCollection(collName, {
}
}
}));
-assert.writeOK(coll.insert({date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "UTC"}));
+assert.commandWorked(coll.insert({date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "UTC"}));
assertFailsValidation(
coll.insert({date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "America/New_York"}));
@@ -252,7 +252,7 @@ assert.commandWorked(db.createCollection(collName, {
}
}
}));
-assert.writeOK(coll.insert({_id: 0, year: 2017, month: 6, day: 19, timezone: "Asia/Tokyo"}));
+assert.commandWorked(coll.insert({_id: 0, year: 2017, month: 6, day: 19, timezone: "Asia/Tokyo"}));
assertFailsValidation(
coll.insert({_id: 1, year: 2022, month: 1, day: 1, timezone: "America/New_York"}));
@@ -268,16 +268,16 @@ assert.commandWorked(db.createCollection(collName, {
}
}
}));
-assert.writeOK(coll.insert({_id: 0, date: "2017-07-04T11:56:02"}));
+assert.commandWorked(coll.insert({_id: 0, date: "2017-07-04T11:56:02"}));
assertFailsValidation(coll.insert({_id: 1, date: "2015-02-02T11:00:00"}));
// The validator can contain an $expr that may throw at runtime.
coll.drop();
assert.commandWorked(
db.createCollection(collName, {validator: {$expr: {$eq: ["$a", {$divide: [1, "$b"]}]}}}));
-assert.writeOK(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
let res = coll.insert({a: 1, b: 0});
assert.writeError(res);
assert.eq(res.getWriteError().code, 16608);
-assert.writeOK(coll.insert({a: -1, b: -1}));
+assert.commandWorked(coll.insert({a: -1, b: -1}));
})();
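
The final doc_validation.js case deserves a note: a validator containing $expr can throw at runtime (here, $divide by zero), and the server reports that as a write error with the $divide error code rather than as an ordinary validation failure. A minimal sketch, assuming a mongo shell session (collection name illustrative; the error code is the one asserted above):

    var c = db.expr_validator_sketch;
    c.drop();
    assert.commandWorked(db.createCollection(
        c.getName(), {validator: {$expr: {$eq: ["$a", {$divide: [1, "$b"]}]}}}));
    assert.commandWorked(c.insert({a: 1, b: 1}));  // 1 / 1 == 1, so validation passes
    var res = c.insert({a: 1, b: 0});              // $divide throws on division by zero
    assert.writeError(res);
    assert.eq(16608, res.getWriteError().code);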
diff --git a/jstests/core/dotted_path_in_null.js b/jstests/core/dotted_path_in_null.js
index f32f60a9ff2..7c8baf42709 100644
--- a/jstests/core/dotted_path_in_null.js
+++ b/jstests/core/dotted_path_in_null.js
@@ -4,11 +4,11 @@
const coll = db.dotted_path_in_null;
coll.drop();
-assert.writeOK(coll.insert({_id: 1, a: [{b: 5}]}));
-assert.writeOK(coll.insert({_id: 2, a: [{}]}));
-assert.writeOK(coll.insert({_id: 3, a: []}));
-assert.writeOK(coll.insert({_id: 4, a: [{}, {b: 5}]}));
-assert.writeOK(coll.insert({_id: 5, a: [5, {b: 5}]}));
+assert.commandWorked(coll.insert({_id: 1, a: [{b: 5}]}));
+assert.commandWorked(coll.insert({_id: 2, a: [{}]}));
+assert.commandWorked(coll.insert({_id: 3, a: []}));
+assert.commandWorked(coll.insert({_id: 4, a: [{}, {b: 5}]}));
+assert.commandWorked(coll.insert({_id: 5, a: [5, {b: 5}]}));
function getIds(query) {
let ids = [];
diff --git a/jstests/core/drop_index.js b/jstests/core/drop_index.js
index 83e03c5a8fd..60dbec0e3ea 100644
--- a/jstests/core/drop_index.js
+++ b/jstests/core/drop_index.js
@@ -32,7 +32,7 @@ function assertIndexes(expectedIndexNames, msg) {
}
}
-assert.writeOK(t.insert({_id: 1, a: 2, b: 3, c: 1, d: 1, e: 1}));
+assert.commandWorked(t.insert({_id: 1, a: 2, b: 3, c: 1, d: 1, e: 1}));
assertIndexes([], 'inserting test document');
assert.commandWorked(t.createIndex({a: 1}));
diff --git a/jstests/core/elemMatchProjection.js b/jstests/core/elemMatchProjection.js
index 390b7aa5d17..a35a11a0451 100644
--- a/jstests/core/elemMatchProjection.js
+++ b/jstests/core/elemMatchProjection.js
@@ -66,10 +66,10 @@ for (let i = 0; i < 100; i++) {
bulk.insert({_id: nextId(), group: 13, x: [{a: 1, b: 1}, {a: 1, b: 2}]});
bulk.insert({_id: nextId(), group: 13, x: [{a: 1, b: 2}, {a: 1, b: 1}]});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
-assert.writeOK(coll.createIndex({group: 1, 'y.d': 1}));
-assert.writeOK(coll.createIndex({group: 1, covered: 1})); // for covered index test
+assert.commandWorked(coll.createIndex({group: 1, 'y.d': 1}));
+assert.commandWorked(coll.createIndex({group: 1, covered: 1})); // for covered index test
// Tests for the $-positional operator.
assert.eq(1,
diff --git a/jstests/core/elemmatch_or_pushdown.js b/jstests/core/elemmatch_or_pushdown.js
index 86888996b19..2edef4d0678 100644
--- a/jstests/core/elemmatch_or_pushdown.js
+++ b/jstests/core/elemmatch_or_pushdown.js
@@ -8,12 +8,12 @@
const coll = db.elemmatch_or_pushdown;
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: 1, b: [{c: 4}]}));
-assert.writeOK(coll.insert({_id: 1, a: 2, b: [{c: 4}]}));
-assert.writeOK(coll.insert({_id: 2, a: 2, b: [{c: 5}]}));
-assert.writeOK(coll.insert({_id: 3, a: 1, b: [{c: 5}]}));
-assert.writeOK(coll.insert({_id: 4, a: 1, b: [{c: 6}]}));
-assert.writeOK(coll.insert({_id: 5, a: 1, b: [{c: 7}]}));
+assert.commandWorked(coll.insert({_id: 0, a: 1, b: [{c: 4}]}));
+assert.commandWorked(coll.insert({_id: 1, a: 2, b: [{c: 4}]}));
+assert.commandWorked(coll.insert({_id: 2, a: 2, b: [{c: 5}]}));
+assert.commandWorked(coll.insert({_id: 3, a: 1, b: [{c: 5}]}));
+assert.commandWorked(coll.insert({_id: 4, a: 1, b: [{c: 6}]}));
+assert.commandWorked(coll.insert({_id: 5, a: 1, b: [{c: 7}]}));
assert.commandWorked(coll.createIndex({a: 1, "b.c": 1}));
assert.eq(coll.find({a: 1, b: {$elemMatch: {$or: [{c: 4}, {c: 5}]}}}).sort({_id: 1}).toArray(),
@@ -24,14 +24,14 @@ assert.eq(coll.find({a: 1, $or: [{a: 2}, {b: {$elemMatch: {$or: [{c: 4}, {c: 5}]
[{_id: 0, a: 1, b: [{c: 4}]}, {_id: 3, a: 1, b: [{c: 5}]}]);
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: 5, b: [{c: [{f: 8}], d: 6}]}));
-assert.writeOK(coll.insert({_id: 1, a: 4, b: [{c: [{f: 8}], d: 6}]}));
-assert.writeOK(coll.insert({_id: 2, a: 5, b: [{c: [{f: 8}], d: 7}]}));
-assert.writeOK(coll.insert({_id: 3, a: 4, b: [{c: [{f: 9}], d: 6}]}));
-assert.writeOK(coll.insert({_id: 4, a: 5, b: [{c: [{f: 8}], e: 7}]}));
-assert.writeOK(coll.insert({_id: 5, a: 4, b: [{c: [{f: 8}], e: 7}]}));
-assert.writeOK(coll.insert({_id: 6, a: 5, b: [{c: [{f: 8}], e: 8}]}));
-assert.writeOK(coll.insert({_id: 7, a: 5, b: [{c: [{f: 9}], e: 7}]}));
+assert.commandWorked(coll.insert({_id: 0, a: 5, b: [{c: [{f: 8}], d: 6}]}));
+assert.commandWorked(coll.insert({_id: 1, a: 4, b: [{c: [{f: 8}], d: 6}]}));
+assert.commandWorked(coll.insert({_id: 2, a: 5, b: [{c: [{f: 8}], d: 7}]}));
+assert.commandWorked(coll.insert({_id: 3, a: 4, b: [{c: [{f: 9}], d: 6}]}));
+assert.commandWorked(coll.insert({_id: 4, a: 5, b: [{c: [{f: 8}], e: 7}]}));
+assert.commandWorked(coll.insert({_id: 5, a: 4, b: [{c: [{f: 8}], e: 7}]}));
+assert.commandWorked(coll.insert({_id: 6, a: 5, b: [{c: [{f: 8}], e: 8}]}));
+assert.commandWorked(coll.insert({_id: 7, a: 5, b: [{c: [{f: 9}], e: 7}]}));
assert.commandWorked(coll.createIndex({"b.d": 1, "b.c.f": 1}));
assert.commandWorked(coll.createIndex({"b.e": 1, "b.c.f": 1}));
diff --git a/jstests/core/ensure_sorted.js b/jstests/core/ensure_sorted.js
index 037eda45c19..fbdde2609df 100644
--- a/jstests/core/ensure_sorted.js
+++ b/jstests/core/ensure_sorted.js
@@ -11,16 +11,16 @@ var coll = db.ensure_sorted;
coll.drop();
assert.commandWorked(coll.createIndex({a: 1, b: 1}));
-assert.writeOK(coll.insert({a: 1, b: 4}));
-assert.writeOK(coll.insert({a: 2, b: 3}));
-assert.writeOK(coll.insert({a: 3, b: 2}));
-assert.writeOK(coll.insert({a: 4, b: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 4}));
+assert.commandWorked(coll.insert({a: 2, b: 3}));
+assert.commandWorked(coll.insert({a: 3, b: 2}));
+assert.commandWorked(coll.insert({a: 4, b: 1}));
var cursor = coll.find({a: {$lt: 5}}).sort({b: -1}).batchSize(2);
cursor.next(); // {a: 1, b: 4}.
cursor.next(); // {a: 2, b: 3}.
-assert.writeOK(coll.update({b: 2}, {$set: {b: 5}}));
+assert.commandWorked(coll.update({b: 2}, {$set: {b: 5}}));
var result = cursor.next();
// We might either drop the document where "b" is 2 from the result set, or we might include the
diff --git a/jstests/core/exhaust.js b/jstests/core/exhaust.js
index 125c70cefe8..7e6c139d367 100644
--- a/jstests/core/exhaust.js
+++ b/jstests/core/exhaust.js
@@ -8,7 +8,7 @@ c.drop();
const docCount = 4;
for (var i = 0; i < docCount; i++) {
- assert.writeOK(c.insert({a: i}));
+ assert.commandWorked(c.insert({a: i}));
}
// Check that the query works without exhaust set
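
For context on exhaust.js: an exhaust cursor asks the server to stream every remaining batch without per-batch getMore round trips. A sketch of how the legacy shell requests it, assuming the DBQuery.Option.exhaust flag (the flag name is the shell's, not shown in this hunk):

// Exhaust mode: the server pushes all batches after the initial reply.
const exhaustColl = db.exhaust_sketch;
exhaustColl.drop();
for (let i = 0; i < 4; i++) {
    assert.commandWorked(exhaustColl.insert({a: i}));
}
assert.eq(4, exhaustColl.find().addOption(DBQuery.Option.exhaust).itcount());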
diff --git a/jstests/core/existsa.js b/jstests/core/existsa.js
index 66d0ded50d4..d1079310d71 100644
--- a/jstests/core/existsa.js
+++ b/jstests/core/existsa.js
@@ -7,9 +7,9 @@
const coll = db.jstests_existsa;
coll.drop();
-assert.writeOK(coll.insert({}));
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({a: {x: 1}, b: 1}));
+assert.commandWorked(coll.insert({}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: {x: 1}, b: 1}));
let indexKeySpec = {};
let indexKeyField = '';
@@ -89,9 +89,9 @@ assertExists({$or: [{a: {$exists: true}}]}); // $exists:true not
// Behavior is similar with $elemMatch.
coll.drop();
-assert.writeOK(coll.insert({a: [{}]}));
-assert.writeOK(coll.insert({a: [{b: 1}]}));
-assert.writeOK(coll.insert({a: [{b: [1]}]}));
+assert.commandWorked(coll.insert({a: [{}]}));
+assert.commandWorked(coll.insert({a: [{b: 1}]}));
+assert.commandWorked(coll.insert({a: [{b: [1]}]}));
setIndex('a.b');
assertMissing({a: {$elemMatch: {b: {$exists: false}}}});
@@ -105,7 +105,7 @@ assertExistsUnindexed({'a.b': {$elemMatch: {$gt: 0, $not: {$exists: false}}}}, 1
// A non sparse index will not be disallowed.
coll.drop();
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
coll.ensureIndex({a: 1});
assert.eq(1, coll.find({a: {$exists: false}}).itcount());
})();
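
The existsa.js assertions hinge on sparseness: a sparse index stores no entry for documents missing the indexed field, so a {$exists: false} predicate cannot be answered from it, whereas a regular index keeps a null entry and remains eligible. The non-sparse case from the final hunk, restated as a self-contained snippet:

// A non-sparse index indexes missing fields as null, so the $exists:false
// query can still use it and return the empty document.
const existsColl = db.existsa_sketch;
existsColl.drop();
assert.commandWorked(existsColl.insert({}));
assert.commandWorked(existsColl.createIndex({a: 1}));
assert.eq(1, existsColl.find({a: {$exists: false}}).itcount());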
diff --git a/jstests/core/explain_agg_write_concern.js b/jstests/core/explain_agg_write_concern.js
index cf28b097632..9ff556489fa 100644
--- a/jstests/core/explain_agg_write_concern.js
+++ b/jstests/core/explain_agg_write_concern.js
@@ -20,7 +20,7 @@ let outColl = db[collName + "_out"];
coll.drop();
outColl.drop();
-assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 1}));
// Agg should accept write concern if the last stage is a $out.
assert.commandWorked(db.runCommand({
diff --git a/jstests/core/explain_distinct.js b/jstests/core/explain_distinct.js
index 1c4d6612acb..f591d24fded 100644
--- a/jstests/core/explain_distinct.js
+++ b/jstests/core/explain_distinct.js
@@ -32,8 +32,8 @@ assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"));
// Insert the data to perform distinct() on.
for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({a: 1, b: 1}));
- assert.writeOK(coll.insert({a: 2, c: 1}));
+ assert.commandWorked(coll.insert({a: 1, b: 1}));
+ assert.commandWorked(coll.insert({a: 2, c: 1}));
}
assert.commandFailed(runDistinctExplain(coll, {}, {})); // Bad keyString.
diff --git a/jstests/core/explain_execution_error.js b/jstests/core/explain_execution_error.js
index f80af65b0c0..ec0dfba44bd 100644
--- a/jstests/core/explain_execution_error.js
+++ b/jstests/core/explain_execution_error.js
@@ -72,7 +72,7 @@ while (bigStr.length < (1024 * 1024)) {
// Make a collection that is about 120 MB * number of shards.
const numShards = FixtureHelpers.numberOfShardsForCollection(t);
for (var i = 0; i < 120 * numShards; i++) {
- assert.writeOK(t.insert({a: bigStr, b: 1, c: i}));
+ assert.commandWorked(t.insert({a: bigStr, b: 1, c: i}));
}
// A query which sorts the whole collection by "b" should throw an error due to hitting the
diff --git a/jstests/core/explain_find_and_modify.js b/jstests/core/explain_find_and_modify.js
index 8b7c65d519e..523a8d942a9 100644
--- a/jstests/core/explain_find_and_modify.js
+++ b/jstests/core/explain_find_and_modify.js
@@ -61,7 +61,7 @@ assert.commandFailed(db.runCommand({remove: true, new: true}));
// 4. Explaining findAndModify should not modify any contents of the collection.
var onlyDoc = {_id: 0, i: 1};
-assert.writeOK(t.insert(onlyDoc));
+assert.commandWorked(t.insert(onlyDoc));
// Explaining a delete should not delete anything.
var matchingRemoveCmd = {findAndModify: cName, remove: true, query: {_id: onlyDoc._id}};
diff --git a/jstests/core/explain_multikey.js b/jstests/core/explain_multikey.js
index 1ec20bb4552..e342203e503 100644
--- a/jstests/core/explain_multikey.js
+++ b/jstests/core/explain_multikey.js
@@ -31,7 +31,7 @@ function createIndexAndRunExplain(testOptions) {
coll.drop();
assert.commandWorked(coll.createIndex(keyPattern));
- assert.writeOK(coll.insert(testOptions.docToInsert));
+ assert.commandWorked(coll.insert(testOptions.docToInsert));
var explain = db.runCommand({explain: testOptions.commandObj});
assert.commandWorked(explain);
diff --git a/jstests/core/expr.js b/jstests/core/expr.js
index 78ef8b87f0c..4b39d05bc94 100644
--- a/jstests/core/expr.js
+++ b/jstests/core/expr.js
@@ -19,7 +19,7 @@ const isMongos = (isMaster.msg === "isdbgrid");
//
coll.drop();
-assert.writeOK(coll.insert({a: 0}));
+assert.commandWorked(coll.insert({a: 0}));
assert.eq(1, coll.aggregate([{$match: {$expr: {$eq: ["$a", 0]}}}]).itcount());
assert.throws(function() {
coll.aggregate([{$match: {$expr: {$eq: ["$a", "$$unbound"]}}}]);
@@ -33,7 +33,7 @@ assert.throws(function() {
//
coll.drop();
-assert.writeOK(coll.insert({a: 0}));
+assert.commandWorked(coll.insert({a: 0}));
assert.eq(1, coll.find({$expr: {$eq: ["$a", 0]}}).count());
assert.throws(function() {
coll.find({$expr: {$eq: ["$a", "$$unbound"]}}).count();
@@ -47,7 +47,7 @@ assert.throws(function() {
//
coll.drop();
-assert.writeOK(coll.insert({a: 0}));
+assert.commandWorked(coll.insert({a: 0}));
assert.eq(1, coll.distinct("a", {$expr: {$eq: ["$a", 0]}}).length);
assert.throws(function() {
coll.distinct("a", {$expr: {$eq: ["$a", "$$unbound"]}});
@@ -62,12 +62,12 @@ assert.throws(function() {
// $expr is allowed in query.
coll.drop();
-assert.writeOK(coll.insert({a: 0}));
+assert.commandWorked(coll.insert({a: 0}));
assert.eq(1, coll.find({$expr: {$eq: ["$a", 0]}}).itcount());
// $expr with time zone expression across getMore (SERVER-31664).
coll.drop();
-assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")}));
+assert.commandWorked(coll.insert({a: ISODate("2017-10-01T22:00:00")}));
let res = assert.commandWorked(db.runCommand({
find: coll.getName(),
@@ -120,7 +120,7 @@ assert.throws(function() {
// $expr is not allowed in $elemMatch projection.
coll.drop();
-assert.writeOK(coll.insert({a: [{b: 5}]}));
+assert.commandWorked(coll.insert({a: [{b: 5}]}));
assert.throws(function() {
coll.find({}, {a: {$elemMatch: {$expr: {$eq: ["$b", 5]}}}}).itcount();
});
@@ -131,7 +131,7 @@ assert.throws(function() {
// $expr is allowed in the query when upsert=false.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: 0}));
+assert.commandWorked(coll.insert({_id: 0, a: 0}));
assert.eq({_id: 0, a: 0, b: 6},
coll.findAndModify(
{query: {_id: 0, $expr: {$eq: ["$a", 0]}}, update: {$set: {b: 6}}, new: true}));
@@ -149,7 +149,7 @@ assert.throws(function() {
// $expr is not allowed in the query when upsert=true.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: 0}));
+assert.commandWorked(coll.insert({_id: 0, a: 0}));
assert.throws(function() {
coll.findAndModify(
{query: {_id: 0, $expr: {$eq: ["$a", 0]}}, update: {$set: {b: 6}}, upsert: true});
@@ -157,7 +157,7 @@ assert.throws(function() {
// $expr is not allowed in $pull filter.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: 5}]}));
assert.throws(function() {
coll.findAndModify({query: {_id: 0}, update: {$pull: {a: {$expr: {$eq: ["$b", 5]}}}}});
});
@@ -165,7 +165,7 @@ assert.throws(function() {
// $expr is not allowed in arrayFilters.
if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [{b: 5}]}));
assert.throws(function() {
coll.findAndModify({
query: {_id: 0},
@@ -180,7 +180,7 @@ if (db.getMongo().writeMode() === "commands") {
//
coll.drop();
-assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, a: 0}));
+assert.commandWorked(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, a: 0}));
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
assert.eq(1,
coll.aggregate({
@@ -215,7 +215,7 @@ assert.throws(() => coll.aggregate({
//
coll.drop();
-assert.writeOK(coll.insert({a: 0}));
+assert.commandWorked(coll.insert({a: 0}));
let mapReduceOut = coll.mapReduce(
function() {
emit(this.a, 1);
@@ -252,18 +252,18 @@ assert.throws(function() {
//
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: 0}));
+assert.commandWorked(coll.insert({_id: 0, a: 0}));
let writeRes = coll.remove({_id: 0, $expr: {$eq: ["$a", 0]}});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
assert.writeError(coll.remove({_id: 0, $expr: {$eq: ["$a", "$$unbound"]}}));
-assert.writeOK(coll.insert({_id: 0, a: 0}));
+assert.commandWorked(coll.insert({_id: 0, a: 0}));
assert.writeError(coll.remove({_id: 0, $expr: {$divide: [1, "$a"]}}));
// Any writes preceding the write that fails to parse are executed.
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
-assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 1}));
writeRes = db.runCommand({
delete: coll.getName(),
deletes: [{q: {_id: 0}, limit: 1}, {q: {$expr: "$$unbound"}, limit: 1}]
@@ -278,8 +278,8 @@ assert.eq(writeRes.n, 1, tojson(writeRes));
// $expr is allowed in the query when upsert=false.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: 0}));
-assert.writeOK(coll.update({_id: 0, $expr: {$eq: ["$a", 0]}}, {$set: {b: 6}}));
+assert.commandWorked(coll.insert({_id: 0, a: 0}));
+assert.commandWorked(coll.update({_id: 0, $expr: {$eq: ["$a", 0]}}, {$set: {b: 6}}));
assert.eq({_id: 0, a: 0, b: 6}, coll.findOne({_id: 0}));
// $expr with unbound variable fails.
@@ -290,18 +290,18 @@ assert.writeError(coll.update({_id: 0, $expr: {$divide: [1, "$a"]}}, {$set: {b:
// $expr is not allowed in the query when upsert=true.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: 5}));
+assert.commandWorked(coll.insert({_id: 0, a: 5}));
assert.writeError(coll.update({_id: 0, $expr: {$eq: ["$a", 5]}}, {$set: {b: 6}}, {upsert: true}));
// $expr is not allowed in $pull filter.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: 5}]}));
assert.writeError(coll.update({_id: 0}, {$pull: {a: {$expr: {$eq: ["$b", 5]}}}}));
// $expr is not allowed in arrayFilters.
if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [{b: 5}]}));
assert.writeError(coll.update({_id: 0},
{$set: {"a.$[i].b": 6}},
{arrayFilters: [{"i.b": 5, $expr: {$eq: ["$i.b", 5]}}]}));
@@ -309,8 +309,8 @@ if (db.getMongo().writeMode() === "commands") {
// Any writes preceding the write that fails to parse are executed.
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
-assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 1}));
writeRes = db.runCommand({
update: coll.getName(),
updates: [{q: {_id: 0}, u: {$set: {b: 6}}}, {q: {$expr: "$$unbound"}, u: {$set: {b: 6}}}]
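
Both batch tests in expr.js (the delete and update variants) pin down ordered-write semantics: statements ahead of the one that fails to parse are applied and counted in n, and everything after it is skipped. The update variant, condensed:

// Ordered batch: the first update lands; the second, which references an
// unbound variable, fails to parse and is reported as a write error.
const exprColl = db.expr_batch_sketch;
exprColl.drop();
assert.commandWorked(exprColl.insert({_id: 0}));
assert.commandWorked(exprColl.insert({_id: 1}));
const batchRes = db.runCommand({
    update: exprColl.getName(),
    updates: [{q: {_id: 0}, u: {$set: {b: 6}}}, {q: {$expr: "$$unbound"}, u: {$set: {b: 6}}}]
});
assert.eq(1, batchRes.n, tojson(batchRes));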
diff --git a/jstests/core/expr_index_use.js b/jstests/core/expr_index_use.js
index d0eb55656b2..ece507f60d3 100644
--- a/jstests/core/expr_index_use.js
+++ b/jstests/core/expr_index_use.js
@@ -8,43 +8,43 @@ load("jstests/libs/analyze_plan.js");
const coll = db.expr_index_use;
coll.drop();
-assert.writeOK(coll.insert({a: {b: 1}}));
-assert.writeOK(coll.insert({a: {b: [1]}}));
-assert.writeOK(coll.insert({a: [{b: 1}]}));
-assert.writeOK(coll.insert({a: [{b: [1]}]}));
+assert.commandWorked(coll.insert({a: {b: 1}}));
+assert.commandWorked(coll.insert({a: {b: [1]}}));
+assert.commandWorked(coll.insert({a: [{b: 1}]}));
+assert.commandWorked(coll.insert({a: [{b: [1]}]}));
assert.commandWorked(coll.createIndex({"a.b": 1}));
-assert.writeOK(coll.insert({c: {d: 1}}));
+assert.commandWorked(coll.insert({c: {d: 1}}));
assert.commandWorked(coll.createIndex({"c.d": 1}));
-assert.writeOK(coll.insert({e: [{f: 1}]}));
+assert.commandWorked(coll.insert({e: [{f: 1}]}));
assert.commandWorked(coll.createIndex({"e.f": 1}));
-assert.writeOK(coll.insert({g: {h: [1]}}));
+assert.commandWorked(coll.insert({g: {h: [1]}}));
assert.commandWorked(coll.createIndex({"g.h": 1}));
-assert.writeOK(coll.insert({i: 1, j: [1]}));
+assert.commandWorked(coll.insert({i: 1, j: [1]}));
assert.commandWorked(coll.createIndex({i: 1, j: 1}));
-assert.writeOK(coll.insert({k: 1, l: "abc"}));
+assert.commandWorked(coll.insert({k: 1, l: "abc"}));
assert.commandWorked(coll.createIndex({k: 1, l: "text"}));
-assert.writeOK(coll.insert({x: 0}));
-assert.writeOK(coll.insert({x: 1, y: 1}));
-assert.writeOK(coll.insert({x: 2, y: 2}));
-assert.writeOK(coll.insert({x: 3, y: 10}));
-assert.writeOK(coll.insert({y: 20}));
+assert.commandWorked(coll.insert({x: 0}));
+assert.commandWorked(coll.insert({x: 1, y: 1}));
+assert.commandWorked(coll.insert({x: 2, y: 2}));
+assert.commandWorked(coll.insert({x: 3, y: 10}));
+assert.commandWorked(coll.insert({y: 20}));
assert.commandWorked(coll.createIndex({x: 1, y: 1}));
-assert.writeOK(coll.insert({w: 123}));
-assert.writeOK(coll.insert({}));
-assert.writeOK(coll.insert({w: null}));
-assert.writeOK(coll.insert({w: undefined}));
-assert.writeOK(coll.insert({w: NaN}));
-assert.writeOK(coll.insert({w: "foo"}));
-assert.writeOK(coll.insert({w: "FOO"}));
-assert.writeOK(coll.insert({w: {z: 1}}));
-assert.writeOK(coll.insert({w: {z: 2}}));
+assert.commandWorked(coll.insert({w: 123}));
+assert.commandWorked(coll.insert({}));
+assert.commandWorked(coll.insert({w: null}));
+assert.commandWorked(coll.insert({w: undefined}));
+assert.commandWorked(coll.insert({w: NaN}));
+assert.commandWorked(coll.insert({w: "foo"}));
+assert.commandWorked(coll.insert({w: "FOO"}));
+assert.commandWorked(coll.insert({w: {z: 1}}));
+assert.commandWorked(coll.insert({w: {z: 2}}));
assert.commandWorked(coll.createIndex({w: 1}));
assert.commandWorked(coll.createIndex({"w.z": 1}));
@@ -232,8 +232,8 @@ assert.throws(
// there is an index with a matching collation and when there isn't.
assert.commandWorked(db.runCommand({drop: coll.getName()}));
assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensitiveCollation}));
-assert.writeOK(coll.insert({a: "foo", b: "bar"}));
-assert.writeOK(coll.insert({a: "FOO", b: "BAR"}));
+assert.commandWorked(coll.insert({a: "foo", b: "bar"}));
+assert.commandWorked(coll.insert({a: "FOO", b: "BAR"}));
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: "simple"}}));
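
The collation setup above deserves a gloss: indexes inherit the collection collation unless one is specified at build time, so {a: 1} is case-insensitive here while {b: 1} was pinned back to the simple collation, and only the former can serve case-insensitive comparisons. A sketch under an assumed case-insensitive spec of {locale: "en_US", strength: 2}:

// Queries with no explicit collation use the collection default, so both
// case variants match; only an index with the same collation can serve this.
const ciColl = db.collation_sketch;
ciColl.drop();
assert.commandWorked(
    db.createCollection(ciColl.getName(), {collation: {locale: "en_US", strength: 2}}));
assert.commandWorked(ciColl.insert({a: "foo"}));
assert.commandWorked(ciColl.insert({a: "FOO"}));
assert.eq(2, ciColl.find({a: "foo"}).itcount());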
diff --git a/jstests/core/field_name_validation.js b/jstests/core/field_name_validation.js
index 72d346a0f0c..3568103e768 100644
--- a/jstests/core/field_name_validation.js
+++ b/jstests/core/field_name_validation.js
@@ -20,10 +20,10 @@ coll.drop();
//
// Test that dotted field names are allowed.
-assert.writeOK(coll.insert({"a.b": 1}));
-assert.writeOK(coll.insert({"_id.a": 1}));
-assert.writeOK(coll.insert({a: {"a.b": 1}}));
-assert.writeOK(coll.insert({_id: {"a.b": 1}}));
+assert.commandWorked(coll.insert({"a.b": 1}));
+assert.commandWorked(coll.insert({"_id.a": 1}));
+assert.commandWorked(coll.insert({a: {"a.b": 1}}));
+assert.commandWorked(coll.insert({_id: {"a.b": 1}}));
// Test that _id cannot be a regex.
assert.writeError(coll.insert({_id: /a/}));
@@ -32,7 +32,7 @@ assert.writeError(coll.insert({_id: /a/}));
assert.writeError(coll.insert({_id: [9]}));
// Test that $-prefixed field names are allowed in embedded objects.
-assert.writeOK(coll.insert({a: {$b: 1}}));
+assert.commandWorked(coll.insert({a: {$b: 1}}));
assert.eq(1, coll.find({"a.$b": 1}).itcount());
// Test that $-prefixed field names are not allowed at the top level.
@@ -49,9 +49,9 @@ assert.writeErrorWithCode(coll.insert({_id: {$b: 1}}), ErrorCodes.DollarPrefixed
assert.writeErrorWithCode(coll.insert({_id: {a: 1, $b: 1}}), ErrorCodes.DollarPrefixedFieldName);
// Should not enforce the same restrictions on an embedded _id field.
-assert.writeOK(coll.insert({a: {_id: [9]}}));
-assert.writeOK(coll.insert({a: {_id: /a/}}));
-assert.writeOK(coll.insert({a: {_id: {$b: 1}}}));
+assert.commandWorked(coll.insert({a: {_id: [9]}}));
+assert.commandWorked(coll.insert({a: {_id: /a/}}));
+assert.commandWorked(coll.insert({a: {_id: {$b: 1}}}));
//
// Update command field name validation.
@@ -59,20 +59,20 @@ assert.writeOK(coll.insert({a: {_id: {$b: 1}}}));
coll.drop();
// Dotted fields are allowed in an update.
-assert.writeOK(coll.update({}, {"a.b": 1}, {upsert: true}));
+assert.commandWorked(coll.update({}, {"a.b": 1}, {upsert: true}));
assert.eq(0, coll.find({"a.b": 1}).itcount());
assert.eq(1, coll.find({}).itcount());
// Dotted fields represent paths in $set.
-assert.writeOK(coll.update({}, {$set: {"a.b": 1}}, {upsert: true}));
+assert.commandWorked(coll.update({}, {$set: {"a.b": 1}}, {upsert: true}));
assert.eq(1, coll.find({"a.b": 1}).itcount());
// Dotted fields represent paths in the query object.
-assert.writeOK(coll.update({"a.b": 1}, {$set: {"a.b": 2}}));
+assert.commandWorked(coll.update({"a.b": 1}, {$set: {"a.b": 2}}));
assert.eq(1, coll.find({"a.b": 2}).itcount());
assert.eq(1, coll.find({a: {b: 2}}).itcount());
-assert.writeOK(coll.update({"a.b": 2}, {"a.b": 3}));
+assert.commandWorked(coll.update({"a.b": 2}, {"a.b": 3}));
assert.eq(0, coll.find({"a.b": 3}).itcount());
// $-prefixed field names are not allowed.
@@ -132,7 +132,7 @@ assert.throws(function() {
//
coll.drop();
-assert.writeOK(coll.insert({_id: {a: 1, b: 2}, "c.d": 3}));
+assert.commandWorked(coll.insert({_id: {a: 1, b: 2}, "c.d": 3}));
// Dotted fields represent paths in an aggregation pipeline.
assert.eq(coll.aggregate([{$match: {"_id.a": 1}}, {$project: {"_id.b": 1}}]).toArray(),
diff --git a/jstests/core/filemd5.js b/jstests/core/filemd5.js
index 9ea70283a73..5f5d876d11a 100644
--- a/jstests/core/filemd5.js
+++ b/jstests/core/filemd5.js
@@ -12,7 +12,7 @@
"use strict";
db.fs.chunks.drop();
-assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "test")}));
+assert.commandWorked(db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "test")}));
assert.commandFailedWithCode(db.runCommand({filemd5: 1, root: "fs"}), ErrorCodes.BadValue);
@@ -21,8 +21,8 @@ assert.commandWorked(db.runCommand({filemd5: 1, root: "fs"}));
assert.commandFailedWithCode(db.runCommand({filemd5: 1, root: "fs", partialOk: 1, md5state: 5}),
50847);
-assert.writeOK(db.fs.chunks.insert({files_id: 2, n: 0}));
+assert.commandWorked(db.fs.chunks.insert({files_id: 2, n: 0}));
assert.commandFailedWithCode(db.runCommand({filemd5: 2, root: "fs"}), 50848);
-assert.writeOK(db.fs.chunks.update({files_id: 2, n: 0}, {$set: {data: 5}}));
+assert.commandWorked(db.fs.chunks.update({files_id: 2, n: 0}, {$set: {data: 5}}));
assert.commandFailedWithCode(db.runCommand({filemd5: 2, root: "fs"}), 50849);
}());
diff --git a/jstests/core/find4.js b/jstests/core/find4.js
index 3721763b358..3d22f6d3fcb 100644
--- a/jstests/core/find4.js
+++ b/jstests/core/find4.js
@@ -4,7 +4,7 @@
const coll = db.find4;
coll.drop();
-assert.writeOK(coll.insert({a: 1123, b: 54332}));
+assert.commandWorked(coll.insert({a: 1123, b: 54332}));
let o = coll.findOne();
assert.eq(1123, o.a, "A");
@@ -23,8 +23,8 @@ assert(!o.a, "I");
assert(coll.drop());
-assert.writeOK(coll.insert({a: 1, b: 1}));
-assert.writeOK(coll.insert({a: 2, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: 2, b: 2}));
assert.eq("1-1,2-2",
coll.find()
.sort({a: 1})
diff --git a/jstests/core/find5.js b/jstests/core/find5.js
index f7e52c0ccc6..f5333654ab2 100644
--- a/jstests/core/find5.js
+++ b/jstests/core/find5.js
@@ -6,8 +6,8 @@
const coll = db.find5;
coll.drop();
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({b: 5}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({b: 5}));
assert.eq(2, coll.find({}, {b: 1}).count(), "A");
@@ -40,7 +40,7 @@ assert.eq(5, second.b, "C8");
assert(coll.drop());
-assert.writeOK(coll.insert({a: 1, b: {c: 2, d: 3, e: 4}}));
+assert.commandWorked(coll.insert({a: 1, b: {c: 2, d: 3, e: 4}}));
assert.eq(2, coll.findOne({}, {"b.c": 1}).b.c, "D");
const o = coll.findOne({}, {"b.c": 1, "b.d": 1});
@@ -51,6 +51,6 @@ assert(!o.b.e, "E 3");
assert(!coll.findOne({}, {"b.c": 1}).b.d, "F");
assert(coll.drop());
-assert.writeOK(coll.insert({a: {b: {c: 1}}}));
+assert.commandWorked(coll.insert({a: {b: {c: 1}}}));
assert.eq(1, coll.findOne({}, {"a.b.c": 1}).a.b.c, "G");
}());
diff --git a/jstests/core/find_and_modify_concurrent_update.js b/jstests/core/find_and_modify_concurrent_update.js
index 9682bea4c65..bc7472259c4 100644
--- a/jstests/core/find_and_modify_concurrent_update.js
+++ b/jstests/core/find_and_modify_concurrent_update.js
@@ -20,7 +20,7 @@ for (var i = 0; i < 3; i++) {
assert.commandWorked(t.ensureIndex({a: 1}));
assert.commandWorked(t.ensureIndex({b: 1}));
- assert.writeOK(t.insert({_id: 1, a: 1, b: 1}));
+ assert.commandWorked(t.insert({_id: 1, a: 1, b: 1}));
var join =
startParallelShell("db.find_and_modify_concurrent.update({a: 1, b: 1}, {$inc: {a: 1}});");
diff --git a/jstests/core/find_and_modify_empty_update.js b/jstests/core/find_and_modify_empty_update.js
index 8bdd7fd43e0..d35b53539f4 100644
--- a/jstests/core/find_and_modify_empty_update.js
+++ b/jstests/core/find_and_modify_empty_update.js
@@ -17,22 +17,22 @@ assert.isnull(ret);
// Test update:{} when a document matches the query. The document is "replaced with the empty
// object" (i.e. all non-_id fields are unset).
coll.remove({});
-assert.writeOK(coll.insert({_id: 0, a: 1}));
+assert.commandWorked(coll.insert({_id: 0, a: 1}));
ret = coll.findAndModify({query: {a: 1}, update: {}});
assert.eq(ret, {_id: 0, a: 1});
assert.eq(coll.findOne({_id: 0}), {_id: 0});
// Test update:{} with new:true.
coll.remove({});
-assert.writeOK(coll.insert({_id: 0, a: 1}));
+assert.commandWorked(coll.insert({_id: 0, a: 1}));
ret = coll.findAndModify({query: {a: 1}, update: {}, new: true});
assert.eq(ret, {_id: 0});
assert.eq(coll.findOne({_id: 0}), {_id: 0});
// Test update:{} with a sort.
coll.remove({});
-assert.writeOK(coll.insert({_id: 0, a: 1}));
-assert.writeOK(coll.insert({_id: 1, a: 1}));
+assert.commandWorked(coll.insert({_id: 0, a: 1}));
+assert.commandWorked(coll.insert({_id: 1, a: 1}));
ret = coll.findAndModify({query: {a: 1}, update: {}, sort: {_id: 1}});
assert.eq(ret, {_id: 0, a: 1});
assert.eq(coll.findOne({_id: 0}), {_id: 0});
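
All three empty-update cases reduce to one rule: update: {} is a replacement-style update, so every field except the immutable _id is discarded. With new: true the returned post-image makes this visible:

// Replacement with {} strips all non-_id fields; new: true returns the result.
const famColl = db.fam_empty_update_sketch;
famColl.drop();
assert.commandWorked(famColl.insert({_id: 0, a: 1}));
assert.eq({_id: 0}, famColl.findAndModify({query: {a: 1}, update: {}, new: true}));
assert.eq({_id: 0}, famColl.findOne({_id: 0}));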
diff --git a/jstests/core/find_dedup.js b/jstests/core/find_dedup.js
index df9dbfa9d12..791ce338c45 100644
--- a/jstests/core/find_dedup.js
+++ b/jstests/core/find_dedup.js
@@ -20,11 +20,11 @@ function checkDedup(query, idArray) {
// Deduping $or
coll.drop();
coll.ensureIndex({a: 1, b: 1});
-assert.writeOK(coll.insert({_id: 1, a: 1, b: 1}));
-assert.writeOK(coll.insert({_id: 2, a: 1, b: 1}));
-assert.writeOK(coll.insert({_id: 3, a: 2, b: 2}));
-assert.writeOK(coll.insert({_id: 4, a: 3, b: 3}));
-assert.writeOK(coll.insert({_id: 5, a: 3, b: 3}));
+assert.commandWorked(coll.insert({_id: 1, a: 1, b: 1}));
+assert.commandWorked(coll.insert({_id: 2, a: 1, b: 1}));
+assert.commandWorked(coll.insert({_id: 3, a: 2, b: 2}));
+assert.commandWorked(coll.insert({_id: 4, a: 3, b: 3}));
+assert.commandWorked(coll.insert({_id: 5, a: 3, b: 3}));
checkDedup({
$or: [
{a: {$gte: 0, $lte: 2}, b: {$gte: 0, $lte: 2}},
@@ -36,8 +36,8 @@ checkDedup({
// Deduping multikey
assert(coll.drop());
-assert.writeOK(coll.insert({_id: 1, a: [1, 2, 3], b: [4, 5, 6]}));
-assert.writeOK(coll.insert({_id: 2, a: [1, 2, 3], b: [4, 5, 6]}));
+assert.commandWorked(coll.insert({_id: 1, a: [1, 2, 3], b: [4, 5, 6]}));
+assert.commandWorked(coll.insert({_id: 2, a: [1, 2, 3], b: [4, 5, 6]}));
assert.eq(2, coll.count());
checkDedup({$or: [{a: {$in: [1, 2]}}, {b: {$in: [4, 5]}}]}, [1, 2]);
diff --git a/jstests/core/find_getmore_bsonsize.js b/jstests/core/find_getmore_bsonsize.js
index 6a19dec4302..9a57e9ef558 100644
--- a/jstests/core/find_getmore_bsonsize.js
+++ b/jstests/core/find_getmore_bsonsize.js
@@ -31,8 +31,8 @@ assert.eq(bigStr.length, 16 * oneMB);
bigStr = bigStr.substring(0, (16 * oneMB) - oneKB);
// Collection has one ~1 MB doc followed by one ~16 MB doc.
-assert.writeOK(coll.insert({_id: 0, padding: smallStr}));
-assert.writeOK(coll.insert({_id: 1, padding: bigStr}));
+assert.commandWorked(coll.insert({_id: 0, padding: smallStr}));
+assert.commandWorked(coll.insert({_id: 1, padding: bigStr}));
// Find command should just return the first doc, as adding the last would create an invalid
// command response document.
@@ -78,7 +78,7 @@ while (bigStr.length < (16 * oneMB)) {
bigStr = bigStr.substring(0, (16 * oneMB) - 32);
var maxSizeDoc = {_id: 0, padding: bigStr};
assert.eq(Object.bsonsize(maxSizeDoc), 16 * oneMB);
-assert.writeOK(coll.insert(maxSizeDoc));
+assert.commandWorked(coll.insert(maxSizeDoc));
cmdRes = db.runCommand({find: collName});
assert.commandWorked(cmdRes);
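
The arithmetic behind the 16 MB document above: for {_id: 0, padding: <string>}, BSON costs 4 bytes of document length plus a 1-byte terminator, 13 bytes for the _id double (type byte, "_id" plus NUL, 8-byte value), and 14 bytes of string framing (type byte, "padding" plus NUL, 4-byte length, trailing NUL), 32 bytes in all, which is why the padding is trimmed to 16 MB minus 32. Verifiable directly:

// 32 bytes of BSON overhead leave a payload of exactly 16 MB - 32.
const oneMB = 1024 * 1024;
let pad = "x";
while (pad.length < 16 * oneMB) {
    pad += pad;  // double until at least 16 MB
}
pad = pad.substring(0, 16 * oneMB - 32);
assert.eq(16 * oneMB, Object.bsonsize({_id: 0, padding: pad}));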
diff --git a/jstests/core/find_getmore_cmd.js b/jstests/core/find_getmore_cmd.js
index 55ad7a4a443..248721ebbef 100644
--- a/jstests/core/find_getmore_cmd.js
+++ b/jstests/core/find_getmore_cmd.js
@@ -12,7 +12,7 @@ var coll = db[collName];
coll.drop();
for (var i = 0; i < 150; i++) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
// Verify result of a find command that specifies none of the optional arguments.
diff --git a/jstests/core/fsync.js b/jstests/core/fsync.js
index bd5526b8bc9..ec2a775c992 100644
--- a/jstests/core/fsync.js
+++ b/jstests/core/fsync.js
@@ -36,7 +36,7 @@ var fsyncLockDB = db.getSisterDB('fsyncLockTestDB');
fsyncLockDB.dropDatabase();
// Test that a single, regular write works as expected.
-assert.writeOK(fsyncLockDB.coll.insert({x: 1}));
+assert.commandWorked(fsyncLockDB.coll.insert({x: 1}));
// Test that fsyncLock doesn't work unless invoked against the admin DB.
var resFail = fsyncLockDB.runCommand({fsync: 1, lock: 1});
@@ -64,7 +64,7 @@ assert(db.currentOp().fsyncLock == null, "fsyncUnlock is not null in db.currentO
// Make sure the db is unlocked and the initial write made it through.
writeOpHandle();
-assert.writeOK(fsyncLockDB.coll.insert({x: 2}));
+assert.commandWorked(fsyncLockDB.coll.insert({x: 2}));
assert.eq(3, fsyncLockDB.coll.count({}));
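
For readers skimming fsync.js: fsyncLock is only accepted against the admin database (the resFail case above checks this), and writes issued while the lock is held block until fsyncUnlock. A sketch of the happy path, assuming the fsyncUnlock command form of modern servers:

// Lock, observe via currentOp, unlock; both commands target the admin DB.
const adminDB = db.getSiblingDB("admin");
assert.commandWorked(adminDB.runCommand({fsync: 1, lock: 1}));
assert.neq(null, db.currentOp().fsyncLock);
assert.commandWorked(adminDB.runCommand({fsyncUnlock: 1}));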
diff --git a/jstests/core/fts1.js b/jstests/core/fts1.js
index 2ce50a22eeb..b1775ff0d30 100644
--- a/jstests/core/fts1.js
+++ b/jstests/core/fts1.js
@@ -13,10 +13,10 @@ assert.commandWorked(coll.createIndex({x: "text"}, {name: "x_text"}));
assert.eq([], queryIDS(coll, "az"), "A0");
-assert.writeOK(coll.insert({_id: 1, x: "az b c"}));
-assert.writeOK(coll.insert({_id: 2, x: "az b"}));
-assert.writeOK(coll.insert({_id: 3, x: "b c"}));
-assert.writeOK(coll.insert({_id: 4, x: "b c d"}));
+assert.commandWorked(coll.insert({_id: 1, x: "az b c"}));
+assert.commandWorked(coll.insert({_id: 2, x: "az b"}));
+assert.commandWorked(coll.insert({_id: 3, x: "b c"}));
+assert.commandWorked(coll.insert({_id: 4, x: "b c d"}));
assert.eq([1, 2, 3, 4], queryIDS(coll, "c az").sort(), "A1");
assert.eq([4], queryIDS(coll, "d"), "A2");
diff --git a/jstests/core/fts_array.js b/jstests/core/fts_array.js
index 16d51981908..93d4ba1cfb8 100644
--- a/jstests/core/fts_array.js
+++ b/jstests/core/fts_array.js
@@ -40,14 +40,14 @@ assert.writeErrorWithCode(coll.insert({words: "macerate", y: [{z: 1}, {z: 2}]}),
// Verify that array fields are allowed when positionally indexed.
coll.drop();
assert.commandWorked(coll.createIndex({"a.0": 1, words: "text"}));
-assert.writeOK(coll.insert({a: [0, 1, 2], words: "dander"}));
+assert.commandWorked(coll.insert({a: [0, 1, 2], words: "dander"}));
assert.eq({a: [0, 1, 2], words: "dander"},
coll.findOne({"a.0": 0, $text: {$search: "dander"}}, {_id: 0, a: 1, words: 1}));
assert.writeErrorWithCode(coll.insert({a: [[8, 9], 1, 2], words: "dander"}),
ErrorCodes.CannotBuildIndexKeys);
coll.drop();
assert.commandWorked(coll.createIndex({"a.0.1": 1, words: "text"}));
-assert.writeOK(coll.insert({a: [[8, 9], 1, 2], words: "dander"}));
+assert.commandWorked(coll.insert({a: [[8, 9], 1, 2], words: "dander"}));
assert.eq({a: [[8, 9], 1, 2], words: "dander"},
coll.findOne({"a.0.1": 9, $text: {$search: "dander"}}, {_id: 0, a: 1, words: 1}));
}());
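
fts_array.js pins down a subtle rule: text indexes reject array values in their non-text fields, but a positional path such as "a.0" names a single element, so the document is rejected only when that element is itself an array. Condensed from the hunks above:

// "a.0" addresses one element: a scalar there is fine, a nested array is not.
const ftsColl = db.fts_array_sketch;
ftsColl.drop();
assert.commandWorked(ftsColl.createIndex({"a.0": 1, words: "text"}));
assert.commandWorked(ftsColl.insert({a: [0, 1, 2], words: "dander"}));
assert.writeErrorWithCode(ftsColl.insert({a: [[8, 9], 1, 2], words: "dander"}),
                          ErrorCodes.CannotBuildIndexKeys);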
diff --git a/jstests/core/fts_casesensitive.js b/jstests/core/fts_casesensitive.js
index 5b0e0832130..6617822d43f 100644
--- a/jstests/core/fts_casesensitive.js
+++ b/jstests/core/fts_casesensitive.js
@@ -5,7 +5,7 @@ var coll = db.fts_casesensitive;
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: "The Quick Brown Fox Jumps Over The Lazy Dog"}));
+assert.commandWorked(coll.insert({_id: 0, a: "The Quick Brown Fox Jumps Over The Lazy Dog"}));
assert.commandWorked(coll.ensureIndex({a: "text"}));
assert.throws(function() {
diff --git a/jstests/core/fts_diacritic_and_caseinsensitive.js b/jstests/core/fts_diacritic_and_caseinsensitive.js
index 476fe9d2ca1..4b79837fed5 100644
--- a/jstests/core/fts_diacritic_and_caseinsensitive.js
+++ b/jstests/core/fts_diacritic_and_caseinsensitive.js
@@ -8,7 +8,7 @@ var coll = db.fts_diacritic_and_caseinsensitive;
coll.drop();
-assert.writeOK(coll.insert(
+assert.commandWorked(coll.insert(
{_id: 0, a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."}));
assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"}));
diff --git a/jstests/core/fts_diacritic_and_casesensitive.js b/jstests/core/fts_diacritic_and_casesensitive.js
index ae3c51c703b..5d5711f640e 100644
--- a/jstests/core/fts_diacritic_and_casesensitive.js
+++ b/jstests/core/fts_diacritic_and_casesensitive.js
@@ -9,7 +9,7 @@ var coll = db.fts_diacritic_and_casesensitive;
coll.drop();
-assert.writeOK(coll.insert(
+assert.commandWorked(coll.insert(
{_id: 0, a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."}));
assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"}));
diff --git a/jstests/core/fts_diacriticsensitive.js b/jstests/core/fts_diacriticsensitive.js
index a377b810ea6..c24b82350e0 100644
--- a/jstests/core/fts_diacriticsensitive.js
+++ b/jstests/core/fts_diacriticsensitive.js
@@ -8,7 +8,7 @@ var coll = db.fts_diacriticsensitive;
coll.drop();
-assert.writeOK(coll.insert(
+assert.commandWorked(coll.insert(
{_id: 0, a: "O próximo vôo à noite sobre o Atlântico, põe freqüentemente o único médico."}));
assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"}));
diff --git a/jstests/core/fts_dotted_prefix_fields.js b/jstests/core/fts_dotted_prefix_fields.js
index efbe3a91abf..4f634c57312 100644
--- a/jstests/core/fts_dotted_prefix_fields.js
+++ b/jstests/core/fts_dotted_prefix_fields.js
@@ -6,8 +6,10 @@
let coll = db.fts_dotted_prefix_fields;
coll.drop();
assert.commandWorked(coll.createIndex({"a.x": 1, "a.y": 1, "b.x": 1, "b.y": 1, words: "text"}));
-assert.writeOK(coll.insert({a: {x: 1, y: 2}, b: {x: 3, y: 4}, words: "lorem ipsum dolor sit"}));
-assert.writeOK(coll.insert({a: {x: 1, y: 2}, b: {x: 5, y: 4}, words: "lorem ipsum dolor sit"}));
+assert.commandWorked(
+ coll.insert({a: {x: 1, y: 2}, b: {x: 3, y: 4}, words: "lorem ipsum dolor sit"}));
+assert.commandWorked(
+ coll.insert({a: {x: 1, y: 2}, b: {x: 5, y: 4}, words: "lorem ipsum dolor sit"}));
assert.eq(
1,
diff --git a/jstests/core/fts_explain.js b/jstests/core/fts_explain.js
index 0b147e5987a..2470d8becf9 100644
--- a/jstests/core/fts_explain.js
+++ b/jstests/core/fts_explain.js
@@ -15,7 +15,7 @@ res = coll.ensureIndex({content: "text"}, {default_language: "none"});
assert.commandWorked(res);
res = coll.insert({content: "some data"});
-assert.writeOK(res);
+assert.commandWorked(res);
const explain =
coll.find({$text: {$search: "\"a\" -b -\"c\""}}, {content: 1, score: {$meta: "textScore"}})
diff --git a/jstests/core/fts_index.js b/jstests/core/fts_index.js
index a1b47dad127..1a567668629 100644
--- a/jstests/core/fts_index.js
+++ b/jstests/core/fts_index.js
@@ -104,7 +104,7 @@ coll.drop();
// Can insert documents with valid language_override into text-indexed collection.
assert.commandWorked(coll.ensureIndex({a: "text"}));
coll.insert({a: ""});
-assert.writeOK(coll.insert({a: "", language: "spanish"}));
+assert.commandWorked(coll.insert({a: "", language: "spanish"}));
coll.drop();
// Can't insert documents with invalid language_override into text-indexed collection.
diff --git a/jstests/core/fts_index2.js b/jstests/core/fts_index2.js
index 51a92e09876..f2d5e23f23f 100644
--- a/jstests/core/fts_index2.js
+++ b/jstests/core/fts_index2.js
@@ -10,7 +10,7 @@ var coll2 = db.fts_index2.renamed;
coll1.drop();
coll2.drop();
-assert.writeOK(coll1.insert({a: {b: "some content"}}));
+assert.commandWorked(coll1.insert({a: {b: "some content"}}));
assert.commandWorked(coll1.ensureIndex({"$**": "text"}));
assert.eq(1, coll1.count({$text: {$search: "content"}}));
diff --git a/jstests/core/fts_index3.js b/jstests/core/fts_index3.js
index 1a700b3ed7b..d23b0e5418f 100644
--- a/jstests/core/fts_index3.js
+++ b/jstests/core/fts_index3.js
@@ -10,18 +10,18 @@ var coll = db.fts_index3;
// verify that $text with the new value returns the document.
coll.drop();
assert.commandWorked(coll.ensureIndex({a: "text"}));
-assert.writeOK(coll.insert({a: "hello"}));
+assert.commandWorked(coll.insert({a: "hello"}));
assert.eq(1, coll.find({$text: {$search: "hello"}}).itcount());
-assert.writeOK(coll.update({}, {$set: {a: "world"}}));
+assert.commandWorked(coll.update({}, {$set: {a: "world"}}));
assert.eq(0, coll.find({$text: {$search: "hello"}}).itcount());
assert.eq(1, coll.find({$text: {$search: "world"}}).itcount());
// 2) Same as #1, but with a wildcard text index.
coll.drop();
assert.commandWorked(coll.ensureIndex({"$**": "text"}));
-assert.writeOK(coll.insert({a: "hello"}));
+assert.commandWorked(coll.insert({a: "hello"}));
assert.eq(1, coll.find({$text: {$search: "hello"}}).itcount());
-assert.writeOK(coll.update({}, {$set: {a: "world"}}));
+assert.commandWorked(coll.update({}, {$set: {a: "world"}}));
assert.eq(0, coll.find({$text: {$search: "hello"}}).itcount());
assert.eq(1, coll.find({$text: {$search: "world"}}).itcount());
@@ -29,18 +29,18 @@ assert.eq(1, coll.find({$text: {$search: "world"}}).itcount());
// index prefix field, and verify that $text with the new value returns the document.
coll.drop();
assert.commandWorked(coll.ensureIndex({a: 1, b: "text"}));
-assert.writeOK(coll.insert({a: 1, b: "hello"}));
+assert.commandWorked(coll.insert({a: 1, b: "hello"}));
assert.eq(1, coll.find({a: 1, $text: {$search: "hello"}}).itcount());
-assert.writeOK(coll.update({}, {$set: {a: 2}}));
+assert.commandWorked(coll.update({}, {$set: {a: 2}}));
assert.eq(0, coll.find({a: 1, $text: {$search: "hello"}}).itcount());
assert.eq(1, coll.find({a: 2, $text: {$search: "hello"}}).itcount());
// 4) Same as #3, but with a wildcard text index.
coll.drop();
assert.commandWorked(coll.ensureIndex({a: 1, "$**": "text"}));
-assert.writeOK(coll.insert({a: 1, b: "hello"}));
+assert.commandWorked(coll.insert({a: 1, b: "hello"}));
assert.eq(1, coll.find({a: 1, $text: {$search: "hello"}}).itcount());
-assert.writeOK(coll.update({}, {$set: {a: 2}}));
+assert.commandWorked(coll.update({}, {$set: {a: 2}}));
assert.eq(0, coll.find({a: 1, $text: {$search: "hello"}}).itcount());
assert.eq(1, coll.find({a: 2, $text: {$search: "hello"}}).itcount());
@@ -48,18 +48,18 @@ assert.eq(1, coll.find({a: 2, $text: {$search: "hello"}}).itcount());
// index suffix field, and verify that $text with the new value returns the document.
coll.drop();
assert.commandWorked(coll.ensureIndex({a: "text", b: 1}));
-assert.writeOK(coll.insert({a: "hello", b: 1}));
+assert.commandWorked(coll.insert({a: "hello", b: 1}));
assert.eq(1, coll.find({b: 1, $text: {$search: "hello"}}).itcount());
-assert.writeOK(coll.update({}, {$set: {b: 2}}));
+assert.commandWorked(coll.update({}, {$set: {b: 2}}));
assert.eq(0, coll.find({b: 1, $text: {$search: "hello"}}).itcount());
assert.eq(1, coll.find({b: 2, $text: {$search: "hello"}}).itcount());
// 6) Same as #5, but with a wildcard text index.
coll.drop();
assert.commandWorked(coll.ensureIndex({"$**": "text", b: 1}));
-assert.writeOK(coll.insert({a: "hello", b: 1}));
+assert.commandWorked(coll.insert({a: "hello", b: 1}));
assert.eq(1, coll.find({b: 1, $text: {$search: "hello"}}).itcount());
-assert.writeOK(coll.update({}, {$set: {b: 2}}));
+assert.commandWorked(coll.update({}, {$set: {b: 2}}));
assert.eq(0, coll.find({b: 1, $text: {$search: "hello"}}).itcount());
assert.eq(1, coll.find({b: 2, $text: {$search: "hello"}}).itcount());
@@ -67,20 +67,20 @@ assert.eq(1, coll.find({b: 2, $text: {$search: "hello"}}).itcount());
// (so as to change the stemming), and verify that $text with the new language returns the document.
coll.drop();
assert.commandWorked(coll.ensureIndex({a: "text"}));
-assert.writeOK(coll.insert({a: "testing", language: "es"}));
+assert.commandWorked(coll.insert({a: "testing", language: "es"}));
assert.eq(1, coll.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.eq(0, coll.find({$text: {$search: "testing", $language: "en"}}).itcount());
-assert.writeOK(coll.update({}, {$set: {language: "en"}}));
+assert.commandWorked(coll.update({}, {$set: {language: "en"}}));
assert.eq(0, coll.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.eq(1, coll.find({$text: {$search: "testing", $language: "en"}}).itcount());
// 8) Same as #7, but with a wildcard text index.
coll.drop();
assert.commandWorked(coll.ensureIndex({"$**": "text"}));
-assert.writeOK(coll.insert({a: "testing", language: "es"}));
+assert.commandWorked(coll.insert({a: "testing", language: "es"}));
assert.eq(1, coll.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.eq(0, coll.find({$text: {$search: "testing", $language: "en"}}).itcount());
-assert.writeOK(coll.update({}, {$set: {language: "en"}}));
+assert.commandWorked(coll.update({}, {$set: {language: "en"}}));
assert.eq(0, coll.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.eq(1, coll.find({$text: {$search: "testing", $language: "en"}}).itcount());
@@ -89,20 +89,20 @@ assert.eq(1, coll.find({$text: {$search: "testing", $language: "en"}}).itcount()
// the document.
coll.drop();
assert.commandWorked(coll.ensureIndex({"a.b": "text"}));
-assert.writeOK(coll.insert({a: {b: "testing", language: "es"}}));
+assert.commandWorked(coll.insert({a: {b: "testing", language: "es"}}));
assert.eq(1, coll.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.eq(0, coll.find({$text: {$search: "testing", $language: "en"}}).itcount());
-assert.writeOK(coll.update({}, {$set: {"a.language": "en"}}));
+assert.commandWorked(coll.update({}, {$set: {"a.language": "en"}}));
assert.eq(0, coll.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.eq(1, coll.find({$text: {$search: "testing", $language: "en"}}).itcount());
// 10) Same as #9, but with a wildcard text index.
coll.drop();
assert.commandWorked(coll.ensureIndex({"$**": "text"}));
-assert.writeOK(coll.insert({a: {b: "testing", language: "es"}}));
+assert.commandWorked(coll.insert({a: {b: "testing", language: "es"}}));
assert.eq(1, coll.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.eq(0, coll.find({$text: {$search: "testing", $language: "en"}}).itcount());
-assert.writeOK(coll.update({}, {$set: {"a.language": "en"}}));
+assert.commandWorked(coll.update({}, {$set: {"a.language": "en"}}));
assert.eq(0, coll.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.eq(1, coll.find({$text: {$search: "testing", $language: "en"}}).itcount());
@@ -111,19 +111,19 @@ assert.eq(1, coll.find({$text: {$search: "testing", $language: "en"}}).itcount()
// the new language returns the document.
coll.drop();
assert.commandWorked(coll.ensureIndex({a: "text"}, {language_override: "idioma"}));
-assert.writeOK(coll.insert({a: "testing", idioma: "es"}));
+assert.commandWorked(coll.insert({a: "testing", idioma: "es"}));
assert.eq(1, coll.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.eq(0, coll.find({$text: {$search: "testing", $language: "en"}}).itcount());
-assert.writeOK(coll.update({}, {$set: {idioma: "en"}}));
+assert.commandWorked(coll.update({}, {$set: {idioma: "en"}}));
assert.eq(0, coll.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.eq(1, coll.find({$text: {$search: "testing", $language: "en"}}).itcount());
// 12) Same as #11, but with a wildcard text index.
coll.drop();
assert.commandWorked(coll.ensureIndex({"$**": "text"}, {language_override: "idioma"}));
-assert.writeOK(coll.insert({a: "testing", idioma: "es"}));
+assert.commandWorked(coll.insert({a: "testing", idioma: "es"}));
assert.eq(1, coll.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.eq(0, coll.find({$text: {$search: "testing", $language: "en"}}).itcount());
-assert.writeOK(coll.update({}, {$set: {idioma: "en"}}));
+assert.commandWorked(coll.update({}, {$set: {idioma: "en"}}));
assert.eq(0, coll.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.eq(1, coll.find({$text: {$search: "testing", $language: "en"}}).itcount());
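
Cases 11 and 12 exercise language_override, which renames the per-document field consulted for the stemming language; updating that field must move the document from one stemmer's postings to the other's. Condensed:

// "idioma" plays the role the "language" field normally does; flipping it
// reindexes the document under the new stemming language.
const langColl = db.fts_override_sketch;
langColl.drop();
assert.commandWorked(langColl.createIndex({a: "text"}, {language_override: "idioma"}));
assert.commandWorked(langColl.insert({a: "testing", idioma: "es"}));
assert.eq(1, langColl.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.commandWorked(langColl.update({}, {$set: {idioma: "en"}}));
assert.eq(1, langColl.find({$text: {$search: "testing", $language: "en"}}).itcount());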
diff --git a/jstests/core/fts_index_version1.js b/jstests/core/fts_index_version1.js
index 0b1c869a3a5..8ae979c7dbe 100644
--- a/jstests/core/fts_index_version1.js
+++ b/jstests/core/fts_index_version1.js
@@ -4,18 +4,18 @@ var coll = db.fts_index_version1;
// Test basic English search.
coll.drop();
assert.commandWorked(coll.ensureIndex({a: "text"}, {textIndexVersion: 1}));
-assert.writeOK(coll.insert({a: "running"}));
+assert.commandWorked(coll.insert({a: "running"}));
assert.eq(1, coll.count({$text: {$search: "run"}}));
// Test search with a "language alias" only recognized in textIndexVersion:1 (note that the stopword
// machinery doesn't recognize these aliases).
coll.drop();
assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "eng", textIndexVersion: 1}));
-assert.writeOK(coll.insert({a: "running"}));
+assert.commandWorked(coll.insert({a: "running"}));
assert.eq(1, coll.count({$text: {$search: "run"}}));
// Test that textIndexVersion:1 indexes ignore subdocument language annotations.
coll.drop();
assert.commandWorked(coll.ensureIndex({"a.b": "text"}, {textIndexVersion: 1}));
-assert.writeOK(coll.insert({language: "none", a: {language: "english", b: "the"}}));
+assert.commandWorked(coll.insert({language: "none", a: {language: "english", b: "the"}}));
assert.eq(1, coll.count({$text: {$search: "the", $language: "none"}}));
diff --git a/jstests/core/fts_index_version2.js b/jstests/core/fts_index_version2.js
index f8c57f4e2d7..a74fe11a62f 100644
--- a/jstests/core/fts_index_version2.js
+++ b/jstests/core/fts_index_version2.js
@@ -8,7 +8,7 @@ var coll = db.fts_index_version2;
coll.drop();
-assert.writeOK(coll.insert(
+assert.commandWorked(coll.insert(
{_id: 0, a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."}));
assert.commandWorked(
diff --git a/jstests/core/fts_partition_no_multikey.js b/jstests/core/fts_partition_no_multikey.js
index 4c249522c30..1238bc60839 100644
--- a/jstests/core/fts_partition_no_multikey.js
+++ b/jstests/core/fts_partition_no_multikey.js
@@ -4,7 +4,7 @@ t.drop();
t.ensureIndex({x: 1, y: "text"});
-assert.writeOK(t.insert({x: 5, y: "this is fun"}));
+assert.commandWorked(t.insert({x: 5, y: "this is fun"}));
assert.writeError(t.insert({x: [], y: "this is fun"}));
diff --git a/jstests/core/fts_score_sort.js b/jstests/core/fts_score_sort.js
index 9a4cc1a120b..4b7189c1a4c 100644
--- a/jstests/core/fts_score_sort.js
+++ b/jstests/core/fts_score_sort.js
@@ -5,9 +5,9 @@
var t = db.getSiblingDB("test").getCollection("fts_score_sort");
t.drop();
-assert.writeOK(t.insert({_id: 0, a: "textual content"}));
-assert.writeOK(t.insert({_id: 1, a: "additional content"}));
-assert.writeOK(t.insert({_id: 2, a: "irrelevant content"}));
+assert.commandWorked(t.insert({_id: 0, a: "textual content"}));
+assert.commandWorked(t.insert({_id: 1, a: "additional content"}));
+assert.commandWorked(t.insert({_id: 2, a: "irrelevant content"}));
assert.commandWorked(t.ensureIndex({a: "text"}));
// $meta sort specification should be rejected if it has additional keys.
diff --git a/jstests/core/fts_spanish.js b/jstests/core/fts_spanish.js
index 264e1d9125b..988b55cbb80 100644
--- a/jstests/core/fts_spanish.js
+++ b/jstests/core/fts_spanish.js
@@ -6,11 +6,12 @@ load("jstests/libs/fts.js");
const coll = db.text_spanish;
coll.drop();
-assert.writeOK(coll.insert({_id: 1, title: "mi blog", text: "Este es un blog de prueba"}));
-assert.writeOK(coll.insert({_id: 2, title: "mi segundo post", text: "Este es un blog de prueba"}));
-assert.writeOK(coll.insert(
+assert.commandWorked(coll.insert({_id: 1, title: "mi blog", text: "Este es un blog de prueba"}));
+assert.commandWorked(
+ coll.insert({_id: 2, title: "mi segundo post", text: "Este es un blog de prueba"}));
+assert.commandWorked(coll.insert(
{_id: 3, title: "cuchillos son divertidos", text: "este es mi tercer blog stemmed"}));
-assert.writeOK(coll.insert(
+assert.commandWorked(coll.insert(
{_id: 4, language: "en", title: "My fourth blog", text: "This stemmed blog is in english"}));
// Create a text index, giving more weight to the "title" field.
diff --git a/jstests/core/fts_trailing_fields.js b/jstests/core/fts_trailing_fields.js
index 3f46cd1b1b7..9db8d905535 100644
--- a/jstests/core/fts_trailing_fields.js
+++ b/jstests/core/fts_trailing_fields.js
@@ -6,7 +6,7 @@ const coll = db.fts_trailing_fields;
coll.drop();
assert.commandWorked(coll.createIndex({a: 1, b: "text", c: 1}));
-assert.writeOK(coll.insert({a: 2, b: "lorem ipsum"}));
+assert.commandWorked(coll.insert({a: 2, b: "lorem ipsum"}));
assert.eq(0, coll.find({a: 2, $text: {$search: "lorem"}, c: {$exists: true}}).itcount());
assert.eq(1, coll.find({a: 2, $text: {$search: "lorem"}, c: null}).itcount());
@@ -16,7 +16,7 @@ assert.eq(1, coll.find({a: 2, $text: {$search: "lorem"}, c: {$exists: false}}).i
// Same with an $elemMatch predicate on one of the trailing fields.
coll.drop();
assert.commandWorked(coll.createIndex({a: 1, b: "text", "c.d": 1}));
-assert.writeOK(coll.insert({a: 2, b: "lorem ipsum", c: {d: 3}}));
+assert.commandWorked(coll.insert({a: 2, b: "lorem ipsum", c: {d: 3}}));
assert.eq(0, coll.find({a: [1, 2], $text: {$search: "lorem"}}).itcount());
assert.eq(0, coll.find({a: 2, $text: {$search: "lorem"}, c: {$elemMatch: {d: 3}}}).itcount());
}());
diff --git a/jstests/core/function_string_representations.js b/jstests/core/function_string_representations.js
index 106cdcda9f2..79dd5b31b39 100644
--- a/jstests/core/function_string_representations.js
+++ b/jstests/core/function_string_representations.js
@@ -13,7 +13,7 @@
var col = db.function_string_representations;
col.drop();
-assert.writeOK(col.insert({
+assert.commandWorked(col.insert({
_id: "abc123",
ord_date: new Date("Oct 04, 2012"),
status: 'A',
diff --git a/jstests/core/geo1.js b/jstests/core/geo1.js
index 35bba816df5..bf61e4f7ad8 100644
--- a/jstests/core/geo1.js
+++ b/jstests/core/geo1.js
@@ -12,7 +12,7 @@ idx = {
t.insert({zip: "06525", loc: [41.352964, 73.01212]});
t.insert({zip: "10024", loc: [40.786387, 73.97709]});
-assert.writeOK(t.insert({zip: "94061", loc: [37.463911, 122.23396]}));
+assert.commandWorked(t.insert({zip: "94061", loc: [37.463911, 122.23396]}));
// test "2d" has to be first
assert.eq(1, t.getIndexKeys().length, "S1");
@@ -40,4 +40,4 @@ assert.eq("06525", t.find({loc: wb.loc})[0].zip, "C3");
t.drop();
t.ensureIndex({loc: "2d"}, {min: -500, max: 500, bits: 4});
-assert.writeOK(t.insert({loc: [200, 200]}));
+assert.commandWorked(t.insert({loc: [200, 200]}));
diff --git a/jstests/core/geo10.js b/jstests/core/geo10.js
index 9b0fb42c2b5..13bb765c176 100644
--- a/jstests/core/geo10.js
+++ b/jstests/core/geo10.js
@@ -10,9 +10,9 @@ coll.drop();
assert.commandWorked(db.geo10.ensureIndex({c: '2d', t: 1}, {min: 0, max: Math.pow(2, 40)}));
assert.eq(2, db.geo10.getIndexes().length, "A3");
-assert.writeOK(db.geo10.insert({c: [1, 1], t: 1}));
-assert.writeOK(db.geo10.insert({c: [3600, 3600], t: 1}));
-assert.writeOK(db.geo10.insert({c: [0.001, 0.001], t: 1}));
+assert.commandWorked(db.geo10.insert({c: [1, 1], t: 1}));
+assert.commandWorked(db.geo10.insert({c: [3600, 3600], t: 1}));
+assert.commandWorked(db.geo10.insert({c: [0.001, 0.001], t: 1}));
printjson(
db.geo10
diff --git a/jstests/core/geo_2d_trailing_fields.js b/jstests/core/geo_2d_trailing_fields.js
index 3cb25a6e9ce..e4428c44888 100644
--- a/jstests/core/geo_2d_trailing_fields.js
+++ b/jstests/core/geo_2d_trailing_fields.js
@@ -8,7 +8,7 @@ const isMaster = assert.commandWorked(db.adminCommand({isMaster: 1}));
coll.drop();
assert.commandWorked(coll.createIndex({a: "2d", b: 1}));
-assert.writeOK(coll.insert({a: [0, 0]}));
+assert.commandWorked(coll.insert({a: [0, 0]}));
// Verify that $near queries handle existence predicates over the trailing fields correctly.
assert.eq(0, coll.find({a: {$near: [0, 0]}, b: {$exists: true}}).itcount());
@@ -23,7 +23,7 @@ assert.eq(1, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: fa
coll.drop();
assert.commandWorked(coll.createIndex({a: "2d", "b.c": 1}));
-assert.writeOK(coll.insert({a: [0, 0], b: [{c: 2}, {c: 3}]}));
+assert.commandWorked(coll.insert({a: [0, 0], b: [{c: 2}, {c: 3}]}));
// Verify that $near queries correctly handle predicates which cannot be covered due to array
// semantics.
@@ -38,7 +38,7 @@ assert.eq(0,
coll.drop();
assert.commandWorked(coll.createIndex({a: "2d", "b.c": 1}));
-assert.writeOK(coll.insert({a: [0, 0], b: [{c: 1}, {c: 2}]}));
+assert.commandWorked(coll.insert({a: [0, 0], b: [{c: 1}, {c: 2}]}));
// Verify that non-near 2d queries correctly handle predicates which cannot be covered due to
// array semantics.
diff --git a/jstests/core/geo_allowedcomparisons.js b/jstests/core/geo_allowedcomparisons.js
index 576e764820a..24b4ce95efa 100644
--- a/jstests/core/geo_allowedcomparisons.js
+++ b/jstests/core/geo_allowedcomparisons.js
@@ -34,7 +34,7 @@ t.ensureIndex({geo: "2d"});
// 2d doesn't know what to do w/this
assert.writeError(t.insert({geo: geojsonPoint}));
// Old points are OK.
-assert.writeOK(t.insert({geo: oldPoint}));
+assert.commandWorked(t.insert({geo: oldPoint}));
// Lines not OK in 2d
assert.writeError(t.insert({geo: geojsonLine}));
// Shapes are not OK to insert in 2d
diff --git a/jstests/core/geo_array0.js b/jstests/core/geo_array0.js
index 3b74e42e533..9d25eae9c51 100644
--- a/jstests/core/geo_array0.js
+++ b/jstests/core/geo_array0.js
@@ -10,7 +10,7 @@ function test(index) {
t.insert({zip: "10001", loc: [[10, 10], [50, 50]]});
t.insert({zip: "10002", loc: [[20, 20], [50, 50]]});
var res = t.insert({zip: "10003", loc: [[30, 30], [50, 50]]});
- assert.writeOK(res);
+ assert.commandWorked(res);
if (index) {
assert.commandWorked(t.ensureIndex({loc: "2d", zip: 1}));
@@ -18,7 +18,7 @@ function test(index) {
}
res = t.insert({zip: "10004", loc: [[40, 40], [50, 50]]});
- assert.writeOK(res);
+ assert.commandWorked(res);
// test normal access
printjson(t.find({loc: {$within: {$box: [[0, 0], [45, 45]]}}}).toArray());
diff --git a/jstests/core/geo_big_polygon.js b/jstests/core/geo_big_polygon.js
index 43738e58ae7..26b3f436780 100644
--- a/jstests/core/geo_big_polygon.js
+++ b/jstests/core/geo_big_polygon.js
@@ -39,11 +39,11 @@ var polarPoint = {type: "Point", coordinates: [85, 85]};
var lineEquator = {type: "LineString", coordinates: [[-20, 0], [20, 0]]};
-assert.writeOK(coll.insert({loc: poly10}));
-assert.writeOK(coll.insert({loc: line10}));
-assert.writeOK(coll.insert({loc: centerPoint}));
-assert.writeOK(coll.insert({loc: polarPoint}));
-assert.writeOK(coll.insert({loc: lineEquator}));
+assert.commandWorked(coll.insert({loc: poly10}));
+assert.commandWorked(coll.insert({loc: line10}));
+assert.commandWorked(coll.insert({loc: centerPoint}));
+assert.commandWorked(coll.insert({loc: polarPoint}));
+assert.commandWorked(coll.insert({loc: lineEquator}));
assert.eq(coll.find({}).count(), 5);
jsTest.log("Starting query...");
@@ -70,7 +70,7 @@ var bigPoly10 = {
crs: bigCRS
};
-assert.writeOK(coll.insert({_id: "bigPoly10", loc: bigPoly10}));
+assert.commandWorked(coll.insert({_id: "bigPoly10", loc: bigPoly10}));
assert.eq(coll.find({loc: {$geoWithin: {$geometry: bigPoly20}}}).count(), 3);
assert.eq(coll.find({loc: {$geoIntersects: {$geometry: bigPoly20}}}).count(), 4);
@@ -81,7 +81,7 @@ assert.eq(coll.find({loc: {$geoIntersects: {$geometry: bigPoly20Comp}}}).count()
assert.commandFailed(coll.ensureIndex({loc: "2dsphere"}));
// 3. After removing big polygon, index builds successfully
-assert.writeOK(coll.remove({_id: "bigPoly10"}));
+assert.commandWorked(coll.remove({_id: "bigPoly10"}));
assert.commandWorked(coll.ensureIndex({loc: "2dsphere"}));
// 4. With index, insert fails.
diff --git a/jstests/core/geo_big_polygon2.js b/jstests/core/geo_big_polygon2.js
index 1c4e0d42b87..d41377bb46a 100644
--- a/jstests/core/geo_big_polygon2.js
+++ b/jstests/core/geo_big_polygon2.js
@@ -551,7 +551,7 @@ objects.forEach(function(o) {
if (o.geo.crs && o.geo.crs == strictCRS) {
assert.writeError(coll.insert(o), "insert " + o.name);
} else {
- assert.writeOK(coll.insert(o), "insert " + o.name);
+ assert.commandWorked(coll.insert(o), "insert " + o.name);
}
});
diff --git a/jstests/core/geo_big_polygon3.js b/jstests/core/geo_big_polygon3.js
index 424510f521a..d9f850b7b2d 100644
--- a/jstests/core/geo_big_polygon3.js
+++ b/jstests/core/geo_big_polygon3.js
@@ -132,7 +132,7 @@ objects = [
// Insert GeoJson strictCRS objects
// Since there is no 2dsphere index, they can be inserted
objects.forEach(function(o) {
- assert.writeOK(coll.insert(o), "Geo Json strictCRS insert" + o.name);
+ assert.commandWorked(coll.insert(o), "Geo Json strictCRS insert" + o.name);
});
// Use Polygon to search for objects which should be ignored
@@ -207,7 +207,7 @@ objects = [
// Insert GeoJson crs84CRS & epsg4326CRS objects
objects.forEach(function(o) {
- assert.writeOK(coll.insert(o), "Geo Json insert" + o.name);
+ assert.commandWorked(coll.insert(o), "Geo Json insert" + o.name);
});
// Make sure stored crs84CRS & epsg4326CRS documents can be found
diff --git a/jstests/core/geo_center_sphere1.js b/jstests/core/geo_center_sphere1.js
index bd6c32196de..a533c0a598a 100644
--- a/jstests/core/geo_center_sphere1.js
+++ b/jstests/core/geo_center_sphere1.js
@@ -38,7 +38,7 @@ function test(index) {
}
gc(); // needed with low skip values
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
if (index) {
t.ensureIndex({loc: index});
diff --git a/jstests/core/geo_center_sphere2.js b/jstests/core/geo_center_sphere2.js
index 761cb5b7403..af8074d75cb 100644
--- a/jstests/core/geo_center_sphere2.js
+++ b/jstests/core/geo_center_sphere2.js
@@ -95,7 +95,7 @@ for (var test = 0; test < numTests; test++) {
docsOut: docsOut
});
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
assert.eq(docsIn + docsOut, numDocs);
assert.eq(pointsIn + pointsOut, totalPoints);
diff --git a/jstests/core/geo_distinct.js b/jstests/core/geo_distinct.js
index 965ec6f7a18..2ea860ff981 100644
--- a/jstests/core/geo_distinct.js
+++ b/jstests/core/geo_distinct.js
@@ -14,10 +14,10 @@ let res;
//
coll.drop();
-assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [10, 20]}}));
-assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [10, 20]}}));
-assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [20, 30]}}));
-assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [20, 30]}}));
+assert.commandWorked(coll.insert({loc: {type: 'Point', coordinates: [10, 20]}}));
+assert.commandWorked(coll.insert({loc: {type: 'Point', coordinates: [10, 20]}}));
+assert.commandWorked(coll.insert({loc: {type: 'Point', coordinates: [20, 30]}}));
+assert.commandWorked(coll.insert({loc: {type: 'Point', coordinates: [20, 30]}}));
assert.eq(4, coll.count());
// Test distinct on GeoJSON points with/without a 2dsphere index.
@@ -65,7 +65,7 @@ for (let i = 0; i < 50; ++i) {
bulk.insert({zone: 4, loc: {type: 'Point', coordinates: [10, 10]}});
bulk.insert({zone: 5, loc: {type: 'Point', coordinates: [20, 20]}});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
const originGeoJSON = {
type: 'Point',
diff --git a/jstests/core/geo_mindistance.js b/jstests/core/geo_mindistance.js
index 4ca58b26003..a92aab55f18 100644
--- a/jstests/core/geo_mindistance.js
+++ b/jstests/core/geo_mindistance.js
@@ -195,9 +195,9 @@ assert.eq(n_docs_within(1000) - n_docs_within(500),
t.drop();
assert.commandWorked(t.createIndex({loc: "2d"}));
-assert.writeOK(t.insert({loc: [0, 40]}));
-assert.writeOK(t.insert({loc: [0, 41]}));
-assert.writeOK(t.insert({loc: [0, 42]}));
+assert.commandWorked(t.insert({loc: [0, 40]}));
+assert.commandWorked(t.insert({loc: [0, 41]}));
+assert.commandWorked(t.insert({loc: [0, 42]}));
// Test minDistance for 2d index with $near.
assert.eq(3, t.find({loc: {$near: [0, 0]}}).itcount());
diff --git a/jstests/core/geo_multinest0.js b/jstests/core/geo_multinest0.js
index f59e61c037f..644fcd0d074 100644
--- a/jstests/core/geo_multinest0.js
+++ b/jstests/core/geo_multinest0.js
@@ -11,14 +11,14 @@ t.insert({zip: "10001", data: [{loc: [10, 10], type: "home"}, {loc: [50, 50], ty
t.insert({zip: "10002", data: [{loc: [20, 20], type: "home"}, {loc: [50, 50], type: "work"}]});
var res =
t.insert({zip: "10003", data: [{loc: [30, 30], type: "home"}, {loc: [50, 50], type: "work"}]});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.commandWorked(t.ensureIndex({"data.loc": "2d", zip: 1}));
assert.eq(2, t.getIndexKeys().length);
res =
t.insert({zip: "10004", data: [{loc: [40, 40], type: "home"}, {loc: [50, 50], type: "work"}]});
-assert.writeOK(res);
+assert.commandWorked(res);
// test normal access
@@ -44,7 +44,7 @@ assert.eq(2, t.getIndexKeys().length);
res =
t.insert({zip: "10004", data: [{loc: [40, 40], type: "home"}, {loc: [50, 50], type: "work"}]});
-assert.writeOK(res);
+assert.commandWorked(res);
// test normal access
printjson(t.find({"data.loc": {$within: {$box: [[0, 0], [45, 45]]}}}).toArray());
diff --git a/jstests/core/geo_multinest1.js b/jstests/core/geo_multinest1.js
index fc3e0d2d4d4..78021e4794f 100644
--- a/jstests/core/geo_multinest1.js
+++ b/jstests/core/geo_multinest1.js
@@ -11,14 +11,14 @@ t.insert({zip: "10001", data: [{loc: [10, 10], type: "home"}, {loc: [29, 29], ty
t.insert({zip: "10002", data: [{loc: [20, 20], type: "home"}, {loc: [39, 39], type: "work"}]});
var res =
t.insert({zip: "10003", data: [{loc: [30, 30], type: "home"}, {loc: [49, 49], type: "work"}]});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.commandWorked(t.ensureIndex({"data.loc": "2d", zip: 1}));
assert.eq(2, t.getIndexKeys().length);
res =
t.insert({zip: "10004", data: [{loc: [40, 40], type: "home"}, {loc: [59, 59], type: "work"}]});
-assert.writeOK(res);
+assert.commandWorked(res);
// test normal access
diff --git a/jstests/core/geo_operator_crs.js b/jstests/core/geo_operator_crs.js
index 063426b6b45..95dd130ebfd 100644
--- a/jstests/core/geo_operator_crs.js
+++ b/jstests/core/geo_operator_crs.js
@@ -18,7 +18,7 @@ var jsonZeroPt = {type: "Point", coordinates: [0, 0]};
var legacy90Pt = [90, 0];
var json90Pt = {type: "Point", coordinates: [90, 0]};
-assert.writeOK(coll.insert({geo: json90Pt}));
+assert.commandWorked(coll.insert({geo: json90Pt}));
var earthRadiusMeters = 6378.1 * 1000;
var result = null;
@@ -32,7 +32,7 @@ assert.close(result[0].dis, Math.PI / 2);
result = runQuery(jsonZeroPt);
assert.close(result[0].dis, (Math.PI / 2) * earthRadiusMeters);
-assert.writeOK(coll.remove({}));
+assert.commandWorked(coll.remove({}));
assert.commandWorked(coll.dropIndexes());
//
@@ -41,7 +41,7 @@ assert.commandWorked(coll.dropIndexes());
assert.commandWorked(coll.ensureIndex({geo: "2d"}));
-assert.writeOK(coll.insert({geo: legacy90Pt}));
+assert.commandWorked(coll.insert({geo: legacy90Pt}));
result = runQuery(legacyZeroPt);
assert.close(result[0].dis, Math.PI / 2);
diff --git a/jstests/core/geo_polygon1_noindex.js b/jstests/core/geo_polygon1_noindex.js
index d9831a6990c..5f43f736b45 100644
--- a/jstests/core/geo_polygon1_noindex.js
+++ b/jstests/core/geo_polygon1_noindex.js
@@ -45,7 +45,7 @@ pacman = [
[2, 0] // Bottom
];
-assert.writeOK(t.save({loc: [1, 3]})); // Add a point that's in
+assert.commandWorked(t.save({loc: [1, 3]})); // Add a point that's in
assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).count(), "Pacman single point");
diff --git a/jstests/core/geo_polygon3.js b/jstests/core/geo_polygon3.js
index f1e819e1920..fe62bee1f14 100644
--- a/jstests/core/geo_polygon3.js
+++ b/jstests/core/geo_polygon3.js
@@ -15,7 +15,7 @@ for (let n = 0; n < numTests; n++) {
for (let x = 1; x < 9; x++) {
for (let y = 1; y < 9; y++) {
let o = {_id: num++, loc: [x, y]};
- assert.writeOK(t.insert(o));
+ assert.commandWorked(t.insert(o));
}
}
@@ -51,14 +51,15 @@ for (let n = 0; n < numTests; n++) {
[2, 0] // Bottom
];
- assert.writeOK(t.insert({loc: [1, 3]})); // Add a point that's in
+ assert.commandWorked(t.insert({loc: [1, 3]})); // Add a point that's in
assert.commandWorked(t.createIndex({loc: "2d"}, {bits: 2 + n}));
assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).itcount(), "Pacman single point");
- assert.writeOK(t.insert({loc: [5, 3]})); // Add a point that's out right in the mouth opening
- assert.writeOK(t.insert({loc: [3, 7]})); // Add a point above the center of the head
- assert.writeOK(t.insert({loc: [3, -1]})); // Add a point below the center of the bottom
+ assert.commandWorked(
+ t.insert({loc: [5, 3]})); // Add a point that's out right in the mouth opening
+ assert.commandWorked(t.insert({loc: [3, 7]})); // Add a point above the center of the head
+ assert.commandWorked(t.insert({loc: [3, -1]})); // Add a point below the center of the bottom
assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).itcount(), "Pacman double point");
}
diff --git a/jstests/core/geo_s2cursorlimitskip.js b/jstests/core/geo_s2cursorlimitskip.js
index 87b3cf5b1b2..6614074e31b 100644
--- a/jstests/core/geo_s2cursorlimitskip.js
+++ b/jstests/core/geo_s2cursorlimitskip.js
@@ -35,7 +35,7 @@ function insertRandomPoints(num, minDist, maxDist) {
var lat = sign() * (minDist + random() * (maxDist - minDist));
var lng = sign() * (minDist + random() * (maxDist - minDist));
var point = {geo: {type: "Point", coordinates: [lng, lat]}};
- assert.writeOK(t.insert(point));
+ assert.commandWorked(t.insert(point));
}
}
diff --git a/jstests/core/geo_s2disjoint_holes.js b/jstests/core/geo_s2disjoint_holes.js
index dd17dd29b1d..a79ea432d48 100644
--- a/jstests/core/geo_s2disjoint_holes.js
+++ b/jstests/core/geo_s2disjoint_holes.js
@@ -68,8 +68,8 @@ assert.eq(1, t.getIndexes().length);
// But with no index we can insert bad polygons and bad multi-polygons.
//
t.drop();
-assert.writeOK(t.insert({p: poly}));
-assert.writeOK(t.insert({p: multiPoly}));
+assert.commandWorked(t.insert({p: poly}));
+assert.commandWorked(t.insert({p: multiPoly}));
t.drop();
diff --git a/jstests/core/geo_s2dupe_points.js b/jstests/core/geo_s2dupe_points.js
index faa06cabb9b..1038c6df087 100644
--- a/jstests/core/geo_s2dupe_points.js
+++ b/jstests/core/geo_s2dupe_points.js
@@ -8,7 +8,7 @@ t.ensureIndex({geo: "2dsphere"});
function testDuplicates(shapeName, shapeWithDupes, shapeWithoutDupes) {
// insert a doc with dupes
- assert.writeOK(t.insert(shapeWithDupes));
+ assert.commandWorked(t.insert(shapeWithDupes));
// duplicates are preserved when the document is fetched by _id
assert.eq(shapeWithDupes, t.findOne({_id: shapeName}));
diff --git a/jstests/core/geo_s2explain.js b/jstests/core/geo_s2explain.js
index 97f45e89a68..6e6938e509a 100644
--- a/jstests/core/geo_s2explain.js
+++ b/jstests/core/geo_s2explain.js
@@ -6,7 +6,7 @@ t.drop();
var point1 = {loc: {type: "Point", coordinates: [10, 10]}};
var point2 = {loc: {type: "Point", coordinates: [10.001, 10]}};
-assert.writeOK(t.insert([point1, point2]));
+assert.commandWorked(t.insert([point1, point2]));
assert.commandWorked(t.ensureIndex({loc: "2dsphere"}));
@@ -23,7 +23,7 @@ for (var i = 10; i < 70; i += 0.1) {
points.push({loc: {type: "Point", coordinates: [i, i]}});
}
-assert.writeOK(t.insert(points));
+assert.commandWorked(t.insert(points));
explain = t.find({loc: {$nearSphere: {type: "Point", coordinates: [10, 10]}}})
.limit(10)
diff --git a/jstests/core/geo_s2index.js b/jstests/core/geo_s2index.js
index 0b1644e41da..d909fab4489 100644
--- a/jstests/core/geo_s2index.js
+++ b/jstests/core/geo_s2index.js
@@ -106,11 +106,11 @@ res = t.insert({
assert.writeError(res);
assert.eq(0, t.find().itcount());
res = t.insert({loc: {type: 'Point', coordinates: [40, 5]}});
-assert.writeOK(res);
+assert.commandWorked(res);
res = t.insert({
loc: {type: 'Point', coordinates: [40, 5], crs: {type: 'name', properties: {name: 'EPSG:4326'}}}
});
-assert.writeOK(res);
+assert.commandWorked(res);
res = t.insert({
loc: {
type: 'Point',
@@ -118,7 +118,7 @@ res = t.insert({
crs: {type: 'name', properties: {name: 'urn:ogc:def:crs:OGC:1.3:CRS84'}}
}
});
-assert.writeOK(res);
+assert.commandWorked(res);
// We can pass level parameters and we verify that they're valid.
// 0 <= coarsestIndexedLevel <= finestIndexedLevel <= 30.
diff --git a/jstests/core/geo_s2indexversion1.js b/jstests/core/geo_s2indexversion1.js
index 7b17796f29f..52d2d29a5bb 100644
--- a/jstests/core/geo_s2indexversion1.js
+++ b/jstests/core/geo_s2indexversion1.js
@@ -176,30 +176,30 @@ var geometryCollectionDoc = {
res = coll.ensureIndex({geo: "2dsphere"}, {"2dsphereIndexVersion": 2});
assert.commandWorked(res);
res = coll.insert(pointDoc);
-assert.writeOK(res);
+assert.commandWorked(res);
res = coll.insert(lineStringDoc);
-assert.writeOK(res);
+assert.commandWorked(res);
res = coll.insert(polygonDoc);
-assert.writeOK(res);
+assert.commandWorked(res);
res = coll.insert(multiPointDoc);
-assert.writeOK(res);
+assert.commandWorked(res);
res = coll.insert(multiLineStringDoc);
-assert.writeOK(res);
+assert.commandWorked(res);
res = coll.insert(multiPolygonDoc);
-assert.writeOK(res);
+assert.commandWorked(res);
res = coll.insert(geometryCollectionDoc);
-assert.writeOK(res);
+assert.commandWorked(res);
coll.drop();
// {2dsphereIndexVersion: 1} indexes allow only Point, LineString, and Polygon.
res = coll.ensureIndex({geo: "2dsphere"}, {"2dsphereIndexVersion": 1});
assert.commandWorked(res);
res = coll.insert(pointDoc);
-assert.writeOK(res);
+assert.commandWorked(res);
res = coll.insert(lineStringDoc);
-assert.writeOK(res);
+assert.commandWorked(res);
res = coll.insert(polygonDoc);
-assert.writeOK(res);
+assert.commandWorked(res);
res = coll.insert(multiPointDoc);
assert.writeError(res);
res = coll.insert(multiLineStringDoc);
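[Editor's note] For context, the version gate being exercised: a {2dsphereIndexVersion: 1} index accepts only Point, LineString, and Polygon, so the Multi* and GeometryCollection inserts fail. A minimal sketch, with a hypothetical collection name:

var coll = db.s2_version_demo;  // hypothetical name
coll.drop();
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}, {"2dsphereIndexVersion": 1}));

// Indexable under version 1.
assert.commandWorked(coll.insert({geo: {type: "Point", coordinates: [0, 0]}}));

// Only indexable from version 2 onward.
assert.writeError(coll.insert({geo: {type: "MultiPoint", coordinates: [[0, 0], [1, 1]]}}));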
diff --git a/jstests/core/geo_s2meridian.js b/jstests/core/geo_s2meridian.js
index 763067e8a34..65e2a9879e8 100644
--- a/jstests/core/geo_s2meridian.js
+++ b/jstests/core/geo_s2meridian.js
@@ -12,7 +12,7 @@ meridianCrossingLine = {
geo: {type: "LineString", coordinates: [[-178.0, 10.0], [178.0, 10.0]]}
};
-assert.writeOK(t.insert(meridianCrossingLine));
+assert.commandWorked(t.insert(meridianCrossingLine));
lineAlongMeridian = {
type: "LineString",
diff --git a/jstests/core/geo_s2multi.js b/jstests/core/geo_s2multi.js
index d9a4032d070..b72177eea73 100644
--- a/jstests/core/geo_s2multi.js
+++ b/jstests/core/geo_s2multi.js
@@ -8,13 +8,13 @@ multiPointA = {
"type": "MultiPoint",
"coordinates": [[100.0, 0.0], [101.0, 1.0]]
};
-assert.writeOK(t.insert({geo: multiPointA}));
+assert.commandWorked(t.insert({geo: multiPointA}));
multiLineStringA = {
"type": "MultiLineString",
"coordinates": [[[100.0, 0.0], [101.0, 1.0]], [[102.0, 2.0], [103.0, 3.0]]]
};
-assert.writeOK(t.insert({geo: multiLineStringA}));
+assert.commandWorked(t.insert({geo: multiLineStringA}));
multiPolygonA = {
"type": "MultiPolygon",
@@ -26,7 +26,7 @@ multiPolygonA = {
]
]
};
-assert.writeOK(t.insert({geo: multiPolygonA}));
+assert.commandWorked(t.insert({geo: multiPolygonA}));
assert.eq(3, t.find({
geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [100, 0]}}}
@@ -65,7 +65,7 @@ partialPolygonA = {
"type": "Polygon",
"coordinates": [[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]]
};
-assert.writeOK(t.insert({geo: partialPolygonA}));
+assert.commandWorked(t.insert({geo: partialPolygonA}));
// Polygon contains itself, the partial poly, and the multipoint
assert.eq(3, t.find({geo: {$geoWithin: {$geometry: multiPolygonA}}}).itcount());
diff --git a/jstests/core/geo_s2nongeoarray.js b/jstests/core/geo_s2nongeoarray.js
index 4b210f8f779..62842c8c7b4 100644
--- a/jstests/core/geo_s2nongeoarray.js
+++ b/jstests/core/geo_s2nongeoarray.js
@@ -7,7 +7,7 @@ oldPoint = [40, 5];
var data = {geo: oldPoint, nonGeo: [123, 456], otherNonGeo: [{b: [1, 2]}, {b: [3, 4]}]};
t.drop();
-assert.writeOK(t.insert(data));
+assert.commandWorked(t.insert(data));
assert.commandWorked(t.ensureIndex({otherNonGeo: 1}));
assert.eq(1, t.find({otherNonGeo: {b: [1, 2]}}).itcount());
assert.eq(0, t.find({otherNonGeo: 1}).itcount());
diff --git a/jstests/core/geo_s2ordering.js b/jstests/core/geo_s2ordering.js
index dc9f660ae6c..ecbfbc95782 100644
--- a/jstests/core/geo_s2ordering.js
+++ b/jstests/core/geo_s2ordering.js
@@ -27,7 +27,7 @@ function makepoints(needle) {
}
}
bulk.insert({nongeo: needle, geo: {type: "Point", coordinates: [0, 0]}});
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}
function runTest(index) {
diff --git a/jstests/core/geo_s2sparse.js b/jstests/core/geo_s2sparse.js
index 2fb93200c44..d058afb9f9b 100644
--- a/jstests/core/geo_s2sparse.js
+++ b/jstests/core/geo_s2sparse.js
@@ -25,7 +25,7 @@ var bulkInsertDocs = function(coll, numDocs, makeDocFn) {
bulk.insert(makeDocFn(i));
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
print("Bulk inserting " + numDocs + " documents completed");
};
diff --git a/jstests/core/geo_s2twofields.js b/jstests/core/geo_s2twofields.js
index c50ca3c46b5..2e225c8cef6 100644
--- a/jstests/core/geo_s2twofields.js
+++ b/jstests/core/geo_s2twofields.js
@@ -31,7 +31,7 @@ for (var i = 0; i < maxPoints; ++i) {
{from: {type: "Point", coordinates: fromCoord}, to: {type: "Point", coordinates: toCoord}});
}
res = t.insert(arr);
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(t.count(), maxPoints);
function semiRigorousTime(func) {
diff --git a/jstests/core/geo_s2within_line_polygon_sphere.js b/jstests/core/geo_s2within_line_polygon_sphere.js
index 17b89d25f9e..73e4a0f897d 100644
--- a/jstests/core/geo_s2within_line_polygon_sphere.js
+++ b/jstests/core/geo_s2within_line_polygon_sphere.js
@@ -14,10 +14,11 @@ function testGeoWithinCenterSphereLinePolygon(coll) {
}
// Basic tests.
- assert.writeOK(coll.insert({name: "Point1", geoField: {type: "Point", coordinates: [1, 1]}}));
- assert.writeOK(coll.insert(
+ assert.commandWorked(
+ coll.insert({name: "Point1", geoField: {type: "Point", coordinates: [1, 1]}}));
+ assert.commandWorked(coll.insert(
{name: "LineString1", geoField: {type: "LineString", coordinates: [[1, 1], [2, 2]]}}));
- assert.writeOK(coll.insert({
+ assert.commandWorked(coll.insert({
name: "Polygon1",
geoField: {type: "Polygon", coordinates: [[[1, 1], [2, 2], [2, 1], [1, 1]]]}
}));
@@ -35,7 +36,7 @@ function testGeoWithinCenterSphereLinePolygon(coll) {
[[151.0997772216797, -33.86157820443923], [151.21719360351562, -33.8952122494965]]
}
};
- assert.writeOK(coll.insert(geoDoc));
+ assert.commandWorked(coll.insert(geoDoc));
// Test for a LineString within a geowithin sphere.
testGeoWithinCenterSphere([[151.16789425018004, -33.8508357122312], 0.0011167360027064348],
@@ -58,7 +59,7 @@ function testGeoWithinCenterSphereLinePolygon(coll) {
]
}
};
- assert.writeOK(coll.insert(geoDoc));
+ assert.commandWorked(coll.insert(geoDoc));
// Test for a LineString forming a closed loop rectangle within a geowithin sphere.
testGeoWithinCenterSphere([[174.75211152791763, -36.88962755605813], 0.000550933650273084],
@@ -86,7 +87,7 @@ function testGeoWithinCenterSphereLinePolygon(coll) {
]]
}
};
- assert.writeOK(coll.insert(geoDoc));
+ assert.commandWorked(coll.insert(geoDoc));
// Test for a Polygon within a geowithin sphere.
testGeoWithinCenterSphere([[174.78536621904806, -41.30510816038769], 0.0009483659386360411],
@@ -127,7 +128,7 @@ function testGeoWithinCenterSphereLinePolygon(coll) {
}
};
- assert.writeOK(coll.insert(geoDoc));
+ assert.commandWorked(coll.insert(geoDoc));
    // Test for a MultiPolygon (two separate polygons) within a geowithin sphere.
testGeoWithinCenterSphere([[151.20821632978107, -33.865139891361636], 0.000981007241416606],
@@ -161,7 +162,7 @@ function testGeoWithinCenterSphereLinePolygon(coll) {
]]
}
};
- assert.writeOK(coll.insert(geoDoc));
+ assert.commandWorked(coll.insert(geoDoc));
// Test for a MultiPolygon (with a hole) within a geowithin sphere.
testGeoWithinCenterSphere([[151.20936119647115, -33.875266834633265], 0.00020277354002627845],
@@ -200,7 +201,7 @@ function testGeoWithinCenterSphereLinePolygon(coll) {
"coordinates": [[96.328125, 5.61598581915534], [153.984375, -6.315298538330033]]
}
};
- assert.writeOK(coll.insert(geoDoc));
+ assert.commandWorked(coll.insert(geoDoc));
    // Test for a large query cap containing both of the line's vertices but not the line itself.
// (should not return a match).
@@ -223,7 +224,7 @@ function testGeoWithinCenterSphereLinePolygon(coll) {
]]
}
};
- assert.writeOK(coll.insert(geoDoc));
+ assert.commandWorked(coll.insert(geoDoc));
    // Test for a large query cap containing both of the line's vertices but not the line itself.
// (should not return a match).
diff --git a/jstests/core/geo_update1.js b/jstests/core/geo_update1.js
index 4bb18256a57..54d123eff52 100644
--- a/jstests/core/geo_update1.js
+++ b/jstests/core/geo_update1.js
@@ -26,12 +26,12 @@ function p() {
p();
var res = t.update({loc: {$within: {$center: [[5, 5], 2]}}}, {$inc: {z: 1}}, false, true);
-assert.writeOK(res);
+assert.commandWorked(res);
p();
-assert.writeOK(t.update({}, {'$inc': {'z': 1}}, false, true));
+assert.commandWorked(t.update({}, {'$inc': {'z': 1}}, false, true));
p();
res = t.update({loc: {$within: {$center: [[5, 5], 2]}}}, {$inc: {z: 1}}, false, true);
-assert.writeOK(res);
+assert.commandWorked(res);
p();
diff --git a/jstests/core/geo_update2.js b/jstests/core/geo_update2.js
index fe59c55357b..27825416e02 100644
--- a/jstests/core/geo_update2.js
+++ b/jstests/core/geo_update2.js
@@ -25,13 +25,13 @@ function p() {
p();
-assert.writeOK(
+assert.commandWorked(
t.update({"loc": {"$within": {"$center": [[5, 5], 2]}}}, {'$inc': {'z': 1}}, false, true));
p();
-assert.writeOK(t.update({}, {'$inc': {'z': 1}}, false, true));
+assert.commandWorked(t.update({}, {'$inc': {'z': 1}}, false, true));
p();
-assert.writeOK(
+assert.commandWorked(
t.update({"loc": {"$within": {"$center": [[5, 5], 2]}}}, {'$inc': {'z': 1}}, false, true));
p();
diff --git a/jstests/core/geo_validate.js b/jstests/core/geo_validate.js
index 190f7886298..caee7720d58 100644
--- a/jstests/core/geo_validate.js
+++ b/jstests/core/geo_validate.js
@@ -48,7 +48,7 @@ assert.throws(function() {
//
//
// Make sure we can do a $within search with a zero-radius circular region
-assert.writeOK(coll.insert({geo: [0, 0]}));
+assert.commandWorked(coll.insert({geo: [0, 0]}));
assert.neq(null, coll.findOne({geo: {$within: {$center: [[0, 0], 0]}}}));
assert.neq(null, coll.findOne({geo: {$within: {$centerSphere: [[0, 0], 0]}}}));
assert.neq(null, coll.findOne({geo: {$within: {$center: [[0, 0], Infinity]}}}));
diff --git a/jstests/core/geonear_key.js b/jstests/core/geonear_key.js
index 0238e012577..b0afadaaa06 100644
--- a/jstests/core/geonear_key.js
+++ b/jstests/core/geonear_key.js
@@ -9,12 +9,12 @@ load("jstests/libs/analyze_plan.js");
const coll = db.jstests_geonear_key;
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [1, 1]}));
-assert.writeOK(coll.insert({_id: 1, a: [1, 2]}));
-assert.writeOK(coll.insert({_id: 2, b: {c: [1, 1]}}));
-assert.writeOK(coll.insert({_id: 3, b: {c: [1, 2]}}));
-assert.writeOK(coll.insert({_id: 4, b: {d: [1, 1]}}));
-assert.writeOK(coll.insert({_id: 5, b: {d: [1, 2]}}));
+assert.commandWorked(coll.insert({_id: 0, a: [1, 1]}));
+assert.commandWorked(coll.insert({_id: 1, a: [1, 2]}));
+assert.commandWorked(coll.insert({_id: 2, b: {c: [1, 1]}}));
+assert.commandWorked(coll.insert({_id: 3, b: {c: [1, 2]}}));
+assert.commandWorked(coll.insert({_id: 4, b: {d: [1, 1]}}));
+assert.commandWorked(coll.insert({_id: 5, b: {d: [1, 2]}}));
/**
* Runs an aggregation consisting of a single $geoNear stage described by 'nearParams', and
diff --git a/jstests/core/getmore_cmd_maxtimems.js b/jstests/core/getmore_cmd_maxtimems.js
index 1b8e20ba962..e37ee16b77a 100644
--- a/jstests/core/getmore_cmd_maxtimems.js
+++ b/jstests/core/getmore_cmd_maxtimems.js
@@ -12,7 +12,7 @@ var coll = db[collName];
coll.drop();
for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
// Can't attach maxTimeMS to a getMore command for a non-tailable cursor over a non-capped
@@ -25,7 +25,7 @@ assert.commandFailed(cmdRes);
coll.drop();
assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024}));
for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
// Can't attach maxTimeMS to a getMore command for a non-tailable cursor over a capped
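[Editor's note] The restriction tested here is one-directional: maxTimeMS is rejected on getMore for ordinary cursors, but it is the supported way to bound the wait of a tailable awaitData cursor on a capped collection. A rough sketch of the accepted shape; the collection name is hypothetical:

var demoName = "getmore_maxtimems_demo";  // hypothetical collection
db[demoName].drop();
assert.commandWorked(db.createCollection(demoName, {capped: true, size: 1024}));
assert.commandWorked(db[demoName].insert({a: 1}));

// Establish a tailable awaitData cursor with an empty first batch.
var cmdRes = db.runCommand({find: demoName, batchSize: 0, tailable: true, awaitData: true});
assert.commandWorked(cmdRes);

// Here maxTimeMS bounds how long the getMore waits for new data.
assert.commandWorked(
    db.runCommand({getMore: cmdRes.cursor.id, collection: demoName, maxTimeMS: 1000}));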
diff --git a/jstests/core/getmore_invalidated_cursors.js b/jstests/core/getmore_invalidated_cursors.js
index 43f27ed5e49..57aa832f391 100644
--- a/jstests/core/getmore_invalidated_cursors.js
+++ b/jstests/core/getmore_invalidated_cursors.js
@@ -19,7 +19,7 @@ function setupCollection() {
for (let i = 0; i < nDocs; ++i) {
bulk.insert({_id: i, x: i});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
assert.commandWorked(coll.createIndex({x: 1}));
}
diff --git a/jstests/core/getmore_invalidated_documents.js b/jstests/core/getmore_invalidated_documents.js
index 378fde3b02a..72acb4a7cc8 100644
--- a/jstests/core/getmore_invalidated_documents.js
+++ b/jstests/core/getmore_invalidated_documents.js
@@ -19,15 +19,15 @@ var y;
// Case #1: Text search with deletion invalidation.
t.drop();
assert.commandWorked(t.ensureIndex({a: "text"}));
-assert.writeOK(t.insert({_id: 1, a: "bar"}));
-assert.writeOK(t.insert({_id: 2, a: "bar"}));
-assert.writeOK(t.insert({_id: 3, a: "bar"}));
+assert.commandWorked(t.insert({_id: 1, a: "bar"}));
+assert.commandWorked(t.insert({_id: 2, a: "bar"}));
+assert.commandWorked(t.insert({_id: 3, a: "bar"}));
cursor = t.find({$text: {$search: "bar"}}).batchSize(2);
cursor.next();
cursor.next();
-assert.writeOK(t.remove({_id: 3}));
+assert.commandWorked(t.remove({_id: 3}));
// We should get back the document or not (depending on the storage engine / concurrency model).
// Either is fine as long as we don't crash.
@@ -37,16 +37,16 @@ assert(count === 0 || count === 1);
// Case #2: Text search with mutation invalidation.
t.drop();
assert.commandWorked(t.ensureIndex({a: "text"}));
-assert.writeOK(t.insert({_id: 1, a: "bar"}));
-assert.writeOK(t.insert({_id: 2, a: "bar"}));
-assert.writeOK(t.insert({_id: 3, a: "bar"}));
+assert.commandWorked(t.insert({_id: 1, a: "bar"}));
+assert.commandWorked(t.insert({_id: 2, a: "bar"}));
+assert.commandWorked(t.insert({_id: 3, a: "bar"}));
cursor = t.find({$text: {$search: "bar"}}).batchSize(2);
cursor.next();
cursor.next();
// Update the next matching doc so that it no longer matches.
-assert.writeOK(t.update({_id: 3}, {$set: {a: "nomatch"}}));
+assert.commandWorked(t.update({_id: 3}, {$set: {a: "nomatch"}}));
// Either the cursor should skip the result that no longer matches, or we should get back the
// old
@@ -56,16 +56,16 @@ assert(!cursor.hasNext() || cursor.next()["a"] === "bar");
// Case #3: Merge sort with deletion invalidation.
t.drop();
assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
-assert.writeOK(t.insert({a: 1, b: 1}));
-assert.writeOK(t.insert({a: 1, b: 2}));
-assert.writeOK(t.insert({a: 2, b: 3}));
-assert.writeOK(t.insert({a: 2, b: 4}));
+assert.commandWorked(t.insert({a: 1, b: 1}));
+assert.commandWorked(t.insert({a: 1, b: 2}));
+assert.commandWorked(t.insert({a: 2, b: 3}));
+assert.commandWorked(t.insert({a: 2, b: 4}));
cursor = t.find({a: {$in: [1, 2]}}).sort({b: 1}).batchSize(2);
cursor.next();
cursor.next();
-assert.writeOK(t.remove({a: 2, b: 3}));
+assert.commandWorked(t.remove({a: 2, b: 3}));
count = cursor.itcount();
assert(count === 1 || count === 2);
@@ -73,16 +73,16 @@ assert(count === 1 || count === 2);
// Case #4: Merge sort with mutation invalidation.
t.drop();
assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
-assert.writeOK(t.insert({a: 1, b: 1}));
-assert.writeOK(t.insert({a: 1, b: 2}));
-assert.writeOK(t.insert({a: 2, b: 3}));
-assert.writeOK(t.insert({a: 2, b: 4}));
+assert.commandWorked(t.insert({a: 1, b: 1}));
+assert.commandWorked(t.insert({a: 1, b: 2}));
+assert.commandWorked(t.insert({a: 2, b: 3}));
+assert.commandWorked(t.insert({a: 2, b: 4}));
cursor = t.find({a: {$in: [1, 2]}}).sort({b: 1}).batchSize(2);
cursor.next();
cursor.next();
-assert.writeOK(t.update({a: 2, b: 3}, {$set: {a: 6}}));
+assert.commandWorked(t.update({a: 2, b: 3}, {$set: {a: 6}}));
// Either the cursor should skip the result that no longer matches, or we should get back the
// old
@@ -99,7 +99,7 @@ t.drop();
t.ensureIndex({geo: "2d"});
for (x = -1; x < 1; x++) {
for (y = -1; y < 1; y++) {
- assert.writeOK(t.insert({geo: [x, y]}));
+ assert.commandWorked(t.insert({geo: [x, y]}));
}
}
@@ -108,7 +108,7 @@ cursor.next();
cursor.next();
// Drop all documents in the collection.
-assert.writeOK(t.remove({}));
+assert.commandWorked(t.remove({}));
// Both MMAP v1 and doc-locking storage engines should force fetch the doc (it will be buffered
// because it is the same distance from the center point as a doc already returned).
@@ -119,7 +119,7 @@ t.drop();
t.ensureIndex({geo: "2dsphere"});
for (x = -1; x < 1; x++) {
for (y = -1; y < 1; y++) {
- assert.writeOK(t.insert({geo: [x, y]}));
+ assert.commandWorked(t.insert({geo: [x, y]}));
}
}
@@ -128,7 +128,7 @@ cursor.next();
cursor.next();
// Drop all documents in the collection.
-assert.writeOK(t.remove({}));
+assert.commandWorked(t.remove({}));
// Both MMAP v1 and doc-locking storage engines should force fetch the doc (it will be buffered
// because it is the same distance from the center point as a doc already returned).
@@ -138,7 +138,7 @@ assert(cursor.hasNext());
t.drop();
t.ensureIndex({geo: "2dsphere"});
for (x = 0; x < 6; x++) {
- assert.writeOK(t.insert({geo: [x, x]}));
+ assert.commandWorked(t.insert({geo: [x, x]}));
}
cursor = t.find({geo: {$nearSphere: [0, 0], $maxDistance: 10}}).batchSize(2);
@@ -146,7 +146,7 @@ cursor.next();
cursor.next();
// Drop all documents in the collection.
-assert.writeOK(t.remove({}));
+assert.commandWorked(t.remove({}));
// We might force-fetch or we might skip over the deleted documents, depending on the internals
// of the geo near search. Just make sure that we can exhaust the cursor without crashing.
@@ -157,7 +157,7 @@ t.drop();
t.ensureIndex({geo: "2d"});
for (x = -1; x < 1; x++) {
for (y = -1; y < 1; y++) {
- assert.writeOK(t.insert({geo: [x, y]}));
+ assert.commandWorked(t.insert({geo: [x, y]}));
}
}
@@ -166,7 +166,7 @@ cursor.next();
cursor.next();
// Update all documents in the collection to have position [15, 15].
-assert.writeOK(t.update({}, {$set: {geo: [15, 15]}}, false, true));
+assert.commandWorked(t.update({}, {$set: {geo: [15, 15]}}, false, true));
// The old version of the document should be returned (the update should not be reflected in the
// results of the near search).
@@ -180,7 +180,7 @@ t.drop();
t.ensureIndex({geo: "2dsphere"});
for (x = -1; x < 1; x++) {
for (y = -1; y < 1; y++) {
- assert.writeOK(t.insert({geo: [x, y]}));
+ assert.commandWorked(t.insert({geo: [x, y]}));
}
}
@@ -189,7 +189,7 @@ cursor.next();
cursor.next();
// Update all documents in the collection to have position [15, 15].
-assert.writeOK(t.update({}, {$set: {geo: [15, 15]}}, false, true));
+assert.commandWorked(t.update({}, {$set: {geo: [15, 15]}}, false, true));
// The old version of the document should be returned (the update should not be reflected in the
// results of the near search).
@@ -209,7 +209,7 @@ cursor = t.find({a: {$in: [1, 2, 3]}}).sort({b: 1}).batchSize(2);
cursor.next();
cursor.next();
-assert.writeOK(t.remove({a: 2}));
+assert.commandWorked(t.remove({a: 2}));
if (cursor.hasNext()) {
assert.eq(cursor.next().b, 3);
@@ -226,7 +226,7 @@ cursor = t.find({a: {$in: [1, 2, 3]}}).sort({b: 1}).batchSize(2);
cursor.next();
cursor.next();
-assert.writeOK(t.update({a: 2}, {$set: {a: 4}}));
+assert.commandWorked(t.update({a: 2}, {$set: {a: 4}}));
count = cursor.itcount();
if (cursor.hasNext()) {
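[Editor's note] Every case in this file follows the same skeleton: buffer two results client-side, mutate the collection out from under the open cursor, then exhaust the cursor and accept either outcome. A condensed sketch of that skeleton, with a hypothetical collection name:

var demo = db.invalidation_demo;  // hypothetical name
demo.drop();
assert.commandWorked(demo.insert([{_id: 1}, {_id: 2}, {_id: 3}]));

// batchSize(2) leaves the third result on the server.
var cursor = demo.find().batchSize(2);
cursor.next();
cursor.next();

// Delete the not-yet-returned document while the cursor is open.
// It may or may not still be returned; the test only requires no crash.
assert.commandWorked(demo.remove({_id: 3}));
var n = cursor.itcount();
assert(n === 0 || n === 1);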
diff --git a/jstests/core/grow_hash_table.js b/jstests/core/grow_hash_table.js
index da6fdcc004c..783616b93fb 100644
--- a/jstests/core/grow_hash_table.js
+++ b/jstests/core/grow_hash_table.js
@@ -25,7 +25,7 @@ var doTest = function(count) {
}
// Store the document
- assert.writeOK(testDB.collection.insert(doc));
+ assert.commandWorked(testDB.collection.insert(doc));
// Try to read the document using a large projection
try {
diff --git a/jstests/core/idhack.js b/jstests/core/idhack.js
index 880716ed206..a9e1cc68aa9 100644
--- a/jstests/core/idhack.js
+++ b/jstests/core/idhack.js
@@ -8,12 +8,12 @@ t.drop();
// Include helpers for analyzing explain output.
load("jstests/libs/analyze_plan.js");
-assert.writeOK(t.insert({_id: {x: 1}, z: 1}));
-assert.writeOK(t.insert({_id: {x: 2}, z: 2}));
-assert.writeOK(t.insert({_id: {x: 3}, z: 3}));
-assert.writeOK(t.insert({_id: 1, z: 4}));
-assert.writeOK(t.insert({_id: 2, z: 5}));
-assert.writeOK(t.insert({_id: 3, z: 6}));
+assert.commandWorked(t.insert({_id: {x: 1}, z: 1}));
+assert.commandWorked(t.insert({_id: {x: 2}, z: 2}));
+assert.commandWorked(t.insert({_id: {x: 3}, z: 3}));
+assert.commandWorked(t.insert({_id: 1, z: 4}));
+assert.commandWorked(t.insert({_id: 2, z: 5}));
+assert.commandWorked(t.insert({_id: 3, z: 6}));
assert.eq(2, t.findOne({_id: {x: 2}}).z);
assert.eq(2, t.find({_id: {$gte: 2}}).count());
@@ -46,7 +46,7 @@ explain = t.find(query).skip(1).explain();
assert(!isIdhack(db, explain.queryPlanner.winningPlan));
// ID hack cannot be used with a regex predicate.
-assert.writeOK(t.insert({_id: "abc"}));
+assert.commandWorked(t.insert({_id: "abc"}));
explain = t.find({_id: /abc/}).explain();
assert.eq({_id: "abc"}, t.findOne({_id: /abc/}));
assert(!isIdhack(db, explain.queryPlanner.winningPlan));
@@ -62,8 +62,8 @@ assert.eq({_id: {x: 2}}, t.findOne(query, {_id: 1}));
//
t.drop();
-assert.writeOK(t.insert({_id: 0, a: 0, b: [{c: 1}, {c: 2}]}));
-assert.writeOK(t.insert({_id: 1, a: 1, b: [{c: 3}, {c: 4}]}));
+assert.commandWorked(t.insert({_id: 0, a: 0, b: [{c: 1}, {c: 2}]}));
+assert.commandWorked(t.insert({_id: 1, a: 1, b: [{c: 3}, {c: 4}]}));
// Simple inclusion.
assert.eq({_id: 1, a: 1}, t.find({_id: 1}, {a: 1}).next());
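[Editor's note] The regex hunk above is worth a closer look: the _id fast path (IDHACK) requires an exact equality on _id, so a regex predicate falls back to a normal plan while still returning the matching document. A quick sketch, assuming the isIdhack helper from jstests/libs/analyze_plan.js and a hypothetical collection name:

load("jstests/libs/analyze_plan.js");  // provides isIdhack (assumed)

var demo = db.idhack_demo;  // hypothetical name
demo.drop();
assert.commandWorked(demo.insert({_id: "abc"}));

// Exact equality on _id takes the fast path.
assert(isIdhack(db, demo.find({_id: "abc"}).explain().queryPlanner.winningPlan));

// A regex predicate does not, though the document is still found.
assert(!isIdhack(db, demo.find({_id: /abc/}).explain().queryPlanner.winningPlan));
assert.eq("abc", demo.findOne({_id: /abc/})._id);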
diff --git a/jstests/core/index_bounds_code.js b/jstests/core/index_bounds_code.js
index cd1fa58b306..52eec12c26b 100644
--- a/jstests/core/index_bounds_code.js
+++ b/jstests/core/index_bounds_code.js
@@ -12,7 +12,7 @@ assert.commandWorked(coll.createIndex({a: 1}));
const insertedFunc = function() {
return 1;
};
-assert.writeOK(coll.insert({a: insertedFunc}));
+assert.commandWorked(coll.insert({a: insertedFunc}));
// Test that queries involving comparison operators with values of type Code are covered.
const proj = {
@@ -39,9 +39,9 @@ assertCoveredQueryAndCount(
// Test that documents that lie outside of the generated index bounds are not returned.
coll.remove({});
-assert.writeOK(coll.insert({a: "string"}));
-assert.writeOK(coll.insert({a: {b: 1}}));
-assert.writeOK(coll.insert({a: MaxKey}));
+assert.commandWorked(coll.insert({a: "string"}));
+assert.commandWorked(coll.insert({a: {b: 1}}));
+assert.commandWorked(coll.insert({a: MaxKey}));
assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: func}}, project: proj, count: 0});
assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: func}}, project: proj, count: 0});
diff --git a/jstests/core/index_bounds_maxkey.js b/jstests/core/index_bounds_maxkey.js
index c581375e616..e632e52810f 100644
--- a/jstests/core/index_bounds_maxkey.js
+++ b/jstests/core/index_bounds_maxkey.js
@@ -9,7 +9,7 @@ const coll = db.index_bounds_maxkey;
coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
-assert.writeOK(coll.insert({a: MaxKey}));
+assert.commandWorked(coll.insert({a: MaxKey}));
// Test that queries involving comparison operators with MaxKey are covered.
const proj = {
@@ -24,9 +24,9 @@ assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MaxKey}}, projec
// Test that all documents are considered less than MaxKey, regardless of the presence of
// the queried field 'a'.
coll.remove({});
-assert.writeOK(coll.insert({a: "string"}));
-assert.writeOK(coll.insert({a: {b: 1}}));
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({a: "string"}));
+assert.commandWorked(coll.insert({a: {b: 1}}));
+assert.commandWorked(coll.insert({}));
assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: MaxKey}}, project: proj, count: 0});
assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: MaxKey}}, project: proj, count: 0});
assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: MaxKey}}, project: proj, count: 3});
diff --git a/jstests/core/index_bounds_minkey.js b/jstests/core/index_bounds_minkey.js
index b1d244db0b7..5d2ea8b36fd 100644
--- a/jstests/core/index_bounds_minkey.js
+++ b/jstests/core/index_bounds_minkey.js
@@ -9,7 +9,7 @@ const coll = db.index_bounds_minkey;
coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
-assert.writeOK(coll.insert({a: MinKey}));
+assert.commandWorked(coll.insert({a: MinKey}));
// Test that queries involving comparison operators with MinKey are covered.
const proj = {
@@ -24,9 +24,9 @@ assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MinKey}}, projec
// Test that all documents are considered greater than MinKey, regardless of the presence of
// the queried field 'a'.
coll.remove({});
-assert.writeOK(coll.insert({a: "string"}));
-assert.writeOK(coll.insert({a: {b: 1}}));
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({a: "string"}));
+assert.commandWorked(coll.insert({a: {b: 1}}));
+assert.commandWorked(coll.insert({}));
assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: MinKey}}, project: proj, count: 3});
assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: MinKey}}, project: proj, count: 3});
assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: MinKey}}, project: proj, count: 0});
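[Editor's note] Both the MinKey and MaxKey tests rest on BSON's total order: MinKey compares below, and MaxKey above, every value of every type, including a missing field. A small illustration using the same three-document setup; the collection name is hypothetical:

var coll = db.minmaxkey_demo;  // hypothetical name
coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.insert({a: "string"}));
assert.commandWorked(coll.insert({a: {b: 1}}));
assert.commandWorked(coll.insert({}));  // 'a' missing

// Everything, including the doc without 'a', lies strictly between the keys.
assert.eq(3, coll.find({a: {$gt: MinKey}}).itcount());
assert.eq(3, coll.find({a: {$lt: MaxKey}}).itcount());
assert.eq(0, coll.find({a: {$lt: MinKey}}).itcount());
assert.eq(0, coll.find({a: {$gt: MaxKey}}).itcount());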
diff --git a/jstests/core/index_bounds_object.js b/jstests/core/index_bounds_object.js
index b1bdb2e9591..1b7e12fbd62 100644
--- a/jstests/core/index_bounds_object.js
+++ b/jstests/core/index_bounds_object.js
@@ -9,7 +9,7 @@ const coll = db.index_bounds_object;
coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
-assert.writeOK(coll.insert({a: {b: 1}}));
+assert.commandWorked(coll.insert({a: {b: 1}}));
// Test that queries involving comparison operators with objects are covered.
const proj = {
@@ -25,7 +25,7 @@ assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: {b: 2}}}, project
assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: {b: 1}}}, project: proj, count: 1});
// Test that queries involving comparisons with an empty object are covered.
-assert.writeOK(coll.insert({a: {}}));
+assert.commandWorked(coll.insert({a: {}}));
assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: {}}}, project: proj, count: 1});
assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: {}}}, project: proj, count: 2});
assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: {}}}, project: proj, count: 0});
@@ -42,8 +42,8 @@ assertCoveredQueryAndCount(
// Test that documents that lie outside of the generated index bounds are not returned. Cannot
// test empty array upper bounds since that would force the index to be multi-key.
coll.remove({});
-assert.writeOK(coll.insert({a: "string"}));
-assert.writeOK(coll.insert({a: true}));
+assert.commandWorked(coll.insert({a: "string"}));
+assert.commandWorked(coll.insert({a: true}));
assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: {}}}, project: proj, count: 0});
assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: {}}}, project: proj, count: 0});
assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: {}}}, project: proj, count: 0});
@@ -51,7 +51,7 @@ assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: {}}}, project: p
// Adding a document containing an array makes the index multi-key which can never be used for a
// covered query.
-assert.writeOK(coll.insert({a: []}));
+assert.commandWorked(coll.insert({a: []}));
assert(!isIndexOnly(db, coll.find({a: {$gt: {}}}, proj).explain().queryPlanner.winningPlan));
assert(!isIndexOnly(db, coll.find({a: {$gte: {}}}, proj).explain().queryPlanner.winningPlan));
assert(!isIndexOnly(db, coll.find({a: {$lt: {}}}, proj).explain().queryPlanner.winningPlan));
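[Editor's note] The multikey hunk captures an important planner rule: the moment any indexed value is an array, the whole index becomes multikey and can no longer satisfy a covered projection. A sketch of observing the flip via explain, assuming isIndexOnly from jstests/libs/analyze_plan.js and a hypothetical collection name:

load("jstests/libs/analyze_plan.js");  // provides isIndexOnly (assumed)

var coll = db.multikey_covered_demo;  // hypothetical name
coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.insert({a: 1}));
var proj = {a: 1, _id: 0};

// Scalar-only index: the projection is answered from index keys alone.
assert(isIndexOnly(db, coll.find({a: {$gte: 1}}, proj).explain().queryPlanner.winningPlan));

// One array value marks the index multikey; coverage is lost.
assert.commandWorked(coll.insert({a: []}));
assert(!isIndexOnly(db, coll.find({a: {$gte: 1}}, proj).explain().queryPlanner.winningPlan));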
diff --git a/jstests/core/index_bounds_pipe.js b/jstests/core/index_bounds_pipe.js
index e0ef8cf915a..35e33e27276 100644
--- a/jstests/core/index_bounds_pipe.js
+++ b/jstests/core/index_bounds_pipe.js
@@ -11,12 +11,12 @@ const collName = 'index_bounds_pipe';
const coll = db.getCollection(collName);
coll.drop();
-assert.writeOK(coll.insert({_id: ''}));
-assert.writeOK(coll.insert({_id: '\\|'}));
-assert.writeOK(coll.insert({_id: 'a'}));
-assert.writeOK(coll.insert({_id: 'a|b'}));
-assert.writeOK(coll.insert({_id: 'b'}));
-assert.writeOK(coll.insert({_id: '|'}));
+assert.commandWorked(coll.insert({_id: ''}));
+assert.commandWorked(coll.insert({_id: '\\|'}));
+assert.commandWorked(coll.insert({_id: 'a'}));
+assert.commandWorked(coll.insert({_id: 'a|b'}));
+assert.commandWorked(coll.insert({_id: 'b'}));
+assert.commandWorked(coll.insert({_id: '|'}));
/**
* Asserts that a query on a field using 'params.regex' uses index bounds 'params.bounds' and
@@ -56,7 +56,7 @@ function assertIndexBoundsAndResult(params) {
assert.commandWorked(collRegexValue.createIndex({x: 1}));
const doc = {_id: 0, x: params.regex};
- assert.writeOK(collRegexValue.insert(doc));
+ assert.commandWorked(collRegexValue.insert(doc));
const regexQuery = {x: params.regex};
assert.eq(
diff --git a/jstests/core/index_bounds_timestamp.js b/jstests/core/index_bounds_timestamp.js
index fe0acf12936..c500f12202b 100644
--- a/jstests/core/index_bounds_timestamp.js
+++ b/jstests/core/index_bounds_timestamp.js
@@ -24,7 +24,7 @@ const documents = [
{_id: 3, ts: new Timestamp(1, 0)},
{_id: 4, ts: new Timestamp(Math.pow(2, 32) - 1, Math.pow(2, 32) - 1)}
];
-assert.writeOK(coll.insert(documents));
+assert.commandWorked(coll.insert(documents));
// Sanity check the timestamp bounds generation plan.
let plan;
diff --git a/jstests/core/index_decimal.js b/jstests/core/index_decimal.js
index 9736d8f0903..73f74eda0f7 100644
--- a/jstests/core/index_decimal.js
+++ b/jstests/core/index_decimal.js
@@ -15,17 +15,17 @@ t.drop();
// Create doubles and NumberDecimals. The double 0.1 is actually 0.10000000000000000555
// and the double 0.3 is actually 0.2999999999999999888, so we can check ordering.
-assert.writeOK(t.insert({x: 0.1, y: NumberDecimal('0.3000')}));
-assert.writeOK(t.insert({x: 0.1}));
-assert.writeOK(t.insert({y: 0.3}));
+assert.commandWorked(t.insert({x: 0.1, y: NumberDecimal('0.3000')}));
+assert.commandWorked(t.insert({x: 0.1}));
+assert.commandWorked(t.insert({y: 0.3}));
// Create an index on existing numbers.
assert.commandWorked(t.createIndex({x: 1}));
assert.commandWorked(t.createIndex({y: -1}));
// Insert some more items after index creation. Use _id for decimal.
-assert.writeOK(t.insert({x: NumberDecimal('0.10')}));
-assert.writeOK(t.insert({_id: NumberDecimal('0E3')}));
+assert.commandWorked(t.insert({x: NumberDecimal('0.10')}));
+assert.commandWorked(t.insert({_id: NumberDecimal('0E3')}));
assert.writeError(t.insert({_id: -0.0}));
// Check that we return exactly the right document, use an index to do so, and that the
diff --git a/jstests/core/index_elemmatch2.js b/jstests/core/index_elemmatch2.js
index d2ff872dc49..19d744ce41d 100644
--- a/jstests/core/index_elemmatch2.js
+++ b/jstests/core/index_elemmatch2.js
@@ -10,10 +10,10 @@ load("jstests/libs/analyze_plan.js");
const coll = db.elemMatch_index;
coll.drop();
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({a: [{}]}));
-assert.writeOK(coll.insert({a: [1, null]}));
-assert.writeOK(coll.insert({a: [{type: "Point", coordinates: [0, 0]}]}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: [{}]}));
+assert.commandWorked(coll.insert({a: [1, null]}));
+assert.commandWorked(coll.insert({a: [{type: "Point", coordinates: [0, 0]}]}));
assert.commandWorked(coll.createIndex({a: 1}, {sparse: true}));
diff --git a/jstests/core/index_filter_commands.js b/jstests/core/index_filter_commands.js
index 45acbb10a49..de110e5abb9 100644
--- a/jstests/core/index_filter_commands.js
+++ b/jstests/core/index_filter_commands.js
@@ -215,7 +215,7 @@ var collationEN = {locale: "en_US"};
assert.commandWorked(t.createIndex(indexA1, {collation: collationEN, name: "a_1:en_US"}));
assert.commandWorked(t.createIndex(indexA1, {name: "a_1"}));
-assert.writeOK(t.insert({a: "a"}));
+assert.commandWorked(t.insert({a: "a"}));
assert.commandWorked(t.runCommand('planCacheSetFilter', {query: queryAA, indexes: [indexA1]}));
@@ -244,7 +244,7 @@ assert(isCollscan(db, explain.queryPlanner.winningPlan), "Expected collscan: " +
//
t.drop();
-assert.writeOK(t.insert({a: "a"}));
+assert.commandWorked(t.insert({a: "a"}));
assert.commandWorked(t.createIndex(indexA1, {name: "a_1"}));
assert.commandWorked(t.runCommand(
@@ -264,7 +264,7 @@ assert.eq(0, filters.length, tojson(filters));
//
t.drop();
-assert.writeOK(t.insert({a: "a"}));
+assert.commandWorked(t.insert({a: "a"}));
assert.commandWorked(t.createIndex(indexA1, {name: "a_1"}));
assert.commandFailed(
diff --git a/jstests/core/index_multiple_compatibility.js b/jstests/core/index_multiple_compatibility.js
index 8c203a49941..5a2a599f536 100644
--- a/jstests/core/index_multiple_compatibility.js
+++ b/jstests/core/index_multiple_compatibility.js
@@ -58,40 +58,40 @@ function testIndexCompat(coll, index1, index2, both) {
// Check index 1 document.
if (index1.hasOwnProperty('doc')) {
- assert.writeOK(coll.insert(index1.doc));
+ assert.commandWorked(coll.insert(index1.doc));
assert.eq(coll.find(index1.doc).hint(index1.index.name).itcount(), 1);
assert.eq(coll.find(index1.doc).hint(index2.index.name).itcount(), 0);
}
// Check index 2 document.
if (index2.hasOwnProperty('doc')) {
- assert.writeOK(coll.insert(index2.doc));
+ assert.commandWorked(coll.insert(index2.doc));
assert.eq(coll.find(index2.doc).hint(index2.index.name).itcount(), 1);
assert.eq(coll.find(index2.doc).hint(index1.index.name).itcount(), 0);
}
    // Check that the 'both' document is present in both index1 and index2.
if (typeof both !== "undefined") {
- assert.writeOK(coll.insert(both));
+ assert.commandWorked(coll.insert(both));
assert.eq(coll.find(both).hint(index1.index.name).itcount(), 1);
assert.eq(coll.find(both).hint(index2.index.name).itcount(), 1);
}
// Remove index 1 document.
if (index1.hasOwnProperty('doc')) {
- assert.writeOK(coll.remove(index1.doc));
+ assert.commandWorked(coll.remove(index1.doc));
assert.eq(coll.find(index1.doc).hint(index1.index.name).itcount(), 0);
}
// Remove index 2 document.
if (index2.hasOwnProperty('doc')) {
- assert.writeOK(coll.remove(index2.doc));
+ assert.commandWorked(coll.remove(index2.doc));
assert.eq(coll.find(index2.doc).hint(index2.index.name).itcount(), 0);
}
// Remove both.
if (typeof both !== "undefined") {
- assert.writeOK(coll.remove(both));
+ assert.commandWorked(coll.remove(both));
assert.eq(coll.find(both).hint(index1.index.name).itcount(), 0);
assert.eq(coll.find(both).hint(index2.index.name).itcount(), 0);
}
@@ -188,7 +188,7 @@ testIndexCompat(coll,
{a: "foo"});
// Test that unique constraints are still enforced.
-assert.writeOK(coll.insert({a: "f"}));
+assert.commandWorked(coll.insert({a: "f"}));
assert.writeError(coll.insert({a: "F"}));
// A unique partial index and non-unique index.
@@ -206,10 +206,10 @@ testIndexCompat(
{index: {key: {a: 1}, name: "a", collation: enUSStrength2, unique: false}, doc: {a: "foo"}},
{a: 5});
-assert.writeOK(coll.insert({a: 5}));
+assert.commandWorked(coll.insert({a: 5}));
// Test that uniqueness is only enforced by the partial index.
-assert.writeOK(coll.insert({a: "foo"}));
-assert.writeOK(coll.insert({a: "foo"}));
+assert.commandWorked(coll.insert({a: "foo"}));
+assert.commandWorked(coll.insert({a: "foo"}));
assert.writeError(coll.insert({a: 5}));
// Two unique indexes with different collations.
@@ -219,7 +219,7 @@ testIndexCompat(coll,
{a: "a"});
// Unique enforced on both indexes.
-assert.writeOK(coll.insert({a: "a"}));
+assert.commandWorked(coll.insert({a: "a"}));
assert.writeError(coll.insert({a: "a"}));
assert.writeError(coll.insert({a: "A"}));
@@ -232,8 +232,8 @@ testIndexCompat(
{index: {key: {a: 1}, name: "a2", collation: enUSStrength2, unique: false}, doc: {b: 0}},
{a: "a"});
-assert.writeOK(coll.insert({a: "a"}));
-assert.writeOK(coll.insert({}));
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({a: "a"}));
+assert.commandWorked(coll.insert({}));
+assert.commandWorked(coll.insert({}));
assert.writeError(coll.insert({a: "a"}));
})();
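[Editor's note] The collation cases above all reduce to one rule: uniqueness is enforced with the index's own comparator, so a strength-2 (case-insensitive) unique index treats "a" and "A" as the same key. A minimal sketch, with a hypothetical collection name:

var coll = db.collation_unique_demo;  // hypothetical name
coll.drop();
assert.commandWorked(
    coll.createIndex({a: 1}, {unique: true, collation: {locale: "en_US", strength: 2}}));

assert.commandWorked(coll.insert({a: "a"}));
// Equal under strength-2 comparison, hence a duplicate key.
assert.writeError(coll.insert({a: "A"}));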
diff --git a/jstests/core/index_partial_2dsphere.js b/jstests/core/index_partial_2dsphere.js
index 15e6427667a..a194cf30d84 100644
--- a/jstests/core/index_partial_2dsphere.js
+++ b/jstests/core/index_partial_2dsphere.js
@@ -28,8 +28,8 @@ let indexedDoc = {
"geoJson": {"type": "Polygon", "coordinates": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]}
};
-assert.writeOK(coll.insert(unindexedDoc));
-assert.writeOK(coll.insert(indexedDoc));
+assert.commandWorked(coll.insert(unindexedDoc));
+assert.commandWorked(coll.insert(indexedDoc));
// Return the one indexed document.
assert.eq(
@@ -42,7 +42,7 @@ assert.eq(
assert.writeError(coll.update({_id: 0}, {$set: {isIndexed: 1}}));
// Update the indexed document to remove it from the index.
-assert.writeOK(coll.update({_id: 1}, {$set: {isIndexed: -1}}));
+assert.commandWorked(coll.update({_id: 1}, {$set: {isIndexed: -1}}));
// This query should now return zero documents.
assert.eq(
@@ -52,11 +52,11 @@ assert.eq(
.itcount());
// Re-index the document.
-assert.writeOK(coll.update({_id: 1}, {$set: {isIndexed: 1}}));
+assert.commandWorked(coll.update({_id: 1}, {$set: {isIndexed: 1}}));
// Remove both should succeed without error.
-assert.writeOK(coll.remove({_id: 0}));
-assert.writeOK(coll.remove({_id: 1}));
+assert.commandWorked(coll.remove({_id: 0}));
+assert.commandWorked(coll.remove({_id: 1}));
assert.eq(
0,
diff --git a/jstests/core/index_partial_create_drop.js b/jstests/core/index_partial_create_drop.js
index 0233f3fb8a0..1ad9d8178ed 100644
--- a/jstests/core/index_partial_create_drop.js
+++ b/jstests/core/index_partial_create_drop.js
@@ -43,7 +43,7 @@ assert.commandFailed(coll.createIndex(
{x: 1}, {partialFilterExpression: {$expr: {$eq: [{$trim: {input: "$x"}}, "hi"]}}}));
for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({x: i, a: i}));
+ assert.commandWorked(coll.insert({x: i, a: i}));
}
// Create partial index.
diff --git a/jstests/core/index_partial_read_ops.js b/jstests/core/index_partial_read_ops.js
index eba93a7ee23..62ba040b3fb 100644
--- a/jstests/core/index_partial_read_ops.js
+++ b/jstests/core/index_partial_read_ops.js
@@ -14,8 +14,8 @@ var coll = db.index_partial_read_ops;
coll.drop();
assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: {$lte: 1.5}}}));
-assert.writeOK(coll.insert({x: 5, a: 2})); // Not in index.
-assert.writeOK(coll.insert({x: 6, a: 1})); // In index.
+assert.commandWorked(coll.insert({x: 5, a: 2})); // Not in index.
+assert.commandWorked(coll.insert({x: 6, a: 1})); // In index.
//
// Verify basic functionality with find().
diff --git a/jstests/core/index_partial_validate.js b/jstests/core/index_partial_validate.js
index 321fede5c19..e3dedfd87db 100644
--- a/jstests/core/index_partial_validate.js
+++ b/jstests/core/index_partial_validate.js
@@ -13,7 +13,7 @@ res = t.ensureIndex({b: 1});
assert.commandWorked(res);
res = t.insert({non_indexed_field: 'x'});
-assert.writeOK(res);
+assert.commandWorked(res);
res = t.validate(true);
assert.commandWorked(res);
diff --git a/jstests/core/index_partial_write_ops.js b/jstests/core/index_partial_write_ops.js
index d79ce93155f..8d96d094a33 100644
--- a/jstests/core/index_partial_write_ops.js
+++ b/jstests/core/index_partial_write_ops.js
@@ -23,23 +23,23 @@ coll.drop();
// Create partial index.
assert.commandWorked(coll.ensureIndex({x: 1}, {unique: true, partialFilterExpression: {a: 1}}));
-assert.writeOK(coll.insert({_id: 1, x: 5, a: 2, b: 1})); // Not in index.
-assert.writeOK(coll.insert({_id: 2, x: 6, a: 1, b: 1})); // In index.
+assert.commandWorked(coll.insert({_id: 1, x: 5, a: 2, b: 1})); // Not in index.
+assert.commandWorked(coll.insert({_id: 2, x: 6, a: 1, b: 1})); // In index.
assert.eq(1, getNumKeys("x_1"));
// Move into partial index, then back out.
-assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
+assert.commandWorked(coll.update({_id: 1}, {$set: {a: 1}}));
assert.eq(2, getNumKeys("x_1"));
-assert.writeOK(coll.update({_id: 1}, {$set: {a: 2}}));
+assert.commandWorked(coll.update({_id: 1}, {$set: {a: 2}}));
assert.eq(1, getNumKeys("x_1"));
// Make a trivial update to a doc in the partial index, and to one outside it.
-assert.writeOK(coll.update({_id: 2}, {$set: {b: 2}}));
+assert.commandWorked(coll.update({_id: 2}, {$set: {b: 2}}));
assert.eq(1, getNumKeys("x_1"));
-assert.writeOK(coll.update({_id: 1}, {$set: {b: 2}}));
+assert.commandWorked(coll.update({_id: 1}, {$set: {b: 2}}));
assert.eq(1, getNumKeys("x_1"));
var array = [];
@@ -48,23 +48,23 @@ for (var i = 0; i < 2048; i++) {
}
// Update that causes record relocation.
-assert.writeOK(coll.update({_id: 2}, {$set: {b: array}}));
+assert.commandWorked(coll.update({_id: 2}, {$set: {b: array}}));
assert.eq(1, getNumKeys("x_1"));
-assert.writeOK(coll.update({_id: 1}, {$set: {b: array}}));
+assert.commandWorked(coll.update({_id: 1}, {$set: {b: array}}));
assert.eq(1, getNumKeys("x_1"));
// Delete that doesn't affect partial index.
-assert.writeOK(coll.remove({x: 5}));
+assert.commandWorked(coll.remove({x: 5}));
assert.eq(1, getNumKeys("x_1"));
// Delete that does affect partial index.
-assert.writeOK(coll.remove({x: 6}));
+assert.commandWorked(coll.remove({x: 6}));
assert.eq(0, getNumKeys("x_1"));
// Documents with duplicate keys that straddle the index.
-assert.writeOK(coll.insert({_id: 3, x: 1, a: 1})); // In index.
-assert.writeOK(coll.insert({_id: 4, x: 1, a: 0})); // Not in index.
+assert.commandWorked(coll.insert({_id: 3, x: 1, a: 1})); // In index.
+assert.commandWorked(coll.insert({_id: 4, x: 1, a: 0})); // Not in index.
assert.writeErrorWithCode(coll.insert({_id: 5, x: 1, a: 1}),
ErrorCodes.DuplicateKey); // Duplicate key constraint prevents insertion.
@@ -72,7 +72,7 @@ assert.writeErrorWithCode(coll.insert({_id: 5, x: 1, a: 1}),
assert.eq(1, getNumKeys("x_1"));
// Remove _id 4, _id 3 should remain in index.
-assert.writeOK(coll.remove({_id: 4}));
+assert.commandWorked(coll.remove({_id: 4}));
// _id 3 is still in the index.
assert.eq(1, getNumKeys("x_1"));
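[Editor's note] The duplicate-key sequence above works because a partial unique index constrains only the documents matching its filter; documents outside the filter never collide. A compact sketch, with a hypothetical collection name:

var coll = db.partial_unique_demo;  // hypothetical name
coll.drop();
assert.commandWorked(
    coll.createIndex({x: 1}, {unique: true, partialFilterExpression: {a: 1}}));

assert.commandWorked(coll.insert({x: 1, a: 1}));  // in the index
assert.commandWorked(coll.insert({x: 1, a: 0}));  // filtered out, no conflict
assert.writeErrorWithCode(coll.insert({x: 1, a: 1}),
                          ErrorCodes.DuplicateKey);  // collides within the index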
diff --git a/jstests/core/index_stats.js b/jstests/core/index_stats.js
index ee99fdc4831..9d1ee063812 100644
--- a/jstests/core/index_stats.js
+++ b/jstests/core/index_stats.js
@@ -57,9 +57,9 @@ var getIndexNamesForWinningPlan = function(explain) {
return indexNameList;
};
-assert.writeOK(col.insert({a: 1, b: 1, c: 1}));
-assert.writeOK(col.insert({a: 2, b: 2, c: 2}));
-assert.writeOK(col.insert({a: 3, b: 3, c: 3}));
+assert.commandWorked(col.insert({a: 1, b: 1, c: 1}));
+assert.commandWorked(col.insert({a: 2, b: 2, c: 2}));
+assert.commandWorked(col.insert({a: 3, b: 3, c: 3}));
//
// Confirm no index stats object exists prior to index creation.
@@ -105,7 +105,7 @@ res = db.runCommand({findAndModify: colName, query: {a: 2}, remove: true});
assert.commandWorked(res);
countA++;
assert.eq(countA, getUsageCount("a_1"));
-assert.writeOK(col.insert(res.value));
+assert.commandWorked(col.insert(res.value));
//
// Confirm $and operation ticks indexes for winning plan, but not rejected plans.
@@ -167,14 +167,14 @@ assert.eq(countB, getUsageCount("b_1_c_1"));
//
// Confirm index stats tick on update().
//
-assert.writeOK(col.update({a: 2}, {$set: {d: 2}}));
+assert.commandWorked(col.update({a: 2}, {$set: {d: 2}}));
countA++;
assert.eq(countA, getUsageCount("a_1"));
//
// Confirm index stats tick on remove().
//
-assert.writeOK(col.remove({a: 2}));
+assert.commandWorked(col.remove({a: 2}));
countA++;
assert.eq(countA, getUsageCount("a_1"));
@@ -212,9 +212,9 @@ assert.throws(function() {
//
const foreignCollection = db[colName + "_foreign"];
foreignCollection.drop();
-assert.writeOK(foreignCollection.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
+assert.commandWorked(foreignCollection.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
col.drop();
-assert.writeOK(col.insert([{_id: 0, foreignId: 1}, {_id: 1, foreignId: 2}]));
+assert.commandWorked(col.insert([{_id: 0, foreignId: 1}, {_id: 1, foreignId: 2}]));
assert.eq(0, getUsageCount("_id_"));
assert.eq(2,
col.aggregate([
@@ -238,7 +238,7 @@ assert.eq(2,
// Confirm index use is recorded for $graphLookup.
//
foreignCollection.drop();
-assert.writeOK(foreignCollection.insert([
+assert.commandWorked(foreignCollection.insert([
{_id: 0, connectedTo: 1},
{_id: 1, connectedTo: "X"},
{_id: 2, connectedTo: 3},
@@ -246,7 +246,7 @@ assert.writeOK(foreignCollection.insert([
// $graphLookup doesn't cache the query.
]));
col.drop();
-assert.writeOK(col.insert([{_id: 0, foreignId: 0}, {_id: 1, foreignId: 2}]));
+assert.commandWorked(col.insert([{_id: 0, foreignId: 0}, {_id: 1, foreignId: 2}]));
assert.eq(0, getUsageCount("_id_"));
assert.eq(2,
col.aggregate([
diff --git a/jstests/core/index_type_change.js b/jstests/core/index_type_change.js
index af2671338a2..96f020c9ec8 100644
--- a/jstests/core/index_type_change.js
+++ b/jstests/core/index_type_change.js
@@ -17,12 +17,12 @@ var coll = db.index_type_change;
coll.drop();
assert.commandWorked(coll.ensureIndex({a: 1}));
-assert.writeOK(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({a: 2}));
assert.eq(1, coll.find({a: {$type: "double"}}).itcount());
var newVal = new NumberLong(2);
var res = coll.update({}, {a: newVal}); // Replacement update.
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(res.nMatched, 1);
if (coll.getMongo().writeMode() == "commands")
assert.eq(res.nModified, 1);
diff --git a/jstests/core/indexes_multiple_commands.js b/jstests/core/indexes_multiple_commands.js
index 7058fd32019..f2b97f0e4f5 100644
--- a/jstests/core/indexes_multiple_commands.js
+++ b/jstests/core/indexes_multiple_commands.js
@@ -136,7 +136,7 @@ assertIndexesCreated(
() => coll.createIndex({a: 1},
{name: "caseInsensitive", collation: {locale: "en_US", strength: 2}}));
-assert.writeOK(coll.insert([{a: "a"}, {a: "A"}, {a: 20}]));
+assert.commandWorked(coll.insert([{a: "a"}, {a: "A"}, {a: 20}]));
// An ambiguous hint pattern fails.
assert.throws(() => coll.find({a: 1}).hint({a: 1}).itcount());
diff --git a/jstests/core/indexu.js b/jstests/core/indexu.js
index 953d178fff5..f8b6af7f15d 100644
--- a/jstests/core/indexu.js
+++ b/jstests/core/indexu.js
@@ -11,7 +11,7 @@ var dupDoc2 = {a: [{'1': 1}, 'c']};
var noDupDoc = {a: [{'1': 1}]};
// Test that we can't index dupDoc.
-assert.writeOK(t.save(dupDoc));
+assert.commandWorked(t.save(dupDoc));
assert.commandFailed(t.ensureIndex({'a.0': 1}));
t.remove({});
@@ -20,7 +20,7 @@ assert.writeError(t.save(dupDoc));
// Test that we can't index dupDoc2.
t.drop();
-assert.writeOK(t.save(dupDoc2));
+assert.commandWorked(t.save(dupDoc2));
assert.commandFailed(t.ensureIndex({'a.1': 1}));
t.remove({});
@@ -30,22 +30,22 @@ assert.writeError(t.save(dupDoc2));
// Test that we can index dupDoc with a different index.
t.drop();
t.ensureIndex({'a.b': 1});
-assert.writeOK(t.save(dupDoc));
+assert.commandWorked(t.save(dupDoc));
// Test number field starting with hyphen.
t.drop();
t.ensureIndex({'a.-1': 1});
-assert.writeOK(t.save({a: [{'-1': 1}]}));
+assert.commandWorked(t.save({a: [{'-1': 1}]}));
// Test number field starting with zero.
t.drop();
t.ensureIndex({'a.00': 1});
-assert.writeOK(t.save({a: [{'00': 1}]}));
+assert.commandWorked(t.save({a: [{'00': 1}]}));
// Test multiple array indexes
t.drop();
t.ensureIndex({'a.0': 1, 'a.1': 1});
-assert.writeOK(t.save({a: [{'1': 1}]}));
+assert.commandWorked(t.save({a: [{'1': 1}]}));
assert.writeError(t.save({a: [{'1': 1}, 4]}));
// Test that we can index noDupDoc.
@@ -57,7 +57,7 @@ assert.commandWorked(t.ensureIndex({'a.1': 1}));
t.drop();
t.ensureIndex({'a.0': 1});
t.ensureIndex({'a.1': 1});
-assert.writeOK(t.save(noDupDoc));
+assert.commandWorked(t.save(noDupDoc));
// Test that we can query noDupDoc.
assert.eq(1, t.find({'a.1': 1}).hint({'a.1': 1}).itcount());
@@ -80,7 +80,7 @@ assert.commandFailed(t.ensureIndex({'a.0.0': 1}));
// Check where there is a duplicate for a fully addressed field.
t.drop();
-assert.writeOK(t.save({a: [[1], {'0': [1]}]}));
+assert.commandWorked(t.save({a: [[1], {'0': [1]}]}));
assert.commandFailed(t.ensureIndex({'a.0.0': 1}));
// Two ways of addressing that parse to an array.
diff --git a/jstests/core/insert1.js b/jstests/core/insert1.js
index f3cb243dad1..0365e439707 100644
--- a/jstests/core/insert1.js
+++ b/jstests/core/insert1.js
@@ -47,7 +47,7 @@ var count = 100 * 1000;
for (i = 0; i < count; ++i) {
toInsert.push({_id: i, a: 5});
}
-assert.writeOK(t.insert(toInsert));
+assert.commandWorked(t.insert(toInsert));
doc = t.findOne({_id: 1});
assert.eq(5, doc.a);
assert.eq(count, t.count(), "bad count");
diff --git a/jstests/core/insert_illegal_doc.js b/jstests/core/insert_illegal_doc.js
index d91866e8766..64ecb0ae7ad 100644
--- a/jstests/core/insert_illegal_doc.js
+++ b/jstests/core/insert_illegal_doc.js
@@ -19,7 +19,7 @@ assert.eq(0, coll.find().itcount(), "should not be a doc");
// test update
res = coll.insert({_id: 1});
-assert.writeOK(res, "insert failed");
+assert.commandWorked(res, "insert failed");
res = coll.update({_id: 1}, {$set: {a: [1, 2, 3], b: [4, 5, 6]}});
assert.writeError(res);
assert.eq(res.getWriteError().code, ErrorCodes.CannotIndexParallelArrays);
diff --git a/jstests/core/json_schema/json_schema.js b/jstests/core/json_schema/json_schema.js
index 13a10fde323..7611cefaa3a 100644
--- a/jstests/core/json_schema/json_schema.js
+++ b/jstests/core/json_schema/json_schema.js
@@ -12,16 +12,16 @@ load("jstests/libs/assert_schema_match.js");
let coll = db.jstests_json_schema;
coll.drop();
-assert.writeOK(coll.insert({_id: 0, num: 3}));
-assert.writeOK(coll.insert({_id: 1, num: -3}));
-assert.writeOK(coll.insert({_id: 2, num: NumberInt(2)}));
-assert.writeOK(coll.insert({_id: 3, num: NumberInt(-2)}));
-assert.writeOK(coll.insert({_id: 4, num: NumberLong(1)}));
-assert.writeOK(coll.insert({_id: 5, num: NumberLong(-1)}));
-assert.writeOK(coll.insert({_id: 6, num: {}}));
-assert.writeOK(coll.insert({_id: 7, num: "str"}));
-assert.writeOK(coll.insert({_id: 8, num: "string"}));
-assert.writeOK(coll.insert({_id: 9}));
+assert.commandWorked(coll.insert({_id: 0, num: 3}));
+assert.commandWorked(coll.insert({_id: 1, num: -3}));
+assert.commandWorked(coll.insert({_id: 2, num: NumberInt(2)}));
+assert.commandWorked(coll.insert({_id: 3, num: NumberInt(-2)}));
+assert.commandWorked(coll.insert({_id: 4, num: NumberLong(1)}));
+assert.commandWorked(coll.insert({_id: 5, num: NumberLong(-1)}));
+assert.commandWorked(coll.insert({_id: 6, num: {}}));
+assert.commandWorked(coll.insert({_id: 7, num: "str"}));
+assert.commandWorked(coll.insert({_id: 8, num: "string"}));
+assert.commandWorked(coll.insert({_id: 9}));
// Test that $jsonSchema fails to parse if its argument is not an object.
assert.throws(function() {
@@ -176,10 +176,10 @@ assert.eq([{_id: 8}, {_id: 9}],
.toArray());
coll.drop();
-assert.writeOK(coll.insert({_id: 0, obj: 3}));
-assert.writeOK(coll.insert({_id: 1, obj: {f1: {f3: "str"}, f2: "str"}}));
-assert.writeOK(coll.insert({_id: 2, obj: {f1: "str", f2: "str"}}));
-assert.writeOK(coll.insert({_id: 3, obj: {f1: 1, f2: "str"}}));
+assert.commandWorked(coll.insert({_id: 0, obj: 3}));
+assert.commandWorked(coll.insert({_id: 1, obj: {f1: {f3: "str"}, f2: "str"}}));
+assert.commandWorked(coll.insert({_id: 2, obj: {f1: "str", f2: "str"}}));
+assert.commandWorked(coll.insert({_id: 3, obj: {f1: 1, f2: "str"}}));
// Test that the properties keyword can be used recursively, and that it does not apply when the
// field does not contain an object.
@@ -224,11 +224,11 @@ assert.eq([{_id: 0}, {_id: 1}, {_id: 2}],
.toArray());
coll.drop();
-assert.writeOK(coll.insert({_id: 0, arr: 3}));
-assert.writeOK(coll.insert({_id: 1, arr: [1, "foo"]}));
-assert.writeOK(coll.insert({_id: 2, arr: [{a: 1}, {b: 2}]}));
-assert.writeOK(coll.insert({_id: 3, arr: []}));
-assert.writeOK(coll.insert({_id: 4, arr: {a: []}}));
+assert.commandWorked(coll.insert({_id: 0, arr: 3}));
+assert.commandWorked(coll.insert({_id: 1, arr: [1, "foo"]}));
+assert.commandWorked(coll.insert({_id: 2, arr: [{a: 1}, {b: 2}]}));
+assert.commandWorked(coll.insert({_id: 3, arr: []}));
+assert.commandWorked(coll.insert({_id: 4, arr: {a: []}}));
// Test that the type:"array" restriction works as expected.
assert.eq([{_id: 1}, {_id: 2}, {_id: 3}],
@@ -323,8 +323,8 @@ assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.validator, {$jsonSc
// Test that $jsonSchema and various internal match expressions work correctly with sibling
// predicates.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, a: 1, b: 1}));
-assert.writeOK(coll.insert({_id: 2, a: 2, b: 2}));
+assert.commandWorked(coll.insert({_id: 1, a: 1, b: 1}));
+assert.commandWorked(coll.insert({_id: 2, a: 2, b: 2}));
assert.eq(
1,
diff --git a/jstests/core/json_schema/misc_validation.js b/jstests/core/json_schema/misc_validation.js
index fbd15e7b31a..045a7c74878 100644
--- a/jstests/core/json_schema/misc_validation.js
+++ b/jstests/core/json_schema/misc_validation.js
@@ -84,8 +84,8 @@ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
// Test that a valid $jsonSchema is legal in a count command.
coll.drop();
-assert.writeOK(coll.insert({a: 1, b: "str"}));
-assert.writeOK(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: 1, b: "str"}));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
assert.eq(1, coll.count({$jsonSchema: {properties: {a: {type: "number"}, b: {type: "string"}}}}));
// Test that a valid $jsonSchema is legal in a $geoNear stage.
@@ -93,8 +93,8 @@ const point = {
type: "Point",
coordinates: [31.0, 41.0]
};
-assert.writeOK(coll.insert({geo: point, a: 1}));
-assert.writeOK(coll.insert({geo: point, a: 0}));
+assert.commandWorked(coll.insert({geo: point, a: 1}));
+assert.commandWorked(coll.insert({geo: point, a: 0}));
assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
res = coll.aggregate({
$geoNear: {
@@ -111,10 +111,10 @@ assert.eq(res[0].loc, point, tojson(res));
// Test that a valid $jsonSchema is legal in a distinct command.
coll.drop();
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({a: 2}));
-assert.writeOK(coll.insert({a: "str"}));
-assert.writeOK(coll.insert({a: ["STR", "str"]}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({a: "str"}));
+assert.commandWorked(coll.insert({a: ["STR", "str"]}));
assert(arrayEq([1, 2], coll.distinct("a", {$jsonSchema: {properties: {a: {type: "number"}}}})));
@@ -127,16 +127,16 @@ const caseInsensitiveCollation = {
coll.drop();
assert.commandWorked(
testDB.createCollection(coll.getName(), {collation: caseInsensitiveCollation}));
-assert.writeOK(coll.insert({a: "str"}));
-assert.writeOK(coll.insert({a: ["STR", "sTr"]}));
+assert.commandWorked(coll.insert({a: "str"}));
+assert.commandWorked(coll.insert({a: ["STR", "sTr"]}));
assert.eq(0, coll.find({$jsonSchema: schema}).itcount());
assert.eq(2, coll.find({$jsonSchema: {properties: {a: {uniqueItems: true}}}}).itcount());
assert.eq(2, coll.find({a: "STR"}).itcount());
// Test that $jsonSchema does not respect the collation set explicitly on a query.
coll.drop();
-assert.writeOK(coll.insert({a: "str"}));
-assert.writeOK(coll.insert({a: ["STR", "sTr"]}));
+assert.commandWorked(coll.insert({a: "str"}));
+assert.commandWorked(coll.insert({a: ["STR", "sTr"]}));
if (testDB.getMongo().useReadCommands()) {
assert.eq(0, coll.find({$jsonSchema: schema}).collation(caseInsensitiveCollation).itcount());
@@ -155,7 +155,7 @@ if (testDB.getMongo().useReadCommands()) {
bulk.insert({name: "John", age: "unknown"});
bulk.insert({name: "Mark"});
bulk.insert({});
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
assert.commandWorked(testDB.createView(
"seniorCitizens", coll.getName(), [{
@@ -186,9 +186,9 @@ const foreign = testDB.json_schema_foreign;
foreign.drop();
coll.drop();
for (let i = 0; i < 10; i++) {
- assert.writeOK(foreign.insert({_id: i, n: [i - 1, i + 1]}));
+ assert.commandWorked(foreign.insert({_id: i, n: [i - 1, i + 1]}));
}
-assert.writeOK(coll.insert({starting: 0}));
+assert.commandWorked(coll.insert({starting: 0}));
res = coll.aggregate({
$graphLookup: {
@@ -206,10 +206,10 @@ assert.eq(res[0].integers.length, 5);
// Test that $jsonSchema is legal in a delete command.
coll.drop();
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({a: 2}));
-assert.writeOK(coll.insert({a: "str"}));
-assert.writeOK(coll.insert({a: [3]}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({a: "str"}));
+assert.commandWorked(coll.insert({a: [3]}));
schema = {
properties: {a: {type: "number", maximum: 2}}
@@ -235,18 +235,18 @@ if (db.getMongo().writeMode() === "commands") {
// Test that $jsonSchema is legal in an update command.
coll.drop();
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 2}));
res = coll.update({$jsonSchema: schema}, {$inc: {a: 1}}, {multi: true});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(2, res.nMatched);
assert.eq(1, coll.find({$jsonSchema: schema}).itcount());
// Test that $jsonSchema is legal in a findAndModify command.
coll.drop();
-assert.writeOK(coll.insert({a: "long_string"}));
-assert.writeOK(coll.insert({a: "short"}));
+assert.commandWorked(coll.insert({a: "long_string"}));
+assert.commandWorked(coll.insert({a: "short"}));
schema = {
properties: {a: {type: "string", minLength: 6}}
@@ -257,9 +257,9 @@ assert.eq(1, coll.find({$jsonSchema: schema}).itcount());
// Test that $jsonSchema works correctly in the presence of a basic b-tree index.
coll.drop();
-assert.writeOK(coll.insert({_id: 1, a: 1, b: 1}));
-assert.writeOK(coll.insert({_id: 2, a: 2, b: 2, point: [5, 5]}));
-assert.writeOK(coll.insert({_id: 3, a: "temp text test"}));
+assert.commandWorked(coll.insert({_id: 1, a: 1, b: 1}));
+assert.commandWorked(coll.insert({_id: 2, a: 2, b: 2, point: [5, 5]}));
+assert.commandWorked(coll.insert({_id: 3, a: "temp text test"}));
assert.commandWorked(coll.createIndex({a: 1}));
assert.eq(3, coll.find({$jsonSchema: {}}).itcount());
@@ -312,7 +312,7 @@ assert.eq(1, coll.find({$and: [{$jsonSchema: {}}, {$text: {$search: "TEST"}}]}).
if (!isMongos) {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: true}));
+ assert.commandWorked(coll.insert({_id: 0, a: true}));
// Test $jsonSchema in the precondition checking for applyOps.
res = testDB.adminCommand({
@@ -331,6 +331,6 @@ if (!isMongos) {
// Use majority write concern to clear the drop-pending that can cause lock conflicts with
// transactions.
coll.drop({writeConcern: {w: "majority"}});
- assert.writeOK(coll.insert({_id: 1, a: true}));
+ assert.commandWorked(coll.insert({_id: 1, a: true}));
}
}());
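The misc_validation.js hunks above extend the same substitution to bulk writes: assert.commandWorked is assumed to recognize the BulkWriteResult returned by execute(), just as assert.writeOK did. A short sketch under that assumption (collection name `demo` is hypothetical):

// Sketch only: `demo` is a stand-in collection, not one from this patch.
const bulk = db.demo.initializeUnorderedBulkOp();
bulk.insert({name: "John", age: 30});
bulk.insert({name: "Mark"});
assert.commandWorked(bulk.execute());  // previously assert.writeOK(bulk.execute())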
diff --git a/jstests/core/kill_cursors.js b/jstests/core/kill_cursors.js
index a65078028ed..b839a3bf8c8 100644
--- a/jstests/core/kill_cursors.js
+++ b/jstests/core/kill_cursors.js
@@ -20,7 +20,7 @@ var coll = db.jstest_killcursors;
coll.drop();
for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
}
// killCursors command should fail if the collection name is not a string.
diff --git a/jstests/core/killop_drop_collection.js b/jstests/core/killop_drop_collection.js
index b4efd13733b..a25bce9b2d6 100644
--- a/jstests/core/killop_drop_collection.js
+++ b/jstests/core/killop_drop_collection.js
@@ -17,9 +17,9 @@ var collectionName = "killop_drop";
let collection = db.getCollection(collectionName);
collection.drop();
for (let i = 0; i < 1000; i++) {
- assert.writeOK(collection.insert({x: i}));
+ assert.commandWorked(collection.insert({x: i}));
}
-assert.writeOK(collection.createIndex({x: 1}, {background: true}));
+assert.commandWorked(collection.createIndex({x: 1}, {background: true}));
// Attempt to fsyncLock the database, aborting early if the storage engine doesn't support it.
const storageEngine = jsTest.options().storageEngine;
diff --git a/jstests/core/min_max_bounds.js b/jstests/core/min_max_bounds.js
index c2171df23fb..8419e3c0eca 100644
--- a/jstests/core/min_max_bounds.js
+++ b/jstests/core/min_max_bounds.js
@@ -10,9 +10,9 @@ load('jstests/aggregation/extras/utils.js'); // For resultsEq.
var coll = db.query_bound_inclusion;
coll.drop();
-assert.writeOK(coll.insert({a: 1, b: 1}));
-assert.writeOK(coll.insert({a: 2, b: 2}));
-assert.writeOK(coll.insert({a: 3, b: 3}));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: 2, b: 2}));
+assert.commandWorked(coll.insert({a: 3, b: 3}));
assert.commandWorked(coll.createIndex({a: 1}));
diff --git a/jstests/core/min_max_key.js b/jstests/core/min_max_key.js
index 5314e8d3436..b9e9c3d6b89 100644
--- a/jstests/core/min_max_key.js
+++ b/jstests/core/min_max_key.js
@@ -18,7 +18,7 @@ const allElements = [
{_id: "a_string", a: "hello"}
];
-assert.writeOK(coll.insert(allElements));
+assert.commandWorked(coll.insert(allElements));
function testQueriesWithMinOrMaxKey() {
const eqMinRes = coll.find({a: {$eq: MinKey}}).toArray();
diff --git a/jstests/core/minmax_edge.js b/jstests/core/minmax_edge.js
index 081af7a347e..135d83337c3 100644
--- a/jstests/core/minmax_edge.js
+++ b/jstests/core/minmax_edge.js
@@ -35,17 +35,17 @@ function verifyResultIds(results, expectedIds) {
*/
function reset(t) {
t.drop();
- assert.writeOK(t.insert({_id: 0, a: 1, b: 1}));
- assert.writeOK(t.insert({_id: 1, a: 1, b: 2}));
- assert.writeOK(t.insert({_id: 2, a: 1, b: 3}));
+ assert.commandWorked(t.insert({_id: 0, a: 1, b: 1}));
+ assert.commandWorked(t.insert({_id: 1, a: 1, b: 2}));
+ assert.commandWorked(t.insert({_id: 2, a: 1, b: 3}));
- assert.writeOK(t.insert({_id: 3, a: 2, b: 1}));
- assert.writeOK(t.insert({_id: 4, a: 2, b: 2}));
- assert.writeOK(t.insert({_id: 5, a: 2, b: 3}));
+ assert.commandWorked(t.insert({_id: 3, a: 2, b: 1}));
+ assert.commandWorked(t.insert({_id: 4, a: 2, b: 2}));
+ assert.commandWorked(t.insert({_id: 5, a: 2, b: 3}));
- assert.writeOK(t.insert({_id: 6, a: 3, b: 1}));
- assert.writeOK(t.insert({_id: 7, a: 3, b: 2}));
- assert.writeOK(t.insert({_id: 8, a: 3, b: 3}));
+ assert.commandWorked(t.insert({_id: 6, a: 3, b: 1}));
+ assert.commandWorked(t.insert({_id: 7, a: 3, b: 2}));
+ assert.commandWorked(t.insert({_id: 8, a: 3, b: 3}));
}
// Two helpers to save typing
diff --git a/jstests/core/mr1.js b/jstests/core/mr1.js
index 245059de523..fa7add00b89 100644
--- a/jstests/core/mr1.js
+++ b/jstests/core/mr1.js
@@ -167,7 +167,7 @@ if (true) {
correct[k] = 1;
bulk.insert({x: i, tags: [k]});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
res = db.runCommand({mapreduce: "mr1", out: "mr1_foo", map: m, reduce: r});
d(res);
diff --git a/jstests/core/mr5.js b/jstests/core/mr5.js
index 1858eaa57a5..074002b5bc6 100644
--- a/jstests/core/mr5.js
+++ b/jstests/core/mr5.js
@@ -12,12 +12,12 @@ load("jstests/aggregation/extras/utils.js"); // For resultsEq.
const t = db.mr5;
t.drop();
-assert.writeOK(t.insert({"partner": 1, "visits": 9}));
-assert.writeOK(t.insert({"partner": 2, "visits": 9}));
-assert.writeOK(t.insert({"partner": 1, "visits": 11}));
-assert.writeOK(t.insert({"partner": 1, "visits": 30}));
-assert.writeOK(t.insert({"partner": 2, "visits": 41}));
-assert.writeOK(t.insert({"partner": 2, "visits": 41}));
+assert.commandWorked(t.insert({"partner": 1, "visits": 9}));
+assert.commandWorked(t.insert({"partner": 2, "visits": 9}));
+assert.commandWorked(t.insert({"partner": 1, "visits": 11}));
+assert.commandWorked(t.insert({"partner": 1, "visits": 30}));
+assert.commandWorked(t.insert({"partner": 2, "visits": 41}));
+assert.commandWorked(t.insert({"partner": 2, "visits": 41}));
let mapper = function() {
emit(this.partner, {stats: [this.visits]});
diff --git a/jstests/core/mr_bigobject_replace.js b/jstests/core/mr_bigobject_replace.js
index c02ee7f1fac..843afd3757e 100644
--- a/jstests/core/mr_bigobject_replace.js
+++ b/jstests/core/mr_bigobject_replace.js
@@ -45,7 +45,7 @@ function runTest(testOptions) {
db.mr_bigobject_replace.drop();
// Insert a document so the mapper gets run.
- assert.writeOK(db.input.insert({}));
+ assert.commandWorked(db.input.insert({}));
var res = db.runCommand(Object.extend({
mapReduce: "input",
diff --git a/jstests/core/mr_optim.js b/jstests/core/mr_optim.js
index d4d088f93e5..1fbcaabbea1 100644
--- a/jstests/core/mr_optim.js
+++ b/jstests/core/mr_optim.js
@@ -18,7 +18,7 @@ if (res.ok !== 1) {
}
for (var i = 0; i < 1000; ++i) {
- assert.writeOK(t.save({a: Math.random(1000), b: Math.random(10000)}));
+ assert.commandWorked(t.save({a: Math.random(1000), b: Math.random(10000)}));
}
function m() {
diff --git a/jstests/core/mr_tolerates_js_exception.js b/jstests/core/mr_tolerates_js_exception.js
index 2689bce8433..e291639cb92 100644
--- a/jstests/core/mr_tolerates_js_exception.js
+++ b/jstests/core/mr_tolerates_js_exception.js
@@ -15,7 +15,7 @@
let coll = db.mr_tolerates_js_exception;
coll.drop();
for (let i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({_id: i, a: 1}));
+ assert.commandWorked(coll.insert({_id: i, a: 1}));
}
// Test that the command fails with a JS interpreter failure error when the reduce function
diff --git a/jstests/core/nan.js b/jstests/core/nan.js
index 4cca00c4b66..fbdba566d7b 100644
--- a/jstests/core/nan.js
+++ b/jstests/core/nan.js
@@ -7,18 +7,18 @@
const coll = db.jstests_nan;
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: -Infinity}));
-assert.writeOK(coll.insert({_id: 1, a: -3}));
-assert.writeOK(coll.insert({_id: 2, a: 0}));
-assert.writeOK(coll.insert({_id: 3, a: 3}));
-assert.writeOK(coll.insert({_id: 4, a: Infinity}));
-assert.writeOK(coll.insert({_id: 5, a: NaN}));
-assert.writeOK(coll.insert({_id: 6, a: -NaN}));
-assert.writeOK(coll.insert({_id: 7, a: undefined}));
-assert.writeOK(coll.insert({_id: 8, a: null}));
-assert.writeOK(coll.insert({_id: 9, a: []}));
-assert.writeOK(coll.insert({_id: 10, a: {b: 1}}));
-assert.writeOK(coll.insert({_id: 11, a: {b: 1}}));
+assert.commandWorked(coll.insert({_id: 0, a: -Infinity}));
+assert.commandWorked(coll.insert({_id: 1, a: -3}));
+assert.commandWorked(coll.insert({_id: 2, a: 0}));
+assert.commandWorked(coll.insert({_id: 3, a: 3}));
+assert.commandWorked(coll.insert({_id: 4, a: Infinity}));
+assert.commandWorked(coll.insert({_id: 5, a: NaN}));
+assert.commandWorked(coll.insert({_id: 6, a: -NaN}));
+assert.commandWorked(coll.insert({_id: 7, a: undefined}));
+assert.commandWorked(coll.insert({_id: 8, a: null}));
+assert.commandWorked(coll.insert({_id: 9, a: []}));
+assert.commandWorked(coll.insert({_id: 10, a: {b: 1}}));
+assert.commandWorked(coll.insert({_id: 11, a: {b: 1}}));
/**
* Ensures correct results for EQ, LT, LTE, GT, and GTE cases.
diff --git a/jstests/core/natural.js b/jstests/core/natural.js
index 2471e2be495..fa7a511eda1 100644
--- a/jstests/core/natural.js
+++ b/jstests/core/natural.js
@@ -8,9 +8,9 @@ var coll = db.jstests_natural;
coll.drop();
assert.commandWorked(coll.ensureIndex({a: 1}));
-assert.writeOK(coll.insert({_id: 1, a: 3}));
-assert.writeOK(coll.insert({_id: 2, a: 2}));
-assert.writeOK(coll.insert({_id: 3, a: 1}));
+assert.commandWorked(coll.insert({_id: 1, a: 3}));
+assert.commandWorked(coll.insert({_id: 2, a: 2}));
+assert.commandWorked(coll.insert({_id: 3, a: 1}));
// Regression test for SERVER-20660. Ensures that documents returned with $natural don't have
// any extraneous fields.
diff --git a/jstests/core/no_db_created.js b/jstests/core/no_db_created.js
index e563a7cd468..bb0d998a3fe 100644
--- a/jstests/core/no_db_created.js
+++ b/jstests/core/no_db_created.js
@@ -22,16 +22,16 @@ var coll = mydb.fake;
// force:true is for replset passthroughs
assert.commandFailed(coll.runCommand("compact", {force: true}));
noDB(mydb);
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
mydb.dropDatabase();
assert.commandFailed(coll.runCommand("dropIndexes"));
noDB(mydb);
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
mydb.dropDatabase();
assert.commandFailed(coll.runCommand("collMod", {expireAfterSeconds: 1}));
noDB(mydb);
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
mydb.dropDatabase();
}()); \ No newline at end of file
diff --git a/jstests/core/not2.js b/jstests/core/not2.js
index 8f0f91da1d5..0692c297762 100644
--- a/jstests/core/not2.js
+++ b/jstests/core/not2.js
@@ -20,10 +20,10 @@ function fail(query) {
}
function doTest() {
- assert.writeOK(coll.remove({}));
+ assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({i: "a"}));
- assert.writeOK(coll.insert({i: "b"}));
+ assert.commandWorked(coll.insert({i: "a"}));
+ assert.commandWorked(coll.insert({i: "b"}));
// TODO SERVER-12735: We currently do not handle double negatives during query
// canonicalization.
@@ -58,14 +58,14 @@ function doTest() {
check({i: {$not: {$type: 1}}}, ["a", "b"]);
check({i: {$not: {$type: 2}}}, []);
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({i: 1}));
+ assert.commandWorked(coll.remove({}));
+ assert.commandWorked(coll.insert({i: 1}));
check({i: {$not: {$mod: [5, 1]}}}, []);
check({i: {$mod: [5, 2]}}, []);
check({i: {$not: {$mod: [5, 2]}}}, [1]);
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({i: ["a", "b"]}));
+ assert.commandWorked(coll.remove({}));
+ assert.commandWorked(coll.insert({i: ["a", "b"]}));
check({i: {$not: {$size: 2}}}, []);
check({i: {$not: {$size: 3}}}, [["a", "b"]]);
check({i: {$not: {$gt: "a"}}}, []);
@@ -73,9 +73,9 @@ function doTest() {
check({i: {$not: {$all: ["a", "b"]}}}, []);
check({i: {$not: {$all: ["c"]}}}, [["a", "b"]]);
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({i: [{j: "a"}]}));
- assert.writeOK(coll.insert({i: [{j: "b"}]}));
+ assert.commandWorked(coll.remove({}));
+ assert.commandWorked(coll.insert({i: [{j: "a"}]}));
+ assert.commandWorked(coll.insert({i: [{j: "b"}]}));
check({i: {$not: {$elemMatch: {j: "a"}}}}, [[{j: "b"}]]);
check({i: {$not: {$elemMatch: {j: "f"}}}}, [[{j: "a"}], [{j: "b"}]]);
}
diff --git a/jstests/core/null_query_semantics.js b/jstests/core/null_query_semantics.js
index 8f664a6d80b..00aaacd6ed0 100644
--- a/jstests/core/null_query_semantics.js
+++ b/jstests/core/null_query_semantics.js
@@ -19,7 +19,7 @@ function extractAValues(results) {
function testNotEqualsNullSemantics() {
// For the first portion of the test, only insert documents without arrays. This will avoid
// making the indexes multi-key, which may allow an index to be used to answer the queries.
- assert.writeOK(coll.insert([
+ assert.commandWorked(coll.insert([
{_id: "a_empty_subobject", a: {}},
{_id: "a_null", a: null},
{_id: "a_number", a: 4},
@@ -214,7 +214,7 @@ function testNotEqualsNullSemantics() {
assert.eq(writeResult.getWriteErrors()[0].code, 16766, tojson(writeResult));
return;
}
- assert.writeOK(writeResult);
+ assert.commandWorked(writeResult);
// Test the semantics of the query {a: {$eq: null}}.
(function testBasicNullQuery() {
diff --git a/jstests/core/opcounters_write_cmd.js b/jstests/core/opcounters_write_cmd.js
index 2aef8fe72c5..3ffdac0b0b5 100644
--- a/jstests/core/opcounters_write_cmd.js
+++ b/jstests/core/opcounters_write_cmd.js
@@ -31,13 +31,13 @@ t.drop();
// Single insert, no error.
opCounters = newdb.serverStatus().opcounters;
res = t.insert({_id: 0});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(opCounters.insert + 1, newdb.serverStatus().opcounters.insert);
// Bulk insert, no error.
opCounters = newdb.serverStatus().opcounters;
res = t.insert([{_id: 1}, {_id: 2}]);
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(opCounters.insert + 2, newdb.serverStatus().opcounters.insert);
// Test is not run when in compatibility mode as errors are not counted
@@ -71,7 +71,7 @@ t.insert({_id: 0});
// Update, no error.
opCounters = newdb.serverStatus().opcounters;
res = t.update({_id: 0}, {$set: {a: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(opCounters.update + 1, newdb.serverStatus().opcounters.update);
// Update, with error.
@@ -90,7 +90,7 @@ t.insert([{_id: 0}, {_id: 1}]);
// Delete, no error.
opCounters = newdb.serverStatus().opcounters;
res = t.remove({_id: 0});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(opCounters.delete + 1, newdb.serverStatus().opcounters.delete);
// Delete, with error.
diff --git a/jstests/core/operation_latency_histogram.js b/jstests/core/operation_latency_histogram.js
index d81308192ac..5e2b6a49ef7 100644
--- a/jstests/core/operation_latency_histogram.js
+++ b/jstests/core/operation_latency_histogram.js
@@ -48,13 +48,13 @@ var lastHistogram = getHistogramStats(testColl);
// Insert
var numRecords = 100;
for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.insert({_id: i}));
+ assert.commandWorked(testColl.insert({_id: i}));
}
lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0);
// Update
for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.update({_id: i}, {x: i}));
+ assert.commandWorked(testColl.update({_id: i}, {x: i}));
}
lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0);
@@ -85,13 +85,13 @@ lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, numRecords
// Remove
for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.remove({_id: i}));
+ assert.commandWorked(testColl.remove({_id: i}));
}
lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0);
// Upsert
for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1}));
+ assert.commandWorked(testColl.update({_id: i}, {x: i}, {upsert: 1}));
}
lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0);
diff --git a/jstests/core/optimized_match_explain.js b/jstests/core/optimized_match_explain.js
index 5575b8498bb..f62474124b4 100644
--- a/jstests/core/optimized_match_explain.js
+++ b/jstests/core/optimized_match_explain.js
@@ -10,10 +10,10 @@ load("jstests/libs/analyze_plan.js");
const coll = db.match_explain;
coll.drop();
-assert.writeOK(coll.insert({a: 1, b: 1}));
-assert.writeOK(coll.insert({a: 2, b: 3}));
-assert.writeOK(coll.insert({a: 1, b: 2}));
-assert.writeOK(coll.insert({a: 1, b: 4}));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: 2, b: 3}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 4}));
// Explain output should reflect optimizations.
// $and should not be in the explain output because it is optimized out.
diff --git a/jstests/core/or4.js b/jstests/core/or4.js
index 8e07a42efa7..73a5ddb825d 100644
--- a/jstests/core/or4.js
+++ b/jstests/core/or4.js
@@ -14,10 +14,10 @@ coll.drop();
coll.ensureIndex({a: 1});
coll.ensureIndex({b: 1});
-assert.writeOK(coll.insert({a: 2}));
-assert.writeOK(coll.insert({b: 3}));
-assert.writeOK(coll.insert({b: 3}));
-assert.writeOK(coll.insert({a: 2, b: 3}));
+assert.commandWorked(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({b: 3}));
+assert.commandWorked(coll.insert({b: 3}));
+assert.commandWorked(coll.insert({a: 2, b: 3}));
assert.eq(4, coll.count({$or: [{a: 2}, {b: 3}]}));
assert.eq(2, coll.count({$or: [{a: 2}, {a: 2}]}));
@@ -35,13 +35,13 @@ assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).limit(4).count(true));
coll.remove({$or: [{a: 2}, {b: 3}]});
assert.eq(0, coll.count());
-assert.writeOK(coll.insert({b: 3}));
+assert.commandWorked(coll.insert({b: 3}));
coll.remove({$or: [{a: 2}, {b: 3}]});
assert.eq(0, coll.count());
-assert.writeOK(coll.insert({a: 2}));
-assert.writeOK(coll.insert({b: 3}));
-assert.writeOK(coll.insert({a: 2, b: 3}));
+assert.commandWorked(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({b: 3}));
+assert.commandWorked(coll.insert({a: 2, b: 3}));
coll.update({$or: [{a: 2}, {b: 3}]}, {$set: {z: 1}}, false, true);
assert.eq(3, coll.count({z: 1}));
@@ -53,13 +53,13 @@ assert.eq(2, coll.find({$or: [{a: 2}, {b: 3}]}).skip(1).toArray().length);
assert.eq(3, coll.find({$or: [{a: 2}, {b: 3}]}).batchSize(2).toArray().length);
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({b: 4}));
-assert.writeOK(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({b: 4}));
+assert.commandWorked(coll.insert({a: 2}));
assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).batchSize(2).toArray().length);
-assert.writeOK(coll.insert({a: 1, b: 3}));
+assert.commandWorked(coll.insert({a: 1, b: 3}));
assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).limit(4).toArray().length);
assert.eq([1, 2], Array.sort(coll.distinct('a', {$or: [{a: 2}, {b: 3}]})));
@@ -77,7 +77,7 @@ assert.eq(5,
coll.remove({});
-assert.writeOK(coll.insert({a: [1, 2]}));
+assert.commandWorked(coll.insert({a: [1, 2]}));
assert.eq(1, coll.find({$or: [{a: 1}, {a: 2}]}).toArray().length);
assert.eq(1, coll.count({$or: [{a: 1}, {a: 2}]}));
assert.eq(1, coll.find({$or: [{a: 2}, {a: 1}]}).toArray().length);
diff --git a/jstests/core/or_always_false.js b/jstests/core/or_always_false.js
index 6760ee37775..0766806a223 100644
--- a/jstests/core/or_always_false.js
+++ b/jstests/core/or_always_false.js
@@ -5,7 +5,7 @@
const coll = db.or_always_false;
coll.drop();
-assert.writeOK(coll.insert([{}, {}, {}]));
+assert.commandWorked(coll.insert([{}, {}, {}]));
const emptyOrError = assert.throws(() => coll.find({$or: []}).itcount());
assert.eq(emptyOrError.code, ErrorCodes.BadValue);
diff --git a/jstests/core/profile2.js b/jstests/core/profile2.js
index d71471b2e5f..f8f0040683e 100644
--- a/jstests/core/profile2.js
+++ b/jstests/core/profile2.js
@@ -33,7 +33,7 @@ coll.getDB().system.profile.drop();
assert.commandWorked(coll.getDB().runCommand({profile: 2}));
// Test update with large string element in query portion.
-assert.writeOK(coll.update({a: hugeStr}, {}));
+assert.commandWorked(coll.update({a: hugeStr}, {}));
var results = coll.getDB().system.profile.find().toArray();
assert.eq(1, results.length);
var result = results[0];
@@ -50,7 +50,7 @@ coll.getDB().system.profile.drop();
assert.commandWorked(coll.getDB().runCommand({profile: 2}));
// Test update with large string element in update portion.
-assert.writeOK(coll.update({}, {a: hugeStr}));
+assert.commandWorked(coll.update({}, {a: hugeStr}));
var results = coll.getDB().system.profile.find().toArray();
assert.eq(1, results.length);
var result = results[0];
diff --git a/jstests/core/profile_agg.js b/jstests/core/profile_agg.js
index 02a29500cf6..0b67296d9c0 100644
--- a/jstests/core/profile_agg.js
+++ b/jstests/core/profile_agg.js
@@ -19,7 +19,7 @@ testDB.setProfilingLevel(2);
//
var i;
for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
assert.commandWorked(coll.createIndex({a: 1}));
@@ -59,7 +59,7 @@ coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({b: 1}));
for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
assert.eq(1, coll.aggregate([{$match: {a: 3, b: 3}}]).itcount());
@@ -73,7 +73,7 @@ assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
assert.eq(1, coll.aggregate([{$match: {a: 3, b: 3}}], {hint: {_id: 1}}).itcount());
diff --git a/jstests/core/profile_count.js b/jstests/core/profile_count.js
index 103a08cb728..d2b998df850 100644
--- a/jstests/core/profile_count.js
+++ b/jstests/core/profile_count.js
@@ -20,7 +20,7 @@ testDB.setProfilingLevel(2);
//
var i;
for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
assert.eq(10, coll.count({}, {collation: {locale: "fr"}}));
@@ -45,7 +45,7 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
coll.drop();
for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
var query = {a: {$gte: 5}};
@@ -60,7 +60,7 @@ assert.eq(profileObj.docsExamined, 10, tojson(profileObj));
//
coll.drop();
for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
assert.commandWorked(coll.createIndex({a: 1}));
@@ -83,7 +83,7 @@ coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({b: 1}));
for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
assert.eq(1, coll.count({a: 3, b: 3}));
diff --git a/jstests/core/profile_delete.js b/jstests/core/profile_delete.js
index 29f3b3ff5e7..500320afe6c 100644
--- a/jstests/core/profile_delete.js
+++ b/jstests/core/profile_delete.js
@@ -19,14 +19,14 @@ testDB.setProfilingLevel(2);
//
var i;
for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
assert.commandWorked(coll.createIndex({a: 1}));
-assert.writeOK(coll.remove({a: {$gte: 2}, b: {$gte: 2}},
- db.getMongo().writeMode() === "commands"
- ? {justOne: true, collation: {locale: "fr"}}
- : {justOne: true}));
+assert.commandWorked(coll.remove({a: {$gte: 2}, b: {$gte: 2}},
+ db.getMongo().writeMode() === "commands"
+ ? {justOne: true, collation: {locale: "fr"}}
+ : {justOne: true}));
var profileObj = getLatestProfilerEntry(testDB);
@@ -51,10 +51,10 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
coll.drop();
for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
-assert.writeOK(coll.remove({a: {$gte: 2}}));
+assert.commandWorked(coll.remove({a: {$gte: 2}}));
profileObj = getLatestProfilerEntry(testDB);
assert.eq(profileObj.ndeleted, 8, tojson(profileObj));
@@ -68,10 +68,10 @@ coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({b: 1}));
for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
-assert.writeOK(coll.remove({a: 3, b: 3}));
+assert.commandWorked(coll.remove({a: 3, b: 3}));
profileObj = getLatestProfilerEntry(testDB);
assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
diff --git a/jstests/core/profile_distinct.js b/jstests/core/profile_distinct.js
index 6a2272e0f8a..4bfc8895f88 100644
--- a/jstests/core/profile_distinct.js
+++ b/jstests/core/profile_distinct.js
@@ -20,7 +20,7 @@ testDB.setProfilingLevel(2);
//
var i;
for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i % 5, b: i}));
+ assert.commandWorked(coll.insert({a: i % 5, b: i}));
}
assert.commandWorked(coll.createIndex({b: 1}));
@@ -49,7 +49,7 @@ coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({b: 1}));
for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
coll.distinct("a", {a: 3, b: 3});
diff --git a/jstests/core/profile_find.js b/jstests/core/profile_find.js
index 23bcf96e2ad..848405c8cd9 100644
--- a/jstests/core/profile_find.js
+++ b/jstests/core/profile_find.js
@@ -23,7 +23,7 @@ const profileEntryFilter = {
//
var i;
for (i = 0; i < 3; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr"}}));
@@ -70,7 +70,7 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
coll.drop();
for (i = 0; i < 3; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
assert.commandWorked(coll.createIndex({a: 1}));
@@ -91,7 +91,7 @@ coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({b: 1}));
for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
assert.neq(coll.findOne({a: 3, b: 3}), null);
@@ -110,8 +110,8 @@ coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({b: 1}));
for (i = 0; i < 20; ++i) {
- assert.writeOK(coll.insert({a: 5, b: i}));
- assert.writeOK(coll.insert({a: i, b: 10}));
+ assert.commandWorked(coll.insert({a: 5, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: 10}));
}
// Until we get the failpoint described in the above comment (regarding SERVER-23620), we must
@@ -133,7 +133,7 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
// Confirm that query modifiers such as "hint" are in the profiler document.
//
coll.drop();
-assert.writeOK(coll.insert({_id: 2}));
+assert.commandWorked(coll.insert({_id: 2}));
assert.eq(coll.find().hint({_id: 1}).itcount(), 1);
profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
diff --git a/jstests/core/profile_findandmodify.js b/jstests/core/profile_findandmodify.js
index 3c646d6468c..53ff28e7bad 100644
--- a/jstests/core/profile_findandmodify.js
+++ b/jstests/core/profile_findandmodify.js
@@ -17,7 +17,7 @@ testDB.setProfilingLevel(2);
//
coll.drop();
for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i, b: [0]}));
+ assert.commandWorked(coll.insert({_id: i, a: i, b: [0]}));
}
assert.commandWorked(coll.createIndex({b: 1}));
@@ -53,7 +53,7 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
coll.drop();
for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i}));
+ assert.commandWorked(coll.insert({_id: i, a: i}));
}
assert.eq({_id: 2, a: 2}, coll.findAndModify({query: {a: 2}, remove: true}));
@@ -75,7 +75,7 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
coll.drop();
for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i}));
+ assert.commandWorked(coll.insert({_id: i, a: i}));
}
assert.eq({_id: 4, a: 1},
@@ -100,7 +100,7 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
coll.drop();
for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i}));
+ assert.commandWorked(coll.insert({_id: i, a: i}));
}
assert.eq({_id: 2, a: 2}, coll.findAndModify({query: {_id: 2}, update: {$inc: {b: 1}}}));
@@ -117,7 +117,7 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
coll.drop();
for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i}));
+ assert.commandWorked(coll.insert({_id: i, a: i}));
}
assert.eq({a: 2},
@@ -139,7 +139,7 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
coll.drop();
for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i}));
+ assert.commandWorked(coll.insert({_id: i, a: i}));
}
assert.eq({a: 2}, coll.findAndModify({query: {a: 2}, remove: true, fields: {_id: 0, a: 1}}));
@@ -157,7 +157,7 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
coll.drop();
for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i}));
+ assert.commandWorked(coll.insert({_id: i, a: i}));
}
assert.eq({_id: 0, a: 0},
@@ -174,7 +174,7 @@ coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({b: 1}));
for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
coll.findAndModify({query: {a: 3, b: 3}, update: {$set: {c: 1}}});
diff --git a/jstests/core/profile_getmore.js b/jstests/core/profile_getmore.js
index 5cdc3a51ffe..5cbccb95881 100644
--- a/jstests/core/profile_getmore.js
+++ b/jstests/core/profile_getmore.js
@@ -18,7 +18,7 @@ testDB.setProfilingLevel(2);
//
var i;
for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
assert.commandWorked(coll.createIndex({a: 1}));
@@ -61,7 +61,7 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
coll.drop();
for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).batchSize(2);
@@ -78,7 +78,7 @@ assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
//
coll.drop();
for (i = 0; i < 3; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
cursor = coll.find().batchSize(2);
@@ -98,7 +98,7 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
coll.drop();
for (i = 0; i < 20; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
assert.commandWorked(coll.createIndex({a: 1}));
@@ -136,7 +136,7 @@ for (i = 0; i < 501; i++) {
coll.drop();
for (i = 0; i < 4; i++) {
- assert.writeOK(coll.insert(docToInsert));
+ assert.commandWorked(coll.insert(docToInsert));
}
cursor = coll.find(docToInsert).comment("profile_getmore").batchSize(2);
diff --git a/jstests/core/profile_insert.js b/jstests/core/profile_insert.js
index fa53801521d..e45b1011c89 100644
--- a/jstests/core/profile_insert.js
+++ b/jstests/core/profile_insert.js
@@ -25,7 +25,7 @@ testDB.setProfilingLevel(2);
var doc = {_id: 1};
var result = coll.insert(doc);
if (isWriteCommand) {
- assert.writeOK(result);
+ assert.commandWorked(result);
}
var profileObj = getLatestProfilerEntry(testDB);
@@ -60,7 +60,7 @@ bulk.insert(docArray[0]);
bulk.insert(docArray[1]);
result = bulk.execute();
if (isWriteCommand) {
- assert.writeOK(result);
+ assert.commandWorked(result);
}
profileObj = getLatestProfilerEntry(testDB);
@@ -84,7 +84,7 @@ doc = {
_id: 1
};
var wtimeout = 60000;
-assert.writeOK(coll.insert(doc, {writeConcern: {w: 1, wtimeout: wtimeout}, ordered: false}));
+assert.commandWorked(coll.insert(doc, {writeConcern: {w: 1, wtimeout: wtimeout}, ordered: false}));
profileObj = getLatestProfilerEntry(testDB);
diff --git a/jstests/core/profile_mapreduce.js b/jstests/core/profile_mapreduce.js
index 0dc3c81a0c2..1516fe67d45 100644
--- a/jstests/core/profile_mapreduce.js
+++ b/jstests/core/profile_mapreduce.js
@@ -33,7 +33,7 @@ var reduceFunction = function(a, b) {
//
coll.drop();
for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
assert.commandWorked(coll.createIndex({a: 1}));
@@ -63,7 +63,7 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
coll.drop();
for (var i = 0; i < 5; i++) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
coll.mapReduce(mapFunction, reduceFunction, {sort: {b: 1}, out: {inline: 1}});
@@ -77,7 +77,7 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
coll.drop();
for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
var outputCollectionName = "output_col";
@@ -93,7 +93,7 @@ coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({b: 1}));
for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
coll.mapReduce(mapFunction, reduceFunction, {query: {a: 3, b: 3}, out: {inline: 1}});
diff --git a/jstests/core/profile_query_hash.js b/jstests/core/profile_query_hash.js
index e635c7b6b56..d08f0de236a 100644
--- a/jstests/core/profile_query_hash.js
+++ b/jstests/core/profile_query_hash.js
@@ -20,10 +20,10 @@ function getShapes(collection) {
return res.shapes;
}
-assert.writeOK(coll.insert({a: 1, b: 1}));
-assert.writeOK(coll.insert({a: 1, b: 2}));
-assert.writeOK(coll.insert({a: 1, b: 2}));
-assert.writeOK(coll.insert({a: 2, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 2, b: 2}));
// We need two indices since we do not currently create cache entries for queries with a single
// candidate plan.
diff --git a/jstests/core/profile_repair_cursor.js b/jstests/core/profile_repair_cursor.js
index c0b3a34a929..492d59d29e8 100644
--- a/jstests/core/profile_repair_cursor.js
+++ b/jstests/core/profile_repair_cursor.js
@@ -14,7 +14,7 @@ var testColl = testDB.testColl;
assert.commandWorked(testDB.dropDatabase());
// Insert some data to scan over.
-assert.writeOK(testColl.insert([{}, {}, {}, {}]));
+assert.commandWorked(testColl.insert([{}, {}, {}, {}]));
testDB.setProfilingLevel(2);
diff --git a/jstests/core/profile_sampling.js b/jstests/core/profile_sampling.js
index 2bd2261031e..802b04ad5e5 100644
--- a/jstests/core/profile_sampling.js
+++ b/jstests/core/profile_sampling.js
@@ -16,13 +16,13 @@ try {
assert.eq(0, profileDB.system.profile.count());
profileDB.createCollection(coll.getName());
- assert.writeOK(coll.insert({x: 1}));
+ assert.commandWorked(coll.insert({x: 1}));
assert.commandWorked(profileDB.setProfilingLevel(1, {sampleRate: 0, slowms: -1}));
assert.neq(null, coll.findOne({x: 1}));
assert.eq(1, coll.find({x: 1}).count());
- assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}}));
+ assert.commandWorked(coll.update({x: 1}, {$inc: {a: 1}}));
assert.commandWorked(profileDB.setProfilingLevel(0));
@@ -34,7 +34,7 @@ try {
// This should generate about 500 profile log entries.
for (let i = 0; i < 500; i++) {
assert.neq(null, coll.findOne({x: 1}));
- assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}}));
+ assert.commandWorked(coll.update({x: 1}, {$inc: {a: 1}}));
}
assert.commandWorked(profileDB.setProfilingLevel(0));
@@ -47,7 +47,7 @@ try {
// This should generate exactly 10 profile log entries.
for (let i = 0; i < 5; i++) {
assert.neq(null, coll.findOne({x: 1}));
- assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}}));
+ assert.commandWorked(coll.update({x: 1}, {$inc: {a: 1}}));
}
assert.commandWorked(profileDB.setProfilingLevel(0));
assert.eq(10, profileDB.system.profile.count());
diff --git a/jstests/core/profile_update.js b/jstests/core/profile_update.js
index 8cde2ea6784..63ec6e96d76 100644
--- a/jstests/core/profile_update.js
+++ b/jstests/core/profile_update.js
@@ -19,11 +19,11 @@ testDB.setProfilingLevel(2);
//
var i;
for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
assert.commandWorked(coll.createIndex({a: 1}));
-assert.writeOK(coll.update({a: {$gte: 2}}, {$set: {c: 1}, $inc: {a: -10}}));
+assert.commandWorked(coll.update({a: {$gte: 2}}, {$set: {c: 1}, $inc: {a: -10}}));
var profileObj = getLatestProfilerEntry(testDB);
@@ -48,9 +48,9 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [0]}));
- assert.writeOK(coll.update(
+ assert.commandWorked(coll.update(
{_id: 0}, {$set: {"a.$[i]": 1}}, {collation: {locale: "fr"}, arrayFilters: [{i: 0}]}));
profileObj = getLatestProfilerEntry(testDB);
@@ -66,11 +66,11 @@ if (db.getMongo().writeMode() === "commands") {
//
coll.drop();
for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
assert.commandWorked(coll.createIndex({a: 1}));
-assert.writeOK(coll.update({a: {$gte: 5}}, {$set: {c: 1}, $inc: {a: -10}}, {multi: true}));
+assert.commandWorked(coll.update({a: {$gte: 5}}, {$set: {c: 1}, $inc: {a: -10}}, {multi: true}));
profileObj = getLatestProfilerEntry(testDB);
assert.eq(profileObj.keysExamined, 5, tojson(profileObj));
@@ -88,11 +88,11 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
coll.drop();
for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
assert.commandWorked(coll.createIndex({a: 1}));
-assert.writeOK(coll.update({_id: "new value", a: 4}, {$inc: {b: 1}}, {upsert: true}));
+assert.commandWorked(coll.update({_id: "new value", a: 4}, {$inc: {b: 1}}, {upsert: true}));
profileObj = getLatestProfilerEntry(testDB);
assert.eq(profileObj.command,
@@ -115,10 +115,10 @@ coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({b: 1}));
for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
-assert.writeOK(coll.update({a: 3, b: 3}, {$set: {c: 1}}));
+assert.commandWorked(coll.update({a: 3, b: 3}, {$set: {c: 1}}));
profileObj = getLatestProfilerEntry(testDB);
assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
diff --git a/jstests/core/projection_dotted_paths.js b/jstests/core/projection_dotted_paths.js
index 5af357bde02..48c2f1ae89f 100644
--- a/jstests/core/projection_dotted_paths.js
+++ b/jstests/core/projection_dotted_paths.js
@@ -14,7 +14,7 @@ load("jstests/libs/analyze_plan.js");
let coll = db["projection_dotted_paths"];
coll.drop();
assert.commandWorked(coll.createIndex({a: 1, "b.c": 1, "b.d": 1, c: 1}));
-assert.writeOK(coll.insert({_id: 1, a: 1, b: {c: 1, d: 1, e: 1}, c: 1, e: 1}));
+assert.commandWorked(coll.insert({_id: 1, a: 1, b: {c: 1, d: 1, e: 1}, c: 1, e: 1}));
// Project exactly the set of fields in the index. Verify that the projection is computed
// correctly and that the plan is covered.
@@ -64,7 +64,7 @@ assert(isIdhack(db, explain.queryPlanner.winningPlan));
// If we make a dotted path multikey, projections using that path cannot be covered. But
// projections which do not include the multikey path can still be covered.
-assert.writeOK(coll.insert({a: 2, b: {c: 1, d: [1, 2, 3]}}));
+assert.commandWorked(coll.insert({a: 2, b: {c: 1, d: [1, 2, 3]}}));
resultDoc = coll.findOne({a: 2}, {_id: 0, "b.c": 1, "b.d": 1});
assert.eq(resultDoc, {b: {c: 1, d: [1, 2, 3]}});
@@ -81,7 +81,7 @@ assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
// Verify that dotted projections work for multiple levels of nesting.
assert.commandWorked(coll.createIndex({a: 1, "x.y.y": 1, "x.y.z": 1, "x.z": 1}));
-assert.writeOK(coll.insert({a: 3, x: {y: {y: 1, f: 1, z: 1}, f: 1, z: 1}}));
+assert.commandWorked(coll.insert({a: 3, x: {y: {y: 1, f: 1, z: 1}, f: 1, z: 1}}));
resultDoc = coll.findOne({a: 3}, {_id: 0, "x.y.y": 1, "x.y.z": 1, "x.z": 1});
assert.eq(resultDoc, {x: {y: {y: 1, z: 1}, z: 1}});
explain = coll.find({a: 3}, {_id: 0, "x.y.y": 1, "x.y.z": 1, "x.z": 1}).explain("queryPlanner");
diff --git a/jstests/core/push.js b/jstests/core/push.js
index 50ff92cc2cd..2e2827945e7 100644
--- a/jstests/core/push.js
+++ b/jstests/core/push.js
@@ -26,27 +26,27 @@ t.update({_id: 2}, {$pop: {a: -1}});
assert.eq("4,5", t.findOne().a.toString(), "E3");
res = t.update({_id: 2}, {$pop: {a: -1}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq("5", t.findOne().a.toString(), "E4");
res = t.update({_id: 2}, {$pop: {a: -1}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq("", t.findOne().a.toString(), "E5");
res = t.update({_id: 2}, {$pop: {a: -1}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq("", t.findOne().a.toString(), "E6");
res = t.update({_id: 2}, {$pop: {a: -1}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq("", t.findOne().a.toString(), "E7");
res = t.update({_id: 2}, {$pop: {a: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq("", t.findOne().a.toString(), "E8");
res = t.update({_id: 2}, {$pop: {b: -1}});
-assert.writeOK(res);
+assert.commandWorked(res);
res = t.update({_id: 2}, {$pop: {b: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
diff --git a/jstests/core/push_sort.js b/jstests/core/push_sort.js
index 815a88f0b7d..347f230a93d 100644
--- a/jstests/core/push_sort.js
+++ b/jstests/core/push_sort.js
@@ -64,10 +64,11 @@ assert.docEq(t.findOne({_id: 8}), doc8); // ensure doc was not changed
t.save({_id: 100, x: [{a: 1}]});
// Elements of the $each vector can be integers. Here, '2' is a valid $each element.
-assert.writeOK(t.update({_id: 100}, {$push: {x: {$each: [2], $slice: -2, $sort: {a: 1}}}}));
+assert.commandWorked(t.update({_id: 100}, {$push: {x: {$each: [2], $slice: -2, $sort: {a: 1}}}}));
// For the same reason as above, '1' is a valid $each element.
-assert.writeOK(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}, 1], $slice: -2, $sort: {a: 1}}}}));
+assert.commandWorked(
+ t.update({_id: 100}, {$push: {x: {$each: [{a: 2}, 1], $slice: -2, $sort: {a: 1}}}}));
// The sort key pattern cannot be empty.
assert.writeErrorWithCode(
@@ -75,7 +76,8 @@ assert.writeErrorWithCode(
ErrorCodes.BadValue);
// Support positive $slice values (i.e., trimming from the array's front).
-assert.writeOK(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: 2, $sort: {a: 1}}}}));
+assert.commandWorked(
+ t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: 2, $sort: {a: 1}}}}));
// A $slice cannot be a fractional value.
assert.writeErrorWithCode(
@@ -88,7 +90,7 @@ assert.writeErrorWithCode(
ErrorCodes.BadValue);
// Support sorting array elements that are not documents.
-assert.writeOK(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: -2, $sort: 1}}}));
+assert.commandWorked(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: -2, $sort: 1}}}));
// The key pattern 'a.' is an invalid value for $sort.
assert.writeErrorWithCode(
@@ -110,4 +112,5 @@ t.remove({});
// Existing values in the array do not have to be objects during a $sort with $each.
t.save({_id: 100, x: [1, "foo"]});
-assert.writeOK(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: -2, $sort: {a: 1}}}}));
+assert.commandWorked(
+ t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: -2, $sort: {a: 1}}}}));
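Note that in push_sort.js the replacement is not purely textual: assert.commandWorked is six characters longer than assert.writeOK, so calls that would overflow the line limit are rewrapped onto two lines. Failure-path assertions (assert.writeError, assert.writeErrorWithCode) are left untouched; only successful writes change helpers. A sketch of the resulting mix, with `demo` as a hypothetical stand-in collection:

// Illustrative only; mirrors the push_sort.js pattern above.
const t = db.demo;
t.drop();
t.save({_id: 100, x: [{a: 1}]});

// Success path now goes through commandWorked (wrapped when long):
assert.commandWorked(
    t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: -2, $sort: {a: 1}}}}));

// Error paths keep their dedicated helpers, e.g. a fractional $slice:
assert.writeErrorWithCode(
    t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: -2.1, $sort: {a: 1}}}}),
    ErrorCodes.BadValue);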
diff --git a/jstests/core/record_store_count.js b/jstests/core/record_store_count.js
index 61a1680fa94..89dcf8d4cdc 100644
--- a/jstests/core/record_store_count.js
+++ b/jstests/core/record_store_count.js
@@ -12,8 +12,8 @@ load("jstests/libs/fixture_helpers.js"); // For isMongos and isSharded.
var coll = db.record_store_count;
coll.drop();
-assert.writeOK(coll.insert({x: 0}));
-assert.writeOK(coll.insert({x: 1}));
+assert.commandWorked(coll.insert({x: 0}));
+assert.commandWorked(coll.insert({x: 1}));
assert.commandWorked(coll.ensureIndex({x: 1}));
diff --git a/jstests/core/regex.js b/jstests/core/regex.js
index 488d41f41d0..f74817e618c 100644
--- a/jstests/core/regex.js
+++ b/jstests/core/regex.js
@@ -8,33 +8,33 @@ assert.commandWorked(isMaster);
const isMongos = (isMaster.msg === "isdbgrid");
t.drop();
-assert.writeOK(t.save({a: "bcd"}));
+assert.commandWorked(t.save({a: "bcd"}));
assert.eq(1, t.count({a: /b/}), "A");
assert.eq(1, t.count({a: /bc/}), "B");
assert.eq(1, t.count({a: /bcd/}), "C");
assert.eq(0, t.count({a: /bcde/}), "D");
t.drop();
-assert.writeOK(t.save({a: {b: "cde"}}));
+assert.commandWorked(t.save({a: {b: "cde"}}));
assert.eq(1, t.count({'a.b': /de/}), "E");
t.drop();
-assert.writeOK(t.save({a: {b: ["cde"]}}));
+assert.commandWorked(t.save({a: {b: ["cde"]}}));
assert.eq(1, t.count({'a.b': /de/}), "F");
t.drop();
-assert.writeOK(t.save({a: [{b: "cde"}]}));
+assert.commandWorked(t.save({a: [{b: "cde"}]}));
assert.eq(1, t.count({'a.b': /de/}), "G");
t.drop();
-assert.writeOK(t.save({a: [{b: ["cde"]}]}));
+assert.commandWorked(t.save({a: [{b: ["cde"]}]}));
assert.eq(1, t.count({'a.b': /de/}), "H");
//
// Confirm match and explain serialization for $elemMatch with $regex.
//
t.drop();
-assert.writeOK(t.insert({x: ["abc"]}));
+assert.commandWorked(t.insert({x: ["abc"]}));
const query = {
x: {$elemMatch: {$regex: 'ABC', $options: 'i'}}
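The query object above (truncated by the hunk) pairs $elemMatch with $regex/$options; against the single {x: ["abc"]} document it matches because an array element satisfies the case-insensitive pattern. A short sketch of the expected behavior:

assert.eq(1, t.find({x: {$elemMatch: {$regex: 'ABC', $options: 'i'}}}).itcount());
assert.eq(0, t.find({x: {$elemMatch: {$regex: 'ABC'}}}).itcount());  // case-sensitive, no match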
diff --git a/jstests/core/regex_not_id.js b/jstests/core/regex_not_id.js
index 35b2c858867..a38b452d423 100644
--- a/jstests/core/regex_not_id.js
+++ b/jstests/core/regex_not_id.js
@@ -3,7 +3,7 @@
var testColl = db.regex_not_id;
testColl.drop();
-assert.writeOK(testColl.insert({_id: "ABCDEF1"}));
+assert.commandWorked(testColl.insert({_id: "ABCDEF1"}));
// Should be an error.
assert.writeError(testColl.insert({_id: /^A/}));
diff --git a/jstests/core/remove2.js b/jstests/core/remove2.js
index 50fe507c134..d01c1e2e58c 100644
--- a/jstests/core/remove2.js
+++ b/jstests/core/remove2.js
@@ -27,7 +27,7 @@ function g() {
const res = t.remove({x: {$gte: 3}});
- assert.writeOK(res);
+ assert.commandWorked(res);
assert(t.findOne({x: 3}) == null);
assert(t.findOne({x: 8}) == null);
assert(t.validate().valid);
diff --git a/jstests/core/remove7.js b/jstests/core/remove7.js
index 2cda8945089..4a1548fbce2 100644
--- a/jstests/core/remove7.js
+++ b/jstests/core/remove7.js
@@ -25,8 +25,8 @@ for (i = 0; i < 200; i++) {
t.save({tags: getTags(100)});
var q = {tags: {$in: getTags(10)}};
var before = t.find(q).count();
- var res = assert.writeOK(t.remove(q));
+ var res = assert.commandWorked(t.remove(q));
var after = t.find(q).count();
assert.eq(0, after, "not zero after!");
- assert.writeOK(res);
+ assert.commandWorked(res);
}
diff --git a/jstests/core/remove9.js b/jstests/core/remove9.js
index 888625764ec..8756991e25e 100644
--- a/jstests/core/remove9.js
+++ b/jstests/core/remove9.js
@@ -17,7 +17,7 @@ const bulk = t.initializeUnorderedBulkOp();
for (let i = 0; i < 1000; ++i) {
bulk.insert({i: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
const s = startParallelShell(function() {
const t = db.jstests_remove9;
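For context: initializeUnorderedBulkOp() batches writes client-side, and execute() returns a single BulkWriteResult, which is why one assert.commandWorked covers the whole batch. A minimal sketch (collection name hypothetical):

const demoBulk = db.bulk_demo.initializeUnorderedBulkOp();  // hypothetical collection
for (let i = 0; i < 10; ++i) {
    demoBulk.insert({i: i});
}
const bulkRes = assert.commandWorked(demoBulk.execute());
assert.eq(10, bulkRes.nInserted);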
diff --git a/jstests/core/remove_undefined.js b/jstests/core/remove_undefined.js
index 6b97cc5d053..11463c107c5 100644
--- a/jstests/core/remove_undefined.js
+++ b/jstests/core/remove_undefined.js
@@ -6,9 +6,9 @@
const coll = db.remove_undefined;
coll.drop();
-assert.writeOK(coll.insert({_id: 1}));
-assert.writeOK(coll.insert({_id: 2}));
-assert.writeOK(coll.insert({_id: null}));
+assert.commandWorked(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 2}));
+assert.commandWorked(coll.insert({_id: null}));
const obj = {
foo: 1,
@@ -27,7 +27,7 @@ assert.writeErrorWithCode(coll.remove({_id: undefined}), ErrorCodes.BadValue);
coll.remove({_id: obj.nullElem});
assert.eq(2, coll.count());
-assert.writeOK(coll.insert({_id: null}));
+assert.commandWorked(coll.insert({_id: null}));
assert.eq(3, coll.count());
assert.writeErrorWithCode(coll.remove({_id: undefined}), ErrorCodes.BadValue);
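For context: BSON undefined is deprecated and rejected as a query value, while null is a legal value that equality predicates match, which is why {_id: undefined} fails with BadValue but {_id: null} succeeds. A short sketch on a hypothetical scratch collection:

const demo = db.remove_undefined_demo;  // hypothetical scratch collection
demo.drop();
assert.commandWorked(demo.insert({_id: null}));
assert.writeErrorWithCode(demo.remove({_id: undefined}), ErrorCodes.BadValue);
assert.commandWorked(demo.remove({_id: null}));  // matches the document with a null _id
assert.eq(0, demo.count());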
diff --git a/jstests/core/removea.js b/jstests/core/removea.js
index ee914662d92..2c45ee665f4 100644
--- a/jstests/core/removea.js
+++ b/jstests/core/removea.js
@@ -19,7 +19,7 @@ for (let v = 0; v < 2; ++v) { // Try each index version.
let i = y + (B * x);
batch.push({a: i});
}
- assert.writeOK(t.insert(batch));
+ assert.commandWorked(t.insert(batch));
}
assert.eq(t.count(), S * B);
@@ -27,6 +27,6 @@ for (let v = 0; v < 2; ++v) { // Try each index version.
for (let i = 0; i < S * B; ++i) {
toDrop.push(Random.randInt(10000)); // Dups in the query will be ignored.
}
- assert.writeOK(t.remove({a: {$in: toDrop}}));
+ assert.commandWorked(t.remove({a: {$in: toDrop}}));
}
})();
diff --git a/jstests/core/removeb.js b/jstests/core/removeb.js
index eeed0fc30bc..eb8aafd65f9 100644
--- a/jstests/core/removeb.js
+++ b/jstests/core/removeb.js
@@ -26,7 +26,7 @@ const insertDocs = function(collection, nDocs) {
bulk.insert({a: i});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
print("Bulk insert " + nDocs + " documents completed");
};
@@ -46,7 +46,7 @@ const p = startParallelShell(function() {
for (let j = 0; j < 100; ++j) {
bulk.insert({a: i + j});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
if (i % 1000 === 0) {
print(i - 20000 + " of second set of 20000 documents inserted");
}
diff --git a/jstests/core/rename4.js b/jstests/core/rename4.js
index 32f48f9cd0f..6b42bc8373e 100644
--- a/jstests/core/rename4.js
+++ b/jstests/core/rename4.js
@@ -64,7 +64,7 @@ function good(start, mod, expected) {
t.remove({});
t.save(start);
var res = t.update({}, mod);
- assert.writeOK(res);
+ assert.commandWorked(res);
var got = t.findOne();
delete got._id;
assert.docEq(expected, got);
@@ -129,7 +129,7 @@ function l(start, mod, query, expected) {
t.remove({});
t.save(start);
var res = t.update({}, mod);
- assert.writeOK(res);
+ assert.commandWorked(res);
var got = t.find(query).hint({a: 1}).next();
delete got._id;
assert.docEq(expected, got);
diff --git a/jstests/core/rename_change_target_type.js b/jstests/core/rename_change_target_type.js
index 859e1add0b2..a88759b0b65 100644
--- a/jstests/core/rename_change_target_type.js
+++ b/jstests/core/rename_change_target_type.js
@@ -6,8 +6,8 @@
let coll = db.rename_change_target_type;
coll.drop();
-assert.writeOK(coll.insert({to: NumberLong(100), from: 100}));
-assert.writeOK(coll.update({}, {$rename: {from: "to"}}));
+assert.commandWorked(coll.insert({to: NumberLong(100), from: 100}));
+assert.commandWorked(coll.update({}, {$rename: {from: "to"}}));
let aggResult = coll.aggregate([{$project: {toType: {$type: "$to"}}}]).toArray();
assert.eq(aggResult.length, 1);
diff --git a/jstests/core/return_key.js b/jstests/core/return_key.js
index 26dd01082b6..684b1952310 100644
--- a/jstests/core/return_key.js
+++ b/jstests/core/return_key.js
@@ -17,9 +17,9 @@ var explain;
var coll = db.jstests_returnkey;
coll.drop();
-assert.writeOK(coll.insert({a: 1, b: 3}));
-assert.writeOK(coll.insert({a: 2, b: 2}));
-assert.writeOK(coll.insert({a: 3, b: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 3}));
+assert.commandWorked(coll.insert({a: 2, b: 2}));
+assert.commandWorked(coll.insert({a: 3, b: 1}));
assert.commandWorked(coll.ensureIndex({a: 1}));
assert.commandWorked(coll.ensureIndex({b: 1}));
diff --git a/jstests/core/set7.js b/jstests/core/set7.js
index 64784a7216c..e1cdd0f3bf2 100644
--- a/jstests/core/set7.js
+++ b/jstests/core/set7.js
@@ -57,8 +57,8 @@ assert.eq([0, 1, -2, 3, 4, 5, 6, 7, 8, 9, 10, 11], t.findOne().a);
// Test multiple updates to a non-existent array element.
t.drop();
-assert.writeOK(t.insert({a: []}));
-assert.writeOK(t.update({}, {$set: {"a.2.b": 1, "a.2.c": 1}}));
+assert.commandWorked(t.insert({a: []}));
+assert.commandWorked(t.update({}, {$set: {"a.2.b": 1, "a.2.c": 1}}));
assert.docEq({a: [null, null, {b: 1, c: 1}]}, t.findOne({}, {_id: 0}));
// Test upsert case
@@ -70,7 +70,7 @@ assert.eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], t.findOne().a);
t.drop();
t.save({a: []});
res = t.update({}, {$set: {"a.1500000": 1}}); // current limit
-assert.writeOK(res);
+assert.commandWorked(res);
t.drop();
t.save({a: []});
diff --git a/jstests/core/set_type_change.js b/jstests/core/set_type_change.js
index 5b06449dce4..12035cc39a9 100644
--- a/jstests/core/set_type_change.js
+++ b/jstests/core/set_type_change.js
@@ -14,7 +14,7 @@ var coll = db.set_type_change;
coll.drop();
assert.commandWorked(coll.ensureIndex({a: 1}));
-assert.writeOK(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({a: 2}));
var newVal = new NumberLong(2);
var res = coll.update({}, {$set: {a: newVal}});
diff --git a/jstests/core/shell_writeconcern.js b/jstests/core/shell_writeconcern.js
index 26d68304c63..ae431dab94b 100644
--- a/jstests/core/shell_writeconcern.js
+++ b/jstests/core/shell_writeconcern.js
@@ -33,7 +33,7 @@ assert.eq(undefined, collB.getWriteConcern());
assert.eq(undefined, db.getWriteConcern());
// Test the write methods by inspecting the results they return.
-var res = assert.writeOK(collA.save({_id: 1}, {writeConcern: {w: 1}}));
+var res = assert.commandWorked(collA.save({_id: 1}, {writeConcern: {w: 1}}));
if (!db.getMongo().useWriteCommands()) {
assert.eq(1, res.n, tojson(res));
assert.eq(1, res.upserted, tojson(res));
@@ -41,34 +41,34 @@ if (!db.getMongo().useWriteCommands()) {
assert.eq(1, res.nUpserted, tojson(res));
}
-var res = assert.writeOK(collA.update({_id: 1}, {_id: 1}, {writeConcern: {w: 1}}));
+var res = assert.commandWorked(collA.update({_id: 1}, {_id: 1}, {writeConcern: {w: 1}}));
if (!db.getMongo().useWriteCommands()) {
assert.eq(1, res.n, tojson(res));
} else {
assert.eq(1, res.nMatched, tojson(res));
}
-var res = assert.writeOK(collA.update({_id: 1}, {_id: 1}, {writeConcern: {w: 1}}));
+var res = assert.commandWorked(collA.update({_id: 1}, {_id: 1}, {writeConcern: {w: 1}}));
if (!db.getMongo().useWriteCommands()) {
assert.eq(1, res.n, tojson(res));
} else {
assert.eq(1, res.nMatched, tojson(res));
}
-var res = assert.writeOK(collA.insert({_id: 2}, {writeConcern: {w: 1}}));
+var res = assert.commandWorked(collA.insert({_id: 2}, {writeConcern: {w: 1}}));
if (!db.getMongo().useWriteCommands()) {
assert.eq(0, res.n, tojson(res));
} else {
assert.eq(1, res.nInserted, tojson(res));
}
-var res = assert.writeOK(collA.remove({_id: 3}, {writeConcern: {w: 1}}));
+var res = assert.commandWorked(collA.remove({_id: 3}, {writeConcern: {w: 1}}));
if (!db.getMongo().useWriteCommands()) {
assert.eq(0, res.n, tojson(res));
} else {
assert.eq(0, res.nRemoved, tojson(res));
}
-var res = assert.writeOK(collA.remove({}, {justOne: true, writeConcern: {w: 1}}));
+var res = assert.commandWorked(collA.remove({}, {justOne: true, writeConcern: {w: 1}}));
if (!db.getMongo().useWriteCommands()) {
assert.eq(1, res.n, tojson(res));
} else {
@@ -76,7 +76,7 @@ if (!db.getMongo().useWriteCommands()) {
}
// Test an ordered write with a write concern, and that the failure surfaces as a write error
// rather than a write concern error.
-assert.writeOK(collA.insert({_id: 1}));
+assert.commandWorked(collA.insert({_id: 1}));
var res =
assert.writeError(collA.insert([{_id: 1}, {_id: 1}], {ordered: true, writeConcern: {w: 1}}));
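Worth spelling out the premise of this whole migration: assert.writeOK only understood WriteResult/BulkWriteResult objects, whereas assert.commandWorked accepts those as well as command responses, so the suite can standardize on a single helper. A minimal sketch of the two call shapes (collection name hypothetical):

const demo = db.write_result_demo;  // hypothetical scratch collection
demo.drop();
assert.commandWorked(demo.insert({_id: 1}));     // WriteResult path, throws if the write failed
assert.commandWorked(db.runCommand({ping: 1}));  // command-response path, checks ok: 1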
diff --git a/jstests/core/single_batch.js b/jstests/core/single_batch.js
index b06e5ce7aa5..d9ff6769157 100644
--- a/jstests/core/single_batch.js
+++ b/jstests/core/single_batch.js
@@ -10,7 +10,7 @@ var padding = new Array(1024 * 1024).join("x");
// Insert ~20 MB of data.
for (var i = 0; i < 20; i++) {
- assert.writeOK(coll.insert({_id: i, padding: padding}));
+ assert.commandWorked(coll.insert({_id: i, padding: padding}));
}
// The limit is 18, but we should end up with fewer documents since 18 docs won't fit in a
diff --git a/jstests/core/sort1.js b/jstests/core/sort1.js
index edd787306b0..7d5c227d8b4 100644
--- a/jstests/core/sort1.js
+++ b/jstests/core/sort1.js
@@ -42,9 +42,9 @@ assert(coll.validate().valid);
// 'numericOrdering' option which orders number-like strings by their numerical values.
if (db.getMongo().useReadCommands()) {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, str: '1000'}));
- assert.writeOK(coll.insert({_id: 1, str: '5'}));
- assert.writeOK(coll.insert({_id: 2, str: '200'}));
+ assert.commandWorked(coll.insert({_id: 0, str: '1000'}));
+ assert.commandWorked(coll.insert({_id: 1, str: '5'}));
+ assert.commandWorked(coll.insert({_id: 2, str: '200'}));
var cursor = coll.find().sort({str: -1}).collation({locale: 'en_US', numericOrdering: true});
assert.eq(cursor.next(), {_id: 0, str: '1000'});
@@ -56,8 +56,8 @@ if (db.getMongo().useReadCommands()) {
// Ensure that sorting of arrays correctly respects a collation with numeric ordering.
if (db.getMongo().useReadCommands()) {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, strs: ['1000', '500']}));
- assert.writeOK(coll.insert({_id: 1, strs: ['2000', '60']}));
+ assert.commandWorked(coll.insert({_id: 0, strs: ['1000', '500']}));
+ assert.commandWorked(coll.insert({_id: 1, strs: ['2000', '60']}));
cursor = coll.find({strs: {$lt: '1000'}}).sort({strs: 1}).collation({
locale: 'en_US',
numericOrdering: true
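For context: with numericOrdering, number-like strings compare by numeric value, so '5' < '200' < '1000' instead of the lexicographic '1000' < '200' < '5'. A short sketch against the three documents inserted above:

const ascending = coll.find()
                      .sort({str: 1})
                      .collation({locale: 'en_US', numericOrdering: true})
                      .toArray()
                      .map(doc => doc.str);
assert.eq(['5', '200', '1000'], ascending);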
diff --git a/jstests/core/sort3.js b/jstests/core/sort3.js
index 5e5f3313f51..211c3d1e1c2 100644
--- a/jstests/core/sort3.js
+++ b/jstests/core/sort3.js
@@ -4,9 +4,9 @@
const coll = db.sort3;
coll.drop();
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({a: 5}));
-assert.writeOK(coll.insert({a: 3}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 5}));
+assert.commandWorked(coll.insert({a: 3}));
assert.eq([1, 3, 5], coll.find().sort({a: 1}).toArray().map(doc => doc.a));
assert.eq([5, 3, 1], coll.find().sort({a: -1}).toArray().map(doc => doc.a));
diff --git a/jstests/core/sort4.js b/jstests/core/sort4.js
index 63d7f3810bd..0afe2cebe80 100644
--- a/jstests/core/sort4.js
+++ b/jstests/core/sort4.js
@@ -22,18 +22,18 @@ function nice(sort, correct, extra) {
return s;
}
-assert.writeOK(coll.insert({name: 'A', prename: 'B'}));
-assert.writeOK(coll.insert({name: 'A', prename: 'C'}));
-assert.writeOK(coll.insert({name: 'B', prename: 'B'}));
-assert.writeOK(coll.insert({name: 'B', prename: 'D'}));
+assert.commandWorked(coll.insert({name: 'A', prename: 'B'}));
+assert.commandWorked(coll.insert({name: 'A', prename: 'C'}));
+assert.commandWorked(coll.insert({name: 'B', prename: 'B'}));
+assert.commandWorked(coll.insert({name: 'B', prename: 'D'}));
nice({name: 1, prename: 1}, "AB,AC,BB,BD", "s3");
nice({prename: 1, name: 1}, "AB,BB,AC,BD", "s3");
-assert.writeOK(coll.insert({name: 'A'}));
+assert.commandWorked(coll.insert({name: 'A'}));
nice({name: 1, prename: 1}, "A,AB,AC,BB,BD", "e1");
-assert.writeOK(coll.insert({name: 'C'}));
+assert.commandWorked(coll.insert({name: 'C'}));
nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2"); // SERVER-282
assert.commandWorked(coll.ensureIndex({name: 1, prename: 1}));
diff --git a/jstests/core/sort_array.js b/jstests/core/sort_array.js
index 20ae0187693..bd30bcb169f 100644
--- a/jstests/core/sort_array.js
+++ b/jstests/core/sort_array.js
@@ -39,8 +39,8 @@ function testAggAndFindSort({filter, sort, project, hint, expected}) {
}
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]}));
-assert.writeOK(coll.insert({_id: 1, a: [8, 4, -1]}));
+assert.commandWorked(coll.insert({_id: 0, a: [3, 0, 1]}));
+assert.commandWorked(coll.insert({_id: 1, a: [8, 4, -1]}));
// Sanity check that a sort on "_id" is usually pushed down into the query layer, but that
// $_internalInhibitOptimization prevents this from happening. This makes sure that this test is
@@ -58,9 +58,9 @@ testAggAndFindSort({
expected: [{_id: 1, a: [8, 4, -1]}, {_id: 0, a: [3, 0, 1]}]
});
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]}));
-assert.writeOK(coll.insert({_id: 1, a: [0, 4, -1]}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: [3, 0, 1]}));
+assert.commandWorked(coll.insert({_id: 1, a: [0, 4, -1]}));
// Descending sort, without an index.
testAggAndFindSort({
@@ -70,9 +70,9 @@ testAggAndFindSort({
expected: [{_id: 1, a: [0, 4, -1]}, {_id: 0, a: [3, 0, 1]}]
});
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]}));
-assert.writeOK(coll.insert({_id: 1, a: [8, 4, -1]}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: [3, 0, 1]}));
+assert.commandWorked(coll.insert({_id: 1, a: [8, 4, -1]}));
assert.commandWorked(coll.createIndex({a: 1}));
// Ascending sort, in the presence of an index. The multikey index should not be used to provide
@@ -84,9 +84,9 @@ testAggAndFindSort({
expected: [{_id: 1, a: [8, 4, -1]}, {_id: 0, a: [3, 0, 1]}]
});
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]}));
-assert.writeOK(coll.insert({_id: 1, a: [0, 4, -1]}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: [3, 0, 1]}));
+assert.commandWorked(coll.insert({_id: 1, a: [0, 4, -1]}));
// Descending sort, in the presence of an index.
testAggAndFindSort({
@@ -96,9 +96,9 @@ testAggAndFindSort({
expected: [{_id: 1, a: [0, 4, -1]}, {_id: 0, a: [3, 0, 1]}]
});
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, x: [{y: [4, 0, 1], z: 7}, {y: 0, z: 9}]}));
-assert.writeOK(coll.insert({_id: 1, x: [{y: 1, z: 7}, {y: 0, z: [8, 6]}]}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, x: [{y: [4, 0, 1], z: 7}, {y: 0, z: 9}]}));
+assert.commandWorked(coll.insert({_id: 1, x: [{y: 1, z: 7}, {y: 0, z: [8, 6]}]}));
// Compound mixed ascending/descending sorts, without an index. Sort key for doc with _id: 0 is
// {'': 0, '': 9}. Sort key for doc with _id: 1 is {'': 0, '': 8}.
@@ -121,7 +121,7 @@ testAggAndFindSort(
// Test that a multikey index can provide a sort over a non-multikey field.
coll.drop();
assert.commandWorked(coll.createIndex({a: 1, "b.c": 1}));
-assert.writeOK(coll.insert({a: [1, 2, 3], b: {c: 9}}));
+assert.commandWorked(coll.insert({a: [1, 2, 3], b: {c: 9}}));
explain = coll.find({a: 2}).sort({"b.c": -1}).explain();
assert(planHasStage(db, explain, "IXSCAN"));
assert(!planHasStage(db, explain, "SORT"));
@@ -135,9 +135,9 @@ assert(!planHasStage(db, explain, "SORT"));
// Test that we can correctly sort by an array field in agg when there are additional fields not
// involved in the sort pattern.
coll.drop();
-assert.writeOK(
+assert.commandWorked(
coll.insert({_id: 0, a: 1, b: {c: 1}, d: [{e: {f: 1, g: [6, 5, 4]}}, {e: {g: [3, 2, 1]}}]}));
-assert.writeOK(
+assert.commandWorked(
coll.insert({_id: 1, a: 2, b: {c: 2}, d: [{e: {f: 2, g: [5, 4, 3]}}, {e: {g: [2, 1, 0]}}]}));
testAggAndFindSort(
@@ -146,9 +146,9 @@ testAggAndFindSort(
// Test a sort over the trailing field of a compound index, where the two fields of the index
// share a path prefix. This is designed as a regression test for SERVER-31858.
coll.drop();
-assert.writeOK(coll.insert({_id: 2, a: [{b: 1, c: 2}, {b: 2, c: 3}]}));
-assert.writeOK(coll.insert({_id: 0, a: [{b: 2, c: 0}, {b: 1, c: 4}]}));
-assert.writeOK(coll.insert({_id: 1, a: [{b: 1, c: 5}, {b: 2, c: 1}]}));
+assert.commandWorked(coll.insert({_id: 2, a: [{b: 1, c: 2}, {b: 2, c: 3}]}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: 2, c: 0}, {b: 1, c: 4}]}));
+assert.commandWorked(coll.insert({_id: 1, a: [{b: 1, c: 5}, {b: 2, c: 1}]}));
assert.commandWorked(coll.createIndex({"a.b": 1, "a.c": 1}));
testAggAndFindSort({
filter: {"a.b": 1},
diff --git a/jstests/core/sorta.js b/jstests/core/sorta.js
index f030cc6a673..fd7f8422187 100644
--- a/jstests/core/sorta.js
+++ b/jstests/core/sorta.js
@@ -21,7 +21,7 @@ const bulk = coll.initializeUnorderedBulkOp();
for (let doc of docs) {
bulk.insert(doc);
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(coll.find().sort({a: 1, _id: 1}).toArray(), docs);
diff --git a/jstests/core/sortc.js b/jstests/core/sortc.js
index 3b6213a11c7..975b3acf764 100644
--- a/jstests/core/sortc.js
+++ b/jstests/core/sortc.js
@@ -5,8 +5,8 @@
const coll = db.jstests_sortc;
coll.drop();
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 2}));
function checkA(a, sort, skip, query) {
query = query || {};
diff --git a/jstests/core/sortl.js b/jstests/core/sortl.js
index d0d94473460..3d1a4adcd12 100644
--- a/jstests/core/sortl.js
+++ b/jstests/core/sortl.js
@@ -5,7 +5,7 @@
var coll = db.sortl;
coll.drop();
-assert.writeOK(coll.insert({_id: 1, a: 2}));
+assert.commandWorked(coll.insert({_id: 1, a: 2}));
var res = coll.find({_id: 1}).sort({a: 1});
assert.eq(res.next(), {_id: 1, a: 2});
assert.eq(res.hasNext(), false);
diff --git a/jstests/core/splitvector.js b/jstests/core/splitvector.js
index f30d2fdc386..62a89f3027f 100644
--- a/jstests/core/splitvector.js
+++ b/jstests/core/splitvector.js
@@ -73,7 +73,7 @@ let bulkInsertDocs = function(coll, numDocs, filler) {
for (let i = 1; i <= numDocs; i++) {
bulk.insert({x: i, y: filler});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
};
// Inserts numDocs into the given collection using a bulk operation. Each document's x value is set
@@ -83,7 +83,7 @@ let bulkInsertDocsFixedX = function(coll, numDocs, filler, xVal) {
for (let i = 1; i <= numDocs; i++) {
bulk.insert({x: xVal, y: filler});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
};
// -------------------------
diff --git a/jstests/core/stages_delete.js b/jstests/core/stages_delete.js
index 88d70a39ec3..29145d961bb 100644
--- a/jstests/core/stages_delete.js
+++ b/jstests/core/stages_delete.js
@@ -14,9 +14,9 @@ var deleteStage;
// Test delete stage with isMulti: true.
coll.drop();
-assert.writeOK(coll.insert({deleteMe: true}));
-assert.writeOK(coll.insert({deleteMe: true}));
-assert.writeOK(coll.insert({deleteMe: false}));
+assert.commandWorked(coll.insert({deleteMe: true}));
+assert.commandWorked(coll.insert({deleteMe: true}));
+assert.commandWorked(coll.insert({deleteMe: false}));
deleteStage = {
delete: {args: {node: collScanStage, isMulti: true}}
};
@@ -27,9 +27,9 @@ assert.eq(coll.count({deleteMe: false}), 1);
// Test delete stage with isMulti: false.
coll.drop();
-assert.writeOK(coll.insert({deleteMe: true}));
-assert.writeOK(coll.insert({deleteMe: true}));
-assert.writeOK(coll.insert({deleteMe: false}));
+assert.commandWorked(coll.insert({deleteMe: true}));
+assert.commandWorked(coll.insert({deleteMe: true}));
+assert.commandWorked(coll.insert({deleteMe: false}));
deleteStage = {
delete: {args: {node: collScanStage, isMulti: false}}
};
diff --git a/jstests/core/stages_ixscan.js b/jstests/core/stages_ixscan.js
index 7bfa4c5b4c4..78e22e997c4 100644
--- a/jstests/core/stages_ixscan.js
+++ b/jstests/core/stages_ixscan.js
@@ -182,7 +182,7 @@ var ixscanName = {
}
};
-assert.writeOK(t.insert([{a: "1234"}, {a: "124"}]));
+assert.commandWorked(t.insert([{a: "1234"}, {a: "124"}]));
var res = db.runCommand({stageDebug: {collection: collname, plan: ixscanName}});
assert.commandWorked(res);
assert.eq(res.results.map((doc) => doc.a), ["124", "1234"]);
diff --git a/jstests/core/system_profile.js b/jstests/core/system_profile.js
index 73e6e2587c5..06c8e48f99b 100644
--- a/jstests/core/system_profile.js
+++ b/jstests/core/system_profile.js
@@ -43,7 +43,7 @@ assert.commandFailed(testDB.system.profile.runCommand("findAndModify", {query: {
// Using mapReduce to write to "system.profile" should fail.
assert.commandWorked(testDB.dropDatabase());
-assert.writeOK(testDB.foo.insert({val: 1}));
+assert.commandWorked(testDB.foo.insert({val: 1}));
assert.commandFailed(testDB.foo.runCommand("mapReduce", {
map: function() {
emit(0, this.val);
@@ -56,7 +56,7 @@ assert.commandFailed(testDB.foo.runCommand("mapReduce", {
// Using aggregate to write to "system.profile" should fail.
assert.commandWorked(testDB.dropDatabase());
-assert.writeOK(testDB.foo.insert({val: 1}));
+assert.commandWorked(testDB.foo.insert({val: 1}));
assert.commandFailed(testDB.foo.runCommand("aggregate", {pipeline: [{$out: "system.profile"}]}));
// Renaming to/from "system.profile" should fail.
diff --git a/jstests/core/tailable_cursor_invalidation.js b/jstests/core/tailable_cursor_invalidation.js
index 2424bce64f7..08ff40b0bba 100644
--- a/jstests/core/tailable_cursor_invalidation.js
+++ b/jstests/core/tailable_cursor_invalidation.js
@@ -43,7 +43,7 @@ function dropAndRecreateColl() {
for (let i = 0; i < numDocs; ++i) {
bulk.insert({_id: i});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}
dropAndRecreateColl();
diff --git a/jstests/core/tailable_getmore_batch_size.js b/jstests/core/tailable_getmore_batch_size.js
index 466fa25a686..05bab0ba341 100644
--- a/jstests/core/tailable_getmore_batch_size.js
+++ b/jstests/core/tailable_getmore_batch_size.js
@@ -16,7 +16,7 @@ function dropAndRecreateColl({numDocs}) {
for (let i = 0; i < numDocs; ++i) {
bulk.insert({_id: i});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}
// Test that running a find with the 'tailable' option will return results immediately, even if
diff --git a/jstests/core/tailable_skip_limit.js b/jstests/core/tailable_skip_limit.js
index 672a52aeb3d..8d489ba9298 100644
--- a/jstests/core/tailable_skip_limit.js
+++ b/jstests/core/tailable_skip_limit.js
@@ -10,14 +10,14 @@ var t = db[collname];
t.drop();
assert.commandWorked(db.createCollection(collname, {capped: true, size: 1024}));
-assert.writeOK(t.insert({_id: 1}));
-assert.writeOK(t.insert({_id: 2}));
+assert.commandWorked(t.insert({_id: 1}));
+assert.commandWorked(t.insert({_id: 2}));
// Non-tailable with skip
var cursor = t.find().skip(1);
assert.eq(2, cursor.next()["_id"]);
assert(!cursor.hasNext());
-assert.writeOK(t.insert({_id: 3}));
+assert.commandWorked(t.insert({_id: 3}));
assert(!cursor.hasNext());
// Non-tailable with limit
@@ -26,7 +26,7 @@ for (var i = 1; i <= 3; i++) {
assert.eq(i, cursor.next()["_id"]);
}
assert(!cursor.hasNext());
-assert.writeOK(t.insert({_id: 4}));
+assert.commandWorked(t.insert({_id: 4}));
assert(!cursor.hasNext());
// Non-tailable with negative limit
@@ -35,14 +35,14 @@ for (var i = 1; i <= 4; i++) {
assert.eq(i, cursor.next()["_id"]);
}
assert(!cursor.hasNext());
-assert.writeOK(t.insert({_id: 5}));
+assert.commandWorked(t.insert({_id: 5}));
assert(!cursor.hasNext());
// Tailable with skip
cursor = t.find().addOption(2).skip(4);
assert.eq(5, cursor.next()["_id"]);
assert(!cursor.hasNext());
-assert.writeOK(t.insert({_id: 6}));
+assert.commandWorked(t.insert({_id: 6}));
assert(cursor.hasNext());
assert.eq(6, cursor.next()["_id"]);
@@ -52,7 +52,7 @@ for (var i = 1; i <= 6; i++) {
assert.eq(i, cursor.next()["_id"]);
}
assert(!cursor.hasNext());
-assert.writeOK(t.insert({_id: 7}));
+assert.commandWorked(t.insert({_id: 7}));
assert(cursor.hasNext());
assert.eq(7, cursor.next()["_id"]);
@@ -88,6 +88,6 @@ assert.eq(cmdRes.cursor.firstBatch.length, 0);
// Test that the cursor works in the shell.
assert.eq(t.find().addOption(2).itcount(), 0);
-assert.writeOK(t.insert({a: 1}));
+assert.commandWorked(t.insert({a: 1}));
assert.eq(t.find().addOption(2).itcount(), 1);
})();
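For context: option bit 2 is DBQuery.Option.tailable, and a tailable cursor on a capped collection stays open at the end of the data, so hasNext() can become true again after later inserts. A minimal sketch under those assumptions (the _id value is hypothetical):

const tail = t.find().addOption(DBQuery.Option.tailable).skip(t.count());
assert(!tail.hasNext());
assert.commandWorked(t.insert({_id: 99}));
assert(tail.hasNext());
assert.eq(99, tail.next()._id);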
diff --git a/jstests/core/text_covered_matching.js b/jstests/core/text_covered_matching.js
index a81dfd84e09..75814f04cce 100644
--- a/jstests/core/text_covered_matching.js
+++ b/jstests/core/text_covered_matching.js
@@ -17,9 +17,9 @@ const coll = db.text_covered_matching;
coll.drop();
assert.commandWorked(coll.createIndex({a: "text", b: 1}));
-assert.writeOK(coll.insert({a: "hello", b: 1, c: 1}));
-assert.writeOK(coll.insert({a: "world", b: 2, c: 2}));
-assert.writeOK(coll.insert({a: "hello world", b: 3, c: 3}));
+assert.commandWorked(coll.insert({a: "hello", b: 1, c: 1}));
+assert.commandWorked(coll.insert({a: "world", b: 2, c: 2}));
+assert.commandWorked(coll.insert({a: "hello world", b: 3, c: 3}));
//
// Test the query {$text: {$search: "hello"}, b: 1} with and without the 'textScore' in the
@@ -107,9 +107,9 @@ assert.eq(explainResult.executionStats.nReturned,
//
coll.drop();
assert.commandWorked(coll.createIndex({a: "text", "b.d": 1}));
-assert.writeOK(coll.insert({a: "hello", b: {d: 1}, c: {e: 1}}));
-assert.writeOK(coll.insert({a: "world", b: {d: 2}, c: {e: 2}}));
-assert.writeOK(coll.insert({a: "hello world", b: {d: 3}, c: {e: 3}}));
+assert.commandWorked(coll.insert({a: "hello", b: {d: 1}, c: {e: 1}}));
+assert.commandWorked(coll.insert({a: "world", b: {d: 2}, c: {e: 2}}));
+assert.commandWorked(coll.insert({a: "hello world", b: {d: 3}, c: {e: 3}}));
// Expected result:
// - We examine two keys, for the two documents with "hello" in their text;
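For context: a covered plan can evaluate the 'b.d' predicate directly from the {a: "text", "b.d": 1} index entries, while predicates on the unindexed 'c' force a fetch. A short sketch against the three documents just inserted:

const covered = coll.find({$text: {$search: 'hello'}, 'b.d': 1}, {_id: 0, 'b.d': 1}).toArray();
assert.eq([{b: {d: 1}}], covered);  // only the {a: "hello", b: {d: 1}} document qualifies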
diff --git a/jstests/core/top.js b/jstests/core/top.js
index 21b196a850d..7219192516f 100644
--- a/jstests/core/top.js
+++ b/jstests/core/top.js
@@ -35,14 +35,14 @@ var numRecords = 100;
// Insert
for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.insert({_id: i}));
+ assert.commandWorked(testColl.insert({_id: i}));
}
assertTopDiffEq(testColl, lastTop, "insert", numRecords);
lastTop = assertTopDiffEq(testColl, lastTop, "writeLock", numRecords);
// Update
for (i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.update({_id: i}, {x: i}));
+ assert.commandWorked(testColl.update({_id: i}, {x: i}));
}
lastTop = assertTopDiffEq(testColl, lastTop, "update", numRecords);
@@ -65,13 +65,13 @@ lastTop = assertTopDiffEq(testColl, lastTop, "getmore", numRecords);
// Remove
for (i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.remove({_id: 1}));
+ assert.commandWorked(testColl.remove({_id: 1}));
}
lastTop = assertTopDiffEq(testColl, lastTop, "remove", numRecords);
// Upsert; note that these are counted as updates, not inserts
for (i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1}));
+ assert.commandWorked(testColl.update({_id: i}, {x: i}, {upsert: 1}));
}
lastTop = assertTopDiffEq(testColl, lastTop, "update", numRecords);
diff --git a/jstests/core/ts1.js b/jstests/core/ts1.js
index a52995dd4c8..e1d937ee6c5 100644
--- a/jstests/core/ts1.js
+++ b/jstests/core/ts1.js
@@ -10,7 +10,7 @@ t.drop();
const N = 20;
for (let i = 0; i < N; i++) {
- assert.writeOK(t.insert({_id: i, x: new Timestamp()}));
+ assert.commandWorked(t.insert({_id: i, x: new Timestamp()}));
sleep(100);
}
@@ -37,7 +37,7 @@ for (let i = 0; i < N - 1; i++) {
assert.eq(N, t.find({x: {$type: 17}}).itcount());
assert.eq(0, t.find({x: {$type: 3}}).itcount());
-assert.writeOK(t.insert({_id: 100, x: new Timestamp(123456, 50)}));
+assert.commandWorked(t.insert({_id: 100, x: new Timestamp(123456, 50)}));
const x = t.findOne({_id: 100}).x;
assert.eq(123456, x.t);
assert.eq(50, x.i);
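For context: an all-zero Timestamp() is filled in server-side at insert time, while Timestamp(t, i) pins the seconds 't' and ordinal 'i'; both are BSON type 17. A short sketch against the document just inserted:

assert.eq(1, t.find({_id: 100, x: {$type: 'timestamp'}}).itcount());  // alias for type 17
assert.lt(0, t.findOne({_id: 100}).x.t);                              // seconds component is set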
diff --git a/jstests/core/txns/abort_expired_transaction.js b/jstests/core/txns/abort_expired_transaction.js
index c64ed7407e5..b211ef7b132 100644
--- a/jstests/core/txns/abort_expired_transaction.js
+++ b/jstests/core/txns/abort_expired_transaction.js
@@ -26,7 +26,7 @@ try {
assert.commandWorked(db.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: 1}));
jsTest.log("Create a collection '" + ns + "' outside of the transaction.");
- assert.writeOK(testColl.insert({foo: "bar"}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(testColl.insert({foo: "bar"}, {writeConcern: {w: "majority"}}));
jsTest.log("Set up the session.");
const sessionOptions = {causalConsistency: false};
diff --git a/jstests/core/txns/find_and_modify_in_transaction.js b/jstests/core/txns/find_and_modify_in_transaction.js
index 02c5a1639e1..835d94fbd61 100644
--- a/jstests/core/txns/find_and_modify_in_transaction.js
+++ b/jstests/core/txns/find_and_modify_in_transaction.js
@@ -20,8 +20,8 @@ const sessionDb = session.getDatabase(dbName);
const sessionColl = sessionDb[collName];
jsTest.log("Prepopulate the collection.");
-assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}],
- {writeConcern: {w: "majority"}}));
+assert.commandWorked(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}],
+ {writeConcern: {w: "majority"}}));
/***********************************************************************************************
* Do a non-matching find-and-modify with remove.
diff --git a/jstests/core/txns/multi_delete_in_transaction.js b/jstests/core/txns/multi_delete_in_transaction.js
index c8aad0c5c79..7567a1b95df 100644
--- a/jstests/core/txns/multi_delete_in_transaction.js
+++ b/jstests/core/txns/multi_delete_in_transaction.js
@@ -20,8 +20,8 @@ const sessionDb = session.getDatabase(dbName);
const sessionColl = sessionDb[collName];
jsTest.log("Prepopulate the collection.");
-assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}],
- {writeConcern: {w: "majority"}}));
+assert.commandWorked(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}],
+ {writeConcern: {w: "majority"}}));
jsTest.log("Do an empty multi-delete.");
session.startTransaction({writeConcern: {w: "majority"}});
diff --git a/jstests/core/txns/multi_update_in_transaction.js b/jstests/core/txns/multi_update_in_transaction.js
index c6d9f3e994c..d9754f586d8 100644
--- a/jstests/core/txns/multi_update_in_transaction.js
+++ b/jstests/core/txns/multi_update_in_transaction.js
@@ -20,8 +20,8 @@ const sessionDb = session.getDatabase(dbName);
const sessionColl = sessionDb[collName];
jsTest.log("Prepopulate the collection.");
-assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}],
- {writeConcern: {w: "majority"}}));
+assert.commandWorked(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}],
+ {writeConcern: {w: "majority"}}));
jsTest.log("Do an empty multi-update.");
session.startTransaction({writeConcern: {w: "majority"}});
diff --git a/jstests/core/txns/read_concerns.js b/jstests/core/txns/read_concerns.js
index 409236a5ffd..fb7aa201131 100644
--- a/jstests/core/txns/read_concerns.js
+++ b/jstests/core/txns/read_concerns.js
@@ -18,7 +18,7 @@ function runTest(level, sessionOptions, supported) {
const sessionColl = sessionDB[collName];
// Set up the collection.
- assert.writeOK(sessionColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(sessionColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
if (level) {
session.startTransaction({readConcern: {level: level}});
diff --git a/jstests/core/txns/read_own_multikey_writes.js b/jstests/core/txns/read_own_multikey_writes.js
index 9af97dc4baa..1c3dc450280 100644
--- a/jstests/core/txns/read_own_multikey_writes.js
+++ b/jstests/core/txns/read_own_multikey_writes.js
@@ -15,11 +15,11 @@ const sessionColl = sessionDb.getCollection(collName);
assert.commandWorked(sessionDb.runCommand({create: collName}));
-assert.writeOK(sessionColl.insert({a: 1}));
+assert.commandWorked(sessionColl.insert({a: 1}));
assert.commandWorked(sessionColl.createIndex({a: 1}));
session.startTransaction();
-assert.writeOK(sessionColl.update({}, {$set: {a: [1, 2, 3]}}));
+assert.commandWorked(sessionColl.update({}, {$set: {a: [1, 2, 3]}}));
assert.eq(1, sessionColl.find({}, {_id: 0, a: 1}).sort({a: 1}).itcount());
assert.commandWorked(session.commitTransaction_forTesting());
diff --git a/jstests/core/txns/repeatable_reads_in_transaction.js b/jstests/core/txns/repeatable_reads_in_transaction.js
index 3286b6e72cb..870a1d58e6f 100644
--- a/jstests/core/txns/repeatable_reads_in_transaction.js
+++ b/jstests/core/txns/repeatable_reads_in_transaction.js
@@ -26,7 +26,8 @@ const session2Db = session2.getDatabase(dbName);
const session2Coll = session2Db.getCollection(collName);
jsTest.log("Prepopulate the collection.");
-assert.writeOK(testColl.insert([{_id: 0}, {_id: 1}, {_id: 2}], {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ testColl.insert([{_id: 0}, {_id: 1}, {_id: 2}], {writeConcern: {w: "majority"}}));
// Create a constant array of documents we expect to be returned during a read-only transaction.
// The value should not change since external changes should not be visible within this
@@ -61,7 +62,7 @@ assert.sameMembers(expectedDocs, sessionColl.find().toArray());
jsTestLog(
"Writes that occur outside of a transaction should not be visible to a read only transaction.");
-assert.writeOK(testColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(testColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
assert.sameMembers(expectedDocs, sessionColl.find().toArray());
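For context: a transaction reads from a single snapshot, so even a majority-committed write from outside stays invisible until the transaction ends. A minimal sketch under that assumption (presumes the session is not already in a transaction; the _id is hypothetical):

session.startTransaction({readConcern: {level: 'snapshot'}});
const inside = sessionColl.find().toArray();
assert.commandWorked(testColl.insert({_id: 99}, {writeConcern: {w: 'majority'}}));
assert.sameMembers(inside, sessionColl.find().toArray());  // still the old snapshot
assert.commandWorked(session.commitTransaction_forTesting());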
diff --git a/jstests/core/txns/start_transaction_with_read.js b/jstests/core/txns/start_transaction_with_read.js
index 045b9af1083..e6af5ebe31b 100644
--- a/jstests/core/txns/start_transaction_with_read.js
+++ b/jstests/core/txns/start_transaction_with_read.js
@@ -25,7 +25,7 @@ const initialDoc = {
_id: "pretransaction1",
x: 0
};
-assert.writeOK(sessionColl.insert(initialDoc, {writeConcern: {w: "majority"}}));
+assert.commandWorked(sessionColl.insert(initialDoc, {writeConcern: {w: "majority"}}));
jsTest.log("Start a transaction with a read");
diff --git a/jstests/core/txns/statement_ids_accepted.js b/jstests/core/txns/statement_ids_accepted.js
index d93c0e818be..bf73cbff55b 100644
--- a/jstests/core/txns/statement_ids_accepted.js
+++ b/jstests/core/txns/statement_ids_accepted.js
@@ -92,7 +92,7 @@ assert.commandWorked(sessionDb.runCommand({
jsTestLog("Check that find and getmore accept a statement ID");
// Put in some data to find so getMore has a cursor to use.
-assert.writeOK(testColl.insert([{_id: 0}, {_id: 1}], {writeConcern: {w: "majority"}}));
+assert.commandWorked(testColl.insert([{_id: 0}, {_id: 1}], {writeConcern: {w: "majority"}}));
let res = assert.commandWorked(sessionDb.runCommand({
find: collName,
batchSize: 1,
@@ -169,10 +169,10 @@ if (!isMongos) {
// Skip commands that do not exist on mongos.
jsTestLog("Check that geoSearch accepts a statement ID");
- assert.writeOK(testColl.insert({geo: {type: "Point", coordinates: [0, 0]}, a: 0}),
- {writeConcern: {w: "majority"}});
- assert.writeOK(testColl.insert({geoh: {lat: 0, long: 0}, b: 0}),
- {writeConcern: {w: "majority"}});
+    assert.commandWorked(testColl.insert({geo: {type: "Point", coordinates: [0, 0]}, a: 0},
+                                          {writeConcern: {w: "majority"}}));
+    assert.commandWorked(
+        testColl.insert({geoh: {lat: 0, long: 0}, b: 0}, {writeConcern: {w: "majority"}}));
assert.commandWorked(sessionDb.runCommand({
createIndexes: collName,
indexes: [
diff --git a/jstests/core/type_array.js b/jstests/core/type_array.js
index 0bc3dc0f6f3..cec6ff71748 100644
--- a/jstests/core/type_array.js
+++ b/jstests/core/type_array.js
@@ -21,14 +21,14 @@ function extractSortedIdsFromCursor(cursor) {
}
function runTests() {
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 1, a: [1, 2, 3]}));
- assert.writeOK(coll.insert({_id: 2, a: [1, "foo", 3]}));
- assert.writeOK(coll.insert({_id: 3, a: []}));
- assert.writeOK(coll.insert({_id: 4, a: [[]]}));
- assert.writeOK(coll.insert({_id: 5, a: [[[]]]}));
- assert.writeOK(coll.insert({_id: 6, a: 1}));
- assert.writeOK(coll.insert({_id: 7, a: "foo"}));
+ assert.commandWorked(coll.remove({}));
+ assert.commandWorked(coll.insert({_id: 1, a: [1, 2, 3]}));
+ assert.commandWorked(coll.insert({_id: 2, a: [1, "foo", 3]}));
+ assert.commandWorked(coll.insert({_id: 3, a: []}));
+ assert.commandWorked(coll.insert({_id: 4, a: [[]]}));
+ assert.commandWorked(coll.insert({_id: 5, a: [[[]]]}));
+ assert.commandWorked(coll.insert({_id: 6, a: 1}));
+ assert.commandWorked(coll.insert({_id: 7, a: "foo"}));
assert.eq([1, 2, 6], extractSortedIdsFromCursor(coll.find({a: {$type: "number"}})));
assert.eq([2, 7], extractSortedIdsFromCursor(coll.find({a: {$type: "string"}})));
@@ -36,14 +36,14 @@ function runTests() {
assert.eq([4, 5], extractSortedIdsFromCursor(coll.find({"a.0": {$type: "array"}})));
assert.eq([5], extractSortedIdsFromCursor(coll.find({"a.0.0": {$type: "array"}})));
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: NumberInt(1)}));
- assert.writeOK(coll.insert({_id: 2, a: NumberLong(1)}));
- assert.writeOK(coll.insert({_id: 3, a: "str"}));
- assert.writeOK(coll.insert({_id: 4, a: []}));
- assert.writeOK(coll.insert({_id: 5, a: [NumberInt(1), "str"]}));
- assert.writeOK(coll.insert({_id: 6}));
+ assert.commandWorked(coll.remove({}));
+ assert.commandWorked(coll.insert({_id: 0, a: 1}));
+ assert.commandWorked(coll.insert({_id: 1, a: NumberInt(1)}));
+ assert.commandWorked(coll.insert({_id: 2, a: NumberLong(1)}));
+ assert.commandWorked(coll.insert({_id: 3, a: "str"}));
+ assert.commandWorked(coll.insert({_id: 4, a: []}));
+ assert.commandWorked(coll.insert({_id: 5, a: [NumberInt(1), "str"]}));
+ assert.commandWorked(coll.insert({_id: 6}));
// Test that $type fails when given an array that contains an element that is neither a string
// nor a number.
@@ -67,6 +67,6 @@ function runTests() {
// Verify $type queries both with and without an index.
runTests();
-assert.writeOK(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({a: 1}));
runTests();
}());
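For context: for an array field, a $type predicate matches when any element has the named type, and 'array' matches only elements that are themselves arrays. A short sketch (hypothetical document):

assert.commandWorked(coll.insert({_id: 10, a: [1, 'foo']}));
assert.eq(1, coll.find({_id: 10, a: {$type: 'number'}}).itcount());
assert.eq(1, coll.find({_id: 10, a: {$type: 'string'}}).itcount());
assert.eq(0, coll.find({_id: 10, a: {$type: 'array'}}).itcount());  // no nested array element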
diff --git a/jstests/core/uniqueness.js b/jstests/core/uniqueness.js
index 54a5a71c276..d138f59d0ae 100644
--- a/jstests/core/uniqueness.js
+++ b/jstests/core/uniqueness.js
@@ -20,7 +20,7 @@ t.drop();
// test uniqueness of _id
res = t.save({_id: 3});
-assert.writeOK(res);
+assert.commandWorked(res);
// this should yield an error
res = t.insert({_id: 3});
@@ -28,7 +28,7 @@ assert.writeError(res);
assert.eq(1, t.count());
res = t.insert({_id: 4, x: 99});
-assert.writeOK(res);
+assert.commandWorked(res);
// this should yield an error
res = t.update({_id: 4}, {_id: 3, x: 99});
diff --git a/jstests/core/update_addToSet.js b/jstests/core/update_addToSet.js
index bcf5b752846..5927dd882f8 100644
--- a/jstests/core/update_addToSet.js
+++ b/jstests/core/update_addToSet.js
@@ -77,18 +77,18 @@ o = {
_id: 1,
a: [1, 2]
};
-assert.writeOK(t.insert(o));
+assert.commandWorked(t.insert(o));
-assert.writeOK(t.update({}, {$addToSet: {a: {'x.$.y': 'bad'}}}));
-assert.writeOK(t.update({}, {$addToSet: {a: {b: {'x.$.y': 'bad'}}}}));
+assert.commandWorked(t.update({}, {$addToSet: {a: {'x.$.y': 'bad'}}}));
+assert.commandWorked(t.update({}, {$addToSet: {a: {b: {'x.$.y': 'bad'}}}}));
assert.writeError(t.update({}, {$addToSet: {a: {"$bad": "bad"}}}));
assert.writeError(t.update({}, {$addToSet: {a: {b: {"$bad": "bad"}}}}));
-assert.writeOK(t.update({}, {$addToSet: {a: {_id: {"x.y": 2}}}}));
+assert.commandWorked(t.update({}, {$addToSet: {a: {_id: {"x.y": 2}}}}));
-assert.writeOK(t.update({}, {$addToSet: {a: {$each: [{'x.$.y': 'bad'}]}}}));
-assert.writeOK(t.update({}, {$addToSet: {a: {$each: [{b: {'x.$.y': 'bad'}}]}}}));
+assert.commandWorked(t.update({}, {$addToSet: {a: {$each: [{'x.$.y': 'bad'}]}}}));
+assert.commandWorked(t.update({}, {$addToSet: {a: {$each: [{b: {'x.$.y': 'bad'}}]}}}));
assert.writeError(t.update({}, {$addToSet: {a: {$each: [{'$bad': 'bad'}]}}}));
assert.writeError(t.update({}, {$addToSet: {a: {$each: [{b: {'$bad': 'bad'}}]}}}));
@@ -99,10 +99,10 @@ o = {
_id: 1,
a: [1, 2]
};
-assert.writeOK(t.insert(o));
+assert.commandWorked(t.insert(o));
-assert.writeOK(t.update({}, {$addToSet: {a: {_id: ["foo", "bar", "baz"]}}}));
-assert.writeOK(t.update({}, {$addToSet: {a: {_id: /acme.*corp/}}}));
+assert.commandWorked(t.update({}, {$addToSet: {a: {_id: ["foo", "bar", "baz"]}}}));
+assert.commandWorked(t.update({}, {$addToSet: {a: {_id: /acme.*corp/}}}));
// Test that DBRefs are allowed.
t.drop();
@@ -110,19 +110,19 @@ o = {
_id: 1,
a: [1, 2]
};
-assert.writeOK(t.insert(o));
+assert.commandWorked(t.insert(o));
foo = {
"foo": "bar"
};
-assert.writeOK(t.insert(foo));
+assert.commandWorked(t.insert(foo));
let fooDoc = t.findOne(foo);
assert.eq(fooDoc.foo, foo.foo);
let fooDocRef = {reference: new DBRef(t.getName(), fooDoc._id, t.getDB().getName())};
-assert.writeOK(t.update({_id: o._id}, {$addToSet: {a: fooDocRef}}));
+assert.commandWorked(t.update({_id: o._id}, {$addToSet: {a: fooDocRef}}));
assert.eq(t.findOne({_id: o._id}).a[2], fooDocRef);
-assert.writeOK(t.update({_id: o._id}, {$addToSet: {a: {b: fooDocRef}}}));
+assert.commandWorked(t.update({_id: o._id}, {$addToSet: {a: {b: fooDocRef}}}));
assert.eq(t.findOne({_id: o._id}).a[3].b, fooDocRef);
diff --git a/jstests/core/update_affects_indexes.js b/jstests/core/update_affects_indexes.js
index 956efadbf0e..41b0cb4a016 100644
--- a/jstests/core/update_affects_indexes.js
+++ b/jstests/core/update_affects_indexes.js
@@ -27,68 +27,68 @@ function assertExpectedIndexKeys(docId, expectedKeys, unexpectedKeys) {
}
// $set implicitly creates array element at end of array.
-assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: 0}]}));
assertExpectedIndexKeys({_id: 0}, [{"a.b": 0}], [{"a.b": null}]);
-assert.writeOK(coll.update({_id: 0}, {$set: {"a.1.c": 0}}));
+assert.commandWorked(coll.update({_id: 0}, {$set: {"a.1.c": 0}}));
assertExpectedIndexKeys({_id: 0}, [{"a.b": 0}, {"a.b": null}], []);
// $set implicitly creates array element beyond end of array.
-assert.writeOK(coll.insert({_id: 1, a: [{b: 0}]}));
+assert.commandWorked(coll.insert({_id: 1, a: [{b: 0}]}));
assertExpectedIndexKeys({_id: 1}, [{"a.b": 0}], [{"a.b": null}]);
-assert.writeOK(coll.update({_id: 1}, {$set: {"a.3.c": 0}}));
+assert.commandWorked(coll.update({_id: 1}, {$set: {"a.3.c": 0}}));
assertExpectedIndexKeys({_id: 1}, [{"a.b": 0}, {"a.b": null}], []);
// $set implicitly creates array element in empty array (no index key changes needed).
-assert.writeOK(coll.insert({_id: 2, a: []}));
+assert.commandWorked(coll.insert({_id: 2, a: []}));
assertExpectedIndexKeys({_id: 2}, [{"a.b": null}], []);
-assert.writeOK(coll.update({_id: 2}, {$set: {"a.0.c": 0}}));
+assert.commandWorked(coll.update({_id: 2}, {$set: {"a.0.c": 0}}));
assertExpectedIndexKeys({_id: 2}, [{"a.b": null}], []);
// $inc implicitly creates array element at end of array.
-assert.writeOK(coll.insert({_id: 3, a: [{b: 0}]}));
+assert.commandWorked(coll.insert({_id: 3, a: [{b: 0}]}));
assertExpectedIndexKeys({_id: 3}, [{"a.b": 0}], [{"a.b": null}]);
-assert.writeOK(coll.update({_id: 3}, {$inc: {"a.1.c": 0}}));
+assert.commandWorked(coll.update({_id: 3}, {$inc: {"a.1.c": 0}}));
assertExpectedIndexKeys({_id: 3}, [{"a.b": 0}, {"a.b": null}], []);
// $mul implicitly creates array element at end of array.
-assert.writeOK(coll.insert({_id: 4, a: [{b: 0}]}));
+assert.commandWorked(coll.insert({_id: 4, a: [{b: 0}]}));
assertExpectedIndexKeys({_id: 4}, [{"a.b": 0}], [{"a.b": null}]);
-assert.writeOK(coll.update({_id: 4}, {$mul: {"a.1.c": 0}}));
+assert.commandWorked(coll.update({_id: 4}, {$mul: {"a.1.c": 0}}));
assertExpectedIndexKeys({_id: 4}, [{"a.b": 0}, {"a.b": null}], []);
// $addToSet implicitly creates array element at end of array.
-assert.writeOK(coll.insert({_id: 5, a: [{b: 0}]}));
+assert.commandWorked(coll.insert({_id: 5, a: [{b: 0}]}));
assertExpectedIndexKeys({_id: 5}, [{"a.b": 0}], [{"a.b": null}]);
-assert.writeOK(coll.update({_id: 5}, {$addToSet: {"a.1.c": 0}}));
+assert.commandWorked(coll.update({_id: 5}, {$addToSet: {"a.1.c": 0}}));
assertExpectedIndexKeys({_id: 5}, [{"a.b": 0}, {"a.b": null}], []);
// $bit implicitly creates array element at end of array.
-assert.writeOK(coll.insert({_id: 6, a: [{b: 0}]}));
+assert.commandWorked(coll.insert({_id: 6, a: [{b: 0}]}));
assertExpectedIndexKeys({_id: 6}, [{"a.b": 0}], [{"a.b": null}]);
-assert.writeOK(coll.update({_id: 6}, {$bit: {"a.1.c": {and: NumberInt(1)}}}));
+assert.commandWorked(coll.update({_id: 6}, {$bit: {"a.1.c": {and: NumberInt(1)}}}));
assertExpectedIndexKeys({_id: 6}, [{"a.b": 0}, {"a.b": null}], []);
// $min implicitly creates array element at end of array.
-assert.writeOK(coll.insert({_id: 7, a: [{b: 0}]}));
+assert.commandWorked(coll.insert({_id: 7, a: [{b: 0}]}));
assertExpectedIndexKeys({_id: 7}, [{"a.b": 0}], [{"a.b": null}]);
-assert.writeOK(coll.update({_id: 7}, {$min: {"a.1.c": 0}}));
+assert.commandWorked(coll.update({_id: 7}, {$min: {"a.1.c": 0}}));
assertExpectedIndexKeys({_id: 7}, [{"a.b": 0}, {"a.b": null}], []);
// $max implicitly creates array element at end of array.
-assert.writeOK(coll.insert({_id: 8, a: [{b: 0}]}));
+assert.commandWorked(coll.insert({_id: 8, a: [{b: 0}]}));
assertExpectedIndexKeys({_id: 8}, [{"a.b": 0}], [{"a.b": null}]);
-assert.writeOK(coll.update({_id: 8}, {$max: {"a.1.c": 0}}));
+assert.commandWorked(coll.update({_id: 8}, {$max: {"a.1.c": 0}}));
assertExpectedIndexKeys({_id: 8}, [{"a.b": 0}, {"a.b": null}], []);
// $currentDate implicitly creates array element at end of array.
-assert.writeOK(coll.insert({_id: 9, a: [{b: 0}]}));
+assert.commandWorked(coll.insert({_id: 9, a: [{b: 0}]}));
assertExpectedIndexKeys({_id: 9}, [{"a.b": 0}], [{"a.b": null}]);
-assert.writeOK(coll.update({_id: 9}, {$currentDate: {"a.1.c": true}}));
+assert.commandWorked(coll.update({_id: 9}, {$currentDate: {"a.1.c": true}}));
assertExpectedIndexKeys({_id: 9}, [{"a.b": 0}, {"a.b": null}], []);
// $push implicitly creates array element at end of array.
-assert.writeOK(coll.insert({_id: 10, a: [{b: 0}]}));
+assert.commandWorked(coll.insert({_id: 10, a: [{b: 0}]}));
assertExpectedIndexKeys({_id: 10}, [{"a.b": 0}], [{"a.b": null}]);
-assert.writeOK(coll.update({_id: 10}, {$push: {"a.1.c": 0}}));
+assert.commandWorked(coll.update({_id: 10}, {$push: {"a.1.c": 0}}));
assertExpectedIndexKeys({_id: 10}, [{"a.b": 0}, {"a.b": null}], []);
}());
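For context: each case above shows that setting a path like "a.1.c" on {a: [{b: 0}]} implicitly appends a new element, and that element contributes an {'a.b': null} key to the {'a.b': 1} index. A minimal sketch of the resulting document shape (hypothetical _id):

assert.commandWorked(coll.insert({_id: 99, a: [{b: 0}]}));
assert.commandWorked(coll.update({_id: 99}, {$set: {'a.1.c': 0}}));
assert.docEq({_id: 99, a: [{b: 0}, {c: 0}]}, coll.findOne({_id: 99}));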
diff --git a/jstests/core/update_arrayFilters.js b/jstests/core/update_arrayFilters.js
index 55d7614495d..a7e517a3fe4 100644
--- a/jstests/core/update_arrayFilters.js
+++ b/jstests/core/update_arrayFilters.js
@@ -83,9 +83,9 @@ if (db.getMongo().writeMode() !== "commands") {
assert.writeErrorWithCode(res, ErrorCodes.QueryFeatureNotAllowed);
// Good value for arrayFilters succeeds.
- assert.writeOK(coll.update(
+ assert.commandWorked(coll.update(
{_id: 0}, {$set: {"a.$[i]": 5, "a.$[j]": 6}}, {arrayFilters: [{i: 0}, {j: 0}]}));
- assert.writeOK(coll.update(
+ assert.commandWorked(coll.update(
{_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$or: [{i: 0}, {$and: [{}]}]}]}));
}
@@ -160,7 +160,7 @@ if (db.getMongo().writeMode() !== "commands") {
});
bulk = coll.initializeUnorderedBulkOp();
bulk.find({}).arrayFilters([{i: 0}]).update({$set: {"a.$[i]": 5}});
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
// updateOne().
bulk = coll.initializeUnorderedBulkOp();
@@ -170,7 +170,7 @@ if (db.getMongo().writeMode() !== "commands") {
});
bulk = coll.initializeUnorderedBulkOp();
bulk.find({_id: 0}).arrayFilters([{i: 0}]).updateOne({$set: {"a.$[i]": 5}});
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}
//
@@ -273,51 +273,52 @@ assert.commandWorked(coll.explain().findAndModify(
// $set.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+assert.commandWorked(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]}));
+ assert.commandWorked(coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1, 2, 1]});
}
-assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[]": 3}}));
+assert.commandWorked(coll.update({_id: 0}, {$set: {"a.$[]": 3}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 3, 3, 3]});
// $unset.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+assert.commandWorked(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$unset: {"a.$[i]": true}}, {arrayFilters: [{i: 0}]}));
+ assert.commandWorked(
+ coll.update({_id: 0}, {$unset: {"a.$[i]": true}}, {arrayFilters: [{i: 0}]}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [null, 1, null, 1]});
}
-assert.writeOK(coll.update({_id: 0}, {$unset: {"a.$[]": true}}));
+assert.commandWorked(coll.update({_id: 0}, {$unset: {"a.$[]": true}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [null, null, null, null]});
// $inc.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+assert.commandWorked(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$inc: {"a.$[i]": 1}}, {arrayFilters: [{i: 1}]}));
+ assert.commandWorked(coll.update({_id: 0}, {$inc: {"a.$[i]": 1}}, {arrayFilters: [{i: 1}]}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2, 0, 2]});
}
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
-assert.writeOK(coll.update({_id: 0}, {$inc: {"a.$[]": 1}}));
+assert.commandWorked(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+assert.commandWorked(coll.update({_id: 0}, {$inc: {"a.$[]": 1}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [1, 2, 1, 2]});
// $mul.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [0, 2, 0, 2]}));
+assert.commandWorked(coll.insert({_id: 0, a: [0, 2, 0, 2]}));
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$mul: {"a.$[i]": 3}}, {arrayFilters: [{i: 2}]}));
+ assert.commandWorked(coll.update({_id: 0}, {$mul: {"a.$[i]": 3}}, {arrayFilters: [{i: 2}]}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 6, 0, 6]});
}
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [1, 2, 1, 2]}));
-assert.writeOK(coll.update({_id: 0}, {$mul: {"a.$[]": 3}}));
+assert.commandWorked(coll.insert({_id: 0, a: [1, 2, 1, 2]}));
+assert.commandWorked(coll.update({_id: 0}, {$mul: {"a.$[]": 3}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 6, 3, 6]});
// $rename.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [1, 2, 3, 4]}));
+assert.commandWorked(coll.insert({_id: 0, a: [1, 2, 3, 4]}));
if (db.getMongo().writeMode() === "commands") {
res = coll.update({_id: 0}, {$rename: {"a.$[i]": "b"}}, {arrayFilters: [{i: 0}]});
assert.writeErrorWithCode(res, ErrorCodes.BadValue);
@@ -334,7 +335,7 @@ if (db.getMongo().writeMode() === "commands") {
"updated failed for reason other than unused array filter");
}
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [0], b: [1]}));
+assert.commandWorked(coll.insert({_id: 0, a: [0], b: [1]}));
res = coll.update({_id: 0}, {$rename: {"a.$[]": "b"}});
assert.writeErrorWithCode(res, ErrorCodes.BadValue);
assert.neq(
@@ -347,51 +348,53 @@ assert.neq(-1,
res.getWriteError().errmsg.indexOf(
"The destination field for $rename may not be dynamic: b.$[]"),
"update failed for a reason other than using array updates with $rename");
-assert.writeOK(coll.update({_id: 0}, {$rename: {"a": "b"}}));
+assert.commandWorked(coll.update({_id: 0}, {$rename: {"a": "b"}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, b: [0]});
// $setOnInsert.
coll.drop();
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update(
+ assert.commandWorked(coll.update(
{_id: 0, a: [0]}, {$setOnInsert: {"a.$[i]": 1}}, {arrayFilters: [{i: 0}], upsert: true}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [1]});
}
coll.drop();
-assert.writeOK(coll.update({_id: 0, a: [0]}, {$setOnInsert: {"a.$[]": 1}}, {upsert: true}));
+assert.commandWorked(coll.update({_id: 0, a: [0]}, {$setOnInsert: {"a.$[]": 1}}, {upsert: true}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [1]});
// $min.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [{b: 0, c: 1}, {b: 0, c: -1}, {b: 1, c: 1}]}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: 0, c: 1}, {b: 0, c: -1}, {b: 1, c: 1}]}));
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$min: {"a.$[i].c": 0}}, {arrayFilters: [{"i.b": 0}]}));
+ assert.commandWorked(
+ coll.update({_id: 0}, {$min: {"a.$[i].c": 0}}, {arrayFilters: [{"i.b": 0}]}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 0}, {b: 0, c: -1}, {b: 1, c: 1}]});
}
-assert.writeOK(coll.update({_id: 0}, {$min: {"a.$[].c": 0}}));
+assert.commandWorked(coll.update({_id: 0}, {$min: {"a.$[].c": 0}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 0}, {b: 0, c: -1}, {b: 1, c: 0}]});
// $max.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [{b: 0, c: 1}, {b: 0, c: -1}, {b: 1, c: -1}]}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: 0, c: 1}, {b: 0, c: -1}, {b: 1, c: -1}]}));
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$max: {"a.$[i].c": 0}}, {arrayFilters: [{"i.b": 0}]}));
+ assert.commandWorked(
+ coll.update({_id: 0}, {$max: {"a.$[i].c": 0}}, {arrayFilters: [{"i.b": 0}]}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 1}, {b: 0, c: 0}, {b: 1, c: -1}]});
}
-assert.writeOK(coll.update({_id: 0}, {$max: {"a.$[].c": 0}}));
+assert.commandWorked(coll.update({_id: 0}, {$max: {"a.$[].c": 0}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 1}, {b: 0, c: 0}, {b: 1, c: 0}]});
// $currentDate.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [0, 1]}));
+assert.commandWorked(coll.insert({_id: 0, a: [0, 1]}));
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(
+ assert.commandWorked(
coll.update({_id: 0}, {$currentDate: {"a.$[i]": true}}, {arrayFilters: [{i: 0}]}));
let doc = coll.findOne({_id: 0});
assert(doc.a[0].constructor == Date, tojson(doc));
assert.eq(doc.a[1], 1, tojson(doc));
}
-assert.writeOK(coll.update({_id: 0}, {$currentDate: {"a.$[]": true}}));
+assert.commandWorked(coll.update({_id: 0}, {$currentDate: {"a.$[]": true}}));
let doc = coll.findOne({_id: 0});
assert(doc.a[0].constructor == Date, tojson(doc));
assert(doc.a[1].constructor == Date, tojson(doc));
@@ -399,74 +402,76 @@ assert(doc.a[1].constructor == Date, tojson(doc));
// $addToSet.
coll.drop();
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.insert({_id: 0, a: [[0], [1]]}));
- assert.writeOK(coll.update({_id: 0}, {$addToSet: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [[0], [1]]}));
+ assert.commandWorked(
+ coll.update({_id: 0}, {$addToSet: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[0, 2], [1]]});
}
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [[0], [1]]}));
-assert.writeOK(coll.update({_id: 0}, {$addToSet: {"a.$[]": 2}}));
+assert.commandWorked(coll.insert({_id: 0, a: [[0], [1]]}));
+assert.commandWorked(coll.update({_id: 0}, {$addToSet: {"a.$[]": 2}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[0, 2], [1, 2]]});
// $pop.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]}));
+assert.commandWorked(coll.insert({_id: 0, a: [[0, 1], [1, 2]]}));
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$pop: {"a.$[i]": 1}}, {arrayFilters: [{i: 0}]}));
+ assert.commandWorked(coll.update({_id: 0}, {$pop: {"a.$[i]": 1}}, {arrayFilters: [{i: 0}]}));
assert.eq({_id: 0, a: [[0], [1, 2]]}, coll.findOne());
}
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: [[0]]}));
-assert.writeOK(coll.update({_id: 0}, {$pop: {"a.$[]": 1}}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: [[0]]}));
+assert.commandWorked(coll.update({_id: 0}, {$pop: {"a.$[]": 1}}));
assert.eq({_id: 0, a: [[]]}, coll.findOne());
// $pullAll.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [[0, 1, 2, 3], [1, 2, 3, 4]]}));
+assert.commandWorked(coll.insert({_id: 0, a: [[0, 1, 2, 3], [1, 2, 3, 4]]}));
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$pullAll: {"a.$[i]": [0, 2]}}, {arrayFilters: [{i: 0}]}));
+ assert.commandWorked(
+ coll.update({_id: 0}, {$pullAll: {"a.$[i]": [0, 2]}}, {arrayFilters: [{i: 0}]}));
assert.eq({_id: 0, a: [[1, 3], [1, 2, 3, 4]]}, coll.findOne());
}
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [[0, 1, 2, 3], [1, 2, 3, 4]]}));
+assert.commandWorked(coll.insert({_id: 0, a: [[0, 1, 2, 3], [1, 2, 3, 4]]}));
assert.commandWorked(coll.update({_id: 0}, {$pullAll: {"a.$[]": [0, 2]}}));
assert.eq({_id: 0, a: [[1, 3], [1, 3, 4]]}, coll.findOne());
// $pull.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]}));
+assert.commandWorked(coll.insert({_id: 0, a: [[0, 1], [1, 2]]}));
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$pull: {"a.$[i]": 1}}, {arrayFilters: [{i: 2}]}));
+ assert.commandWorked(coll.update({_id: 0}, {$pull: {"a.$[i]": 1}}, {arrayFilters: [{i: 2}]}));
assert.eq({_id: 0, a: [[0, 1], [2]]}, coll.findOne());
}
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]}));
-assert.writeOK(coll.update({_id: 0}, {$pull: {"a.$[]": 1}}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: [[0, 1], [1, 2]]}));
+assert.commandWorked(coll.update({_id: 0}, {$pull: {"a.$[]": 1}}));
assert.eq({_id: 0, a: [[0], [2]]}, coll.findOne());
// $push.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [2, 3]]}));
+assert.commandWorked(coll.insert({_id: 0, a: [[0, 1], [2, 3]]}));
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$push: {"a.$[i]": 4}}, {arrayFilters: [{i: 0}]}));
+ assert.commandWorked(coll.update({_id: 0}, {$push: {"a.$[i]": 4}}, {arrayFilters: [{i: 0}]}));
assert.eq({_id: 0, a: [[0, 1, 4], [2, 3]]}, coll.findOne());
}
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [2, 3]]}));
-assert.writeOK(coll.update({_id: 0}, {$push: {"a.$[]": 4}}));
+assert.commandWorked(coll.insert({_id: 0, a: [[0, 1], [2, 3]]}));
+assert.commandWorked(coll.update({_id: 0}, {$push: {"a.$[]": 4}}));
assert.eq({_id: 0, a: [[0, 1, 4], [2, 3, 4]]}, coll.findOne());
// $bit.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [NumberInt(0), NumberInt(2)]}));
+assert.commandWorked(coll.insert({_id: 0, a: [NumberInt(0), NumberInt(2)]}));
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(
+ assert.commandWorked(
coll.update({_id: 0}, {$bit: {"a.$[i]": {or: NumberInt(10)}}}, {arrayFilters: [{i: 0}]}));
assert.eq({_id: 0, a: [NumberInt(10), NumberInt(2)]}, coll.findOne());
}
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: [NumberInt(0), NumberInt(2)]}));
-assert.writeOK(coll.update({_id: 0}, {$bit: {"a.$[]": {or: NumberInt(10)}}}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: [NumberInt(0), NumberInt(2)]}));
+assert.commandWorked(coll.update({_id: 0}, {$bit: {"a.$[]": {or: NumberInt(10)}}}));
assert.eq({_id: 0, a: [NumberInt(10), NumberInt(10)]}, coll.findOne());
//
@@ -474,14 +479,15 @@ assert.eq({_id: 0, a: [NumberInt(10), NumberInt(10)]}, coll.findOne());
//
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
-assert.writeOK(coll.insert({_id: 1, a: [0, 2, 0, 2]}));
+assert.commandWorked(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+assert.commandWorked(coll.insert({_id: 1, a: [0, 2, 0, 2]}));
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({}, {$set: {"a.$[i]": 3}}, {multi: true, arrayFilters: [{i: 0}]}));
+ assert.commandWorked(
+ coll.update({}, {$set: {"a.$[i]": 3}}, {multi: true, arrayFilters: [{i: 0}]}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 1, 3, 1]});
assert.eq(coll.findOne({_id: 1}), {_id: 1, a: [3, 2, 3, 2]});
}
-assert.writeOK(coll.update({}, {$set: {"a.$[]": 3}}, {multi: true}));
+assert.commandWorked(coll.update({}, {$set: {"a.$[]": 3}}, {multi: true}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 3, 3, 3]});
assert.eq(coll.findOne({_id: 1}), {_id: 1, a: [3, 3, 3, 3]});
@@ -492,8 +498,8 @@ assert.eq(coll.findOne({_id: 1}), {_id: 1, a: [3, 3, 3, 3]});
if (db.getMongo().writeMode() === "commands") {
// arrayFilters respect operation collation.
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: ["foo", "FOO"]}));
- assert.writeOK(
+ assert.commandWorked(coll.insert({_id: 0, a: ["foo", "FOO"]}));
+ assert.commandWorked(
coll.update({_id: 0},
{$set: {"a.$[i]": "bar"}},
{arrayFilters: [{i: "foo"}], collation: {locale: "en_US", strength: 2}}));
@@ -504,8 +510,9 @@ if (db.getMongo().writeMode() === "commands") {
assert.commandWorked(
db.createCollection(collName, {collation: {locale: "en_US", strength: 2}}));
coll = db[collName];
- assert.writeOK(coll.insert({_id: 0, a: ["foo", "FOO"]}));
- assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": "bar"}}, {arrayFilters: [{i: "foo"}]}));
+ assert.commandWorked(coll.insert({_id: 0, a: ["foo", "FOO"]}));
+ assert.commandWorked(
+ coll.update({_id: 0}, {$set: {"a.$[i]": "bar"}}, {arrayFilters: [{i: "foo"}]}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: ["bar", "bar"]});
}
@@ -515,40 +522,42 @@ if (db.getMongo().writeMode() === "commands") {
// Update all documents in array.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]}));
-assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[].b": 2}}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]}));
+assert.commandWorked(coll.update({_id: 0}, {$set: {"a.$[].b": 2}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 2}, {b: 2}]});
// Update all matching documents in array.
if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]}));
- assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i].b": 2}}, {arrayFilters: [{"i.b": 0}]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]}));
+ assert.commandWorked(
+ coll.update({_id: 0}, {$set: {"a.$[i].b": 2}}, {arrayFilters: [{"i.b": 0}]}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 2}, {b: 1}]});
}
// Update all matching scalars in array.
if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 1]}));
- assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [0, 1]}));
+ assert.commandWorked(coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1]});
}
// Update all matching scalars in array of arrays.
if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [0, 1]]}));
- assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[].$[j]": 2}}, {arrayFilters: [{j: 0}]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [[0, 1], [0, 1]]}));
+ assert.commandWorked(
+ coll.update({_id: 0}, {$set: {"a.$[].$[j]": 2}}, {arrayFilters: [{j: 0}]}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[2, 1], [2, 1]]});
}
// Update all matching documents in nested array.
if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.writeOK(
+ assert.commandWorked(
coll.insert({_id: 0, a: [{b: 0, c: [{d: 0}, {d: 1}]}, {b: 1, c: [{d: 0}, {d: 1}]}]}));
- assert.writeOK(coll.update(
+ assert.commandWorked(coll.update(
{_id: 0}, {$set: {"a.$[i].c.$[j].d": 2}}, {arrayFilters: [{"i.b": 0}, {"j.d": 0}]}));
assert.eq(coll.findOne({_id: 0}),
{_id: 0, a: [{b: 0, c: [{d: 2}, {d: 1}]}, {b: 1, c: [{d: 0}, {d: 1}]}]});
@@ -557,8 +566,8 @@ if (db.getMongo().writeMode() === "commands") {
// Update all scalars in array matching a logical predicate.
if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 1, 3]}));
- assert.writeOK(
+ assert.commandWorked(coll.insert({_id: 0, a: [0, 1, 3]}));
+ assert.commandWorked(
coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{$or: [{i: 0}, {i: 3}]}]}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1, 2]});
}
@@ -606,7 +615,7 @@ if (db.getMongo().writeMode() === "commands") {
// Include an implicit array traversal in a path in an update modifier.
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: 0}]}));
res = coll.update({_id: 0}, {$set: {"a.b": 1}});
assert.writeErrorWithCode(res, ErrorCodes.PathNotViable);
assert.neq(
@@ -633,7 +642,7 @@ if (db.getMongo().writeMode() === "commands") {
"update failed for a reason other than bad array filter identifier: " +
tojson(res.getWriteError()));
- assert.writeOK(coll.insert({_id: 0, a: [0], b: [{j: 0}]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [0], b: [{j: 0}]}));
res = coll.update({_id: 0}, {$set: {"a.$[i.j]": 1, "b.$[i]": 1}}, {arrayFilters: [{"i.j": 0}]});
assert.writeErrorWithCode(res, ErrorCodes.PathNotViable);
assert.neq(
@@ -650,15 +659,15 @@ if (db.getMongo().writeMode() === "commands") {
// "a.$[i].b.$[k].c" and "a.$[j].b.$[k].d" are not a conflict, even if i and j are not
// disjoint.
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{x: 0, b: [{y: 0, c: 0, d: 0}]}]}));
- assert.writeOK(coll.update({_id: 0},
- {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].d": 1}},
- {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [{x: 0, b: [{y: 0, c: 0, d: 0}]}]}));
+ assert.commandWorked(coll.update({_id: 0},
+ {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].d": 1}},
+ {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}]}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{x: 0, b: [{y: 0, c: 1, d: 1}]}]});
// "a.$[i].b.$[k].c" and "a.$[j].b.$[k].c" are a conflict iff i and j are not disjoint.
coll.drop();
- assert.writeOK(
+ assert.commandWorked(
coll.insert({_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 0}]}]}));
res = coll.update({_id: 0},
@@ -669,16 +678,16 @@ if (db.getMongo().writeMode() === "commands") {
res.getWriteError().errmsg.indexOf("Update created a conflict at 'a.0.b.$[k].c'"),
"update failed for a reason other than conflicting array updates");
- assert.writeOK(coll.update({_id: 0},
- {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].c": 2}},
- {arrayFilters: [{"i.x": 0}, {"j.x": 1}, {"k.y": 0}]}));
+ assert.commandWorked(coll.update({_id: 0},
+ {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].c": 2}},
+ {arrayFilters: [{"i.x": 0}, {"j.x": 1}, {"k.y": 0}]}));
assert.eq(coll.findOne({_id: 0}),
{_id: 0, a: [{x: 0, b: [{y: 0, c: 1}]}, {x: 1, b: [{y: 0, c: 2}]}]});
// "a.$[i].b.$[k].c" and "a.$[j].b.$[m].c" are a conflict iff k and m intersect for some
// element of a matching i and j.
coll.drop();
- assert.writeOK(coll.insert(
+ assert.commandWorked(coll.insert(
{_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 0}, {y: 1, c: 0}]}]}));
res = coll.update({_id: 0},
@@ -689,9 +698,10 @@ if (db.getMongo().writeMode() === "commands") {
res.getWriteError().errmsg.indexOf("Update created a conflict at 'a.0.b.0.c'"),
"update failed for a reason other than conflicting array updates");
- assert.writeOK(coll.update({_id: 0},
- {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[m].c": 2}},
- {arrayFilters: [{"i.x": 1}, {"j.x": 1}, {"k.y": 0}, {"m.y": 1}]}));
+ assert.commandWorked(
+ coll.update({_id: 0},
+ {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[m].c": 2}},
+ {arrayFilters: [{"i.x": 1}, {"j.x": 1}, {"k.y": 0}, {"m.y": 1}]}));
assert.eq(coll.findOne({_id: 0}),
{_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 1}, {y: 1, c: 2}]}]});
}
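// A minimal standalone sketch of the nested-arrayFilters pattern exercised
// above, assuming a scratch collection db.afExample (names are illustrative):
db.afExample.drop();
assert.commandWorked(db.afExample.insert(
    {_id: 0, courses: [{name: "algebra", grades: [50, 95]}, {name: "biology", grades: [60]}]}));
// Each $[<id>] placeholder in the update path must be matched by exactly one
// filter document keyed on that identifier; "c" binds outer array elements,
// "g" binds elements of the inner grades array.
assert.commandWorked(db.afExample.update({_id: 0},
                                         {$set: {"courses.$[c].grades.$[g]": 65}},
                                         {arrayFilters: [{"c.name": "algebra"}, {g: {$lt: 60}}]}));
assert.eq(db.afExample.findOne({_id: 0}).courses[0].grades, [65, 95]);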
diff --git a/jstests/core/update_array_offset_positional.js b/jstests/core/update_array_offset_positional.js
index 8e433831c01..210e4d65bb7 100644
--- a/jstests/core/update_array_offset_positional.js
+++ b/jstests/core/update_array_offset_positional.js
@@ -12,19 +12,19 @@ coll.drop();
//
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [0]}));
+assert.commandWorked(coll.insert({_id: 0, a: [0]}));
assert.writeError(coll.update({_id: 0, "a.0": 0}, {$set: {"a.$": 1}}));
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: 0}]}));
assert.writeError(coll.update({_id: 0, "a.0.b": 0}, {$set: {"a.$.b": 1}}));
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [[0]]}));
+assert.commandWorked(coll.insert({_id: 0, a: [[0]]}));
assert.writeError(coll.update({_id: 0, "a.0.0": 0}, {$set: {"a.$.0": 1}}));
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [{b: [0]}]}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: [0]}]}));
assert.writeError(coll.update({_id: 0, "a.0.b.0": 0}, {$set: {"a.$.b.0": 1}}));
//
@@ -33,13 +33,13 @@ assert.writeError(coll.update({_id: 0, "a.0.b.0": 0}, {$set: {"a.$.b.0": 1}}));
//
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [{b: [0, 1]}]}));
-assert.writeOK(coll.update({_id: 0, "a.0.b": 1}, {$set: {"a.0.b.$": 2}}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: [0, 1]}]}));
+assert.commandWorked(coll.update({_id: 0, "a.0.b": 1}, {$set: {"a.0.b.$": 2}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: [0, 2]}]});
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [{b: [0, 1]}]}));
-assert.writeOK(coll.update({_id: 0, "a.b.1": 1}, {$set: {"a.$.b.1": 2}}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: [0, 1]}]}));
+assert.commandWorked(coll.update({_id: 0, "a.b.1": 1}, {$set: {"a.$.b.1": 2}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: [0, 2]}]});
//
@@ -48,22 +48,22 @@ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: [0, 2]}]});
//
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [0]}));
-assert.writeOK(coll.update({_id: 0, a: 1, "b.0": 0}, {$set: {"a.$": 2}}));
+assert.commandWorked(coll.insert({_id: 0, a: [0, 1], b: [0]}));
+assert.commandWorked(coll.update({_id: 0, a: 1, "b.0": 0}, {$set: {"a.$": 2}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [0]});
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [{c: 0}]}));
-assert.writeOK(coll.update({_id: 0, a: 1, "b.0.c": 0}, {$set: {"a.$": 2}}));
+assert.commandWorked(coll.insert({_id: 0, a: [0, 1], b: [{c: 0}]}));
+assert.commandWorked(coll.update({_id: 0, a: 1, "b.0.c": 0}, {$set: {"a.$": 2}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [{c: 0}]});
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [[0]]}));
-assert.writeOK(coll.update({_id: 0, a: 1, "b.0.0": 0}, {$set: {"a.$": 2}}));
+assert.commandWorked(coll.insert({_id: 0, a: [0, 1], b: [[0]]}));
+assert.commandWorked(coll.update({_id: 0, a: 1, "b.0.0": 0}, {$set: {"a.$": 2}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [[0]]});
coll.drop();
-assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [{c: [0]}]}));
-assert.writeOK(coll.update({_id: 0, a: 1, "b.0.c.0": 0}, {$set: {"a.$": 2}}));
+assert.commandWorked(coll.insert({_id: 0, a: [0, 1], b: [{c: [0]}]}));
+assert.commandWorked(coll.update({_id: 0, a: 1, "b.0.c.0": 0}, {$set: {"a.$": 2}}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [{c: [0]}]});
}());
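// A minimal standalone sketch of the rule exercised above, assuming a scratch
// collection db.posExample: the positional $ operator is only bound when the
// query matches on the array field itself, not on a numeric offset into it.
db.posExample.drop();
assert.commandWorked(db.posExample.insert({_id: 0, a: [0, 1, 2]}));
// "a: 1" matches an array element, so $ resolves to that element's index.
assert.commandWorked(db.posExample.update({_id: 0, a: 1}, {$set: {"a.$": 9}}));
assert.eq(db.posExample.findOne({_id: 0}).a, [0, 9, 2]);
// "a.1" is an explicit offset match and leaves $ unbound, so this errors.
assert.writeError(db.posExample.update({_id: 0, "a.1": 9}, {$set: {"a.$": 1}}));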
diff --git a/jstests/core/update_arraymatch6.js b/jstests/core/update_arraymatch6.js
index a32bbf3c7d7..ec318370561 100644
--- a/jstests/core/update_arraymatch6.js
+++ b/jstests/core/update_arraymatch6.js
@@ -10,7 +10,7 @@ t.drop();
function doTest() {
t.save({a: [{id: 1, x: [5, 6, 7]}, {id: 2, x: [8, 9, 10]}]});
res = t.update({'a.id': 1}, {$set: {'a.$.x': [1, 1, 1]}});
- assert.writeOK(res);
+ assert.commandWorked(res);
assert.eq.automsg("1", "t.findOne().a[ 0 ].x[ 0 ]");
}
diff --git a/jstests/core/update_bit_examples.js b/jstests/core/update_bit_examples.js
index 6545751aacd..0b8f868ea17 100644
--- a/jstests/core/update_bit_examples.js
+++ b/jstests/core/update_bit_examples.js
@@ -12,21 +12,21 @@ coll.drop();
coll.remove({});
coll.save({_id: 1, a: NumberInt(2)});
res = coll.update({}, {$bit: {a: {and: NumberInt(4)}}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(coll.findOne().a, 0);
// $bit or
coll.remove({});
coll.save({_id: 1, a: NumberInt(2)});
res = coll.update({}, {$bit: {a: {or: NumberInt(4)}}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(coll.findOne().a, 6);
// $bit xor
coll.remove({});
coll.save({_id: 1, a: NumberInt(0)});
res = coll.update({}, {$bit: {a: {xor: NumberInt(4)}}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(coll.findOne().a, 4);
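// A minimal standalone sketch of $bit, assuming a scratch collection
// db.bitExample: operands must be integral (NumberInt/NumberLong), and the
// result is the bitwise combination of the stored value and the operand.
db.bitExample.drop();
assert.commandWorked(db.bitExample.insert({_id: 1, flags: NumberInt(5)}));  // 0b0101
assert.commandWorked(db.bitExample.update({_id: 1}, {$bit: {flags: {xor: NumberInt(3)}}}));  // 0b0011
assert.eq(db.bitExample.findOne({_id: 1}).flags, 6);  // 5 XOR 3 == 0b0110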
// SERVER-19706 Empty bit operation.
diff --git a/jstests/core/update_currentdate_examples.js b/jstests/core/update_currentdate_examples.js
index 9cde5cad16e..e8a3da2ea09 100644
--- a/jstests/core/update_currentdate_examples.js
+++ b/jstests/core/update_currentdate_examples.js
@@ -12,19 +12,19 @@ coll.drop();
coll.remove({});
coll.save({_id: 1, a: 2});
res = coll.update({}, {$currentDate: {a: true}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert(coll.findOne().a.constructor == Date);
// $currentDate type = date
coll.remove({});
coll.save({_id: 1, a: 2});
res = coll.update({}, {$currentDate: {a: {$type: "date"}}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert(coll.findOne().a.constructor == Date);
// $currentDate type = timestamp
coll.remove({});
coll.save({_id: 1, a: 2});
res = coll.update({}, {$currentDate: {a: {$type: "timestamp"}}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert(coll.findOne().a.constructor == Timestamp);
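// A minimal standalone sketch of $currentDate, assuming a scratch collection
// db.cdExample: `true` is shorthand for {$type: "date"}, while
// {$type: "timestamp"} stores a BSON Timestamp instead.
db.cdExample.drop();
assert.commandWorked(db.cdExample.insert({_id: 1}));
assert.commandWorked(
    db.cdExample.update({_id: 1}, {$currentDate: {d: true, t: {$type: "timestamp"}}}));
var cdDoc = db.cdExample.findOne({_id: 1});
assert(cdDoc.d.constructor == Date, tojson(cdDoc));
assert(cdDoc.t.constructor == Timestamp, tojson(cdDoc));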
diff --git a/jstests/core/update_min_max_examples.js b/jstests/core/update_min_max_examples.js
index 3ec86705a1f..e8fa949811b 100644
--- a/jstests/core/update_min_max_examples.js
+++ b/jstests/core/update_min_max_examples.js
@@ -9,13 +9,13 @@ coll.drop();
// $min for number
coll.insert({_id: 1, a: 2});
res = coll.update({_id: 1}, {$min: {a: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(coll.findOne({_id: 1}).a, 1);
// $max for number
coll.insert({_id: 2, a: 2});
res = coll.update({_id: 2}, {$max: {a: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(coll.findOne({_id: 2}).a, 2);
// $min for Date
@@ -36,7 +36,7 @@ date.setMilliseconds(date.getMilliseconds() + 2);
// Test that we have advanced the date and it's no longer the same as the one we inserted.
assert.eq(null, coll.findOne({_id: 4, a: date}));
res = coll.update({_id: 4}, {$max: {a: date}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(coll.findOne({_id: 4}).a, date);
// $max for small number
@@ -44,20 +44,20 @@ coll.insert({_id: 5, a: 1e-15});
// Slightly bigger than 1e-15.
const biggerval = 0.000000000000001000000000000001;
res = coll.update({_id: 5}, {$max: {a: biggerval}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(coll.findOne({_id: 5}).a, biggerval);
// $min for a small number
coll.insert({_id: 6, a: biggerval});
res = coll.update({_id: 6}, {$min: {a: 1e-15}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(coll.findOne({_id: 6}).a, 1e-15);
// $max with positional operator
let insertdoc = {_id: 7, y: [{a: 2}, {a: 6}, {a: [9, 1, 1]}]};
coll.insert(insertdoc);
res = coll.update({_id: 7, "y.a": 6}, {$max: {"y.$.a": 7}});
-assert.writeOK(res);
+assert.commandWorked(res);
insertdoc.y[1].a = 7;
assert.docEq(coll.findOne({_id: 7}), insertdoc);
@@ -68,7 +68,7 @@ insertdoc = {
};
coll.insert(insertdoc);
res = coll.update({_id: 8, "y.a": 6}, {$min: {"y.$.a": 5}});
-assert.writeOK(res);
+assert.commandWorked(res);
insertdoc.y[1].a = 5;
assert.docEq(coll.findOne({_id: 8}), insertdoc);
}());
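// A minimal standalone sketch of $min/$max, assuming a scratch collection
// db.mmExample: the update applies only when the operand compares lower
// (for $min) or higher (for $max) than the stored value, per BSON ordering.
db.mmExample.drop();
assert.commandWorked(db.mmExample.insert({_id: 1, lo: 5, hi: 5}));
assert.commandWorked(db.mmExample.update({_id: 1}, {$min: {lo: 3}, $max: {hi: 3}}));
var mmDoc = db.mmExample.findOne({_id: 1});
assert.eq(mmDoc.lo, 3);  // 3 < 5, so $min updated the field.
assert.eq(mmDoc.hi, 5);  // 3 < 5, so $max left the field unchanged.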
diff --git a/jstests/core/update_modifier_pop.js b/jstests/core/update_modifier_pop.js
index 77c6bae702c..75d43fc5967 100644
--- a/jstests/core/update_modifier_pop.js
+++ b/jstests/core/update_modifier_pop.js
@@ -6,7 +6,7 @@
let coll = db.update_modifier_pop;
coll.drop();
-assert.writeOK(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
// $pop with value of 0 fails to parse.
assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": 0}}), ErrorCodes.FailedToParse);
@@ -24,49 +24,49 @@ assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": 1.1}}), ErrorCode
assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {a: {b: 1}}}), ErrorCodes.FailedToParse);
// $pop is a no-op when the path does not exist.
-let writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
+let writeRes = assert.commandWorked(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
assert.eq(writeRes.nMatched, 1);
if (db.getMongo().writeMode() === "commands") {
assert.eq(writeRes.nModified, 0);
}
// $pop is a no-op when the path partially exists.
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: {c: 1}}));
-writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: {c: 1}}));
+writeRes = assert.commandWorked(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
assert.eq(writeRes.nMatched, 1);
if (db.getMongo().writeMode() === "commands") {
assert.eq(writeRes.nModified, 0);
}
// $pop fails when the path is blocked by a scalar element.
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: {b: 1}}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: {b: 1}}));
assert.writeError(coll.update({_id: 0}, {$pop: {"a.b.c": 1}}));
// $pop fails when the path is blocked by an array element.
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: {b: [1, 2]}}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: {b: [1, 2]}}));
assert.writeError(coll.update({_id: 0}, {$pop: {"a.b.c": 1}}));
// $pop fails when the path exists but is not an array.
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: {b: {c: 1}}}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: {b: {c: 1}}}));
assert.writeError(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
// $pop is a no-op when the path contains an empty array.
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: {b: []}}));
-writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: {b: []}}));
+writeRes = assert.commandWorked(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
assert.eq(writeRes.nMatched, 1);
if (db.getMongo().writeMode() === "commands") {
assert.eq(writeRes.nModified, 0);
}
// Successfully pop from the end of an array.
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: {b: [1, 2, 3]}}));
-writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: {b: [1, 2, 3]}}));
+writeRes = assert.commandWorked(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
assert.eq(writeRes.nMatched, 1);
if (db.getMongo().writeMode() === "commands") {
assert.eq(writeRes.nModified, 1);
@@ -74,7 +74,7 @@ if (db.getMongo().writeMode() === "commands") {
assert.eq({_id: 0, a: {b: [1, 2]}}, coll.findOne());
// Successfully pop from the beginning of an array.
-writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": -1}}));
+writeRes = assert.commandWorked(coll.update({_id: 0}, {$pop: {"a.b": -1}}));
assert.eq(writeRes.nMatched, 1);
if (db.getMongo().writeMode() === "commands") {
assert.eq(writeRes.nModified, 1);
@@ -82,29 +82,30 @@ if (db.getMongo().writeMode() === "commands") {
assert.eq({_id: 0, a: {b: [2]}}, coll.findOne());
// $pop with the positional ($) operator.
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: [{b: [1, 2, 3]}, {b: [4, 5, 6]}]}));
-assert.writeOK(coll.update({_id: 0, "a.b": 5}, {$pop: {"a.$.b": 1}}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: [1, 2, 3]}, {b: [4, 5, 6]}]}));
+assert.commandWorked(coll.update({_id: 0, "a.b": 5}, {$pop: {"a.$.b": 1}}));
assert.eq({_id: 0, a: [{b: [1, 2, 3]}, {b: [4, 5]}]}, coll.findOne());
// $pop with arrayFilters.
if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [{b: [1, 2]}, {b: [4, 5]}, {b: [2, 3]}]}));
- assert.writeOK(coll.update({_id: 0}, {$pop: {"a.$[i].b": -1}}, {arrayFilters: [{"i.b": 2}]}));
+ assert.commandWorked(coll.remove({}));
+ assert.commandWorked(coll.insert({_id: 0, a: [{b: [1, 2]}, {b: [4, 5]}, {b: [2, 3]}]}));
+ assert.commandWorked(
+ coll.update({_id: 0}, {$pop: {"a.$[i].b": -1}}, {arrayFilters: [{"i.b": 2}]}));
assert.eq({_id: 0, a: [{b: [2]}, {b: [4, 5]}, {b: [3]}]}, coll.findOne());
}
// $pop from a nested array.
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: [1, [2, 3, 4]]}));
-assert.writeOK(coll.update({_id: 0}, {$pop: {"a.1": 1}}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: [1, [2, 3, 4]]}));
+assert.commandWorked(coll.update({_id: 0}, {$pop: {"a.1": 1}}));
assert.eq({_id: 0, a: [1, [2, 3]]}, coll.findOne());
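// A minimal standalone sketch of $pop direction, assuming a scratch
// collection db.popExample: 1 removes the last element, -1 the first.
db.popExample.drop();
assert.commandWorked(db.popExample.insert({_id: 0, a: [1, 2, 3]}));
assert.commandWorked(db.popExample.update({_id: 0}, {$pop: {a: 1}}));   // drops 3
assert.commandWorked(db.popExample.update({_id: 0}, {$pop: {a: -1}}));  // drops 1
assert.eq(db.popExample.findOne({_id: 0}).a, [2]);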
// $pop is a no-op when array element in path does not exist.
-assert.writeOK(coll.remove({}));
-assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]}));
-writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.2.b": 1}}));
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]}));
+writeRes = assert.commandWorked(coll.update({_id: 0}, {$pop: {"a.2.b": 1}}));
assert.eq(writeRes.nMatched, 1);
if (db.getMongo().writeMode() === "commands") {
assert.eq(writeRes.nModified, 0);
diff --git a/jstests/core/update_mul_examples.js b/jstests/core/update_mul_examples.js
index 831bd257380..89a0ac70532 100644
--- a/jstests/core/update_mul_examples.js
+++ b/jstests/core/update_mul_examples.js
@@ -12,33 +12,33 @@ coll.drop();
coll.remove({});
coll.save({_id: 1, a: 2});
res = coll.update({}, {$mul: {a: 10}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(coll.findOne().a, 20);
// $mul negative
coll.remove({});
coll.save({_id: 1, a: 2});
res = coll.update({}, {$mul: {a: -10}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(coll.findOne().a, -20);
// $mul zero
coll.remove({});
coll.save({_id: 1, a: 2});
res = coll.update({}, {$mul: {a: 0}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(coll.findOne().a, 0);
// $mul decimal
coll.remove({});
coll.save({_id: 1, a: 2});
res = coll.update({}, {$mul: {a: 1.1}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(coll.findOne().a, 2.2);
// $mul negative decimal
coll.remove({});
coll.save({_id: 1, a: 2});
res = coll.update({}, {$mul: {a: -0.1}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(coll.findOne().a, -0.2);
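// A minimal standalone sketch of $mul on a missing field, assuming a scratch
// collection db.mulExample: the field is created and set to zero of the same
// numeric type as the multiplier.
db.mulExample.drop();
assert.commandWorked(db.mulExample.insert({_id: 1}));
assert.commandWorked(db.mulExample.update({_id: 1}, {$mul: {a: NumberInt(10)}}));
assert.eq(db.mulExample.findOne({_id: 1}).a, 0);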
diff --git a/jstests/core/update_multi5.js b/jstests/core/update_multi5.js
index 8f797d8de2f..c8823ef1904 100644
--- a/jstests/core/update_multi5.js
+++ b/jstests/core/update_multi5.js
@@ -6,10 +6,11 @@
var t = db.update_multi5;
t.drop();
-assert.writeOK(t.insert({path: 'r1', subscribers: [1, 2]}));
-assert.writeOK(t.insert({path: 'r2', subscribers: [3, 4]}));
+assert.commandWorked(t.insert({path: 'r1', subscribers: [1, 2]}));
+assert.commandWorked(t.insert({path: 'r2', subscribers: [3, 4]}));
-var res = assert.writeOK(t.update({}, {$addToSet: {subscribers: 5}}, {upsert: false, multi: true}));
+var res =
+ assert.commandWorked(t.update({}, {$addToSet: {subscribers: 5}}, {upsert: false, multi: true}));
assert.eq(res.nMatched, 2, tojson(res));
diff --git a/jstests/core/update_server-12848.js b/jstests/core/update_server-12848.js
index 0f86e0135b3..9f14feb09cd 100644
--- a/jstests/core/update_server-12848.js
+++ b/jstests/core/update_server-12848.js
@@ -10,11 +10,11 @@ t.drop();
var orig = {"_id": 1, "a": [1, []]};
res = t.insert(orig);
-assert.writeOK(res, "insert");
+assert.commandWorked(res, "insert");
assert.eq(orig, t.findOne());
res = t.update({"_id": 1}, {$addToSet: {"a.1": 1}});
-assert.writeOK(res, "update");
+assert.commandWorked(res, "update");
var updated = {"_id": 1, "a": [1, [1]]};
assert.eq(updated, t.findOne());
diff --git a/jstests/core/updatea.js b/jstests/core/updatea.js
index dd54ed04b59..99938c433fa 100644
--- a/jstests/core/updatea.js
+++ b/jstests/core/updatea.js
@@ -13,22 +13,22 @@ orig = {
};
res = t.save(orig);
-assert.writeOK(res);
+assert.commandWorked(res);
// SERVER-181
res = t.update({}, {$set: {"a.0.x": 3}});
-assert.writeOK(res);
+assert.commandWorked(res);
orig.a[0].x = 3;
assert.eq(orig, t.findOne(), "A1");
res = t.update({}, {$set: {"a.1.z": 17}});
-assert.writeOK(res);
+assert.commandWorked(res);
orig.a[1].z = 17;
assert.eq(orig, t.findOne(), "A2");
// SERVER-273
res = t.update({}, {$unset: {"a.1.y": 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
delete orig.a[1].y;
assert.eq(orig, t.findOne(), "A3");
@@ -39,10 +39,10 @@ orig = {
comments: [{name: "blah", rate_up: 0, rate_ups: []}]
};
res = t.save(orig);
-assert.writeOK(res);
+assert.commandWorked(res);
res = t.update({}, {$inc: {"comments.0.rate_up": 1}, $push: {"comments.0.rate_ups": 99}});
-assert.writeOK(res);
+assert.commandWorked(res);
orig.comments[0].rate_up++;
orig.comments[0].rate_ups.push(99);
assert.eq(orig, t.findOne(), "B1");
@@ -56,22 +56,22 @@ for (i = 0; i < 12; i++)
orig.a.push(i);
res = t.save(orig);
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(orig, t.findOne(), "C1");
res = t.update({}, {$inc: {"a.0": 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
orig.a[0]++;
assert.eq(orig, t.findOne(), "C2");
res = t.update({}, {$inc: {"a.10": 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
orig.a[10]++;
// SERVER-3218
t.drop();
t.insert({"a": {"c00": 1}, 'c': 2});
res = t.update({"c": 2}, {'$inc': {'a.c000': 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq({"c00": 1, "c000": 1}, t.findOne().a, "D1");
diff --git a/jstests/core/updateh.js b/jstests/core/updateh.js
index 706f329d0e0..1fd3d62750d 100644
--- a/jstests/core/updateh.js
+++ b/jstests/core/updateh.js
@@ -12,7 +12,7 @@ t.drop();
t.insert({x: 1});
res = t.update({x: 1}, {$set: {y: 1}}); // ok
-assert.writeOK(res);
+assert.commandWorked(res);
res = t.update({x: 1}, {$set: {$z: 1}}); // not ok
assert.writeError(res);
@@ -46,7 +46,7 @@ res = t.update({n: 0}, {$set: {"$secret.agent.x": 1}});
assert.writeError(res);
res = t.update({n: 0}, {$set: {"secret.agent$": 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.save({_id: 0, n: 0});
// Test that you cannot update database references into top level fields
@@ -73,15 +73,15 @@ assert.writeError(res);
// the correct case ( subdoc)
// SERVER-3231
res = t.update({n: 0}, {$set: {x: {$ref: '1', $id: 1, $db: '1'}}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.save({_id: 0, n: 0});
// Test that '$' can occur elsewhere in a field name.
// SERVER-7557
res = t.update({n: 0}, {$set: {ke$sha: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.save({_id: 0, n: 0});
res = t.update({n: 0}, {$set: {more$$moreproblem$: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.save({_id: 0, n: 0});
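// A minimal standalone sketch of the naming rule exercised above, assuming a
// scratch collection db.dollarExample: '$' may appear inside or at the end of
// a field name, but a name beginning with '$' is rejected by update.
db.dollarExample.drop();
assert.commandWorked(db.dollarExample.insert({_id: 0}));
assert.commandWorked(db.dollarExample.update({_id: 0}, {$set: {"ke$ha": 1}}));
assert.writeError(db.dollarExample.update({_id: 0}, {$set: {"$bad": 1}}));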
diff --git a/jstests/core/updatel.js b/jstests/core/updatel.js
index ce5bd4d5dc7..9990aa6c6f2 100644
--- a/jstests/core/updatel.js
+++ b/jstests/core/updatel.js
@@ -47,11 +47,11 @@ assert.eq([{_id: 0, a: [{b: {c: 1}}]}], t.find().toArray(), "No update occurred.
t.drop();
t.insert({_id: 1, arr: [{a: "z", b: 1}]});
res = t.update({"arr.a": /^z$/}, {$set: {"arr.$.b": 2}}, false, true);
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(t.findOne().arr[0], {a: "z", b: 2});
t.drop();
t.insert({_id: 1, arr: [{a: "z", b: 1}, {a: "abc", b: 2}, {a: "lmn", b: 3}]});
res = t.update({"arr.a": /l/}, {$inc: {"arr.$.b": 2}}, false, true);
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(t.findOne().arr[2], {a: "lmn", b: 5});
diff --git a/jstests/core/upsert_and.js b/jstests/core/upsert_and.js
index 111dc140b8f..8c13f9b63d4 100644
--- a/jstests/core/upsert_and.js
+++ b/jstests/core/upsert_and.js
@@ -9,34 +9,34 @@ coll = db.upsert4;
coll.drop();
res = coll.update({_id: 1, $and: [{c: 1}, {d: 1}], a: 12}, {$inc: {y: 1}}, true);
-assert.writeOK(res);
+assert.commandWorked(res);
assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1, a: 12, y: 1});
coll.remove({});
res = coll.update({$and: [{c: 1}, {d: 1}]}, {$setOnInsert: {_id: 1}}, true);
-assert.writeOK(res);
+assert.commandWorked(res);
assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1});
coll.remove({});
res = coll.update({$and: [{c: 1}, {d: 1}, {$or: [{x: 1}]}]}, {$setOnInsert: {_id: 1}}, true);
-assert.writeOK(res);
+assert.commandWorked(res);
assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1, x: 1});
coll.remove({});
res = coll.update({$and: [{c: 1}, {d: 1}], $or: [{x: 1}, {x: 2}]}, {$setOnInsert: {_id: 1}}, true);
-assert.writeOK(res);
+assert.commandWorked(res);
assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1});
coll.remove({});
res = coll.update(
{r: {$gt: 3}, $and: [{c: 1}, {d: 1}], $or: [{x: 1}, {x: 2}]}, {$setOnInsert: {_id: 1}}, true);
-assert.writeOK(res);
+assert.commandWorked(res);
assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1});
coll.remove({});
res = coll.update(
{r: /s/, $and: [{c: 1}, {d: 1}], $or: [{x: 1}, {x: 2}]}, {$setOnInsert: {_id: 1}}, true);
-assert.writeOK(res);
+assert.commandWorked(res);
assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1});
coll.remove({});
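// A minimal standalone sketch of the upsert behavior exercised above,
// assuming a scratch collection db.upsertExample: equality predicates,
// including those nested under $and, seed the inserted document, while
// range predicates such as $gt do not.
db.upsertExample.drop();
assert.commandWorked(db.upsertExample.update(
    {$and: [{c: 1}, {d: 1}], r: {$gt: 3}}, {$setOnInsert: {_id: 1}}, {upsert: true}));
assert.docEq(db.upsertExample.findOne(), {_id: 1, c: 1, d: 1});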
diff --git a/jstests/core/upsert_fields.js b/jstests/core/upsert_fields.js
index 3a76ac93cb1..c8944424bf8 100644
--- a/jstests/core/upsert_fields.js
+++ b/jstests/core/upsert_fields.js
@@ -17,7 +17,7 @@ var upsertedResult = function(query, expr) {
};
var upsertedField = function(query, expr, fieldName) {
- var res = assert.writeOK(upsertedResult(query, expr));
+ var res = assert.commandWorked(upsertedResult(query, expr));
var doc = coll.findOne();
assert.neq(doc, null, "findOne query returned no results! UpdateRes: " + tojson(res));
return doc[fieldName];
diff --git a/jstests/core/verify_update_mods.js b/jstests/core/verify_update_mods.js
index 134668e62bd..1bd9ccdc465 100644
--- a/jstests/core/verify_update_mods.js
+++ b/jstests/core/verify_update_mods.js
@@ -10,36 +10,36 @@ t.drop();
t.save({_id: 1});
res = t.update({}, {$set: {a: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.remove({});
t.save({_id: 1});
res = t.update({}, {$unset: {a: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.remove({});
t.save({_id: 1});
res = t.update({}, {$inc: {a: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.remove({});
t.save({_id: 1});
res = t.update({}, {$mul: {a: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.remove({});
t.save({_id: 1});
res = t.update({}, {$push: {a: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.remove({});
res = t.update({}, {$addToSet: {a: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.remove({});
t.save({_id: 1});
res = t.update({}, {$pull: {a: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.remove({});
t.save({_id: 1});
@@ -49,12 +49,12 @@ t.remove({});
t.save({_id: 1});
res = t.update({}, {$rename: {a: "b"}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.remove({});
t.save({_id: 1});
res = t.update({}, {$bit: {a: {and: NumberLong(1)}}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.remove({});
// SERVER-3223 test $bit can do an upsert
@@ -68,15 +68,15 @@ t.remove({});
t.save({_id: 1});
res = t.update({}, {$currentDate: {a: true}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.remove({});
t.save({_id: 1});
res = t.update({}, {$max: {a: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.remove({});
t.save({_id: 1});
res = t.update({}, {$min: {a: 1}});
-assert.writeOK(res);
+assert.commandWorked(res);
t.remove({});
diff --git a/jstests/core/views/duplicate_ns.js b/jstests/core/views/duplicate_ns.js
index f7693549164..8b22e6769a4 100644
--- a/jstests/core/views/duplicate_ns.js
+++ b/jstests/core/views/duplicate_ns.js
@@ -14,9 +14,9 @@ const collName = "myns";
const viewId = dbName + "." + collName;
assert.commandWorked(viewsDb.dropDatabase());
-assert.writeOK(viewsDb.system.views.remove({_id: viewId}));
+assert.commandWorked(viewsDb.system.views.remove({_id: viewId}));
assert.commandWorked(viewsDb.runCommand({create: collName}));
-assert.writeOK(viewsDb.system.views.insert({
+assert.commandWorked(viewsDb.system.views.insert({
_id: viewId,
viewOn: "coll",
pipeline: [],
diff --git a/jstests/core/views/invalid_system_views.js b/jstests/core/views/invalid_system_views.js
index cdfd8240589..c4358a26dd2 100644
--- a/jstests/core/views/invalid_system_views.js
+++ b/jstests/core/views/invalid_system_views.js
@@ -20,12 +20,12 @@ function runTest(badViewDefinition) {
assert.commandWorked(viewsDB.dropDatabase());
// Create a regular collection, then insert an invalid view into system.views.
- assert.writeOK(viewsDB.collection.insert({x: 1}));
+ assert.commandWorked(viewsDB.collection.insert({x: 1}));
assert.commandWorked(viewsDB.runCommand({create: "collection2"}));
assert.commandWorked(viewsDB.runCommand({create: "collection3"}));
assert.commandWorked(viewsDB.collection.createIndex({x: 1}));
- assert.writeOK(viewsDB.system.views.insert(badViewDefinition),
- "failed to insert " + tojson(badViewDefinition));
+ assert.commandWorked(viewsDB.system.views.insert(badViewDefinition),
+ "failed to insert " + tojson(badViewDefinition));
// Test that a command involving views properly fails with a views-specific error code.
assert.commandFailedWithCode(
@@ -49,12 +49,12 @@ function runTest(badViewDefinition) {
makeErrorMessage("applyOps"));
}
- assert.writeOK(viewsDB.collection.insert({y: "baz"}), makeErrorMessage("insert"));
+ assert.commandWorked(viewsDB.collection.insert({y: "baz"}), makeErrorMessage("insert"));
- assert.writeOK(viewsDB.collection.update({y: "baz"}, {$set: {y: "qux"}}),
- makeErrorMessage("update"));
+ assert.commandWorked(viewsDB.collection.update({y: "baz"}, {$set: {y: "qux"}}),
+ makeErrorMessage("update"));
- assert.writeOK(viewsDB.collection.remove({y: "baz"}), makeErrorMessage("remove"));
+ assert.commandWorked(viewsDB.collection.remove({y: "baz"}), makeErrorMessage("remove"));
assert.commandWorked(
viewsDB.runCommand({findAndModify: "collection", query: {x: 1}, update: {x: 2}}),
@@ -116,7 +116,7 @@ function runTest(badViewDefinition) {
assert.commandWorked(viewsDB.runCommand({drop: "collection2"}), makeErrorMessage("drop"));
// Drop the offending view so that the validate hook succeeds.
- assert.writeOK(viewsDB.system.views.remove(badViewDefinition));
+ assert.commandWorked(viewsDB.system.views.remove(badViewDefinition));
}
runTest({_id: "invalid_system_views.badViewStringPipeline", viewOn: "collection", pipeline: "bad"});
diff --git a/jstests/core/views/views_aggregation.js b/jstests/core/views/views_aggregation.js
index fb78211307f..db833937dda 100644
--- a/jstests/core/views/views_aggregation.js
+++ b/jstests/core/views/views_aggregation.js
@@ -44,7 +44,7 @@ let bulk = coll.initializeUnorderedBulkOp();
allDocuments.forEach(function(doc) {
bulk.insert(doc);
});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Create views on the data.
assert.commandWorked(viewsDB.runCommand({create: "emptyPipelineView", viewOn: "coll"}));
@@ -104,7 +104,7 @@ assert.commandWorked(
viewsDB.invalidDocs.drop();
viewsDB.invalidDocsView.drop();
-assert.writeOK(viewsDB.invalidDocs.insert({illegalField: "present"}));
+assert.commandWorked(viewsDB.invalidDocs.insert({illegalField: "present"}));
assert.commandWorked(viewsDB.createView("invalidDocsView", "invalidDocs", []));
assert.commandWorked(
@@ -122,7 +122,7 @@ const largeStrSize = 10 * 1024 * 1024;
const largeStr = new Array(largeStrSize).join('x');
viewsDB.largeColl.drop();
for (let i = 0; i <= extSortLimit / largeStrSize; ++i) {
- assert.writeOK(viewsDB.largeColl.insert({x: i, largeStr: largeStr}));
+ assert.commandWorked(viewsDB.largeColl.insert({x: i, largeStr: largeStr}));
}
assertErrorCode(viewsDB.largeColl,
[{$sort: {x: -1}}],
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 6fba75008b3..ca92980eba8 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -254,8 +254,8 @@ let viewsCommandTests = {
getLog: {skip: isUnrelated},
getMore: {
setup: function(conn) {
- assert.writeOK(conn.collection.remove({}));
- assert.writeOK(conn.collection.insert([{_id: 1}, {_id: 2}, {_id: 3}]));
+ assert.commandWorked(conn.collection.remove({}));
+ assert.commandWorked(conn.collection.insert([{_id: 1}, {_id: 2}, {_id: 3}]));
},
command: function(conn) {
function testGetMoreForCommand(cmd) {
@@ -308,8 +308,8 @@ let viewsCommandTests = {
isMaster: {skip: isUnrelated},
killCursors: {
setup: function(conn) {
- assert.writeOK(conn.collection.remove({}));
- assert.writeOK(conn.collection.insert([{_id: 1}, {_id: 2}, {_id: 3}]));
+ assert.commandWorked(conn.collection.remove({}));
+ assert.commandWorked(conn.collection.insert([{_id: 1}, {_id: 2}, {_id: 3}]));
},
command: function(conn) {
// First get and check a partial result for an aggregate command.
@@ -592,7 +592,7 @@ for (let command of commands) {
// 'collection'.
assert.commandWorked(dbHandle.dropDatabase());
assert.commandWorked(dbHandle.runCommand({create: "view", viewOn: "collection"}));
- assert.writeOK(dbHandle.collection.insert({x: 1}));
+ assert.commandWorked(dbHandle.collection.insert({x: 1}));
if (subtest.setup !== undefined)
subtest.setup(dbHandle);
diff --git a/jstests/core/views/views_basic.js b/jstests/core/views/views_basic.js
index 1186dbcd779..0b4dcda6001 100644
--- a/jstests/core/views/views_basic.js
+++ b/jstests/core/views/views_basic.js
@@ -27,7 +27,7 @@ bulk.insert({_id: "Oakland", state: "CA", pop: 3});
bulk.insert({_id: "Palo Alto", state: "CA", pop: 10});
bulk.insert({_id: "San Francisco", state: "CA", pop: 4});
bulk.insert({_id: "Trenton", state: "NJ", pop: 5});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Test creating views on both collections and other views, using the database command and the
// shell helper.
diff --git a/jstests/core/views/views_change.js b/jstests/core/views/views_change.js
index 94521013136..acb8253b963 100644
--- a/jstests/core/views/views_change.js
+++ b/jstests/core/views/views_change.js
@@ -42,7 +42,7 @@ resetCollectionAndViews();
// A view is updated when its viewOn is modified. When auth is enabled, we expect collMod to
// fail when specifying "viewOn" but not "pipeline".
-assert.writeOK(collection.insert(doc));
+assert.commandWorked(collection.insert(doc));
assertFindResultEq("view", [doc]);
let res = viewDB.runCommand({collMod: "view", viewOn: "nonexistent"});
if (jsTest.options().auth) {
@@ -56,8 +56,8 @@ resetCollectionAndViews();
// A view is updated when its pipeline is modified. When auth is enabled, we expect collMod to
// fail when specifying "pipeline" but not "viewOn".
-assert.writeOK(collection.insert(doc));
-assert.writeOK(collection.insert({a: 7}));
+assert.commandWorked(collection.insert(doc));
+assert.commandWorked(collection.insert({a: 7}));
assertFindResultEq("view", [doc]);
res = viewDB.runCommand({collMod: "view", pipeline: [{$match: {a: {$gt: 4}}}]});
if (jsTest.options().auth) {
@@ -70,15 +70,15 @@ if (jsTest.options().auth) {
resetCollectionAndViews();
// A view is updated when the backing collection is updated.
-assert.writeOK(collection.insert(doc));
+assert.commandWorked(collection.insert(doc));
assertFindResultEq("view", [doc]);
-assert.writeOK(collection.update({a: 1}, {$set: {a: 2}}));
+assert.commandWorked(collection.update({a: 1}, {$set: {a: 2}}));
assertFindResultEq("view", []);
resetCollectionAndViews();
// A view is updated when a backing view is updated.
-assert.writeOK(collection.insert(doc));
+assert.commandWorked(collection.insert(doc));
assertFindResultEq("viewOnView", [doc]);
assert.commandWorked(viewDB.runCommand(
{collMod: "view", viewOn: "collection", pipeline: [{$match: {nonexistent: 1}}]}));
@@ -87,7 +87,7 @@ assertFindResultEq("viewOnView", []);
resetCollectionAndViews();
// A view appears empty if the backing collection is dropped.
-assert.writeOK(collection.insert(doc));
+assert.commandWorked(collection.insert(doc));
assertFindResultEq("view", [doc]);
assert.commandWorked(viewDB.runCommand({drop: "collection"}));
assertFindResultEq("view", []);
@@ -95,7 +95,7 @@ assertFindResultEq("view", []);
resetCollectionAndViews();
// A view appears empty if a backing view is dropped.
-assert.writeOK(collection.insert(doc));
+assert.commandWorked(collection.insert(doc));
assertFindResultEq("viewOnView", [doc]);
assert.commandWorked(viewDB.runCommand({drop: "view"}));
assertFindResultEq("viewOnView", []);
diff --git a/jstests/core/views/views_collation.js b/jstests/core/views/views_collation.js
index 9c18c27a41b..13eb65c3db7 100644
--- a/jstests/core/views/views_collation.js
+++ b/jstests/core/views/views_collation.js
@@ -468,9 +468,9 @@ assert.commandWorked(viewsDB.runCommand({
collation: {locale: "en", strength: 1}
}));
-assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "case"}));
-assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "Case"}));
-assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "CASE"}));
+assert.commandWorked(viewsDB.case_sensitive_coll.insert({f: "case"}));
+assert.commandWorked(viewsDB.case_sensitive_coll.insert({f: "Case"}));
+assert.commandWorked(viewsDB.case_sensitive_coll.insert({f: "CASE"}));
let explain, cursorStage;
diff --git a/jstests/core/views/views_count.js b/jstests/core/views/views_count.js
index 8fa24191959..fa676fe6f02 100644
--- a/jstests/core/views/views_count.js
+++ b/jstests/core/views/views_count.js
@@ -14,7 +14,7 @@ let bulk = coll.initializeUnorderedBulkOp();
for (let i = 0; i < 10; i++) {
bulk.insert({x: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Create views on the data.
assert.commandWorked(viewsDB.runCommand({create: "identityView", viewOn: "coll"}));
diff --git a/jstests/core/views/views_distinct.js b/jstests/core/views/views_distinct.js
index 8ef9e208a81..0f25ac3beb1 100644
--- a/jstests/core/views/views_distinct.js
+++ b/jstests/core/views/views_distinct.js
@@ -22,7 +22,7 @@ let bulk = coll.initializeUnorderedBulkOp();
allDocuments.forEach(function(doc) {
bulk.insert(doc);
});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Create views on the data.
assert.commandWorked(viewsDB.runCommand({create: "identityView", viewOn: "coll"}));
@@ -106,7 +106,7 @@ bulk = coll.initializeUnorderedBulkOp();
allDocuments.forEach(function(doc) {
bulk.insert(doc);
});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assertIdentityViewDistinctMatchesCollection("a");
assertIdentityViewDistinctMatchesCollection("b");
diff --git a/jstests/core/views/views_drop.js b/jstests/core/views/views_drop.js
index 2f0b9b7e62e..c1e6b20cf30 100644
--- a/jstests/core/views/views_drop.js
+++ b/jstests/core/views/views_drop.js
@@ -14,7 +14,7 @@ let viewsDB = db.getSiblingDB(viewsDBName);
viewsDB.dropDatabase();
// Create collection and a view on it.
-assert.writeOK(viewsDB.coll.insert({x: 1}));
+assert.commandWorked(viewsDB.coll.insert({x: 1}));
assert.commandWorked(viewsDB.createView("view", "coll", []));
assert.eq(
viewsDB.view.find({}, {_id: 0}).toArray(), [{x: 1}], "couldn't find expected doc in view");
diff --git a/jstests/core/views/views_find.js b/jstests/core/views/views_find.js
index 3a7f5f80ce6..1468870d40e 100644
--- a/jstests/core/views/views_find.js
+++ b/jstests/core/views/views_find.js
@@ -37,7 +37,7 @@ let bulk = coll.initializeUnorderedBulkOp();
allDocuments.forEach(function(doc) {
bulk.insert(doc);
});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Create views on the data.
assert.commandWorked(
@@ -93,7 +93,7 @@ assert.eq(explainPlan.executionStats.nReturned, 5);
assert(explainPlan.executionStats.hasOwnProperty("allPlansExecution"));
// Only simple 0 or 1 projections are allowed on views.
-assert.writeOK(viewsDB.coll.insert({arr: [{x: 1}]}));
+assert.commandWorked(viewsDB.coll.insert({arr: [{x: 1}]}));
assert.commandFailedWithCode(
viewsDB.runCommand({find: "identityView", projection: {arr: {$elemMatch: {x: 1}}}}),
ErrorCodes.InvalidPipelineOperator);
diff --git a/jstests/core/views/views_rename.js b/jstests/core/views/views_rename.js
index 9d4f1238810..ee0d2bfbd11 100644
--- a/jstests/core/views/views_rename.js
+++ b/jstests/core/views/views_rename.js
@@ -14,7 +14,7 @@ let coll = db.getCollection(collName);
db.view.drop();
coll.drop();
assert.commandWorked(db.createView("view", collName, []));
-assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 1}));
assert.eq(db.view.find().count(), 1, "couldn't find document in view");
assert.commandWorked(db.system.views.renameCollection("views", /*dropTarget*/ true));
assert.eq(db.view.find().count(),
diff --git a/jstests/core/views/views_stats.js b/jstests/core/views/views_stats.js
index 28c5fa0b9a2..6f098955a00 100644
--- a/jstests/core/views/views_stats.js
+++ b/jstests/core/views/views_stats.js
@@ -69,10 +69,10 @@ if (lastTop === undefined) {
}
lastHistogram = getHistogramStats(view);
-assert.writeOK(coll.insert({}));
-assert.writeOK(coll.update({}, {$set: {x: 1}}));
+assert.commandWorked(coll.insert({}));
+assert.commandWorked(coll.update({}, {$set: {x: 1}}));
coll.aggregate([{$match: {}}]);
-assert.writeOK(coll.remove({}));
+assert.commandWorked(coll.remove({}));
assertTopDiffEq(view, lastTop, "insert", 0);
assertTopDiffEq(view, lastTop, "update", 0);
diff --git a/jstests/core/where4.js b/jstests/core/where4.js
index a4997d07561..123afb2baaf 100644
--- a/jstests/core/where4.js
+++ b/jstests/core/where4.js
@@ -12,30 +12,30 @@ var myDB = db.getSiblingDB("where4");
myDB.dropDatabase();
-assert.writeOK(myDB.where4.insert({x: 1, y: 1}));
-assert.writeOK(myDB.where4.insert({x: 2, y: 1}));
+assert.commandWorked(myDB.where4.insert({x: 1, y: 1}));
+assert.commandWorked(myDB.where4.insert({x: 2, y: 1}));
-assert.writeOK(myDB.where4.update({
+assert.commandWorked(myDB.where4.update({
$where: function() {
return this.x == 1;
}
},
- {$inc: {y: 1}},
- false,
- true));
+ {$inc: {y: 1}},
+ false,
+ true));
assert.eq(2, myDB.where4.findOne({x: 1}).y);
assert.eq(1, myDB.where4.findOne({x: 2}).y);
// Test that where queries work with stored javascript
-assert.writeOK(myDB.system.js.save({
+assert.commandWorked(myDB.system.js.save({
_id: "where4_addOne",
value: function(x) {
return x + 1;
}
}));
-assert.writeOK(
+assert.commandWorked(
myDB.where4.update({$where: "where4_addOne(this.x) == 2"}, {$inc: {y: 1}}, false, true));
assert.eq(3, myDB.where4.findOne({x: 1}).y);
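// A minimal standalone sketch of $where, assuming a scratch collection
// db.whereExample: the predicate runs as JavaScript against each document,
// with the document bound to `this`.
db.whereExample.drop();
assert.commandWorked(db.whereExample.insert([{x: 1, y: 2}, {x: 2, y: 2}]));
assert.eq(1, db.whereExample.find({$where: "this.x + this.y == 4"}).count());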
diff --git a/jstests/decimal/decimal_constructors.js b/jstests/decimal/decimal_constructors.js
index 93a5bfe9703..387a1bee1a9 100644
--- a/jstests/decimal/decimal_constructors.js
+++ b/jstests/decimal/decimal_constructors.js
@@ -7,7 +7,7 @@ col.drop();
// Insert some sample data.
-assert.writeOK(col.insert([
+assert.commandWorked(col.insert([
{d: NumberDecimal('1')},
{d: NumberDecimal(1)},
{d: NumberDecimal(NumberLong('1'))},
@@ -15,7 +15,7 @@ assert.writeOK(col.insert([
{d: NumberDecimal('NaN')},
{d: NumberDecimal('-NaN')}
]),
- 'Initial insertion of decimals failed');
+ 'Initial insertion of decimals failed');
var exactDoubleString = "1427247692705959881058285969449495136382746624";
var exactDoubleTinyString =
diff --git a/jstests/decimal/decimal_find_basic.js b/jstests/decimal/decimal_find_basic.js
index 28a9040a912..c2069699d37 100644
--- a/jstests/decimal/decimal_find_basic.js
+++ b/jstests/decimal/decimal_find_basic.js
@@ -7,7 +7,7 @@ col.drop();
// Insert some sample data.
-assert.writeOK(col.insert([
+assert.commandWorked(col.insert([
{"decimal": NumberDecimal("0")},
{"decimal": NumberDecimal("0.00")},
{"decimal": NumberDecimal("-0")},
@@ -20,7 +20,7 @@ assert.writeOK(col.insert([
{"decimal": NumberDecimal("-Infinity")},
{"decimal": NumberDecimal("Infinity")},
]),
- "Initial insertion of decimals failed");
+ "Initial insertion of decimals failed");
// Zeros
assert.eq(col.find({"decimal": NumberDecimal("0")}).count(), "3");
@@ -44,7 +44,7 @@ assert.eq(col.find({"decimal": NumberDecimal("12345678901234567890.1234567890123
col.drop();
// Maximum and Minimum Values
-assert.writeOK(col.insert([
+assert.commandWorked(col.insert([
{"max": NumberDecimal("9999999999999999999999999999999999E6111")},
{"min": NumberDecimal("1E-6176")}
]));
diff --git a/jstests/decimal/decimal_find_mixed.js b/jstests/decimal/decimal_find_mixed.js
index 0224c1b3fd8..96957033669 100644
--- a/jstests/decimal/decimal_find_mixed.js
+++ b/jstests/decimal/decimal_find_mixed.js
@@ -7,7 +7,7 @@ col.drop();
// Insert some sample data.
-assert.writeOK(col.insert([
+assert.commandWorked(col.insert([
{"a": -1},
{"a": NumberDecimal("-1")},
{"a": NumberLong("-1")},
@@ -37,7 +37,7 @@ assert.writeOK(col.insert([
{"a": NumberDecimal("Infinity")},
{"a": Infinity}
]),
- "Initial decimal insertion failed");
+ "Initial decimal insertion failed");
// Simple finds
assert.eq(col.find({"a": -1}).count(), 4, "A1");
@@ -54,14 +54,14 @@ assert.eq(col.find({$and: [{"a": {$gte: 0}}, {"a": {$lte: 2}}]}).count(), 14, "C
// Proper mixed ordering of decimals and doubles
col.drop();
-assert.writeOK(col.insert([{"a": NumberDecimal("0.3")}, {"a": 0.3}], "2 insertion failed"));
+assert.commandWorked(col.insert([{"a": NumberDecimal("0.3")}, {"a": 0.3}], "2 insertion failed"));
assert.eq(col.find({"a": {$lt: NumberDecimal("0.3")}}).count(), 1, "D1");
assert.eq(col.find({"a": {$gt: 0.3}}).count(), 1, "D1");
// Find with NumberLong, but not Double
col.drop();
-assert.writeOK(col.insert([{"a": NumberDecimal("36028797018963967")}], "3 insertion failed"));
+assert.commandWorked(col.insert([{"a": NumberDecimal("36028797018963967")}], "3 insertion failed"));
assert.eq(col.find({"a": NumberDecimal("36028797018963967")}).count(), 1, "E1");
// Not representable as double
diff --git a/jstests/decimal/decimal_find_query.js b/jstests/decimal/decimal_find_query.js
index e584e9e1f47..0c2ea7e8563 100644
--- a/jstests/decimal/decimal_find_query.js
+++ b/jstests/decimal/decimal_find_query.js
@@ -7,7 +7,7 @@ col.drop();
// Insert some sample data.
-assert.writeOK(col.insert([
+assert.commandWorked(col.insert([
{'decimal': NumberDecimal('0')},
{'decimal': NumberDecimal('0.00')},
{'decimal': NumberDecimal('-0')},
@@ -20,7 +20,7 @@ assert.writeOK(col.insert([
{'decimal': NumberDecimal('Infinity')},
{'decimal': NumberDecimal('-Infinity')},
]),
- 'Initial insertion failed');
+ 'Initial insertion failed');
assert.eq(col.find({'decimal': {$eq: NumberDecimal('1')}}).count(), 2);
assert.eq(col.find({'decimal': {$lt: NumberDecimal('1.00000000000001')}}).count(), 6);
@@ -36,7 +36,7 @@ assert.eq(
// Test $mod
col.drop();
-assert.writeOK(col.insert([
+assert.commandWorked(col.insert([
{'decimal': NumberDecimal('0')},
{'decimal': NumberDecimal('0.00')},
{'decimal': NumberDecimal('-0')},
@@ -44,6 +44,6 @@ assert.writeOK(col.insert([
{'decimal': NumberDecimal('1.00')},
{'decimal': NumberDecimal('2.00')},
]),
- '2 insertion failed');
+ '2 insertion failed');
assert.eq(col.find({'decimal': {$mod: [2, 0]}}).count(), 4, "$mod count incorrect");
}());
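The $mod predicate above takes [divisor, remainder], so {$mod: [2, 0]} selects the values congruent to 0 mod 2: the three zero encodings plus 2.00, hence the expected count of 4. A standalone sketch with a placeholder collection name:

    db.decimal_mod_sketch.drop();
    assert.commandWorked(db.decimal_mod_sketch.insert(
        [{d: NumberDecimal("0")}, {d: NumberDecimal("2.00")}, {d: NumberDecimal("1")}]));
    // 0 and 2.00 are divisible by 2; 1 is not.
    assert.eq(db.decimal_mod_sketch.find({d: {$mod: [2, 0]}}).count(), 2);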
diff --git a/jstests/decimal/decimal_roundtrip_basic.js b/jstests/decimal/decimal_roundtrip_basic.js
index 612b6fcd631..4b3dc7e0f32 100644
--- a/jstests/decimal/decimal_roundtrip_basic.js
+++ b/jstests/decimal/decimal_roundtrip_basic.js
@@ -7,7 +7,7 @@ col.drop();
// Insert some sample data.
-assert.writeOK(col.insert([
+assert.commandWorked(col.insert([
{"decimal": NumberDecimal("0")},
{"decimal": NumberDecimal("0.00")},
{"decimal": NumberDecimal("-0")},
@@ -22,7 +22,7 @@ assert.writeOK(col.insert([
{"decimal": NumberDecimal("9999999999999999999999999999999999E6111")},
{"decimal": NumberDecimal("1E-6176")},
]),
- "Initial insertion of decimals failed");
+ "Initial insertion of decimals failed");
// Check that the searching for queryValue results in finding expectedValues.
// All arguments are string representations of NumberDecimal values.
diff --git a/jstests/decimal/decimal_update.js b/jstests/decimal/decimal_update.js
index f50994ce32b..74bca69a842 100644
--- a/jstests/decimal/decimal_update.js
+++ b/jstests/decimal/decimal_update.js
@@ -14,26 +14,26 @@ var docs = [
{'a': 1}
];
-assert.writeOK(col.insert(docs), "Initial insertion failed");
+assert.commandWorked(col.insert(docs), "Initial insertion failed");
-assert.writeOK(col.update({}, {$inc: {'a': NumberDecimal("10")}}, {multi: true}),
- "update $inc failed");
+assert.commandWorked(col.update({}, {$inc: {'a': NumberDecimal("10")}}, {multi: true}),
+ "update $inc failed");
assert.eq(col.find({a: 11}).count(), 4, "count after $inc incorrect");
-assert.writeOK(col.update({}, {$inc: {'a': NumberDecimal("0")}}, {multi: true}),
- "update $inc 0 failed");
+assert.commandWorked(col.update({}, {$inc: {'a': NumberDecimal("0")}}, {multi: true}),
+ "update $inc 0 failed");
assert.eq(col.find({a: 11}).count(), 4, "count after $inc 0 incorrect");
col.drop();
-assert.writeOK(col.insert(docs), "Second insertion failed");
+assert.commandWorked(col.insert(docs), "Second insertion failed");
-assert.writeOK(col.update({}, {$mul: {'a': NumberDecimal("1")}}, {multi: true}),
- "update $mul failed");
+assert.commandWorked(col.update({}, {$mul: {'a': NumberDecimal("1")}}, {multi: true}),
+ "update $mul failed");
assert.eq(col.find({a: 1}).count(), 4, "count after $mul incorrect");
-assert.writeOK(col.update({}, {$mul: {'a': NumberDecimal("2")}}, {multi: true}),
- "update $mul 2 failed");
+assert.commandWorked(col.update({}, {$mul: {'a': NumberDecimal("2")}}, {multi: true}),
+ "update $mul 2 failed");
assert.eq(col.find({a: 2}).count(), 4, "count after $mul incorrect");
-assert.writeOK(col.update({}, {$mul: {'a': NumberDecimal("0")}}, {multi: true}),
- "update $mul 0 failed");
+assert.commandWorked(col.update({}, {$mul: {'a': NumberDecimal("0")}}, {multi: true}),
+ "update $mul 0 failed");
assert.eq(col.find({a: 0}).count(), 5, "count after $mul 0 incorrect");
assert.writeError(col.update({}, {$bit: {'a': {and: 1}}}, {multi: true}), "$bit should fail");
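The $bit line stays on assert.writeError because it is expected to fail: $bit operates only on integer types (NumberInt and NumberLong), so applying it to a decimal field is a write error. A minimal sketch, using a placeholder collection:

    db.bit_sketch.drop();
    assert.commandWorked(db.bit_sketch.insert({a: NumberInt(5)}));
    assert.commandWorked(db.bit_sketch.update({}, {$bit: {a: {and: NumberInt(1)}}}));  // ok on ints
    assert.commandWorked(db.bit_sketch.update({}, {$set: {a: NumberDecimal("5")}}));
    assert.writeError(db.bit_sketch.update({}, {$bit: {a: {and: NumberInt(1)}}}));     // rejected on decimals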
diff --git a/jstests/disk/directoryperdb.js b/jstests/disk/directoryperdb.js
index 285b5588115..de2b2a272dd 100644
--- a/jstests/disk/directoryperdb.js
+++ b/jstests/disk/directoryperdb.js
@@ -127,13 +127,13 @@ if (storageEngine == 'wiredTiger') {
// Create a database named 'a' repeated 63 times.
var dbNameAA = Array(64).join('a');
var dbAA = m.getDB(dbNameAA);
-assert.writeOK(dbAA[baseName].insert({}));
+assert.commandWorked(dbAA[baseName].insert({}));
assertDocumentCount(dbAA, 1);
m = checkDBFilesInDBDirectory(m, dbAA);
// Create a database named '&'.
var dbAnd = m.getDB('&');
-assert.writeOK(dbAnd[baseName].insert({}));
+assert.commandWorked(dbAnd[baseName].insert({}));
assertDocumentCount(dbAnd, 1);
m = checkDBFilesInDBDirectory(m, dbAnd);
@@ -144,14 +144,14 @@ if (!_isWindows()) {
// Create a database named '処'.
var dbNameU = '処';
var dbU = m.getDB(dbNameU);
- assert.writeOK(dbU[baseName].insert({}));
+ assert.commandWorked(dbU[baseName].insert({}));
assertDocumentCount(dbU, 1);
m = checkDBFilesInDBDirectory(m, dbU);
// Create a database named '処' repeated 21 times.
var dbNameUU = Array(22).join('処');
var dbUU = m.getDB(dbNameUU);
- assert.writeOK(dbUU[baseName].insert({}));
+ assert.commandWorked(dbUU[baseName].insert({}));
assertDocumentCount(dbUU, 1);
m = checkDBFilesInDBDirectory(m, dbUU);
}
diff --git a/jstests/disk/killall.js b/jstests/disk/killall.js
index 675c2a42c5f..e8bb7ee71b5 100644
--- a/jstests/disk/killall.js
+++ b/jstests/disk/killall.js
@@ -12,7 +12,7 @@ var dbpath = MongoRunner.dataPath + baseName;
var mongod = MongoRunner.runMongod({dbpath: dbpath});
var db = mongod.getDB("test");
var collection = db.getCollection(baseName);
-assert.writeOK(collection.insert({}));
+assert.commandWorked(collection.insert({}));
var awaitShell = startParallelShell(
"db." + baseName + ".count( { $where: function() { while( 1 ) { ; } } } )", mongod.port);
diff --git a/jstests/disk/too_many_fds.js b/jstests/disk/too_many_fds.js
index b8334bbb717..68d70396900 100644
--- a/jstests/disk/too_many_fds.js
+++ b/jstests/disk/too_many_fds.js
@@ -10,7 +10,7 @@ function doTest() {
for (var i = 1; i < 1026; ++i) {
var db = m.getDB("db" + i);
var coll = db.getCollection("coll" + i);
- assert.writeOK(coll.insert({}));
+ assert.commandWorked(coll.insert({}));
}
MongoRunner.stopMongod(m);
diff --git a/jstests/gle/get_last_error.js b/jstests/gle/get_last_error.js
index 7d4c517ca82..9309ac2921d 100644
--- a/jstests/gle/get_last_error.js
+++ b/jstests/gle/get_last_error.js
@@ -9,7 +9,7 @@ var master = replTest.getPrimary();
var mdb = master.getDB("test");
// synchronize replication
-assert.writeOK(mdb.foo.insert({_id: "1"}, {writeConcern: {w: 3, wtimeout: 5 * 60 * 1000}}));
+assert.commandWorked(mdb.foo.insert({_id: "1"}, {writeConcern: {w: 3, wtimeout: 5 * 60 * 1000}}));
var gle = master.getDB("test").runCommand({getLastError: 1, j: true});
print('Trying j=true');
@@ -51,7 +51,7 @@ replTest.stop(2);
master = replTest.getPrimary();
mdb = master.getDB("test");
// do w:2 write so secondary is caught up before calling {gle w:3}.
-assert.writeOK(mdb.foo.insert({_id: "3"}, {writeConcern: {w: 2, wtimeout: 5 * 60 * 1000}}));
+assert.commandWorked(mdb.foo.insert({_id: "3"}, {writeConcern: {w: 2, wtimeout: 5 * 60 * 1000}}));
gle = mdb.getLastErrorObj(3, 1000);
print('Trying w=3 with 2 nodes up, 1000ms timeout.');
printjson(gle);
diff --git a/jstests/libs/assert_schema_match.js b/jstests/libs/assert_schema_match.js
index 46d38fa3537..f59e7a1c2c4 100644
--- a/jstests/libs/assert_schema_match.js
+++ b/jstests/libs/assert_schema_match.js
@@ -18,7 +18,7 @@ function assertSchemaMatch(coll, schema, doc, valid) {
// Test that after inserting 'doc', we can find it again using $jsonSchema in the find command
// iff 'valid' is true.
coll.drop();
- assert.writeOK(coll.insert(doc));
+ assert.commandWorked(coll.insert(doc));
let count = coll.find({$jsonSchema: schema}).itcount();
assert.eq(count, valid ? 1 : 0, errmsg);
@@ -34,7 +34,7 @@ function assertSchemaMatch(coll, schema, doc, valid) {
assert.commandWorked(coll.runCommand("create", {validator: {$jsonSchema: schema}}));
let res = coll.insert(doc);
if (valid) {
- assert.writeOK(res, errmsg + " during insert document validation");
+ assert.commandWorked(res, errmsg + " during insert document validation");
} else {
assert.writeErrorWithCode(res,
ErrorCodes.DocumentValidationFailure,
@@ -44,7 +44,7 @@ function assertSchemaMatch(coll, schema, doc, valid) {
// Test that we can update an existing document to look like 'doc' when the collection has
// 'schema' as its document validator in "strict" mode iff 'valid' is true.
assert.commandWorked(coll.runCommand("drop"));
- assert.writeOK(coll.insert({_id: 0}));
+ assert.commandWorked(coll.insert({_id: 0}));
assert.commandWorked(
coll.runCommand("collMod", {validator: {$jsonSchema: schema}, validationLevel: "strict"}));
@@ -54,7 +54,7 @@ function assertSchemaMatch(coll, schema, doc, valid) {
delete docCopy._id;
res = coll.update({_id: 0}, docCopy);
if (valid) {
- assert.writeOK(res, errmsg + " during update document validation in strict mode");
+ assert.commandWorked(res, errmsg + " during update document validation in strict mode");
} else {
assert.writeErrorWithCode(res,
ErrorCodes.DocumentValidationFailure,
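assertSchemaMatch drives the same document through three code paths (a $jsonSchema find, insert-time validation, and update-time validation in strict mode), asserting either success or a DocumentValidationFailure write error depending on 'valid'. The insert-time branch, condensed, with names as in the helper:

    assert.commandWorked(coll.runCommand("create", {validator: {$jsonSchema: schema}}));
    const res = coll.insert(doc);
    if (valid) {
        assert.commandWorked(res, errmsg);
    } else {
        // writeErrorWithCode additionally checks the error code, not just failure.
        assert.writeErrorWithCode(res, ErrorCodes.DocumentValidationFailure, errmsg);
    }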
diff --git a/jstests/libs/geo_near_random.js b/jstests/libs/geo_near_random.js
index fd1fe36e799..b17ddabc1a4 100644
--- a/jstests/libs/geo_near_random.js
+++ b/jstests/libs/geo_near_random.js
@@ -34,7 +34,7 @@ GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) {
for (var i = 0; i < nPts; i++) {
bulk.insert({_id: i, loc: this.mkPt(scale, indexBounds)});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
if (!indexBounds)
this.t.ensureIndex({loc: '2d'});
diff --git a/jstests/libs/override_methods/mongos_manual_intervention_actions.js b/jstests/libs/override_methods/mongos_manual_intervention_actions.js
index fb0a7080585..b30de265f24 100644
--- a/jstests/libs/override_methods/mongos_manual_intervention_actions.js
+++ b/jstests/libs/override_methods/mongos_manual_intervention_actions.js
@@ -17,7 +17,7 @@ var ManualInterventionActions = (function() {
let stillHasChunks = true;
while (stillHasChunks) {
- let writeRes = assert.writeOK(mongosConn.getDB('config').chunks.remove(
+ let writeRes = assert.commandWorked(mongosConn.getDB('config').chunks.remove(
{ns: ns}, {justOne: true, writeConcern: {w: 'majority'}}));
stillHasChunks = writeRes.nRemoved > 0;
}
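This hunk relies on a subtlety: assert.commandWorked, like assert.writeOK before it, returns the value it validated, so the loop can keep reading nRemoved off the WriteResult. Condensed:

    let stillHasChunks = true;
    while (stillHasChunks) {
        const writeRes = assert.commandWorked(  // returns the WriteResult it checked
            mongosConn.getDB('config').chunks.remove({ns: ns}, {justOne: true}));
        stillHasChunks = writeRes.nRemoved > 0;  // one chunk document removed per pass
    }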
diff --git a/jstests/libs/pin_getmore_cursor.js b/jstests/libs/pin_getmore_cursor.js
index a4541bc6bd8..4e5d0eba770 100644
--- a/jstests/libs/pin_getmore_cursor.js
+++ b/jstests/libs/pin_getmore_cursor.js
@@ -21,7 +21,7 @@ function withPinnedCursor(
coll.drop();
db.active_cursor_sentinel.drop();
for (let i = 0; i < 100; ++i) {
- assert.writeOK(coll.insert({value: i}));
+ assert.commandWorked(coll.insert({value: i}));
}
let cleanup = null;
try {
diff --git a/jstests/multiVersion/clone_helper.js b/jstests/multiVersion/clone_helper.js
index 9253c0ffc31..893993239bf 100644
--- a/jstests/multiVersion/clone_helper.js
+++ b/jstests/multiVersion/clone_helper.js
@@ -32,7 +32,7 @@ let bulk = masterDB[testColName].initializeUnorderedBulkOp();
for (let i = 0; i < numDocs; i++) {
bulk.insert({x: i, text: str});
}
-assert.writeOK(bulk.execute({w: 3}));
+assert.commandWorked(bulk.execute({w: 3}));
jsTest.log("Create view on replica set");
assert.commandWorked(masterDB.runCommand({create: testViewName, viewOn: testColName}));
diff --git a/jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js b/jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js
index c49abeafa8f..5b3b5955770 100644
--- a/jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js
+++ b/jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js
@@ -36,7 +36,7 @@ assert.neq(null,
// Ensure that the 'lastStable' binary mongos can perform reads and writes to the shards in the
// cluster.
-assert.writeOK(lastStableMongos.getDB("test").foo.insert({x: 1}));
+assert.commandWorked(lastStableMongos.getDB("test").foo.insert({x: 1}));
let foundDoc = lastStableMongos.getDB("test").foo.findOne({x: 1});
assert.neq(null, foundDoc);
assert.eq(1, foundDoc.x, tojson(foundDoc));
diff --git a/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js b/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js
index 2551f0b38a7..3faf6a704b5 100644
--- a/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js
+++ b/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js
@@ -27,9 +27,10 @@ let insertDataForConn = function(conn, dbs, nodeOptions) {
continue;
}
// Config servers have a majority write concern.
- assert.writeOK(conn.getDB(dbs[j]).foo.insert(doc, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(
+ conn.getDB(dbs[j]).foo.insert(doc, {writeConcern: {w: "majority"}}));
} else {
- assert.writeOK(conn.getDB(dbs[j]).foo.insert(doc));
+ assert.commandWorked(conn.getDB(dbs[j]).foo.insert(doc));
}
}
}
diff --git a/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js b/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js
index 964b0c583da..95a4a7a8874 100644
--- a/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js
+++ b/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js
@@ -43,7 +43,7 @@ function runDowngradeTest() {
let coll = new Mongo(rsURL).getCollection(collParam);
let count = 10;
while (!isFinished()) {
- assert.writeOK(coll.insert({_id: count, str: "hello world"}));
+ assert.commandWorked(coll.insert({_id: count, str: "hello world"}));
count++;
}
}
diff --git a/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js b/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js
index 273695dbc05..35621d7cfb4 100644
--- a/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js
+++ b/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js
@@ -175,7 +175,7 @@ for (let i = 0; i < versions.length; i++) {
assert.commandWorked(testDB.createCollection(version.testCollection));
// Insert a document into the new collection.
- assert.writeOK(testDB[version.testCollection].insert({a: 1}));
+ assert.commandWorked(testDB[version.testCollection].insert({a: 1}));
assert.eq(1,
testDB[version.testCollection].count(),
`mongo should have inserted 1 document into collection ${version.testCollection}; ` +
@@ -236,7 +236,7 @@ for (let i = 0; i < versions.length; i++) {
// Connect to the 'test' database.
let testDB = primary.getDB('test');
assert.commandWorked(testDB.createCollection(version.testCollection));
- assert.writeOK(testDB[version.testCollection].insert({a: 1}));
+ assert.commandWorked(testDB[version.testCollection].insert({a: 1}));
assert.eq(1,
testDB[version.testCollection].count(),
`mongo should have inserted 1 document into collection ${version.testCollection}; ` +
@@ -262,7 +262,7 @@ for (let i = 0; i < versions.length; i++) {
assert.binVersion(primary, version.binVersion);
testDB = primary.getDB('test');
- assert.writeOK(testDB[version.testCollection].insert({b: 1}));
+ assert.commandWorked(testDB[version.testCollection].insert({b: 1}));
assert.eq(2,
testDB[version.testCollection].count(),
`mongo should have inserted 2 documents into collection ${version.testCollection}; ` +
diff --git a/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
index 9c62bdb5ee1..900505b013a 100644
--- a/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
+++ b/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
@@ -120,7 +120,7 @@ MongoRunner.stopMongod(conn);
conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: lastStable});
assert.neq(
null, conn, "mongod was unable to start up with version=" + lastStable + " and no data files");
-assert.writeOK(conn.getDB("test").coll.insert({a: 5}));
+assert.commandWorked(conn.getDB("test").coll.insert({a: 5}));
adminDB = conn.getDB("admin");
checkFCV(adminDB, lastStableFCV);
MongoRunner.stopMongod(conn);
@@ -227,7 +227,7 @@ replSetConfig.members[2].priority = 0;
reconfig(rst, replSetConfig);
// Verify that the 'lastStable' secondary successfully performed its initial sync.
-assert.writeOK(
+assert.commandWorked(
primaryAdminDB.getSiblingDB("test").coll.insert({awaitRepl: true}, {writeConcern: {w: 3}}));
// Test that a 'lastStable' secondary can no longer replicate from the primary after the FCV is
@@ -238,7 +238,7 @@ stopServerReplication(secondary);
assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
restartServerReplication(secondary);
checkFCV(secondaryAdminDB, lastStableFCV);
-assert.writeOK(primaryAdminDB.getSiblingDB("test").coll.insert({shouldReplicate: false}));
+assert.commandWorked(primaryAdminDB.getSiblingDB("test").coll.insert({shouldReplicate: false}));
assert.eq(secondaryAdminDB.getSiblingDB("test").coll.find({shouldReplicate: false}).itcount(), 0);
rst.stopSet();
@@ -257,7 +257,7 @@ secondary = rst.add({binVersion: lastStable});
rst.reInitiate();
// Ensure the 'lastStable' binary node succeeded its initial sync.
-assert.writeOK(primary.getDB("test").coll.insert({awaitRepl: true}, {writeConcern: {w: 3}}));
+assert.commandWorked(primary.getDB("test").coll.insert({awaitRepl: true}, {writeConcern: {w: 3}}));
// Run {setFCV: lastStableFCV}. This should be idempotent.
assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
diff --git a/jstests/multiVersion/libs/multiversion_rollback.js b/jstests/multiVersion/libs/multiversion_rollback.js
index 7ae61aaf732..66db2114ad9 100644
--- a/jstests/multiVersion/libs/multiversion_rollback.js
+++ b/jstests/multiVersion/libs/multiversion_rollback.js
@@ -26,10 +26,10 @@ function testMultiversionRollback(testName, rollbackNodeVersion, syncSourceVersi
let CommonOps = (node) => {
// Insert four documents on both nodes.
- assert.writeOK(node.getDB(dbName)["bothNodesKeep"].insert({a: 1}));
- assert.writeOK(node.getDB(dbName)["rollbackNodeDeletes"].insert({b: 1}));
- assert.writeOK(node.getDB(dbName)["rollbackNodeUpdates"].insert({c: 1}));
- assert.writeOK(node.getDB(dbName)["bothNodesUpdate"].insert({d: 1}));
+ assert.commandWorked(node.getDB(dbName)["bothNodesKeep"].insert({a: 1}));
+ assert.commandWorked(node.getDB(dbName)["rollbackNodeDeletes"].insert({b: 1}));
+ assert.commandWorked(node.getDB(dbName)["rollbackNodeUpdates"].insert({c: 1}));
+ assert.commandWorked(node.getDB(dbName)["bothNodesUpdate"].insert({d: 1}));
};
let RollbackOps = (node) => {
@@ -38,17 +38,17 @@ function testMultiversionRollback(testName, rollbackNodeVersion, syncSourceVersi
// 2. Update a document only on this node.
// 3. Update a document on both nodes.
// All three documents will be refetched during rollback.
- assert.writeOK(node.getDB(dbName)["rollbackNodeDeletes"].remove({b: 1}));
- assert.writeOK(node.getDB(dbName)["rollbackNodeUpdates"].update({c: 1}, {c: 0}));
- assert.writeOK(node.getDB(dbName)["bothNodesUpdate"].update({d: 1}, {d: 0}));
+ assert.commandWorked(node.getDB(dbName)["rollbackNodeDeletes"].remove({b: 1}));
+ assert.commandWorked(node.getDB(dbName)["rollbackNodeUpdates"].update({c: 1}, {c: 0}));
+ assert.commandWorked(node.getDB(dbName)["bothNodesUpdate"].update({d: 1}, {d: 0}));
};
let SyncSourceOps = (node) => {
// Perform operations only on the sync source:
// 1. Make a conflicting write on one of the documents the rollback node updates.
// 2. Insert a new document.
- assert.writeOK(node.getDB(dbName)["bothNodesUpdate"].update({d: 1}, {d: 2}));
- assert.writeOK(node.getDB(dbName)["syncSourceInserts"].insert({e: 1}));
+ assert.commandWorked(node.getDB(dbName)["bothNodesUpdate"].update({d: 1}, {d: 2}));
+ assert.commandWorked(node.getDB(dbName)["syncSourceInserts"].insert({e: 1}));
};
// Set up replica set.
diff --git a/jstests/multiVersion/skip_level_upgrade.js b/jstests/multiVersion/skip_level_upgrade.js
index 6f268be451a..183ae9aae32 100644
--- a/jstests/multiVersion/skip_level_upgrade.js
+++ b/jstests/multiVersion/skip_level_upgrade.js
@@ -53,7 +53,7 @@ for (let i = 0; i < versions.length; i++) {
// then shut it down.
let testDB = conn.getDB('test');
assert.commandWorked(testDB.createCollection(version.testCollection));
- assert.writeOK(testDB[version.testCollection].insert({a: 1}));
+ assert.commandWorked(testDB[version.testCollection].insert({a: 1}));
assert.commandWorked(testDB[version.testCollection].createIndex({a: 1}));
MongoRunner.stopMongod(conn);
diff --git a/jstests/multiVersion/upgrade_downgrade_cluster.js b/jstests/multiVersion/upgrade_downgrade_cluster.js
index 7e450c87b71..3d159cee442 100644
--- a/jstests/multiVersion/upgrade_downgrade_cluster.js
+++ b/jstests/multiVersion/upgrade_downgrade_cluster.js
@@ -31,10 +31,10 @@ var runTest = function(isRSCluster) {
"...");
var testCRUDAndAgg = function(db) {
- assert.writeOK(db.foo.insert({x: 1}));
- assert.writeOK(db.foo.insert({x: -1}));
- assert.writeOK(db.foo.update({x: 1}, {$set: {y: 1}}));
- assert.writeOK(db.foo.update({x: -1}, {$set: {y: 1}}));
+ assert.commandWorked(db.foo.insert({x: 1}));
+ assert.commandWorked(db.foo.insert({x: -1}));
+ assert.commandWorked(db.foo.update({x: 1}, {$set: {y: 1}}));
+ assert.commandWorked(db.foo.update({x: -1}, {$set: {y: 1}}));
var doc1 = db.foo.findOne({x: 1});
assert.eq(1, doc1.y);
var doc2 = db.foo.findOne({x: -1});
@@ -50,8 +50,8 @@ var runTest = function(isRSCluster) {
assert.eq(2, db.sanity_check.find().itcount());
}());
- assert.writeOK(db.foo.remove({x: 1}, true));
- assert.writeOK(db.foo.remove({x: -1}, true));
+ assert.commandWorked(db.foo.remove({x: 1}, true));
+ assert.commandWorked(db.foo.remove({x: -1}, true));
assert.eq(null, db.foo.findOne());
};
diff --git a/jstests/noPassthrough/aggregation_cursor_invalidations.js b/jstests/noPassthrough/aggregation_cursor_invalidations.js
index 7192e9595bc..07155ff7135 100644
--- a/jstests/noPassthrough/aggregation_cursor_invalidations.js
+++ b/jstests/noPassthrough/aggregation_cursor_invalidations.js
@@ -43,13 +43,13 @@ function setup() {
sourceCollection.drop();
foreignCollection.drop();
for (let i = 0; i < numMatches; ++i) {
- assert.writeOK(sourceCollection.insert({_id: i, local: i}));
+ assert.commandWorked(sourceCollection.insert({_id: i, local: i}));
// We want to be able to pause a $lookup stage in a state where it has returned some but
// not all of the results for a single lookup, so we need to insert at least
// 'numMatches' matches for each source document.
for (let j = 0; j < numMatches; ++j) {
- assert.writeOK(foreignCollection.insert({_id: numMatches * i + j, foreign: i}));
+ assert.commandWorked(foreignCollection.insert({_id: numMatches * i + j, foreign: i}));
}
}
}
@@ -271,14 +271,14 @@ assert.commandWorked(testDB.runCommand(
{create: sourceCollection.getName(), capped: true, size: maxCappedSizeBytes, max: maxNumDocs}));
// Fill up about half of the collection.
for (let i = 0; i < maxNumDocs / 2; ++i) {
- assert.writeOK(sourceCollection.insert({_id: i}));
+ assert.commandWorked(sourceCollection.insert({_id: i}));
}
// Start an aggregation.
assert.gt(maxNumDocs / 2, batchSize);
res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
// Insert enough to force a truncation.
for (let i = maxNumDocs / 2; i < 2 * maxNumDocs; ++i) {
- assert.writeOK(sourceCollection.insert({_id: i}));
+ assert.commandWorked(sourceCollection.insert({_id: i}));
}
assert.eq(maxNumDocs, sourceCollection.count());
assert.commandFailedWithCode(
diff --git a/jstests/noPassthrough/aggregation_zero_batchsize.js b/jstests/noPassthrough/aggregation_zero_batchsize.js
index d143c75ede6..68ecfe24455 100644
--- a/jstests/noPassthrough/aggregation_zero_batchsize.js
+++ b/jstests/noPassthrough/aggregation_zero_batchsize.js
@@ -34,7 +34,7 @@ const bulk = coll.initializeUnorderedBulkOp();
for (let i = 0; i < nDocs; i++) {
bulk.insert({_id: i, stringField: "string"});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
res = assert.commandWorked(
testDB.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 0}}));
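A batchSize of 0 is valid: the server establishes the cursor and returns an empty firstBatch, and all documents arrive through getMore. A sketch of that exchange, with "coll" as a placeholder name:

    const res = assert.commandWorked(
        db.runCommand({aggregate: "coll", pipeline: [], cursor: {batchSize: 0}}));
    assert.eq(0, res.cursor.firstBatch.length);  // cursor exists, no documents yet
    const more = assert.commandWorked(
        db.runCommand({getMore: res.cursor.id, collection: "coll"}));
    // more.cursor.nextBatch now carries the documents.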
diff --git a/jstests/noPassthrough/apply_ops_mode.js b/jstests/noPassthrough/apply_ops_mode.js
index 385cf0d532b..2376ed1c30f 100644
--- a/jstests/noPassthrough/apply_ops_mode.js
+++ b/jstests/noPassthrough/apply_ops_mode.js
@@ -13,7 +13,7 @@ var db = standalone.getDB("test");
var coll = db.getCollection("apply_ops_mode1");
coll.drop();
-assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 1}));
// ------------ Testing normal updates ---------------
@@ -42,7 +42,7 @@ assert.eq(coll.count({x: 1}), 1);
coll = db.getCollection("apply_ops_mode2");
coll.drop();
updateOp.ns = coll.getFullName();
-assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 1}));
// Test default succeeds in 'InitialSync' mode.
assert.commandWorked(db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "InitialSync"}));
diff --git a/jstests/noPassthrough/atomic_rename_collection.js b/jstests/noPassthrough/atomic_rename_collection.js
index a6f39c1c40f..621abd43d39 100644
--- a/jstests/noPassthrough/atomic_rename_collection.js
+++ b/jstests/noPassthrough/atomic_rename_collection.js
@@ -27,8 +27,8 @@ const tests = [
];
tests.forEach((test) => {
test.source.drop();
- assert.writeOK(test.source.insert({}));
- assert.writeOK(test.target.insert({}));
+ assert.commandWorked(test.source.insert({}));
+ assert.commandWorked(test.target.insert({}));
let ts = local.oplog.rs.find().sort({$natural: -1}).limit(1).next().ts;
let cmd = {
diff --git a/jstests/noPassthrough/auto_retry_on_network_error.js b/jstests/noPassthrough/auto_retry_on_network_error.js
index 1c5f8465ebb..fa20b1d61c1 100644
--- a/jstests/noPassthrough/auto_retry_on_network_error.js
+++ b/jstests/noPassthrough/auto_retry_on_network_error.js
@@ -68,7 +68,7 @@ assert.commandWorked(db.runCommandWithMetadata({find: collName}, {}).commandRepl
// Retryable write commands that can be retried succeed.
failNextCommand(db, "insert");
-assert.writeOK(db[collName].insert({x: 1}));
+assert.commandWorked(db[collName].insert({x: 1}));
failNextCommand(db, "insert");
assert.commandWorked(db.runCommandWithMetadata({
diff --git a/jstests/noPassthrough/change_stream_failover.js b/jstests/noPassthrough/change_stream_failover.js
index b8ec132fdd8..ca14d36ea0a 100644
--- a/jstests/noPassthrough/change_stream_failover.js
+++ b/jstests/noPassthrough/change_stream_failover.js
@@ -35,9 +35,9 @@ for (let key of Object.keys(ChangeStreamWatchMode)) {
// Be sure we can read from the change stream. Use {w: "majority"} so that we're still
// guaranteed to be able to read after the failover.
- assert.writeOK(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- assert.writeOK(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(coll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(coll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
const firstChange = cst.getOneChange(changeStream);
assert.docEq(firstChange.fullDocument, {_id: 0});
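The {w: "majority"} write concern is what makes these reads safe across the failover: a majority-committed write cannot be rolled back, so it must appear in the change stream even after a new primary is elected. A standalone sketch of the same guarantee on a replica set, without the test's ChangeStreamTest helper:

    const cursor = coll.aggregate([{$changeStream: {}}]);
    assert.commandWorked(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
    assert.soon(() => cursor.hasNext());  // the event survives a subsequent failover
    assert.docEq(cursor.next().fullDocument, {_id: 0});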
diff --git a/jstests/noPassthrough/change_streams_require_majority_read_concern.js b/jstests/noPassthrough/change_streams_require_majority_read_concern.js
index 6fdc4c2ee37..8bae0bcc287 100644
--- a/jstests/noPassthrough/change_streams_require_majority_read_concern.js
+++ b/jstests/noPassthrough/change_streams_require_majority_read_concern.js
@@ -49,7 +49,7 @@ function assertNextBatchIsEmpty(cursor) {
// Test read concerns other than "majority" are not supported.
const primaryColl = db.foo;
-assert.writeOK(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
let res = primaryColl.runCommand({
aggregate: primaryColl.getName(),
pipeline: [{$changeStream: {}}],
@@ -81,7 +81,7 @@ let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collecti
assert.eq(cursor.firstBatch.length, 0);
// Insert a document on the primary only.
-assert.writeOK(primaryColl.insert({_id: 2}, {writeConcern: {w: 1}}));
+assert.commandWorked(primaryColl.insert({_id: 2}, {writeConcern: {w: 1}}));
assertNextBatchIsEmpty(cursor);
// Restart data replication and wait until the new write becomes visible.
diff --git a/jstests/noPassthrough/change_streams_update_lookup_collation.js b/jstests/noPassthrough/change_streams_update_lookup_collation.js
index 996ce0e2c98..c6ed67b6dae 100644
--- a/jstests/noPassthrough/change_streams_update_lookup_collation.js
+++ b/jstests/noPassthrough/change_streams_update_lookup_collation.js
@@ -37,9 +37,9 @@ assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensi
// Insert some documents that have similar _ids, but differ by case and diacritics. These _ids
// would all match the collation on the strengthOneChangeStream, but should not be confused
// during the update lookup using the strength 2 collection default collation.
-assert.writeOK(coll.insert({_id: "abc", x: "abc"}));
-assert.writeOK(coll.insert({_id: "abç", x: "ABC"}));
-assert.writeOK(coll.insert({_id: "åbC", x: "AbÇ"}));
+assert.commandWorked(coll.insert({_id: "abc", x: "abc"}));
+assert.commandWorked(coll.insert({_id: "abç", x: "ABC"}));
+assert.commandWorked(coll.insert({_id: "åbC", x: "AbÇ"}));
const changeStreamDefaultCollation = coll.aggregate(
[{$changeStream: {fullDocument: "updateLookup"}}, {$match: {"fullDocument.x": "abc"}}],
@@ -54,7 +54,7 @@ const strengthOneChangeStream = coll.aggregate(
[{$changeStream: {fullDocument: "updateLookup"}}, {$match: {"fullDocument.x": "abc"}}],
{collation: strengthOneCollation});
-assert.writeOK(coll.update({_id: "abc"}, {$set: {updated: true}}));
+assert.commandWorked(coll.update({_id: "abc"}, {$set: {updated: true}}));
// Track the number of _id index usages to prove that the update lookup uses the _id index (and
// therefore is using the correct collation for the lookup).
@@ -72,7 +72,7 @@ assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 1);
assert.docEq(strengthOneChangeStream.next().fullDocument, {_id: "abc", x: "abc", updated: true});
assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 2);
-assert.writeOK(coll.update({_id: "abç"}, {$set: {updated: true}}));
+assert.commandWorked(coll.update({_id: "abç"}, {$set: {updated: true}}));
assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 3);
// Again, both cursors should produce a document describing this update.
@@ -83,7 +83,7 @@ assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 4);
assert.docEq(strengthOneChangeStream.next().fullDocument, {_id: "abç", x: "ABC", updated: true});
assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 5);
-assert.writeOK(coll.update({_id: "Ã¥bC"}, {$set: {updated: true}}));
+assert.commandWorked(coll.update({_id: "Ã¥bC"}, {$set: {updated: true}}));
assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 6);
// Both $changeStream stages will see this update and both will look up the full document using
diff --git a/jstests/noPassthrough/characterize_index_builds_on_restart.js b/jstests/noPassthrough/characterize_index_builds_on_restart.js
index 35b0c7c9a7c..d618a249635 100644
--- a/jstests/noPassthrough/characterize_index_builds_on_restart.js
+++ b/jstests/noPassthrough/characterize_index_builds_on_restart.js
@@ -67,7 +67,7 @@ function addTestDocuments(db) {
for (var i = 0; i < size; ++i) {
bulk.insert({i: i, j: i * i, k: 1});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}
function startIndexBuildOnSecondaryAndLeaveUnfinished(primaryDB, writeConcern, secondaryDB) {
diff --git a/jstests/noPassthrough/client_metadata_log.js b/jstests/noPassthrough/client_metadata_log.js
index 419a19a9ebb..d2662d8905d 100644
--- a/jstests/noPassthrough/client_metadata_log.js
+++ b/jstests/noPassthrough/client_metadata_log.js
@@ -7,7 +7,7 @@
let checkLog = function(conn) {
let coll = conn.getCollection("test.foo");
- assert.writeOK(coll.insert({_id: 1}));
+ assert.commandWorked(coll.insert({_id: 1}));
print(`Checking ${conn.fullOptions.logFile} for client metadata message`);
let log = cat(conn.fullOptions.logFile);
diff --git a/jstests/noPassthrough/client_metadata_slowlog.js b/jstests/noPassthrough/client_metadata_slowlog.js
index aab419023fe..ca4a2da4cf9 100644
--- a/jstests/noPassthrough/client_metadata_slowlog.js
+++ b/jstests/noPassthrough/client_metadata_slowlog.js
@@ -8,7 +8,7 @@ let conn = MongoRunner.runMongod({useLogFiles: true});
assert.neq(null, conn, 'mongod was unable to start up');
let coll = conn.getCollection("test.foo");
-assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 1}));
// Do a really slow query beyond the 100ms threshold
let count = coll.count({
diff --git a/jstests/noPassthrough/commands_handle_kill.js b/jstests/noPassthrough/commands_handle_kill.js
index 6811bf77ec2..cdc96579830 100644
--- a/jstests/noPassthrough/commands_handle_kill.js
+++ b/jstests/noPassthrough/commands_handle_kill.js
@@ -34,7 +34,7 @@ function setupCollection() {
for (let i = 0; i < nDocs; i++) {
bulk.insert({_id: i, a: i});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
assert.commandWorked(coll.createIndex({a: 1}));
}
diff --git a/jstests/noPassthrough/commands_preserve_exec_error_code.js b/jstests/noPassthrough/commands_preserve_exec_error_code.js
index 621fa72411e..3d0d1136f92 100644
--- a/jstests/noPassthrough/commands_preserve_exec_error_code.js
+++ b/jstests/noPassthrough/commands_preserve_exec_error_code.js
@@ -11,7 +11,7 @@ const db = mongod.getDB("test");
const coll = db.commands_preserve_exec_error_code;
coll.drop();
-assert.writeOK(coll.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
+assert.commandWorked(coll.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
assert.commandWorked(coll.createIndex({geo: "2d"}));
assert.commandWorked(
diff --git a/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js b/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js
index 2183e6da600..1fe50111a8e 100644
--- a/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js
+++ b/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js
@@ -46,7 +46,7 @@ let coll = testDB.security_501;
coll.drop();
for (let i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
}
// Create our two users.
diff --git a/jstests/noPassthrough/currentop_includes_await_time.js b/jstests/noPassthrough/currentop_includes_await_time.js
index 5a5dee2f5ce..45506e05726 100644
--- a/jstests/noPassthrough/currentop_includes_await_time.js
+++ b/jstests/noPassthrough/currentop_includes_await_time.js
@@ -17,7 +17,7 @@ const coll = testDB.currentop_includes_await_time;
coll.drop();
assert.commandWorked(testDB.createCollection(coll.getName(), {capped: true, size: 1024}));
-assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 1}));
let cmdRes = assert.commandWorked(
testDB.runCommand({find: coll.getName(), tailable: true, awaitData: true}));
@@ -46,7 +46,7 @@ assert.soon(function() {
// A capped insertion should unblock the getMore, allowing the test to complete before the
// getMore's awaitData time expires.
-assert.writeOK(coll.insert({_id: 2}));
+assert.commandWorked(coll.insert({_id: 2}));
cleanupShell();
MongoRunner.stopMongod(conn);
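The behavior under test: a tailable, awaitData cursor on a capped collection blocks in getMore once it has consumed the existing documents, and currentOp must report that waiting time; a single capped insert wakes it. Minimal shape of the blocking read ("capped" is a placeholder name):

    const res = assert.commandWorked(
        db.runCommand({find: "capped", tailable: true, awaitData: true}));
    // After the existing documents are exhausted, this blocks up to maxTimeMS
    // waiting for a new insert; the insert of {_id: 2} above releases it.
    db.runCommand({getMore: res.cursor.id, collection: "capped", maxTimeMS: 2000});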
diff --git a/jstests/noPassthrough/currentop_query.js b/jstests/noPassthrough/currentop_query.js
index 15e655d568a..82dcc421de3 100644
--- a/jstests/noPassthrough/currentop_query.js
+++ b/jstests/noPassthrough/currentop_query.js
@@ -71,7 +71,7 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
dropAndRecreateTestCollection();
for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({_id: i, a: i}));
+ assert.commandWorked(coll.insert({_id: i, a: i}));
}
const isLocalMongosCurOp = (FixtureHelpers.isMongos(testDB) && localOps);
@@ -280,8 +280,8 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
},
{
test: function(db) {
- assert.writeOK(db.currentop_query.remove({a: 2, $comment: "currentop_query"},
- {collation: {locale: "fr"}}));
+ assert.commandWorked(db.currentop_query.remove(
+ {a: 2, $comment: "currentop_query"}, {collation: {locale: "fr"}}));
},
operation: "remove",
planSummary: "COLLSCAN",
@@ -294,7 +294,7 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
},
{
test: function(db) {
- assert.writeOK(
+ assert.commandWorked(
db.currentop_query.update({a: 1, $comment: "currentop_query"},
{$inc: {b: 1}},
{collation: {locale: "fr"}, multi: true}));
@@ -372,7 +372,7 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
//
dropAndRecreateTestCollection();
for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
const originatingCommands = {
@@ -493,7 +493,7 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
*/
function runTruncationTests() {
dropAndRecreateTestCollection();
- assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorked(coll.insert({a: 1}));
// When the currentOp command serializes the query object as a string, individual string
// values inside it are truncated at 150 characters. To test "total length" truncation
diff --git a/jstests/noPassthrough/durable_view_catalog.js b/jstests/noPassthrough/durable_view_catalog.js
index 23de01b4b30..f6888b2f830 100644
--- a/jstests/noPassthrough/durable_view_catalog.js
+++ b/jstests/noPassthrough/durable_view_catalog.js
@@ -51,7 +51,7 @@ let listedViews =
assert.sameMembers(listedViews, expectedViews, "persisted view definitions not correctly loaded");
// Insert an invalid view definition directly into system.views to bypass normal validation.
-assert.writeOK(viewsDB.system.views.insert({_id: "badView", pipeline: "badType"}));
+assert.commandWorked(viewsDB.system.views.insert({_id: "badView", pipeline: "badType"}));
// Skip collection validation during stopMongod if invalid views exists.
TestData.skipValidationOnInvalidViewDefinitions = true;
@@ -78,7 +78,7 @@ assert.commandFailedWithCode(viewsDB.runCommand({listCollections: 1}),
// Manually remove the invalid view definition from system.views, and then verify that view
// operations work successfully without requiring a server restart.
-assert.writeOK(viewsDB.system.views.remove({_id: "badView"}));
+assert.commandWorked(viewsDB.system.views.remove({_id: "badView"}));
assert.commandWorked(viewsDB.runCommand({find: "view2"}));
assert.commandWorked(viewsDB.runCommand({create: "view4", viewOn: "collection"}));
assert.commandWorked(viewsDB.runCommand({collMod: "view2", viewOn: "view4"}));
diff --git a/jstests/noPassthrough/feature_compatibility_version.js b/jstests/noPassthrough/feature_compatibility_version.js
index 4f7cd42f450..319b253fa99 100644
--- a/jstests/noPassthrough/feature_compatibility_version.js
+++ b/jstests/noPassthrough/feature_compatibility_version.js
@@ -16,21 +16,21 @@ checkFCV(adminDB, latestFCV);
// Updating the featureCompatibilityVersion document changes the featureCompatibilityVersion
// server parameter.
-assert.writeOK(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {version: lastStableFCV}}));
+assert.commandWorked(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {version: lastStableFCV}}));
checkFCV(adminDB, lastStableFCV);
-assert.writeOK(
+assert.commandWorked(
adminDB.system.version.update({_id: "featureCompatibilityVersion"},
{$set: {version: lastStableFCV, targetVersion: latestFCV}}));
checkFCV(adminDB, lastStableFCV, latestFCV);
-assert.writeOK(
+assert.commandWorked(
adminDB.system.version.update({_id: "featureCompatibilityVersion"},
{$set: {version: lastStableFCV, targetVersion: lastStableFCV}}));
checkFCV(adminDB, lastStableFCV, lastStableFCV);
-assert.writeOK(
+assert.commandWorked(
adminDB.system.version.update({_id: "featureCompatibilityVersion"},
{$set: {version: latestFCV}, $unset: {targetVersion: true}}));
checkFCV(adminDB, latestFCV);
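checkFCV is the suite's helper; the state it verifies is a single document in admin.system.version, roughly as sketched below (version strings are illustrative, not pinned to a release):

    //   {_id: "featureCompatibilityVersion", version: "4.0"}                        steady state
    //   {_id: "featureCompatibilityVersion", version: "4.0", targetVersion: "4.2"}  mid-transition
    printjson(adminDB.system.version.findOne({_id: "featureCompatibilityVersion"}));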
diff --git a/jstests/noPassthrough/filemd5_kill_during_yield.js b/jstests/noPassthrough/filemd5_kill_during_yield.js
index e2f74bcb1ce..20e717d0b6c 100644
--- a/jstests/noPassthrough/filemd5_kill_during_yield.js
+++ b/jstests/noPassthrough/filemd5_kill_during_yield.js
@@ -8,8 +8,8 @@ const conn = MongoRunner.runMongod();
assert.neq(null, conn);
const db = conn.getDB("test");
db.fs.chunks.drop();
-assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "64string")}));
-assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 1, data: new BinData(0, "test")}));
+assert.commandWorked(db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "64string")}));
+assert.commandWorked(db.fs.chunks.insert({files_id: 1, n: 1, data: new BinData(0, "test")}));
db.fs.chunks.ensureIndex({files_id: 1, n: 1});
const kFailPointName = "waitInFilemd5DuringManualYield";
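The failpoint named here is toggled through the configureFailPoint admin command; the generic on/off cycle looks like:

    assert.commandWorked(db.adminCommand({configureFailPoint: kFailPointName, mode: "alwaysOn"}));
    // ... run the filemd5 command in a parallel shell and kill the yielded op ...
    assert.commandWorked(db.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));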
diff --git a/jstests/noPassthrough/geo_full.js b/jstests/noPassthrough/geo_full.js
index 7ffd8e90c50..87cc6661587 100644
--- a/jstests/noPassthrough/geo_full.js
+++ b/jstests/noPassthrough/geo_full.js
@@ -394,7 +394,7 @@ for (var test = 0; test < numTests; test++) {
doc._id = i;
bulk.insert(doc);
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
var indexDoc = {"locs.loc": "2d"};
randIndexAdditions(indexDoc);
diff --git a/jstests/noPassthrough/geo_mnypts_plus_fields.js b/jstests/noPassthrough/geo_mnypts_plus_fields.js
index 9f402db0d16..467c46e3698 100644
--- a/jstests/noPassthrough/geo_mnypts_plus_fields.js
+++ b/jstests/noPassthrough/geo_mnypts_plus_fields.js
@@ -39,7 +39,7 @@ for (var fields = 1; fields < maxFields; fields++) {
bulk.insert(doc);
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
// Create the query for the additional fields
const queryFields = {};
diff --git a/jstests/noPassthrough/global_operation_latency_histogram.js b/jstests/noPassthrough/global_operation_latency_histogram.js
index 2f103e70a96..2045391c71c 100644
--- a/jstests/noPassthrough/global_operation_latency_histogram.js
+++ b/jstests/noPassthrough/global_operation_latency_histogram.js
@@ -31,13 +31,13 @@ function checkHistogramDiff(reads, writes, commands) {
// Insert
var numRecords = 100;
for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.insert({_id: i}));
+ assert.commandWorked(testColl.insert({_id: i}));
}
lastHistogram = checkHistogramDiff(0, numRecords, 0);
// Update
for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.update({_id: i}, {x: i}));
+ assert.commandWorked(testColl.update({_id: i}, {x: i}));
}
lastHistogram = checkHistogramDiff(0, numRecords, 0);
@@ -68,13 +68,13 @@ lastHistogram = checkHistogramDiff(0, 0, numRecords - 1);
// Remove
for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.remove({_id: i}));
+ assert.commandWorked(testColl.remove({_id: i}));
}
lastHistogram = checkHistogramDiff(0, numRecords, 0);
// Upsert
for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1}));
+ assert.commandWorked(testColl.update({_id: i}, {x: i}, {upsert: 1}));
}
lastHistogram = checkHistogramDiff(0, numRecords, 0);
diff --git a/jstests/noPassthrough/ignore_notablescan.js b/jstests/noPassthrough/ignore_notablescan.js
index 255b646f757..8a0730b79ef 100644
--- a/jstests/noPassthrough/ignore_notablescan.js
+++ b/jstests/noPassthrough/ignore_notablescan.js
@@ -14,11 +14,11 @@ function runTests(ServerType) {
const primaryDB = session.getDatabase(dbName);
// Implicitly create the collection outside of the transaction.
- assert.writeOK(primaryDB.getCollection(collName).insert({x: 1}));
+ assert.commandWorked(primaryDB.getCollection(collName).insert({x: 1}));
// Run a transaction so the 'config.transactions' collection is implicitly created.
session.startTransaction();
- assert.writeOK(primaryDB.getCollection(collName).insert({x: 2}));
+ assert.commandWorked(primaryDB.getCollection(collName).insert({x: 2}));
assert.commandWorked(session.commitTransaction_forTesting());
// Run a predicate query that would fail if we did not ignore the 'notablescan' flag.
diff --git a/jstests/noPassthrough/implicit_sessions.js b/jstests/noPassthrough/implicit_sessions.js
index f0bb9d972f9..315f78cd5f1 100644
--- a/jstests/noPassthrough/implicit_sessions.js
+++ b/jstests/noPassthrough/implicit_sessions.js
@@ -69,7 +69,7 @@ function runTest() {
const testDB = conn.getDB("test");
const coll = testDB.getCollection("foo");
const implicitId = inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
+ assert.commandWorked(coll.insert({x: 1}));
}, {shouldIncludeId: true});
// Unacknowledged writes have no session id.
@@ -88,43 +88,43 @@ function runTest() {
// Further commands run on the same database should reuse the implicit session.
inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
+ assert.commandWorked(coll.insert({x: 1}));
}, {shouldIncludeId: true, expectedId: implicitId});
// New collections from the same database should inherit the implicit session.
const collTwo = testDB.getCollection("bar");
inspectCommandForSessionId(function() {
- assert.writeOK(collTwo.insert({x: 1}));
+ assert.commandWorked(collTwo.insert({x: 1}));
}, {shouldIncludeId: true, expectedId: implicitId});
// Sibling databases should inherit the implicit session.
let siblingColl = testDB.getSiblingDB("foo").getCollection("bar");
inspectCommandForSessionId(function() {
- assert.writeOK(siblingColl.insert({x: 1}));
+ assert.commandWorked(siblingColl.insert({x: 1}));
}, {shouldIncludeId: true, expectedId: implicitId});
// A new database from the same connection should inherit the implicit session.
const newCollSameConn = conn.getDB("testTwo").getCollection("foo");
inspectCommandForSessionId(function() {
- assert.writeOK(newCollSameConn.insert({x: 1}));
+ assert.commandWorked(newCollSameConn.insert({x: 1}));
}, {shouldIncludeId: true, expectedId: implicitId});
// A new database from a new connection should use a different implicit session.
const newCollNewConn = new Mongo(conn.host).getDB("test").getCollection("foo");
inspectCommandForSessionId(function() {
- assert.writeOK(newCollNewConn.insert({x: 1}));
+ assert.commandWorked(newCollNewConn.insert({x: 1}));
}, {shouldIncludeId: true, differentFromId: implicitId});
// The original implicit session should still live on the first database.
inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
+ assert.commandWorked(coll.insert({x: 1}));
}, {shouldIncludeId: true, expectedId: implicitId});
// Databases created from an explicit session should override any implicit sessions.
const session = conn.startSession();
const sessionColl = session.getDatabase("test").getCollection("foo");
const explicitId = inspectCommandForSessionId(function() {
- assert.writeOK(sessionColl.insert({x: 1}));
+ assert.commandWorked(sessionColl.insert({x: 1}));
}, {shouldIncludeId: true, differentFromId: implicitId});
assert(bsonBinaryEqual(session.getSessionId(), explicitId),
@@ -137,14 +137,14 @@ function runTest() {
// The original implicit session should still live on the first database.
inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
+ assert.commandWorked(coll.insert({x: 1}));
}, {shouldIncludeId: true, expectedId: implicitId});
// New databases on the same connection as the explicit session should still inherit the
// original implicit session.
const newCollSameConnAfter = conn.getDB("testThree").getCollection("foo");
inspectCommandForSessionId(function() {
- assert.writeOK(newCollSameConnAfter.insert({x: 1}));
+ assert.commandWorked(newCollSameConnAfter.insert({x: 1}));
}, {shouldIncludeId: true, expectedId: implicitId});
session.endSession();
@@ -158,13 +158,13 @@ function runTestTransitionToDisabled() {
// Existing implicit sessions should be erased when the disable flag is set.
const coll = conn.getDB("test").getCollection("foo");
const implicitId = inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
+ assert.commandWorked(coll.insert({x: 1}));
}, {shouldIncludeId: true});
TestData.disableImplicitSessions = true;
inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
+ assert.commandWorked(coll.insert({x: 1}));
}, {shouldIncludeId: false});
// After the flag is unset, databases using existing connections with implicit sessions will
@@ -173,30 +173,30 @@ function runTestTransitionToDisabled() {
TestData.disableImplicitSessions = false;
inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
+ assert.commandWorked(coll.insert({x: 1}));
}, {shouldIncludeId: true, expectedId: implicitId});
const newColl = conn.getDB("test").getCollection("foo");
inspectCommandForSessionId(function() {
- assert.writeOK(newColl.insert({x: 1}));
+ assert.commandWorked(newColl.insert({x: 1}));
}, {shouldIncludeId: true, expectedId: implicitId});
const newCollNewConn = new Mongo(conn.host).getDB("test").getCollection("foo");
inspectCommandForSessionId(function() {
- assert.writeOK(newCollNewConn.insert({x: 1}));
+ assert.commandWorked(newCollNewConn.insert({x: 1}));
}, {shouldIncludeId: true, differentFromId: implicitId});
// Explicit sessions should not be affected by the disable flag being set.
const session = conn.startSession();
const sessionColl = session.getDatabase("test").getCollection("foo");
const explicitId = inspectCommandForSessionId(function() {
- assert.writeOK(sessionColl.insert({x: 1}));
+ assert.commandWorked(sessionColl.insert({x: 1}));
}, {shouldIncludeId: true});
TestData.disableImplicitSessions = true;
inspectCommandForSessionId(function() {
- assert.writeOK(sessionColl.insert({x: 1}));
+ assert.commandWorked(sessionColl.insert({x: 1}));
}, {shouldIncludeId: true, expectedId: explicitId});
session.endSession();
@@ -210,14 +210,14 @@ function runTestDisabled() {
// Commands run without an explicit session should not use an implicit one.
const coll = conn.getDB("test").getCollection("foo");
inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
+ assert.commandWorked(coll.insert({x: 1}));
}, {shouldIncludeId: false});
// Explicit sessions should still include session ids.
const session = conn.startSession();
const sessionColl = session.getDatabase("test").getCollection("foo");
inspectCommandForSessionId(function() {
- assert.writeOK(sessionColl.insert({x: 1}));
+ assert.commandWorked(sessionColl.insert({x: 1}));
}, {shouldIncludeId: true});
// Commands run in a parallel shell inherit the disable flag.
@@ -225,7 +225,7 @@ function runTestDisabled() {
const awaitShell = startParallelShell(function() {
const parallelColl = db.getCollection("foo");
TestData.inspectCommandForSessionId(function() {
- assert.writeOK(parallelColl.insert({x: 1}));
+ assert.commandWorked(parallelColl.insert({x: 1}));
}, {shouldIncludeId: false});
}, conn.port);
awaitShell();
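For contrast with the implicit ids inspected above, the explicit-session flow the test exercises reduces to the following (conn as in the test):

    const session = conn.startSession();
    const sessionColl = session.getDatabase("test").getCollection("foo");
    assert.commandWorked(sessionColl.insert({x: 1}));  // carries lsid: session.getSessionId()
    session.endSession();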
diff --git a/jstests/noPassthrough/index_killop_standalone.js b/jstests/noPassthrough/index_killop_standalone.js
index 6692eda9e31..be4a3aff1e1 100644
--- a/jstests/noPassthrough/index_killop_standalone.js
+++ b/jstests/noPassthrough/index_killop_standalone.js
@@ -11,7 +11,7 @@ assert.neq(null, conn, "mongod was unable to start up");
const testDB = conn.getDB("test");
assert.commandWorked(testDB.dropDatabase());
-assert.writeOK(testDB.test.insert({a: 1}));
+assert.commandWorked(testDB.test.insert({a: 1}));
const coll = testDB.test;
// Test that building an index with 'options' can be aborted using killop.
diff --git a/jstests/noPassthrough/index_partial_no_explain_cmds.js b/jstests/noPassthrough/index_partial_no_explain_cmds.js
index f1295e5531c..9d0dc8eb246 100644
--- a/jstests/noPassthrough/index_partial_no_explain_cmds.js
+++ b/jstests/noPassthrough/index_partial_no_explain_cmds.js
@@ -10,8 +10,8 @@ coll.drop();
assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}}));
-assert.writeOK(coll.insert({_id: 1, x: 5, a: 2})); // Not in index.
-assert.writeOK(coll.insert({_id: 2, x: 6, a: 1})); // In index.
+assert.commandWorked(coll.insert({_id: 1, x: 5, a: 2})); // Not in index.
+assert.commandWorked(coll.insert({_id: 2, x: 6, a: 1})); // In index.
// Verify we will throw if the partial index can't be used.
assert.throws(function() {
diff --git a/jstests/noPassthrough/index_stepdown_after_init.js b/jstests/noPassthrough/index_stepdown_after_init.js
index 4289d18ec8a..d104cd4693c 100644
--- a/jstests/noPassthrough/index_stepdown_after_init.js
+++ b/jstests/noPassthrough/index_stepdown_after_init.js
@@ -39,7 +39,7 @@ if (!enableIndexBuildsCoordinator) {
return;
}
-assert.writeOK(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1}));
assert.commandWorked(primary.adminCommand(
{configureFailPoint: 'hangAfterInitializingIndexBuild', mode: 'alwaysOn'}));
diff --git a/jstests/noPassthrough/index_stepdown_during_scan.js b/jstests/noPassthrough/index_stepdown_during_scan.js
index e8d003506e9..9628318c208 100644
--- a/jstests/noPassthrough/index_stepdown_during_scan.js
+++ b/jstests/noPassthrough/index_stepdown_during_scan.js
@@ -27,7 +27,7 @@ const primary = rst.getPrimary();
const testDB = primary.getDB('test');
const coll = testDB.getCollection('test');
-assert.writeOK(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1}));
IndexBuildTest.pauseIndexBuilds(primary);
diff --git a/jstests/noPassthrough/index_version_v2.js b/jstests/noPassthrough/index_version_v2.js
index 886c7c39590..16000891cc9 100644
--- a/jstests/noPassthrough/index_version_v2.js
+++ b/jstests/noPassthrough/index_version_v2.js
@@ -57,7 +57,7 @@ indexSpec = getIndexSpecByName(testDB.collation, "withCollation");
assert.eq(2, indexSpec.v, tojson(indexSpec));
// Test that indexing decimal data succeeds.
-assert.writeOK(testDB.decimal.insert({_id: new NumberDecimal("42")}));
+assert.commandWorked(testDB.decimal.insert({_id: new NumberDecimal("42")}));
//
// Index version v=1
@@ -97,7 +97,7 @@ if (storageEnginesUsingKeyString.has(storageEngine)) {
assert.writeErrorWithCode(testDB.decimal.insert({num: new NumberDecimal("42")}),
ErrorCodes.UnsupportedFormat);
} else {
- assert.writeOK(testDB.decimal.insert({num: new NumberDecimal("42")}));
+ assert.commandWorked(testDB.decimal.insert({num: new NumberDecimal("42")}));
}
//
diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js
index 1b06b881e30..c2bdf180be3 100644
--- a/jstests/noPassthrough/indexbg1.js
+++ b/jstests/noPassthrough/indexbg1.js
@@ -53,7 +53,7 @@ while (1) { // if indexing finishes before we can run checks, try indexing w/ m
for (var i = 0; i < size; ++i) {
bulk.insert({i: i});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
assert.eq(size, t.count());
bgIndexBuildPid = doParallel(fullName + ".ensureIndex( {i:1}, {background:true} )");
@@ -76,14 +76,14 @@ while (1) { // if indexing finishes before we can run checks, try indexing w/ m
assert(ex.executionStats.totalKeysExamined < 1000,
"took too long to find 100: " + tojson(ex));
- assert.writeOK(t.remove({i: 40}, true)); // table scan
- assert.writeOK(t.update({i: 10}, {i: -10})); // should scan 10
+ assert.commandWorked(t.remove({i: 40}, true)); // table scan
+ assert.commandWorked(t.update({i: 10}, {i: -10})); // should scan 10
var id = t.find().hint({$natural: -1}).next()._id;
- assert.writeOK(t.update({_id: id}, {i: -2}));
- assert.writeOK(t.save({i: -50}));
- assert.writeOK(t.save({i: size + 2}));
+ assert.commandWorked(t.update({_id: id}, {i: -2}));
+ assert.commandWorked(t.save({i: -50}));
+ assert.commandWorked(t.save({i: size + 2}));
assert.eq(size + 1, t.count());
diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js
index c7a119048ec..ec873eccf7e 100644
--- a/jstests/noPassthrough/indexbg2.js
+++ b/jstests/noPassthrough/indexbg2.js
@@ -58,7 +58,7 @@ let turnFailPointOff = function(failPointName) {
// for the duration of the build.
let failOnExistingDuplicateValue = function(coll) {
let duplicateKey = 0;
- assert.writeOK(coll.save({i: duplicateKey}));
+ assert.commandWorked(coll.save({i: duplicateKey}));
let bgIndexBuildPid = indexBuild();
waitProgram(bgIndexBuildPid);
@@ -81,7 +81,7 @@ let failOnInsertedDuplicateValue = function(coll) {
jsTestLog("Waiting to hang before index build of i=" + duplicateKey);
checkLog.contains(conn, "Hanging before index build of i=" + duplicateKey);
- assert.writeOK(coll.save({i: duplicateKey}));
+ assert.commandWorked(coll.save({i: duplicateKey}));
} finally {
turnFailPointOff("hangBeforeIndexBuildOf");
}
@@ -135,7 +135,7 @@ let doTest = function() {
coll.drop();
for (let i = 0; i < size; ++i) {
- assert.writeOK(coll.save({i: i}));
+ assert.commandWorked(coll.save({i: i}));
}
assert.eq(size, coll.count());
assert.eq(1, coll.getIndexes().length, "_id index should already exist");
diff --git a/jstests/noPassthrough/indexbg_drop.js b/jstests/noPassthrough/indexbg_drop.js
index 6ee8e47a54d..ff192c9f3d4 100644
--- a/jstests/noPassthrough/indexbg_drop.js
+++ b/jstests/noPassthrough/indexbg_drop.js
@@ -45,7 +45,7 @@ var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
for (i = 0; i < size; ++i) {
bulk.insert({i: Random.rand()});
}
-assert.writeOK(bulk.execute({w: 2, wtimeout: replTest.kDefaultTimeoutMS}));
+assert.commandWorked(bulk.execute({w: 2, wtimeout: replTest.kDefaultTimeoutMS}));
assert.commandWorked(
secondDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
diff --git a/jstests/noPassthrough/indexbg_killop_apply_ops.js b/jstests/noPassthrough/indexbg_killop_apply_ops.js
index dc35b1d0b98..0a41b18749c 100644
--- a/jstests/noPassthrough/indexbg_killop_apply_ops.js
+++ b/jstests/noPassthrough/indexbg_killop_apply_ops.js
@@ -29,7 +29,7 @@ const primary = rst.getPrimary();
const testDB = primary.getDB('test');
const coll = testDB.getCollection('test');
-assert.writeOK(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1}));
IndexBuildTest.pauseIndexBuilds(primary);
diff --git a/jstests/noPassthrough/indexbg_killop_primary.js b/jstests/noPassthrough/indexbg_killop_primary.js
index 27042c77b5b..cad36b1f688 100644
--- a/jstests/noPassthrough/indexbg_killop_primary.js
+++ b/jstests/noPassthrough/indexbg_killop_primary.js
@@ -26,7 +26,7 @@ const primary = rst.getPrimary();
const testDB = primary.getDB('test');
const coll = testDB.getCollection('test');
-assert.writeOK(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1}));
IndexBuildTest.pauseIndexBuilds(primary);
diff --git a/jstests/noPassthrough/indexbg_killop_primary_after_init.js b/jstests/noPassthrough/indexbg_killop_primary_after_init.js
index 7cd5c318380..2fe700533fc 100644
--- a/jstests/noPassthrough/indexbg_killop_primary_after_init.js
+++ b/jstests/noPassthrough/indexbg_killop_primary_after_init.js
@@ -39,7 +39,7 @@ if (!enableIndexBuildsCoordinator) {
return;
}
-assert.writeOK(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1}));
assert.commandWorked(primary.adminCommand(
{configureFailPoint: 'hangAfterInitializingIndexBuild', mode: 'alwaysOn'}));
diff --git a/jstests/noPassthrough/indexbg_killop_secondary.js b/jstests/noPassthrough/indexbg_killop_secondary.js
index 261d65788de..f83077de565 100644
--- a/jstests/noPassthrough/indexbg_killop_secondary.js
+++ b/jstests/noPassthrough/indexbg_killop_secondary.js
@@ -26,7 +26,7 @@ const primary = rst.getPrimary();
const testDB = primary.getDB('test');
const coll = testDB.getCollection('test');
-assert.writeOK(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1}));
const secondary = rst.getSecondary();
IndexBuildTest.pauseIndexBuilds(secondary);
diff --git a/jstests/noPassthrough/indexbg_shutdown.js b/jstests/noPassthrough/indexbg_shutdown.js
index 88007a29e1a..7907780140c 100644
--- a/jstests/noPassthrough/indexbg_shutdown.js
+++ b/jstests/noPassthrough/indexbg_shutdown.js
@@ -47,7 +47,7 @@ var bulk = masterColl.initializeUnorderedBulkOp();
for (var i = 0; i < size; ++i) {
bulk.insert({i: i, j: i * i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
IndexBuildTest.pauseIndexBuilds(second);
diff --git a/jstests/noPassthrough/initial_sync_wt_cache_full.js b/jstests/noPassthrough/initial_sync_wt_cache_full.js
index 90d19a172ab..fb47472a674 100644
--- a/jstests/noPassthrough/initial_sync_wt_cache_full.js
+++ b/jstests/noPassthrough/initial_sync_wt_cache_full.js
@@ -34,7 +34,7 @@ const numDocs = 2;
const minDocSizeMB = 10;
for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
+ assert.commandWorked(
coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
{writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
}
@@ -57,7 +57,7 @@ checkLog.contains(secondary,
'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
for (let i = 0; i < numDocs; ++i) {
for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
+ assert.commandWorked(coll.update({_id: i}, {$inc: {i: 1}}));
}
}
diff --git a/jstests/noPassthrough/js_protection.js b/jstests/noPassthrough/js_protection.js
index 7783488a663..2937e488143 100644
--- a/jstests/noPassthrough/js_protection.js
+++ b/jstests/noPassthrough/js_protection.js
@@ -62,7 +62,7 @@ function assertNoStoredWhere() {
{$set: {y: 100}},
false,
true);
- assert.writeOK(res);
+ assert.commandWorked(res);
doc = t.findOne({name: "testdoc"});
assert.neq(null, doc);
diff --git a/jstests/noPassthrough/js_protection_roundtrip.js b/jstests/noPassthrough/js_protection_roundtrip.js
index 5c0c0b4da10..0abef97ebc2 100644
--- a/jstests/noPassthrough/js_protection_roundtrip.js
+++ b/jstests/noPassthrough/js_protection_roundtrip.js
@@ -48,7 +48,7 @@ var result = t.insert({
return "yes";
}
});
-assert.writeOK(result);
+assert.commandWorked(result);
testFunctionUnmarshall(true, withJavaScriptProtection);
testFunctionUnmarshall(false, withoutJavaScriptProtection);
diff --git a/jstests/noPassthrough/killop.js b/jstests/noPassthrough/killop.js
index 53f14b1f838..50445470323 100644
--- a/jstests/noPassthrough/killop.js
+++ b/jstests/noPassthrough/killop.js
@@ -12,7 +12,7 @@ const collName = "test";
function runTest(conn, shardConn) {
const db = conn.getDB(dbName);
assert.commandWorked(db.dropDatabase());
- assert.writeOK(db.getCollection(collName).insert({x: 1}));
+ assert.commandWorked(db.getCollection(collName).insert({x: 1}));
assert.commandWorked(
shardConn.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
diff --git a/jstests/noPassthrough/latency_includes_lock_acquisition_time.js b/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
index 5b1757188e7..1532954f96a 100644
--- a/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
+++ b/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
@@ -45,7 +45,7 @@ function runTests() {
// lines, we are just verifying that the log line appears, which implies that the recorded
// latency exceeds slowms.
runWithWait(hangMillis, function() {
- assert.writeOK(testColl.insert({a: 1}));
+ assert.commandWorked(testColl.insert({a: 1}));
});
let profileEntry;
if (conn.writeMode() === "commands") {
@@ -66,7 +66,7 @@ function runTests() {
// Test that update profiler/logs include lock acquisition time.
runWithWait(hangMillis, function() {
- assert.writeOK(testColl.update({}, {$set: {b: 1}}));
+ assert.commandWorked(testColl.update({}, {$set: {b: 1}}));
});
profileEntry = getLatestProfilerEntry(testDB, {
ns: testColl.getFullName(),
@@ -87,7 +87,7 @@ function runTests() {
checkLog.contains(conn, "find { find: \"lock_acquisition_time\"");
// Test that getMore profiler/logs include lock acquisition time.
- assert.writeOK(testColl.insert([{a: 2}, {a: 3}]));
+ assert.commandWorked(testColl.insert([{a: 2}, {a: 3}]));
runWithWait(hangMillis, function() {
// Include a batchSize in order to ensure that a getMore is issued.
assert.eq(3, testColl.find().batchSize(2).itcount());
@@ -98,7 +98,7 @@ function runTests() {
});
assert.gte(profileEntry.millis, hangMillis - padding);
checkLog.contains(conn, "originatingCommand: { find: \"lock_acquisition_time\"");
- assert.writeOK(testColl.remove({a: {$gt: 1}}));
+ assert.commandWorked(testColl.remove({a: {$gt: 1}}));
// Test that aggregate profiler/logs include lock acquisition time.
runWithWait(hangMillis, function() {
@@ -135,7 +135,7 @@ function runTests() {
// Test that delete profiler/logs include lock acquisition time.
runWithWait(hangMillis, function() {
- assert.writeOK(testColl.remove({b: 1}));
+ assert.commandWorked(testColl.remove({b: 1}));
});
profileEntry = getLatestProfilerEntry(testDB, {
ns: testColl.getFullName(),
diff --git a/jstests/noPassthrough/libs/backup_restore.js b/jstests/noPassthrough/libs/backup_restore.js
index 37411b9d061..b09eb6ea937 100644
--- a/jstests/noPassthrough/libs/backup_restore.js
+++ b/jstests/noPassthrough/libs/backup_restore.js
@@ -83,20 +83,20 @@ var BackupRestoreTest = function(options) {
doc: largeValue.substring(0, match % largeValue.length),
});
}
- assert.writeOK(bulk.execute(writeConcern));
+ assert.commandWorked(bulk.execute(writeConcern));
} else if (op < 0.4) {
// 20% of the operations: update docs.
var updateOpts = {upsert: true, multi: true, writeConcern: writeConcern};
- assert.writeOK(coll.update({x: {$gte: match}},
- {$inc: {x: baseNum}, $set: {n: 'hello'}},
- updateOpts));
+ assert.commandWorked(coll.update({x: {$gte: match}},
+ {$inc: {x: baseNum}, $set: {n: 'hello'}},
+ updateOpts));
} else if (op < 0.9) {
// 50% of the operations: find matching docs.
// itcount() consumes the cursor
coll.find({x: {$gte: match}}).itcount();
} else {
// 10% of the operations: remove matching docs.
- assert.writeOK(
+ assert.commandWorked(
coll.remove({x: {$gte: match}}, {writeConcern: writeConcern}));
}
} catch (e) {
@@ -393,7 +393,7 @@ var BackupRestoreTest = function(options) {
jsTestLog('Inserting single document into primary ' + primary.host +
' with writeConcern w:' + rst.nodes.length);
- var writeResult = assert.writeOK(primary.getDB("test").foo.insert(
+ var writeResult = assert.commandWorked(primary.getDB("test").foo.insert(
{}, {writeConcern: {w: rst.nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
// Stop set.
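
The reindented update above uses the options-object form of update(), which bundles upsert, multi, and the write concern into a single argument instead of trailing booleans. A sketch of that form, with the filter and write concern values invented for illustration:

    var updateOpts = {upsert: true, multi: true, writeConcern: {w: 1}};
    assert.commandWorked(coll.update({x: {$gte: 0}},
                                     {$inc: {x: 1}, $set: {n: 'hello'}},
                                     updateOpts));
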
diff --git a/jstests/noPassthrough/libs/concurrent_rename.js b/jstests/noPassthrough/libs/concurrent_rename.js
index 79d1a0074c3..11d6d8a925b 100644
--- a/jstests/noPassthrough/libs/concurrent_rename.js
+++ b/jstests/noPassthrough/libs/concurrent_rename.js
@@ -4,7 +4,7 @@
function doRenames(dbName, collName, otherName) {
const repeatRename = 200;
// Signal to the parent shell that the parallel shell has started.
- assert.writeOK(db.await_data.insert({_id: "signal parent shell"}));
+ assert.commandWorked(db.await_data.insert({_id: "signal parent shell"}));
let renameDB = db.getSiblingDB(dbName);
for (let i = 0; i < repeatRename; i++) {
// Rename the collection back and forth.
@@ -12,5 +12,5 @@ function doRenames(dbName, collName, otherName) {
assert.commandWorked(renameDB[otherName].renameCollection(collName));
}
// Signal to the parent shell that the renames have completed.
- assert.writeOK(db.await_data.insert({_id: "rename has ended"}));
+ assert.commandWorked(db.await_data.insert({_id: "rename has ended"}));
}
diff --git a/jstests/noPassthrough/list_indexes_with_build_uuids.js b/jstests/noPassthrough/list_indexes_with_build_uuids.js
index a52b58578a5..102f6560c1e 100644
--- a/jstests/noPassthrough/list_indexes_with_build_uuids.js
+++ b/jstests/noPassthrough/list_indexes_with_build_uuids.js
@@ -19,7 +19,7 @@ function addTestDocuments(db) {
for (var i = 0; i < size; ++i) {
bulk.insert({i: i, j: i * i});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}
let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2});
diff --git a/jstests/noPassthrough/log_find_getmore.js b/jstests/noPassthrough/log_find_getmore.js
index dc7f6d83c91..a3a2ecf7d7d 100644
--- a/jstests/noPassthrough/log_find_getmore.js
+++ b/jstests/noPassthrough/log_find_getmore.js
@@ -39,7 +39,7 @@ const coll = testDB.test;
assert.commandWorked(testDB.dropDatabase());
for (let i = 1; i <= 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
assert.commandWorked(coll.createIndex({a: 1}));
diff --git a/jstests/noPassthrough/logical_session_cache_find_getmore.js b/jstests/noPassthrough/logical_session_cache_find_getmore.js
index 4857443d032..c8e0f5bfbae 100644
--- a/jstests/noPassthrough/logical_session_cache_find_getmore.js
+++ b/jstests/noPassthrough/logical_session_cache_find_getmore.js
@@ -6,8 +6,8 @@ TestData.disableImplicitSessions = true;
var conn = MongoRunner.runMongod({setParameter: {maxSessions: 2}});
var testDB = conn.getDB("test");
-assert.writeOK(testDB.foo.insert({data: 1}));
-assert.writeOK(testDB.foo.insert({data: 2}));
+assert.commandWorked(testDB.foo.insert({data: 1}));
+assert.commandWorked(testDB.foo.insert({data: 2}));
for (var i = 0; i < 2; i++) {
var session = conn.startSession();
diff --git a/jstests/noPassthrough/logical_session_cursor_checks.js b/jstests/noPassthrough/logical_session_cursor_checks.js
index 5664fb1ef28..f2af849ab68 100644
--- a/jstests/noPassthrough/logical_session_cursor_checks.js
+++ b/jstests/noPassthrough/logical_session_cursor_checks.js
@@ -16,8 +16,8 @@ function runFixture(Fixture) {
admin.logout();
data.auth("user0", "password");
- assert.writeOK(data.test.insert({name: "first", data: 1}));
- assert.writeOK(data.test.insert({name: "second", data: 2}));
+ assert.commandWorked(data.test.insert({name: "first", data: 1}));
+ assert.commandWorked(data.test.insert({name: "second", data: 2}));
// Test that getMore works correctly on the same session.
{
diff --git a/jstests/noPassthrough/max_bson_depth_parameter.js b/jstests/noPassthrough/max_bson_depth_parameter.js
index bd39676bb98..3aef0995d34 100644
--- a/jstests/noPassthrough/max_bson_depth_parameter.js
+++ b/jstests/noPassthrough/max_bson_depth_parameter.js
@@ -20,8 +20,8 @@ assert.commandFailedWithCode(
"Expected server to reject command for exceeding the nesting depth limit");
// Confirm depth limits for $lookup.
-assert.writeOK(testDB.coll1.insert({_id: 1}));
-assert.writeOK(testDB.coll2.insert({_id: 1}));
+assert.commandWorked(testDB.coll1.insert({_id: 1}));
+assert.commandWorked(testDB.coll2.insert({_id: 1}));
assert.commandWorked(testDB.runCommand({
aggregate: "coll1",
diff --git a/jstests/noPassthrough/minvalid.js b/jstests/noPassthrough/minvalid.js
index 462016a2e73..0691908fbb2 100644
--- a/jstests/noPassthrough/minvalid.js
+++ b/jstests/noPassthrough/minvalid.js
@@ -22,7 +22,7 @@ var lastOp = local.oplog.rs.find().sort({$natural: -1}).limit(1).next();
printjson(lastOp);
print("3: change minvalid");
-assert.writeOK(local.replset.minvalid.update(
+assert.commandWorked(local.replset.minvalid.update(
{}, {$set: {ts: new Timestamp(lastOp.ts.t, lastOp.ts.i + 1)}}, {upsert: true}));
printjson(local.replset.minvalid.findOne());
diff --git a/jstests/noPassthrough/minvalid2.js b/jstests/noPassthrough/minvalid2.js
index b5f29a8a97c..5a731f0ffb7 100644
--- a/jstests/noPassthrough/minvalid2.js
+++ b/jstests/noPassthrough/minvalid2.js
@@ -54,7 +54,7 @@ print("2: shut down slave");
replTest.stop(slaveId);
print("3: write to master");
-assert.writeOK(mdb.foo.insert({a: 1001}, {writeConcern: {w: 1}}));
+assert.commandWorked(mdb.foo.insert({a: 1001}, {writeConcern: {w: 1}}));
print("4: modify master's minvalid");
var local = master.getDB("local");
diff --git a/jstests/noPassthrough/noncapped_oplog_creation.js b/jstests/noPassthrough/noncapped_oplog_creation.js
index 577074e1bb9..9d2f0865fdb 100644
--- a/jstests/noPassthrough/noncapped_oplog_creation.js
+++ b/jstests/noPassthrough/noncapped_oplog_creation.js
@@ -28,7 +28,7 @@ assert.writeError(localDB.oplog.rs.insert({}));
assert.commandFailed(localDB.runCommand({godinsert: 'oplog.$main', obj: {}}));
// Test that creating a non-capped oplog collection fails when using $out.
-assert.writeOK(localDB.input.insert({}));
+assert.commandWorked(localDB.input.insert({}));
assert.commandFailed(localDB.runCommand({
aggregate: 'input',
pipeline: [{$out: 'oplog.aggregation'}],
diff --git a/jstests/noPassthrough/ns1.js b/jstests/noPassthrough/ns1.js
index 63c7baacb0f..2a9a1659244 100644
--- a/jstests/noPassthrough/ns1.js
+++ b/jstests/noPassthrough/ns1.js
@@ -8,7 +8,7 @@ const check = function(n, isNew) {
var coll = mydb["x" + n];
if (isNew) {
assert.eq(0, coll.count(), "pop a: " + n);
- assert.writeOK(coll.insert({_id: n}));
+ assert.commandWorked(coll.insert({_id: n}));
}
assert.eq(1, coll.count(), "pop b: " + n);
assert.eq(n, coll.findOne()._id, "pop c: " + n);
diff --git a/jstests/noPassthrough/predictive_connpool.js b/jstests/noPassthrough/predictive_connpool.js
index d92d1ba9a2f..2ed10ecfac5 100644
--- a/jstests/noPassthrough/predictive_connpool.js
+++ b/jstests/noPassthrough/predictive_connpool.js
@@ -131,9 +131,9 @@ function walkThroughBehavior({primaryFollows, secondaryFollows}) {
dropConnections();
}
-assert.writeOK(mongos.test.insert({x: 1}));
-assert.writeOK(mongos.test.insert({x: 2}));
-assert.writeOK(mongos.test.insert({x: 3}));
+assert.commandWorked(mongos.test.insert({x: 1}));
+assert.commandWorked(mongos.test.insert({x: 2}));
+assert.commandWorked(mongos.test.insert({x: 3}));
st.rs0.awaitReplication();
jsTestLog("Following disabled");
diff --git a/jstests/noPassthrough/profile_agg_multiple_batches.js b/jstests/noPassthrough/profile_agg_multiple_batches.js
index 6d21e254bde..62a2f713a93 100644
--- a/jstests/noPassthrough/profile_agg_multiple_batches.js
+++ b/jstests/noPassthrough/profile_agg_multiple_batches.js
@@ -21,7 +21,7 @@ const coll = testDB.getCollection("coll");
testDB.setProfilingLevel(2);
for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
+ assert.commandWorked(coll.insert({a: i, b: i}));
}
assert.commandWorked(coll.createIndex({a: 1}));
diff --git a/jstests/noPassthrough/query_yield_reset_timer.js b/jstests/noPassthrough/query_yield_reset_timer.js
index cd7d9cf7d16..5af89432957 100644
--- a/jstests/noPassthrough/query_yield_reset_timer.js
+++ b/jstests/noPassthrough/query_yield_reset_timer.js
@@ -33,7 +33,7 @@ assert.commandWorked(coll.getDB().adminCommand({
// timing-based yield (incorrect accounting for timing-based yields was the cause for
// SERVER-21341).
for (var i = 0; i < 40; ++i) {
- assert.writeOK(coll.insert({}));
+ assert.commandWorked(coll.insert({}));
}
var explainRes = coll.find().explain("executionStats");
// We expect 4 yields, but we throw in a fudge factor of 2 for test reliability. We also can
diff --git a/jstests/noPassthrough/readConcern_snapshot_mongos.js b/jstests/noPassthrough/readConcern_snapshot_mongos.js
index ab346a12937..472da1af4de 100644
--- a/jstests/noPassthrough/readConcern_snapshot_mongos.js
+++ b/jstests/noPassthrough/readConcern_snapshot_mongos.js
@@ -30,7 +30,7 @@ let testDB = st.getDB(dbName);
let coll = testDB.coll;
// Insert data to create the collection.
-assert.writeOK(testDB[collName].insert({x: 1}));
+assert.commandWorked(testDB[collName].insert({x: 1}));
flushRoutersAndRefreshShardMetadata(st, {ns: dbName + "." + collName, dbNames: [dbName]});
diff --git a/jstests/noPassthrough/read_majority.js b/jstests/noPassthrough/read_majority.js
index 3e03b8124ae..fb1425dea97 100644
--- a/jstests/noPassthrough/read_majority.js
+++ b/jstests/noPassthrough/read_majority.js
@@ -102,7 +102,7 @@ function testReadConcernLevel(level) {
var snapshot2 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
for (var i = 0; i < 10; i++) {
- assert.writeOK(t.insert({_id: i, version: 3}));
+ assert.commandWorked(t.insert({_id: i, version: 3}));
}
assertNoSnapshotAvailableForReadConcernLevel();
@@ -111,7 +111,7 @@ function testReadConcernLevel(level) {
assertNoSnapshotAvailableForReadConcernLevel();
- assert.writeOK(t.update({}, {$set: {version: 4}}, false, true));
+ assert.commandWorked(t.update({}, {$set: {version: 4}}, false, true));
var snapshot4 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
// Collection didn't exist in snapshot 1.
@@ -171,7 +171,7 @@ function testReadConcernLevel(level) {
assert.eq(getCursorForReadConcernLevel().itcount(), 10);
// Reindex bumps the min snapshot.
- assert.writeOK(t.bump.insert({a: 1})); // Bump timestamp.
+ assert.commandWorked(t.bump.insert({a: 1})); // Bump timestamp.
t.reIndex();
assertNoSnapshotAvailableForReadConcernLevel();
newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
diff --git a/jstests/noPassthrough/read_majority_reads.js b/jstests/noPassthrough/read_majority_reads.js
index f76363a0b28..065db43f426 100644
--- a/jstests/noPassthrough/read_majority_reads.js
+++ b/jstests/noPassthrough/read_majority_reads.js
@@ -129,15 +129,15 @@ function runTests(coll, mongodConnection) {
var getCursor = cursorTestCases[testName];
// Set up initial state.
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.save({_id: 1, state: 'before', point: [0, 0]}));
+ assert.commandWorked(coll.remove({}));
+ assert.commandWorked(coll.save({_id: 1, state: 'before', point: [0, 0]}));
setCommittedSnapshot(makeSnapshot());
// Check initial conditions.
assert.eq(getCursor(coll).next().state, 'before');
// Change state without making it committed.
- assert.writeOK(coll.save({_id: 1, state: 'after', point: [0, 0]}));
+ assert.commandWorked(coll.save({_id: 1, state: 'after', point: [0, 0]}));
// Cursor still sees old state.
assert.eq(getCursor(coll).next().state, 'before');
@@ -163,15 +163,15 @@ function runTests(coll, mongodConnection) {
var expectedAfter = nonCursorTestCases[testName].expectedAfter;
// Set up initial state.
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.save({_id: 1, state: 'before', point: [0, 0]}));
+ assert.commandWorked(coll.remove({}));
+ assert.commandWorked(coll.save({_id: 1, state: 'before', point: [0, 0]}));
setCommittedSnapshot(makeSnapshot());
// Check initial conditions.
assert.eq(getResult(coll), expectedBefore);
// Change state without making it committed.
- assert.writeOK(coll.save({_id: 1, state: 'after', point: [0, 0]}));
+ assert.commandWorked(coll.save({_id: 1, state: 'after', point: [0, 0]}));
// Cursor still sees old state.
assert.eq(getResult(coll), expectedBefore);
diff --git a/jstests/noPassthrough/recovery_wt_cache_full.js b/jstests/noPassthrough/recovery_wt_cache_full.js
index 7d7dc171296..72e36a13eb1 100644
--- a/jstests/noPassthrough/recovery_wt_cache_full.js
+++ b/jstests/noPassthrough/recovery_wt_cache_full.js
@@ -41,7 +41,7 @@ const numDocs = 2;
const minDocSizeMB = 10;
for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
+ assert.commandWorked(
coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
{writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
}
@@ -64,7 +64,7 @@ jsTestLog('Writing ' + numUpdates + ' updates to ' + numDocs +
' documents on secondary after disabling snapshots.');
for (let i = 0; i < numDocs; ++i) {
for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
+ assert.commandWorked(coll.update({_id: i}, {$inc: {i: 1}}));
}
}
diff --git a/jstests/noPassthrough/replica_set_connection_getmore.js b/jstests/noPassthrough/replica_set_connection_getmore.js
index e7167fbd5eb..a27ff808878 100644
--- a/jstests/noPassthrough/replica_set_connection_getmore.js
+++ b/jstests/noPassthrough/replica_set_connection_getmore.js
@@ -28,7 +28,7 @@ coll.drop();
// Insert several documents so that we can use a cursor to fetch them in multiple batches.
var res = coll.insert([{}, {}, {}, {}, {}]);
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(5, res.nInserted);
// Wait for the secondary to catch up because we're going to try and do reads from it.
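
Passing an array to insert() performs a batch write whose BulkWriteResult carries counters such as nInserted, which the test checks after the assertion. Condensed from the hunk above, assuming a collection handle coll:

    var res = coll.insert([{}, {}, {}, {}, {}]);
    assert.commandWorked(res);
    assert.eq(5, res.nInserted);  // all five documents were written
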
diff --git a/jstests/noPassthrough/rollback_wt_cache_full.js b/jstests/noPassthrough/rollback_wt_cache_full.js
index 6ea271b1dba..f7733a0110b 100644
--- a/jstests/noPassthrough/rollback_wt_cache_full.js
+++ b/jstests/noPassthrough/rollback_wt_cache_full.js
@@ -59,7 +59,7 @@ let CommonOps = (node) => {
jsTestLog('Inserting ' + numDocs + ' documents of ' + minDocSizeMB + ' MB each into ' +
collName + '.');
for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
+ assert.commandWorked(
coll.save({_id: i, a: 0, x: largeString},
{writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
}
@@ -72,7 +72,7 @@ let RollbackOps = (node) => {
jsTestLog('Updating ' + numDocs +
' documents on the primary. These updates will be rolled back.');
for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(coll.update({_id: i}, {$inc: {a: 1}}));
+ assert.commandWorked(coll.update({_id: i}, {$inc: {a: 1}}));
}
};
diff --git a/jstests/noPassthrough/set_step_params.js b/jstests/noPassthrough/set_step_params.js
index d3fbe5deb02..dcde5d4c0d7 100644
--- a/jstests/noPassthrough/set_step_params.js
+++ b/jstests/noPassthrough/set_step_params.js
@@ -122,9 +122,9 @@ function runSubTest(name, fun) {
updateSetParameters(stepParams);
}
-assert.writeOK(mongosDB.test.insert({x: 1}));
-assert.writeOK(mongosDB.test.insert({x: 2}));
-assert.writeOK(mongosDB.test.insert({x: 3}));
+assert.commandWorked(mongosDB.test.insert({x: 1}));
+assert.commandWorked(mongosDB.test.insert({x: 2}));
+assert.commandWorked(mongosDB.test.insert({x: 3}));
st.rs0.awaitReplication();
runSubTest("MinSize", function() {
diff --git a/jstests/noPassthrough/shell_can_use_read_concern.js b/jstests/noPassthrough/shell_can_use_read_concern.js
index f3d567960e0..73facc09214 100644
--- a/jstests/noPassthrough/shell_can_use_read_concern.js
+++ b/jstests/noPassthrough/shell_can_use_read_concern.js
@@ -97,7 +97,7 @@ function runTests({withSession}) {
{
testCommandCanBeCausallyConsistent(function() {
- assert.writeOK(coll.insert([{}, {}, {}, {}, {}]));
+ assert.commandWorked(coll.insert([{}, {}, {}, {}, {}]));
}, {expectedSession: withSession, expectedAfterClusterTime: false});
testCommandCanBeCausallyConsistent(function() {
diff --git a/jstests/noPassthrough/shell_cmd_assertions.js b/jstests/noPassthrough/shell_cmd_assertions.js
index 4bc800663f8..85d40386964 100644
--- a/jstests/noPassthrough/shell_cmd_assertions.js
+++ b/jstests/noPassthrough/shell_cmd_assertions.js
@@ -25,7 +25,7 @@ const sampleWriteConcernError = {
function setup() {
db.coll.drop();
- assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.coll.insert({_id: 1}));
}
// Raw command responses.
diff --git a/jstests/noPassthrough/shell_gossip_cluster_time.js b/jstests/noPassthrough/shell_gossip_cluster_time.js
index 119ba1e23dc..dc46a1173a7 100644
--- a/jstests/noPassthrough/shell_gossip_cluster_time.js
+++ b/jstests/noPassthrough/shell_gossip_cluster_time.js
@@ -70,7 +70,7 @@ assert(session2.getClusterTime() === undefined,
// Advance the clusterTime outside of either of the sessions.
testCommandGossipedWithClusterTime(function() {
- assert.writeOK(coll.insert({}));
+ assert.commandWorked(coll.insert({}));
}, primary.getClusterTime());
assert(session1.getClusterTime() === undefined,
@@ -82,14 +82,14 @@ assert(session2.getClusterTime() === undefined,
// since session1 hasn't been used yet.
testCommandGossipedWithClusterTime(function() {
const coll = session1.getDatabase("test").mycoll;
- assert.writeOK(coll.insert({}));
+ assert.commandWorked(coll.insert({}));
}, primary.getClusterTime());
assert.eq(session1.getClusterTime(), primary.getClusterTime());
testCommandGossipedWithClusterTime(function() {
const coll = session1.getDatabase("test").mycoll;
- assert.writeOK(coll.insert({}));
+ assert.commandWorked(coll.insert({}));
}, session1.getClusterTime());
assert(session2.getClusterTime() === undefined,
@@ -105,7 +105,7 @@ assert(primary.getClusterTime() === undefined,
session2.advanceClusterTime(session1.getClusterTime());
testCommandGossipedWithClusterTime(function() {
const coll = session2.getDatabase("test").mycoll;
- assert.writeOK(coll.insert({}));
+ assert.commandWorked(coll.insert({}));
}, session2.getClusterTime());
assert.eq(session2.getClusterTime(), primary.getClusterTime());
@@ -120,7 +120,7 @@ assert(primary.getClusterTime() === undefined,
primary.advanceClusterTime(session1.getClusterTime());
testCommandGossipedWithClusterTime(function() {
const coll = session2.getDatabase("test").mycoll;
- assert.writeOK(coll.insert({}));
+ assert.commandWorked(coll.insert({}));
}, session2.getClusterTime());
rst.stopSet();
diff --git a/jstests/noPassthrough/shell_retry_writes_uri.js b/jstests/noPassthrough/shell_retry_writes_uri.js
index bb591438280..56f23981adc 100644
--- a/jstests/noPassthrough/shell_retry_writes_uri.js
+++ b/jstests/noPassthrough/shell_retry_writes_uri.js
@@ -66,14 +66,14 @@ function runShellScript(uri, cmdArgs, insertShouldHaveTxnNumber, shellFn) {
// Tests --retryWrites command line parameter.
runShellScript(mongoUri, ["--retryWrites"], true, function flagWorks() {
assert(db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be true");
- assert.writeOK(db.coll.insert({}), "cannot insert");
+ assert.commandWorked(db.coll.insert({}), "cannot insert");
});
// The uri param should override --retryWrites.
runShellScript(
mongoUri + "?retryWrites=false", ["--retryWrites"], false, function flagOverridenByUri() {
assert(!db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be false");
- assert.writeOK(db.coll.insert({}), "cannot insert");
+ assert.commandWorked(db.coll.insert({}), "cannot insert");
});
// Even if initial connection has retryWrites=false in uri, new connections should not be
@@ -83,7 +83,7 @@ runShellScript(
let connUri = db.getMongo().host; // does not have ?retryWrites=false.
let sess = new Mongo(connUri).startSession();
assert(sess.getOptions().shouldRetryWrites(), "retryWrites should be true");
- assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+ assert.commandWorked(sess.getDatabase("test").coll.insert({}), "cannot insert");
});
// Unless that uri also specifies retryWrites.
@@ -92,7 +92,7 @@ runShellScript(
let connUri = "mongodb://" + db.getMongo().host + "/test?retryWrites=false";
let sess = new Mongo(connUri).startSession();
assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
- assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+ assert.commandWorked(sess.getDatabase("test").coll.insert({}), "cannot insert");
});
// Session options should override --retryWrites as well.
@@ -100,19 +100,19 @@ runShellScript(mongoUri, ["--retryWrites"], false, function flagOverridenByOpts(
let connUri = "mongodb://" + db.getMongo().host + "/test";
let sess = new Mongo(connUri).startSession({retryWrites: false});
assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
- assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+ assert.commandWorked(sess.getDatabase("test").coll.insert({}), "cannot insert");
});
// Test uri retryWrites parameter.
runShellScript(mongoUri + "?retryWrites=true", [], true, function uriTrueWorks() {
assert(db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be true");
- assert.writeOK(db.coll.insert({}), "cannot insert");
+ assert.commandWorked(db.coll.insert({}), "cannot insert");
});
// Test that uri retryWrites=false works.
runShellScript(mongoUri + "?retryWrites=false", [], false, function uriFalseWorks() {
assert(!db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be false");
- assert.writeOK(db.coll.insert({}), "cannot insert");
+ assert.commandWorked(db.coll.insert({}), "cannot insert");
});
// Test SessionOptions retryWrites option.
@@ -120,7 +120,7 @@ runShellScript(mongoUri, [], true, function sessOptTrueWorks() {
let connUri = "mongodb://" + db.getMongo().host + "/test";
let sess = new Mongo(connUri).startSession({retryWrites: true});
assert(sess.getOptions().shouldRetryWrites(), "retryWrites should be true");
- assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+ assert.commandWorked(sess.getDatabase("test").coll.insert({}), "cannot insert");
});
// Test that SessionOptions retryWrites:false works.
@@ -128,14 +128,14 @@ runShellScript(mongoUri, [], false, function sessOptFalseWorks() {
let connUri = "mongodb://" + db.getMongo().host + "/test";
let sess = new Mongo(connUri).startSession({retryWrites: false});
assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
- assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+ assert.commandWorked(sess.getDatabase("test").coll.insert({}), "cannot insert");
});
// Test that session option overrides uri option.
runShellScript(mongoUri + "?retryWrites=true", [], false, function sessOptOverridesUri() {
let sess = db.getMongo().startSession({retryWrites: false});
assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
- assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+ assert.commandWorked(sess.getDatabase("test").coll.insert({}), "cannot insert");
});
rst.stopSet();
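
These retryWrites tests exercise the shell's precedence rules: session options override the URI parameter, which in turn overrides the --retryWrites command-line flag. A sketch of the strongest override, mirroring the final case above:

    // Session options win over the connection string.
    let connUri = "mongodb://" + db.getMongo().host + "/test?retryWrites=true";
    let sess = new Mongo(connUri).startSession({retryWrites: false});
    assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
    assert.commandWorked(sess.getDatabase("test").coll.insert({}), "cannot insert");
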
diff --git a/jstests/noPassthrough/snapshot_reads.js b/jstests/noPassthrough/snapshot_reads.js
index 9c82a24af7e..8dd75ad75fd 100644
--- a/jstests/noPassthrough/snapshot_reads.js
+++ b/jstests/noPassthrough/snapshot_reads.js
@@ -58,7 +58,7 @@ function runTest({useCausalConsistency, establishCursorCmd, readConcern}) {
// Insert an 11th document which should not be visible to the snapshot cursor. This write is
// performed outside of the session.
- assert.writeOK(primaryDB.coll.insert({_id: 10}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(primaryDB.coll.insert({_id: 10}, {writeConcern: {w: "majority"}}));
// Fetch the first 5 documents.
res = assert.commandWorked(
diff --git a/jstests/noPassthrough/socket_disconnect_kills.js b/jstests/noPassthrough/socket_disconnect_kills.js
index 3d6eb513b24..ec6c613cd1c 100644
--- a/jstests/noPassthrough/socket_disconnect_kills.js
+++ b/jstests/noPassthrough/socket_disconnect_kills.js
@@ -129,9 +129,9 @@ function runCommand(cmd) {
function runTests(client) {
let admin = client.getDB("admin");
- assert.writeOK(client.getDB(testName).test.insert({x: 1}));
- assert.writeOK(client.getDB(testName).test.insert({x: 2}));
- assert.writeOK(client.getDB(testName).test.insert({x: 3}));
+ assert.commandWorked(client.getDB(testName).test.insert({x: 1}));
+ assert.commandWorked(client.getDB(testName).test.insert({x: 2}));
+ assert.commandWorked(client.getDB(testName).test.insert({x: 3}));
[[checkClosedEarly, runCommand({find: "test", filter: {}})],
[
diff --git a/jstests/noPassthrough/step_down_during_drop_database.js b/jstests/noPassthrough/step_down_during_drop_database.js
index 5480605b1c3..e31d876ad6a 100644
--- a/jstests/noPassthrough/step_down_during_drop_database.js
+++ b/jstests/noPassthrough/step_down_during_drop_database.js
@@ -26,7 +26,7 @@ var bulk = testDB.getCollection(collName).initializeUnorderedBulkOp();
for (var i = 0; i < size; ++i) {
bulk.insert({i: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
replSet.awaitReplication();
const failpoint = "dropDatabaseHangAfterAllCollectionsDrop";
diff --git a/jstests/noPassthrough/stepdown_query.js b/jstests/noPassthrough/stepdown_query.js
index 4e8cc001840..f52bd3c875c 100644
--- a/jstests/noPassthrough/stepdown_query.js
+++ b/jstests/noPassthrough/stepdown_query.js
@@ -29,7 +29,7 @@ function runTest(host, rst, waitForPrimary) {
var conn = new Mongo(host);
var coll = conn.getDB(dbName).getCollection(collName);
assert(!coll.exists());
- assert.writeOK(coll.insert([{}, {}, {}, {}, {}]));
+ assert.commandWorked(coll.insert([{}, {}, {}, {}, {}]));
var cursor = coll.find().batchSize(2);
// Retrieve the first batch of results.
cursor.next();
diff --git a/jstests/noPassthrough/sync_write.js b/jstests/noPassthrough/sync_write.js
index a4c0d1ebe38..a76d86ec7f0 100644
--- a/jstests/noPassthrough/sync_write.js
+++ b/jstests/noPassthrough/sync_write.js
@@ -19,7 +19,7 @@ assert.neq(null, conn, 'mongod was unable to start up');
// Now connect to the mongod, do a journaled write and abruptly stop the server.
var testDB = conn.getDB('test');
-assert.writeOK(testDB.synced.insert({synced: true}, {writeConcern: {j: true}}));
+assert.commandWorked(testDB.synced.insert({synced: true}, {writeConcern: {j: true}}));
MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
// Restart the mongod.
diff --git a/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js b/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
index 0d29b065e7b..ef604f4d887 100644
--- a/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
+++ b/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
@@ -10,7 +10,7 @@ const st = new ShardingTest({shards: 2});
const db = st.s.getDB("test");
const coll = db.capped;
assert.commandWorked(db.runCommand({create: "capped", capped: true, size: 1024}));
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
const findResult = assert.commandWorked(
db.runCommand({find: "capped", filter: {}, tailable: true, awaitData: true}));
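
Tailable cursors are only valid on capped collections, which is why the test creates one explicitly before issuing the find. A condensed sketch of the setup, assuming a database handle db:

    assert.commandWorked(db.runCommand({create: "capped", capped: true, size: 1024}));
    assert.commandWorked(db.capped.insert({}));
    // awaitData asks the server to block briefly for new data rather than
    // returning an empty batch immediately.
    const res = assert.commandWorked(
        db.runCommand({find: "capped", filter: {}, tailable: true, awaitData: true}));
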
diff --git a/jstests/noPassthrough/transaction_reaper.js b/jstests/noPassthrough/transaction_reaper.js
index 5f0536f0d7e..58198741da9 100644
--- a/jstests/noPassthrough/transaction_reaper.js
+++ b/jstests/noPassthrough/transaction_reaper.js
@@ -90,7 +90,7 @@ function Fixture(impl) {
for (var i = 0; i < nSessions; i++) {
// make a session and get it to the collection
var session = this.sessions[i];
- assert.writeOK(session.getDatabase("test").test.save({a: 1}));
+ assert.commandWorked(session.getDatabase("test").test.save({a: 1}));
}
// Ensure a write flushes a transaction
diff --git a/jstests/noPassthrough/ttl_capped.js b/jstests/noPassthrough/ttl_capped.js
index c9eabbc0df7..662e5781e03 100644
--- a/jstests/noPassthrough/ttl_capped.js
+++ b/jstests/noPassthrough/ttl_capped.js
@@ -43,7 +43,8 @@ for (var i = 0; i < numCollectionsToCreate; i++) {
// Insert a single document with a 'date' field that is already expired according to the
// index definition.
- assert.writeOK(testDB[collName].insert({date: new Date(now - expireAfterSeconds * 1000)}));
+ assert.commandWorked(
+ testDB[collName].insert({date: new Date(now - expireAfterSeconds * 1000)}));
}
// Increase the verbosity of the TTL monitor's output.
diff --git a/jstests/noPassthrough/ttl_partial_index.js b/jstests/noPassthrough/ttl_partial_index.js
index af4c9c1a7fb..61cc5e6aff1 100644
--- a/jstests/noPassthrough/ttl_partial_index.js
+++ b/jstests/noPassthrough/ttl_partial_index.js
@@ -12,8 +12,8 @@ assert.commandWorked(coll.ensureIndex(
{x: 1}, {expireAfterSeconds: 0, partialFilterExpression: {z: {$exists: true}}}));
var now = new Date();
-assert.writeOK(coll.insert({x: now, z: 2}));
-assert.writeOK(coll.insert({x: now}));
+assert.commandWorked(coll.insert({x: now, z: 2}));
+assert.commandWorked(coll.insert({x: now}));
// Wait for the TTL monitor to run at least twice (in case we weren't finished setting up our
// collection when it ran the first time).
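
The partial TTL index above expires only documents matching the partialFilterExpression; the second insert, which lacks a z field, is never indexed and so is never deleted by the TTL monitor. A sketch of the definition, assuming a collection handle coll:

    // Documents without a z field are absent from the index and never expire.
    assert.commandWorked(coll.createIndex(
        {x: 1}, {expireAfterSeconds: 0, partialFilterExpression: {z: {$exists: true}}}));
    assert.commandWorked(coll.insert({x: new Date(), z: 2}));  // eligible for TTL deletion
    assert.commandWorked(coll.insert({x: new Date()}));        // kept
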
diff --git a/jstests/noPassthrough/txn_override_causal_consistency.js b/jstests/noPassthrough/txn_override_causal_consistency.js
index ac7c9758c96..ef5605a1b2d 100644
--- a/jstests/noPassthrough/txn_override_causal_consistency.js
+++ b/jstests/noPassthrough/txn_override_causal_consistency.js
@@ -102,7 +102,7 @@ function inspectFirstCommandForAfterClusterTime(conn, cmdName, isCausal, expectR
function testInsert(conn, isCausal, expectRetry) {
inspectFirstCommandForAfterClusterTime(conn, "insert", isCausal, expectRetry, (coll) => {
- assert.writeOK(coll.insert({x: 1}));
+ assert.commandWorked(coll.insert({x: 1}));
});
}
diff --git a/jstests/noPassthrough/unsupported_change_stream_deployments.js b/jstests/noPassthrough/unsupported_change_stream_deployments.js
index c342341da69..6a4d3dcd1c6 100644
--- a/jstests/noPassthrough/unsupported_change_stream_deployments.js
+++ b/jstests/noPassthrough/unsupported_change_stream_deployments.js
@@ -32,7 +32,7 @@ function assertChangeStreamNotSupportedOnConnection(conn) {
const conn = MongoRunner.runMongod({enableMajorityReadConcern: ""});
assert.neq(null, conn, "mongod was unable to start up");
// $changeStream cannot run on a non-existent database.
-assert.writeOK(conn.getDB("test").ensure_db_exists.insert({}));
+assert.commandWorked(conn.getDB("test").ensure_db_exists.insert({}));
assertChangeStreamNotSupportedOnConnection(conn);
assert.eq(0, MongoRunner.stopMongod(conn));
diff --git a/jstests/noPassthrough/update_post_image_validation.js b/jstests/noPassthrough/update_post_image_validation.js
index ad78227a09b..0b2c2d93a4b 100644
--- a/jstests/noPassthrough/update_post_image_validation.js
+++ b/jstests/noPassthrough/update_post_image_validation.js
@@ -9,7 +9,7 @@ const testDB = conn.getDB("test");
// Test validation of elements added to an array that is represented in a "deserialized" format
// in mutablebson. The added element is invalid because it is a DBRef with a missing $id.
-assert.writeOK(testDB.coll.insert({_id: 0, a: []}));
+assert.commandWorked(testDB.coll.insert({_id: 0, a: []}));
assert.writeErrorWithCode(
testDB.coll.update({_id: 0}, {$set: {"a.1": 0, "a.0": {$ref: "coll", $db: "test"}}}),
ErrorCodes.InvalidDBRef);
@@ -18,7 +18,7 @@ assert.docEq(testDB.coll.findOne({_id: 0}), {_id: 0, a: []});
// Test validation of modified array elements that are accessed using a string that is
// numerically equivalent to their fieldname. The modified element is invalid because it is a
// DBRef with a missing $id.
-assert.writeOK(testDB.coll.insert({_id: 1, a: [0]}));
+assert.commandWorked(testDB.coll.insert({_id: 1, a: [0]}));
assert.writeErrorWithCode(
testDB.coll.update({_id: 1}, {$set: {"a.00": {$ref: "coll", $db: "test"}}}),
ErrorCodes.InvalidDBRef);
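
Writes that are expected to fail keep assert.writeErrorWithCode rather than being converted: that assertion checks the write result for a specific error code instead of for overall success. The complementary pair, condensed from the hunk above:

    assert.commandWorked(testDB.coll.insert({_id: 0, a: []}));
    // A DBRef missing its $id is rejected during update validation.
    assert.writeErrorWithCode(
        testDB.coll.update({_id: 0}, {$set: {"a.0": {$ref: "coll", $db: "test"}}}),
        ErrorCodes.InvalidDBRef);
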
diff --git a/jstests/noPassthrough/update_server-5552.js b/jstests/noPassthrough/update_server-5552.js
index dd18a14d72c..876f9688c6f 100644
--- a/jstests/noPassthrough/update_server-5552.js
+++ b/jstests/noPassthrough/update_server-5552.js
@@ -14,7 +14,7 @@ var bulk = t.initializeUnorderedBulkOp();
for (let i = 0; i < N; i++) {
bulk.insert({_id: i, x: 1});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
const join = startParallelShell(
"while( db.foo.findOne( { _id : 0 } ).x == 1 ); db.foo.ensureIndex( { x : 1 } );");
diff --git a/jstests/noPassthrough/use_disk.js b/jstests/noPassthrough/use_disk.js
index 0e91fdcdca2..f4323bf3d35 100644
--- a/jstests/noPassthrough/use_disk.js
+++ b/jstests/noPassthrough/use_disk.js
@@ -16,14 +16,14 @@ testDB.setProfilingLevel(2);
function resetCollection() {
coll.drop();
for (var i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
}
function resetForeignCollection() {
testDB.foreign.drop();
const forColl = testDB.getCollection("foreign");
for (var i = 4; i < 18; i += 2)
- assert.writeOK(forColl.insert({b: i}));
+ assert.commandWorked(forColl.insert({b: i}));
}
//
// Confirm hasSortStage with in-memory sort.
diff --git a/jstests/noPassthrough/utf8_paths.js b/jstests/noPassthrough/utf8_paths.js
index b7b17355457..e7c0f56fa5d 100644
--- a/jstests/noPassthrough/utf8_paths.js
+++ b/jstests/noPassthrough/utf8_paths.js
@@ -25,7 +25,7 @@ let testMongoD = function() {
assert.neq(null, conn, 'mongod was unable to start up');
let coll = conn.getCollection(db_name + ".foo");
- assert.writeOK(coll.insert({_id: 1}));
+ assert.commandWorked(coll.insert({_id: 1}));
MongoRunner.stopMongod(conn);
};
diff --git a/jstests/noPassthrough/views_legacy.js b/jstests/noPassthrough/views_legacy.js
index 8ded34730ae..efedb669a25 100644
--- a/jstests/noPassthrough/views_legacy.js
+++ b/jstests/noPassthrough/views_legacy.js
@@ -13,7 +13,7 @@ assert.commandWorked(viewsDB.createView("view", "collection", []));
let coll = viewsDB.getCollection("collection");
for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
conn.forceReadMode("legacy");
diff --git a/jstests/noPassthrough/wt_cache_full.js b/jstests/noPassthrough/wt_cache_full.js
index 29be77da891..71a39764f68 100644
--- a/jstests/noPassthrough/wt_cache_full.js
+++ b/jstests/noPassthrough/wt_cache_full.js
@@ -33,7 +33,7 @@ const numDocs = 2;
const minDocSizeMB = 10;
for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
+ assert.commandWorked(
coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
{writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
}
@@ -52,7 +52,7 @@ assert.commandWorked(
secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
for (let i = 0; i < numDocs; ++i) {
for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
+ assert.commandWorked(coll.update({_id: i}, {$inc: {i: 1}}));
}
}
diff --git a/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js b/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js
index 71383f91454..9fa3cd508de 100644
--- a/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js
+++ b/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js
@@ -35,7 +35,7 @@ const numDocs = 2;
const minDocSizeMB = 10;
for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
+ assert.commandWorked(
coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
{writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
}
@@ -56,7 +56,7 @@ const sessionColl = sessionDB.getCollection(coll.getName());
session.startTransaction();
for (let i = 0; i < numDocs; ++i) {
for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(sessionColl.update({_id: i}, {$inc: {i: 1}}));
+ assert.commandWorked(sessionColl.update({_id: i}, {$inc: {i: 1}}));
}
}
assert.commandWorked(session.commitTransaction_forTesting());
diff --git a/jstests/noPassthrough/wt_cache_full_restart.js b/jstests/noPassthrough/wt_cache_full_restart.js
index 29aed83c67f..94140324ddf 100644
--- a/jstests/noPassthrough/wt_cache_full_restart.js
+++ b/jstests/noPassthrough/wt_cache_full_restart.js
@@ -33,7 +33,7 @@ const numDocs = 2;
const minDocSizeMB = 10;
for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
+ assert.commandWorked(
coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
{writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
}
@@ -54,7 +54,7 @@ jsTestLog('Stopped secondary. Writing ' + numUpdates + ' updates to ' + numDocs
const startTime = Date.now();
for (let i = 0; i < numDocs; ++i) {
for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
+ assert.commandWorked(coll.update({_id: i}, {$inc: {i: 1}}));
}
}
const totalTime = Date.now() - startTime;
diff --git a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
index 3470a04e24b..34d84bf7471 100644
--- a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
+++ b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
@@ -68,7 +68,7 @@ if (storageEngine !== "wiredTiger") {
for (var j = 0; j < 100; j++) {
batch.insert({a: bigstr});
}
- assert.writeOK(batch.execute());
+ assert.commandWorked(batch.execute());
}
rst.stopSet();
}
diff --git a/jstests/noPassthrough/wt_nojournal_skip_recovery.js b/jstests/noPassthrough/wt_nojournal_skip_recovery.js
index df813e080d3..b33e354860f 100644
--- a/jstests/noPassthrough/wt_nojournal_skip_recovery.js
+++ b/jstests/noPassthrough/wt_nojournal_skip_recovery.js
@@ -38,8 +38,8 @@ var awaitShell = startParallelShell(function() {
for (var i = 0; i < 100; ++i) {
bulk.insert({unjournaled: i});
}
- assert.writeOK(bulk.execute({j: false}));
- assert.writeOK(db.nojournal.insert({journaled: loopNum}, {writeConcern: {j: true}}));
+ assert.commandWorked(bulk.execute({j: false}));
+ assert.commandWorked(db.nojournal.insert({journaled: loopNum}, {writeConcern: {j: true}}));
// Create a checkpoint slightly before the mongod is terminated.
if (loopNum === 90) {
diff --git a/jstests/noPassthrough/wt_nojournal_toggle.js b/jstests/noPassthrough/wt_nojournal_toggle.js
index 50d5483aa26..41e286b39fc 100644
--- a/jstests/noPassthrough/wt_nojournal_toggle.js
+++ b/jstests/noPassthrough/wt_nojournal_toggle.js
@@ -21,8 +21,8 @@ function insertFunctionFactory(checkpoint) {
for (var i = 0; i < 100; ++i) {
bulk.insert({unjournaled: i});
}
- assert.writeOK(bulk.execute({j: false}));
- assert.writeOK(db.nojournal.insert({journaled: iter}, {writeConcern: {j: true}}));
+ assert.commandWorked(bulk.execute({j: false}));
+ assert.commandWorked(db.nojournal.insert({journaled: iter}, {writeConcern: {j: true}}));
if (__checkpoint_template_placeholder__ && iter === 50) {
assert.commandWorked(db.adminCommand({fsync: 1}));
}
@@ -59,7 +59,7 @@ function runTest(options) {
// We saw 100 journaled inserts, but visibility does not guarantee durability, so
// do an extra journaled write to make all visible commits durable, before killing
// the mongod.
- assert.writeOK(testDB.nojournal.insert({final: true}, {writeConcern: {j: true}}));
+ assert.commandWorked(testDB.nojournal.insert({final: true}, {writeConcern: {j: true}}));
MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
return true;
}
@@ -84,7 +84,7 @@ function runTest(options) {
'journaled write operations since the last checkpoint were not replayed');
var initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
- assert.writeOK(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
+ assert.commandWorked(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
assert.eq(initialNumLogWrites,
testDB.serverStatus().wiredTiger.log['log write operations'],
'journaling is still enabled even though --nojournal was specified');
@@ -103,7 +103,7 @@ function runTest(options) {
testDB = conn.getDB('test');
initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
- assert.writeOK(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
+ assert.commandWorked(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
assert.lt(initialNumLogWrites,
testDB.serverStatus().wiredTiger.log['log write operations'],
'journaling is still disabled even though --journal was specified');
diff --git a/jstests/noPassthrough/wt_operation_stats.js b/jstests/noPassthrough/wt_operation_stats.js
index e273dd34170..f3bc4f5aa9d 100644
--- a/jstests/noPassthrough/wt_operation_stats.js
+++ b/jstests/noPassthrough/wt_operation_stats.js
@@ -51,7 +51,7 @@ if (jsTest.options().storageEngine && (jsTest.options().storageEngine !== "wired
jsTestLog("insert data");
for (let i = 0; i < 200; i++) {
- assert.writeOK(testDB.foo.insert({x: value}));
+ assert.commandWorked(testDB.foo.insert({x: value}));
}
let connport = conn.port;
diff --git a/jstests/noPassthrough/yield_during_writes.js b/jstests/noPassthrough/yield_during_writes.js
index d1e6845b58e..ab283382c5a 100644
--- a/jstests/noPassthrough/yield_during_writes.js
+++ b/jstests/noPassthrough/yield_during_writes.js
@@ -27,16 +27,16 @@ const coll = mongod.getDB('test').yield_during_writes;
coll.drop();
for (let i = 0; i < nDocsToInsert; i++) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
}
// A multi-update doing a collection scan should yield about nDocsToInsert / worksPerYield
// times.
-assert.writeOK(coll.update({}, {$inc: {counter: 1}}, {multi: true}));
+assert.commandWorked(coll.update({}, {$inc: {counter: 1}}, {multi: true}));
assert.gt(countOpYields(coll, 'update'), (nDocsToInsert / worksPerYield) - 2);
// Likewise, a multi-remove should also yield approximately every worksPerYield documents.
-assert.writeOK(coll.remove({}, {multi: true}));
+assert.commandWorked(coll.remove({}, {multi: true}));
assert.gt(countOpYields(coll, 'remove'), (nDocsToInsert / worksPerYield) - 2);
MongoRunner.stopMongod(mongod);
diff --git a/jstests/noPassthroughWithMongod/background.js b/jstests/noPassthroughWithMongod/background.js
index 7624d24471c..ef591fa5114 100644
--- a/jstests/noPassthroughWithMongod/background.js
+++ b/jstests/noPassthroughWithMongod/background.js
@@ -11,7 +11,7 @@ var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 100000; i++) {
bulk.insert({y: 'aaaaaaaaaaaa', i: i});
if (i % 10000 == 0) {
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
bulk = t.initializeUnorderedBulkOp();
print(i);
}
@@ -26,13 +26,13 @@ for (var i = 0; i < 100000; i++) {
bulk.insert({i: i});
if (i % 10000 == 0) {
printjson(db.currentOp());
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
bulk = t.initializeUnorderedBulkOp();
print(i);
}
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
printjson(db.currentOp());
diff --git a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
index e6db1e1bcb9..24b7f8858ef 100644
--- a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
+++ b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
@@ -42,7 +42,7 @@ function testInsert(docs, writeCmd, wc) {
function testFind(readCmd) {
coll.drop();
for (var i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({}));
+ assert.commandWorked(coll.insert({}));
}
var res = executeBenchRun([
@@ -54,7 +54,7 @@ function testFind(readCmd) {
function testFindOne(readCmd) {
coll.drop();
for (var i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({}));
+ assert.commandWorked(coll.insert({}));
}
var res =
diff --git a/jstests/noPassthroughWithMongod/benchrun_substitution.js b/jstests/noPassthroughWithMongod/benchrun_substitution.js
index b8562040e11..8d8eee3cc09 100644
--- a/jstests/noPassthroughWithMongod/benchrun_substitution.js
+++ b/jstests/noPassthroughWithMongod/benchrun_substitution.js
@@ -40,7 +40,7 @@ function benchrun_sub_update(use_write_command) {
}];
for (var i = 0; i < 100; ++i) {
- assert.writeOK(t.insert({x: i}));
+ assert.commandWorked(t.insert({x: i}));
}
res = benchRun({parallel: 1, seconds: 10, ops: ops, host: db.getMongo().host});
@@ -65,7 +65,7 @@ function benchrun_sub_remove(use_write_command) {
}];
for (var i = 0; i < 100; ++i) {
- assert.writeOK(t.insert({x: i}));
+ assert.commandWorked(t.insert({x: i}));
}
res = benchRun({parallel: 1, seconds: 10, ops: ops, host: db.getMongo().host});
diff --git a/jstests/noPassthroughWithMongod/btreedel.js b/jstests/noPassthroughWithMongod/btreedel.js
index 76a0f909cc0..868a26fafba 100644
--- a/jstests/noPassthroughWithMongod/btreedel.js
+++ b/jstests/noPassthroughWithMongod/btreedel.js
@@ -8,7 +8,7 @@ var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 1000000; i++) {
bulk.insert({_id: i, x: 'a b'});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
print("1 insert done count: " + t.count());
diff --git a/jstests/noPassthroughWithMongod/capped_truncate.js b/jstests/noPassthroughWithMongod/capped_truncate.js
index 25e023c890d..4fef292493c 100644
--- a/jstests/noPassthroughWithMongod/capped_truncate.js
+++ b/jstests/noPassthroughWithMongod/capped_truncate.js
@@ -23,7 +23,7 @@ assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: 0}),
"captrunc didn't return an error when attempting to remove 0 documents");
for (var j = 1; j <= 10; j++) {
- assert.writeOK(t.insert({x: j}));
+ assert.commandWorked(t.insert({x: j}));
}
// It is an error to try and remove more documents than what exist in the capped collection.
@@ -50,7 +50,7 @@ db[collName].drop();
assert.commandWorked(db.runCommand({create: collName, capped: false}));
for (var j = 1; j <= 10; j++) {
- assert.writeOK(db[collName].insert({x: j}));
+ assert.commandWorked(db[collName].insert({x: j}));
}
assert.commandFailed(db.runCommand({captrunc: collName, n: 5}),
"captrunc didn't return an error for a non-capped collection");
diff --git a/jstests/noPassthroughWithMongod/cursor_server_status_metrics.js b/jstests/noPassthroughWithMongod/cursor_server_status_metrics.js
index 865809f63b2..ab02eb6960b 100644
--- a/jstests/noPassthroughWithMongod/cursor_server_status_metrics.js
+++ b/jstests/noPassthroughWithMongod/cursor_server_status_metrics.js
@@ -4,9 +4,9 @@
(function() {
var coll = db[jsTest.name()];
coll.drop();
-assert.writeOK(coll.insert({_id: 1}));
-assert.writeOK(coll.insert({_id: 2}));
-assert.writeOK(coll.insert({_id: 3}));
+assert.commandWorked(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 2}));
+assert.commandWorked(coll.insert({_id: 3}));
assert.eq(3, coll.find().count());
diff --git a/jstests/noPassthroughWithMongod/find_and_modify_server16469.js b/jstests/noPassthroughWithMongod/find_and_modify_server16469.js
index 43f6ed3910c..70fe0651748 100644
--- a/jstests/noPassthroughWithMongod/find_and_modify_server16469.js
+++ b/jstests/noPassthroughWithMongod/find_and_modify_server16469.js
@@ -24,7 +24,7 @@ try {
largeStr += 'x';
}
for (var i = 0; i < 100; ++i) {
- assert.writeOK(coll.insert({a: largeStr, b: i}));
+ assert.commandWorked(coll.insert({a: largeStr, b: i}));
}
// Verify that an unindexed sort of this data fails with a find() if no limit is specified.
diff --git a/jstests/noPassthroughWithMongod/find_cmd.js b/jstests/noPassthroughWithMongod/find_cmd.js
index a1eac6b24f5..7ce8e52855d 100644
--- a/jstests/noPassthroughWithMongod/find_cmd.js
+++ b/jstests/noPassthroughWithMongod/find_cmd.js
@@ -25,7 +25,7 @@ assert.eq([], res.cursor.firstBatch);
if (jsTest.options().storageEngine !== "mobile") {
coll.drop();
assert.commandWorked(coll.getDB().createCollection(collname, {capped: true, size: 2048}));
- assert.writeOK(coll.insert({_id: 1}));
+ assert.commandWorked(coll.insert({_id: 1}));
res = coll.runCommand("find", {tailable: true});
assert.commandWorked(res);
assert.neq(0, res.cursor.id);
@@ -35,7 +35,7 @@ if (jsTest.options().storageEngine !== "mobile") {
// Multiple batches.
coll.drop();
for (var i = 0; i < 150; i++) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
}
res = coll.runCommand("find", {filter: {_id: {$lt: 140}}});
assert.commandWorked(res);
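Tailable cursors such as the one requested above are only valid on capped collections, which is why the hunk recreates the collection as capped first. A self-contained sketch of the same shape (collection name is illustrative):

    // Sketch: a tailable cursor stays open at end-of-data instead of returning cursor id 0.
    assert.commandWorked(db.createCollection("tail_demo", {capped: true, size: 2048}));
    assert.commandWorked(db.tail_demo.insert({_id: 1}));
    var res = assert.commandWorked(db.tail_demo.runCommand("find", {tailable: true}));
    assert.neq(0, res.cursor.id);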
diff --git a/jstests/noPassthroughWithMongod/geo_mnypts.js b/jstests/noPassthroughWithMongod/geo_mnypts.js
index 5a936a3490d..d3ae714b69b 100644
--- a/jstests/noPassthroughWithMongod/geo_mnypts.js
+++ b/jstests/noPassthroughWithMongod/geo_mnypts.js
@@ -11,7 +11,7 @@ for (var i = 0; i < totalPts; i++) {
var ii = i % 10000;
bulk.insert({loc: [ii % 100, Math.floor(ii / 100)]});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
coll.ensureIndex({loc: "2d"});
diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js
index 4d10d62a7dd..ce7f9ebf67c 100644
--- a/jstests/noPassthroughWithMongod/geo_polygon.js
+++ b/jstests/noPassthroughWithMongod/geo_polygon.js
@@ -13,7 +13,7 @@ for (x = -180; x < 180; x += .5) {
bulk.insert(o);
}
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
var numTests = 31;
for (var n = 0; n < numTests; n++) {
diff --git a/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js b/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js
index e63d4f828ae..e925222c3f7 100644
--- a/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js
+++ b/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js
@@ -8,9 +8,9 @@
const coll = db.getmore_awaitdata_opcounters;
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), {capped: true, size: 1024}));
-assert.writeOK(coll.insert({_id: 1}));
-assert.writeOK(coll.insert({_id: 2}));
-assert.writeOK(coll.insert({_id: 3}));
+assert.commandWorked(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 2}));
+assert.commandWorked(coll.insert({_id: 3}));
function getGlobalLatencyStats() {
return db.serverStatus().opLatencies.reads;
diff --git a/jstests/noPassthroughWithMongod/index_boundary_values_validate.js b/jstests/noPassthroughWithMongod/index_boundary_values_validate.js
index 5ff5a44ef93..50ab5497c32 100644
--- a/jstests/noPassthroughWithMongod/index_boundary_values_validate.js
+++ b/jstests/noPassthroughWithMongod/index_boundary_values_validate.js
@@ -6,15 +6,15 @@
var t = db.index_boundary_values_validate;
t.drop();
-assert.writeOK(t.insert({a: MaxKey, b: MaxKey}));
-assert.writeOK(t.insert({a: MaxKey, b: MinKey}));
-assert.writeOK(t.insert({a: MinKey, b: MaxKey}));
-assert.writeOK(t.insert({a: MinKey, b: MinKey}));
+assert.commandWorked(t.insert({a: MaxKey, b: MaxKey}));
+assert.commandWorked(t.insert({a: MaxKey, b: MinKey}));
+assert.commandWorked(t.insert({a: MinKey, b: MaxKey}));
+assert.commandWorked(t.insert({a: MinKey, b: MinKey}));
-assert.writeOK(t.insert({a: {}}));
-assert.writeOK(t.insert({b: {}}));
-assert.writeOK(t.insert({unindexed_field: {}}));
-assert.writeOK(t.insert({a: {}, b: {}}));
+assert.commandWorked(t.insert({a: {}}));
+assert.commandWorked(t.insert({b: {}}));
+assert.commandWorked(t.insert({unindexed_field: {}}));
+assert.commandWorked(t.insert({a: {}, b: {}}));
assert.commandWorked(t.createIndex({a: 1, b: 1}));
assert.commandWorked(t.createIndex({a: 1, b: -1}));
diff --git a/jstests/noPassthroughWithMongod/index_check10.js b/jstests/noPassthroughWithMongod/index_check10.js
index 2816eecdb4b..e0786a4e539 100644
--- a/jstests/noPassthroughWithMongod/index_check10.js
+++ b/jstests/noPassthroughWithMongod/index_check10.js
@@ -106,7 +106,7 @@ function doIt() {
for (var i = 0; i < 10000; ++i) {
bulk.insert(obj());
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
t.ensureIndex(idx);
check();
@@ -120,12 +120,12 @@ function doIt() {
}
if (Random.rand() > 0.999) {
print(i);
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
check();
bulk = t.initializeUnorderedBulkOp();
}
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
check();
}
diff --git a/jstests/noPassthroughWithMongod/index_check9.js b/jstests/noPassthroughWithMongod/index_check9.js
index fe158efbdad..8a693604bb8 100644
--- a/jstests/noPassthroughWithMongod/index_check9.js
+++ b/jstests/noPassthroughWithMongod/index_check9.js
@@ -107,7 +107,7 @@ function doIt() {
bulk.insert(obj());
if (Random.rand() > 0.999) {
print(i);
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
check();
bulk = t.initializeUnorderedBulkOp();
}
@@ -122,12 +122,12 @@ function doIt() {
}
if (Random.rand() > 0.999) {
print(i);
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
check();
bulk = t.initializeUnorderedBulkOp();
}
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
check();
}
diff --git a/jstests/noPassthroughWithMongod/index_hammer1.js b/jstests/noPassthroughWithMongod/index_hammer1.js
index 5d89223093e..d1ecd2c5213 100644
--- a/jstests/noPassthroughWithMongod/index_hammer1.js
+++ b/jstests/noPassthroughWithMongod/index_hammer1.js
@@ -5,7 +5,7 @@ t.drop();
var bulk = t.initializeUnorderedBulkOp();
for (i = 0; i < 10000; i++)
bulk.insert({x: i, y: i});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
ops = [];
diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
index d25a32e857b..218002b0cc6 100644
--- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js
+++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
@@ -73,7 +73,7 @@ for (var idx = 0; idx < dropAction.length; idx++) {
for (var i = 0; i < size; ++i) {
bulk.insert({i: i});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
jsTest.log("Starting background indexing for test of: " + JSON.stringify(dc));
masterDB.getCollection(collection).ensureIndex({i: 1}, {background: true});
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
index 446502905cb..2e5d2e85bf6 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
@@ -40,7 +40,7 @@ var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
for (var i = 0; i < size; ++i) {
bulk.insert({i: i});
}
-assert.writeOK(bulk.execute({j: true}));
+assert.commandWorked(bulk.execute({j: true}));
assert.eq(size, coll.count(), 'unexpected number of documents after bulk insert.');
// Make sure the documents make it to the secondary.
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js
index 16165ce3f96..19fbb4b4879 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js
@@ -56,7 +56,7 @@ var bulk = masterColl.initializeUnorderedBulkOp();
for (var i = 0; i < size; ++i) {
bulk.insert({i: i});
}
-assert.writeOK(bulk.execute({j: true}));
+assert.commandWorked(bulk.execute({j: true}));
assert.eq(size, masterColl.count(), 'unexpected number of documents after bulk insert.');
// Make sure the documents get replicated to the secondary.
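The two hunks above also exercise the optional write-concern argument to execute(). A sketch under the assumption of a replica set started through a hypothetical ReplSetTest handle named rst:

    // Sketch: {j: true} asks for acknowledgement only after the write is journaled.
    var bulk = rst.getPrimary().getDB("test").demo.initializeUnorderedBulkOp();
    bulk.insert({i: 0});
    assert.commandWorked(bulk.execute({j: true}));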
diff --git a/jstests/noPassthroughWithMongod/indexbg_updates.js b/jstests/noPassthroughWithMongod/indexbg_updates.js
index 5511c83074d..136c0118e0e 100644
--- a/jstests/noPassthroughWithMongod/indexbg_updates.js
+++ b/jstests/noPassthroughWithMongod/indexbg_updates.js
@@ -19,7 +19,7 @@ for (var i = 0; i < numDocs; i++) {
bulk.insert(doc);
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Perform a bulk update on a single document, targeting the updates on the
// field being actively indexed in the background
@@ -50,7 +50,7 @@ var backgroundIndexBuildShell = startParallelShell(
);
print("Do some sets and unsets");
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
print("Start background index build");
backgroundIndexBuildShell();
diff --git a/jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js b/jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js
index 10a019e95cf..a14fecd2d5b 100644
--- a/jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js
+++ b/jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js
@@ -22,7 +22,7 @@ assert.eq(res.minWireVersion, res.maxWireVersion, tojson(res));
// When the featureCompatibilityVersion is upgrading, running isMaster with internalClient
// returns minWireVersion == maxWireVersion.
-assert.writeOK(
+assert.commandWorked(
adminDB.system.version.update({_id: "featureCompatibilityVersion"},
{$set: {version: lastStableFCV, targetVersion: latestFCV}}));
res = adminDB.runCommand(isMasterCommand);
@@ -31,7 +31,7 @@ assert.eq(res.minWireVersion, res.maxWireVersion, tojson(res));
// When the featureCompatibilityVersion is downgrading, running isMaster with internalClient
// returns minWireVersion == maxWireVersion.
-assert.writeOK(
+assert.commandWorked(
adminDB.system.version.update({_id: "featureCompatibilityVersion"},
{$set: {version: lastStableFCV, targetVersion: lastStableFCV}}));
res = adminDB.runCommand(isMasterCommand);
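These hunks write to admin.system.version directly to simulate in-progress upgrade and downgrade states; for reference, the supported read path is a getParameter call. A sketch assuming the same adminDB handle as the test:

    // Sketch: reads the current featureCompatibilityVersion document.
    var fcv = assert.commandWorked(
        adminDB.runCommand({getParameter: 1, featureCompatibilityVersion: 1}));
    printjson(fcv.featureCompatibilityVersion);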
diff --git a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
index 589e072b631..52bd8b22ceb 100644
--- a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
+++ b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
@@ -21,7 +21,7 @@ for (var i = 0; i < 10; i++) {
}
expectedOutColl.push({_id: i, value: j - 1});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function mapFn() {
emit(this.idx, 1);
diff --git a/jstests/noPassthroughWithMongod/mr_writeconflict.js b/jstests/noPassthroughWithMongod/mr_writeconflict.js
index b7fd81e21c7..6dfd3189196 100644
--- a/jstests/noPassthroughWithMongod/mr_writeconflict.js
+++ b/jstests/noPassthroughWithMongod/mr_writeconflict.js
@@ -49,7 +49,7 @@ for (i = 0; i < numDocs; ++i) {
}
var res = bulk.execute();
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(numDocs, res.nInserted);
db.dest.drop();
diff --git a/jstests/noPassthroughWithMongod/no_balance_collection.js b/jstests/noPassthroughWithMongod/no_balance_collection.js
index b548ce13cd9..6549126a9dc 100644
--- a/jstests/noPassthroughWithMongod/no_balance_collection.js
+++ b/jstests/noPassthroughWithMongod/no_balance_collection.js
@@ -81,7 +81,7 @@ var bulk = collB.initializeUnorderedBulkOp();
for (var i = 0; i < 1000000; i++) {
bulk.insert({_id: i, hello: "world"});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
printjson(lastMigration);
printjson(sh._lastMigration(collB));
diff --git a/jstests/noPassthroughWithMongod/remove9.js b/jstests/noPassthroughWithMongod/remove9.js
index b7da7b58f95..ba5fd220795 100644
--- a/jstests/noPassthroughWithMongod/remove9.js
+++ b/jstests/noPassthroughWithMongod/remove9.js
@@ -7,7 +7,7 @@ pid = startMongoProgramNoConnect("mongo", "--eval", js, db ? db.getMongo().host
Random.setRandomSeed();
for (var i = 0; i < 10000; ++i) {
- assert.writeOK(t.remove({i: Random.randInt(10000)}));
+ assert.commandWorked(t.remove({i: Random.randInt(10000)}));
}
stopMongoProgramByPid(pid);
diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
index 38542deed0a..7deaf3999a8 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
+++ b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
@@ -40,13 +40,13 @@ coll.insert({_id: -2, d: data15PlusMB});
coll.insert({_id: -1, d: data15PlusMB});
// Docs of assorted sizes
-assert.writeOK(coll.insert({_id: 0, d: "x"}));
-assert.writeOK(coll.insert({_id: 1, d: data15PlusMB}));
-assert.writeOK(coll.insert({_id: 2, d: "x"}));
-assert.writeOK(coll.insert({_id: 3, d: data15MB}));
-assert.writeOK(coll.insert({_id: 4, d: "x"}));
-assert.writeOK(coll.insert({_id: 5, d: data1MB}));
-assert.writeOK(coll.insert({_id: 6, d: "x"}));
+assert.commandWorked(coll.insert({_id: 0, d: "x"}));
+assert.commandWorked(coll.insert({_id: 1, d: data15PlusMB}));
+assert.commandWorked(coll.insert({_id: 2, d: "x"}));
+assert.commandWorked(coll.insert({_id: 3, d: data15MB}));
+assert.commandWorked(coll.insert({_id: 4, d: "x"}));
+assert.commandWorked(coll.insert({_id: 5, d: data1MB}));
+assert.commandWorked(coll.insert({_id: 6, d: "x"}));
assert.eq(9, coll.find().itcount());
diff --git a/jstests/noPassthroughWithMongod/skip_shell_cursor_finalize.js b/jstests/noPassthroughWithMongod/skip_shell_cursor_finalize.js
index a1968b7db90..6071aed73e5 100644
--- a/jstests/noPassthroughWithMongod/skip_shell_cursor_finalize.js
+++ b/jstests/noPassthroughWithMongod/skip_shell_cursor_finalize.js
@@ -6,7 +6,7 @@ const coll = db.skip_shell_cursor_finalize;
coll.drop();
for (let i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
}
function checkShellCursorFinalize(skip = true) {
diff --git a/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js b/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js
index 600d8be4733..54c00aa554d 100644
--- a/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js
+++ b/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js
@@ -15,7 +15,7 @@ coll.drop();
assert.commandWorked(db.runCommand({create: coll.getName(), capped: true, size: 1024}));
for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
}
const findResponse = assert.commandWorked(
diff --git a/jstests/noPassthroughWithMongod/top_drop.js b/jstests/noPassthroughWithMongod/top_drop.js
index 61f1736a2bb..bf4866f05c6 100644
--- a/jstests/noPassthroughWithMongod/top_drop.js
+++ b/jstests/noPassthroughWithMongod/top_drop.js
@@ -56,9 +56,9 @@ function checkTopEntries(expectedEntries) {
}
// Create a few entries in top.
-assert.writeOK(topDB.coll1.insert({}));
-assert.writeOK(topDB.coll2.insert({}));
-assert.writeOK(topDB.coll3.insert({}));
+assert.commandWorked(topDB.coll1.insert({}));
+assert.commandWorked(topDB.coll2.insert({}));
+assert.commandWorked(topDB.coll3.insert({}));
checkTopEntries([topDB.coll1, topDB.coll2, topDB.coll3]);
// Check that dropping a collection removes that collection but leaves the others.
diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js
index b0c7c342987..1d1f3f001ee 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl.js
@@ -35,7 +35,7 @@ var bulk = mastercol.initializeUnorderedBulkOp();
for (i = 0; i < 24; i++) {
bulk.insert({x: new Date(now - (3600 * 1000 * i))});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
rt.awaitReplication();
assert.eq(24, mastercol.count(), "docs not inserted on primary");
assert.eq(24, slave1col.count(), "docs not inserted on secondary");
diff --git a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
index 4616a306084..0cdec8e1845 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
@@ -36,7 +36,7 @@ var restartWithConfig = function() {
var restartWithoutConfig = function() {
var localDB = conn.getDB("local");
- assert.writeOK(localDB.system.replset.remove({}));
+ assert.commandWorked(localDB.system.replset.remove({}));
MongoRunner.stopMongod(conn);
diff --git a/jstests/noPassthroughWithMongod/ttl_sharded.js b/jstests/noPassthroughWithMongod/ttl_sharded.js
index 6788dfcdcf9..5c59b1851ad 100644
--- a/jstests/noPassthroughWithMongod/ttl_sharded.js
+++ b/jstests/noPassthroughWithMongod/ttl_sharded.js
@@ -28,7 +28,7 @@ for (var i = 0; i < 24; i++) {
var past = new Date(now - (3600 * 1000 * i));
bulk.insert({_id: i, x: past});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(t.count(), 24, "initial docs not inserted");
// create the TTL index which deletes anything older than ~5.5 hours
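The index creation itself falls outside this hunk; a sketch of what a TTL index matching the comment would look like (20000 seconds is an illustrative value of roughly 5.5 hours, not taken from the test):

    // Sketch: the TTL monitor removes docs whose 'x' date is older than expireAfterSeconds.
    assert.commandWorked(t.createIndex({x: 1}, {expireAfterSeconds: 20000}));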
diff --git a/jstests/noPassthroughWithMongod/validate_interrupt.js b/jstests/noPassthroughWithMongod/validate_interrupt.js
index c19e682eae3..ecba9cb8756 100644
--- a/jstests/noPassthroughWithMongod/validate_interrupt.js
+++ b/jstests/noPassthroughWithMongod/validate_interrupt.js
@@ -13,7 +13,7 @@ var i;
for (i = 0; i < 1000; i++) {
bulk.insert({a: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function setTimeoutFailPoint(mode) {
var res = db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: mode});
diff --git a/jstests/noPassthroughWithMongod/views_invalid.js b/jstests/noPassthroughWithMongod/views_invalid.js
index a525b68d32f..dfe253c8848 100644
--- a/jstests/noPassthroughWithMongod/views_invalid.js
+++ b/jstests/noPassthroughWithMongod/views_invalid.js
@@ -6,17 +6,18 @@ let invalidDB = db.getSiblingDB(dbname);
// Wait for the invalid view definition to be replicated to any secondaries and then drop the
// database.
-assert.writeOK(invalidDB.system.views.insert({z: '\0\uFFFFf'}), {writeConcern: {w: "majority"}});
+assert.commandWorked(invalidDB.system.views.insert({z: '\0\uFFFFf'},
+                                                    {writeConcern: {w: "majority"}}));
invalidDB.dropDatabase();
// Create a database with one valid and one invalid view through direct system.views writes.
-assert.writeOK(invalidDB.coll.insert({x: 1}));
-assert.writeOK(
+assert.commandWorked(invalidDB.coll.insert({x: 1}));
+assert.commandWorked(
invalidDB.system.views.insert({_id: dbname + '.view', viewOn: 'coll', pipeline: []}));
assert.eq(invalidDB.view.findOne({}, {_id: 0}),
{x: 1},
'find on view created with direct write to views catalog should work');
-assert.writeOK(invalidDB.system.views.insert({_id: 'invalid', pipeline: 3.0}));
+assert.commandWorked(invalidDB.system.views.insert({_id: 'invalid', pipeline: 3.0}));
// Check that view-related commands fail with an invalid view catalog, but other commands on
// existing collections still succeed.
@@ -28,8 +29,8 @@ assert.eq(invalidDB.coll.findOne({}, {_id: 0}),
{x: 1},
'find on existing collection in DB with invalid views catalog should work');
-assert.writeOK(invalidDB.coll.insert({x: 2}),
- 'insert in existing collection in DB with invalid views catalog should work');
+assert.commandWorked(invalidDB.coll.insert({x: 2}),
+ 'insert in existing collection in DB with invalid views catalog should work');
assert.writeError(invalidDB.x.insert({x: 2}),
'insert into new collection in DB with invalid views catalog should fail');
@@ -53,10 +54,11 @@ assert.commandFailedWithCode(
'find on non-existent collection in DB with invalid system.views should fail');
// Now fix the database by removing the invalid system.views entry, and check all is OK.
-assert.writeOK(invalidDB.system.views.remove({_id: 'invalid'}),
- 'should be able to remove invalid view with direct write to view catalog');
-assert.writeOK(invalidDB.coll.insert({x: 1}),
- 'after remove invalid view from catalog, should be able to create new collection');
+assert.commandWorked(invalidDB.system.views.remove({_id: 'invalid'}),
+ 'should be able to remove invalid view with direct write to view catalog');
+assert.commandWorked(
+ invalidDB.coll.insert({x: 1}),
+ 'after remove invalid view from catalog, should be able to create new collection');
assert.eq(invalidDB.view.findOne({}, {_id: 0}),
{x: 1},
'find on view should work again after removing invalid view from catalog');
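Worth noting across these hunks: the write helpers take the write concern inside their options document, while the second argument to the assert helpers is only a failure message. A sketch of the conventional form, assuming a replica-set connection and an illustrative collection handle:

    // Sketch: write concern belongs in the insert options; the string is the assert message.
    assert.commandWorked(coll.insert({x: 1}, {writeConcern: {w: "majority"}}),
                         'majority insert should succeed');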
diff --git a/jstests/readonly/count.js b/jstests/readonly/count.js
index cc13f5c3337..5804a7423e9 100644
--- a/jstests/readonly/count.js
+++ b/jstests/readonly/count.js
@@ -25,7 +25,7 @@ runReadOnlyTest(function() {
bulk.insert({x: 70});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
},
exec: function(readableCollection) {
assert.eq(readableCollection.find({x: {$lt: 10}}).count(), this.countLt10);
diff --git a/jstests/readonly/distinct.js b/jstests/readonly/distinct.js
index b269d3eae68..41a60e0b46f 100644
--- a/jstests/readonly/distinct.js
+++ b/jstests/readonly/distinct.js
@@ -19,7 +19,7 @@ runReadOnlyTest(function() {
for (var [color, num] of zip2(cycleN(this.colors, N), cycleN(this.nums, N))) {
bulk.insert({color, num});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
},
exec: function(readableCollection) {
var distinctColors = readableCollection.distinct('color');
diff --git a/jstests/readonly/find.js b/jstests/readonly/find.js
index 6dd458c5374..b98d8dd8682 100644
--- a/jstests/readonly/find.js
+++ b/jstests/readonly/find.js
@@ -6,7 +6,7 @@ runReadOnlyTest(function() {
name: 'find',
load: function(writableCollection) {
for (var i = 0; i < 10; ++i) {
- assert.writeOK(writableCollection.insert({x: i, y: 2 * i}));
+ assert.commandWorked(writableCollection.insert({x: i, y: 2 * i}));
}
},
exec: function(readableCollection) {
diff --git a/jstests/readonly/get_more.js b/jstests/readonly/get_more.js
index 56719a034ae..78112385c92 100644
--- a/jstests/readonly/get_more.js
+++ b/jstests/readonly/get_more.js
@@ -16,7 +16,7 @@ runReadOnlyTest(function() {
var idx = i * bulkSize + j;
bulk.insert({x: idx, y: idx + 1});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}
assert.eq(writableCollection.count(), this.count);
},
diff --git a/jstests/readonly/temp_collection.js b/jstests/readonly/temp_collection.js
index adb10506d19..35ae73dc2a5 100644
--- a/jstests/readonly/temp_collection.js
+++ b/jstests/readonly/temp_collection.js
@@ -31,7 +31,7 @@ runReadOnlyTest((function() {
}
});
assert(collectionExists, 'Can not find collection in collectionInfos');
- assert.writeOK(writableCollection.insert({a: 1}));
+ assert.commandWorked(writableCollection.insert({a: 1}));
},
exec: function(readableCollection) {
diff --git a/jstests/readonly/write_ops.js b/jstests/readonly/write_ops.js
index a5d43077cbc..ca967e0189f 100644
--- a/jstests/readonly/write_ops.js
+++ b/jstests/readonly/write_ops.js
@@ -5,7 +5,7 @@ runReadOnlyTest(function() {
return {
name: 'write_ops',
load: function(writableCollection) {
- assert.writeOK(writableCollection.insert({_id: 0, x: 1}));
+ assert.commandWorked(writableCollection.insert({_id: 0, x: 1}));
},
exec: function(readableCollection) {
// Test that insert fails.
diff --git a/jstests/replsets/apply_batch_only_goes_forward.js b/jstests/replsets/apply_batch_only_goes_forward.js
index a82ca0880cb..210bcc139af 100644
--- a/jstests/replsets/apply_batch_only_goes_forward.js
+++ b/jstests/replsets/apply_batch_only_goes_forward.js
@@ -43,10 +43,10 @@ var stepDownSecs = 30;
var stepDownCmd = {replSetStepDown: stepDownSecs, force: true};
// Write op
-assert.writeOK(
+assert.commandWorked(
mTest.foo.save({}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
replTest.waitForState(slave, ReplSetTest.State.SECONDARY);
-assert.writeOK(
+assert.commandWorked(
mTest.foo.save({}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
// Set minvalid to something far in the future for the current primary, to simulate recovery.
@@ -63,7 +63,7 @@ const minValidUpdate = {
};
jsTestLog("Current minvalid is " + tojson(mMinvalid.findOne()));
jsTestLog("Updating minValid to: " + tojson(minValidUpdate));
-printjson(assert.writeOK(mMinvalid.update(
+printjson(assert.commandWorked(mMinvalid.update(
{},
minValidUpdate,
{upsert: true, writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}})));
@@ -80,7 +80,7 @@ replTest.awaitNodesAgreeOnPrimary();
// Slave is now master... Do a write to advance the optime on the primary so that it will be
// considered as a sync source - this is more relevant to PV0 because we do not write a new
// entry to the oplog on becoming primary.
-assert.writeOK(replTest.getPrimary().getDB("test").foo.save(
+assert.commandWorked(replTest.getPrimary().getDB("test").foo.save(
{}, {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
// Sync source selection will log this message if it does not detect min valid in the sync
diff --git a/jstests/replsets/apply_batches_totalMillis.js b/jstests/replsets/apply_batches_totalMillis.js
index fd8b2872065..5470ac28173 100644
--- a/jstests/replsets/apply_batches_totalMillis.js
+++ b/jstests/replsets/apply_batches_totalMillis.js
@@ -21,7 +21,7 @@ function performBulkInsert(coll, key, num) {
doc[key] = i;
bulk.insert(doc);
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
rst.awaitReplication();
}
@@ -35,7 +35,7 @@ let secondary = rst.getSecondary();
let coll = primary.getDB(name)["foo"];
// Perform an initial write on the system and ensure steady state.
-assert.writeOK(coll.insert({init: 0}));
+assert.commandWorked(coll.insert({init: 0}));
rst.awaitReplication();
let baseTime = getTotalMillis(secondary);
diff --git a/jstests/replsets/apply_ops_create_view.js b/jstests/replsets/apply_ops_create_view.js
index 9535790f5a9..8be1cdcc5bc 100644
--- a/jstests/replsets/apply_ops_create_view.js
+++ b/jstests/replsets/apply_ops_create_view.js
@@ -9,7 +9,7 @@ replTest.initiate();
const db = replTest.getPrimary().getDB('test');
assert.commandWorked(db.createCollection("bar"));
-assert.writeOK(db.bar.insert({a: 1, b: "hi"}));
+assert.commandWorked(db.bar.insert({a: 1, b: "hi"}));
const cmd = {
applyOps: [{op: "c", ns: db + ".$cmd", o: {create: "foo", viewOn: "bar"}}]
diff --git a/jstests/replsets/apply_ops_idempotency.js b/jstests/replsets/apply_ops_idempotency.js
index 16761c27bee..5ec1a6bf134 100644
--- a/jstests/replsets/apply_ops_idempotency.js
+++ b/jstests/replsets/apply_ops_idempotency.js
@@ -80,28 +80,28 @@ var getCollections = (mydb, prefixes) => prefixes.map((prefix) => mydb[prefix]);
var tests = {
crud: (mydb) => {
let [x, y, z] = getCollections(mydb, ['x', 'y', 'z']);
- assert.writeOK(x.insert({_id: 1}));
- assert.writeOK(x.update({_id: 1}, {$set: {x: 1}}));
- assert.writeOK(x.remove({_id: 1}));
-
- assert.writeOK(y.update({_id: 1}, {y: 1}));
- assert.writeOK(y.insert({_id: 2, y: false, z: false}));
- assert.writeOK(y.update({_id: 2}, {y: 2}));
-
- assert.writeOK(z.insert({_id: 1, z: 1}));
- assert.writeOK(z.remove({_id: 1}));
- assert.writeOK(z.insert({_id: 1}));
- assert.writeOK(z.insert({_id: 2, z: 2}));
+ assert.commandWorked(x.insert({_id: 1}));
+ assert.commandWorked(x.update({_id: 1}, {$set: {x: 1}}));
+ assert.commandWorked(x.remove({_id: 1}));
+
+ assert.commandWorked(y.update({_id: 1}, {y: 1}));
+ assert.commandWorked(y.insert({_id: 2, y: false, z: false}));
+ assert.commandWorked(y.update({_id: 2}, {y: 2}));
+
+ assert.commandWorked(z.insert({_id: 1, z: 1}));
+ assert.commandWorked(z.remove({_id: 1}));
+ assert.commandWorked(z.insert({_id: 1}));
+ assert.commandWorked(z.insert({_id: 2, z: 2}));
},
renameCollectionWithinDatabase: (mydb) => {
let [x, y, z] = getCollections(mydb, ['x', 'y', 'z']);
- assert.writeOK(x.insert({_id: 1, x: 1}));
- assert.writeOK(y.insert({_id: 1, y: 1}));
+ assert.commandWorked(x.insert({_id: 1, x: 1}));
+ assert.commandWorked(y.insert({_id: 1, y: 1}));
assert.commandWorked(x.renameCollection(z.getName()));
- assert.writeOK(z.insert({_id: 2, x: 2}));
- assert.writeOK(x.insert({_id: 2, x: false}));
- assert.writeOK(y.insert({y: 2}));
+ assert.commandWorked(z.insert({_id: 2, x: 2}));
+ assert.commandWorked(x.insert({_id: 2, x: false}));
+ assert.commandWorked(y.insert({y: 2}));
assert.commandWorked(y.renameCollection(x.getName(), true));
assert.commandWorked(z.renameCollection(y.getName()));
@@ -129,14 +129,14 @@ var tests = {
let otherdb = mydb.getSiblingDB(mydb + '_');
let [x, y] = getCollections(mydb, ['x', 'y']);
let [z] = getCollections(otherdb, ['z']);
- assert.writeOK(x.insert({_id: 1, x: 1}));
- assert.writeOK(y.insert({_id: 1, y: 1}));
+ assert.commandWorked(x.insert({_id: 1, x: 1}));
+ assert.commandWorked(y.insert({_id: 1, y: 1}));
assert.commandWorked(
mydb.adminCommand({renameCollection: x.getFullName(), to: z.getFullName()}));
- assert.writeOK(z.insert({_id: 2, x: 2}));
- assert.writeOK(x.insert({_id: 2, x: false}));
- assert.writeOK(y.insert({y: 2}));
+ assert.commandWorked(z.insert({_id: 2, x: 2}));
+ assert.commandWorked(x.insert({_id: 2, x: false}));
+ assert.commandWorked(y.insert({y: 2}));
assert.commandWorked(mydb.adminCommand(
{renameCollection: y.getFullName(), to: x.getFullName(), dropTarget: true}));
@@ -170,10 +170,10 @@ var tests = {
createIndex: (mydb) => {
let [x, y] = getCollections(mydb, ['x', 'y']);
assert.commandWorked(x.createIndex({x: 1}));
- assert.writeOK(x.insert({_id: 1, x: 1}));
- assert.writeOK(y.insert({_id: 1, y: 1}));
+ assert.commandWorked(x.insert({_id: 1, x: 1}));
+ assert.commandWorked(y.insert({_id: 1, y: 1}));
assert.commandWorked(y.createIndex({y: 1}));
- assert.writeOK(y.insert({_id: 2, y: 2}));
+ assert.commandWorked(y.insert({_id: 2, y: 2}));
},
};
diff --git a/jstests/replsets/apply_ops_lastop.js b/jstests/replsets/apply_ops_lastop.js
index e1c9fdb1823..c6257fe907c 100644
--- a/jstests/replsets/apply_ops_lastop.js
+++ b/jstests/replsets/apply_ops_lastop.js
@@ -30,7 +30,7 @@ var badPreCondition = [{ns: 'foo.bar', q: {_id: 10, a: "aaa"}, res: {a: "aaa"}}]
var majorityWriteConcern = {w: 'majority', wtimeout: 30000};
// Set up some data
-assert.writeOK(coll.insert({x: 1})); // creating the collection so applyOps works
+assert.commandWorked(coll.insert({x: 1})); // creating the collection so applyOps works
assert.commandWorked(
m1.getDB('foo').runCommand({applyOps: insertApplyOps, writeConcern: majorityWriteConcern}));
var insertOp = m1.getDB('foo').getLastErrorObj('majority', 30000).lastOp;
diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js
index 42dc2638c28..4d599df4010 100644
--- a/jstests/replsets/auth1.js
+++ b/jstests/replsets/auth1.js
@@ -73,7 +73,7 @@ rs.awaitSecondaryNodes();
var mId = rs.getNodeId(master);
var slave = rs._slaves[0];
assert.eq(1, master.getDB("admin").auth("foo", "bar"));
-assert.writeOK(master.getDB("test").foo.insert(
+assert.commandWorked(master.getDB("test").foo.insert(
{x: 1}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
print("try some legal and illegal reads");
@@ -110,7 +110,7 @@ var bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; i++) {
bulk.insert({x: i, foo: "bar"});
}
-assert.writeOK(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}));
+assert.commandWorked(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}));
print("fail over");
rs.stop(mId);
@@ -123,7 +123,7 @@ bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; i++) {
bulk.insert({x: i, foo: "bar"});
}
-assert.writeOK(bulk.execute({w: 2}));
+assert.commandWorked(bulk.execute({w: 2}));
print("resync");
rs.restart(mId, {"keyFile": key1_600});
diff --git a/jstests/replsets/auth_no_pri.js b/jstests/replsets/auth_no_pri.js
index 179edf015d6..16a94763b04 100644
--- a/jstests/replsets/auth_no_pri.js
+++ b/jstests/replsets/auth_no_pri.js
@@ -14,7 +14,7 @@ master.getDB("admin").createUser({user: "admin", pwd: "pwd", roles: ["root"]}, {
// Can authenticate replset connection when whole set is up.
var conn = new Mongo(rs.getURL());
assert(conn.getDB('admin').auth('admin', 'pwd'));
-assert.writeOK(conn.getDB('admin').foo.insert({a: 1}, {writeConcern: {w: NODE_COUNT}}));
+assert.commandWorked(conn.getDB('admin').foo.insert({a: 1}, {writeConcern: {w: NODE_COUNT}}));
// Make sure there is no primary
rs.stop(0);
diff --git a/jstests/replsets/await_replication_timeout.js b/jstests/replsets/await_replication_timeout.js
index ce89a30c296..dad1a172b95 100644
--- a/jstests/replsets/await_replication_timeout.js
+++ b/jstests/replsets/await_replication_timeout.js
@@ -13,7 +13,7 @@ var testColl = testDB.getCollection(collName);
// Insert a document and implicitly create the collection.
let resetCollection = function(w) {
- assert.writeOK(
+ assert.commandWorked(
testColl.insert({_id: 0}, {writeConcern: {w: w, wtimeout: replTest.kDefaultTimeoutMS}}));
assert.eq(1, testColl.find().itcount());
};
diff --git a/jstests/replsets/background_index.js b/jstests/replsets/background_index.js
index 3b302644438..662abef771d 100644
--- a/jstests/replsets/background_index.js
+++ b/jstests/replsets/background_index.js
@@ -19,7 +19,7 @@ var coll = primary.getCollection("test.foo");
var adminDB = primary.getDB("admin");
for (var i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({_id: i, x: i * 3, str: "hello world"}));
+ assert.commandWorked(coll.insert({_id: i, x: i * 3, str: "hello world"}));
}
// Add a background index.
diff --git a/jstests/replsets/capped_insert_order.js b/jstests/replsets/capped_insert_order.js
index f1a63ea683e..91f94f9579b 100644
--- a/jstests/replsets/capped_insert_order.js
+++ b/jstests/replsets/capped_insert_order.js
@@ -28,7 +28,7 @@ var batch = masterColl.initializeOrderedBulkOp();
for (var i = 0; i < nDocuments; i++) {
batch.insert({_id: i});
}
-assert.writeOK(batch.execute());
+assert.commandWorked(batch.execute());
replTest.awaitReplication();
function checkCollection(coll) {
diff --git a/jstests/replsets/catchup.js b/jstests/replsets/catchup.js
index 9a837346c33..a5b6292fa95 100644
--- a/jstests/replsets/catchup.js
+++ b/jstests/replsets/catchup.js
@@ -63,7 +63,7 @@ function stopReplicationAndEnforceNewPrimaryToCatchUp() {
var oldPrimary = rst.getPrimary();
stopServerReplication(oldSecondaries);
for (var i = 0; i < 3; i++) {
- assert.writeOK(oldPrimary.getDB("test").foo.insert({x: i}));
+ assert.commandWorked(oldPrimary.getDB("test").foo.insert({x: i}));
}
var latestOpOnOldPrimary = getLatestOp(oldPrimary);
diff --git a/jstests/replsets/catchup_takeover_one_high_priority.js b/jstests/replsets/catchup_takeover_one_high_priority.js
index 10c6b99307f..39beecd3dce 100644
--- a/jstests/replsets/catchup_takeover_one_high_priority.js
+++ b/jstests/replsets/catchup_takeover_one_high_priority.js
@@ -57,14 +57,14 @@ sleep(3000);
var primary = replSet.getPrimary();
var writeConcern = {writeConcern: {w: 2, wtimeout: replSet.kDefaultTimeoutMS}};
-assert.writeOK(primary.getDB(name).bar.insert({y: 100}, writeConcern));
+assert.commandWorked(primary.getDB(name).bar.insert({y: 100}, writeConcern));
// Write something so that node 0 is ahead of node 1.
stopServerReplication(nodes[1]);
writeConcern = {
writeConcern: {w: 1, wtimeout: replSet.kDefaultTimeoutMS}
};
-assert.writeOK(primary.getDB(name).bar.insert({x: 100}, writeConcern));
+assert.commandWorked(primary.getDB(name).bar.insert({x: 100}, writeConcern));
nodes[2].reconnect(nodes[0]);
nodes[2].reconnect(nodes[1]);
diff --git a/jstests/replsets/catchup_takeover_two_nodes_ahead.js b/jstests/replsets/catchup_takeover_two_nodes_ahead.js
index 31b78302329..28d60b2ba0a 100644
--- a/jstests/replsets/catchup_takeover_two_nodes_ahead.js
+++ b/jstests/replsets/catchup_takeover_two_nodes_ahead.js
@@ -31,14 +31,14 @@ replSet.awaitReplication();
stopServerReplication(nodes.slice(2, 5));
var primary = replSet.getPrimary();
var writeConcern = {writeConcern: {w: 2, wtimeout: replSet.kDefaultTimeoutMS}};
-assert.writeOK(primary.getDB(name).bar.insert({x: 100}, writeConcern));
+assert.commandWorked(primary.getDB(name).bar.insert({x: 100}, writeConcern));
// Write something so that node 0 is ahead of node 1.
stopServerReplication(nodes[1]);
writeConcern = {
writeConcern: {w: 1, wtimeout: replSet.kDefaultTimeoutMS}
};
-assert.writeOK(primary.getDB(name).bar.insert({y: 100}, writeConcern));
+assert.commandWorked(primary.getDB(name).bar.insert({y: 100}, writeConcern));
const initialPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
const initialNode2Status = assert.commandWorked(nodes[2].adminCommand({serverStatus: 1}));
diff --git a/jstests/replsets/chaining_removal.js b/jstests/replsets/chaining_removal.js
index 1569cad2f71..11ab2e9e43d 100644
--- a/jstests/replsets/chaining_removal.js
+++ b/jstests/replsets/chaining_removal.js
@@ -43,7 +43,7 @@ syncFrom(nodes[4], nodes[1], replTest);
// write that should reach all nodes
var timeout = ReplSetTest.kDefaultTimeoutMS;
var options = {writeConcern: {w: numNodes, wtimeout: timeout}};
-assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options));
+assert.commandWorked(primary.getDB(name).foo.insert({x: 1}, options));
// Re-enable 'maxSyncSourceLagSecs' checking on sync source.
assert.commandWorked(nodes[1].getDB('admin').runCommand(
@@ -67,7 +67,7 @@ primary = replTest.getPrimary();
const liveSlaves = [nodes[1], nodes[2], nodes[3]];
replTest.awaitReplication(null, null, liveSlaves);
options.writeConcern.w = 4;
-assert.writeOK(primary.getDB(name).foo.insert({x: 2}, options));
+assert.commandWorked(primary.getDB(name).foo.insert({x: 2}, options));
replTest.stopSet();
}());
diff --git a/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js b/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js
index 29beca07a26..9fa0cf055c0 100644
--- a/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js
+++ b/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js
@@ -46,9 +46,9 @@ assert.commandWorked(secondaryDB.adminCommand(
stopServerReplication(secondary);
jsTestLog("Do some writes on the primary.");
-assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 1}}));
-assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 2}}));
-assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 3}}));
+assert.commandWorked(primaryColl.update({_id: 0}, {$set: {v: 1}}));
+assert.commandWorked(primaryColl.update({_id: 0}, {$set: {v: 2}}));
+assert.commandWorked(primaryColl.update({_id: 0}, {$set: {v: 3}}));
// Restart server replication on secondary and wait for the failpoint to be hit.
jsTestLog("Restarting server replication on secondary.");
diff --git a/jstests/replsets/clean_shutdown_oplog_state.js b/jstests/replsets/clean_shutdown_oplog_state.js
index 35957ed44b3..33d68dc4887 100644
--- a/jstests/replsets/clean_shutdown_oplog_state.js
+++ b/jstests/replsets/clean_shutdown_oplog_state.js
@@ -34,7 +34,7 @@ primary.getCollection("test.coll").insert({_id: -1});
// Start a w:2 write that will block until replication is resumed.
var waitForReplStart = startParallelShell(function() {
- printjson(assert.writeOK(
+ printjson(assert.commandWorked(
db.getCollection('side').insert({}, {writeConcern: {w: 2, wtimeout: 30 * 60 * 1000}})));
}, primary.host.split(':')[1]);
@@ -43,7 +43,7 @@ var op = primary.getCollection("test.coll").initializeUnorderedBulkOp();
for (var i = 0; i < 1000 * 1000; i++) {
op.insert({_id: i});
}
-assert.writeOK(op.execute());
+assert.commandWorked(op.execute());
// Resume replication and wait for ops to start replicating, then do a clean shutdown on the
// secondary.
diff --git a/jstests/replsets/collate_id.js b/jstests/replsets/collate_id.js
index 588c02e979a..e51a0ffcb88 100644
--- a/jstests/replsets/collate_id.js
+++ b/jstests/replsets/collate_id.js
@@ -51,8 +51,8 @@ for (var i = 0; i < 1000; i++) {
}
}
- assert.writeOK(primaryColl.insert({_id: strId}));
- assert.writeOK(primaryColl.remove({_id: strId}));
+ assert.commandWorked(primaryColl.insert({_id: strId}));
+ assert.commandWorked(primaryColl.remove({_id: strId}));
}
// Since the inserts and deletes happen in pairs, we should be left with an empty collection on
diff --git a/jstests/replsets/dbhash_system_collections.js b/jstests/replsets/dbhash_system_collections.js
index d3f7b83c323..9922bc1968f 100644
--- a/jstests/replsets/dbhash_system_collections.js
+++ b/jstests/replsets/dbhash_system_collections.js
@@ -10,14 +10,14 @@ var primary = rst.getPrimary();
var secondary = rst.getSecondary();
var testDB = primary.getDB('test');
-assert.writeOK(testDB.system.users.insert({users: 1}));
-assert.writeOK(testDB.system.js.insert({js: 1}));
+assert.commandWorked(testDB.system.users.insert({users: 1}));
+assert.commandWorked(testDB.system.js.insert({js: 1}));
var adminDB = primary.getDB('admin');
-assert.writeOK(adminDB.system.roles.insert({roles: 1}));
-assert.writeOK(adminDB.system.version.insert({version: 1}));
-assert.writeOK(adminDB.system.new_users.insert({new_users: 1}));
-assert.writeOK(adminDB.system.backup_users.insert({backup_users: 1}));
+assert.commandWorked(adminDB.system.roles.insert({roles: 1}));
+assert.commandWorked(adminDB.system.version.insert({version: 1}));
+assert.commandWorked(adminDB.system.new_users.insert({new_users: 1}));
+assert.commandWorked(adminDB.system.backup_users.insert({backup_users: 1}));
rst.awaitReplication();
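The assertion this test builds toward is a hash comparison between nodes; sketched, under the assumption of the primary and secondary handles above:

    // Sketch: dbHash reports per-collection hashes plus an aggregate md5 for the database.
    var p = assert.commandWorked(primary.getDB('test').runCommand({dbHash: 1}));
    var s = assert.commandWorked(secondary.getDB('test').runCommand({dbHash: 1}));
    assert.eq(p.md5, s.md5, 'dbhash mismatch between primary and secondary');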
diff --git a/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js b/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js
index 2577744902e..676817cb31b 100644
--- a/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js
+++ b/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js
@@ -42,7 +42,7 @@ let bulk = testColl.initializeUnorderedBulkOp();
for (let i = 0; i < 2; ++i) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("Setting up a prepared transaction...");
const session = primary.startSession();
diff --git a/jstests/replsets/drain.js b/jstests/replsets/drain.js
index e1d008aebc0..7a8bdea6a53 100644
--- a/jstests/replsets/drain.js
+++ b/jstests/replsets/drain.js
@@ -32,7 +32,7 @@ var secondary = replSet.getSecondary();
var numDocuments = 20;
var bulk = primary.getDB("foo").foo.initializeUnorderedBulkOp();
var bigString = Array(1024 * 1024).toString();
-assert.writeOK(primary.getDB("foo").foo.insert({big: bigString}));
+assert.commandWorked(primary.getDB("foo").foo.insert({big: bigString}));
replSet.awaitReplication();
assert.commandWorked(
secondary.getDB("admin").runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}),
@@ -42,7 +42,7 @@ var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffe
for (var i = 1; i < numDocuments; ++i) {
bulk.insert({big: bigString});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
@@ -101,7 +101,7 @@ assert.commandWorked(
// Ensure new primary is writable
jsTestLog('New primary should be writable after draining is complete');
-assert.writeOK(primary.getDB("foo").flag.insert({sentinel: 1}));
+assert.commandWorked(primary.getDB("foo").flag.insert({sentinel: 1}));
// Check for at least two entries. There was one prior to freezing op application on the
// secondary and we cannot guarantee all writes reached the secondary's op queue prior to
// shutting down the original primary.
diff --git a/jstests/replsets/drop_collections_two_phase_rename_drop_target.js b/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
index d7b16cdc790..78f7436853e 100644
--- a/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
+++ b/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
@@ -57,8 +57,8 @@ assert.commandWorked(toColl.ensureIndex({a: 1}, {name: longIndexName}));
assert.commandWorked(toColl.ensureIndex({b: 1}, {name: shortIndexName}));
// Insert documents into both collections so that we can tell them apart.
-assert.writeOK(fromColl.insert({_id: 'from'}));
-assert.writeOK(toColl.insert({_id: 'to'}));
+assert.commandWorked(fromColl.insert({_id: 'from'}));
+assert.commandWorked(toColl.insert({_id: 'to'}));
replTest.awaitReplication();
// Prevent renameCollection from being applied on the secondary so that we can examine the state
diff --git a/jstests/replsets/drop_collections_two_phase_write_concern.js b/jstests/replsets/drop_collections_two_phase_write_concern.js
index e7b60eb18fb..7a55c28d967 100644
--- a/jstests/replsets/drop_collections_two_phase_write_concern.js
+++ b/jstests/replsets/drop_collections_two_phase_write_concern.js
@@ -47,7 +47,7 @@ const writeConcernForSuccessfulOp = {
w: 'majority',
wtimeout: replTest.kDefaultTimeoutMS
};
-assert.writeOK(collForInserts.insert({_id: 0}, {writeConcern: writeConcernForSuccessfulOp}));
+assert.commandWorked(collForInserts.insert({_id: 0}, {writeConcern: writeConcernForSuccessfulOp}));
// PREPARE collection drop.
twoPhaseDropTest.prepareDropCollection(collName);
@@ -80,7 +80,7 @@ try {
// After the reaper is unblocked, an operation waiting on a majority write concern should
// complete successfully.
-assert.writeOK(collForInserts.insert({_id: 3}, {writeConcern: writeConcernForSuccessfulOp}));
+assert.commandWorked(collForInserts.insert({_id: 3}, {writeConcern: writeConcernForSuccessfulOp}));
assert.eq(4, collForInserts.find().itcount());
// COMMIT collection drop.
diff --git a/jstests/replsets/drop_databases_two_phase.js b/jstests/replsets/drop_databases_two_phase.js
index 5a00ebe2e9e..9fc7e175592 100644
--- a/jstests/replsets/drop_databases_two_phase.js
+++ b/jstests/replsets/drop_databases_two_phase.js
@@ -65,7 +65,7 @@ var collNameToDrop = "collectionToDrop";
// Create the collection that will be dropped and let it replicate.
var collToDrop = dbToDrop.getCollection(collNameToDrop);
-assert.writeOK(
+assert.commandWorked(
collToDrop.insert({_id: 0}, {writeConcern: {w: 2, wtimeout: replTest.kDefaultTimeoutMS}}));
assert.eq(1, collToDrop.find().itcount());
diff --git a/jstests/replsets/drop_oplog.js b/jstests/replsets/drop_oplog.js
index a53da5ae483..2a64ef6c436 100644
--- a/jstests/replsets/drop_oplog.js
+++ b/jstests/replsets/drop_oplog.js
@@ -28,7 +28,7 @@ let renameOutput = localDB.oplog.rs.renameCollection("poison");
assert.eq(renameOutput.ok, 0);
assert.eq(renameOutput.errmsg, "can't rename live oplog while replicating");
-assert.writeOK(localDB.foo.insert({a: 1}));
+assert.commandWorked(localDB.foo.insert({a: 1}));
renameOutput = localDB.foo.renameCollection("oplog.rs");
assert.eq(renameOutput.ok, 0);
assert.eq(renameOutput.errmsg, "can't rename to live oplog while replicating");
diff --git a/jstests/replsets/emptycapped.js b/jstests/replsets/emptycapped.js
index e15322935eb..462d6e16f58 100644
--- a/jstests/replsets/emptycapped.js
+++ b/jstests/replsets/emptycapped.js
@@ -12,7 +12,7 @@ var primaryAdminDB = rst.getPrimary().getDB('admin');
var secondaryTestDB = rst.getSecondary().getDB('test');
// Truncate a non-capped collection.
-assert.writeOK(primaryTestDB.noncapped.insert({x: 1}));
+assert.commandWorked(primaryTestDB.noncapped.insert({x: 1}));
assert.commandWorked(primaryTestDB.runCommand({emptycapped: 'noncapped'}));
assert.eq(primaryTestDB.noncapped.find().itcount(),
0,
@@ -31,7 +31,7 @@ assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: 'nonexistent
// Truncate a capped collection.
assert.commandWorked(primaryTestDB.createCollection("capped", {capped: true, size: 4096}));
-assert.writeOK(primaryTestDB.capped.insert({}));
+assert.commandWorked(primaryTestDB.capped.insert({}));
assert.eq(primaryTestDB.capped.find().itcount(), 1, "Expected 1 document to exist after an insert");
assert.commandWorked(primaryTestDB.runCommand({emptycapped: 'capped'}));
assert.eq(primaryTestDB.capped.find().itcount(),
@@ -49,7 +49,7 @@ assert.commandFailedWithCode(primaryLocalDB.runCommand({emptycapped: "oplog.rs"}
// Test system collections, which cannot be truncated except for system.profile.
// Truncate the local system.js collection.
-assert.writeOK(primaryTestDB.system.js.insert({_id: "mystring", value: "var root = this;"}));
+assert.commandWorked(primaryTestDB.system.js.insert({_id: "mystring", value: "var root = this;"}));
assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: "system.js"}),
ErrorCodes.IllegalOperation);
diff --git a/jstests/replsets/index_delete.js b/jstests/replsets/index_delete.js
index 59486bb2932..09a877ec845 100644
--- a/jstests/replsets/index_delete.js
+++ b/jstests/replsets/index_delete.js
@@ -43,7 +43,7 @@ var bulk = masterDB[collName].initializeUnorderedBulkOp();
for (var i = 0; i < size; ++i) {
bulk.insert({i: i, j: i, k: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("Creating index");
masterDB[collName].ensureIndex({i: 1});
diff --git a/jstests/replsets/index_restart_secondary.js b/jstests/replsets/index_restart_secondary.js
index 0b391d5f8e2..d785a394ee3 100644
--- a/jstests/replsets/index_restart_secondary.js
+++ b/jstests/replsets/index_restart_secondary.js
@@ -42,7 +42,7 @@ if (conns[0].getDB('test').serverBuildInfo().bits !== 32) {
for (var i = 0; i < size; ++i) {
bulk.insert({i: i});
}
- assert.writeOK(bulk.execute({w: "majority"}));
+ assert.commandWorked(bulk.execute({w: "majority"}));
jsTest.log("Creating index");
masterDB.jstests_fgsec.ensureIndex({i: 1});
diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js
index 92ace5d0252..0a536b4d601 100644
--- a/jstests/replsets/initial_sync1.js
+++ b/jstests/replsets/initial_sync1.js
@@ -39,7 +39,7 @@ var bulk = foo.bar.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({date: new Date(), x: i, str: "all the talk on the market"});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
print("total in foo: " + foo.bar.find().itcount());
print("4. Make sure synced");
@@ -97,7 +97,7 @@ bulk = foo.bar.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({date: new Date(), x: i, str: "all the talk on the market"});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
print("11. Everyone happy eventually");
replTest.awaitReplication();
diff --git a/jstests/replsets/initial_sync4.js b/jstests/replsets/initial_sync4.js
index 504e7e737ab..80103839bfb 100644
--- a/jstests/replsets/initial_sync4.js
+++ b/jstests/replsets/initial_sync4.js
@@ -22,7 +22,7 @@
for (var i = 0; i < N; ++i) {
bulk.insert({_id: i, x: i, a: {}});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
jsTestLog("3. Make sure synced");
replTest.awaitReplication();
diff --git a/jstests/replsets/initial_sync_applier_error.js b/jstests/replsets/initial_sync_applier_error.js
index 2bd65f51e12..51454c98ea9 100644
--- a/jstests/replsets/initial_sync_applier_error.js
+++ b/jstests/replsets/initial_sync_applier_error.js
@@ -24,7 +24,7 @@ replSet.initiate();
var primary = replSet.getPrimary();
var coll = primary.getDB('test').getCollection(name);
-assert.writeOK(coll.insert({_id: 0, content: "hi"}));
+assert.commandWorked(coll.insert({_id: 0, content: "hi"}));
// Add a secondary node but make it hang after retrieving the last op on the source
// but before copying databases.
diff --git a/jstests/replsets/initial_sync_capped_index.js b/jstests/replsets/initial_sync_capped_index.js
index a7c1a2a3de4..905c785d6e5 100644
--- a/jstests/replsets/initial_sync_capped_index.js
+++ b/jstests/replsets/initial_sync_capped_index.js
@@ -34,7 +34,7 @@ load("jstests/libs/check_log.js");
*/
function overflowCappedColl(coll, docToInsert) {
// Insert one document and save its _id.
- assert.writeOK(coll.insert(docToInsert));
+ assert.commandWorked(coll.insert(docToInsert));
var origFirstDocId = coll.findOne()["_id"];
// Detect overflow by seeing if the original first doc of the collection is still present.
@@ -99,7 +99,7 @@ checkLog.contains(
// additional documents.
var docsToAppend = 2;
for (var i = 0; i < docsToAppend; i++) {
- assert.writeOK(primaryDB[cappedCollName].insert(largeDoc));
+ assert.commandWorked(primaryDB[cappedCollName].insert(largeDoc));
}
// Let the 'getMore' requests for the capped collection clone continue.
diff --git a/jstests/replsets/initial_sync_document_validation.js b/jstests/replsets/initial_sync_document_validation.js
index 79d06f75140..fad4601e797 100644
--- a/jstests/replsets/initial_sync_document_validation.js
+++ b/jstests/replsets/initial_sync_document_validation.js
@@ -15,7 +15,7 @@ var primary = replSet.getPrimary();
var secondary = replSet.getSecondary();
var coll = primary.getDB('test').getCollection(name);
-assert.writeOK(coll.insert({_id: 0, x: 1}));
+assert.commandWorked(coll.insert({_id: 0, x: 1}));
assert.commandWorked(coll.runCommand("collMod", {"validator": {a: {$exists: true}}}));
secondary = replSet.restart(secondary, {startClean: true});
diff --git a/jstests/replsets/initial_sync_drop_collection.js b/jstests/replsets/initial_sync_drop_collection.js
index 6488f55e01e..72fa5b5d273 100644
--- a/jstests/replsets/initial_sync_drop_collection.js
+++ b/jstests/replsets/initial_sync_drop_collection.js
@@ -35,7 +35,7 @@ var nss = primaryColl.getFullName();
// the collection on the secondary is empty.
function setupTest({failPoint, secondaryStartupParams}) {
jsTestLog("Writing data to collection.");
- assert.writeOK(primaryColl.insert([{_id: 1}, {_id: 2}]));
+ assert.commandWorked(primaryColl.insert([{_id: 1}, {_id: 2}]));
jsTestLog("Restarting secondary with failPoint " + failPoint + " set for " + nss);
secondaryStartupParams = secondaryStartupParams || {};
@@ -69,7 +69,7 @@ function finishTest({failPoint, secondaryStartupParams, expectedLog, waitForDrop
if (createNew) {
jsTestLog("Creating a new collection with the same name: " + primaryColl.getFullName());
- assert.writeOK(primaryColl.insert({_id: "not the same collection"}));
+ assert.commandWorked(primaryColl.insert({_id: "not the same collection"}));
}
jsTestLog("Allowing secondary to continue.");
diff --git a/jstests/replsets/initial_sync_during_stepdown.js b/jstests/replsets/initial_sync_during_stepdown.js
index 9d68ac69c49..fe5d1a79587 100644
--- a/jstests/replsets/initial_sync_during_stepdown.js
+++ b/jstests/replsets/initial_sync_during_stepdown.js
@@ -34,7 +34,7 @@ function setupTest({
secondaryStartupParams: secondaryStartupParams = {}
}) {
jsTestLog("Writing data to collection.");
- assert.writeOK(primaryColl.insert([{_id: 1}, {_id: 2}]));
+ assert.commandWorked(primaryColl.insert([{_id: 1}, {_id: 2}]));
jsTestLog("Stopping secondary.");
rst.stop(secondary);
@@ -145,7 +145,7 @@ checkLog.contains(secondary, "initialSyncHangAfterDataCloning fail point enabled
// Insert more data so that the new documents are replicated to the secondary node via the oplog fetcher.
jsTestLog("Inserting more data on primary.");
-assert.writeOK(primaryColl.insert([{_id: 3}, {_id: 4}]));
+assert.commandWorked(primaryColl.insert([{_id: 3}, {_id: 4}]));
// The insert succeeded, so enable the "waitWithPinnedCursorDuringGetMoreBatch" fail point
// so that it doesn't drop locks while the getMore command waits inside the fail point block.
diff --git a/jstests/replsets/initial_sync_fail_insert_once.js b/jstests/replsets/initial_sync_fail_insert_once.js
index 0a1f0a11a8a..079b9d34358 100644
--- a/jstests/replsets/initial_sync_fail_insert_once.js
+++ b/jstests/replsets/initial_sync_fail_insert_once.js
@@ -15,7 +15,7 @@ var primary = replSet.getPrimary();
var secondary = replSet.getSecondary();
var coll = primary.getDB('test').getCollection(name);
-assert.writeOK(coll.insert({_id: 0, x: 1}, {writeConcern: {w: 2}}));
+assert.commandWorked(coll.insert({_id: 0, x: 1}, {writeConcern: {w: 2}}));
jsTest.log("Enabling Failpoint failCollectionInserts on " + tojson(secondary));
assert.commandWorked(secondary.getDB("admin").adminCommand({
diff --git a/jstests/replsets/initial_sync_fcv.js b/jstests/replsets/initial_sync_fcv.js
index af0a466848c..f805f602f06 100644
--- a/jstests/replsets/initial_sync_fcv.js
+++ b/jstests/replsets/initial_sync_fcv.js
@@ -24,7 +24,7 @@ const primary = rst.getPrimary();
const dbName = 'foo';
const collName = 'bar';
-assert.writeOK(primary.getDB(dbName).getCollection(collName).insert({a: 1}));
+assert.commandWorked(primary.getDB(dbName).getCollection(collName).insert({a: 1}));
function runInitialSync(cmd, initialFCV) {
assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: initialFCV}));
diff --git a/jstests/replsets/initial_sync_invalid_views.js b/jstests/replsets/initial_sync_invalid_views.js
index a02498aaa40..5f00bc5e2a3 100644
--- a/jstests/replsets/initial_sync_invalid_views.js
+++ b/jstests/replsets/initial_sync_invalid_views.js
@@ -14,7 +14,7 @@ replSet.initiate();
let primary = replSet.getPrimary();
let coll = primary.getDB('test').foo;
-assert.writeOK(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1}));
// Add a secondary node but make it hang before copying databases.
let secondary = replSet.add();
@@ -24,7 +24,7 @@ assert.commandWorked(secondary.getDB('admin').runCommand(
{configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
replSet.reInitiate();
-assert.writeOK(primary.getDB('test').system.views.insert({invalid: NumberLong(1000)}));
+assert.commandWorked(primary.getDB('test').system.views.insert({invalid: NumberLong(1000)}));
assert.commandWorked(secondary.getDB('admin').runCommand(
{configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
diff --git a/jstests/replsets/initial_sync_move_forward.js b/jstests/replsets/initial_sync_move_forward.js
index 2561e16b0c1..534a2d8d72e 100644
--- a/jstests/replsets/initial_sync_move_forward.js
+++ b/jstests/replsets/initial_sync_move_forward.js
@@ -32,7 +32,7 @@ for (var i = 0; i < count - 2; ++i) {
var longString = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
bulk.insert({_id: count - 2, x: count - 2, longString: longString});
bulk.insert({_id: count - 1, x: count - 1, longString: longString});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Create a unique index on {x: 1}.
assert.commandWorked(masterColl.ensureIndex({x: 1}, {unique: true}));
@@ -63,16 +63,16 @@ assert.soon(function() {
// Delete {_id: count - 2} to make a hole. Grow {_id: 0} so that it moves into that hole. This
// will cause the secondary to clone {_id: 0} again.
// Change the value for 'x' so that we are not testing the uniqueness of 'x' in this case.
-assert.writeOK(masterColl.remove({_id: 0, x: 0}));
-assert.writeOK(masterColl.remove({_id: count - 2, x: count - 2}));
-assert.writeOK(masterColl.insert({_id: 0, x: count, longString: longString}));
+assert.commandWorked(masterColl.remove({_id: 0, x: 0}));
+assert.commandWorked(masterColl.remove({_id: count - 2, x: count - 2}));
+assert.commandWorked(masterColl.insert({_id: 0, x: count, longString: longString}));
// Delete {_id: count - 1} to make a hole. Grow {x: 1} so that it moves into that hole. This
// will cause the secondary to clone {x: 1} again.
// Change the value for _id so that we are not testing the uniqueness of _id in this case.
-assert.writeOK(masterColl.remove({_id: 1, x: 1}));
-assert.writeOK(masterColl.remove({_id: count - 1, x: count - 1}));
-assert.writeOK(masterColl.insert({_id: count, x: 1, longString: longString}));
+assert.commandWorked(masterColl.remove({_id: 1, x: 1}));
+assert.commandWorked(masterColl.remove({_id: count - 1, x: count - 1}));
+assert.commandWorked(masterColl.insert({_id: count, x: 1, longString: longString}));
// Resume initial sync.
assert.commandWorked(secondary.adminCommand(
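Note that bulk.execute() returns a BulkWriteResult rather than a bare {ok: 1} reply; assert.commandWorked accepts it just as assert.writeOK did. A small usage sketch (the collection name is a placeholder):

    var bulk = db.sketchColl.initializeUnorderedBulkOp();  // 'sketchColl' is hypothetical
    for (var i = 0; i < 5; i++) {
        bulk.insert({_id: i});
    }
    var res = assert.commandWorked(bulk.execute());
    assert.eq(5, res.nInserted);  // BulkWriteResult exposes per-op counts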
diff --git a/jstests/replsets/initial_sync_oplog_hole.js b/jstests/replsets/initial_sync_oplog_hole.js
index 190099cd571..a5bd24b96b3 100644
--- a/jstests/replsets/initial_sync_oplog_hole.js
+++ b/jstests/replsets/initial_sync_oplog_hole.js
@@ -35,7 +35,7 @@ TestData.testName = testName;
TestData.collectionName = collName;
jsTestLog("Writing data before oplog hole to collection.");
-assert.writeOK(primaryColl.insert({_id: "a"}));
+assert.commandWorked(primaryColl.insert({_id: "a"}));
// Make sure it gets written out.
assert.eq(primaryColl.find({_id: "a"}).itcount(), 1);
@@ -55,7 +55,7 @@ checkLog.contains(primaryDB.getMongo(),
"hangAfterCollectionInserts fail point enabled for " + primaryColl.getFullName());
jsTest.log("Create a write following the uncommitted write.");
-assert.writeOK(primaryColl.insert({_id: "c"}));
+assert.commandWorked(primaryColl.insert({_id: "c"}));
// Make sure it gets written out.
assert.eq(primaryColl.find({_id: "c"}).itcount(), 1);
diff --git a/jstests/replsets/initial_sync_oplog_rollover.js b/jstests/replsets/initial_sync_oplog_rollover.js
index 7ffe8c98dd4..268548f1f80 100644
--- a/jstests/replsets/initial_sync_oplog_rollover.js
+++ b/jstests/replsets/initial_sync_oplog_rollover.js
@@ -30,7 +30,7 @@ replSet.initiate();
var primary = replSet.getPrimary();
var coll = primary.getDB('test').foo;
-assert.writeOK(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1}));
function getFirstOplogEntry(conn) {
return conn.getDB('local').oplog.rs.find().sort({$natural: 1}).limit(1)[0];
@@ -53,7 +53,7 @@ checkLog.contains(secondary,
const largeStr = new Array(4 * 1024 * oplogSizeOnPrimary).join('aaaaaaaa');
var i = 0;
while (bsonWoCompare(getFirstOplogEntry(primary), firstOplogEntry) === 0) {
- assert.writeOK(coll.insert({a: 2, x: i++, long_str: largeStr}));
+ assert.commandWorked(coll.insert({a: 2, x: i++, long_str: largeStr}));
sleep(100);
}
diff --git a/jstests/replsets/initial_sync_rename_collection.js b/jstests/replsets/initial_sync_rename_collection.js
index 9c63d7c55b1..957dae9db8a 100644
--- a/jstests/replsets/initial_sync_rename_collection.js
+++ b/jstests/replsets/initial_sync_rename_collection.js
@@ -32,8 +32,8 @@ const collAcrossFinal_name = 'renamed_across';
// Create two collections on the same database. One will be renamed within the database
// and the other will be renamed to a different database.
-assert.writeOK(primary_db0[collRenameWithinDB_name].save({}));
-assert.writeOK(primary_db0[collRenameAcrossDBs_name].save({}));
+assert.commandWorked(primary_db0[collRenameWithinDB_name].save({}));
+assert.commandWorked(primary_db0[collRenameAcrossDBs_name].save({}));
jsTestLog('Waiting for replication');
rst.awaitReplication();
diff --git a/jstests/replsets/initial_sync_replSetGetStatus.js b/jstests/replsets/initial_sync_replSetGetStatus.js
index 60fd36a9c77..7d325997328 100644
--- a/jstests/replsets/initial_sync_replSetGetStatus.js
+++ b/jstests/replsets/initial_sync_replSetGetStatus.js
@@ -18,8 +18,8 @@ replSet.initiate();
var primary = replSet.getPrimary();
var coll = primary.getDB('test').foo;
-assert.writeOK(coll.insert({a: 1}));
-assert.writeOK(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 2}));
// Add a secondary node but make it hang before copying databases.
var secondary = replSet.add();
@@ -47,8 +47,8 @@ assert(!res.initialSyncStatus,
assert.commandFailedWithCode(secondary.adminCommand({replSetGetStatus: 1, initialSync: "t"}),
ErrorCodes.TypeMismatch);
-assert.writeOK(coll.insert({a: 3}));
-assert.writeOK(coll.insert({a: 4}));
+assert.commandWorked(coll.insert({a: 3}));
+assert.commandWorked(coll.insert({a: 4}));
// Let initial sync continue working.
assert.commandWorked(secondary.getDB('admin').runCommand(
diff --git a/jstests/replsets/initial_sync_unsupported_auth_schema.js b/jstests/replsets/initial_sync_unsupported_auth_schema.js
index bb3013b38a3..bff265a0653 100644
--- a/jstests/replsets/initial_sync_unsupported_auth_schema.js
+++ b/jstests/replsets/initial_sync_unsupported_auth_schema.js
@@ -16,7 +16,7 @@ function testInitialSyncAbortsWithUnsupportedAuthSchema(schema) {
// invalid or outdated version
var versionColl = rst.getPrimary().getDB('admin').system.version;
var res = versionColl.insert(schema);
- assert.writeOK(res);
+ assert.commandWorked(res);
// Add another node to the replica set to allow an initial sync to occur
var initSyncNode = rst.add({setParameter: 'numInitialSyncAttempts=1'});
@@ -63,7 +63,7 @@ function testInitialSyncAbortsWithExistingUserAndNoAuthSchema() {
// a corresponding auth schema
var userColl = rst.getPrimary().getDB('admin').system.users;
var res = userColl.insert({});
- assert.writeOK(res);
+ assert.commandWorked(res);
// Add another node to the replica set to allow an initial sync to occur
var initSyncNode = rst.add({setParameter: 'numInitialSyncAttempts=1'});
diff --git a/jstests/replsets/initial_sync_uuid_not_found.js b/jstests/replsets/initial_sync_uuid_not_found.js
index 0942ac1f54b..50cc9f6b11b 100644
--- a/jstests/replsets/initial_sync_uuid_not_found.js
+++ b/jstests/replsets/initial_sync_uuid_not_found.js
@@ -22,7 +22,7 @@ const primaryDB = primary.getDB('d');
const primaryColl = primaryDB.coll;
jsTestLog('Create a collection (with a UUID) and insert a document.');
-assert.writeOK(primaryColl.insert({_id: 0}));
+assert.commandWorked(primaryColl.insert({_id: 0}));
const collInfo = primaryDB.getCollectionInfos({name: primaryColl.getName()})[0];
assert(collInfo.info.uuid, 'newly created collection expected to have a UUID: ' + tojson(collInfo));
@@ -48,7 +48,7 @@ function ResyncWithFailpoint(failpointName, failpointData) {
jsTestLog('Remove collection on the primary and insert a new document, recreating it.');
assert(primaryColl.drop());
- assert.writeOK(primaryColl.insert({_id: 0}, {writeConcern: {w: 'majority'}}));
+ assert.commandWorked(primaryColl.insert({_id: 0}, {writeConcern: {w: 'majority'}}));
const newCollInfo = primaryDB.getCollectionInfos({name: primaryColl.getName()})[0];
assert(newCollInfo.info.uuid,
       'recreated collection expected to have a UUID: ' + tojson(newCollInfo));
assert.neq(collInfo.info.uuid,
diff --git a/jstests/replsets/initial_sync_views.js b/jstests/replsets/initial_sync_views.js
index ae202aff0e7..b9866272f28 100644
--- a/jstests/replsets/initial_sync_views.js
+++ b/jstests/replsets/initial_sync_views.js
@@ -16,7 +16,7 @@ replTest.initiate();
let primaryDB = replTest.getPrimary().getDB(testName);
for (let i = 0; i < 10; ++i) {
- assert.writeOK(primaryDB.coll.insert({a: i}));
+ assert.commandWorked(primaryDB.coll.insert({a: i}));
}
// Setup view.
diff --git a/jstests/replsets/initial_sync_with_write_load.js b/jstests/replsets/initial_sync_with_write_load.js
index 180487abe50..fc1164c6c43 100644
--- a/jstests/replsets/initial_sync_with_write_load.js
+++ b/jstests/replsets/initial_sync_with_write_load.js
@@ -35,7 +35,7 @@ assert(master == conns[0], "conns[0] assumed to be master");
assert(a_conn.host == master.host);
// create an oplog entry with an insert
-assert.writeOK(
+assert.commandWorked(
A.foo.insert({x: 1}, {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
replTest.stop(BID);
@@ -44,7 +44,7 @@ var work = function() {
print("starting loadgen");
var start = new Date().getTime();
- assert.writeOK(db.timeToStartTrigger.insert({_id: 1}));
+ assert.commandWorked(db.timeToStartTrigger.insert({_id: 1}));
while (true) {
for (x = 0; x < 100; x++) {
diff --git a/jstests/replsets/interrupted_batch_insert.js b/jstests/replsets/interrupted_batch_insert.js
index d8b6419398b..b5c9e62c18b 100644
--- a/jstests/replsets/interrupted_batch_insert.js
+++ b/jstests/replsets/interrupted_batch_insert.js
@@ -79,7 +79,7 @@ restartServerReplication(conns[2]);
// Issue a write to the new primary.
var collOnNewPrimary = replTest.nodes[1].getCollection(collName);
-assert.writeOK(collOnNewPrimary.insert({singleDoc: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(collOnNewPrimary.insert({singleDoc: 1}, {writeConcern: {w: "majority"}}));
// Isolate node 1, forcing it to step down as primary, and reconnect node 0, allowing it to step
// up again.
diff --git a/jstests/replsets/last_vote.js b/jstests/replsets/last_vote.js
index 62901259364..4b77dcbd334 100644
--- a/jstests/replsets/last_vote.js
+++ b/jstests/replsets/last_vote.js
@@ -37,7 +37,7 @@ function getLastVoteDoc(conn) {
function setLastVoteDoc(conn, term, candidate) {
var newLastVote = {term: term, candidateIndex: rst.getNodeId(candidate)};
- return assert.writeOK(conn.getCollection(lastVoteNS).update({}, newLastVote));
+ return assert.commandWorked(conn.getCollection(lastVoteNS).update({}, newLastVote));
}
function assertNodeHasLastVote(node, term, candidate) {
diff --git a/jstests/replsets/lastop.js b/jstests/replsets/lastop.js
index c1fa2ffb21f..1abcd15abeb 100644
--- a/jstests/replsets/lastop.js
+++ b/jstests/replsets/lastop.js
@@ -15,35 +15,35 @@ var m2 = new Mongo(primary.host);
// Do a write with m1, then a write with m2, then a no-op write with m1. m1 should have a lastOp
// of m2's write.
-assert.writeOK(m1.getCollection("test.foo").insert({m1: 1}));
+assert.commandWorked(m1.getCollection("test.foo").insert({m1: 1}));
var firstOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
-assert.writeOK(m2.getCollection("test.foo").insert({m2: 99}));
+assert.commandWorked(m2.getCollection("test.foo").insert({m2: 99}));
var secondOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
// No-op update
-assert.writeOK(m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 1}}));
+assert.commandWorked(m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 1}}));
var noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
assert.eq(noOp, secondOp);
-assert.writeOK(m1.getCollection("test.foo").remove({m1: 1}));
+assert.commandWorked(m1.getCollection("test.foo").remove({m1: 1}));
var thirdOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
-assert.writeOK(m2.getCollection("test.foo").insert({m2: 98}));
+assert.commandWorked(m2.getCollection("test.foo").insert({m2: 98}));
var fourthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
// No-op delete
-assert.writeOK(m1.getCollection("test.foo").remove({m1: 1}));
+assert.commandWorked(m1.getCollection("test.foo").remove({m1: 1}));
noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
assert.eq(noOp, fourthOp);
// Dummy write, for a new lastOp.
-assert.writeOK(m1.getCollection("test.foo").insert({m1: 99}));
+assert.commandWorked(m1.getCollection("test.foo").insert({m1: 99}));
var fifthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
-assert.writeOK(m2.getCollection("test.foo").insert({m2: 97}));
+assert.commandWorked(m2.getCollection("test.foo").insert({m2: 97}));
var sixthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
// No-op find-and-modify delete
@@ -55,7 +55,7 @@ assert.eq(noOp, sixthOp);
assert.commandWorked(m1.getCollection("test.foo").createIndex({x: 1}));
var seventhOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
-assert.writeOK(m2.getCollection("test.foo").insert({m2: 96}));
+assert.commandWorked(m2.getCollection("test.foo").insert({m2: 96}));
var eighthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
// No-op create index.
@@ -64,10 +64,10 @@ noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
assert.eq(noOp, eighthOp);
-assert.writeOK(m1.getCollection("test.foo").insert({_id: 1, x: 1}));
+assert.commandWorked(m1.getCollection("test.foo").insert({_id: 1, x: 1}));
var ninthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
-assert.writeOK(m2.getCollection("test.foo").insert({m2: 991}));
+assert.commandWorked(m2.getCollection("test.foo").insert({m2: 991}));
var tenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
// update with immutable field error
@@ -78,7 +78,7 @@ noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
assert.eq(noOp, tenthOp);
-assert.writeOK(m2.getCollection("test.foo").insert({m2: 992}));
+assert.commandWorked(m2.getCollection("test.foo").insert({m2: 992}));
var eleventhOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
// find-and-modify immutable field error
@@ -94,13 +94,13 @@ noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
assert.eq(noOp, eleventhOp);
var bigString = new Array(3000).toString();
-assert.writeOK(m2.getCollection("test.foo").insert({m2: 994, m3: bigString}));
+assert.commandWorked(m2.getCollection("test.foo").insert({m2: 994, m3: bigString}));
// No-op insert
-assert.writeOK(m1.getCollection("test.foo").insert({_id: 5, x: 5}));
+assert.commandWorked(m1.getCollection("test.foo").insert({_id: 5, x: 5}));
var thirteenthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
-assert.writeOK(m2.getCollection("test.foo").insert({m2: 991}));
+assert.commandWorked(m2.getCollection("test.foo").insert({m2: 991}));
var fourteenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
// Hits DuplicateKey error and fails insert -- no-op
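lastop.js exercises how no-op writes affect a connection's lastOp: a no-op generates no new oplog entry, so the client's lastOp is set to the newest optime already in the system. Condensed to a single connection (collection name hypothetical):

    var coll = db.getSiblingDB("test").lastOpSketch;  // hypothetical collection
    assert.commandWorked(coll.insert({_id: 1, x: 1}));
    var opAfterInsert = db.getSiblingDB("test").getLastErrorObj().lastOp;
    // Setting x to its current value matches but modifies nothing; on an
    // otherwise quiet set, lastOp should still point at the insert above.
    assert.commandWorked(coll.update({_id: 1}, {$set: {x: 1}}));
    assert.eq(opAfterInsert, db.getSiblingDB("test").getLastErrorObj().lastOp);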
diff --git a/jstests/replsets/libs/election_handoff.js b/jstests/replsets/libs/election_handoff.js
index f05e6b702d7..f29f55853bd 100644
--- a/jstests/replsets/libs/election_handoff.js
+++ b/jstests/replsets/libs/election_handoff.js
@@ -50,7 +50,7 @@ var ElectionHandoffTest = (function() {
// Make sure all secondaries are ready before stepping down. We must additionally
// make sure that the primary is aware that the secondaries are ready and caught up
// to the primary's lastApplied, so we issue a dummy write and wait on its optime.
- assert.writeOK(primary.getDB("test").secondariesMustBeCaughtUpToHere.insert(
+ assert.commandWorked(primary.getDB("test").secondariesMustBeCaughtUpToHere.insert(
{"a": 1}, {writeConcern: {w: rst.nodes.length}}));
rst.awaitNodesAgreeOnAppliedOpTime();
diff --git a/jstests/replsets/libs/rename_across_dbs.js b/jstests/replsets/libs/rename_across_dbs.js
index fe42cab63b6..d32d6a11627 100644
--- a/jstests/replsets/libs/rename_across_dbs.js
+++ b/jstests/replsets/libs/rename_across_dbs.js
@@ -105,7 +105,7 @@ var RenameAcrossDatabasesTest = function(options) {
// options.dropTarget is true.
const dropTarget = options.dropTarget || false;
if (dropTarget) {
- assert.writeOK(targetColl.insert({_id: 1000, target: 1}));
+ assert.commandWorked(targetColl.insert({_id: 1000, target: 1}));
assert.commandWorked(targetColl.createIndex({target: 1}));
}
@@ -116,7 +116,7 @@ var RenameAcrossDatabasesTest = function(options) {
const numDocs = 10;
_testLog('Inserting ' + numDocs + ' documents into source collection.');
for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(sourceColl.insert({_id: i, source: 1}));
+ assert.commandWorked(sourceColl.insert({_id: i, source: 1}));
}
const numNonIdIndexes = 3;
_testLog('Creating ' + numNonIdIndexes + ' indexes.');
diff --git a/jstests/replsets/libs/rollback_test.js b/jstests/replsets/libs/rollback_test.js
index 3abb424585b..478034a6ba8 100644
--- a/jstests/replsets/libs/rollback_test.js
+++ b/jstests/replsets/libs/rollback_test.js
@@ -309,7 +309,7 @@ function RollbackTest(name = "RollbackTest", replSet) {
// Insert one document to ensure rollback will not be skipped.
let dbName = "EnsureThereIsAtLeastOneOperationToRollback";
- assert.writeOK(curPrimary.getDB(dbName).ensureRollback.insert(
+ assert.commandWorked(curPrimary.getDB(dbName).ensureRollback.insert(
{thisDocument: 'is inserted to ensure rollback is not skipped'}));
log(`Isolating the primary ${curPrimary.host} so it will step down`);
diff --git a/jstests/replsets/libs/secondary_reads_test.js b/jstests/replsets/libs/secondary_reads_test.js
index 192421827f8..1d712fce05a 100644
--- a/jstests/replsets/libs/secondary_reads_test.js
+++ b/jstests/replsets/libs/secondary_reads_test.js
@@ -97,7 +97,7 @@ function SecondaryReadsTest(name = "secondary_reads_test") {
this.stopReaders = function() {
print("signaling readers to stop...");
assert.gt(readers.length, 0, "no readers to stop");
- assert.writeOK(primaryDB.getCollection(signalColl).insert({_id: testDoneId}));
+ assert.commandWorked(primaryDB.getCollection(signalColl).insert({_id: testDoneId}));
for (let i = 0; i < readers.length; i++) {
const await = readers[i];
await ();
diff --git a/jstests/replsets/libs/tags.js b/jstests/replsets/libs/tags.js
index 2f52516e4b3..e5861ee0bad 100644
--- a/jstests/replsets/libs/tags.js
+++ b/jstests/replsets/libs/tags.js
@@ -174,7 +174,7 @@ var TagsTest = function(options) {
var writeConcern = {
writeConcern: {w: expectedWritableNodesCount, wtimeout: replTest.kDefaultTimeoutMS}
};
- assert.writeOK(primary.getDB('foo').bar.insert({x: 100}, writeConcern));
+ assert.commandWorked(primary.getDB('foo').bar.insert({x: 100}, writeConcern));
jsTestLog('ensurePrimary - Successfully wrote a document to primary node (' +
replTest.nodes[nodeId].host +
') using a write concern of w:' + expectedWritableNodesCount);
@@ -218,7 +218,7 @@ var TagsTest = function(options) {
jsTestLog('Non-existent write concern should be rejected.');
options = {writeConcern: {w: 'blahblah', wtimeout: ReplSetTest.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB('foo').bar.insert(doc));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc));
var result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
assert.eq(ErrorCodes.UnknownReplWriteConcern,
@@ -227,7 +227,7 @@ var TagsTest = function(options) {
jsTestLog('Write concern "3 or 4" should fail - 3 and 4 are not connected to the primary.');
var options = {writeConcern: {w: '3 or 4', wtimeout: failTimeout}};
- assert.writeOK(primary.getDB('foo').bar.insert(doc));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc));
result = primary.getDB('foo').bar.insert(doc, options);
assert.neq(null, result.getWriteConcernError());
assert(result.getWriteConcernError().errInfo.wtimeout);
@@ -240,12 +240,12 @@ var TagsTest = function(options) {
jsTestLog('Write concern "3 or 4" should work - 4 is now connected to the primary ' +
primary.host + ' via node 1 ' + replTest.nodes[1].host);
options = {writeConcern: {w: '3 or 4', wtimeout: ReplSetTest.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB('foo').bar.insert(doc));
- assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "3 and 4" should fail - 3 is not connected to the primary.');
options = {writeConcern: {w: '3 and 4', wtimeout: failTimeout}};
- assert.writeOK(primary.getDB('foo').bar.insert(doc));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc));
result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
assert(result.getWriteConcernError().errInfo.wtimeout,
@@ -259,24 +259,24 @@ var TagsTest = function(options) {
jsTestLog('Write concern "3 and 4" should work - ' +
'nodes 3 and 4 are connected to primary via node 1.');
options = {writeConcern: {w: '3 and 4', wtimeout: ReplSetTest.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB('foo').bar.insert(doc));
- assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2" - writes to primary only.');
options = {writeConcern: {w: '2', wtimeout: 0}};
- assert.writeOK(primary.getDB('foo').bar.insert(doc));
- assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "1 and 2"');
options = {writeConcern: {w: '1 and 2', wtimeout: 0}};
- assert.writeOK(primary.getDB('foo').bar.insert(doc));
- assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2 dc and 3 server"');
primary = ensurePrimary(2, replTest.nodes.slice(0, 3), replTest.nodes.length);
options = {writeConcern: {w: '2 dc and 3 server', wtimeout: ReplSetTest.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB('foo').bar.insert(doc));
- assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Bringing down current primary node 2 ' + primary.host + ' to allow node 1 ' +
replTest.nodes[1].host + ' to become primary.');
@@ -295,13 +295,13 @@ var TagsTest = function(options) {
jsTestLog('Write concern "3 and 4" should still work with new primary node 1 ' +
primary.host);
options = {writeConcern: {w: '3 and 4', wtimeout: ReplSetTest.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB('foo').bar.insert(doc));
- assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2" should fail because node 2 ' + replTest.nodes[2].host +
' is down.');
options = {writeConcern: {w: '2', wtimeout: failTimeout}};
- assert.writeOK(primary.getDB('foo').bar.insert(doc));
+ assert.commandWorked(primary.getDB('foo').bar.insert(doc));
result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
assert(result.getWriteConcernError().errInfo.wtimeout);
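The w: '3 and 4' and w: '2 dc and 3 server' strings above are custom write concern modes declared in the replica set config under settings.getLastErrorModes; each mode maps a tag name to the number of distinct tag values that must acknowledge the write. A minimal illustration (hosts and tag values are placeholders):

    var conf = {
        _id: "rs",
        members: [
            {_id: 0, host: "h0:27017", tags: {dc: "east", server: "a"}},
            {_id: 1, host: "h1:27017", tags: {dc: "east", server: "b"}},
            {_id: 2, host: "h2:27017", tags: {dc: "west", server: "c"}}
        ],
        // "2 dc and 3 server": acked in 2 distinct dc values and on 3 distinct servers.
        settings: {getLastErrorModes: {"2 dc and 3 server": {dc: 2, server: 3}}}
    };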
diff --git a/jstests/replsets/linearizable_read_concern.js b/jstests/replsets/linearizable_read_concern.js
index 5984577a4ed..58f4f633a6c 100644
--- a/jstests/replsets/linearizable_read_concern.js
+++ b/jstests/replsets/linearizable_read_concern.js
@@ -52,12 +52,12 @@ var primary = replTest.getPrimary();
var secondaries = replTest.getSecondaries();
// Do a write to have something to read.
-assert.writeOK(primary.getDB("test").foo.insert(
+assert.commandWorked(primary.getDB("test").foo.insert(
{"number": 7}, {"writeConcern": {"w": "majority", "wtimeout": ReplSetTest.kDefaultTimeoutMS}}));
jsTestLog("Testing linearizable readConcern parsing");
// This command is sent to the primary, and the primary is fully connected so it should work.
-var goodRead = assert.writeOK(primary.getDB("test").runCommand(
+var goodRead = assert.commandWorked(primary.getDB("test").runCommand(
{'find': 'foo', readConcern: {level: "linearizable"}, "maxTimeMS": 60000}));
assert.eq(goodRead.cursor.firstBatch[0].number, 7);
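The goodRead change above is more than a rename: runCommand returns a plain command reply, not a WriteResult, so assert.commandWorked is the semantically appropriate check for a linearizable find. Roughly ('testDB' is a placeholder database handle, and the read assumes the {number: 7} insert above):

    var reply = assert.commandWorked(testDB.runCommand(
        {find: "foo", readConcern: {level: "linearizable"}, maxTimeMS: 60000}));
    assert.eq(7, reply.cursor.firstBatch[0].number);  // reply is {cursor: ..., ok: 1}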
diff --git a/jstests/replsets/localhostAuthBypass.js b/jstests/replsets/localhostAuthBypass.js
index 5a0a1b95562..1470167021e 100644
--- a/jstests/replsets/localhostAuthBypass.js
+++ b/jstests/replsets/localhostAuthBypass.js
@@ -85,9 +85,9 @@ var assertCanRunCommands = function(mongo) {
// will throw on failure
test.system.users.findOne();
- assert.writeOK(test.foo.save({_id: 0}));
- assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}}));
- assert.writeOK(test.foo.remove({_id: 0}));
+ assert.commandWorked(test.foo.save({_id: 0}));
+ assert.commandWorked(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.commandWorked(test.foo.remove({_id: 0}));
test.foo.mapReduce(
function() {
diff --git a/jstests/replsets/maintenance_non-blocking.js b/jstests/replsets/maintenance_non-blocking.js
index 5581ffe3546..ffc2c374c21 100644
--- a/jstests/replsets/maintenance_non-blocking.js
+++ b/jstests/replsets/maintenance_non-blocking.js
@@ -20,7 +20,7 @@ doTest = function() {
// save some records
var len = 100;
for (var i = 0; i < len; ++i) {
- assert.writeOK(mColl.save({a: i}));
+ assert.commandWorked(mColl.save({a: i}));
}
print("******* replSetMaintenance called on secondary ************* ");
@@ -31,7 +31,7 @@ doTest = function() {
assert.eq(false, ismaster.secondary);
print("******* writing to primary ************* ");
- assert.writeOK(mColl.save({_id: -1}));
+ assert.commandWorked(mColl.save({_id: -1}));
printjson(sDB.currentOp());
assert.neq(null, mColl.findOne());
diff --git a/jstests/replsets/mr_nonrepl_coll_in_local_db.js b/jstests/replsets/mr_nonrepl_coll_in_local_db.js
index a6d6cad7e7a..5edce6d6a87 100644
--- a/jstests/replsets/mr_nonrepl_coll_in_local_db.js
+++ b/jstests/replsets/mr_nonrepl_coll_in_local_db.js
@@ -29,7 +29,7 @@ for (let i = 0; i < 1000; i++) {
const array = Array.from({length: 10000}, _ => Math.floor(Math.random() * 100));
bulk.insert({arr: array});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Run a simple map-reduce.
const result = coll.mapReduce(
diff --git a/jstests/replsets/noop_writes_wait_for_write_concern.js b/jstests/replsets/noop_writes_wait_for_write_concern.js
index d5731c2b7c4..d8c2a970a0d 100644
--- a/jstests/replsets/noop_writes_wait_for_write_concern.js
+++ b/jstests/replsets/noop_writes_wait_for_write_concern.js
@@ -51,7 +51,7 @@ var commands = [];
commands.push({
req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1}}]},
setupFunc: function() {
- assert.writeOK(coll.insert({_id: 1}));
+ assert.commandWorked(coll.insert({_id: 1}));
},
confirmFunc: function(res) {
assert.commandWorkedIgnoringWriteConcernErrors(res);
@@ -66,8 +66,8 @@ commands.push({
commands.push({
req: {update: collName, updates: [{q: {a: 1}, u: {b: 2}}]},
setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.update({a: 1}, {b: 2}));
+ assert.commandWorked(coll.insert({a: 1}));
+ assert.commandWorked(coll.update({a: 1}, {b: 2}));
},
confirmFunc: function(res) {
assert.commandWorkedIgnoringWriteConcernErrors(res);
@@ -82,8 +82,8 @@ commands.push({
commands.push({
req: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 2}}}]},
setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.update({a: 1}, {$set: {b: 2}}));
+ assert.commandWorked(coll.insert({a: 1}));
+ assert.commandWorked(coll.update({a: 1}, {$set: {b: 2}}));
},
confirmFunc: function(res) {
assert.commandWorkedIgnoringWriteConcernErrors(res);
@@ -97,8 +97,8 @@ commands.push({
commands.push({
req: {delete: collName, deletes: [{q: {a: 1}, limit: 1}]},
setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.remove({a: 1}));
+ assert.commandWorked(coll.insert({a: 1}));
+ assert.commandWorked(coll.remove({a: 1}));
},
confirmFunc: function(res) {
assert.commandWorkedIgnoringWriteConcernErrors(res);
@@ -110,7 +110,7 @@ commands.push({
commands.push({
req: {createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]},
setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorked(coll.insert({a: 1}));
assert.commandWorkedIgnoringWriteConcernErrors(
db.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}));
},
@@ -125,7 +125,7 @@ commands.push({
commands.push({
req: {findAndModify: collName, query: {a: 1}, update: {b: 2}},
setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorked(coll.insert({a: 1}));
assert.commandWorkedIgnoringWriteConcernErrors(
db.runCommand({findAndModify: collName, query: {a: 1}, update: {b: 2}}));
},
@@ -141,7 +141,7 @@ commands.push({
commands.push({
req: {findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}},
setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorked(coll.insert({a: 1}));
assert.commandWorkedIgnoringWriteConcernErrors(
db.runCommand({findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}}));
},
@@ -156,7 +156,7 @@ commands.push({
commands.push({
req: {dropDatabase: 1},
setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorked(coll.insert({a: 1}));
assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({dropDatabase: 1}));
},
confirmFunc: function(res) {
@@ -167,7 +167,7 @@ commands.push({
commands.push({
req: {drop: collName},
setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorked(coll.insert({a: 1}));
assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({drop: collName}));
},
confirmFunc: function(res) {
@@ -188,7 +188,7 @@ commands.push({
commands.push({
req: {insert: collName, documents: [{_id: 1}]},
setupFunc: function() {
- assert.writeOK(coll.insert({_id: 1}));
+ assert.commandWorked(coll.insert({_id: 1}));
},
confirmFunc: function(res) {
assert.commandWorkedIgnoringWriteErrorsAndWriteConcernErrors(res);
diff --git a/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js b/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js
index e024a9853f7..4c295b38b73 100644
--- a/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js
+++ b/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js
@@ -39,7 +39,7 @@ function testFCVNoop(targetVersion) {
replTest.stop(1);
// Insert a document to ensure there is a last optime.
- assert.writeOK(primary.getDB("test").foo.insert({x: 1}));
+ assert.commandWorked(primary.getDB("test").foo.insert({x: 1}));
// We run the command on a different connection. If the command were run on the same
// connection, then the client last op for the noop write would be the last op of the
diff --git a/jstests/replsets/opcounters_repl.js b/jstests/replsets/opcounters_repl.js
index 5bf31a1f5ee..7ba6d802b89 100644
--- a/jstests/replsets/opcounters_repl.js
+++ b/jstests/replsets/opcounters_repl.js
@@ -66,21 +66,21 @@ assert.eq(diff.secondary.command, 1);
// 2. Insert a document.
diff = getOpCountersDiff(() => {
- assert.writeOK(primaryColl.insert({_id: 0}, {writeConcern: {w: 2}}));
+ assert.commandWorked(primaryColl.insert({_id: 0}, {writeConcern: {w: 2}}));
});
assert.eq(diff.primary.insert, 1);
assert.eq(diff.secondary.insert, 1);
// 3. Update a document.
diff = getOpCountersDiff(() => {
- assert.writeOK(primaryColl.update({_id: 0}, {$set: {a: 1}}, {writeConcern: {w: 2}}));
+ assert.commandWorked(primaryColl.update({_id: 0}, {$set: {a: 1}}, {writeConcern: {w: 2}}));
});
assert.eq(diff.primary.update, 1);
assert.eq(diff.secondary.update, 1);
// 4. Delete a document.
diff = getOpCountersDiff(() => {
- assert.writeOK(primaryColl.remove({_id: 0}, {writeConcern: {w: 2}}));
+ assert.commandWorked(primaryColl.remove({_id: 0}, {writeConcern: {w: 2}}));
});
assert.eq(diff.primary.delete, 1);
assert.eq(diff.secondary.delete, 1);
diff --git a/jstests/replsets/oplog_format.js b/jstests/replsets/oplog_format.js
index d4ae2345f30..fc72b20d581 100644
--- a/jstests/replsets/oplog_format.js
+++ b/jstests/replsets/oplog_format.js
@@ -34,7 +34,7 @@ assertLastOplog({_id: 1}, null, "save -- setup ");
var msg = "IncRewriteExistingField: $inc $set";
coll.save({_id: 1, a: 2});
assertLastOplog({_id: 1, a: 2}, {_id: 1}, "save " + msg);
-var res = assert.writeOK(coll.update({}, {$inc: {a: 1}, $set: {b: 2}}));
+var res = assert.commandWorked(coll.update({}, {$inc: {a: 1}, $set: {b: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: 3, b: 2}, coll.findOne({}), msg);
assertLastOplog({$v: 1, $set: {a: 3, b: 2}}, {_id: 1}, msg);
@@ -42,7 +42,7 @@ assertLastOplog({$v: 1, $set: {a: 3, b: 2}}, {_id: 1}, msg);
var msg = "IncRewriteNonExistingField: $inc $set";
coll.save({_id: 1, c: 0});
assertLastOplog({_id: 1, c: 0}, {_id: 1}, "save " + msg);
-res = assert.writeOK(coll.update({}, {$inc: {a: 1}, $set: {b: 2}}));
+res = assert.commandWorked(coll.update({}, {$inc: {a: 1}, $set: {b: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, c: 0, a: 1, b: 2}, coll.findOne({}), msg);
assertLastOplog({$v: 1, $set: {a: 1, b: 2}}, {_id: 1}, msg);
@@ -50,7 +50,7 @@ assertLastOplog({$v: 1, $set: {a: 1, b: 2}}, {_id: 1}, msg);
var msg = "TwoNestedPulls: two $pull";
coll.save({_id: 1, a: {b: [1, 2], c: [1, 2]}});
assertLastOplog({_id: 1, a: {b: [1, 2], c: [1, 2]}}, {_id: 1}, "save " + msg);
-res = assert.writeOK(coll.update({}, {$pull: {'a.b': 2, 'a.c': 2}}));
+res = assert.commandWorked(coll.update({}, {$pull: {'a.b': 2, 'a.c': 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: {b: [1], c: [1]}}, coll.findOne({}), msg);
assertLastOplog({$v: 1, $set: {'a.b': [1], 'a.c': [1]}}, {_id: 1}, msg);
@@ -58,7 +58,7 @@ assertLastOplog({$v: 1, $set: {'a.b': [1], 'a.c': [1]}}, {_id: 1}, msg);
var msg = "MultiSets: two $set";
coll.save({_id: 1, a: 1, b: 1});
assertLastOplog({_id: 1, a: 1, b: 1}, {_id: 1}, "save " + msg);
-res = assert.writeOK(coll.update({}, {$set: {a: 2, b: 2}}));
+res = assert.commandWorked(coll.update({}, {$set: {a: 2, b: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: 2, b: 2}, coll.findOne({}), msg);
assertLastOplog({$v: 1, $set: {a: 2, b: 2}}, {_id: 1}, msg);
@@ -68,36 +68,36 @@ assertLastOplog({$v: 1, $set: {a: 2, b: 2}}, {_id: 1}, msg);
var msg = "bad single $set";
coll.save({_id: 1, a: 1});
assertLastOplog({_id: 1, a: 1}, {_id: 1}, "save " + msg);
-res = assert.writeOK(coll.update({}, {$set: {a: 2}}));
+res = assert.commandWorked(coll.update({}, {$set: {a: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: 2}, coll.findOne({}), msg);
assertLastOplog({$v: 1, $set: {a: 2}}, {_id: 1}, msg);
var msg = "bad single $inc";
-res = assert.writeOK(coll.update({}, {$inc: {a: 1}}));
+res = assert.commandWorked(coll.update({}, {$inc: {a: 1}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: 3}, coll.findOne({}), msg);
assertLastOplog({$v: 1, $set: {a: 3}}, {_id: 1}, msg);
var msg = "bad double $set";
-res = assert.writeOK(coll.update({}, {$set: {a: 2, b: 2}}));
+res = assert.commandWorked(coll.update({}, {$set: {a: 2, b: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: 2, b: 2}, coll.findOne({}), msg);
assertLastOplog({$v: 1, $set: {a: 2, b: 2}}, {_id: 1}, msg);
var msg = "bad save";
-assert.writeOK(coll.save({_id: 1, a: [2]}));
+assert.commandWorked(coll.save({_id: 1, a: [2]}));
assert.docEq({_id: 1, a: [2]}, coll.findOne({}), msg);
assertLastOplog({_id: 1, a: [2]}, {_id: 1}, msg);
var msg = "bad array $inc";
-res = assert.writeOK(coll.update({}, {$inc: {"a.0": 1}}));
+res = assert.commandWorked(coll.update({}, {$inc: {"a.0": 1}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: [3]}, coll.findOne({}), msg);
var lastTS = assertLastOplog({$v: 1, $set: {"a.0": 3}}, {_id: 1}, msg);
var msg = "bad $setOnInsert";
-res = assert.writeOK(coll.update({}, {$setOnInsert: {a: -1}}));
+res = assert.commandWorked(coll.update({}, {$setOnInsert: {a: -1}}));
assert.eq(res.nMatched, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: [3]}, coll.findOne({}), msg); // No-op
var otherTS = assertLastOplog({$v: 1, $set: {"a.0": 3}}, {_id: 1}, msg); // Nothing new
@@ -107,7 +107,7 @@ coll.remove({});
assert.eq(coll.find().itcount(), 0, "collection not empty");
var msg = "bad $setOnInsert w/upsert";
-res = assert.writeOK(coll.update({}, {$setOnInsert: {a: 200}}, {upsert: true})); // upsert
+res = assert.commandWorked(coll.update({}, {$setOnInsert: {a: 200}}, {upsert: true})); // upsert
assert.eq(res.nUpserted, 1, "update failed for '" + msg + "': " + res.toString());
var id = res.getUpsertedId()._id;
assert.docEq({_id: id, a: 200}, coll.findOne({}), msg); // No-op
@@ -131,21 +131,22 @@ assertLastOplog({$set:{"a": [1,2,3]}}, {_id:1}, msg); // new format
var msg = "bad array $push 2";
coll.save({_id: 1, a: "foo"});
-res = assert.writeOK(coll.update({}, {$push: {c: 18}}));
+res = assert.commandWorked(coll.update({}, {$push: {c: 18}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: "foo", c: [18]}, coll.findOne({}), msg);
assertLastOplog({$v: 1, $set: {"c": [18]}}, {_id: 1}, msg);
var msg = "bad array $push $slice";
coll.save({_id: 1, a: {b: [18]}});
-res = assert.writeOK(coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [1, 2], $slice: -2}}}));
+res = assert.commandWorked(
+ coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [1, 2], $slice: -2}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: {b: [1, 2]}}, coll.findOne({}), msg);
assertLastOplog({$v: 1, $set: {"a.b": [1, 2]}}, {_id: 1}, msg);
var msg = "bad array $push $sort ($slice -100)";
coll.save({_id: 1, a: {b: [{c: 2}, {c: 1}]}});
-res = assert.writeOK(
+res = assert.commandWorked(
coll.update({}, {$push: {"a.b": {$each: [{c: -1}], $sort: {c: 1}, $slice: -100}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: {b: [{c: -1}, {c: 1}, {c: 2}]}}, coll.findOne({}), msg);
@@ -153,7 +154,7 @@ assertLastOplog({$v: 1, $set: {"a.b": [{c: -1}, {c: 1}, {c: 2}]}}, {_id: 1}, msg
var msg = "bad array $push $slice $sort";
coll.save({_id: 1, a: [{b: 2}, {b: 1}]});
-res = assert.writeOK(
+res = assert.commandWorked(
coll.update({_id: {$gt: 0}}, {$push: {a: {$each: [{b: -1}], $slice: -2, $sort: {b: 1}}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: [{b: 1}, {b: 2}]}, coll.findOne({}), msg);
@@ -161,7 +162,7 @@ assertLastOplog({$v: 1, $set: {a: [{b: 1}, {b: 2}]}}, {_id: 1}, msg);
var msg = "bad array $push $slice $sort first two";
coll.save({_id: 1, a: {b: [{c: 2}, {c: 1}]}});
-res = assert.writeOK(
+res = assert.commandWorked(
coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [{c: -1}], $slice: -2, $sort: {c: 1}}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: {b: [{c: 1}, {c: 2}]}}, coll.findOne({}), msg);
@@ -169,7 +170,7 @@ assertLastOplog({$v: 1, $set: {"a.b": [{c: 1}, {c: 2}]}}, {_id: 1}, msg);
var msg = "bad array $push $slice $sort reversed first two";
coll.save({_id: 1, a: {b: [{c: 1}, {c: 2}]}});
-res = assert.writeOK(
+res = assert.commandWorked(
coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [{c: -1}], $slice: -2, $sort: {c: -1}}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: {b: [{c: 1}, {c: -1}]}}, coll.findOne({}), msg);
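assertLastOplog in oplog_format.js compares the newest oplog entry's o (and o2) fields against the expected modifier document; with $v: 1, modifier-style updates are logged as {$v: 1, $set: {...}}. A rough sketch of reading that entry (illustrative; the test's actual helper is defined earlier in the file):

    function latestOplogEntry(conn) {
        // The newest entry is last in natural order, so sort descending and take one.
        return conn.getDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
    }

    var entry = latestOplogEntry(db.getMongo());
    printjson(entry.o);   // e.g. {$v: 1, $set: {a: 3, b: 2}} for a modifier update
    printjson(entry.o2);  // the target document's key, e.g. {_id: 1}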
diff --git a/jstests/replsets/oplog_replay_on_startup_with_bad_op.js b/jstests/replsets/oplog_replay_on_startup_with_bad_op.js
index 04f12dc7c1c..cf7911c248e 100644
--- a/jstests/replsets/oplog_replay_on_startup_with_bad_op.js
+++ b/jstests/replsets/oplog_replay_on_startup_with_bad_op.js
@@ -30,7 +30,7 @@ var lastTs = lastOplogDoc.ts;
var newTs = Timestamp(lastTs.t + 1, 1);
var term = lastOplogDoc.t;
-assert.writeOK(oplog.insert({
+assert.commandWorked(oplog.insert({
ts: newTs,
t: term,
h: 1,
@@ -56,8 +56,8 @@ var injectedMinValidDoc = {
// This weird mechanism is the only way to bypass mongod's attempt to fill in null
// Timestamps.
var minValidColl = conn.getCollection('local.replset.minvalid');
-assert.writeOK(minValidColl.remove({}));
-assert.writeOK(minValidColl.update({}, {$set: injectedMinValidDoc}, {upsert: true}));
+assert.commandWorked(minValidColl.remove({}));
+assert.commandWorked(minValidColl.update({}, {$set: injectedMinValidDoc}, {upsert: true}));
assert.eq(minValidColl.findOne(),
injectedMinValidDoc,
"If the Timestamps differ, the server may be filling in the null timestamps");
diff --git a/jstests/replsets/oplog_term.js b/jstests/replsets/oplog_term.js
index f21e01f4a98..b3de5b8c57d 100644
--- a/jstests/replsets/oplog_term.js
+++ b/jstests/replsets/oplog_term.js
@@ -12,7 +12,7 @@ replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 5 * 1000);
// Default protocol version is 1 - 'term' field should present in oplog entry.
var primary = replSet.getPrimary();
var collection = primary.getDB('test').getCollection(name);
-assert.writeOK(collection.save({_id: 1}));
+assert.commandWorked(collection.save({_id: 1}));
var oplogEntry = getLatestOp(primary);
assert(oplogEntry, 'unexpected empty oplog');
diff --git a/jstests/replsets/oplog_wallclock.js b/jstests/replsets/oplog_wallclock.js
index a744c207d7e..47cf929e3ea 100644
--- a/jstests/replsets/oplog_wallclock.js
+++ b/jstests/replsets/oplog_wallclock.js
@@ -17,13 +17,13 @@ replSet.initiate();
var primary = replSet.getPrimary();
var collection = primary.getDB('test').getCollection(name);
-assert.writeOK(collection.insert({_id: 1, val: 'x'}));
+assert.commandWorked(collection.insert({_id: 1, val: 'x'}));
assertLastOplogHasWT(primary, 'insert');
-assert.writeOK(collection.update({_id: 1}, {val: 'y'}));
+assert.commandWorked(collection.update({_id: 1}, {val: 'y'}));
assertLastOplogHasWT(primary, 'update');
-assert.writeOK(collection.remove({_id: 1}));
+assert.commandWorked(collection.remove({_id: 1}));
assertLastOplogHasWT(primary, 'remove');
replSet.stopSet();
diff --git a/jstests/replsets/optime.js b/jstests/replsets/optime.js
index 867c6258441..be355209f3f 100644
--- a/jstests/replsets/optime.js
+++ b/jstests/replsets/optime.js
@@ -77,7 +77,7 @@ let initialReplStatusInfo = master.getDB('admin').runCommand({replSetGetStatus:
// Do an insert to increment optime, but without rolling the oplog
// latestOptime should be updated, but earliestOptime should be unchanged
var options = {writeConcern: {w: replTest.nodes.length}};
-assert.writeOK(master.getDB('test').foo.insert({a: 1}, options));
+assert.commandWorked(master.getDB('test').foo.insert({a: 1}, options));
assert.soon(function() {
return optimesAndWallTimesAreEqual(replTest, isPersistent);
});
diff --git a/jstests/replsets/prepare_transaction_index_build.js b/jstests/replsets/prepare_transaction_index_build.js
index aa5d53673e1..36c5533a08e 100644
--- a/jstests/replsets/prepare_transaction_index_build.js
+++ b/jstests/replsets/prepare_transaction_index_build.js
@@ -29,7 +29,7 @@ const bulk = testColl.initializeUnorderedBulkOp();
for (let i = 0; i < 10; ++i) {
bulk.insert({x: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// activate failpoint to hang index build on secondary.
secondary.getDB("admin").runCommand(
diff --git a/jstests/replsets/read_after_optime.js b/jstests/replsets/read_after_optime.js
index dad48acd925..ef2b736ab83 100644
--- a/jstests/replsets/read_after_optime.js
+++ b/jstests/replsets/read_after_optime.js
@@ -11,7 +11,7 @@ var config = replTest.getReplSetConfigFromNode();
var runTest = function(testDB, primaryConn) {
var dbName = testDB.getName();
- assert.writeOK(primaryConn.getDB(dbName).user.insert({x: 1}, {writeConcern: {w: 2}}));
+ assert.commandWorked(primaryConn.getDB(dbName).user.insert({x: 1}, {writeConcern: {w: 2}}));
var localDB = primaryConn.getDB('local');
diff --git a/jstests/replsets/read_committed.js b/jstests/replsets/read_committed.js
index 79a9cd3b0fa..7260fec8698 100644
--- a/jstests/replsets/read_committed.js
+++ b/jstests/replsets/read_committed.js
@@ -24,27 +24,27 @@ const testCases = {
insert: {
prepareCollection: function(coll) {}, // No-op
write: function(coll, writeConcern) {
- assert.writeOK(coll.insert({_id: 1}, writeConcern));
+ assert.commandWorked(coll.insert({_id: 1}, writeConcern));
},
expectedBefore: [],
expectedAfter: [{_id: 1}],
},
update: {
prepareCollection: function(coll) {
- assert.writeOK(coll.insert({_id: 1, state: 'before'}, majorityWriteConcern));
+ assert.commandWorked(coll.insert({_id: 1, state: 'before'}, majorityWriteConcern));
},
write: function(coll, writeConcern) {
- assert.writeOK(coll.update({_id: 1}, {$set: {state: 'after'}}, writeConcern));
+ assert.commandWorked(coll.update({_id: 1}, {$set: {state: 'after'}}, writeConcern));
},
expectedBefore: [{_id: 1, state: 'before'}],
expectedAfter: [{_id: 1, state: 'after'}],
},
remove: {
prepareCollection: function(coll) {
- assert.writeOK(coll.insert({_id: 1}, majorityWriteConcern));
+ assert.commandWorked(coll.insert({_id: 1}, majorityWriteConcern));
},
write: function(coll, writeConcern) {
- assert.writeOK(coll.remove({_id: 1}, writeConcern));
+ assert.commandWorked(coll.remove({_id: 1}, writeConcern));
},
expectedBefore: [{_id: 1}],
expectedAfter: [],
@@ -121,7 +121,7 @@ for (var testName in testCases) {
var test = testCases[testName];
const setUpInitialState = function setUpInitialState() {
- assert.writeOK(coll.remove({}, majorityWriteConcern));
+ assert.commandWorked(coll.remove({}, majorityWriteConcern));
test.prepareCollection(coll);
// Do some sanity checks.
assert.eq(doDirtyRead(coll), test.expectedBefore);
diff --git a/jstests/replsets/read_committed_after_rollback.js b/jstests/replsets/read_committed_after_rollback.js
index 097c75c1564..68d7d138040 100644
--- a/jstests/replsets/read_committed_after_rollback.js
+++ b/jstests/replsets/read_committed_after_rollback.js
@@ -70,8 +70,8 @@ var oldPrimaryColl = oldPrimary.getCollection(collName);
var newPrimaryColl = newPrimary.getCollection(collName);
// Set up initial state.
-assert.writeOK(oldPrimaryColl.insert({_id: 1, state: 'old'},
- {writeConcern: {w: 'majority', wtimeout: 30000}}));
+assert.commandWorked(oldPrimaryColl.insert({_id: 1, state: 'old'},
+ {writeConcern: {w: 'majority', wtimeout: 30000}}));
assert.eq(doDirtyRead(oldPrimaryColl), 'old');
assert.eq(doCommittedRead(oldPrimaryColl), 'old');
assert.eq(doDirtyRead(newPrimaryColl), 'old');
@@ -86,7 +86,7 @@ oldPrimary.disconnect([newPrimary, pureSecondary]);
assert.eq(doDirtyRead(newPrimaryColl), 'old');
// This write will only make it to oldPrimary and will never become committed.
-assert.writeOK(oldPrimaryColl.save({_id: 1, state: 'INVALID'}));
+assert.commandWorked(oldPrimaryColl.save({_id: 1, state: 'INVALID'}));
assert.eq(doDirtyRead(oldPrimaryColl), 'INVALID');
assert.eq(doCommittedRead(oldPrimaryColl), 'old');
@@ -106,7 +106,7 @@ assert.soon(function() {
// Stop applier on pureSecondary to ensure that writes to newPrimary won't become committed yet.
assert.commandWorked(
pureSecondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
-assert.writeOK(newPrimaryColl.save({_id: 1, state: 'new'}));
+assert.commandWorked(newPrimaryColl.save({_id: 1, state: 'new'}));
assert.eq(doDirtyRead(newPrimaryColl), 'new');
// Note that we still can't do a committed read from the new primary and reliably get anything,
// since we never proved that it learned about the commit level from the old primary before
@@ -135,12 +135,12 @@ assert.commandWorked(
pureSecondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
// Do a write to the new primary so that the old primary can establish a sync source to learn
// about the new commit.
-assert.writeOK(newPrimary.getDB(name).unrelatedCollection.insert(
+assert.commandWorked(newPrimary.getDB(name).unrelatedCollection.insert(
{a: 1}, {writeConcern: {w: 'majority', wtimeout: replTest.kDefaultTimeoutMS}}));
assert.eq(doCommittedRead(newPrimaryColl), 'new');
// Do another write to the new primary so that the old primary can be sure to receive the
// new committed optime.
-assert.writeOK(newPrimary.getDB(name).unrelatedCollection.insert(
+assert.commandWorked(newPrimary.getDB(name).unrelatedCollection.insert(
{a: 2}, {writeConcern: {w: 'majority', wtimeout: replTest.kDefaultTimeoutMS}}));
assert.eq(doCommittedRead(oldPrimaryColl), 'new');
diff --git a/jstests/replsets/read_committed_stale_history.js b/jstests/replsets/read_committed_stale_history.js
index f40841575f4..79564b62821 100644
--- a/jstests/replsets/read_committed_stale_history.js
+++ b/jstests/replsets/read_committed_stale_history.js
@@ -57,7 +57,7 @@ var primary = rst.getPrimary();
var secondaries = rst.getSecondaries();
assert.eq(nodes[0], primary);
// Wait for all data bearing nodes to get up to date.
-assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
+assert.commandWorked(nodes[0].getDB(dbName).getCollection(collName).insert(
{a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
// Stop the secondaries from replicating.
@@ -67,7 +67,7 @@ assert.commandWorked(
nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'alwaysOn'}));
jsTestLog("Do a write that won't ever reach a majority of nodes");
-assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert({a: 2}));
+assert.commandWorked(nodes[0].getDB(dbName).getCollection(collName).insert({a: 2}));
// Ensure that the write that was just done is not visible in the committed snapshot.
checkDocNotCommitted(nodes[0], {a: 2});
@@ -90,7 +90,7 @@ restartServerReplication(secondaries);
waitForPrimary(nodes[1]);
jsTest.log("Do a write to the new primary");
-assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
+assert.commandWorked(nodes[1].getDB(dbName).getCollection(collName).insert(
{a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
// Ensure the new primary still cannot see the write from the old primary.
diff --git a/jstests/replsets/read_committed_with_catalog_changes.js b/jstests/replsets/read_committed_with_catalog_changes.js
index 10c444a1ed9..7e2e054c526 100644
--- a/jstests/replsets/read_committed_with_catalog_changes.js
+++ b/jstests/replsets/read_committed_with_catalog_changes.js
@@ -38,10 +38,10 @@ load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
const testCases = {
createCollectionInExistingDB: {
prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
+ assert.commandWorked(db.other.insert({_id: 1}));
},
performOp: function(db) {
- assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.coll.insert({_id: 1}));
},
blockedCollections: ['coll'],
unblockedCollections: ['other'],
@@ -49,15 +49,15 @@ const testCases = {
createCollectionInNewDB: {
prepare: function(db) {},
performOp: function(db) {
- assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.coll.insert({_id: 1}));
},
blockedCollections: ['coll'],
unblockedCollections: ['otherDoesNotExist'], // Only existent collections are blocked.
},
dropCollection: {
prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.other.insert({_id: 1}));
+ assert.commandWorked(db.coll.insert({_id: 1}));
},
performOp: function(db) {
assert(db.coll.drop());
@@ -67,7 +67,7 @@ const testCases = {
},
dropDB: {
prepare: function(db) {
- assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.coll.insert({_id: 1}));
// Drop collection explicitly during the preparation phase while we are still able
// to write to a majority. Otherwise, dropDatabase() will drop the collection
// and wait for the collection drop to be replicated to a majority of the nodes.
@@ -81,19 +81,19 @@ const testCases = {
},
dropAndRecreateCollection: {
prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.other.insert({_id: 1}));
+ assert.commandWorked(db.coll.insert({_id: 1}));
},
performOp: function(db) {
assert(db.coll.drop());
- assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.coll.insert({_id: 1}));
},
blockedCollections: ['coll'],
unblockedCollections: ['other'],
},
dropAndRecreateDB: {
prepare: function(db) {
- assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.coll.insert({_id: 1}));
// Drop collection explicitly during the preparation phase while we are still able
// to write to a majority. Otherwise, dropDatabase() will drop the collection
// and wait for the collection drop to be replicated to a majority of the nodes.
@@ -101,15 +101,15 @@ const testCases = {
},
performOp: function(db) {
assert.commandWorked(db.dropDatabase({w: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.coll.insert({_id: 1}));
},
blockedCollections: ['coll'],
unblockedCollections: ['otherDoesNotExist'],
},
renameCollectionToNewName: {
prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.from.insert({_id: 1}));
+ assert.commandWorked(db.other.insert({_id: 1}));
+ assert.commandWorked(db.from.insert({_id: 1}));
},
performOp: function(db) {
assert.commandWorked(db.from.renameCollection('coll'));
@@ -119,9 +119,9 @@ const testCases = {
},
renameCollectionToExistingName: {
prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.from.insert({_id: 'from'}));
- assert.writeOK(db.coll.insert({_id: 'coll'}));
+ assert.commandWorked(db.other.insert({_id: 1}));
+ assert.commandWorked(db.from.insert({_id: 'from'}));
+ assert.commandWorked(db.coll.insert({_id: 'coll'}));
},
performOp: function(db) {
assert.commandWorked(db.from.renameCollection('coll', true));
@@ -131,8 +131,8 @@ const testCases = {
},
createIndexForeground: {
prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.other.insert({_id: 1}));
+ assert.commandWorked(db.coll.insert({_id: 1}));
},
performOp: function(db) {
assert.commandWorked(db.coll.ensureIndex({x: 1}, {background: false}));
@@ -142,8 +142,8 @@ const testCases = {
},
createIndexBackground: {
prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.other.insert({_id: 1}));
+ assert.commandWorked(db.coll.insert({_id: 1}));
},
performOp: function(db) {
assert.commandWorked(db.coll.ensureIndex({x: 1}, {background: true}));
@@ -153,8 +153,8 @@ const testCases = {
},
dropIndex: {
prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.other.insert({_id: 1}));
+ assert.commandWorked(db.coll.insert({_id: 1}));
assert.commandWorked(db.coll.ensureIndex({x: 1}));
},
performOp: function(db) {
@@ -167,8 +167,8 @@ const testCases = {
// Remaining cases are local-only operations.
reIndex: {
prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.other.insert({_id: 1}));
+ assert.commandWorked(db.coll.insert({_id: 1}));
assert.commandWorked(db.coll.ensureIndex({x: 1}));
},
performOp: function(db) {
@@ -181,8 +181,8 @@ const testCases = {
compact: {
// At least on WiredTiger, compact is fully in-place so it doesn't need to block readers.
prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.other.insert({_id: 1}));
+ assert.commandWorked(db.coll.insert({_id: 1}));
assert.commandWorked(db.coll.ensureIndex({x: 1}));
},
performOp: function(db) {
@@ -249,7 +249,7 @@ var mainDB = primary.getDB('mainDB');
// This DB won't be used by any tests so it should always be unblocked.
var otherDB = primary.getDB('otherDB');
var otherDBCollection = otherDB.collection;
-assert.writeOK(otherDBCollection.insert(
+assert.commandWorked(otherDBCollection.insert(
{}, {writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
assertReadsSucceed(otherDBCollection);
@@ -285,7 +285,7 @@ for (var testName in testCases) {
// performing the operation. This will ensure that the operation happens after an
// uncommitted write which prevents it from immediately being marked as committed.
if (test.localOnly) {
- assert.writeOK(otherDBCollection.insert({}));
+ assert.commandWorked(otherDBCollection.insert({}));
}
// Perform the op and ensure that blocked collections block and unblocked ones don't.
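// A minimal sketch of the equivalence this patch applies throughout, assuming the
// mongo shell's standard assertion helpers: both accept a WriteResult, but
// assert.commandWorked also covers plain command responses, hence the rename.
var res = db.coll.insert({_id: 1}); // WriteResult
assert.commandWorked(res);          // preferred spelling of assert.writeOK(res)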
diff --git a/jstests/replsets/read_majority_two_arbs.js b/jstests/replsets/read_majority_two_arbs.js
index f49ebe71dd7..53ac988b6fb 100644
--- a/jstests/replsets/read_majority_two_arbs.js
+++ b/jstests/replsets/read_majority_two_arbs.js
@@ -62,7 +62,8 @@ function doCommittedRead() {
}
jsTest.log("doing write");
-assert.writeOK(t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 10 * 1000}}));
+assert.commandWorked(
+ t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 10 * 1000}}));
jsTest.log("doing read");
assert.eq(doDirtyRead(), 0);
jsTest.log("doing committed read");
diff --git a/jstests/replsets/read_operations_during_step_down.js b/jstests/replsets/read_operations_during_step_down.js
index 667e353d2fe..d6bdd779be3 100644
--- a/jstests/replsets/read_operations_during_step_down.js
+++ b/jstests/replsets/read_operations_during_step_down.js
@@ -28,7 +28,7 @@ TestData.dbName = dbName;
TestData.collName = collName;
jsTestLog("1. Do a document write");
-assert.writeOK(
+assert.commandWorked(
        primaryColl.insert({_id: 0}, {"writeConcern": {"w": "majority"}}));
rst.awaitReplication();
diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js
index 8b789db3547..5c3114a0893 100644
--- a/jstests/replsets/replset2.js
+++ b/jstests/replsets/replset2.js
@@ -46,7 +46,7 @@ doTest = function(signal) {
printjson(master.getDB("admin").runCommand("replSetGetStatus"));
- assert.writeOK(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}));
+ assert.commandWorked(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}));
print("replset2.js **** TEMP 1a ****");
@@ -66,7 +66,7 @@ doTest = function(signal) {
print("replset2.js **** Try inserting a single record ****");
master.getDB(testDB).dropDatabase();
var options = {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}};
- assert.writeOK(master.getDB(testDB).foo.insert({n: 1}, options));
+ assert.commandWorked(master.getDB(testDB).foo.insert({n: 1}, options));
m1 = master.getDB(testDB).foo.findOne({n: 1});
printjson(m1);
@@ -85,7 +85,7 @@ doTest = function(signal) {
for (var n = 0; n < 1000; n++) {
bulk.insert({n: n, data: bigData});
}
- assert.writeOK(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}));
+ assert.commandWorked(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}));
print("replset2.js **** V1 ");
diff --git a/jstests/replsets/replset8.js b/jstests/replsets/replset8.js
index 65f1be46e16..87eb6d6361c 100644
--- a/jstests/replsets/replset8.js
+++ b/jstests/replsets/replset8.js
@@ -29,7 +29,7 @@ for (var i = 0; i < doccount; ++i) {
bulk.insert({_id: i, x: bigstring});
bigstring += "a";
}
-var result = assert.writeOK(bulk.execute());
+var result = assert.commandWorked(bulk.execute());
jsTestLog('insert 0-' + (doccount - 1) + ' result: ' + tojson(result));
assert.eq(doccount, result.nInserted);
assert.eq(doccount + 1, mdc.find().itcount());
@@ -40,7 +40,7 @@ bulk = mdc.initializeUnorderedBulkOp();
for (i = doccount; i < doccount * 2; ++i) {
bulk.insert({_id: i, x: i});
}
-result = assert.writeOK(bulk.execute());
+result = assert.commandWorked(bulk.execute());
jsTestLog('insert ' + doccount + '-' + (doccount * 2 - 1) + ' result: ' + tojson(result));
assert.eq(doccount, result.nInserted);
assert.eq(doccount * 2 + 1, mdc.find().itcount());
@@ -50,7 +50,7 @@ bulk = mdc.initializeUnorderedBulkOp();
for (i = 0; i < doccount; ++i) {
bulk.find({_id: i}).remove();
}
-result = assert.writeOK(bulk.execute());
+result = assert.commandWorked(bulk.execute());
jsTestLog('delete 0-' + (doccount - 1) + ' result: ' + tojson(result));
assert.eq(doccount, result.nRemoved);
assert.eq(doccount + 1, mdc.find().itcount());
@@ -68,7 +68,7 @@ for (i = doccount * 2; i > doccount; --i) {
bulk.find({_id: i}).update({$set: {x: bigstring}});
bigstring = bigstring.slice(0, -1); // remove last char
}
-result = assert.writeOK(bulk.execute({w: rt.nodes.length}));
+result = assert.commandWorked(bulk.execute({w: rt.nodes.length}));
jsTestLog('update ' + (doccount + 1) + '-' + (doccount * 2 - 1) + ' result: ' + tojson(result));
assert.eq(doccount - 1, result.nMatched);
assert.eq(doccount - 1, result.nModified);
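// Why the result is captured above (a sketch; mongo shell bulk API assumed):
// assert.commandWorked returns its argument, so the BulkWriteResult stays
// available for the nInserted/nMatched/nModified checks.
var sketchBulk = mdc.initializeUnorderedBulkOp();
sketchBulk.insert({_id: 'sketch'});
var sketchRes = assert.commandWorked(sketchBulk.execute());
assert.eq(1, sketchRes.nInserted);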
diff --git a/jstests/replsets/replsetprio1.js b/jstests/replsets/replsetprio1.js
index bb1c1f7dc76..9851e501f69 100644
--- a/jstests/replsets/replsetprio1.js
+++ b/jstests/replsets/replsetprio1.js
@@ -34,11 +34,11 @@ replTest.waitForState(nodes[1], ReplSetTest.State.PRIMARY);
// do some writes on 1
var master = replTest.getPrimary();
for (var i = 0; i < 1000; i++) {
- assert.writeOK(master.getDB("foo").bar.insert({i: i}, {writeConcern: {w: 'majority'}}));
+ assert.commandWorked(master.getDB("foo").bar.insert({i: i}, {writeConcern: {w: 'majority'}}));
}
for (i = 0; i < 1000; i++) {
- assert.writeOK(master.getDB("bar").baz.insert({i: i}, {writeConcern: {w: 'majority'}}));
+ assert.commandWorked(master.getDB("bar").baz.insert({i: i}, {writeConcern: {w: 'majority'}}));
}
// bring 2 back up; 2 should wait until caught up and then become master
diff --git a/jstests/replsets/restore_term.js b/jstests/replsets/restore_term.js
index 072a22eb974..0414feb49af 100644
--- a/jstests/replsets/restore_term.js
+++ b/jstests/replsets/restore_term.js
@@ -31,7 +31,7 @@ var primaryColl = primary.getDB("test").coll;
// The current term may be greater than 1 if an election race happens.
var firstSuccessfulTerm = getCurrentTerm(primary);
assert.gte(firstSuccessfulTerm, 1);
-assert.writeOK(primaryColl.insert({x: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(primaryColl.insert({x: 1}, {writeConcern: {w: "majority"}}));
assert.eq(getCurrentTerm(primary), firstSuccessfulTerm);
// Check that the insert op has the initial term.
diff --git a/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js b/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js
index 54e826dcfeb..e4728529ebe 100644
--- a/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js
+++ b/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js
@@ -20,8 +20,8 @@ var priConn = replTest.getPrimary();
var db = priConn.getDB('TestDB');
var config = priConn.getDB('config');
-assert.writeOK(db.user.insert({_id: 0}));
-assert.writeOK(db.user.insert({_id: 1}));
+assert.commandWorked(db.user.insert({_id: 0}));
+assert.commandWorked(db.user.insert({_id: 1}));
const lsid1 = UUID();
const lsid2 = UUID();
@@ -48,7 +48,7 @@ assert.eq(1, config.transactions.find({'_id.id': lsid1}).itcount());
assert.eq(1, config.transactions.find({'_id.id': lsid2}).itcount());
// Invalidating lsid1 doesn't impact lsid2, but allows the same statement to be executed again
-assert.writeOK(config.transactions.remove({'_id.id': lsid1}));
+assert.commandWorked(config.transactions.remove({'_id.id': lsid1}));
assert.commandWorked(db.runCommand(cmdObj1));
assert.eq(2, db.user.find({_id: 0}).toArray()[0].x);
assert.commandWorked(db.runCommand(cmdObj2));
@@ -65,8 +65,8 @@ assert.writeError(config.transactions.insert({_id: {UnknownField: 'Garbage'}}));
// Ensure that manually inserting an invalid session record without all the required fields
// breaks retryable writes for that session, but not for any other.
const lsidManual = config.transactions.find({'_id.id': lsid1}).toArray()[0]._id;
-assert.writeOK(config.transactions.remove({'_id.id': lsid1}));
-assert.writeOK(config.transactions.insert({_id: lsidManual}));
+assert.commandWorked(config.transactions.remove({'_id.id': lsid1}));
+assert.commandWorked(config.transactions.insert({_id: lsidManual}));
const lsid3 = UUID();
assert.commandWorked(db.runCommand({
diff --git a/jstests/replsets/retryable_writes_failover.js b/jstests/replsets/retryable_writes_failover.js
index 2073e2fbded..7f3c16eee6d 100644
--- a/jstests/replsets/retryable_writes_failover.js
+++ b/jstests/replsets/retryable_writes_failover.js
@@ -128,8 +128,8 @@ let deleteCmd = {
primary = replTest.getPrimary();
testDB = primary.getDB("test");
-assert.writeOK(testDB.foo.insert({_id: 40, x: 1}));
-assert.writeOK(testDB.foo.insert({_id: 50, y: 1}));
+assert.commandWorked(testDB.foo.insert({_id: 40, x: 1}));
+assert.commandWorked(testDB.foo.insert({_id: 50, y: 1}));
// Run the command on the primary and wait for replication.
result = assert.commandWorked(testDB.runCommand(deleteCmd));
diff --git a/jstests/replsets/rollback_all_op_types.js b/jstests/replsets/rollback_all_op_types.js
index 8ffc53f2faf..5eddbe528e4 100644
--- a/jstests/replsets/rollback_all_op_types.js
+++ b/jstests/replsets/rollback_all_op_types.js
@@ -40,23 +40,23 @@ let rollbackOps = {
assert.commandWorked(db.createCollection(collName));
},
op: (db, collName) => {
- assert.writeOK(db[collName].insert({_id: 0}));
+ assert.commandWorked(db[collName].insert({_id: 0}));
}
}],
"update": [{
init: (db, collName) => {
- assert.writeOK(db[collName].insert({_id: 0, val: 0}));
+ assert.commandWorked(db[collName].insert({_id: 0, val: 0}));
},
op: (db, collName) => {
- assert.writeOK(db[collName].update({_id: 0}, {val: 1}));
+ assert.commandWorked(db[collName].update({_id: 0}, {val: 1}));
},
}],
"delete": [{
init: (db, collName) => {
- assert.writeOK(db[collName].insert({_id: 0}));
+ assert.commandWorked(db[collName].insert({_id: 0}));
},
op: (db, collName) => {
- assert.writeOK(db[collName].remove({_id: 0}));
+ assert.commandWorked(db[collName].remove({_id: 0}));
},
}],
"create": [{
diff --git a/jstests/replsets/rollback_collmods.js b/jstests/replsets/rollback_collmods.js
index 6a741ec6174..7c96235f33c 100644
--- a/jstests/replsets/rollback_collmods.js
+++ b/jstests/replsets/rollback_collmods.js
@@ -29,10 +29,10 @@ function printCollectionOptions(rollbackTest, time) {
// Operations that will be present on both nodes, before the common point.
let CommonOps = (node) => {
let testDb = node.getDB(dbName);
- assert.writeOK(testDb[coll1Name].insert({a: 1, b: 1}));
- assert.writeOK(testDb[coll2Name].insert({a: 2, b: 2}));
- assert.writeOK(testDb[coll3Name].insert({a: 3, b: 3}));
- assert.writeOK(testDb[coll4Name].insert({a: 4, b: 4}));
+ assert.commandWorked(testDb[coll1Name].insert({a: 1, b: 1}));
+ assert.commandWorked(testDb[coll2Name].insert({a: 2, b: 2}));
+ assert.commandWorked(testDb[coll3Name].insert({a: 3, b: 3}));
+ assert.commandWorked(testDb[coll4Name].insert({a: 4, b: 4}));
// Start with no validation action.
assert.commandWorked(testDb.runCommand({
diff --git a/jstests/replsets/rollback_creates_rollback_directory.js b/jstests/replsets/rollback_creates_rollback_directory.js
index 907e81e1f8c..3cb47eb65a2 100644
--- a/jstests/replsets/rollback_creates_rollback_directory.js
+++ b/jstests/replsets/rollback_creates_rollback_directory.js
@@ -47,21 +47,21 @@ function runRollbackDirectoryTest(shouldCreateRollbackFiles) {
}, "Arbiter failed to initialize.");
var options = {writeConcern: {w: 2, wtimeout: replTest.kDefaultTimeoutMS}, upsert: true};
- assert.writeOK(A.foo.update({key: 'value1'}, {$set: {req: 'req'}}, options));
+ assert.commandWorked(A.foo.update({key: 'value1'}, {$set: {req: 'req'}}, options));
var AID = replTest.getNodeId(a_conn);
replTest.stop(AID);
master = replTest.getPrimary();
assert(b_conn.host == master.host);
options = {writeConcern: {w: 1, wtimeout: replTest.kDefaultTimeoutMS}, upsert: true};
- assert.writeOK(B.foo.update({key: 'value1'}, {$set: {res: 'res'}}, options));
+ assert.commandWorked(B.foo.update({key: 'value1'}, {$set: {res: 'res'}}, options));
var BID = replTest.getNodeId(b_conn);
replTest.stop(BID);
replTest.restart(AID);
master = replTest.getPrimary();
assert(a_conn.host == master.host);
options = {writeConcern: {w: 1, wtimeout: replTest.kDefaultTimeoutMS}, upsert: true};
- assert.writeOK(A.foo.update({key: 'value2'}, {$set: {req: 'req'}}, options));
+ assert.commandWorked(A.foo.update({key: 'value2'}, {$set: {req: 'req'}}, options));
replTest.restart(BID); // should rollback
reconnect(B);
diff --git a/jstests/replsets/rollback_crud_op_sequences.js b/jstests/replsets/rollback_crud_op_sequences.js
index ce21957f45e..a2e89332141 100644
--- a/jstests/replsets/rollback_crud_op_sequences.js
+++ b/jstests/replsets/rollback_crud_op_sequences.js
@@ -58,15 +58,15 @@ var a = a_conn.getDB("foo");
var b = b_conn.getDB("foo");
// initial data for both nodes
-assert.writeOK(a.bar.insert({q: 0}));
-assert.writeOK(a.bar.insert({q: 1, a: "foo"}));
-assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1}));
-assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}));
-assert.writeOK(a.bar.insert({q: 40, a: 1}));
-assert.writeOK(a.bar.insert({q: 40, a: 2}));
-assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'}));
+assert.commandWorked(a.bar.insert({q: 0}));
+assert.commandWorked(a.bar.insert({q: 1, a: "foo"}));
+assert.commandWorked(a.bar.insert({q: 2, a: "foo", x: 1}));
+assert.commandWorked(a.bar.insert({q: 3, bb: 9, a: "foo"}));
+assert.commandWorked(a.bar.insert({q: 40, a: 1}));
+assert.commandWorked(a.bar.insert({q: 40, a: 2}));
+assert.commandWorked(a.bar.insert({q: 70, txt: 'willremove'}));
a.createCollection("kap", {capped: true, size: 5000});
-assert.writeOK(a.kap.insert({foo: 1}));
+assert.commandWorked(a.kap.insert({foo: 1}));
// going back to empty on capped is a special case and must be tested
a.createCollection("kap2", {capped: true, size: 5501});
replTest.awaitReplication();
@@ -83,17 +83,17 @@ assert.soon(function() {
}, "node B did not become master as expected", ReplSetTest.kDefaultTimeoutMS);
// do operations on B and B alone; these will be rolled back
-assert.writeOK(b.bar.insert({q: 4}));
-assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true}));
-assert.writeOK(b.bar.remove({q: 40})); // multi remove test
-assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true}));
+assert.commandWorked(b.bar.insert({q: 4}));
+assert.commandWorked(b.bar.update({q: 3}, {q: 3, rb: true}));
+assert.commandWorked(b.bar.remove({q: 40})); // multi remove test
+assert.commandWorked(b.bar.update({q: 2}, {q: 39, rb: true}));
// rolling back a delete will involve reinserting the item(s)
-assert.writeOK(b.bar.remove({q: 1}));
-assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}}));
-assert.writeOK(b.kap.insert({foo: 2}));
-assert.writeOK(b.kap2.insert({foo: 2}));
+assert.commandWorked(b.bar.remove({q: 1}));
+assert.commandWorked(b.bar.update({q: 0}, {$inc: {y: 1}}));
+assert.commandWorked(b.kap.insert({foo: 2}));
+assert.commandWorked(b.kap2.insert({foo: 2}));
// create a collection (need to roll back the whole thing)
-assert.writeOK(b.newcoll.insert({a: true}));
+assert.commandWorked(b.newcoll.insert({a: true}));
// create a new empty collection (need to roll back the whole thing)
b.createCollection("abc");
@@ -117,9 +117,9 @@ assert.soon(function() {
}
});
assert.gte(a.bar.find().itcount(), 1, "count check");
-assert.writeOK(a.bar.insert({txt: 'foo'}));
-assert.writeOK(a.bar.remove({q: 70}));
-assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}}));
+assert.commandWorked(a.bar.insert({txt: 'foo'}));
+assert.commandWorked(a.bar.remove({q: 70}));
+assert.commandWorked(a.bar.update({q: 0}, {$inc: {y: 33}}));
// A is 1 2 3 7 8
// B is 1 2 3 4 5 6
diff --git a/jstests/replsets/rollback_ddl_op_sequences.js b/jstests/replsets/rollback_ddl_op_sequences.js
index 79883eac336..d5a224fcbed 100644
--- a/jstests/replsets/rollback_ddl_op_sequences.js
+++ b/jstests/replsets/rollback_ddl_op_sequences.js
@@ -63,23 +63,23 @@ var a = a_conn.getDB("foo");
var b = b_conn.getDB("foo");
// initial data for both nodes
-assert.writeOK(a.b.insert({x: 1}));
+assert.commandWorked(a.b.insert({x: 1}));
a.b.ensureIndex({x: 1});
-assert.writeOK(a.oldname.insert({y: 1}));
-assert.writeOK(a.oldname.insert({y: 2}));
+assert.commandWorked(a.oldname.insert({y: 1}));
+assert.commandWorked(a.oldname.insert({y: 2}));
a.oldname.ensureIndex({y: 1}, true);
-assert.writeOK(a.bar.insert({q: 0}));
-assert.writeOK(a.bar.insert({q: 1, a: "foo"}));
-assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1}));
-assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}));
-assert.writeOK(a.bar.insert({q: 40333333, a: 1}));
+assert.commandWorked(a.bar.insert({q: 0}));
+assert.commandWorked(a.bar.insert({q: 1, a: "foo"}));
+assert.commandWorked(a.bar.insert({q: 2, a: "foo", x: 1}));
+assert.commandWorked(a.bar.insert({q: 3, bb: 9, a: "foo"}));
+assert.commandWorked(a.bar.insert({q: 40333333, a: 1}));
for (var i = 0; i < 200; i++) {
- assert.writeOK(a.bar.insert({i: i}));
+ assert.commandWorked(a.bar.insert({i: i}));
}
-assert.writeOK(a.bar.insert({q: 40, a: 2}));
-assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'}));
+assert.commandWorked(a.bar.insert({q: 40, a: 2}));
+assert.commandWorked(a.bar.insert({q: 70, txt: 'willremove'}));
a.createCollection("kap", {capped: true, size: 5000});
-assert.writeOK(a.kap.insert({foo: 1}));
+assert.commandWorked(a.kap.insert({foo: 1}));
replTest.awaitReplication();
// isolate A and wait for B to become master
@@ -94,17 +94,17 @@ assert.soon(function() {
});
// do operations on B and B alone; these will be rolled back
-assert.writeOK(b.bar.insert({q: 4}));
-assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true}));
-assert.writeOK(b.bar.remove({q: 40})); // multi remove test
-assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true}));
+assert.commandWorked(b.bar.insert({q: 4}));
+assert.commandWorked(b.bar.update({q: 3}, {q: 3, rb: true}));
+assert.commandWorked(b.bar.remove({q: 40})); // multi remove test
+assert.commandWorked(b.bar.update({q: 2}, {q: 39, rb: true}));
// rolling back a delete will involve reinserting the item(s)
-assert.writeOK(b.bar.remove({q: 1}));
-assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}}));
-assert.writeOK(b.kap.insert({foo: 2}));
-assert.writeOK(b.kap2.insert({foo: 2}));
+assert.commandWorked(b.bar.remove({q: 1}));
+assert.commandWorked(b.bar.update({q: 0}, {$inc: {y: 1}}));
+assert.commandWorked(b.kap.insert({foo: 2}));
+assert.commandWorked(b.kap2.insert({foo: 2}));
// create a collection (need to roll back the whole thing)
-assert.writeOK(b.newcoll.insert({a: true}));
+assert.commandWorked(b.newcoll.insert({a: true}));
// create a new empty collection (need to roll back the whole thing)
b.createCollection("abc");
// drop a collection - we'll need all its data back!
@@ -119,8 +119,8 @@ assert(b.fooname.find().itcount() > 0, "count rename");
b.fooname.ensureIndex({q: 1});
// test rolling back (dropping) a whole database
var abc = b.getSisterDB("abc");
-assert.writeOK(abc.foo.insert({x: 1}));
-assert.writeOK(abc.bar.insert({y: 999}));
+assert.commandWorked(abc.foo.insert({x: 1}));
+assert.commandWorked(abc.bar.insert({y: 999}));
// isolate B, bring A back into contact with the arbiter, then wait for A to become master
// insert new data into A so that B will need to roll back when it reconnects to A
@@ -142,9 +142,9 @@ assert.soon(function() {
}
});
assert(a.bar.find().itcount() >= 1, "count check");
-assert.writeOK(a.bar.insert({txt: 'foo'}));
-assert.writeOK(a.bar.remove({q: 70}));
-assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}}));
+assert.commandWorked(a.bar.insert({txt: 'foo'}));
+assert.commandWorked(a.bar.remove({q: 70}));
+assert.commandWorked(a.bar.update({q: 0}, {$inc: {y: 33}}));
// A is 1 2 3 7 8
// B is 1 2 3 4 5 6
diff --git a/jstests/replsets/rollback_drop_database.js b/jstests/replsets/rollback_drop_database.js
index aa783cc9dd3..58eb31ae257 100644
--- a/jstests/replsets/rollback_drop_database.js
+++ b/jstests/replsets/rollback_drop_database.js
@@ -22,7 +22,7 @@ let rollbackNode = rollbackTest.getPrimary();
let syncSourceNode = rollbackTest.getSecondary();
// Perform initial insert (common operation).
-assert.writeOK(rollbackNode.getDB(oldDbName)["beforeRollback"].insert({"num": 1}));
+assert.commandWorked(rollbackNode.getDB(oldDbName)["beforeRollback"].insert({"num": 1}));
// Set a failpoint on the original primary, so that it blocks after it commits the last
// 'dropCollection' entry but before the 'dropDatabase' entry is logged.
@@ -63,7 +63,7 @@ rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
// Perform an insert on another database while interfacing with the new primary.
// This is the sync source's divergent oplog entry.
-assert.writeOK(syncSourceNode.getDB(newDbName)["afterRollback"].insert({"num": 2}));
+assert.commandWorked(syncSourceNode.getDB(newDbName)["afterRollback"].insert({"num": 2}));
rollbackTest.transitionToSyncSourceOperationsDuringRollback();
rollbackTest.transitionToSteadyStateOperations();
diff --git a/jstests/replsets/rollback_rename_collection_on_sync_source.js b/jstests/replsets/rollback_rename_collection_on_sync_source.js
index be03faa94bf..fa227752e14 100644
--- a/jstests/replsets/rollback_rename_collection_on_sync_source.js
+++ b/jstests/replsets/rollback_rename_collection_on_sync_source.js
@@ -18,18 +18,18 @@ let doc2 = {x: 2};
let CommonOps = (node) => {
// Insert a document that will exist on the sync source and rollback node.
- assert.writeOK(node.getDB(dbName)[sourceCollName].insert(doc1));
+ assert.commandWorked(node.getDB(dbName)[sourceCollName].insert(doc1));
};
let RollbackOps = (node) => {
// Delete the document on rollback node so it will be refetched from sync source.
- assert.writeOK(node.getDB(dbName)[sourceCollName].remove(doc1));
+ assert.commandWorked(node.getDB(dbName)[sourceCollName].remove(doc1));
};
let SyncSourceOps = (node) => {
// Rename the original collection on the sync source.
assert.commandWorked(node.getDB(dbName)[sourceCollName].renameCollection(destCollName));
- assert.writeOK(node.getDB(dbName)[destCollName].insert(doc2));
+ assert.commandWorked(node.getDB(dbName)[destCollName].insert(doc2));
};
// Set up Rollback Test.
diff --git a/jstests/replsets/rollback_views.js b/jstests/replsets/rollback_views.js
index a802eb81663..7f7537b25e6 100644
--- a/jstests/replsets/rollback_views.js
+++ b/jstests/replsets/rollback_views.js
@@ -68,7 +68,7 @@ let a1 = nodeA.getDB("test1");
let b1 = nodeB.getDB("test1");
// Initial data for both nodes.
-assert.writeOK(a1.coll.insert([{_id: 1, x: 1}, {_id: 2, x: 2}]));
+assert.commandWorked(a1.coll.insert([{_id: 1, x: 1}, {_id: 2, x: 2}]));
// Wait for initial replication.
replTest.awaitReplication();
@@ -80,16 +80,16 @@ assert.soon(() => replTest.getPrimary() == nodeB, "node B did not become primary
// Do operations on B and B alone; these will be rolled back.
// For the collection creation, first create a view with the same name, stressing rollback.
-assert.writeOK(b1.coll.remove({x: 2}));
+assert.commandWorked(b1.coll.remove({x: 2}));
assert.commandWorked(b1.createView("x", "coll", [{$match: {x: 1}}]));
let b2 = b1.getSiblingDB("test2");
-assert.writeOK(b2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}]));
+assert.commandWorked(b2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}]));
assert.commandWorked(b2.createView("y", "coll", [{$match: {y: 2}}]));
let b3 = b1.getSiblingDB("test3");
assert.commandWorked(b3.createView("z", "coll", []));
-assert.writeOK(b3.system.views.remove({}));
-assert.writeOK(b3.z.insert([{z: 1}, {z: 2}, {z: 3}]));
-assert.writeOK(b3.z.remove({z: 1}));
+assert.commandWorked(b3.system.views.remove({}));
+assert.commandWorked(b3.z.insert([{z: 1}, {z: 2}, {z: 3}]));
+assert.commandWorked(b3.z.remove({z: 1}));
// Isolate B, bring A back into contact with the arbiter, then wait for A to become primary.
// Insert new data into A, so that B will need to roll back when it reconnects to A.
@@ -100,12 +100,12 @@ assert.soon(() => replTest.getPrimary() == nodeA, "nodeA did not become primary
// A is now primary and will perform writes that must be copied by B after rollback.
assert.eq(a1.coll.find().itcount(), 2, "expected two documents in test1.coll");
-assert.writeOK(a1.x.insert({_id: 3, x: "string in test1.x"}));
+assert.commandWorked(a1.x.insert({_id: 3, x: "string in test1.x"}));
let a2 = a1.getSiblingDB("test2");
assert.commandWorked(a2.createView("y", "coll", [{$match: {y: 2}}]));
-assert.writeOK(a2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}]));
+assert.commandWorked(a2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}]));
let a3 = a1.getSiblingDB("test3");
-assert.writeOK(a3.coll.insert([{z: 1}, {z: 2}, {z: 3}]));
+assert.commandWorked(a3.coll.insert([{z: 1}, {z: 2}, {z: 3}]));
assert.commandWorked(a3.createView("z", "coll", [{$match: {z: 3}}]));
// A is collections: test1.{coll,x}, test2.{coll,system.views}, test3.{coll,system.views}
diff --git a/jstests/replsets/rollback_waits_for_bgindex_completion.js b/jstests/replsets/rollback_waits_for_bgindex_completion.js
index e6433d558e6..857a90e1148 100644
--- a/jstests/replsets/rollback_waits_for_bgindex_completion.js
+++ b/jstests/replsets/rollback_waits_for_bgindex_completion.js
@@ -61,7 +61,7 @@ CommonOps(originalPrimary);
// Insert a document so that there is an operation to roll back.
const rollbackNode = rollbackTest.transitionToRollbackOperations();
-assert.writeOK(rollbackNode.getDB(dbName)["rollbackColl"].insert({x: 1}));
+assert.commandWorked(rollbackNode.getDB(dbName)["rollbackColl"].insert({x: 1}));
// Allow rollback to start. There are no sync source ops.
rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
diff --git a/jstests/replsets/rollback_with_socket_error_then_steady_state.js b/jstests/replsets/rollback_with_socket_error_then_steady_state.js
index 713658e1b5f..b7437d63b41 100644
--- a/jstests/replsets/rollback_with_socket_error_then_steady_state.js
+++ b/jstests/replsets/rollback_with_socket_error_then_steady_state.js
@@ -41,7 +41,7 @@ jsTestLog("Make sure node 0 is primary.");
stepUp(rst, nodes[0]);
assert.eq(nodes[0], rst.getPrimary());
// Wait for all data bearing nodes to get up to date.
-assert.writeOK(nodes[0].getCollection(collName).insert(
+assert.commandWorked(nodes[0].getCollection(collName).insert(
{a: counter++}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
jsTestLog("Create two partitions: [1] and [0,2,3,4].");
@@ -51,7 +51,7 @@ nodes[1].disconnect(nodes[3]);
nodes[1].disconnect(nodes[4]);
jsTestLog("Do a write that is replicated to [0,2,3,4].");
-assert.writeOK(nodes[0].getCollection(collName).insert(
+assert.commandWorked(nodes[0].getCollection(collName).insert(
{a: counter++}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
jsTestLog("Repartition to: [0,2] and [1,3,4].");
@@ -68,7 +68,7 @@ waitForState(nodes[1], ReplSetTest.State.PRIMARY);
assert.eq(nodes[1], rst.getPrimary());
jsTestLog("Do a write to node 1 on the [1,3,4] side of the partition.");
-assert.writeOK(nodes[1].getCollection(collName).insert({a: counter++}));
+assert.commandWorked(nodes[1].getCollection(collName).insert({a: counter++}));
// Turn on failpoint on node 2 to pause rollback before doing anything.
assert.commandWorked(
@@ -112,7 +112,7 @@ waitForState(nodes[0], ReplSetTest.State.PRIMARY);
assert.eq(nodes[0], rst.getPrimary());
jsTestLog("w:2 write to node 0 (replicated to node 2)");
-assert.writeOK(nodes[0].getCollection(collName).insert(
+assert.commandWorked(nodes[0].getCollection(collName).insert(
{a: counter++}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
// At this point node 2 has failed rollback before making any durable changes, including writing
diff --git a/jstests/replsets/rslib.js b/jstests/replsets/rslib.js
index 209ed8172e1..6da89504ba9 100644
--- a/jstests/replsets/rslib.js
+++ b/jstests/replsets/rslib.js
@@ -42,7 +42,7 @@ syncFrom = function(syncingNode, desiredSyncSource, rst) {
stopServerReplication(syncingNode);
- assert.writeOK(rst.getPrimary().getDB(dummyName).getCollection(dummyName).insert({a: 1}));
+ assert.commandWorked(rst.getPrimary().getDB(dummyName).getCollection(dummyName).insert({a: 1}));
// Wait for 'desiredSyncSource' to get the dummy write we just did so we know it's
// definitely ahead of 'syncingNode' before we call replSetSyncFrom.
assert.soonNoExcept(function() {
diff --git a/jstests/replsets/secondary_as_sync_source.js b/jstests/replsets/secondary_as_sync_source.js
index ec18cebff0d..1d12f08b037 100644
--- a/jstests/replsets/secondary_as_sync_source.js
+++ b/jstests/replsets/secondary_as_sync_source.js
@@ -21,7 +21,7 @@ function addTestDocuments(db) {
for (var i = 0; i < size; ++i) {
bulk.insert({i: i});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}
let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2, useBridge: true});
diff --git a/jstests/replsets/server_status_metrics.js b/jstests/replsets/server_status_metrics.js
index 7b271d197f4..e7784ba0c76 100644
--- a/jstests/replsets/server_status_metrics.js
+++ b/jstests/replsets/server_status_metrics.js
@@ -41,7 +41,7 @@ var primary = rt.getPrimary();
var testDB = primary.getDB("test");
assert.commandWorked(testDB.createCollection('a'));
-assert.writeOK(testDB.b.insert({}, {writeConcern: {w: 2}}));
+assert.commandWorked(testDB.b.insert({}, {writeConcern: {w: 2}}));
var ss = secondary.getDB("test").serverStatus();
// The number of ops received and the number of ops applied are not guaranteed to be the same
@@ -55,12 +55,12 @@ var bulk = testDB.a.initializeUnorderedBulkOp();
for (x = 0; x < 1000; x++) {
bulk.insert({});
}
-assert.writeOK(bulk.execute({w: 2}));
+assert.commandWorked(bulk.execute({w: 2}));
testSecondaryMetrics(secondary, 1000, secondaryBaseOplogOpsApplied, secondaryBaseOplogOpsReceived);
var options = {writeConcern: {w: 2}, multi: true, upsert: true};
-assert.writeOK(testDB.a.update({}, {$set: {d: new Date()}}, options));
+assert.commandWorked(testDB.a.update({}, {$set: {d: new Date()}}, options));
testSecondaryMetrics(secondary, 2000, secondaryBaseOplogOpsApplied, secondaryBaseOplogOpsReceived);
@@ -70,15 +70,15 @@ var startNum = testDB.serverStatus().metrics.getLastError.wtime.num;
printjson(primary.getDB("test").serverStatus().metrics);
-assert.writeOK(testDB.a.insert({x: 1}, {writeConcern: {w: 1, wtimeout: 5000}}));
+assert.commandWorked(testDB.a.insert({x: 1}, {writeConcern: {w: 1, wtimeout: 5000}}));
assert.eq(testDB.serverStatus().metrics.getLastError.wtime.totalMillis, startMillis);
assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum);
-assert.writeOK(testDB.a.insert({x: 1}, {writeConcern: {w: -11, wtimeout: 5000}}));
+assert.commandWorked(testDB.a.insert({x: 1}, {writeConcern: {w: -11, wtimeout: 5000}}));
assert.eq(testDB.serverStatus().metrics.getLastError.wtime.totalMillis, startMillis);
assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum);
-assert.writeOK(testDB.a.insert({x: 1}, {writeConcern: {w: 2, wtimeout: 5000}}));
+assert.commandWorked(testDB.a.insert({x: 1}, {writeConcern: {w: 2, wtimeout: 5000}}));
assert(testDB.serverStatus().metrics.getLastError.wtime.totalMillis >= startMillis);
assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum + 1);
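// A sketch of the metric exercised above, assuming this serverStatus layout:
// getLastError.wtime only advances for writes that actually wait on replication
// (w > 1), which is why the w:1 and invalid-w inserts leave it unchanged.
var wtimeSketch = testDB.serverStatus().metrics.getLastError.wtime;
printjson({num: wtimeSketch.num, totalMillis: wtimeSketch.totalMillis});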
diff --git a/jstests/replsets/server_status_repl.js b/jstests/replsets/server_status_repl.js
index c00fcc8818a..058fc14c5fa 100644
--- a/jstests/replsets/server_status_repl.js
+++ b/jstests/replsets/server_status_repl.js
@@ -9,7 +9,7 @@ var primary = rt.getPrimary();
var testDB = primary.getDB("test");
assert.commandWorked(testDB.createCollection('a'));
-assert.writeOK(testDB.b.insert({}, {writeConcern: {w: 2}}));
+assert.commandWorked(testDB.b.insert({}, {writeConcern: {w: 2}}));
var ss = primary.getDB("test").serverStatus({repl: 1});
assert.neq(ss.repl.replicationProgress, null, tojson(ss.repl));
diff --git a/jstests/replsets/shutdown_primary.js b/jstests/replsets/shutdown_primary.js
index bcaefe8c541..038049c6753 100644
--- a/jstests/replsets/shutdown_primary.js
+++ b/jstests/replsets/shutdown_primary.js
@@ -25,13 +25,13 @@ replTest.initiate();
var primary = replTest.getPrimary();
var testDB = primary.getDB(name);
var timeout = ReplSetTest.kDefaultTimeoutMS;
-assert.writeOK(testDB.foo.insert({x: 1}, {writeConcern: {w: 3, wtimeout: timeout}}));
+assert.commandWorked(testDB.foo.insert({x: 1}, {writeConcern: {w: 3, wtimeout: timeout}}));
jsTestLog("Blocking replication to secondaries.");
stopReplicationOnSecondaries(replTest);
jsTestLog("Executing write to primary.");
-assert.writeOK(testDB.foo.insert({x: 2}));
+assert.commandWorked(testDB.foo.insert({x: 2}));
jsTestLog("Attempting to shut down primary.");
assert.commandFailedWithCode(primary.adminCommand({shutdown: 1}),
@@ -39,7 +39,7 @@ assert.commandFailedWithCode(primary.adminCommand({shutdown: 1}),
"shut down did not fail with 'ExceededTimeLimit'");
jsTestLog("Verifying primary did not shut down.");
-assert.writeOK(testDB.foo.insert({x: 3}));
+assert.commandWorked(testDB.foo.insert({x: 3}));
jsTestLog("Shutting down primary in a parallel shell");
var awaitShell = startParallelShell(function() {
diff --git a/jstests/replsets/single_server_majority.js b/jstests/replsets/single_server_majority.js
index c7c2ec862f4..039d8ccebeb 100644
--- a/jstests/replsets/single_server_majority.js
+++ b/jstests/replsets/single_server_majority.js
@@ -9,5 +9,5 @@ col = db.getCollection("single_server_majority");
col.drop();
// see if we can get a majority write on this single server
-assert.writeOK(col.save({a: "test"}, {writeConcern: {w: 'majority'}}));
+assert.commandWorked(col.save({a: "test"}, {writeConcern: {w: 'majority'}}));
MongoRunner.stopMongod(mongod);
\ No newline at end of file
diff --git a/jstests/replsets/slave_delay_clean_shutdown.js b/jstests/replsets/slave_delay_clean_shutdown.js
index 2d3e75824da..8a68a2d0a58 100644
--- a/jstests/replsets/slave_delay_clean_shutdown.js
+++ b/jstests/replsets/slave_delay_clean_shutdown.js
@@ -26,7 +26,7 @@ rst.initiate(conf);
var master = rst.getPrimary(); // Waits for PRIMARY state.
// Push some ops through before setting slave delay.
-assert.writeOK(master.getCollection(ns).insert([{}, {}, {}], {writeConcern: {w: 2}}));
+assert.commandWorked(master.getCollection(ns).insert([{}, {}, {}], {writeConcern: {w: 2}}));
// Set slaveDelay and wait for secondary to receive the change.
conf = rst.getReplSetConfigFromNode();
@@ -40,7 +40,7 @@ sleep(2000); // The secondary apply loop only checks for slaveDelay changes onc
var secondary = rst.getSecondary();
const lastOp = getLatestOp(secondary);
-assert.writeOK(master.getCollection(ns).insert([{}, {}, {}]));
+assert.commandWorked(master.getCollection(ns).insert([{}, {}, {}]));
assert.soon(() => secondary.adminCommand('serverStatus').metrics.repl.buffer.count > 0,
() => secondary.adminCommand('serverStatus').metrics.repl);
assert.neq(getLatestOp(master), lastOp);
diff --git a/jstests/replsets/slavedelay1.js b/jstests/replsets/slavedelay1.js
index 98dc46bacc4..770c2430fdd 100644
--- a/jstests/replsets/slavedelay1.js
+++ b/jstests/replsets/slavedelay1.js
@@ -26,7 +26,7 @@ doTest = function(signal) {
waitForAllMembers(master);
// insert a record
- assert.writeOK(master.foo.insert({x: 1}, {writeConcern: {w: 2}}));
+ assert.commandWorked(master.foo.insert({x: 1}, {writeConcern: {w: 2}}));
var doc = master.foo.findOne();
assert.eq(doc.x, 1);
@@ -65,7 +65,7 @@ doTest = function(signal) {
master = reconfig(replTest, config);
master = master.getSisterDB(name);
- assert.writeOK(master.foo.insert(
+ assert.commandWorked(master.foo.insert(
{_id: 123, x: 'foo'}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
for (var i = 0; i < 8; i++) {
diff --git a/jstests/replsets/step_down_during_draining.js b/jstests/replsets/step_down_during_draining.js
index 47c8ee2651a..b42ecd48d61 100644
--- a/jstests/replsets/step_down_during_draining.js
+++ b/jstests/replsets/step_down_during_draining.js
@@ -63,7 +63,7 @@ function stepUpNode(node) {
// Do an initial insert to prevent the secondary from going into recovery
var numDocuments = 20;
var coll = primary.getDB("foo").foo;
-assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
+assert.commandWorked(coll.insert({x: 0}, {writeConcern: {w: 3}}));
replSet.awaitReplication();
// Enable fail point to stop replication.
@@ -72,7 +72,7 @@ secondaries.forEach(enableFailPoint);
var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
for (var i = 1; i < numDocuments; ++i) {
- assert.writeOK(coll.insert({x: i}));
+ assert.commandWorked(coll.insert({x: i}));
}
jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
@@ -124,7 +124,7 @@ assert.commandWorked(
// Ensure new primary is writable.
jsTestLog('New primary should be writable after draining is complete');
-assert.writeOK(secondary.getDB("foo").flag.insert({sentinel: 1}));
+assert.commandWorked(secondary.getDB("foo").flag.insert({sentinel: 1}));
// Check that all writes reached the secondary's op queue prior to stepping down
// the original primary, and that they got applied.
assert.eq(secondary.getDB("foo").foo.find().itcount(), numDocuments);
diff --git a/jstests/replsets/step_down_during_draining2.js b/jstests/replsets/step_down_during_draining2.js
index 1e97f93865a..eb68e6ce0bf 100644
--- a/jstests/replsets/step_down_during_draining2.js
+++ b/jstests/replsets/step_down_during_draining2.js
@@ -67,7 +67,7 @@ function stepUpNode(node) {
// Do an initial insert to prevent the secondary from going into recovery
var numDocuments = 20;
var coll = primary.getDB("foo").foo;
-assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
+assert.commandWorked(coll.insert({x: 0}, {writeConcern: {w: 3}}));
replSet.awaitReplication();
// Enable fail point to stop replication.
@@ -76,7 +76,7 @@ secondaries.forEach(enableFailPoint);
var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
for (var i = 1; i < numDocuments; ++i) {
- assert.writeOK(coll.insert({x: i}));
+ assert.commandWorked(coll.insert({x: i}));
}
jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
@@ -165,7 +165,7 @@ assert.soon(function() {
});
jsTestLog('Ensure new primary is writable.');
-assert.writeOK(secondary.getDB("foo").flag.insert({sentinel: 1}, {writeConcern: {w: 3}}));
+assert.commandWorked(secondary.getDB("foo").flag.insert({sentinel: 1}, {writeConcern: {w: 3}}));
// Check that no writes were lost.
assert.eq(secondary.getDB("foo").foo.find().itcount(), numDocuments);
replSet.stopSet();
diff --git a/jstests/replsets/step_down_during_draining3.js b/jstests/replsets/step_down_during_draining3.js
index 98c42955fc6..c3751ec136d 100644
--- a/jstests/replsets/step_down_during_draining3.js
+++ b/jstests/replsets/step_down_during_draining3.js
@@ -56,7 +56,7 @@ function stepUpNode(node) {
// Do an initial insert to prevent the secondary from going into recovery
var numDocuments = 20;
var coll = primary.getDB("foo").foo;
-assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
+assert.commandWorked(coll.insert({x: 0}, {writeConcern: {w: 3}}));
replSet.awaitReplication();
// Enable fail point to stop replication.
@@ -65,7 +65,7 @@ secondaries.forEach(enableFailPoint);
var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
for (var i = 1; i < numDocuments; ++i) {
- assert.writeOK(coll.insert({x: i}));
+ assert.commandWorked(coll.insert({x: i}));
}
jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
diff --git a/jstests/replsets/stepdown.js b/jstests/replsets/stepdown.js
index 6abdd335e92..7ce17186af4 100644
--- a/jstests/replsets/stepdown.js
+++ b/jstests/replsets/stepdown.js
@@ -28,7 +28,7 @@ var master = replTest.getPrimary();
// do a write
print("\ndo a write");
-assert.writeOK(master.getDB("foo").bar.insert({x: 1}));
+assert.commandWorked(master.getDB("foo").bar.insert({x: 1}));
replTest.awaitReplication();
// In the event of any error, we have to unlock any nodes that we have fsyncLocked.
@@ -61,7 +61,7 @@ try {
for (var i = 0; i < 11; i++) {
// do another write
- assert.writeOK(master.getDB("foo").bar.insert({x: i}));
+ assert.commandWorked(master.getDB("foo").bar.insert({x: i}));
}
let res = assert.commandWorked(master.adminCommand({replSetGetStatus: 1}));
diff --git a/jstests/replsets/stepdown3.js b/jstests/replsets/stepdown3.js
index 508645cdf5a..5566e7379a1 100644
--- a/jstests/replsets/stepdown3.js
+++ b/jstests/replsets/stepdown3.js
@@ -20,7 +20,7 @@ replTest.awaitReplication();
// on the secondary (due to starting up), and we need to be within 10 seconds
// to step down.
var options = {writeConcern: {w: 2, wtimeout: 30000}};
-assert.writeOK(master.getDB("test").foo.insert({x: 2}, options));
+assert.commandWorked(master.getDB("test").foo.insert({x: 2}, options));
// lock secondary, to pause replication
print("\nlock secondary");
var locked = replTest._slaves[0];
diff --git a/jstests/replsets/stepdown_catch_up_opt.js b/jstests/replsets/stepdown_catch_up_opt.js
index 82c31b49a0a..718056d71c0 100644
--- a/jstests/replsets/stepdown_catch_up_opt.js
+++ b/jstests/replsets/stepdown_catch_up_opt.js
@@ -53,7 +53,7 @@ function disableFailPoint() {
// shut down.
try {
jsTestLog('Write to primary to make secondary out of sync.');
- assert.writeOK(primary.getDB('test').foo.insert({i: 1}), 'Failed to insert document.');
+ assert.commandWorked(primary.getDB('test').foo.insert({i: 1}), 'Failed to insert document.');
sleep(1000);
// Secondary is now at least 1 second behind.
diff --git a/jstests/replsets/stepdown_kill_other_ops.js b/jstests/replsets/stepdown_kill_other_ops.js
index 06fc8de563f..00630104ea0 100644
--- a/jstests/replsets/stepdown_kill_other_ops.js
+++ b/jstests/replsets/stepdown_kill_other_ops.js
@@ -19,7 +19,7 @@ replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
var primary = replSet.getPrimary();
assert.eq(primary.host, nodes[0], "primary assumed to be node 0");
-assert.writeOK(primary.getDB(name).foo.insert({x: 1}, {w: 2, wtimeout: 10000}));
+assert.commandWorked(primary.getDB(name).foo.insert({x: 1}, {w: 2, wtimeout: 10000}));
replSet.awaitReplication();
jsTestLog("Sleeping 30 seconds so the SECONDARY will be considered electable");
diff --git a/jstests/replsets/stepdown_killop.js b/jstests/replsets/stepdown_killop.js
index 87d7d884a8b..e3446a5cdb5 100644
--- a/jstests/replsets/stepdown_killop.js
+++ b/jstests/replsets/stepdown_killop.js
@@ -35,7 +35,7 @@ assert.eq(primary.host, nodes[0], "primary assumed to be node 0");
// do a write then ask the PRIMARY to stepdown
jsTestLog("Initiating stepdown");
-assert.writeOK(primary.getDB(name).foo.insert(
+assert.commandWorked(primary.getDB(name).foo.insert(
{myDoc: true, x: 1}, {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
var stepDownCmd = function() {
var res =
@@ -76,7 +76,7 @@ primary.getDB('admin').killOp(stepDownOpID);
var exitCode = stepDowner();
assert.eq(0, exitCode);
-assert.writeOK(primary.getDB(name).foo.remove({}));
+assert.commandWorked(primary.getDB(name).foo.remove({}));
restartServerReplication(secondary);
replSet.stopSet();
})();
diff --git a/jstests/replsets/stepdown_long_wait_time.js b/jstests/replsets/stepdown_long_wait_time.js
index 5958aa3a86c..6ece19ba068 100644
--- a/jstests/replsets/stepdown_long_wait_time.js
+++ b/jstests/replsets/stepdown_long_wait_time.js
@@ -33,7 +33,7 @@ stopServerReplication(secondary);
jsTestLog("do a write then ask the PRIMARY to stepdown");
var options = {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}};
-assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options));
+assert.commandWorked(primary.getDB(name).foo.insert({x: 1}, options));
var stepDownCmd = function() {
assert.commandWorked(db.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}));
diff --git a/jstests/replsets/stepdown_needs_electable_secondary.js b/jstests/replsets/stepdown_needs_electable_secondary.js
index 4d2124cc831..cb92b5133d4 100644
--- a/jstests/replsets/stepdown_needs_electable_secondary.js
+++ b/jstests/replsets/stepdown_needs_electable_secondary.js
@@ -64,7 +64,8 @@ jsTestLog("Doing a write to primary.");
var testDB = replTest.getPrimary().getDB('testdb');
var coll = testDB.stepdown_needs_electable_secondary;
var timeout = ReplSetTest.kDefaultTimeoutMS;
-assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 1, wtimeout: timeout}}));
+assert.commandWorked(
+ coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 1, wtimeout: timeout}}));
// Try to step down with only the primary caught up (1 node out of 5).
// stepDown should fail.
@@ -89,7 +90,8 @@ jsTestLog("Re-enabling writes to unelectable secondary: node #" +
restartServerReplication(secondaryB_unelectable);
// Wait for this secondary to catch up by issuing a write that must be replicated to 2 nodes
-assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 2, wtimeout: timeout}}));
+assert.commandWorked(
+ coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 2, wtimeout: timeout}}));
// Try to step down and fail
jsTestLog("Trying to step down primary with only 2 nodes out of 5 caught up.");
@@ -103,7 +105,8 @@ jsTestLog("Re-enabling writes to unelectable secondary: node #" +
restartServerReplication(secondaryC_unelectable);
// Wait for this secondary to catch up by issuing a write that must be replicated to 3 nodes
-assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 3, wtimeout: timeout}}));
+assert.commandWorked(
+ coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 3, wtimeout: timeout}}));
// Try to step down and fail
jsTestLog("Trying to step down primary with a caught up majority that " +
@@ -118,7 +121,8 @@ jsTestLog("Re-enabling writes to electable secondary: node #" +
restartServerReplication(secondaryA_electable);
// Wait for this secondary to catch up by issuing a write that must be replicated to 4 nodes
-assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 4, wtimeout: timeout}}));
+assert.commandWorked(
+ coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 4, wtimeout: timeout}}));
// Try to step down. We expect success, so catch the exception thrown by 'replSetStepDown'.
jsTestLog("Trying to step down primary with a caught up majority that " +
diff --git a/jstests/replsets/stepdown_needs_majority.js b/jstests/replsets/stepdown_needs_majority.js
index cb465fb3f30..ba50f56f1c2 100644
--- a/jstests/replsets/stepdown_needs_majority.js
+++ b/jstests/replsets/stepdown_needs_majority.js
@@ -62,7 +62,7 @@ stopReplicationOnSecondaries(replTest);
// Write to the primary and attempt stepdown
//
jsTestLog("Issuing a write to the primary(" + primary.host + ") with write_concern:1");
-assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 1, wtimeout: timeout}}));
+assert.commandWorked(coll.insert(dummy_doc, {writeConcern: {w: 1, wtimeout: timeout}}));
jsTestLog("Trying to step down primary with only 1 node out of 5 caught up.");
assertStepDownFailsWithExceededTimeLimit(primary);
@@ -75,7 +75,7 @@ jsTestLog("Reenabling writes to one secondary (" + nodeIdStr(replTest, secondary
restartServerReplication(secondaryA);
jsTestLog("Issuing a write to the primary with write_concern:2");
-assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 2, wtimeout: timeout}}));
+assert.commandWorked(coll.insert(dummy_doc, {writeConcern: {w: 2, wtimeout: timeout}}));
jsTestLog("Trying to step down primary with only 2 nodes out of 5 caught up.");
assertStepDownFailsWithExceededTimeLimit(primary);
@@ -88,7 +88,7 @@ jsTestLog("Reenabling writes to another secondary (" + nodeIdStr(replTest, secon
restartServerReplication(secondaryB);
jsTestLog("Issuing a write to the primary with write_concern:3");
-assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 3, wtimeout: timeout}}));
+assert.commandWorked(coll.insert(dummy_doc, {writeConcern: {w: 3, wtimeout: timeout}}));
jsTestLog("Trying to step down primary with 3 nodes out of 5 caught up.");
assertStepDownSucceeds(primary);
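// A sketch of the catch-up barrier this test leans on (standard write concern
// semantics): a {w: N} write only succeeds once N data-bearing nodes, counting
// the primary, hold it, so a successful insert doubles as a wait-for-catch-up.
assert.commandWorked(coll.insert({barrier: 1}, {writeConcern: {w: 3, wtimeout: timeout}}));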
diff --git a/jstests/replsets/stepup.js b/jstests/replsets/stepup.js
index d4ce932a5bc..8c5b83ab586 100644
--- a/jstests/replsets/stepup.js
+++ b/jstests/replsets/stepup.js
@@ -27,14 +27,14 @@ assert.eq(primary, rst.getPrimary());
assert.commandWorked(
secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
-assert.writeOK(primary.getDB("test").bar.insert({x: 2}, {writeConcern: {w: 1}}));
+assert.commandWorked(primary.getDB("test").bar.insert({x: 2}, {writeConcern: {w: 1}}));
res = secondary.adminCommand({replSetStepUp: 1});
assert.commandFailedWithCode(res, ErrorCodes.CommandFailed);
assert.commandWorked(
secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
// Wait for the secondary to catch up by replicating a doc to both nodes.
-assert.writeOK(primary.getDB("test").bar.insert({x: 3}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(primary.getDB("test").bar.insert({x: 3}, {writeConcern: {w: "majority"}}));
// Step up the secondary. Retry since the old primary may step down when we try to ask for its
// vote.
diff --git a/jstests/replsets/sync2.js b/jstests/replsets/sync2.js
index 4a1053b04bb..281acafa667 100644
--- a/jstests/replsets/sync2.js
+++ b/jstests/replsets/sync2.js
@@ -39,13 +39,13 @@ var option = {writeConcern: {w: conns.length - 1, wtimeout: replTest.kDefaultTim
// to bridging, it will not change sync sources and receive the write in time. This was not a
// problem in 3.0 because the old version of mongobridge caused all the nodes to restart during
// partitioning, forcing the set to rebuild the spanning tree.
-assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option));
+assert.commandWorked(master.getDB("foo").bar.insert({x: 1}, option));
// 4 is connected to 3
conns[4].disconnect(conns[2]);
conns[4].reconnect(conns[3]);
-assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option));
+assert.commandWorked(master.getDB("foo").bar.insert({x: 1}, option));
replTest.stopSet();
}());
diff --git a/jstests/replsets/system_profile.js b/jstests/replsets/system_profile.js
index 4e525d4fc93..97ba6d7abfd 100644
--- a/jstests/replsets/system_profile.js
+++ b/jstests/replsets/system_profile.js
@@ -18,13 +18,13 @@ var getLatestOp = function() {
};
var primaryDB = rst.getPrimary().getDB('test');
-assert.writeOK(primaryDB.foo.insert({}));
+assert.commandWorked(primaryDB.foo.insert({}));
var op = getLatestOp();
// Enable profiling on the primary
assert.commandWorked(primaryDB.runCommand({profile: 2}));
assert.eq(op, getLatestOp(), "oplog entry created when profile was enabled");
-assert.writeOK(primaryDB.foo.insert({}));
+assert.commandWorked(primaryDB.foo.insert({}));
op = getLatestOp();
assert.commandWorked(primaryDB.runCommand({profile: 0}));
assert.eq(op, getLatestOp(), "oplog entry created when profile was disabled");
@@ -36,7 +36,7 @@ assert.eq(op, getLatestOp(), "oplog entry created when system.profile was droppe
assert.commandWorked(primaryDB.createCollection("system.profile", {capped: true, size: 1000}));
assert.eq(op, getLatestOp(), "oplog entry created when system.profile was created");
assert.commandWorked(primaryDB.runCommand({profile: 2}));
-assert.writeOK(primaryDB.foo.insert({}));
+assert.commandWorked(primaryDB.foo.insert({}));
op = getLatestOp();
assert.commandWorked(primaryDB.runCommand({profile: 0}));
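// For reference, a sketch assuming the shell helper is equivalent:
// db.setProfilingLevel(n) wraps the raw {profile: n} command used in this test.
assert.commandWorked(primaryDB.setProfilingLevel(0));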
diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js
index 361b6204c08..03d401412a2 100644
--- a/jstests/replsets/tags2.js
+++ b/jstests/replsets/tags2.js
@@ -34,7 +34,7 @@ var master = replTest.getPrimary();
var db = master.getDB("test");
var wtimeout = ReplSetTest.kDefaultTimeoutMS;
-assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
+assert.commandWorked(db.foo.insert({x: 1}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
var nextVersion = replTest.getReplSetConfigFromNode().version + 1;
conf.version = nextVersion;
@@ -44,7 +44,7 @@ replTest.awaitReplication();
master = replTest.getPrimary();
var db = master.getDB("test");
-assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
+assert.commandWorked(db.foo.insert({x: 2}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
nextVersion++;
conf.version = nextVersion;
@@ -54,7 +54,7 @@ master.getDB("admin").runCommand({replSetReconfig: conf});
master = replTest.getPrimary();
var db = master.getDB("test");
-assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
+assert.commandWorked(db.foo.insert({x: 3}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
replTest.stopSet();
}());
diff --git a/jstests/replsets/tags_with_reconfig.js b/jstests/replsets/tags_with_reconfig.js
index 6f28faf300a..7aadef7c5b8 100644
--- a/jstests/replsets/tags_with_reconfig.js
+++ b/jstests/replsets/tags_with_reconfig.js
@@ -36,10 +36,10 @@ var master = replTest.getPrimary();
var db = master.getDB("test");
// Insert a document with write concern : anydc
-assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}}));
+assert.commandWorked(db.foo.insert({x: 1}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}}));
// Insert a document with write concern : alldc
-assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}}));
+assert.commandWorked(db.foo.insert({x: 2}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}}));
// Add a new tag to the replica set
var config = master.getDB("local").system.replset.findOne();
@@ -64,10 +64,10 @@ master = replTest.getPrimary();
var db = master.getDB("test");
// Insert a document with write concern : anydc
-assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}}));
+assert.commandWorked(db.foo.insert({x: 3}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}}));
// Insert a document with write concern : alldc
-assert.writeOK(db.foo.insert({x: 4}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}}));
+assert.commandWorked(db.foo.insert({x: 4}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}}));
replTest.stopSet();
}());
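// A sketch of how such tag-based w-modes are defined, assuming the standard replica
// set config shape: custom modes live under settings.getLastErrorModes, e.g.
var modesSketch = {settings: {getLastErrorModes: {anydc: {dc: 1}, alldc: {dc: 2}}}};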
diff --git a/jstests/replsets/temp_namespace.js b/jstests/replsets/temp_namespace.js
index cc7bf462924..de276222b95 100644
--- a/jstests/replsets/temp_namespace.js
+++ b/jstests/replsets/temp_namespace.js
@@ -38,7 +38,7 @@ assert.commandWorked(masterDB.runCommand(
assert.commandWorked(masterDB.runCommand(
{applyOps: [{op: "c", ns: masterDB.getName() + ".$cmd", o: {create: "keep2", temp: 0}}]}));
masterDB.runCommand({create: 'keep3'});
-assert.writeOK(masterDB.keep4.insert({}, {writeConcern: {w: 2}}));
+assert.commandWorked(masterDB.keep4.insert({}, {writeConcern: {w: 2}}));
// make sure they exist on primary and secondary
function countCollection(mydb, nameFilter) {
diff --git a/jstests/replsets/temp_namespace_restart_as_standalone.js b/jstests/replsets/temp_namespace_restart_as_standalone.js
index e5061629c82..4fb4baefdad 100644
--- a/jstests/replsets/temp_namespace_restart_as_standalone.js
+++ b/jstests/replsets/temp_namespace_restart_as_standalone.js
@@ -96,7 +96,7 @@ rst.start(secondaryNodeId, {}, restart);
// Verify that writes are replicated to the temporary collection and can successfully be applied
// by the secondary after having restarted it.
-assert.writeOK(primaryDB.temp_collection.insert(
+assert.commandWorked(primaryDB.temp_collection.insert(
{}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
rst.stopSet();
diff --git a/jstests/replsets/too_stale_secondary.js b/jstests/replsets/too_stale_secondary.js
index ec71aa7bab1..c882bcc304e 100644
--- a/jstests/replsets/too_stale_secondary.js
+++ b/jstests/replsets/too_stale_secondary.js
@@ -55,7 +55,8 @@ function overflowOplog(conn, db, writeConcern) {
// Keep inserting large documents until the oplog rolls over.
const largeStr = new Array(32 * 1024).join('aaaaaaaa');
while (bsonWoCompare(getFirstOplogEntry(conn), firstOplogEntry) === 0) {
- assert.writeOK(db[collName].insert({data: largeStr}, {writeConcern: {w: writeConcern}}));
+ assert.commandWorked(
+ db[collName].insert({data: largeStr}, {writeConcern: {w: writeConcern}}));
}
}
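// A hedged sketch (an assumption, not the test's actual tooStale() helper): one way
// to detect staleness is that a too-stale member sits in RECOVERING in replSetGetStatus.
function tooStaleSketch(conn) {
    var status = assert.commandWorked(conn.adminCommand({replSetGetStatus: 1}));
    return status.members.some(function(m) {
        return m.self && m.state === ReplSetTest.State.RECOVERING;
    });
}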
@@ -109,7 +110,7 @@ var primary = replTest.getPrimary();
var primaryTestDB = primary.getDB(dbName);
jsTestLog("1: Insert one document on the primary (Node 0) and ensure it is replicated.");
-assert.writeOK(primaryTestDB[collName].insert({a: 1}, {writeConcern: {w: 3}}));
+assert.commandWorked(primaryTestDB[collName].insert({a: 1}, {writeConcern: {w: 3}}));
assert(!tooStale(replTest.nodes[2]));
jsTestLog("2: Stop Node 2.");
diff --git a/jstests/replsets/transaction_table_multi_statement_txn.js b/jstests/replsets/transaction_table_multi_statement_txn.js
index 01fc3a577d5..c81c35e8bb0 100644
--- a/jstests/replsets/transaction_table_multi_statement_txn.js
+++ b/jstests/replsets/transaction_table_multi_statement_txn.js
@@ -26,8 +26,8 @@ replTest.awaitReplication();
const sessionId = session.getSessionId();
jsTestLog('Starting transaction on session ' + sessionId);
session.startTransaction();
-assert.writeOK(coll.insert({_id: 0}));
-assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 1}));
assert.commandWorked(session.commitTransaction_forTesting());
const opTime = session.getOperationTime();
const txnNum = session.getTxnNumber_forTesting();
diff --git a/jstests/replsets/transactions_during_step_down.js b/jstests/replsets/transactions_during_step_down.js
index eb6aa6dad6e..c74062cb5e5 100644
--- a/jstests/replsets/transactions_during_step_down.js
+++ b/jstests/replsets/transactions_during_step_down.js
@@ -25,7 +25,7 @@ var primaryColl = db[collName];
var collNss = primaryColl.getFullName();
jsTestLog("Writing data to collection.");
-assert.writeOK(primaryColl.insert({_id: 'readOp'}, {"writeConcern": {"w": 2}}));
+assert.commandWorked(primaryColl.insert({_id: 'readOp'}, {"writeConcern": {"w": 2}}));
TestData.dbName = dbName;
TestData.collName = collName;
@@ -120,7 +120,7 @@ testAbortOrCommitTxnFailsWithCode(
{failPoint: "hangBeforeAbortingTxn", op: "session.abortTransaction_forTesting()"});
jsTestLog("Testing stepdown during running transaction in inactive state.");
-TestData.cmd = "assert.writeOK(sessionColl.insert({_id: 'inactiveTxnOp'}))";
+TestData.cmd = "assert.commandWorked(sessionColl.insert({_id: 'inactiveTxnOp'}))";
// Do not start the transaction in parallel shell because when the parallel
// shell work is done, implicit calls to the "endSessions" and "abortTransaction"
// cmds are made. So, during step down we might not have any running
diff --git a/jstests/replsets/write_concern_after_stepdown.js b/jstests/replsets/write_concern_after_stepdown.js
index b54e62e8965..7c0cac74276 100644
--- a/jstests/replsets/write_concern_after_stepdown.js
+++ b/jstests/replsets/write_concern_after_stepdown.js
@@ -37,7 +37,7 @@ var primary = rst.getPrimary();
var secondaries = rst.getSecondaries();
assert.eq(nodes[0], primary);
// Wait for all data bearing nodes to get up to date.
-assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
+assert.commandWorked(nodes[0].getDB(dbName).getCollection(collName).insert(
{a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
// Stop the secondaries from replicating.
@@ -73,7 +73,7 @@ restartServerReplication(secondaries);
waitForPrimary(nodes[1]);
jsTest.log("Do a write to the new primary");
-assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
+assert.commandWorked(nodes[1].getDB(dbName).getCollection(collName).insert(
{a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
jsTest.log("Reconnect the old primary to the rest of the nodes");
diff --git a/jstests/replsets/write_concern_after_stepdown_and_stepup.js b/jstests/replsets/write_concern_after_stepdown_and_stepup.js
index daa143aa901..3cd88cfcf57 100644
--- a/jstests/replsets/write_concern_after_stepdown_and_stepup.js
+++ b/jstests/replsets/write_concern_after_stepdown_and_stepup.js
@@ -45,7 +45,7 @@ var primary = rst.getPrimary();
var secondaries = rst.getSecondaries();
assert.eq(nodes[0], primary);
// Wait for all data bearing nodes to get up to date.
-assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
+assert.commandWorked(nodes[0].getDB(dbName).getCollection(collName).insert(
{a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
// Stop the secondaries from replicating.
@@ -81,7 +81,7 @@ restartServerReplication(secondaries);
waitForPrimary(nodes[1]);
jsTest.log("Do a write to the new primary");
-assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
+assert.commandWorked(nodes[1].getDB(dbName).getCollection(collName).insert(
{a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
jsTest.log("Reconnect the old primary to the rest of the nodes");
diff --git a/jstests/serial_run/index_multi.js b/jstests/serial_run/index_multi.js
index 41b6b217c2d..5ba9b277357 100644
--- a/jstests/serial_run/index_multi.js
+++ b/jstests/serial_run/index_multi.js
@@ -26,7 +26,7 @@ for (var i = 0; i < 1e4; i++) {
bulk.insert(doc);
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Array of all index specs
var specs = [];
@@ -107,7 +107,7 @@ for (i = 0; i < 1e4; i++) {
bulk.find(criteria).update(mod);
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
indexJobs.forEach(function(join) {
join();
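The index_multi.js hunks apply the same change to bulk writes; bulk.execute() returns a BulkWriteResult, which the new assertion accepts as well. A minimal usage sketch (shell JavaScript; the collection name is illustrative):

    var bulkColl = db.getSiblingDB("test").bulk_sketch;
    var b = bulkColl.initializeUnorderedBulkOp();
    for (var i = 0; i < 100; i++) {
        b.insert({x: i});
    }
    var res = b.execute();        // returns a BulkWriteResult
    assert.commandWorked(res);    // accepted, like a WriteResult
    assert.eq(100, res.nInserted, tojson(res));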
diff --git a/jstests/sharding/accurate_count_with_predicate.js b/jstests/sharding/accurate_count_with_predicate.js
index bb440e88873..4b5fcef4e2e 100644
--- a/jstests/sharding/accurate_count_with_predicate.js
+++ b/jstests/sharding/accurate_count_with_predicate.js
@@ -26,13 +26,13 @@ st.shardColl(shard0Coll.getName(), {x: 1}, {x: middle}, {x: middle + 1}, "test",
// Insert some docs.
for (let i = 0; i < num; i++) {
- assert.writeOK(st.getDB("test").slowcount.insert(getNthDocument(i)));
+ assert.commandWorked(st.getDB("test").slowcount.insert(getNthDocument(i)));
}
// Insert some orphan documents to shard 0. These are just documents outside the range
// which shard 0 owns.
for (let i = middle + 1; i < middle + 3; i++) {
- assert.writeOK(shard0Coll.insert(getNthDocument(i)));
+ assert.commandWorked(shard0Coll.insert(getNthDocument(i)));
}
// Run a count on the whole collection. The orphaned documents on shard 0 shouldn't be double
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index b676cb474e7..dfa3ef3b904 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -10,7 +10,7 @@ var db1 = conn1.getDB("testDB");
var numObjs = 3;
for (var i = 0; i < numObjs; i++) {
- assert.writeOK(db1.foo.save({a: i}));
+ assert.commandWorked(db1.foo.save({a: i}));
}
var configDB = s.s.getDB('config');
@@ -29,10 +29,10 @@ assert.eq(1024, newShardDoc.maxSize);
var conn2 = MongoRunner.runMongod({'shardsvr': ""});
var db2 = conn2.getDB("otherDB");
-assert.writeOK(db2.foo.save({a: 1}));
+assert.commandWorked(db2.foo.save({a: 1}));
var db3 = conn2.getDB("testDB");
-assert.writeOK(db3.foo.save({a: 1}));
+assert.commandWorked(db3.foo.save({a: 1}));
s.config.databases.find().forEach(printjson);
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
index 7fb1ab2efe1..64d5300c3c0 100644
--- a/jstests/sharding/addshard2.js
+++ b/jstests/sharding/addshard2.js
@@ -184,7 +184,7 @@ if (res.primary != addShardRes.shardAdded) {
assert.commandWorked(st.s.adminCommand({movePrimary: 'test', to: addShardRes.shardAdded}));
}
-assert.writeOK(st.s.getDB('test').foo.insert({x: 1}));
+assert.commandWorked(st.s.getDB('test').foo.insert({x: 1}));
assert.neq(null, rst5.getPrimary().getDB('test').foo.findOne());
assert.commandWorked(st.s.getDB('test').runCommand({dropDatabase: 1}));
diff --git a/jstests/sharding/addshard5.js b/jstests/sharding/addshard5.js
index 31d2c10f505..7a2b6866c8c 100644
--- a/jstests/sharding/addshard5.js
+++ b/jstests/sharding/addshard5.js
@@ -17,7 +17,7 @@ st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
assert.commandWorked(mongos.adminCommand({shardCollection: coll + '', key: {_id: 1}}));
// Insert one document
-assert.writeOK(coll.insert({hello: 'world'}));
+assert.commandWorked(coll.insert({hello: 'world'}));
// Migrate the collection to and from shard1 so shard0 loads the shard1 host
assert.commandWorked(mongos.adminCommand(
diff --git a/jstests/sharding/agg_project_limit_pipe_split.js b/jstests/sharding/agg_project_limit_pipe_split.js
index f17148a0877..7f5c7a51951 100644
--- a/jstests/sharding/agg_project_limit_pipe_split.js
+++ b/jstests/sharding/agg_project_limit_pipe_split.js
@@ -14,7 +14,7 @@ const bulkOp = coll.initializeOrderedBulkOp();
for (let i = 0; i < 400; ++i) {
bulkOp.insert({x: i, y: ["a", "b", "c"], z: Math.floor(i / 12)});
}
-assert.writeOK(bulkOp.execute());
+assert.commandWorked(bulkOp.execute());
let agg = coll.aggregate([
{$match: {$or: [{z: 9}, {z: 10}]}},
diff --git a/jstests/sharding/agg_sort.js b/jstests/sharding/agg_sort.js
index 0ee78631ec0..45ea86b0d97 100644
--- a/jstests/sharding/agg_sort.js
+++ b/jstests/sharding/agg_sort.js
@@ -31,7 +31,7 @@ const bulkOp = coll.initializeOrderedBulkOp();
for (var i = 0; i < nDocs; ++i) {
bulkOp.insert({_id: i, x: Math.floor(i / 2), y: yValues[i]});
}
-assert.writeOK(bulkOp.execute());
+assert.commandWorked(bulkOp.execute());
// Split the data into 3 chunks
assert.commandWorked(shardingTest.s0.adminCommand({split: coll.getFullName(), middle: {_id: 3}}));
@@ -141,7 +141,7 @@ const textColl = db.sharded_agg_sort_text;
assert.commandWorked(
shardingTest.s0.adminCommand({shardCollection: textColl.getFullName(), key: {_id: 1}}));
-assert.writeOK(textColl.insert([
+assert.commandWorked(textColl.insert([
{_id: 0, text: "apple"},
{_id: 1, text: "apple orange banana apple"},
{_id: 2, text: "apple orange"},
diff --git a/jstests/sharding/aggregates_during_balancing.js b/jstests/sharding/aggregates_during_balancing.js
index e0681798327..06db4cb6955 100644
--- a/jstests/sharding/aggregates_during_balancing.js
+++ b/jstests/sharding/aggregates_during_balancing.js
@@ -46,7 +46,7 @@ for (i = 0; i < nItems; ++i) {
filler: "0123456789012345678901234567890123456789"
});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTestLog('a project and group in shards, result combined in mongos');
var a1 = db.ts1
@@ -200,7 +200,7 @@ assert.eq(db.ts1.find().sort({_id: 1}).toArray(), outCollection.find().sort({_id
assert.commandFailed(
db.runCommand({aggregate: outCollection.getName(), pipeline: [{$out: db.ts1.getName()}]}));
-assert.writeOK(db.literal.save({dollar: false}));
+assert.commandWorked(db.literal.save({dollar: false}));
result =
db.literal
diff --git a/jstests/sharding/aggregation_currentop.js b/jstests/sharding/aggregation_currentop.js
index 4973b4f2d3f..a3a9d9c32e2 100644
--- a/jstests/sharding/aggregation_currentop.js
+++ b/jstests/sharding/aggregation_currentop.js
@@ -90,7 +90,7 @@ createUsers(mongosConn);
assert(clusterAdminDB.auth("admin", "pwd"));
for (let i = 0; i < 5; i++) {
- assert.writeOK(clusterTestDB.test.insert({_id: i, a: i}));
+ assert.commandWorked(clusterTestDB.test.insert({_id: i, a: i}));
}
st.ensurePrimaryShard(clusterTestDB.getName(), shardRS.name);
diff --git a/jstests/sharding/aggregations_in_session.js b/jstests/sharding/aggregations_in_session.js
index 456decee662..03c124a3f50 100644
--- a/jstests/sharding/aggregations_in_session.js
+++ b/jstests/sharding/aggregations_in_session.js
@@ -21,7 +21,7 @@ const mongosColl = session.getDatabase("test")[jsTestName()];
// merging on a mongod - otherwise the entire pipeline will be forwarded without a split and
// without a $mergeCursors stage.
st.shardColl(mongosColl, {_id: 1}, {_id: 1}, {_id: 1});
-assert.writeOK(mongosColl.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
+assert.commandWorked(mongosColl.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
// This assertion will reproduce the hang described in SERVER-33660.
assert.eq(
diff --git a/jstests/sharding/all_config_servers_blackholed_from_mongos.js b/jstests/sharding/all_config_servers_blackholed_from_mongos.js
index 53d6e435dd1..8fb040e60f7 100644
--- a/jstests/sharding/all_config_servers_blackholed_from_mongos.js
+++ b/jstests/sharding/all_config_servers_blackholed_from_mongos.js
@@ -23,7 +23,7 @@ assert.commandWorked(testDB.adminCommand({enableSharding: 'BlackHoleDB'}));
assert.commandWorked(
testDB.adminCommand({shardCollection: testDB.ShardedColl.getFullName(), key: {_id: 1}}));
-assert.writeOK(testDB.ShardedColl.insert({a: 1}));
+assert.commandWorked(testDB.ShardedColl.insert({a: 1}));
jsTest.log('Making all the config servers appear as a blackhole to mongos');
st._configServers.forEach(function(configSvr) {
diff --git a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
index 68745172568..19b19c724d3 100644
--- a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
+++ b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
@@ -13,7 +13,7 @@ var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
jsTest.log('Config nodes up: 3 of 3, shard nodes up: 2 of 2: ' +
'Insert test data to work with');
-assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+assert.commandWorked(st.s0.getDB('TestDB').TestColl.update(
{_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
assert.eq([{_id: 0, count: 1}], st.s0.getDB('TestDB').TestColl.find().toArray());
@@ -21,7 +21,7 @@ jsTest.log('Config nodes up: 2 of 3, shard nodes up: 2 of 2: ' +
'Inserts and queries must work');
st.configRS.stop(0);
st.restartMongos(0);
-assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+assert.commandWorked(st.s0.getDB('TestDB').TestColl.update(
{_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
assert.eq([{_id: 0, count: 2}], st.s0.getDB('TestDB').TestColl.find().toArray());
@@ -29,7 +29,7 @@ jsTest.log('Config nodes up: 1 of 3, shard nodes up: 2 of 2: ' +
'Inserts and queries must work');
st.configRS.stop(1);
st.restartMongos(0);
-assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+assert.commandWorked(st.s0.getDB('TestDB').TestColl.update(
{_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
diff --git a/jstests/sharding/allow_partial_results.js b/jstests/sharding/allow_partial_results.js
index 7ecbbb1dc7b..d5b2a18fa0a 100644
--- a/jstests/sharding/allow_partial_results.js
+++ b/jstests/sharding/allow_partial_results.js
@@ -22,7 +22,7 @@ let bulk = coll.initializeUnorderedBulkOp();
for (let i = -50; i < 50; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("Create a sharded collection with one chunk on each of the two shards.");
st.ensurePrimaryShard(dbName, st.shard0.shardName);
diff --git a/jstests/sharding/array_shard_key.js b/jstests/sharding/array_shard_key.js
index 2eb7dd102b6..dd6686f68a9 100644
--- a/jstests/sharding/array_shard_key.js
+++ b/jstests/sharding/array_shard_key.js
@@ -23,11 +23,11 @@ assert.writeError(coll.insert({i: [1, 2]}));
assert.writeError(coll.insert({_id: [1, 2], i: 3}));
// Insert an object with valid array key
-assert.writeOK(coll.insert({i: 1}));
+assert.commandWorked(coll.insert({i: 1}));
// Update the value with valid other field
value = coll.findOne({i: 1});
-assert.writeOK(coll.update(value, {$set: {j: 2}}));
+assert.commandWorked(coll.update(value, {$set: {j: 2}}));
// Update the value with invalid other fields
value = coll.findOne({i: 1});
@@ -39,7 +39,7 @@ assert.writeError(coll.update(value, Object.merge(value, {i: [3, 4]}), false, tr
// Multi-update the value with other fields (won't work, but no error)
value = coll.findOne({i: 1});
-assert.writeOK(coll.update(Object.merge(value, {i: [1, 1]}), {$set: {k: 4}}, false, true));
+assert.commandWorked(coll.update(Object.merge(value, {i: [1, 1]}), {$set: {k: 4}}, false, true));
// Query the value with other fields (won't work, but no error)
value = coll.findOne({i: 1});
@@ -51,29 +51,29 @@ coll.remove(Object.extend(value, {i: [1, 2, 3, 4]}));
// Can't remove using multikey, but shouldn't error
value = coll.findOne({i: 1});
-assert.writeOK(coll.remove(Object.extend(value, {i: [1, 2, 3, 4, 5]})));
+assert.commandWorked(coll.remove(Object.extend(value, {i: [1, 2, 3, 4, 5]})));
assert.eq(coll.find().itcount(), 1);
value = coll.findOne({i: 1});
-assert.writeOK(coll.remove(Object.extend(value, {i: 1})));
+assert.commandWorked(coll.remove(Object.extend(value, {i: 1})));
assert.eq(coll.find().itcount(), 0);
coll.ensureIndex({_id: 1, i: 1, j: 1});
// Can insert a document that makes the index multikey, as long as the array is not part of the
// shard key.
coll.remove({});
-assert.writeOK(coll.insert({i: 1, j: [1, 2]}));
+assert.commandWorked(coll.insert({i: 1, j: [1, 2]}));
assert.eq(coll.find().itcount(), 1);
// Same is true for updates.
coll.remove({});
coll.insert({_id: 1, i: 1});
-assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}));
+assert.commandWorked(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}));
assert.eq(coll.find().itcount(), 1);
// Same for upserts.
coll.remove({});
-assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}, true));
+assert.commandWorked(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}, true));
assert.eq(coll.find().itcount(), 1);
printjson("Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey");
@@ -82,7 +82,7 @@ printjson("Sharding-then-inserting-multikey tested, now trying inserting-then-sh
var coll = mongos.getCollection("" + coll + "2");
for (var i = 0; i < 10; i++) {
    // TODO: does not check weird cases like [i, i]
- assert.writeOK(coll.insert({i: [i, i + 1]}));
+ assert.commandWorked(coll.insert({i: [i, i + 1]}));
}
coll.ensureIndex({_id: 1, i: 1});
@@ -99,7 +99,7 @@ st.printShardingStatus();
var coll = mongos.getCollection("" + coll + "3");
for (var i = 0; i < 10; i++) {
    // TODO: does not check weird cases like [i, i]
- assert.writeOK(coll.insert({i: i}));
+ assert.commandWorked(coll.insert({i: i}));
}
coll.ensureIndex({_id: 1, i: 1});
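array_shard_key.js exercises the invariant that a shard-key field may never hold an array, while arrays in non-key fields are fine. A minimal sketch of that invariant (shell JavaScript; assumes sharding is already enabled for the "test" database, and the collection name is illustrative):

    assert.commandWorked(sh.shardCollection("test.ask_sketch", {i: 1}));
    var c = db.getSiblingDB("test").ask_sketch;
    assert.writeError(c.insert({i: [1, 2]}));           // array in the shard key: rejected
    assert.commandWorked(c.insert({i: 1, j: [1, 2]}));  // array in a non-key field: fine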
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index 61b5c273315..5131c512271 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -67,7 +67,7 @@ s.getDB(adminUser.db)
login(adminUser);
// Set the chunk size, disable the secondary throttle (so the test doesn't run so slow)
-assert.writeOK(
+assert.commandWorked(
s.getDB("config").settings.update({_id: "balancer"},
{$set: {"_secondaryThrottle": false, "_waitForDelete": true}},
{upsert: true}));
@@ -151,7 +151,7 @@ login(testUser);
assert.eq(s.getDB("test").foo.findOne(), null);
print("insert try 2");
-assert.writeOK(s.getDB("test").foo.insert({x: 1}));
+assert.commandWorked(s.getDB("test").foo.insert({x: 1}));
assert.eq(1, s.getDB("test").foo.find().itcount(), tojson(result));
logout(testUser);
@@ -181,7 +181,7 @@ var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
for (i = 0; i < num; i++) {
bulk.insert({_id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market"});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
s.startBalancer(60000);
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index 8afe4facc2e..2692bc9bc42 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -74,7 +74,7 @@ for (var i = 0; i < 100; i++) {
for (var j = 0; j < 10; j++) {
bulk.insert({i: i, j: j, str: str});
}
- assert.writeOK(bulk.execute({w: "majority"}));
+ assert.commandWorked(bulk.execute({w: "majority"}));
// Split the chunk we just inserted so that we have something to balance.
assert.commandWorked(st.splitFind("test.foo", {i: i, j: 0}));
}
@@ -82,7 +82,8 @@ for (var i = 0; i < 100; i++) {
assert.eq(expectedDocs, testDB.foo.count());
// Wait for the balancer to start back up
-assert.writeOK(configDB.settings.update({_id: 'balancer'}, {$set: {_waitForDelete: true}}, true));
+assert.commandWorked(
+ configDB.settings.update({_id: 'balancer'}, {$set: {_waitForDelete: true}}, true));
st.startBalancer();
// Make sure we've done at least some splitting, so the balancer will work
diff --git a/jstests/sharding/auth_repl.js b/jstests/sharding/auth_repl.js
index 0c8e976bd48..5605225b446 100644
--- a/jstests/sharding/auth_repl.js
+++ b/jstests/sharding/auth_repl.js
@@ -36,7 +36,7 @@ priTestDB.createUser({user: 'a', pwd: 'a', roles: jsTest.basicUserRoles},
assert.eq(1, testDB.auth('a', 'a'));
jsTest.log('Sending an authorized query that should be ok');
-assert.writeOK(testColl.insert({x: 1}, {writeConcern: {w: nodeCount}}));
+assert.commandWorked(testColl.insert({x: 1}, {writeConcern: {w: nodeCount}}));
conn.setSlaveOk(true);
doc = testColl.findOne();
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index 9aa9bc8db84..480d0c4318b 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -75,7 +75,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var x = 0; x < 20; x++) {
bulk.insert({v: x, k: 10});
}
-assert.writeOK(bulk.execute({w: nodeCount}));
+assert.commandWorked(bulk.execute({w: nodeCount}));
/* Although mongos never caches query results, try to do a different query
* every time just to be sure.
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index 0d1fb713c97..9b0fbe240ce 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -20,11 +20,11 @@ var test1User = {
};
function assertRemove(collection, pattern) {
- assert.writeOK(collection.remove(pattern));
+ assert.commandWorked(collection.remove(pattern));
}
function assertInsert(collection, obj) {
- assert.writeOK(collection.insert(obj));
+ assert.commandWorked(collection.insert(obj));
}
// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index 3d60fb2ccca..e9b338801bf 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -20,11 +20,11 @@ var test1Reader = {
};
function assertRemove(collection, pattern) {
- assert.writeOK(collection.remove(pattern));
+ assert.commandWorked(collection.remove(pattern));
}
function assertInsert(collection, obj) {
- assert.writeOK(collection.insert(obj));
+ assert.commandWorked(collection.insert(obj));
}
// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
diff --git a/jstests/sharding/auto_rebalance_parallel.js b/jstests/sharding/auto_rebalance_parallel.js
index ef6af0d57c5..03e5755ec0c 100644
--- a/jstests/sharding/auto_rebalance_parallel.js
+++ b/jstests/sharding/auto_rebalance_parallel.js
@@ -17,10 +17,10 @@ function prepareCollectionForBalance(collName) {
var coll = st.s0.getCollection(collName);
// Create 4 chunks initially and ensure they get balanced within 1 balancer round
- assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
- assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
- assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
- assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
+ assert.commandWorked(coll.insert({Key: 1, Value: 'Test value 1'}));
+ assert.commandWorked(coll.insert({Key: 10, Value: 'Test value 10'}));
+ assert.commandWorked(coll.insert({Key: 20, Value: 'Test value 20'}));
+ assert.commandWorked(coll.insert({Key: 30, Value: 'Test value 30'}));
assert.commandWorked(st.splitAt(collName, {Key: 10}));
assert.commandWorked(st.splitAt(collName, {Key: 20}));
diff --git a/jstests/sharding/auto_rebalance_parallel_replica_sets.js b/jstests/sharding/auto_rebalance_parallel_replica_sets.js
index 0be9549f3cd..8bcbd63813a 100644
--- a/jstests/sharding/auto_rebalance_parallel_replica_sets.js
+++ b/jstests/sharding/auto_rebalance_parallel_replica_sets.js
@@ -13,10 +13,10 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key
var coll = st.s0.getDB('TestDB').TestColl;
// Create 4 chunks initially and ensure they get balanced within 1 balancer round
-assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
-assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
-assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
-assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
+assert.commandWorked(coll.insert({Key: 1, Value: 'Test value 1'}));
+assert.commandWorked(coll.insert({Key: 10, Value: 'Test value 10'}));
+assert.commandWorked(coll.insert({Key: 20, Value: 'Test value 20'}));
+assert.commandWorked(coll.insert({Key: 30, Value: 'Test value 30'}));
assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 10}));
assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20}));
diff --git a/jstests/sharding/autodiscover_config_rs_from_secondary.js b/jstests/sharding/autodiscover_config_rs_from_secondary.js
index 390d9bb7aa6..b9e87eeae06 100644
--- a/jstests/sharding/autodiscover_config_rs_from_secondary.js
+++ b/jstests/sharding/autodiscover_config_rs_from_secondary.js
@@ -34,7 +34,7 @@ var seedList = rst.name + "/" + rst.nodes[1].host; // node 1 is guaranteed to n
// perform writes to the config servers.
var mongos = MongoRunner.runMongos({configdb: seedList});
var admin = mongos.getDB('admin');
- assert.writeOK(admin.foo.insert({a: 1}));
+ assert.commandWorked(admin.foo.insert({a: 1}));
assert.eq(1, admin.foo.findOne().a);
MongoRunner.stopMongos(mongos);
}
diff --git a/jstests/sharding/autosplit.js b/jstests/sharding/autosplit.js
index 58dbe7ece1e..39e05e9daae 100644
--- a/jstests/sharding/autosplit.js
+++ b/jstests/sharding/autosplit.js
@@ -38,7 +38,7 @@ function insertDocsAndWaitForSplit(numDocs) {
for (; i < curMaxKey + numDocs; i++) {
bulk.insert({num: i, s: bigString});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
waitForOngoingChunkSplits(s);
diff --git a/jstests/sharding/autosplit_heuristics.js b/jstests/sharding/autosplit_heuristics.js
index 1777a82678a..95ac0835935 100644
--- a/jstests/sharding/autosplit_heuristics.js
+++ b/jstests/sharding/autosplit_heuristics.js
@@ -66,7 +66,7 @@ if (!isDebugBuild) {
// Insert enough docs to trigger splits into all chunks
for (var i = 0; i < totalInserts; i++) {
- assert.writeOK(coll.insert({_id: i % numChunks + (i / totalInserts), pad: pad}));
+ assert.commandWorked(coll.insert({_id: i % numChunks + (i / totalInserts), pad: pad}));
// Splitting is asynchronous so we should wait after each insert
// for autosplitting to happen
waitForOngoingChunkSplits(st);
diff --git a/jstests/sharding/autosplit_with_balancer.js b/jstests/sharding/autosplit_with_balancer.js
index 0372ca09b9a..b68a9e58289 100644
--- a/jstests/sharding/autosplit_with_balancer.js
+++ b/jstests/sharding/autosplit_with_balancer.js
@@ -23,7 +23,7 @@ for (var j = 0; j < 30; j++) {
bulk.insert({num: i, s: bigString});
i++;
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}));
}
diff --git a/jstests/sharding/balance_repl.js b/jstests/sharding/balance_repl.js
index fdc0d15509c..dc16fe7e46e 100644
--- a/jstests/sharding/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -30,7 +30,7 @@ var bulk = s.s0.getDB('TestDB').TestColl.initializeUnorderedBulkOp();
for (var i = 0; i < 2100; i++) {
bulk.insert({_id: i, x: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.commandWorked(s.s0.adminCommand({enablesharding: 'TestDB'}));
s.ensurePrimaryShard('TestDB', s.shard0.shardName);
diff --git a/jstests/sharding/balancer_window.js b/jstests/sharding/balancer_window.js
index ee2d55b1345..50ab8325802 100644
--- a/jstests/sharding/balancer_window.js
+++ b/jstests/sharding/balancer_window.js
@@ -56,16 +56,17 @@ var shard0Chunks = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shard
var startDate = new Date();
var hourMinStart = new HourAndMinute(startDate.getHours(), startDate.getMinutes());
-assert.writeOK(configDB.settings.update({_id: 'balancer'},
- {
- $set: {
- activeWindow: {
- start: hourMinStart.addHour(-2).toString(),
- stop: hourMinStart.addHour(-1).toString()
- },
- }
- },
- true));
+assert.commandWorked(
+ configDB.settings.update({_id: 'balancer'},
+ {
+ $set: {
+ activeWindow: {
+ start: hourMinStart.addHour(-2).toString(),
+ stop: hourMinStart.addHour(-1).toString()
+ },
+ }
+ },
+ true));
st.startBalancer();
st.waitForBalancer(true, 60000);
@@ -73,7 +74,7 @@ st.waitForBalancer(true, 60000);
var shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
assert.eq(shard0Chunks, shard0ChunksAfter);
-assert.writeOK(configDB.settings.update(
+assert.commandWorked(configDB.settings.update(
{_id: 'balancer'},
{
$set: {
diff --git a/jstests/sharding/basic_drop_coll.js b/jstests/sharding/basic_drop_coll.js
index b7fda388e34..e457dc5c9fd 100644
--- a/jstests/sharding/basic_drop_coll.js
+++ b/jstests/sharding/basic_drop_coll.js
@@ -11,7 +11,7 @@ var testDB = st.s.getDB('test');
// Test dropping an unsharded collection.
-assert.writeOK(testDB.bar.insert({x: 1}));
+assert.commandWorked(testDB.bar.insert({x: 1}));
assert.neq(null, testDB.bar.findOne({x: 1}));
assert.commandWorked(testDB.runCommand({drop: 'bar'}));
@@ -29,8 +29,8 @@ assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zon
assert.commandWorked(st.s.adminCommand(
{updateZoneKeyRange: 'test.user', min: {_id: 0}, max: {_id: 10}, zone: 'foo'}));
-assert.writeOK(testDB.user.insert({_id: 10}));
-assert.writeOK(testDB.user.insert({_id: -10}));
+assert.commandWorked(testDB.user.insert({_id: 10}));
+assert.commandWorked(testDB.user.insert({_id: -10}));
assert.neq(null, st.shard0.getDB('test').user.findOne({_id: -10}));
assert.neq(null, st.shard1.getDB('test').user.findOne({_id: 10}));
diff --git a/jstests/sharding/basic_split.js b/jstests/sharding/basic_split.js
index 00a442ac353..fcab7365aad 100644
--- a/jstests/sharding/basic_split.js
+++ b/jstests/sharding/basic_split.js
@@ -46,7 +46,7 @@ var bulk = testDB.user.initializeUnorderedBulkOp();
for (var x = -1200; x < 1200; x++) {
bulk.insert({_id: x, val: kiloDoc});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
@@ -89,7 +89,7 @@ bulk = testDB.compound.initializeUnorderedBulkOp();
for (x = -1200; x < 1200; x++) {
bulk.insert({x: x, y: x, val: kiloDoc});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount());
assert.commandWorked(configDB.adminCommand(
diff --git a/jstests/sharding/batch_write_command_sharded.js b/jstests/sharding/batch_write_command_sharded.js
index 60b848dd6de..854e1364531 100644
--- a/jstests/sharding/batch_write_command_sharded.js
+++ b/jstests/sharding/batch_write_command_sharded.js
@@ -131,7 +131,7 @@ var oldChunks = config.chunks.find().toArray();
var staleMongos = MongoRunner.runMongos({configdb: configConnStr});
brokenColl = staleMongos.getCollection(brokenColl.toString());
-assert.writeOK(brokenColl.insert({hello: "world"}));
+assert.commandWorked(brokenColl.insert({hello: "world"}));
// Modify the chunks to make shards at a higher version
@@ -140,9 +140,9 @@ assert.commandWorked(
// Rewrite the old chunks back to the config server
-assert.writeOK(config.chunks.remove({}));
+assert.commandWorked(config.chunks.remove({}));
for (var i = 0; i < oldChunks.length; i++) {
- assert.writeOK(config.chunks.insert(oldChunks[i]));
+ assert.commandWorked(config.chunks.insert(oldChunks[i]));
}
// Ensure that the inserts have propagated to all secondary nodes
diff --git a/jstests/sharding/bulk_insert.js b/jstests/sharding/bulk_insert.js
index a2162771492..00e03fe8019 100644
--- a/jstests/sharding/bulk_insert.js
+++ b/jstests/sharding/bulk_insert.js
@@ -14,7 +14,7 @@ var collUn = mongos.getCollection(jsTestName() + ".collUnsharded");
var collDi = st.shard0.getCollection(jsTestName() + ".collDirect");
jsTest.log('Checking write to config collections...');
-assert.writeOK(admin.TestColl.insert({SingleDoc: 1}));
+assert.commandWorked(admin.TestColl.insert({SingleDoc: 1}));
jsTest.log("Setting up collections...");
@@ -33,9 +33,9 @@ assert.commandWorked(admin.runCommand(
{moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
var resetColls = function() {
- assert.writeOK(collSh.remove({}));
- assert.writeOK(collUn.remove({}));
- assert.writeOK(collDi.remove({}));
+ assert.commandWorked(collSh.remove({}));
+ assert.commandWorked(collUn.remove({}));
+ assert.commandWorked(collDi.remove({}));
};
var isDupKeyError = function(err) {
@@ -54,13 +54,13 @@ jsTest.log("Bulk insert (no ContinueOnError) to single shard...");
resetColls();
var inserts = [{ukey: 0}, {ukey: 1}];
-assert.writeOK(collSh.insert(inserts));
+assert.commandWorked(collSh.insert(inserts));
assert.eq(2, collSh.find().itcount());
-assert.writeOK(collUn.insert(inserts));
+assert.commandWorked(collUn.insert(inserts));
assert.eq(2, collUn.find().itcount());
-assert.writeOK(collDi.insert(inserts));
+assert.commandWorked(collDi.insert(inserts));
assert.eq(2, collDi.find().itcount());
jsTest.log("Bulk insert (no COE) with mongos error...");
@@ -107,13 +107,13 @@ jsTest.log("Bulk insert (no COE) on second shard...");
resetColls();
var inserts = [{ukey: 0}, {ukey: -1}];
-assert.writeOK(collSh.insert(inserts));
+assert.commandWorked(collSh.insert(inserts));
assert.eq(2, collSh.find().itcount());
-assert.writeOK(collUn.insert(inserts));
+assert.commandWorked(collUn.insert(inserts));
assert.eq(2, collUn.find().itcount());
-assert.writeOK(collDi.insert(inserts));
+assert.commandWorked(collDi.insert(inserts));
assert.eq(2, collDi.find().itcount());
jsTest.log("Bulk insert to second shard (no COE) with mongos error...");
@@ -245,7 +245,7 @@ assert.commandWorked(admin.runCommand(
assert.commandWorked(admin.runCommand(
{moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
-assert.writeOK(staleCollSh.insert(inserts));
+assert.commandWorked(staleCollSh.insert(inserts));
//
// Test when the legacy batch exceeds the BSON object size limit
diff --git a/jstests/sharding/bulk_shard_insert.js b/jstests/sharding/bulk_shard_insert.js
index 4f3a4626818..67726754282 100644
--- a/jstests/sharding/bulk_shard_insert.js
+++ b/jstests/sharding/bulk_shard_insert.js
@@ -76,7 +76,7 @@ while (docsInserted < numDocs) {
docsInserted++;
}
- assert.writeOK(coll.insert(bulk));
+ assert.commandWorked(coll.insert(bulk));
if (docsInserted % 10000 == 0) {
print("Inserted " + docsInserted + " documents.");
diff --git a/jstests/sharding/change_stream_chunk_migration.js b/jstests/sharding/change_stream_chunk_migration.js
index a4e74ed3efd..272353befb8 100644
--- a/jstests/sharding/change_stream_chunk_migration.js
+++ b/jstests/sharding/change_stream_chunk_migration.js
@@ -45,8 +45,8 @@ assert.commandWorked(
mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
// Insert two documents.
-assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
// Split the collection into two chunks: [MinKey, 10) and [10, MaxKey].
assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
@@ -67,8 +67,8 @@ for (let id of [0, 20]) {
}
// Insert into both the chunks.
-assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
// Split again, and move a second chunk to the first shard. The new chunks are:
// [MinKey, 0), [0, 10), and [10, MaxKey].
@@ -82,9 +82,9 @@ assert.commandWorked(mongos.adminCommand({
}));
// Insert again, into all three chunks.
-assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
// Make sure we can see all the inserts, without any 'retryNeeded' entries.
for (let nextExpectedId of [1, 21, -2, 2, 22]) {
@@ -101,9 +101,9 @@ assert(!changeStream.hasNext());
// Insert into all three chunks.
jsTestLog("Insert into all three chunks");
-assert.writeOK(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
jsTestLog("Move the [Minkey, 0) chunk to shard 1.");
assert.commandWorked(mongos.adminCommand({
@@ -114,9 +114,9 @@ assert.commandWorked(mongos.adminCommand({
}));
// Insert again, into all three chunks.
-assert.writeOK(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
// Make sure we can see all the inserts, without any 'retryNeeded' entries.
for (let nextExpectedId of [-3, 3, 23, -4, 4, 24]) {
@@ -133,25 +133,25 @@ assert.commandWorked(mongos.adminCommand({addShard: newShard.getURL(), name: "ne
// At this point, there haven't been any migrations to that shard; check that the changeStream
// works normally.
-assert.writeOK(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
for (let nextExpectedId of [-5, 5, 25]) {
assert.soon(() => changeStream.hasNext());
assert.eq(changeStream.next().documentKey, {_id: nextExpectedId});
}
-assert.writeOK(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
// Now migrate a chunk to the new shard and verify the stream continues to return results
// from both before and after the migration.
jsTestLog("Migrating [10, MaxKey] chunk to new shard.");
assert.commandWorked(mongos.adminCommand(
{moveChunk: mongosColl.getFullName(), find: {_id: 20}, to: "newShard", _waitForDelete: true}));
-assert.writeOK(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
for (let nextExpectedId of [16, -6, 6, 26]) {
assert.soon(() => changeStream.hasNext());
diff --git a/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js b/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
index 5d854fdf44c..d2be5e2d406 100644
--- a/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
+++ b/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
@@ -159,7 +159,7 @@ for (let shardDB of [shard0DB, shard1DB]) {
// Write a document to shard0, and confirm that - despite the fact that shard1 is still idle - a
// getMore with a high maxTimeMS returns the document before this timeout expires.
csCursorId = reopenChangeStream(csCursorId);
-assert.writeOK(mongosColl.insert({_id: -1}));
+assert.commandWorked(mongosColl.insert({_id: -1}));
startTime = (new Date()).getTime();
const csResult = assert.commandWorked(mongosDB.runCommand(
{getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: thirtyMins}));
diff --git a/jstests/sharding/change_stream_lookup_single_shard_cluster.js b/jstests/sharding/change_stream_lookup_single_shard_cluster.js
index 53fed919125..19fd090918c 100644
--- a/jstests/sharding/change_stream_lookup_single_shard_cluster.js
+++ b/jstests/sharding/change_stream_lookup_single_shard_cluster.js
@@ -36,7 +36,7 @@ const mongosColl = mongosDB[jsTestName()];
assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
assert.commandWorked(
mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-assert.writeOK(mongosColl.insert({_id: 1}));
+assert.commandWorked(mongosColl.insert({_id: 1}));
// Verify that the pipeline splits and merges on mongoS despite only targeting a single shard.
const explainPlan = assert.commandWorked(
diff --git a/jstests/sharding/change_stream_metadata_notifications.js b/jstests/sharding/change_stream_metadata_notifications.js
index 48138d089ec..4fd28583dd9 100644
--- a/jstests/sharding/change_stream_metadata_notifications.js
+++ b/jstests/sharding/change_stream_metadata_notifications.js
@@ -45,15 +45,15 @@ assert.commandWorked(mongosDB.adminCommand(
{moveChunk: mongosColl.getFullName(), find: {shardKey: 1}, to: st.rs1.getURL()}));
// Write a document to each chunk.
-assert.writeOK(mongosColl.insert({shardKey: -1, _id: -1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({shardKey: 1, _id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({shardKey: -1, _id: -1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({shardKey: 1, _id: 1}, {writeConcern: {w: "majority"}}));
let changeStream = mongosColl.watch();
// We awaited the replication of the first writes, so the change stream shouldn't return them.
-assert.writeOK(mongosColl.update({shardKey: -1, _id: -1}, {$set: {updated: true}}));
-assert.writeOK(mongosColl.update({shardKey: 1, _id: 1}, {$set: {updated: true}}));
-assert.writeOK(mongosColl.insert({shardKey: 2, _id: 2}));
+assert.commandWorked(mongosColl.update({shardKey: -1, _id: -1}, {$set: {updated: true}}));
+assert.commandWorked(mongosColl.update({shardKey: 1, _id: 1}, {$set: {updated: true}}));
+assert.commandWorked(mongosColl.insert({shardKey: 2, _id: 2}));
// Drop the collection and test that we return a "drop" entry, followed by an "invalidate"
// entry.
diff --git a/jstests/sharding/change_stream_read_preference.js b/jstests/sharding/change_stream_read_preference.js
index 1c4129e9952..41cd75ee901 100644
--- a/jstests/sharding/change_stream_read_preference.js
+++ b/jstests/sharding/change_stream_read_preference.js
@@ -60,16 +60,16 @@ for (let rs of [st.rs0, st.rs1]) {
}
// Write a document to each chunk.
-assert.writeOK(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
// Test that change streams go to the primary by default.
let changeStreamComment = "change stream against primary";
const primaryStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}],
{comment: changeStreamComment});
-assert.writeOK(mongosColl.update({_id: -1}, {$set: {updated: true}}));
-assert.writeOK(mongosColl.update({_id: 1}, {$set: {updated: true}}));
+assert.commandWorked(mongosColl.update({_id: -1}, {$set: {updated: true}}));
+assert.commandWorked(mongosColl.update({_id: 1}, {$set: {updated: true}}));
assert.soon(() => primaryStream.hasNext());
assert.eq(primaryStream.next().fullDocument, {_id: -1, updated: true});
@@ -100,8 +100,8 @@ const secondaryStream =
mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}],
{comment: changeStreamComment, $readPreference: {mode: "secondary"}});
-assert.writeOK(mongosColl.update({_id: -1}, {$set: {updatedCount: 2}}));
-assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
+assert.commandWorked(mongosColl.update({_id: -1}, {$set: {updatedCount: 2}}));
+assert.commandWorked(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
assert.soon(() => secondaryStream.hasNext());
assert.eq(secondaryStream.next().fullDocument, {_id: -1, updated: true, updatedCount: 2});
diff --git a/jstests/sharding/change_stream_show_migration_events.js b/jstests/sharding/change_stream_show_migration_events.js
index c07e059e4d1..570a8039a8c 100644
--- a/jstests/sharding/change_stream_show_migration_events.js
+++ b/jstests/sharding/change_stream_show_migration_events.js
@@ -72,8 +72,8 @@ assert.commandWorked(
mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
// Insert two documents.
-assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
// Split the collection into two chunks: [MinKey, 10) and [10, MaxKey].
assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
@@ -100,8 +100,8 @@ checkEvents(changeStreamShardZero, shardZeroEventsAfterNewShard);
checkEvents(changeStreamShardOne, shardOneEvents);
// Insert into both the chunks.
-assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
// Split again, and move a second chunk to the first shard. The new chunks are:
// [MinKey, 0), [0, 10), and [10, MaxKey].
@@ -115,9 +115,9 @@ assert.commandWorked(mongos.adminCommand({
}));
// Insert again, into all three chunks.
-assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
var shardZeroEvents = [
makeEvent(1, "insert"),
@@ -146,9 +146,9 @@ assert(!changeStreamShardOne.hasNext());
// Insert into all three chunks.
jsTestLog("Insert into all three chunks");
-assert.writeOK(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
jsTestLog("Move the [Minkey, 0) chunk to shard 1.");
assert.commandWorked(mongos.adminCommand({
@@ -159,9 +159,9 @@ assert.commandWorked(mongos.adminCommand({
}));
// Insert again, into all three chunks.
-assert.writeOK(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
// Check that each change stream returns the expected events.
shardZeroEvents = [
@@ -194,9 +194,9 @@ const changeStreamNewShard = newShard.getPrimary().getCollection('test.chunk_mig
// At this point, there haven't been any migrations to that shard; check that the changeStream
// works normally.
-assert.writeOK(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
shardOneEvents = [
makeEvent(-5, "insert"),
@@ -208,16 +208,16 @@ assert(!changeStreamShardZero.hasNext(), "Do not expect any results");
checkEvents(changeStreamShardOne, shardOneEvents);
assert(!changeStreamNewShard.hasNext(), "Do not expect any results yet");
-assert.writeOK(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
// Now migrate a chunk to the new shard and verify the stream continues to return results
// from both before and after the migration.
jsTestLog("Migrating [10, MaxKey] chunk to new shard.");
assert.commandWorked(mongos.adminCommand(
{moveChunk: mongosColl.getFullName(), find: {_id: 20}, to: "newShard", _waitForDelete: true}));
-assert.writeOK(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
let shardOneEventsBeforeNewShard = [
makeEvent(16, "insert"),
diff --git a/jstests/sharding/change_stream_update_lookup_collation.js b/jstests/sharding/change_stream_update_lookup_collation.js
index 9c13f4afac8..643f2869b46 100644
--- a/jstests/sharding/change_stream_update_lookup_collation.js
+++ b/jstests/sharding/change_stream_update_lookup_collation.js
@@ -72,10 +72,10 @@ assert.gte(bsonWoCompare({shardKey: "abc"}, {shardKey: "aBC"}), 1);
// know the update lookup will use both the _id and the shard key, and we want to make sure it
// is only targeting a single shard. Also note that _id is a string, since we want to make sure
// the _id index can only be used if we are using the collection's default collation.
-assert.writeOK(mongosColl.insert({_id: "abc_1", shardKey: "ABC"}));
-assert.writeOK(mongosColl.insert({_id: "abc_2", shardKey: "ABC"}));
-assert.writeOK(mongosColl.insert({_id: "abc_1", shardKey: "abc"}));
-assert.writeOK(mongosColl.insert({_id: "abc_2", shardKey: "abc"}));
+assert.commandWorked(mongosColl.insert({_id: "abc_1", shardKey: "ABC"}));
+assert.commandWorked(mongosColl.insert({_id: "abc_2", shardKey: "ABC"}));
+assert.commandWorked(mongosColl.insert({_id: "abc_1", shardKey: "abc"}));
+assert.commandWorked(mongosColl.insert({_id: "abc_2", shardKey: "abc"}));
// Verify that the post-change lookup uses the simple collation to target to a single shard,
// then uses the collection-default collation to perform the lookup on the shard.
@@ -128,10 +128,10 @@ const strengthOneCollation = {
// Insert some documents that might be confused with existing documents under the change
// stream's collation, but should not be confused during the update lookup.
-assert.writeOK(mongosColl.insert({_id: "abç_1", shardKey: "ABÇ"}));
-assert.writeOK(mongosColl.insert({_id: "abç_2", shardKey: "ABÇ"}));
-assert.writeOK(mongosColl.insert({_id: "abç_1", shardKey: "abç"}));
-assert.writeOK(mongosColl.insert({_id: "abç_2", shardKey: "abç"}));
+assert.commandWorked(mongosColl.insert({_id: "abç_1", shardKey: "ABÇ"}));
+assert.commandWorked(mongosColl.insert({_id: "abç_2", shardKey: "ABÇ"}));
+assert.commandWorked(mongosColl.insert({_id: "abç_1", shardKey: "abç"}));
+assert.commandWorked(mongosColl.insert({_id: "abç_2", shardKey: "abç"}));
assert.eq(mongosColl.find({shardKey: "abc"}).collation(strengthOneCollation).itcount(), 8);
diff --git a/jstests/sharding/change_stream_update_lookup_read_concern.js b/jstests/sharding/change_stream_update_lookup_read_concern.js
index 03b9ec86738..3dc1fb47bfd 100644
--- a/jstests/sharding/change_stream_update_lookup_read_concern.js
+++ b/jstests/sharding/change_stream_update_lookup_read_concern.js
@@ -63,7 +63,7 @@ assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})
assert.commandWorked(
mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-assert.writeOK(mongosColl.insert({_id: 1}));
+assert.commandWorked(mongosColl.insert({_id: 1}));
rst.awaitReplication();
// Make sure reads with read preference tag 'closestSecondary' go to the tagged secondary.
@@ -88,7 +88,7 @@ const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updat
comment: changeStreamComment,
$readPreference: {mode: "nearest", tags: [{tag: "closestSecondary"}]}
});
-assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 1}}));
+assert.commandWorked(mongosColl.update({_id: 1}, {$set: {updatedCount: 1}}));
assert.soon(() => changeStream.hasNext());
let latestChange = changeStream.next();
assert.eq(latestChange.operationType, "update");
@@ -160,7 +160,7 @@ profilerHasSingleMatchingEntryOrThrow({
// the new, lagged secondary. Even though it's lagged, the lookup should use 'afterClusterTime'
// to ensure it does not return until the node can see the change it's looking up.
stopServerReplication(newClosestSecondary);
-assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
+assert.commandWorked(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
// Since we stopped replication, we expect the update lookup to block indefinitely until we
// resume replication, so we resume replication in a parallel shell while this thread is blocked
diff --git a/jstests/sharding/change_streams.js b/jstests/sharding/change_streams.js
index 08c075c1e18..83cfa09abd1 100644
--- a/jstests/sharding/change_streams.js
+++ b/jstests/sharding/change_streams.js
@@ -77,14 +77,14 @@ function runTest(collName, shardKey) {
makeShardKey(1) /* move to shard 1 */);
// Write a document to each chunk.
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1)));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1)));
+ assert.commandWorked(mongosColl.insert(makeShardKeyDocument(-1)));
+ assert.commandWorked(mongosColl.insert(makeShardKeyDocument(1)));
let changeStream = mongosColl.aggregate([{$changeStream: {}}]);
// Test that a change stream can see inserts on shard 0.
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1000)));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1000)));
+ assert.commandWorked(mongosColl.insert(makeShardKeyDocument(1000)));
+ assert.commandWorked(mongosColl.insert(makeShardKeyDocument(-1000)));
assert.soon(() => changeStream.hasNext(), "expected to be able to see the first insert");
assertChangeStreamEventEq(changeStream.next(), {
@@ -96,7 +96,7 @@ function runTest(collName, shardKey) {
// Because the periodic noop writer is disabled, do another write to shard 0 in order to
// advance that shard's clock, enabling the stream to return the earlier write to shard 1.
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1001)));
+ assert.commandWorked(mongosColl.insert(makeShardKeyDocument(1001)));
assert.soon(() => changeStream.hasNext(), "expected to be able to see the second insert");
assertChangeStreamEventEq(changeStream.next(), {
@@ -122,11 +122,11 @@ function runTest(collName, shardKey) {
changeStream.close();
jsTestLog('Testing multi-update change streams with shard key ' + shardKey);
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(10, {a: 0, b: 0})));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-10, {a: 0, b: 0})));
+ assert.commandWorked(mongosColl.insert(makeShardKeyDocument(10, {a: 0, b: 0})));
+ assert.commandWorked(mongosColl.insert(makeShardKeyDocument(-10, {a: 0, b: 0})));
changeStream = mongosColl.aggregate([{$changeStream: {}}]);
- assert.writeOK(mongosColl.update({a: 0}, {$set: {b: 2}}, {multi: true}));
+ assert.commandWorked(mongosColl.update({a: 0}, {$set: {b: 2}}, {multi: true}));
assert.soon(() => changeStream.hasNext());
assertChangeStreamEventEq(changeStream.next(), {
@@ -154,11 +154,11 @@ function runTest(collName, shardKey) {
assert.commandWorked(
st.s0.adminCommand({setParameter: 1, internalQueryProhibitMergingOnMongoS: false}));
- assert.writeOK(mongosColl.remove({}));
+ assert.commandWorked(mongosColl.remove({}));
// We awaited the replication of the first write, so the change stream shouldn't return it.
// Use { w: "majority" } to deal with journaling correctly, even though we only have one
// node.
- assert.writeOK(
+ assert.commandWorked(
mongosColl.insert(makeShardKeyDocument(0, {a: 1}), {writeConcern: {w: "majority"}}));
changeStream = mongosColl.aggregate([{$changeStream: {}}]);
@@ -188,15 +188,19 @@ function runTest(collName, shardKey) {
makeShardKey(1) /* move to shard 1 */);
// Write one document to each chunk.
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1), {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1), {writeConcern: {w: "majority"}}));
+ assert.commandWorked(
+ mongosColl.insert(makeShardKeyDocument(-1), {writeConcern: {w: "majority"}}));
+ assert.commandWorked(
+ mongosColl.insert(makeShardKeyDocument(1), {writeConcern: {w: "majority"}}));
changeStream = mongosColl.aggregate([{$changeStream: {}}]);
assert(!changeStream.hasNext());
// Store a valid resume token before dropping the collection, to be used later in the test.
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-2), {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(2), {writeConcern: {w: "majority"}}));
+ assert.commandWorked(
+ mongosColl.insert(makeShardKeyDocument(-2), {writeConcern: {w: "majority"}}));
+ assert.commandWorked(
+ mongosColl.insert(makeShardKeyDocument(2), {writeConcern: {w: "majority"}}));
assert.soon(() => changeStream.hasNext());
const resumeToken = changeStream.next()._id;
diff --git a/jstests/sharding/change_streams_establishment_finds_new_shards.js b/jstests/sharding/change_streams_establishment_finds_new_shards.js
index 146fc166d50..8f2393f99ee 100644
--- a/jstests/sharding/change_streams_establishment_finds_new_shards.js
+++ b/jstests/sharding/change_streams_establishment_finds_new_shards.js
@@ -68,8 +68,8 @@ assert(!changeStream.hasNext(), "Do not expect any results yet");
awaitNewShard();
// Insert two documents in different shards.
-assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
// Expect to see them both.
for (let id of [0, 20]) {
diff --git a/jstests/sharding/change_streams_primary_shard_unaware.js b/jstests/sharding/change_streams_primary_shard_unaware.js
index b325f770585..de89b928998 100644
--- a/jstests/sharding/change_streams_primary_shard_unaware.js
+++ b/jstests/sharding/change_streams_primary_shard_unaware.js
@@ -58,7 +58,7 @@ assert.commandWorked(mongosDB.createCollection(testName));
// triggering a refresh when a change stream is established through mongos2.
const mongos2DB = st.s2.getDB(testName);
const mongos2Coll = mongos2DB[testName];
-assert.writeOK(mongos2Coll.insert({_id: 0, a: 0}));
+assert.commandWorked(mongos2Coll.insert({_id: 0, a: 0}));
// Create index on the shard key.
assert.commandWorked(mongos2Coll.createIndex({a: 1}));
@@ -92,7 +92,7 @@ assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
// Insert a doc and verify that the primary shard is now aware that the collection is sharded.
-assert.writeOK(mongosColl.insert({_id: 1, a: 1}));
+assert.commandWorked(mongosColl.insert({_id: 1, a: 1}));
assert.eq(true, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
// Verify that both cursors are able to pick up an inserted document.
@@ -123,10 +123,10 @@ assert.commandWorked(mongosDB.adminCommand({
}));
// Update the document on the primary shard.
-assert.writeOK(mongosColl.update({_id: 1, a: 1}, {$set: {b: 1}}));
+assert.commandWorked(mongosColl.update({_id: 1, a: 1}, {$set: {b: 1}}));
// Insert another document to each shard.
-assert.writeOK(mongosColl.insert({_id: -2, a: -2}));
-assert.writeOK(mongosColl.insert({_id: 2, a: 2}));
+assert.commandWorked(mongosColl.insert({_id: -2, a: -2}));
+assert.commandWorked(mongosColl.insert({_id: 2, a: 2}));
// Verify that both cursors pick up the first inserted doc regardless of the moveChunk
// operation.
diff --git a/jstests/sharding/change_streams_shards_start_in_sync.js b/jstests/sharding/change_streams_shards_start_in_sync.js
index 3928913a1bb..7b493800dcc 100644
--- a/jstests/sharding/change_streams_shards_start_in_sync.js
+++ b/jstests/sharding/change_streams_shards_start_in_sync.js
@@ -98,17 +98,17 @@ function waitForShardCursor(rs) {
// Make sure the shard 0 $changeStream cursor is established before doing the first writes.
waitForShardCursor(st.rs0);
-assert.writeOK(mongosColl.insert({_id: -1000}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -1000}, {writeConcern: {w: "majority"}}));
// This write to shard 1 occurs before the $changeStream cursor on shard 1 is open, because the
// mongos where the $changeStream is running is disconnected from shard 1.
-assert.writeOK(mongosColl.insert({_id: 1001}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 1001}, {writeConcern: {w: "majority"}}));
jsTestLog("Reconnecting");
st.rs1.getPrimary().reconnect(st.s1);
waitForShardCursor(st.rs1);
-assert.writeOK(mongosColl.insert({_id: -1002}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -1002}, {writeConcern: {w: "majority"}}));
waitForShell();
st.stop();
})();
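Several of these change-stream tests pair the rewritten asserts with {writeConcern: {w: "majority"}} so the write is durable before the stream is polled. A minimal sketch of that pattern, assuming a mongo shell against a replica set; db.events is an illustrative collection name:

    var cs = db.events.watch();   // open a change stream on the collection
    assert.commandWorked(db.events.insert({_id: 1}, {writeConcern: {w: "majority"}}));
    assert.soon(() => cs.hasNext());              // wait for the event to surface
    assert.eq("insert", cs.next().operationType); // the insert is reported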
diff --git a/jstests/sharding/change_streams_unsharded_becomes_sharded.js b/jstests/sharding/change_streams_unsharded_becomes_sharded.js
index c28e19c9520..9ab4b1901fa 100644
--- a/jstests/sharding/change_streams_unsharded_becomes_sharded.js
+++ b/jstests/sharding/change_streams_unsharded_becomes_sharded.js
@@ -57,8 +57,8 @@ function testUnshardedBecomesSharded(collToWatch) {
// Verify that the cursor picks up documents inserted while the collection is unsharded. The
// 'documentKey' at this point is simply the _id field.
- assert.writeOK(mongosColl.insert({_id: 0, x: 0}));
- assert.writeOK(mongosCollOther.insert({_id: 0, y: 0}));
+ assert.commandWorked(mongosColl.insert({_id: 0, x: 0}));
+ assert.commandWorked(mongosCollOther.insert({_id: 0, y: 0}));
const [preShardCollectionChange] = cst.assertNextChangesEqual({
cursor: cursor,
expectedChanges: [{
@@ -100,8 +100,8 @@ function testUnshardedBecomesSharded(collToWatch) {
// documents. The 'documentKey' field should now include the shard key, even before a
// 'kNewShardDetected' operation has been generated by the migration of a chunk to a new
// shard.
- assert.writeOK(mongosColl.insert({_id: 1, x: 1}));
- assert.writeOK(mongosCollOther.insert({_id: 1, y: 1}));
+ assert.commandWorked(mongosColl.insert({_id: 1, x: 1}));
+ assert.commandWorked(mongosCollOther.insert({_id: 1, y: 1}));
cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [postShardCollectionChanges[0]]});
// Move the [minKey, 0) chunk to shard1.
@@ -119,8 +119,8 @@ function testUnshardedBecomesSharded(collToWatch) {
}));
// Make sure the change stream cursor sees a document inserted on the recipient shard.
- assert.writeOK(mongosColl.insert({_id: -1, x: -1}));
- assert.writeOK(mongosCollOther.insert({_id: -1, y: -1}));
+ assert.commandWorked(mongosColl.insert({_id: -1, x: -1}));
+ assert.commandWorked(mongosCollOther.insert({_id: -1, y: -1}));
cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [postShardCollectionChanges[1]]});
// Confirm that we can resume the stream on the sharded collection using the token generated
@@ -145,8 +145,8 @@ function testUnshardedBecomesSharded(collToWatch) {
// Insert a couple of documents to shard1, creating a scenario where the getMore to shard0 will
// indicate that the change stream is invalidated, yet shard1 will still have data to return.
- assert.writeOK(mongosColl.insert({_id: -2, x: -2}));
- assert.writeOK(mongosColl.insert({_id: -3, x: -3}));
+ assert.commandWorked(mongosColl.insert({_id: -2, x: -2}));
+ assert.commandWorked(mongosColl.insert({_id: -3, x: -3}));
// Drop and recreate the collection.
mongosColl.drop();
@@ -156,8 +156,8 @@ function testUnshardedBecomesSharded(collToWatch) {
// Shard the collection on a different shard key and ensure that each shard has a chunk.
st.shardColl(mongosColl.getName(), {z: 1}, {z: 0}, {z: -1}, mongosDB.getName());
- assert.writeOK(mongosColl.insert({_id: -1, z: -1}));
- assert.writeOK(mongosColl.insert({_id: 1, z: 1}));
+ assert.commandWorked(mongosColl.insert({_id: -1, z: -1}));
+ assert.commandWorked(mongosColl.insert({_id: 1, z: 1}));
// Verify that the change stream picks up the inserts; however, the shard key is missing
// since the collection has since been dropped and recreated.
diff --git a/jstests/sharding/change_streams_whole_db.js b/jstests/sharding/change_streams_whole_db.js
index 322be4a19b4..baefc8107c3 100644
--- a/jstests/sharding/change_streams_whole_db.js
+++ b/jstests/sharding/change_streams_whole_db.js
@@ -35,7 +35,7 @@ let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collecti
assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
// Test that the change stream returns operations on the unsharded test collection.
-assert.writeOK(mongosColl.insert({_id: 0}));
+assert.commandWorked(mongosColl.insert({_id: 0}));
let expected = {
documentKey: {_id: 0},
fullDocument: {_id: 0},
@@ -52,8 +52,8 @@ const mongosCollShardedOnX = mongosDB[jsTestName() + "_sharded_on_x"];
st.shardColl(mongosCollShardedOnX.getName(), {x: 1}, {x: 0}, {x: 1}, mongosDB.getName());
// Write a document to each chunk.
-assert.writeOK(mongosCollShardedOnX.insert({_id: 0, x: -1}));
-assert.writeOK(mongosCollShardedOnX.insert({_id: 1, x: 1}));
+assert.commandWorked(mongosCollShardedOnX.insert({_id: 0, x: -1}));
+assert.commandWorked(mongosCollShardedOnX.insert({_id: 1, x: 1}));
// Verify that the change stream returns both inserts.
expected = [
@@ -74,8 +74,8 @@ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
// Now send inserts to both the sharded and unsharded collections, and verify that the change
// stream returns them in order.
-assert.writeOK(mongosCollShardedOnX.insert({_id: 2, x: 2}));
-assert.writeOK(mongosColl.insert({_id: 1}));
+assert.commandWorked(mongosCollShardedOnX.insert({_id: 2, x: 2}));
+assert.commandWorked(mongosColl.insert({_id: 1}));
// Verify that the change stream returns both inserts.
expected = [
@@ -106,8 +106,8 @@ st.shardColl(mongosCollShardedCompound.getName(),
mongosDB.getName());
// Write a document to each chunk.
-assert.writeOK(mongosCollShardedCompound.insert({_id: 0, y: -1, x: 0}));
-assert.writeOK(mongosCollShardedCompound.insert({_id: 1, y: 1, x: 0}));
+assert.commandWorked(mongosCollShardedCompound.insert({_id: 0, y: -1, x: 0}));
+assert.commandWorked(mongosCollShardedCompound.insert({_id: 1, y: 1, x: 0}));
// Verify that the change stream returns both inserts.
expected = [
@@ -128,9 +128,9 @@ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
// Send inserts to all 3 collections and verify that the results contain the correct
// documentKeys and are in the correct order.
-assert.writeOK(mongosCollShardedOnX.insert({_id: 3, x: 3}));
-assert.writeOK(mongosColl.insert({_id: 3}));
-assert.writeOK(mongosCollShardedCompound.insert({_id: 2, x: 0, y: -2}));
+assert.commandWorked(mongosCollShardedOnX.insert({_id: 3, x: 3}));
+assert.commandWorked(mongosColl.insert({_id: 3}));
+assert.commandWorked(mongosCollShardedCompound.insert({_id: 2, x: 0, y: -2}));
// Verify that the change stream returns all three inserts.
expected = [
@@ -160,7 +160,7 @@ const resumeTokenBeforeDrop = results[0]._id;
// Write one more document to the collection that will be dropped, to be returned after
// resuming.
-assert.writeOK(mongosCollShardedOnX.insert({_id: 4, x: 4}));
+assert.commandWorked(mongosCollShardedOnX.insert({_id: 4, x: 4}));
// Drop the collection, invalidating the open change stream.
assertDropCollection(mongosDB, mongosCollShardedOnX.getName());
diff --git a/jstests/sharding/cleanup_orphaned_cmd_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
index bf996dda39b..90c6c6cb0f1 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
@@ -42,7 +42,7 @@ for (var s = 0; s < 2; s++) {
var bulk = shardColl.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++)
bulk.insert({_id: i});
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}
assert.eq(200,
diff --git a/jstests/sharding/clone_catalog_data.js b/jstests/sharding/clone_catalog_data.js
index 3daed3214c4..94d3ee8fd8f 100644
--- a/jstests/sharding/clone_catalog_data.js
+++ b/jstests/sharding/clone_catalog_data.js
@@ -26,8 +26,8 @@
// Create some test documents and put them in each collection.
[{a: 1, b: 2, c: 4}, {a: 2, b: 4, c: 8}, {a: 3, b: 6, c: 12}].forEach(d => {
- assert.writeOK(testDB.coll1.insert(d));
- assert.writeOK(testDB.coll2.insert(d));
+ assert.commandWorked(testDB.coll1.insert(d));
+ assert.commandWorked(testDB.coll2.insert(d));
});
// Create indexes on each collection.
diff --git a/jstests/sharding/coll_epoch_test1.js b/jstests/sharding/coll_epoch_test1.js
index d995ee19ab6..88d7c4fc3da 100644
--- a/jstests/sharding/coll_epoch_test1.js
+++ b/jstests/sharding/coll_epoch_test1.js
@@ -34,7 +34,7 @@ var bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({_id: i, test: "a"});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(100, staleMongos.getCollection(coll + "").find({test: "a"}).itcount());
assert(coll.drop());
@@ -56,7 +56,7 @@ bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({notId: i, test: "b"});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(100, staleMongos.getCollection(coll + "").find({test: "b"}).itcount());
assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a"]}}).itcount());
@@ -74,7 +74,7 @@ bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({test: "c"});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(100, staleMongos.getCollection(coll + "").find({test: "c"}).itcount());
assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a", "b"]}}).itcount());
diff --git a/jstests/sharding/coll_epoch_test2.js b/jstests/sharding/coll_epoch_test2.js
index 0033e59f57d..e01513adb04 100644
--- a/jstests/sharding/coll_epoch_test2.js
+++ b/jstests/sharding/coll_epoch_test2.js
@@ -32,7 +32,7 @@ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-assert.writeOK(coll.insert({hello: "world"}));
+assert.commandWorked(coll.insert({hello: "world"}));
jsTest.log("Sharding collection across multiple shards...");
@@ -86,7 +86,7 @@ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++)
bulk.insert({_id: i});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
res = admin.runCommand({split: coll + "", middle: {_id: 200}});
assert.commandWorked(res);
@@ -112,17 +112,18 @@ assert.neq(null, readMongos.getCollection(coll + "").findOne({_id: 1}));
jsTest.log("Checking update...");
// Ensure that updating an element finds the right location
-assert.writeOK(updateMongos.getCollection(coll + "").update({_id: 1}, {$set: {updated: true}}));
+assert.commandWorked(
+ updateMongos.getCollection(coll + "").update({_id: 1}, {$set: {updated: true}}));
assert.neq(null, coll.findOne({updated: true}));
jsTest.log("Checking insert...");
// Ensure that inserting an element finds the right shard
-assert.writeOK(insertMongos.getCollection(coll + "").insert({_id: 101}));
+assert.commandWorked(insertMongos.getCollection(coll + "").insert({_id: 101}));
assert.neq(null, coll.findOne({_id: 101}));
jsTest.log("Checking remove...");
// Ensure that removing an element finds the right shard, verified through the mongos that performed the sharding
-assert.writeOK(removeMongos.getCollection(coll + "").remove({_id: 2}));
+assert.commandWorked(removeMongos.getCollection(coll + "").remove({_id: 2}));
assert.eq(null, coll.findOne({_id: 2}));
coll.drop();
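The bulk-write hunks follow the same pattern: initializeUnorderedBulkOp().execute() returns a BulkWriteResult, which assert.commandWorked() accepts just as it does a WriteResult. A minimal sketch, assuming a mongo shell session; db.bulkExample is an illustrative name:

    var bulk = db.bulkExample.initializeUnorderedBulkOp();
    for (var i = 0; i < 100; i++) {
        bulk.insert({_id: i});
    }
    var bulkRes = bulk.execute();       // returns a BulkWriteResult
    assert.commandWorked(bulkRes);      // replaces the legacy assert.writeOK(bulkRes)
    assert.eq(100, bulkRes.nInserted);  // result counters remain available afterwards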
diff --git a/jstests/sharding/collation_lookup.js b/jstests/sharding/collation_lookup.js
index f9388cf9aa3..f5e928e8374 100644
--- a/jstests/sharding/collation_lookup.js
+++ b/jstests/sharding/collation_lookup.js
@@ -367,11 +367,11 @@ const withoutDefaultCollationColl = mongosDB[testName + "_without_default"];
assert.commandWorked(
mongosDB.createCollection(withDefaultCollationColl.getName(), caseInsensitive));
-assert.writeOK(withDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
+assert.commandWorked(withDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
-assert.writeOK(withoutDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
-assert.writeOK(withoutDefaultCollationColl.insert({_id: "uppercase", str: "ABC"}));
-assert.writeOK(withoutDefaultCollationColl.insert({_id: "unmatched", str: "def"}));
+assert.commandWorked(withoutDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
+assert.commandWorked(withoutDefaultCollationColl.insert({_id: "uppercase", str: "ABC"}));
+assert.commandWorked(withoutDefaultCollationColl.insert({_id: "unmatched", str: "def"}));
//
// Sharded collection with default collation and unsharded collection without a default
diff --git a/jstests/sharding/collation_targeting.js b/jstests/sharding/collation_targeting.js
index c58396eaa80..3fb27342ca4 100644
--- a/jstests/sharding/collation_targeting.js
+++ b/jstests/sharding/collation_targeting.js
@@ -46,10 +46,10 @@ var a_1 = {_id: 0, a: 1, geo: {type: "Point", coordinates: [0, 0]}};
var a_100 = {_id: 1, a: 100, geo: {type: "Point", coordinates: [0, 0]}};
var a_FOO = {_id: 2, a: "FOO", geo: {type: "Point", coordinates: [0, 0]}};
var a_foo = {_id: 3, a: "foo", geo: {type: "Point", coordinates: [0, 0]}};
-assert.writeOK(coll.insert(a_1));
-assert.writeOK(coll.insert(a_100));
-assert.writeOK(coll.insert(a_FOO));
-assert.writeOK(coll.insert(a_foo));
+assert.commandWorked(coll.insert(a_1));
+assert.commandWorked(coll.insert(a_100));
+assert.commandWorked(coll.insert(a_FOO));
+assert.commandWorked(coll.insert(a_foo));
// Aggregate.
@@ -245,33 +245,33 @@ assert.eq(1,
// Test a remove command on strings with non-simple collation. This should be scatter-gather.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.remove({a: "foo"}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(2, writeRes.nRemoved);
explain = coll.explain().remove({a: "foo"}, {collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_FOO));
- assert.writeOK(coll.insert(a_foo));
+ assert.commandWorked(coll.insert(a_FOO));
+ assert.commandWorked(coll.insert(a_foo));
}
// Test a remove command on strings with simple collation. This should be single-shard.
writeRes = coll.remove({a: "foo"});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = coll.explain().remove({a: "foo"});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-assert.writeOK(coll.insert(a_foo));
+assert.commandWorked(coll.insert(a_foo));
// Test a remove command on numbers with non-simple collation. This should be single-shard.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.remove({a: 100}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = coll.explain().remove({a: 100}, {collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_100));
+ assert.commandWorked(coll.insert(a_100));
}
// A single remove (justOne: true) must be single-shard or an exact-ID query. A query is
@@ -287,23 +287,23 @@ if (testDB.getMongo().writeMode() === "commands") {
// Single remove on string shard key with simple collation should succeed, because it is
// single-shard.
writeRes = coll.remove({a: "foo"}, {justOne: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = coll.explain().remove({a: "foo"}, {justOne: true});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-assert.writeOK(coll.insert(a_foo));
+assert.commandWorked(coll.insert(a_foo));
// Single remove on number shard key with non-simple collation should succeed, because it is
// single-shard.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.remove({a: 100}, {justOne: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = coll.explain().remove({a: 100}, {justOne: true, collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_100));
+ assert.commandWorked(coll.insert(a_100));
}
// Single remove on string _id with non-collection-default collation should fail, because it is
@@ -314,17 +314,17 @@ if (testDB.getMongo().writeMode() === "commands") {
// Single remove on string _id with collection-default collation should succeed, because it is
// an exact-ID query.
-assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+assert.commandWorked(coll.insert({_id: "foo", a: "bar"}));
writeRes = coll.remove({_id: "foo"}, {justOne: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
// Single remove on string _id with collection-default collation explicitly given should
// succeed, because it is an exact-ID query.
if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+ assert.commandWorked(coll.insert({_id: "foo", a: "bar"}));
writeRes = coll.remove({_id: "foo"}, {justOne: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
}
@@ -332,9 +332,9 @@ if (testDB.getMongo().writeMode() === "commands") {
// is an exact-ID query.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.remove({_id: a_100._id}, {justOne: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
- assert.writeOK(coll.insert(a_100));
+ assert.commandWorked(coll.insert(a_100));
}
// Update.
@@ -342,7 +342,7 @@ if (testDB.getMongo().writeMode() === "commands") {
// Test an update command on strings with non-simple collation. This should be scatter-gather.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(2, writeRes.nMatched);
explain = coll.explain().update(
{a: "foo"}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
@@ -352,7 +352,7 @@ if (testDB.getMongo().writeMode() === "commands") {
// Test an update command on strings with simple collation. This should be single-shard.
writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = coll.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true});
assert.commandWorked(explain);
@@ -361,7 +361,7 @@ assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
// Test an update command on numbers with non-simple collation. This should be single-shard.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.update({a: 100}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain =
coll.explain().update({a: 100}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
@@ -382,7 +382,7 @@ if (testDB.getMongo().writeMode() === "commands") {
// Single update on string shard key with simple collation should succeed, because it is
// single-shard.
writeRes = coll.update({a: "foo"}, {$set: {b: 1}});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = coll.explain().update({a: "foo"}, {$set: {b: 1}});
assert.commandWorked(explain);
@@ -392,7 +392,7 @@ assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
// single-shard.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.update({a: 100}, {$set: {b: 1}}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = coll.explain().update({a: 100}, {$set: {b: 1}}, {collation: caseInsensitive});
assert.commandWorked(explain);
@@ -402,34 +402,34 @@ if (testDB.getMongo().writeMode() === "commands") {
// Single update on string _id with non-collection-default collation should fail, because it is
// not an exact-ID query.
if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+ assert.commandWorked(coll.insert({_id: "foo", a: "bar"}));
assert.writeError(coll.update({_id: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive}));
- assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
+ assert.commandWorked(coll.remove({_id: "foo"}, {justOne: true}));
}
// Single update on string _id with collection-default collation should succeed, because it is
// an exact-ID query.
-assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+assert.commandWorked(coll.insert({_id: "foo", a: "bar"}));
writeRes = coll.update({_id: "foo"}, {$set: {b: 1}});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
-assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
+assert.commandWorked(coll.remove({_id: "foo"}, {justOne: true}));
// Single update on string _id with collection-default collation explicitly given should
// succeed, because it is an exact-ID query.
if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+ assert.commandWorked(coll.insert({_id: "foo", a: "bar"}));
writeRes = coll.update({_id: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
- assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
+ assert.commandWorked(coll.remove({_id: "foo"}, {justOne: true}));
}
// Single update on number _id with non-collection-default collation should succeed, because it
// is an exact-ID query.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.update({_id: a_foo._id}, {$set: {b: 1}}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
}
@@ -443,7 +443,7 @@ if (testDB.getMongo().writeMode() === "commands") {
// Upsert on strings with simple collation should succeed, because it is single-shard.
writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = coll.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true});
assert.commandWorked(explain);
@@ -453,7 +453,7 @@ assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.update(
{a: 100}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = coll.explain().update(
{a: 100}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive});
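As the collation-targeting hunks show, the replacement is transparent to the result objects the tests keep inspecting: commandWorked() validates the write, and fields such as nMatched and nRemoved are read off the same WriteResult afterwards. A minimal sketch, assuming a case-insensitive collation; db.targeted and the expected count are illustrative:

    var caseInsensitive = {locale: "en_US", strength: 2};
    var writeRes = db.targeted.remove({a: "foo"}, {collation: caseInsensitive});
    assert.commandWorked(writeRes);   // the write itself succeeded
    assert.eq(2, writeRes.nRemoved);  // counters still checked after the assert (data-dependent)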
diff --git a/jstests/sharding/collation_targeting_inherited.js b/jstests/sharding/collation_targeting_inherited.js
index 676dadbc972..c4e86fb1833 100644
--- a/jstests/sharding/collation_targeting_inherited.js
+++ b/jstests/sharding/collation_targeting_inherited.js
@@ -53,10 +53,10 @@ var a_1 = {_id: 0, a: 1, geo: {type: "Point", coordinates: [0, 0]}};
var a_100 = {_id: 1, a: 100, geo: {type: "Point", coordinates: [0, 0]}};
var a_FOO = {_id: 2, a: "FOO", geo: {type: "Point", coordinates: [0, 0]}};
var a_foo = {_id: 3, a: "foo", geo: {type: "Point", coordinates: [0, 0]}};
-assert.writeOK(collCaseInsensitive.insert(a_1));
-assert.writeOK(collCaseInsensitive.insert(a_100));
-assert.writeOK(collCaseInsensitive.insert(a_FOO));
-assert.writeOK(collCaseInsensitive.insert(a_foo));
+assert.commandWorked(collCaseInsensitive.insert(a_1));
+assert.commandWorked(collCaseInsensitive.insert(a_100));
+assert.commandWorked(collCaseInsensitive.insert(a_FOO));
+assert.commandWorked(collCaseInsensitive.insert(a_foo));
// Aggregate.
@@ -264,34 +264,34 @@ assert.eq(1,
// Test a remove command on strings with non-simple collation inherited from collection default.
// This should be scatter-gather.
writeRes = collCaseInsensitive.remove({a: "foo"});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(2, writeRes.nRemoved);
explain = collCaseInsensitive.explain().remove({a: "foo"});
assert.commandWorked(explain);
assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-assert.writeOK(collCaseInsensitive.insert(a_FOO));
-assert.writeOK(collCaseInsensitive.insert(a_foo));
+assert.commandWorked(collCaseInsensitive.insert(a_FOO));
+assert.commandWorked(collCaseInsensitive.insert(a_foo));
// Test a remove command on strings with simple collation. This should be single-shard.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = collCaseInsensitive.remove({a: "foo"}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = collCaseInsensitive.explain().remove({a: "foo"}, {collation: {locale: "simple"}});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(collCaseInsensitive.insert(a_foo));
+ assert.commandWorked(collCaseInsensitive.insert(a_foo));
}
// Test a remove command on numbers with non-simple collation inherited from collection default.
// This should be single-shard.
writeRes = collCaseInsensitive.remove({a: 100});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = collCaseInsensitive.explain().remove({a: 100});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-assert.writeOK(collCaseInsensitive.insert(a_100));
+assert.commandWorked(collCaseInsensitive.insert(a_100));
// A single remove (justOne: true) must be single-shard or an exact-ID query. A query is
// exact-ID if it contains an equality on _id and either has the collection default collation or
@@ -306,24 +306,24 @@ assert.writeError(collCaseInsensitive.remove({a: "foo"}, {justOne: true}));
if (testDB.getMongo().writeMode() === "commands") {
writeRes =
collCaseInsensitive.remove({a: "foo"}, {justOne: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = collCaseInsensitive.explain().remove({a: "foo"},
{justOne: true, collation: {locale: "simple"}});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(collCaseInsensitive.insert(a_foo));
+ assert.commandWorked(collCaseInsensitive.insert(a_foo));
}
// Single remove on number shard key with non-simple collation inherited from collection default
// should succeed, because it is single-shard.
writeRes = collCaseInsensitive.remove({a: 100}, {justOne: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = collCaseInsensitive.explain().remove({a: 100}, {justOne: true});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-assert.writeOK(collCaseInsensitive.insert(a_100));
+assert.commandWorked(collCaseInsensitive.insert(a_100));
// Single remove on string _id with non-collection-default collation should fail, because it is
// not an exact-ID query.
@@ -332,18 +332,18 @@ assert.writeError(
// Single remove on string _id with collection-default collation should succeed, because it is
// an exact-ID query.
-assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
+assert.commandWorked(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
writeRes = collCaseInsensitive.remove({_id: "foo"}, {justOne: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
// Single remove on string _id with collection-default collation explicitly given should
// succeed, because it is an exact-ID query.
if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
+ assert.commandWorked(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
writeRes =
collCaseInsensitive.remove({_id: "foo"}, {justOne: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
}
@@ -351,16 +351,16 @@ if (testDB.getMongo().writeMode() === "commands") {
// is an exact-ID query.
writeRes =
collCaseInsensitive.remove({_id: a_100._id}, {justOne: true, collation: {locale: "simple"}});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
-assert.writeOK(collCaseInsensitive.insert(a_100));
+assert.commandWorked(collCaseInsensitive.insert(a_100));
// Update.
// Test an update command on strings with non-simple collation inherited from collection
// default. This should be scatter-gather.
writeRes = collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {multi: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(2, writeRes.nMatched);
explain = collCaseInsensitive.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true});
assert.commandWorked(explain);
@@ -370,7 +370,7 @@ assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
if (testDB.getMongo().writeMode() === "commands") {
writeRes = collCaseInsensitive.update(
{a: "foo"}, {$set: {b: 1}}, {multi: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = collCaseInsensitive.explain().update(
{a: "foo"}, {$set: {b: 1}}, {multi: true, collation: {locale: "simple"}});
@@ -381,7 +381,7 @@ if (testDB.getMongo().writeMode() === "commands") {
// Test an update command on numbers with non-simple collation inherited from collection
// default. This should be single-shard.
writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}}, {multi: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}}, {multi: true});
assert.commandWorked(explain);
@@ -401,7 +401,7 @@ assert.writeError(collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}));
if (testDB.getMongo().writeMode() === "commands") {
writeRes =
collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = collCaseInsensitive.explain().update(
{a: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
@@ -412,7 +412,7 @@ if (testDB.getMongo().writeMode() === "commands") {
// Single update on number shard key with non-simple collation inherited from collection default
// should succeed, because it is single-shard.
writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}});
assert.commandWorked(explain);
@@ -427,21 +427,21 @@ if (testDB.getMongo().writeMode() === "commands") {
// Single update on string _id with collection-default collation should succeed, because it is
// an exact-ID query.
-assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
+assert.commandWorked(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
writeRes = collCaseInsensitive.update({_id: "foo"}, {$set: {b: 1}});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
-assert.writeOK(collCaseInsensitive.remove({_id: "foo"}, {justOne: true}));
+assert.commandWorked(collCaseInsensitive.remove({_id: "foo"}, {justOne: true}));
// Single update on string _id with collection-default collation explicitly given should
// succeed, because it is an exact-ID query.
if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
+ assert.commandWorked(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
writeRes =
collCaseInsensitive.update({_id: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
- assert.writeOK(collCaseInsensitive.remove({_id: "foo"}, {justOne: true}));
+ assert.commandWorked(collCaseInsensitive.remove({_id: "foo"}, {justOne: true}));
}
// Single update on number _id with non-collection-default collation inherited from collection
@@ -449,7 +449,7 @@ if (testDB.getMongo().writeMode() === "commands") {
if (testDB.getMongo().writeMode() === "commands") {
writeRes = collCaseInsensitive.update(
{_id: a_foo._id}, {$set: {b: 1}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
}
@@ -464,7 +464,7 @@ assert.writeError(
if (testDB.getMongo().writeMode() === "commands") {
writeRes = collCaseInsensitive.update(
{a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = collCaseInsensitive.explain().update(
{a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: {locale: "simple"}});
@@ -475,7 +475,7 @@ if (testDB.getMongo().writeMode() === "commands") {
// Upsert on numbers with non-simple collation inherited from collection default should succeed,
// because it is single-shard.
writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}}, {multi: true, upsert: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain =
collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}}, {multi: true, upsert: true});
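The exact-ID hunks in both collation files rely on the same mongos targeting rule: a single update or remove on a string _id is only targetable when it runs under the collection-default collation; under any other collation the shell reports a write error instead. A minimal sketch of the pattern, assuming db.ci was created with a case-insensitive default collation (an illustrative setup):

    // Not exact-ID: the simple collation differs from the collection default, so
    // mongos cannot target a single shard and the write fails.
    assert.writeError(db.ci.update({_id: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}}));
    // Exact-ID: the collection-default collation applies, so the update targets one shard.
    assert.commandWorked(db.ci.update({_id: "foo"}, {$set: {b: 1}}));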
diff --git a/jstests/sharding/config_rs_no_primary.js b/jstests/sharding/config_rs_no_primary.js
index 6b7c7155a6e..41d74869930 100644
--- a/jstests/sharding/config_rs_no_primary.js
+++ b/jstests/sharding/config_rs_no_primary.js
@@ -35,7 +35,7 @@ var testOps = function(mongos) {
jsTestLog("Doing ops that don't require metadata writes and thus should succeed against: " +
mongos);
var initialCount = mongos.getDB('test').foo.count();
- assert.writeOK(mongos.getDB('test').foo.insert({a: 1}));
+ assert.commandWorked(mongos.getDB('test').foo.insert({a: 1}));
assert.eq(initialCount + 1, mongos.getDB('test').foo.count());
assert.throws(function() {
diff --git a/jstests/sharding/convert_to_and_from_sharded.js b/jstests/sharding/convert_to_and_from_sharded.js
index 15da2e0cc73..9076e803331 100644
--- a/jstests/sharding/convert_to_and_from_sharded.js
+++ b/jstests/sharding/convert_to_and_from_sharded.js
@@ -18,13 +18,13 @@ var checkBasicCRUD = function(coll) {
var doc = coll.findOne({_id: 'marker', y: {$exists: false}});
assert.neq(null, doc);
- assert.writeOK(coll.update({_id: 'marker'}, {$set: {y: 2}}));
+ assert.commandWorked(coll.update({_id: 'marker'}, {$set: {y: 2}}));
assert.eq(2, coll.findOne({_id: 'marker'}).y);
- assert.writeOK(coll.remove({_id: 'marker'}));
+ assert.commandWorked(coll.remove({_id: 'marker'}));
assert.eq(null, coll.findOne({_id: 'marker'}));
- assert.writeOK(coll.insert({_id: 'marker'}, {writeConcern: {w: NUM_NODES}}));
+ assert.commandWorked(coll.insert({_id: 'marker'}, {writeConcern: {w: NUM_NODES}}));
assert.eq('marker', coll.findOne({_id: 'marker'})._id);
};
@@ -49,10 +49,10 @@ if (jsTestOptions().shardMixedBinVersions) {
replShard.awaitReplication();
}
-assert.writeOK(priConn.getDB('test').unsharded.insert({_id: 'marker'}));
+assert.commandWorked(priConn.getDB('test').unsharded.insert({_id: 'marker'}));
checkBasicCRUD(priConn.getDB('test').unsharded);
-assert.writeOK(priConn.getDB('test').sharded.insert({_id: 'marker'}));
+assert.commandWorked(priConn.getDB('test').sharded.insert({_id: 'marker'}));
checkBasicCRUD(priConn.getDB('test').sharded);
for (var x = 0; x < NUM_NODES; x++) {
@@ -76,7 +76,7 @@ checkBasicCRUD(st.s.getDB('test').unsharded);
checkBasicCRUD(st.s.getDB('test').sharded);
for (x = 0; x < 4; x++) {
- assert.writeOK(st.s.getDB('test').sharded.insert({_id: x}));
+ assert.commandWorked(st.s.getDB('test').sharded.insert({_id: x}));
assert.commandWorked(st.s.adminCommand({split: 'test.sharded', middle: {_id: x}}));
}
diff --git a/jstests/sharding/count_config_servers.js b/jstests/sharding/count_config_servers.js
index ff7cff2c698..7df3c4b8843 100644
--- a/jstests/sharding/count_config_servers.js
+++ b/jstests/sharding/count_config_servers.js
@@ -17,7 +17,7 @@ var configDB = st.config;
var coll = configDB.test;
for (var x = 0; x < 10; x++) {
- assert.writeOK(coll.insert({v: x}));
+ assert.commandWorked(coll.insert({v: x}));
}
if (st.configRS) {
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
index 596509c1c2d..a0357de1f81 100644
--- a/jstests/sharding/count_slaveok.js
+++ b/jstests/sharding/count_slaveok.js
@@ -24,7 +24,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 300; i++) {
bulk.insert({i: i % 10});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
var connA = conn;
var connB = new Mongo(st.s.host);
diff --git a/jstests/sharding/covered_shard_key_indexes.js b/jstests/sharding/covered_shard_key_indexes.js
index b68c4bf1bca..2926fea3b1b 100644
--- a/jstests/sharding/covered_shard_key_indexes.js
+++ b/jstests/sharding/covered_shard_key_indexes.js
@@ -22,7 +22,7 @@ assert.commandWorked(
st.shard0.adminCommand({setParameter: 1, logComponentVerbosity: {query: {verbosity: 5}}}));
// Insert some data
-assert.writeOK(coll.insert({_id: true, a: true, b: true}));
+assert.commandWorked(coll.insert({_id: true, a: true, b: true}));
// Index without shard key query - not covered
assert.commandWorked(coll.ensureIndex({a: 1}));
@@ -49,7 +49,7 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {_id:
st.printShardingStatus();
// Insert some data
-assert.writeOK(coll.insert({_id: true, a: true, b: true}));
+assert.commandWorked(coll.insert({_id: true, a: true, b: true}));
// Index without shard key query - not covered
assert.commandWorked(coll.ensureIndex({a: 1}));
@@ -67,7 +67,7 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {a: 1,
st.printShardingStatus();
// Insert some data
-assert.writeOK(coll.insert({_id: true, a: true, b: true, c: true, d: true}));
+assert.commandWorked(coll.insert({_id: true, a: true, b: true, c: true, d: true}));
// Index without shard key query - not covered
assert.commandWorked(coll.ensureIndex({c: 1}));
@@ -101,7 +101,7 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {'a.b'
st.printShardingStatus();
// Insert some data
-assert.writeOK(coll.insert({_id: true, a: {b: true}, c: true}));
+assert.commandWorked(coll.insert({_id: true, a: {b: true}, c: true}));
// Index without shard key query - not covered
assert.commandWorked(coll.ensureIndex({c: 1}));
@@ -124,7 +124,7 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {a: 1}
st.printShardingStatus();
// Insert some bad data manually on the shard
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: "bad data", c: true}));
+assert.commandWorked(st.shard0.getCollection(coll.toString()).insert({_id: "bad data", c: true}));
// Index without shard key query - not covered but succeeds
assert.commandWorked(coll.ensureIndex({c: 1}));
diff --git a/jstests/sharding/create_idx_empty_primary.js b/jstests/sharding/create_idx_empty_primary.js
index f11ffd13f2a..86b81983a43 100644
--- a/jstests/sharding/create_idx_empty_primary.js
+++ b/jstests/sharding/create_idx_empty_primary.js
@@ -15,7 +15,7 @@ assert.commandWorked(testDB.adminCommand({shardcollection: 'test.user', key: {_i
assert.commandWorked(
testDB.adminCommand({movechunk: 'test.user', find: {_id: 0}, to: st.shard0.shardName}));
-assert.writeOK(testDB.user.insert({_id: 0}));
+assert.commandWorked(testDB.user.insert({_id: 0}));
var res = testDB.user.ensureIndex({i: 1});
assert.commandWorked(res);
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index 6b66c9cf130..b6070673ae8 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -20,7 +20,7 @@ var bulk = db.foo.initializeUnorderedBulkOp();
for (i = 0; i < numObjs; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(1,
s.config.chunks.count({"ns": "test.foo"}),
"test requires collection to have one chunk initially");
diff --git a/jstests/sharding/cursor_cleanup.js b/jstests/sharding/cursor_cleanup.js
index 741c7f48b3c..e78052322c7 100644
--- a/jstests/sharding/cursor_cleanup.js
+++ b/jstests/sharding/cursor_cleanup.js
@@ -27,8 +27,8 @@ for (var i = -150; i < 150; i++) {
bulk.insert({_id: i});
bulk2.insert({_id: i});
}
-assert.writeOK(bulk.execute());
-assert.writeOK(bulk2.execute());
+assert.commandWorked(bulk.execute());
+assert.commandWorked(bulk2.execute());
jsTest.log("Open a cursor to a sharded and unsharded collection.");
diff --git a/jstests/sharding/cursor_timeout.js b/jstests/sharding/cursor_timeout.js
index 7c43fd8f99a..316005dacad 100644
--- a/jstests/sharding/cursor_timeout.js
+++ b/jstests/sharding/cursor_timeout.js
@@ -67,7 +67,7 @@ assert.commandWorked(adminDB.runCommand({
}));
for (let x = 0; x < 20; x++) {
- assert.writeOK(routerColl.insert({x: x}));
+ assert.commandWorked(routerColl.insert({x: x}));
}
// Open both a normal and a no-timeout cursor on mongos. Batch size is 1 to ensure that
diff --git a/jstests/sharding/cursor_valid_after_shard_stepdown.js b/jstests/sharding/cursor_valid_after_shard_stepdown.js
index c26de68b8c6..52452809915 100644
--- a/jstests/sharding/cursor_valid_after_shard_stepdown.js
+++ b/jstests/sharding/cursor_valid_after_shard_stepdown.js
@@ -16,8 +16,8 @@ var db = st.s0.getDB('TestDB');
var coll = db.TestColl;
// Insert documents for the test
-assert.writeOK(coll.insert({x: 1, value: 'Test value 1'}));
-assert.writeOK(coll.insert({x: 2, value: 'Test value 2'}));
+assert.commandWorked(coll.insert({x: 1, value: 'Test value 1'}));
+assert.commandWorked(coll.insert({x: 2, value: 'Test value 2'}));
// Establish a cursor on the primary (by not using slaveOk read)
var findCursor = assert.commandWorked(db.runCommand({find: 'TestColl', batchSize: 1})).cursor;
diff --git a/jstests/sharding/delete_during_migrate.js b/jstests/sharding/delete_during_migrate.js
index 87b13519678..108097bf808 100644
--- a/jstests/sharding/delete_during_migrate.js
+++ b/jstests/sharding/delete_during_migrate.js
@@ -25,7 +25,7 @@ var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 200000; i++) {
bulk.insert({a: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// enable sharding of the collection. Only 1 chunk.
t.ensureIndex({a: 1});
diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js
index 26347ec8330..3c9499b73aa 100644
--- a/jstests/sharding/diffservers1.js
+++ b/jstests/sharding/diffservers1.js
@@ -6,9 +6,9 @@ var s = new ShardingTest({shards: 2});
assert.eq(2, s.config.shards.count(), "server count wrong");
var test1 = s.getDB("test1").foo;
-assert.writeOK(test1.insert({a: 1}));
-assert.writeOK(test1.insert({a: 2}));
-assert.writeOK(test1.insert({a: 3}));
+assert.commandWorked(test1.insert({a: 1}));
+assert.commandWorked(test1.insert({a: 2}));
+assert.commandWorked(test1.insert({a: 3}));
assert.eq(3, test1.count());
assert.commandFailed(s.s0.adminCommand({addshard: "sdd$%", maxTimeMS: 60000}), "Bad hostname");
diff --git a/jstests/sharding/drop_sharded_db.js b/jstests/sharding/drop_sharded_db.js
index 9de2ecb6d4a..a10c2cba5aa 100644
--- a/jstests/sharding/drop_sharded_db.js
+++ b/jstests/sharding/drop_sharded_db.js
@@ -29,7 +29,7 @@ for (var i = 0; i < numColls; i++) {
}
// Insert a document to an unsharded collection and make sure that the document is there.
-assert.writeOK(dbA.unsharded.insert({dummy: 1}));
+assert.commandWorked(dbA.unsharded.insert({dummy: 1}));
var shardName = config.databases.findOne({_id: dbA.getName()}).primary;
var shardHostConn = new Mongo(config.shards.findOne({_id: shardName}).host);
var dbAOnShard = shardHostConn.getDB(dbA.getName());
diff --git a/jstests/sharding/empty_doc_results.js b/jstests/sharding/empty_doc_results.js
index 65fe1cccd7f..0d2f8a49cb6 100644
--- a/jstests/sharding/empty_doc_results.js
+++ b/jstests/sharding/empty_doc_results.js
@@ -23,7 +23,7 @@ for (var i = -50; i < 50; i++) {
var doc = {};
if (i >= 0)
doc.positiveId = true;
- assert.writeOK(coll.insert(doc));
+ assert.commandWorked(coll.insert(doc));
}
//
diff --git a/jstests/sharding/enable_sharding_basic.js b/jstests/sharding/enable_sharding_basic.js
index d185ff11b6e..046b4f6e520 100644
--- a/jstests/sharding/enable_sharding_basic.js
+++ b/jstests/sharding/enable_sharding_basic.js
@@ -37,7 +37,7 @@ assert.commandWorked(st.s0.adminCommand({enableSharding: 'db'}));
assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
// Verify config.databases metadata.
-assert.writeOK(st.s0.getDB('unsharded').foo.insert({aKey: "aValue"}));
+assert.commandWorked(st.s0.getDB('unsharded').foo.insert({aKey: "aValue"}));
assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, false);
assert.commandWorked(st.s0.adminCommand({enableSharding: 'unsharded'}));
assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, true);
@@ -46,8 +46,8 @@ assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitione
assert.commandFailed(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
assert.commandFailed(st.s1.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
-assert.writeOK(st.s0.getDB('TestDB').TestColl.insert({_id: 0}));
-assert.writeOK(st.s1.getDB('TestDB').TestColl.insert({_id: 1}));
+assert.commandWorked(st.s0.getDB('TestDB').TestColl.insert({_id: 0}));
+assert.commandWorked(st.s1.getDB('TestDB').TestColl.insert({_id: 1}));
// Calling 'enableSharding' on one mongos and 'shardCollection' through another must work
assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
diff --git a/jstests/sharding/enforce_zone_policy.js b/jstests/sharding/enforce_zone_policy.js
index 259d05ff716..259b8443fee 100644
--- a/jstests/sharding/enforce_zone_policy.js
+++ b/jstests/sharding/enforce_zone_policy.js
@@ -15,7 +15,7 @@ var bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < 9; i++) {
bulk.insert({_id: i, x: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));
diff --git a/jstests/sharding/error_during_agg_getmore.js b/jstests/sharding/error_during_agg_getmore.js
index 74933437c16..04aaef554f4 100644
--- a/jstests/sharding/error_during_agg_getmore.js
+++ b/jstests/sharding/error_during_agg_getmore.js
@@ -26,8 +26,8 @@ assert.commandWorked(mongosDB.adminCommand(
{moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
// Write a document to each chunk.
-assert.writeOK(mongosColl.insert({_id: -1}));
-assert.writeOK(mongosColl.insert({_id: 1}));
+assert.commandWorked(mongosColl.insert({_id: -1}));
+assert.commandWorked(mongosColl.insert({_id: 1}));
// Delay messages between shard 1 and the mongos, long enough that shard 1's responses will
// likely arrive after the response from shard 0, but not so long that the background cluster
diff --git a/jstests/sharding/error_propagation.js b/jstests/sharding/error_propagation.js
index 7fe4822e295..5845581a5f1 100644
--- a/jstests/sharding/error_propagation.js
+++ b/jstests/sharding/error_propagation.js
@@ -10,11 +10,11 @@ var st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 3}});
var db = st.getDB('test');
db.setSlaveOk(true);
-assert.writeOK(db.foo.insert({a: 1}, {writeConcern: {w: 3}}));
+assert.commandWorked(db.foo.insert({a: 1}, {writeConcern: {w: 3}}));
assert.commandWorked(db.runCommand(
{aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}}));
-assert.writeOK(db.foo.insert({a: [1, 2]}, {writeConcern: {w: 3}}));
+assert.commandWorked(db.foo.insert({a: [1, 2]}, {writeConcern: {w: 3}}));
var res = db.runCommand(
{aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}});
diff --git a/jstests/sharding/exact_shard_key_target.js b/jstests/sharding/exact_shard_key_target.js
index 7ff31a97dda..d0670c20172 100644
--- a/jstests/sharding/exact_shard_key_target.js
+++ b/jstests/sharding/exact_shard_key_target.js
@@ -26,10 +26,10 @@ st.printShardingStatus();
//
// JustOne remove
coll.remove({});
-assert.writeOK(coll.insert({_id: 1, a: {b: -1}}));
-assert.writeOK(coll.insert({_id: 2, a: {b: 1}}));
+assert.commandWorked(coll.insert({_id: 1, a: {b: -1}}));
+assert.commandWorked(coll.insert({_id: 2, a: {b: 1}}));
// Need orphaned data to see the impact
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 3, a: {b: 1}}));
+assert.commandWorked(st.shard0.getCollection(coll.toString()).insert({_id: 3, a: {b: 1}}));
assert.eq(1, coll.remove({a: {b: 1}}, {justOne: true}).nRemoved);
assert.eq(2,
st.shard0.getCollection(coll.toString()).count() +
@@ -38,10 +38,10 @@ assert.eq(2,
//
// Non-multi update
coll.remove({});
-assert.writeOK(coll.insert({_id: 1, a: {b: 1}}));
-assert.writeOK(coll.insert({_id: 2, a: {b: -1}}));
+assert.commandWorked(coll.insert({_id: 1, a: {b: 1}}));
+assert.commandWorked(coll.insert({_id: 2, a: {b: -1}}));
// Need orphaned data to see the impact
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 3, a: {b: 1}}));
+assert.commandWorked(st.shard0.getCollection(coll.toString()).insert({_id: 3, a: {b: 1}}));
assert.eq(1, coll.update({a: {b: 1}}, {$set: {updated: true}}, {multi: false}).nMatched);
assert.eq(1,
st.shard0.getCollection(coll.toString()).count({updated: true}) +
@@ -50,8 +50,8 @@ assert.eq(1,
//
// Successive upserts (replacement-style)
coll.remove({});
-assert.writeOK(coll.update({a: {b: 1}}, {a: {b: 1}}, {upsert: true}));
-assert.writeOK(coll.update({a: {b: 1}}, {a: {b: 1}}, {upsert: true}));
+assert.commandWorked(coll.update({a: {b: 1}}, {a: {b: 1}}, {upsert: true}));
+assert.commandWorked(coll.update({a: {b: 1}}, {a: {b: 1}}, {upsert: true}));
assert.eq(1,
st.shard0.getCollection(coll.toString()).count() +
st.shard1.getCollection(coll.toString()).count());
@@ -59,8 +59,8 @@ assert.eq(1,
//
// Successive upserts ($op-style)
coll.remove({});
-assert.writeOK(coll.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true}));
-assert.writeOK(coll.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true}));
+assert.commandWorked(coll.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true}));
+assert.commandWorked(coll.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true}));
assert.eq(1,
st.shard0.getCollection(coll.toString()).count() +
st.shard1.getCollection(coll.toString()).count());
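Upserts follow the same pattern: coll.update(..., {upsert: true}) returns a WriteResult whose nUpserted and nMatched fields distinguish the initial insert from the follow-up match, and assert.commandWorked accepts it directly. A sketch under the same assumptions (hypothetical collection, plain mongo shell):

    // Sketch only: 'db.scratch' is a hypothetical collection.
    var res = db.scratch.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true});
    assert.commandWorked(res);
    assert.eq(1, res.nUpserted);  // first call inserts the document
    res = db.scratch.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true});
    assert.commandWorked(res);
    assert.eq(1, res.nMatched);   // second call matches the existing document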
diff --git a/jstests/sharding/explainFind_stale_mongos.js b/jstests/sharding/explainFind_stale_mongos.js
index 93a5d1489cc..7cf3024a545 100644
--- a/jstests/sharding/explainFind_stale_mongos.js
+++ b/jstests/sharding/explainFind_stale_mongos.js
@@ -15,7 +15,7 @@ let staleMongos = st.s0;
let freshMongos = st.s1;
jsTest.log("Make the stale mongos load a cache entry for db " + dbName + " once");
-assert.writeOK(staleMongos.getDB(dbName).getCollection(collName).insert({_id: 1}));
+assert.commandWorked(staleMongos.getDB(dbName).getCollection(collName).insert({_id: 1}));
jsTest.log("Call shardCollection on " + ns + " from the fresh mongos");
assert.commandWorked(freshMongos.adminCommand({enableSharding: dbName}));
diff --git a/jstests/sharding/explain_agg_read_pref.js b/jstests/sharding/explain_agg_read_pref.js
index 0e774e4d8a8..63e4f3362f7 100644
--- a/jstests/sharding/explain_agg_read_pref.js
+++ b/jstests/sharding/explain_agg_read_pref.js
@@ -41,7 +41,7 @@ const rs1Primary = st.rs1.getPrimary();
const rs1Secondary = st.rs1.getSecondary();
for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
//
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index e92e9c4d713..92e5ff5b468 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -19,8 +19,8 @@ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 10}}));
assert.commandWorked(s.s0.adminCommand(
{movechunk: "test.foo", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name}));
-assert.writeOK(db.foo.insert({num: 5}));
-assert.writeOK(db.foo.save({num: 15}));
+assert.commandWorked(db.foo.insert({num: 5}));
+assert.commandWorked(db.foo.save({num: 15}));
let a = s.rs0.getPrimary().getDB("test");
let b = s.rs1.getPrimary().getDB("test");
@@ -66,8 +66,8 @@ assert.commandWorked(s.s0.adminCommand({split: "test.foo4", middle: {num: 10}}))
assert.commandWorked(s.s0.adminCommand(
{movechunk: "test.foo4", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name}));
-assert.writeOK(db.foo4.insert({num: 5}));
-assert.writeOK(db.foo4.insert({num: 15}));
+assert.commandWorked(db.foo4.insert({num: 5}));
+assert.commandWorked(db.foo4.insert({num: 15}));
assert.eq(1, a.foo4.count(), "ua1");
assert.eq(1, b.foo4.count(), "ub1");
@@ -79,7 +79,7 @@ assert(a.foo4.getIndexes()[1].unique, "ua3");
assert(b.foo4.getIndexes()[1].unique, "ub3");
assert.eq(2, db.foo4.count(), "uc1");
-assert.writeOK(db.foo4.insert({num: 7}));
+assert.commandWorked(db.foo4.insert({num: 7}));
assert.eq(3, db.foo4.count(), "uc2");
assert.writeError(db.foo4.insert({num: 7}));
assert.eq(3, db.foo4.count(), "uc4");
@@ -106,12 +106,12 @@ assert(db.foo5.isCapped(), "cb1");
assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo5", key: {num: 1}}));
// ---- can't shard non-empty collection without index -----
-assert.writeOK(db.foo8.insert({a: 1}));
+assert.commandWorked(db.foo8.insert({a: 1}));
assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo8", key: {a: 1}}),
"non-empty collection");
// ---- can't shard non-empty collection with null values in shard key ----
-assert.writeOK(db.foo9.insert({b: 1}));
+assert.commandWorked(db.foo9.insert({b: 1}));
assert.commandWorked(db.foo9.createIndex({a: 1}));
assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo9", key: {a: 1}}),
"entry with null value");
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index 0ccf500804f..5edad528114 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -37,7 +37,7 @@ var bulk = dbForTest.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
var x = dbForTest.foo.stats();
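Bulk writes return a BulkWriteResult rather than a WriteResult, and assert.commandWorked validates that shape as well, which is why the bulk.execute() call sites convert mechanically. A sketch (hypothetical collection name, assuming the shell's bulk API):

    // Sketch only: 'db.scratch' is a hypothetical collection.
    var bulk = db.scratch.initializeUnorderedBulkOp();
    for (var i = 0; i < 100; i++) {
        bulk.insert({_id: i});
    }
    assert.commandWorked(bulk.execute());  // throws if any write in the batch failed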
diff --git a/jstests/sharding/find_getmore_cmd.js b/jstests/sharding/find_getmore_cmd.js
index d711c7cda53..3591d938e07 100644
--- a/jstests/sharding/find_getmore_cmd.js
+++ b/jstests/sharding/find_getmore_cmd.js
@@ -15,12 +15,12 @@ var db = st.s.getDB("test");
var coll = db.getCollection("find_getmore_cmd");
coll.drop();
-assert.writeOK(coll.insert({_id: -9, a: 4, b: "foo foo"}));
-assert.writeOK(coll.insert({_id: -5, a: 8}));
-assert.writeOK(coll.insert({_id: -1, a: 10, b: "foo"}));
-assert.writeOK(coll.insert({_id: 1, a: 5}));
-assert.writeOK(coll.insert({_id: 5, a: 20, b: "foo foo foo"}));
-assert.writeOK(coll.insert({_id: 9, a: 3}));
+assert.commandWorked(coll.insert({_id: -9, a: 4, b: "foo foo"}));
+assert.commandWorked(coll.insert({_id: -5, a: 8}));
+assert.commandWorked(coll.insert({_id: -1, a: 10, b: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, a: 5}));
+assert.commandWorked(coll.insert({_id: 5, a: 20, b: "foo foo foo"}));
+assert.commandWorked(coll.insert({_id: 9, a: 3}));
assert.commandWorked(coll.ensureIndex({b: "text"}));
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
index 001a9a386d7..75698ec45a0 100644
--- a/jstests/sharding/findandmodify1.js
+++ b/jstests/sharding/findandmodify1.js
@@ -35,7 +35,7 @@ var bulk = db.sharded_coll.initializeUnorderedBulkOp();
for (var i = 0; i < numObjs; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Put two docs in each chunk (avoid the split in 0, since there are no docs less than 0)
for (var i = 2; i < numObjs; i += 2) {
diff --git a/jstests/sharding/forget_mr_temp_ns.js b/jstests/sharding/forget_mr_temp_ns.js
index fd950bcf43c..7a9d7845d6b 100644
--- a/jstests/sharding/forget_mr_temp_ns.js
+++ b/jstests/sharding/forget_mr_temp_ns.js
@@ -13,7 +13,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 10; i++) {
bulk.insert({_id: i, even: (i % 2 == 0)});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
var map = function() {
emit(this.even, 1);
diff --git a/jstests/sharding/fts_score_sort_sharded.js b/jstests/sharding/fts_score_sort_sharded.js
index 2145e987558..e49703821f3 100644
--- a/jstests/sharding/fts_score_sort_sharded.js
+++ b/jstests/sharding/fts_score_sort_sharded.js
@@ -22,10 +22,10 @@ assert.commandWorked(
//
// Insert documents into collection and create text index.
//
-assert.writeOK(coll.insert({_id: 1, a: "pizza"}));
-assert.writeOK(coll.insert({_id: -1, a: "pizza pizza"}));
-assert.writeOK(coll.insert({_id: 2, a: "pizza pizza pizza"}));
-assert.writeOK(coll.insert({_id: -2, a: "pizza pizza pizza pizza"}));
+assert.commandWorked(coll.insert({_id: 1, a: "pizza"}));
+assert.commandWorked(coll.insert({_id: -1, a: "pizza pizza"}));
+assert.commandWorked(coll.insert({_id: 2, a: "pizza pizza pizza"}));
+assert.commandWorked(coll.insert({_id: -2, a: "pizza pizza pizza pizza"}));
assert.commandWorked(coll.ensureIndex({a: "text"}));
//
diff --git a/jstests/sharding/geo_near_sharded.js b/jstests/sharding/geo_near_sharded.js
index 714876e7fda..1d7247b3771 100644
--- a/jstests/sharding/geo_near_sharded.js
+++ b/jstests/sharding/geo_near_sharded.js
@@ -36,7 +36,7 @@ function test(st, db, sharded, indexType) {
var lng = 180 - Random.rand() * 360;
bulk.insert({rand: Math.random(), loc: [lng, lat]});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
assert.eq(db[coll].count(), numPts);
assert.commandWorked(db[coll].ensureIndex({loc: indexType}));
diff --git a/jstests/sharding/geo_near_sort.js b/jstests/sharding/geo_near_sort.js
index e2f0292904e..8c3a19465c7 100644
--- a/jstests/sharding/geo_near_sort.js
+++ b/jstests/sharding/geo_near_sort.js
@@ -46,10 +46,10 @@ const doc2 = {
a: "aa",
b: 0
};
-assert.writeOK(coll.insert(docMinus2));
-assert.writeOK(coll.insert(docMinus1));
-assert.writeOK(coll.insert(doc1));
-assert.writeOK(coll.insert(doc2));
+assert.commandWorked(coll.insert(docMinus2));
+assert.commandWorked(coll.insert(docMinus1));
+assert.commandWorked(coll.insert(doc1));
+assert.commandWorked(coll.insert(doc2));
function testSortOrders(query, indexSpec) {
assert.commandWorked(coll.createIndex(indexSpec));
diff --git a/jstests/sharding/graph_lookup.js b/jstests/sharding/graph_lookup.js
index 4678ba2f9a9..90922f815a2 100644
--- a/jstests/sharding/graph_lookup.js
+++ b/jstests/sharding/graph_lookup.js
@@ -9,8 +9,8 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: "test.foo", key: {_id:
let db = st.s0.getDB("test");
-assert.writeOK(db.foo.insert([{}, {}, {}, {}]));
-assert.writeOK(db.bar.insert({_id: 1, x: 1}));
+assert.commandWorked(db.foo.insert([{}, {}, {}, {}]));
+assert.commandWorked(db.bar.insert({_id: 1, x: 1}));
const res = db.foo
.aggregate([{
diff --git a/jstests/sharding/idhack_sharded.js b/jstests/sharding/idhack_sharded.js
index 6b9716ea608..b11e2b0e579 100644
--- a/jstests/sharding/idhack_sharded.js
+++ b/jstests/sharding/idhack_sharded.js
@@ -17,7 +17,7 @@ assert.commandWorked(coll.getDB().adminCommand(
// Test that idhack queries with projections that remove the shard key return correct results.
// SERVER-14032.
//
-assert.writeOK(coll.insert({_id: 1, x: 1, y: 1}));
+assert.commandWorked(coll.insert({_id: 1, x: 1, y: 1}));
assert.eq(1, coll.find().itcount());
assert.eq(1, coll.find({_id: 1}, {x: 0}).itcount());
assert.eq(1, coll.find({_id: 1}, {y: 1}).itcount());
@@ -26,8 +26,8 @@ coll.remove({});
//
// Test that idhack queries with covered projections do not return orphan documents. SERVER-14034.
//
-assert.writeOK(st.shard0.getCollection(coll.getFullName()).insert({_id: 1, x: 1}));
-assert.writeOK(st.shard1.getCollection(coll.getFullName()).insert({_id: 1, x: 1}));
+assert.commandWorked(st.shard0.getCollection(coll.getFullName()).insert({_id: 1, x: 1}));
+assert.commandWorked(st.shard1.getCollection(coll.getFullName()).insert({_id: 1, x: 1}));
assert.eq(2, coll.count());
assert.eq(1, coll.find().itcount());
assert.eq(1, coll.find({_id: 1}, {_id: 1}).itcount());
diff --git a/jstests/sharding/implicit_db_creation.js b/jstests/sharding/implicit_db_creation.js
index 0f45dbb94ae..03c460bdeff 100644
--- a/jstests/sharding/implicit_db_creation.js
+++ b/jstests/sharding/implicit_db_creation.js
@@ -15,14 +15,14 @@ var testDB = st.s.getDB('test');
assert.eq(null, testDB.user.findOne());
assert.eq(null, configDB.databases.findOne({_id: 'test'}));
-assert.writeOK(testDB.user.insert({x: 1}));
+assert.commandWorked(testDB.user.insert({x: 1}));
var testDBDoc = configDB.databases.findOne();
assert.eq('test', testDBDoc._id, tojson(testDBDoc));
// Test that inserting to another collection in the same database will not modify the existing
// config.databases entry.
-assert.writeOK(testDB.bar.insert({y: 1}));
+assert.commandWorked(testDB.bar.insert({y: 1}));
assert.eq(testDBDoc, configDB.databases.findOne());
st.s.adminCommand({enableSharding: 'foo'});
diff --git a/jstests/sharding/in_memory_sort_limit.js b/jstests/sharding/in_memory_sort_limit.js
index a017b463037..d93392537af 100644
--- a/jstests/sharding/in_memory_sort_limit.js
+++ b/jstests/sharding/in_memory_sort_limit.js
@@ -25,7 +25,7 @@ var bulkOp = mongosCol.initializeOrderedBulkOp();
for (var i = 0; i < 12800; i++) {
bulkOp.insert({x: i, str: filler});
}
-assert.writeOK(bulkOp.execute());
+assert.commandWorked(bulkOp.execute());
var passLimit = 2000;
var failLimit = 4000;
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
index b440c535230..67c4c3ceeb6 100644
--- a/jstests/sharding/index1.js
+++ b/jstests/sharding/index1.js
@@ -12,7 +12,7 @@ for (var i = 0; i < 22; i++) {
for (var j = 0; j < 300; j++) {
bulk.insert({num: j, x: 1});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
if (i == 0) {
s.adminCommand({enablesharding: "" + coll._db});
diff --git a/jstests/sharding/inserts_consistent.js b/jstests/sharding/inserts_consistent.js
index 4bea7d95474..79bfcecfcf5 100644
--- a/jstests/sharding/inserts_consistent.js
+++ b/jstests/sharding/inserts_consistent.js
@@ -31,13 +31,13 @@ jsTest.log("Inserting docs that needs to be retried...");
var nextId = -1;
for (var i = 0; i < 2; i++) {
printjson("Inserting " + nextId);
- assert.writeOK(collB.insert({_id: nextId--, hello: "world"}));
+ assert.commandWorked(collB.insert({_id: nextId--, hello: "world"}));
}
jsTest.log("Inserting doc which successfully goes through...");
// Do second write
-assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
+assert.commandWorked(collB.insert({_id: nextId--, goodbye: "world"}));
// Assert that write went through
assert.eq(coll.find().itcount(), 3);
@@ -60,7 +60,7 @@ printjson(adminB.runCommand({flushRouterConfig: 1}));
jsTest.log("Inserting second doc which successfully goes through...");
// Do second write
-assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
+assert.commandWorked(collB.insert({_id: nextId--, goodbye: "world"}));
jsTest.log("All docs written this time!");
diff --git a/jstests/sharding/invalid_system_views_sharded_collection.js b/jstests/sharding/invalid_system_views_sharded_collection.js
index 899d4482987..62189edcffc 100644
--- a/jstests/sharding/invalid_system_views_sharded_collection.js
+++ b/jstests/sharding/invalid_system_views_sharded_collection.js
@@ -26,10 +26,10 @@ function runTest(st, badViewDefinition) {
assert.commandWorked(viewsCollection.createIndex({x: 1}));
const unshardedColl = db.getCollection("unshardedColl");
- assert.writeOK(unshardedColl.insert({b: "boo"}));
+ assert.commandWorked(unshardedColl.insert({b: "boo"}));
- assert.writeOK(db.system.views.insert(badViewDefinition),
- "failed to insert " + tojson(badViewDefinition));
+ assert.commandWorked(db.system.views.insert(badViewDefinition),
+ "failed to insert " + tojson(badViewDefinition));
// Test that a command involving views properly fails with a views-specific error code.
assert.commandFailedWithCode(
@@ -44,12 +44,12 @@ function runTest(st, badViewDefinition) {
" in system.views";
}
- assert.writeOK(viewsCollection.insert({y: "baz", a: 5}), makeErrorMessage("insert"));
+ assert.commandWorked(viewsCollection.insert({y: "baz", a: 5}), makeErrorMessage("insert"));
- assert.writeOK(viewsCollection.update({y: "baz"}, {$set: {y: "qux"}}),
- makeErrorMessage("update"));
+ assert.commandWorked(viewsCollection.update({y: "baz"}, {$set: {y: "qux"}}),
+ makeErrorMessage("update"));
- assert.writeOK(viewsCollection.remove({y: "baz"}), makeErrorMessage("remove"));
+ assert.commandWorked(viewsCollection.remove({y: "baz"}), makeErrorMessage("remove"));
assert.commandWorked(
db.runCommand(
diff --git a/jstests/sharding/json_schema.js b/jstests/sharding/json_schema.js
index 5a4a68102b7..b2a1ff21c77 100644
--- a/jstests/sharding/json_schema.js
+++ b/jstests/sharding/json_schema.js
@@ -30,10 +30,10 @@ assert.commandWorked(testDB.adminCommand(
{moveChunk: coll.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
// Write one document into each of the chunks.
-assert.writeOK(coll.insert({_id: -150, a: 1}));
-assert.writeOK(coll.insert({_id: -50, a: 10}));
-assert.writeOK(coll.insert({_id: 50, a: "str"}));
-assert.writeOK(coll.insert({_id: 150}));
+assert.commandWorked(coll.insert({_id: -150, a: 1}));
+assert.commandWorked(coll.insert({_id: -50, a: 10}));
+assert.commandWorked(coll.insert({_id: 50, a: "str"}));
+assert.commandWorked(coll.insert({_id: 150}));
// Test that $jsonSchema in a find command returns the correct results.
assert.eq(4, coll.find({$jsonSchema: {}}).itcount());
@@ -46,7 +46,7 @@ let res = coll.update(
{$jsonSchema: {properties: {_id: {type: "number", minimum: 100}, a: {type: "number"}}}},
{$inc: {a: 1}},
{multi: true});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(1, res.nModified);
const schema = {
@@ -54,7 +54,7 @@ const schema = {
required: ["_id"]
};
res = coll.update({$jsonSchema: schema}, {$set: {b: 1}}, {multi: true});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(1, res.nModified);
// Test that $jsonSchema works correctly in a findAndModify command.
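The json_schema.js hunks above also rely on the assertion not consuming its argument: res is still the same WriteResult afterward (assert.commandWorked returns what it was given), so follow-up checks on nModified keep working. A sketch, same assumptions as before:

    // Sketch only: 'db.scratch' is a hypothetical collection.
    var res = db.scratch.update({x: 1}, {$set: {y: 2}}, {multi: true});
    assert.commandWorked(res);    // validates the WriteResult
    assert.eq(1, res.nModified);  // its fields remain readable afterward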
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index fe8e04e7492..f26b0f36268 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -24,7 +24,7 @@ for (; x < 1500; x++) {
bulk.insert({x: x, big: big});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
s.printShardingStatus(true);
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index e0d19e8874b..86ac72100bd 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -216,7 +216,8 @@ for (var i = 0; i < types.length; i++) {
assert.eq(1, c.find({xx: {$exists: true}}).count(), curT.name + " xx 2 ");
assert.eq(curT.values[3], getKey(c.findOne({xx: 17})), curT.name + " xx 3 ");
- assert.writeOK(c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}}, {upsert: true}));
+ assert.commandWorked(
+ c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}}, {upsert: true}));
assert.commandWorked(c.ensureIndex({_id: 1}));
diff --git a/jstests/sharding/kill_pinned_cursor.js b/jstests/sharding/kill_pinned_cursor.js
index 1c19626ab77..fcf9d0fa96c 100644
--- a/jstests/sharding/kill_pinned_cursor.js
+++ b/jstests/sharding/kill_pinned_cursor.js
@@ -27,7 +27,7 @@ let coll = mongosDB.jstest_kill_pinned_cursor;
coll.drop();
for (let i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
}
st.shardColl(coll, {_id: 1}, {_id: 5}, {_id: 6}, kDBName, false);
diff --git a/jstests/sharding/killop.js b/jstests/sharding/killop.js
index 7f2e4d23173..bedc712eb3d 100644
--- a/jstests/sharding/killop.js
+++ b/jstests/sharding/killop.js
@@ -9,7 +9,7 @@ const conn = st.s;
const db = conn.getDB("killOp");
const coll = db.test;
-assert.writeOK(db.getCollection(coll.getName()).insert({x: 1}));
+assert.commandWorked(db.getCollection(coll.getName()).insert({x: 1}));
const kFailPointName = "waitInFindBeforeMakingBatch";
assert.commandWorked(conn.adminCommand({"configureFailPoint": kFailPointName, "mode": "alwaysOn"}));
diff --git a/jstests/sharding/lagged_config_secondary.js b/jstests/sharding/lagged_config_secondary.js
index 35e38722edb..7fecb777de5 100644
--- a/jstests/sharding/lagged_config_secondary.js
+++ b/jstests/sharding/lagged_config_secondary.js
@@ -22,13 +22,13 @@ var configSecondaryList = st.configRS.getSecondaries();
var configSecondaryToKill = configSecondaryList[0];
var delayedConfigSecondary = configSecondaryList[1];
-assert.writeOK(testDB.user.insert({_id: 1}));
+assert.commandWorked(testDB.user.insert({_id: 1}));
delayedConfigSecondary.getDB('admin').adminCommand(
{configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
// Do one metadata write in order to bump the optime on mongos
-assert.writeOK(st.getDB('config').TestConfigColl.insert({TestKey: 'Test value'}));
+assert.commandWorked(st.getDB('config').TestConfigColl.insert({TestKey: 'Test value'}));
st.configRS.stopMaster();
MongoRunner.stopMongod(configSecondaryToKill);
diff --git a/jstests/sharding/large_chunk.js b/jstests/sharding/large_chunk.js
index c3df1b4baf2..eb5bc916196 100644
--- a/jstests/sharding/large_chunk.js
+++ b/jstests/sharding/large_chunk.js
@@ -33,7 +33,7 @@ while (inserted < (400 * 1024 * 1024)) {
bulk.insert({_id: num++, s: bigString});
inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
diff --git a/jstests/sharding/large_skip_one_shard.js b/jstests/sharding/large_skip_one_shard.js
index e1f717a5f5a..95637ee0a50 100644
--- a/jstests/sharding/large_skip_one_shard.js
+++ b/jstests/sharding/large_skip_one_shard.js
@@ -18,7 +18,7 @@ assert(admin.runCommand({moveChunk: collSharded + "", find: {_id: 0}, to: st.sha
function testSelectWithSkip(coll) {
for (var i = -100; i < 100; i++) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
}
// Run a query which only requires 5 results from a single shard
diff --git a/jstests/sharding/linearizable_read_concern.js b/jstests/sharding/linearizable_read_concern.js
index 9b3ac62acce..d83362a898c 100644
--- a/jstests/sharding/linearizable_read_concern.js
+++ b/jstests/sharding/linearizable_read_concern.js
@@ -91,7 +91,7 @@ jsTestLog("Testing linearizable read from primaries.");
// Execute a linearizable read from primaries (targeting both shards) which should succeed.
st.s.setReadPref("primary");
-var res = assert.writeOK(testDB.runReadCommand({
+var res = assert.commandWorked(testDB.runReadCommand({
find: collName,
sort: {x: 1},
filter: dualShardQueryFilter,
diff --git a/jstests/sharding/listDatabases.js b/jstests/sharding/listDatabases.js
index ce13ea5871d..fe34dbe0aa7 100644
--- a/jstests/sharding/listDatabases.js
+++ b/jstests/sharding/listDatabases.js
@@ -31,9 +31,9 @@ var dbEntryCheck = function(dbEntry, onConfig) {
// Non-config-server db checks.
{
- assert.writeOK(mongos.getDB("blah").foo.insert({_id: 1}));
- assert.writeOK(mongos.getDB("foo").foo.insert({_id: 1}));
- assert.writeOK(mongos.getDB("raw").foo.insert({_id: 1}));
+ assert.commandWorked(mongos.getDB("blah").foo.insert({_id: 1}));
+ assert.commandWorked(mongos.getDB("foo").foo.insert({_id: 1}));
+ assert.commandWorked(mongos.getDB("raw").foo.insert({_id: 1}));
res = mongos.adminCommand("listDatabases");
dbArray = res.databases;
@@ -53,8 +53,8 @@ var dbEntryCheck = function(dbEntry, onConfig) {
// Admin and config are always reported on the config shard.
{
- assert.writeOK(mongos.getDB("admin").test.insert({_id: 1}));
- assert.writeOK(mongos.getDB("config").test.insert({_id: 1}));
+ assert.commandWorked(mongos.getDB("admin").test.insert({_id: 1}));
+ assert.commandWorked(mongos.getDB("config").test.insert({_id: 1}));
res = mongos.adminCommand("listDatabases");
dbArray = res.databases;
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index 55b7548d6db..c2f9a8a75ad 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -135,9 +135,9 @@ var assertCanRunCommands = function(mongo, st) {
// this will throw if it fails
test.system.users.findOne();
- assert.writeOK(test.foo.save({_id: 0}));
- assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}}));
- assert.writeOK(test.foo.remove({_id: 0}));
+ assert.commandWorked(test.foo.save({_id: 0}));
+ assert.commandWorked(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.commandWorked(test.foo.remove({_id: 0}));
// Multi-shard
test.foo.mapReduce(
diff --git a/jstests/sharding/lookup.js b/jstests/sharding/lookup.js
index 3c0364bd6a4..82a8c63624b 100644
--- a/jstests/sharding/lookup.js
+++ b/jstests/sharding/lookup.js
@@ -42,13 +42,13 @@ function runTest(coll, from, thirdColl, fourthColl) {
assert.commandWorked(thirdColl.remove({}));
assert.commandWorked(fourthColl.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: null}));
- assert.writeOK(coll.insert({_id: 2}));
+ assert.commandWorked(coll.insert({_id: 0, a: 1}));
+ assert.commandWorked(coll.insert({_id: 1, a: null}));
+ assert.commandWorked(coll.insert({_id: 2}));
- assert.writeOK(from.insert({_id: 0, b: 1}));
- assert.writeOK(from.insert({_id: 1, b: null}));
- assert.writeOK(from.insert({_id: 2}));
+ assert.commandWorked(from.insert({_id: 0, b: 1}));
+ assert.commandWorked(from.insert({_id: 1, b: null}));
+ assert.commandWorked(from.insert({_id: 2}));
//
// Basic functionality.
@@ -258,17 +258,17 @@ function runTest(coll, from, thirdColl, fourthColl) {
//
assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: null}));
- assert.writeOK(coll.insert({_id: 2}));
- assert.writeOK(coll.insert({_id: 3, a: {c: 1}}));
+ assert.commandWorked(coll.insert({_id: 0, a: 1}));
+ assert.commandWorked(coll.insert({_id: 1, a: null}));
+ assert.commandWorked(coll.insert({_id: 2}));
+ assert.commandWorked(coll.insert({_id: 3, a: {c: 1}}));
assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, b: 1}));
- assert.writeOK(from.insert({_id: 1, b: null}));
- assert.writeOK(from.insert({_id: 2}));
- assert.writeOK(from.insert({_id: 3, b: {c: 1}}));
- assert.writeOK(from.insert({_id: 4, b: {c: 2}}));
+ assert.commandWorked(from.insert({_id: 0, b: 1}));
+ assert.commandWorked(from.insert({_id: 1, b: null}));
+ assert.commandWorked(from.insert({_id: 2}));
+ assert.commandWorked(from.insert({_id: 3, b: {c: 1}}));
+ assert.commandWorked(from.insert({_id: 4, b: {c: 2}}));
// Once without a dotted field.
let pipeline = [{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}];
@@ -293,11 +293,11 @@ function runTest(coll, from, thirdColl, fourthColl) {
// With an $unwind stage.
assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: {b: 1}}));
- assert.writeOK(coll.insert({_id: 1}));
+ assert.commandWorked(coll.insert({_id: 0, a: {b: 1}}));
+ assert.commandWorked(coll.insert({_id: 1}));
assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, target: 1}));
+ assert.commandWorked(from.insert({_id: 0, target: 1}));
pipeline = [
{
@@ -331,11 +331,11 @@ function runTest(coll, from, thirdColl, fourthColl) {
// This must only do an equality match rather than treating the value as a regex.
assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: /a regex/}));
+ assert.commandWorked(coll.insert({_id: 0, a: /a regex/}));
assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, b: /a regex/}));
- assert.writeOK(from.insert({_id: 1, b: "string that matches /a regex/"}));
+ assert.commandWorked(from.insert({_id: 0, b: /a regex/}));
+ assert.commandWorked(from.insert({_id: 1, b: "string that matches /a regex/"}));
pipeline = [
{
@@ -356,11 +356,11 @@ function runTest(coll, from, thirdColl, fourthColl) {
// Basic array corresponding to multiple documents.
assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [0, 1, 2]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [0, 1, 2]}));
assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0}));
- assert.writeOK(from.insert({_id: 1}));
+ assert.commandWorked(from.insert({_id: 0}));
+ assert.commandWorked(from.insert({_id: 1}));
pipeline = [
{
@@ -377,11 +377,11 @@ function runTest(coll, from, thirdColl, fourthColl) {
// Basic array corresponding to a single document.
assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [1]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [1]}));
assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0}));
- assert.writeOK(from.insert({_id: 1}));
+ assert.commandWorked(from.insert({_id: 0}));
+ assert.commandWorked(from.insert({_id: 1}));
pipeline = [
{
@@ -398,14 +398,14 @@ function runTest(coll, from, thirdColl, fourthColl) {
// Array containing regular expressions.
assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [/a regex/, /^x/]}));
- assert.writeOK(coll.insert({_id: 1, a: [/^x/]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [/a regex/, /^x/]}));
+ assert.commandWorked(coll.insert({_id: 1, a: [/^x/]}));
assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, b: "should not match a regex"}));
- assert.writeOK(from.insert({_id: 1, b: "xxxx"}));
- assert.writeOK(from.insert({_id: 2, b: /a regex/}));
- assert.writeOK(from.insert({_id: 3, b: /^x/}));
+ assert.commandWorked(from.insert({_id: 0, b: "should not match a regex"}));
+ assert.commandWorked(from.insert({_id: 1, b: "xxxx"}));
+ assert.commandWorked(from.insert({_id: 2, b: /a regex/}));
+ assert.commandWorked(from.insert({_id: 3, b: /^x/}));
pipeline = [
{
@@ -425,13 +425,13 @@ function runTest(coll, from, thirdColl, fourthColl) {
// 'localField' references a field within an array of sub-objects.
assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [{b: 1}, {b: 2}]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [{b: 1}, {b: 2}]}));
assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0}));
- assert.writeOK(from.insert({_id: 1}));
- assert.writeOK(from.insert({_id: 2}));
- assert.writeOK(from.insert({_id: 3}));
+ assert.commandWorked(from.insert({_id: 0}));
+ assert.commandWorked(from.insert({_id: 1}));
+ assert.commandWorked(from.insert({_id: 2}));
+ assert.commandWorked(from.insert({_id: 3}));
pipeline = [
{
diff --git a/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
index 9d71a70e135..4f010f1bb86 100644
--- a/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
+++ b/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
@@ -64,8 +64,8 @@ function shardKeyFromId(id) {
// Do some writes.
for (let id = 0; id < nDocs; ++id) {
const documentKey = Object.merge({_id: id}, shardKeyFromId(id));
- assert.writeOK(mongosColl.insert(documentKey));
- assert.writeOK(mongosColl.update(documentKey, {$set: {updatedCount: 1}}));
+ assert.commandWorked(mongosColl.insert(documentKey));
+ assert.commandWorked(mongosColl.update(documentKey, {$set: {updatedCount: 1}}));
}
[changeStreamSingleColl, changeStreamWholeDb].forEach(function(changeStream) {
@@ -89,7 +89,7 @@ for (let id = 0; id < nDocs; ++id) {
// migrated.
for (let id = 0; id < nDocs; ++id) {
const documentKey = Object.merge({_id: id}, shardKeyFromId(id));
- assert.writeOK(mongosColl.update(documentKey, {$set: {updatedCount: 2}}));
+ assert.commandWorked(mongosColl.update(documentKey, {$set: {updatedCount: 2}}));
}
// Move the upper chunk back to shard 0.
diff --git a/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
index 058a92c6832..422fb652d4a 100644
--- a/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
+++ b/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
@@ -55,8 +55,8 @@ const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updat
// Write enough documents that we likely have some on each shard.
const nDocs = 1000;
for (let id = 0; id < nDocs; ++id) {
- assert.writeOK(mongosColl.insert({_id: id, shardKey: id}));
- assert.writeOK(mongosColl.update({shardKey: id}, {$set: {updatedCount: 1}}));
+ assert.commandWorked(mongosColl.insert({_id: id, shardKey: id}));
+ assert.commandWorked(mongosColl.update({shardKey: id}, {$set: {updatedCount: 1}}));
}
for (let id = 0; id < nDocs; ++id) {
diff --git a/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
index f6235d1082c..d512e9ff66c 100644
--- a/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
+++ b/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
@@ -43,16 +43,16 @@ assert.commandWorked(mongosDB.adminCommand(
{moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
// Write a document to each chunk.
-assert.writeOK(mongosColl.insert({_id: -1}));
-assert.writeOK(mongosColl.insert({_id: 1}));
+assert.commandWorked(mongosColl.insert({_id: -1}));
+assert.commandWorked(mongosColl.insert({_id: 1}));
const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
// Do some writes.
-assert.writeOK(mongosColl.insert({_id: 1000}));
-assert.writeOK(mongosColl.insert({_id: -1000}));
-assert.writeOK(mongosColl.update({_id: 1000}, {$set: {updatedCount: 1}}));
-assert.writeOK(mongosColl.update({_id: -1000}, {$set: {updatedCount: 1}}));
+assert.commandWorked(mongosColl.insert({_id: 1000}));
+assert.commandWorked(mongosColl.insert({_id: -1000}));
+assert.commandWorked(mongosColl.update({_id: 1000}, {$set: {updatedCount: 1}}));
+assert.commandWorked(mongosColl.update({_id: -1000}, {$set: {updatedCount: 1}}));
for (let nextId of [1000, -1000]) {
assert.soon(() => changeStream.hasNext());
@@ -72,8 +72,8 @@ for (let nextId of [1000, -1000]) {
// Test that the change stream can still see the updated post image, even if a chunk is
// migrated.
-assert.writeOK(mongosColl.update({_id: 1000}, {$set: {updatedCount: 2}}));
-assert.writeOK(mongosColl.update({_id: -1000}, {$set: {updatedCount: 2}}));
+assert.commandWorked(mongosColl.update({_id: 1000}, {$set: {updatedCount: 2}}));
+assert.commandWorked(mongosColl.update({_id: -1000}, {$set: {updatedCount: 2}}));
// Split the [0, MaxKey) chunk into 2: [0, 500), [500, MaxKey).
assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 500}}));
diff --git a/jstests/sharding/lookup_mongod_unaware.js b/jstests/sharding/lookup_mongod_unaware.js
index 2750425205e..56a4beafee9 100644
--- a/jstests/sharding/lookup_mongod_unaware.js
+++ b/jstests/sharding/lookup_mongod_unaware.js
@@ -73,16 +73,16 @@ const expectedResults = [
assert.commandWorked(mongos0DB.adminCommand({enableSharding: mongos0DB.getName()}));
st.ensurePrimaryShard(mongos0DB.getName(), st.shard0.shardName);
-assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
-assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
+assert.commandWorked(mongos0LocalColl.insert({_id: 0, a: 1}));
+assert.commandWorked(mongos0LocalColl.insert({_id: 1, a: null}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 1, b: null}));
// Send writes through mongos1 such that it's aware of the collections and believes they are
// unsharded.
-assert.writeOK(mongos1LocalColl.insert({_id: 2}));
-assert.writeOK(mongos1ForeignColl.insert({_id: 2}));
+assert.commandWorked(mongos1LocalColl.insert({_id: 2}));
+assert.commandWorked(mongos1ForeignColl.insert({_id: 2}));
//
// Test unsharded local and sharded foreign collections, with the primary shard unaware that
@@ -161,9 +161,9 @@ assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), [
// Recreate the foreign collection as unsharded.
mongos0ForeignColl.drop();
-assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 2}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 1, b: null}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 2}));
// Verify $lookup results through the fresh mongos.
restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
diff --git a/jstests/sharding/lookup_stale_mongos.js b/jstests/sharding/lookup_stale_mongos.js
index f1e71280a18..2c74af07b28 100644
--- a/jstests/sharding/lookup_stale_mongos.js
+++ b/jstests/sharding/lookup_stale_mongos.js
@@ -40,16 +40,16 @@ const expectedResults = [
assert.commandWorked(mongos0DB.adminCommand({enableSharding: mongos0DB.getName()}));
st.ensurePrimaryShard(mongos0DB.getName(), st.shard0.shardName);
-assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
-assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
+assert.commandWorked(mongos0LocalColl.insert({_id: 0, a: 1}));
+assert.commandWorked(mongos0LocalColl.insert({_id: 1, a: null}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 1, b: null}));
// Send writes through mongos1 such that it's aware of the collections and believes they are
// unsharded.
-assert.writeOK(mongos1LocalColl.insert({_id: 2}));
-assert.writeOK(mongos1ForeignColl.insert({_id: 2}));
+assert.commandWorked(mongos1LocalColl.insert({_id: 2}));
+assert.commandWorked(mongos1ForeignColl.insert({_id: 2}));
//
// Test unsharded local and sharded foreign collections, with mongos unaware that the foreign
@@ -106,9 +106,9 @@ assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
// Recreate the foreign collection as unsharded through mongos0.
mongos0ForeignColl.drop();
-assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 2}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 1, b: null}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 2}));
// Issue a $lookup through mongos1, which is unaware that the foreign collection is now
// unsharded.
@@ -121,9 +121,9 @@ assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
// Recreate the local collection as unsharded through mongos0.
mongos0LocalColl.drop();
-assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
-assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
-assert.writeOK(mongos0LocalColl.insert({_id: 2}));
+assert.commandWorked(mongos0LocalColl.insert({_id: 0, a: 1}));
+assert.commandWorked(mongos0LocalColl.insert({_id: 1, a: null}));
+assert.commandWorked(mongos0LocalColl.insert({_id: 2}));
// Issue a $lookup through mongos1, which is unaware that the local collection is now
// unsharded.
diff --git a/jstests/sharding/mapReduce_inSharded.js b/jstests/sharding/mapReduce_inSharded.js
index b51b0111a1e..9abe9c922c2 100644
--- a/jstests/sharding/mapReduce_inSharded.js
+++ b/jstests/sharding/mapReduce_inSharded.js
@@ -25,7 +25,7 @@ for (var j = 0; j < 100; j++) {
bulk.insert({j: j, i: i});
}
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function map() {
emit(this.i, 1);
@@ -74,9 +74,9 @@ verifyOutput(out);
// Ensure that the collation option is propagated to the shards. This uses a case-insensitive
// collation, and the query seeding the mapReduce should only match the document if the
// collation is passed along to the shards.
-assert.writeOK(db.srcSharded.remove({}));
+assert.commandWorked(db.srcSharded.remove({}));
assert.eq(db.srcSharded.find().itcount(), 0);
-assert.writeOK(db.srcSharded.insert({i: 0, j: 0, str: "FOO"}));
+assert.commandWorked(db.srcSharded.insert({i: 0, j: 0, str: "FOO"}));
out = db.srcSharded.mapReduce(
map,
reduce,
diff --git a/jstests/sharding/mapReduce_inSharded_outSharded.js b/jstests/sharding/mapReduce_inSharded_outSharded.js
index 7a8730d2c4d..e8d2c44b94e 100644
--- a/jstests/sharding/mapReduce_inSharded_outSharded.js
+++ b/jstests/sharding/mapReduce_inSharded_outSharded.js
@@ -26,7 +26,7 @@ for (var j = 0; j < 100; j++) {
bulk.insert({j: j, i: i});
}
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function map() {
emit(this.i, 1);
diff --git a/jstests/sharding/mapReduce_nonSharded.js b/jstests/sharding/mapReduce_nonSharded.js
index 07da267d132..004db315f97 100644
--- a/jstests/sharding/mapReduce_nonSharded.js
+++ b/jstests/sharding/mapReduce_nonSharded.js
@@ -21,7 +21,7 @@ for (j = 0; j < 100; j++) {
bulk.insert({j: j, i: i});
}
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function map() {
emit(this.i, 1);
diff --git a/jstests/sharding/mapReduce_outSharded.js b/jstests/sharding/mapReduce_outSharded.js
index eeb88371a7e..60a6ab8c0d0 100644
--- a/jstests/sharding/mapReduce_outSharded.js
+++ b/jstests/sharding/mapReduce_outSharded.js
@@ -21,7 +21,7 @@ for (j = 0; j < 100; j++) {
bulk.insert({j: j, i: i});
}
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function map() {
emit(this.i, 1);
diff --git a/jstests/sharding/mapReduce_outSharded_checkUUID.js b/jstests/sharding/mapReduce_outSharded_checkUUID.js
index 25a499c4bed..5e4386da1d2 100644
--- a/jstests/sharding/mapReduce_outSharded_checkUUID.js
+++ b/jstests/sharding/mapReduce_outSharded_checkUUID.js
@@ -34,7 +34,7 @@ for (var j = 0; j < 100; j++) {
bulk.insert({j: j, i: i});
}
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function map() {
emit(this.i, 1);
@@ -69,8 +69,8 @@ assertCollectionNotOnShard(st.shard0.getDB("mrShard"), "outSharded");
assert.commandWorked(admin.runCommand({split: "mrShard.outSharded", middle: {"_id": 2000}}));
assert.commandWorked(admin.runCommand(
{moveChunk: "mrShard.outSharded", find: {"_id": 2000}, to: st.shard0.shardName}));
-assert.writeOK(st.s.getCollection("mrShard.outSharded").insert({_id: 1000}));
-assert.writeOK(st.s.getCollection("mrShard.outSharded").insert({_id: 2001}));
+assert.commandWorked(st.s.getCollection("mrShard.outSharded").insert({_id: 1000}));
+assert.commandWorked(st.s.getCollection("mrShard.outSharded").insert({_id: 2001}));
origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "outSharded", sharded: true}});
diff --git a/jstests/sharding/max_time_ms_sharded.js b/jstests/sharding/max_time_ms_sharded.js
index 16c56658a5d..8579149cb36 100644
--- a/jstests/sharding/max_time_ms_sharded.js
+++ b/jstests/sharding/max_time_ms_sharded.js
@@ -53,7 +53,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = -nDocsPerShard; i < nDocsPerShard; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(nDocsPerShard, shards[0].getCollection(coll.getFullName()).count());
assert.eq(nDocsPerShard, shards[1].getCollection(coll.getFullName()).count());
diff --git a/jstests/sharding/merge_chunks_compound_shard_key.js b/jstests/sharding/merge_chunks_compound_shard_key.js
index 3472073f4c5..4eb965329c2 100644
--- a/jstests/sharding/merge_chunks_compound_shard_key.js
+++ b/jstests/sharding/merge_chunks_compound_shard_key.js
@@ -56,11 +56,11 @@ assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 2, y: 0}}))
assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 2, y: 1}}));
jsTest.log("Insert some data into each of the chunk ranges.");
-assert.writeOK(coll.insert({x: -1, y: 2}));
-assert.writeOK(coll.insert({x: 0, y: 2}));
-assert.writeOK(coll.insert({x: 1, y: 2}));
-assert.writeOK(coll.insert({x: 2, y: 1}));
-assert.writeOK(coll.insert({x: 2, y: 3}));
+assert.commandWorked(coll.insert({x: -1, y: 2}));
+assert.commandWorked(coll.insert({x: 0, y: 2}));
+assert.commandWorked(coll.insert({x: 1, y: 2}));
+assert.commandWorked(coll.insert({x: 2, y: 1}));
+assert.commandWorked(coll.insert({x: 2, y: 3}));
// Chunks after merge:
// (MinKey, { x: 0, y: 1 })
diff --git a/jstests/sharding/merge_chunks_test.js b/jstests/sharding/merge_chunks_test.js
index 3166f47113e..d4f74c26fb6 100644
--- a/jstests/sharding/merge_chunks_test.js
+++ b/jstests/sharding/merge_chunks_test.js
@@ -36,10 +36,10 @@ assert.commandWorked(
st.printShardingStatus();
// Insert some data into each of the consolidated ranges
-assert.writeOK(coll.insert({_id: 0}));
-assert.writeOK(coll.insert({_id: 10}));
-assert.writeOK(coll.insert({_id: 40}));
-assert.writeOK(coll.insert({_id: 110}));
+assert.commandWorked(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 10}));
+assert.commandWorked(coll.insert({_id: 40}));
+assert.commandWorked(coll.insert({_id: 110}));
var staleCollection = staleMongos.getCollection(coll + "");
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index e525a909fea..60306797dfd 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -3,7 +3,8 @@
var s = new ShardingTest({name: "migrateBig", shards: 2, other: {chunkSize: 1}});
-assert.writeOK(s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true));
+assert.commandWorked(
+ s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true));
assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', s.shard1.shardName);
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
@@ -19,7 +20,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var x = 0; x < 100; x++) {
bulk.insert({x: x, big: big});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 30}}));
assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 66}}));
@@ -35,7 +36,7 @@ print("direct : " + direct);
var directDB = direct.getDB("test");
for (var done = 0; done < 2 * 1024 * 1024; done += big.length) {
- assert.writeOK(directDB.foo.insert({x: 50 + Math.random(), big: big}));
+ assert.commandWorked(directDB.foo.insert({x: 50 + Math.random(), big: big}));
}
s.printShardingStatus();
diff --git a/jstests/sharding/migrateBig_balancer.js b/jstests/sharding/migrateBig_balancer.js
index 13195b61b65..b9df583c56a 100644
--- a/jstests/sharding/migrateBig_balancer.js
+++ b/jstests/sharding/migrateBig_balancer.js
@@ -36,7 +36,7 @@ for (var i = 0; i < 40; i++) {
bulk.insert({data: dataObj});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(40, coll.count(), "prep1");
assert.commandWorked(admin.runCommand({shardcollection: "" + coll, key: {_id: 1}}));
diff --git a/jstests/sharding/migrate_overwrite_id.js b/jstests/sharding/migrate_overwrite_id.js
index 8060a2de8b4..97f8aa5c218 100644
--- a/jstests/sharding/migrate_overwrite_id.js
+++ b/jstests/sharding/migrate_overwrite_id.js
@@ -19,8 +19,8 @@ var id = 12345;
jsTest.log("Inserting a document with id : 12345 into both shards with diff shard key...");
-assert.writeOK(coll.insert({_id: id, skey: -1}));
-assert.writeOK(coll.insert({_id: id, skey: 1}));
+assert.commandWorked(coll.insert({_id: id, skey: -1}));
+assert.commandWorked(coll.insert({_id: id, skey: 1}));
printjson(st.shard0.getCollection(coll + "").find({_id: id}).toArray());
printjson(st.shard1.getCollection(coll + "").find({_id: id}).toArray());
diff --git a/jstests/sharding/migration_critical_section_concurrency.js b/jstests/sharding/migration_critical_section_concurrency.js
index e98f1f05262..db9f6c7b749 100644
--- a/jstests/sharding/migration_critical_section_concurrency.js
+++ b/jstests/sharding/migration_critical_section_concurrency.js
@@ -18,15 +18,15 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll0', key: {
assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll0', middle: {Key: 0}}));
var coll0 = testDB.Coll0;
-assert.writeOK(coll0.insert({Key: -1, Value: '-1'}));
-assert.writeOK(coll0.insert({Key: 1, Value: '1'}));
+assert.commandWorked(coll0.insert({Key: -1, Value: '-1'}));
+assert.commandWorked(coll0.insert({Key: 1, Value: '1'}));
assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll1', key: {Key: 1}}));
assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll1', middle: {Key: 0}}));
var coll1 = testDB.Coll1;
-assert.writeOK(coll1.insert({Key: -1, Value: '-1'}));
-assert.writeOK(coll1.insert({Key: 1, Value: '1'}));
+assert.commandWorked(coll1.insert({Key: -1, Value: '-1'}));
+assert.commandWorked(coll1.insert({Key: 1, Value: '1'}));
// Ensure that coll0 has chunks on both shards so we can test queries against both donor and
// recipient for Coll1's migration below
@@ -44,8 +44,8 @@ waitForMoveChunkStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
// Ensure that all operations for 'Coll0', which is not being migrated, are not stalled
assert.eq(1, coll0.find({Key: {$lte: -1}}).itcount());
assert.eq(1, coll0.find({Key: {$gte: 1}}).itcount());
-assert.writeOK(coll0.insert({Key: -2, Value: '-2'}));
-assert.writeOK(coll0.insert({Key: 2, Value: '2'}));
+assert.commandWorked(coll0.insert({Key: -2, Value: '-2'}));
+assert.commandWorked(coll0.insert({Key: 2, Value: '2'}));
assert.eq(2, coll0.find({Key: {$lte: -1}}).itcount());
assert.eq(2, coll0.find({Key: {$gte: 1}}).itcount());
@@ -56,7 +56,7 @@ assert.eq(1, coll1.find({Key: {$gte: 1}}).itcount());
// Ensure that all operations for non-sharded collections are not stalled
var collUnsharded = testDB.CollUnsharded;
assert.eq(0, collUnsharded.find({}).itcount());
-assert.writeOK(collUnsharded.insert({TestKey: 0, Value: 'Zero'}));
+assert.commandWorked(collUnsharded.insert({TestKey: 0, Value: 'Zero'}));
assert.eq(1, collUnsharded.find({}).itcount());
unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
diff --git a/jstests/sharding/migration_ignore_interrupts_1.js b/jstests/sharding/migration_ignore_interrupts_1.js
index 0272a204661..d093f0a8f0b 100644
--- a/jstests/sharding/migration_ignore_interrupts_1.js
+++ b/jstests/sharding/migration_ignore_interrupts_1.js
@@ -23,9 +23,9 @@ st.ensurePrimaryShard(dbName, st.shard0.shardName);
assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 0}}));
assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 10}}));
-assert.writeOK(coll1.insert({a: -10}));
-assert.writeOK(coll1.insert({a: 0}));
-assert.writeOK(coll1.insert({a: 10}));
+assert.commandWorked(coll1.insert({a: -10}));
+assert.commandWorked(coll1.insert({a: 0}));
+assert.commandWorked(coll1.insert({a: 10}));
assert.eq(3, shard0Coll1.find().itcount());
assert.eq(0, shard1Coll1.find().itcount());
assert.eq(0, shard2Coll1.find().itcount());
diff --git a/jstests/sharding/migration_ignore_interrupts_2.js b/jstests/sharding/migration_ignore_interrupts_2.js
index b60fa50ccf2..f5147659c96 100644
--- a/jstests/sharding/migration_ignore_interrupts_2.js
+++ b/jstests/sharding/migration_ignore_interrupts_2.js
@@ -17,7 +17,7 @@ var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbNa
assert.commandWorked(admin.runCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
-assert.writeOK(coll1.insert({a: 0}));
+assert.commandWorked(coll1.insert({a: 0}));
assert.eq(1, shard0Coll1.find().itcount());
assert.eq(0, shard1Coll1.find().itcount());
assert.eq(1, coll1.find().itcount());
diff --git a/jstests/sharding/migration_ignore_interrupts_3.js b/jstests/sharding/migration_ignore_interrupts_3.js
index e48159b77b8..9474643c60d 100644
--- a/jstests/sharding/migration_ignore_interrupts_3.js
+++ b/jstests/sharding/migration_ignore_interrupts_3.js
@@ -25,14 +25,14 @@ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
-assert.writeOK(coll1.insert({a: 0}));
+assert.commandWorked(coll1.insert({a: 0}));
assert.eq(1, shard0Coll1.find().itcount());
assert.eq(0, shard1Coll1.find().itcount());
assert.eq(0, shard2Coll1.find().itcount());
assert.eq(1, coll1.find().itcount());
assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
-assert.writeOK(coll2.insert({a: 0}));
+assert.commandWorked(coll2.insert({a: 0}));
assert.eq(1, shard0Coll2.find().itcount());
assert.eq(0, shard1Coll2.find().itcount());
assert.eq(0, shard2Coll2.find().itcount());
diff --git a/jstests/sharding/migration_ignore_interrupts_4.js b/jstests/sharding/migration_ignore_interrupts_4.js
index bc692a9897c..3d4ad25be63 100644
--- a/jstests/sharding/migration_ignore_interrupts_4.js
+++ b/jstests/sharding/migration_ignore_interrupts_4.js
@@ -25,14 +25,14 @@ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
-assert.writeOK(coll1.insert({a: 0}));
+assert.commandWorked(coll1.insert({a: 0}));
assert.eq(1, shard0Coll1.find().itcount());
assert.eq(0, shard1Coll1.find().itcount());
assert.eq(0, shard2Coll1.find().itcount());
assert.eq(1, coll1.find().itcount());
assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
-assert.writeOK(coll2.insert({a: 0}));
+assert.commandWorked(coll2.insert({a: 0}));
assert.eq(1, shard0Coll2.find().itcount());
assert.eq(0, shard1Coll2.find().itcount());
assert.eq(0, shard2Coll2.find().itcount());
@@ -79,8 +79,8 @@ joinMoveChunk = moveChunkParallel(
waitForMigrateStep(shard2, migrateStepNames.cloned);
// Populate donor (shard0) xfermods log.
-assert.writeOK(coll2.insert({a: 1}));
-assert.writeOK(coll2.insert({a: 2}));
+assert.commandWorked(coll2.insert({a: 1}));
+assert.commandWorked(coll2.insert({a: 2}));
assert.eq(3, coll2.find().itcount(), "Failed to insert documents into coll2.");
assert.eq(3, shard0Coll2.find().itcount());
diff --git a/jstests/sharding/migration_move_chunk_after_receive.js b/jstests/sharding/migration_move_chunk_after_receive.js
index fe28af0d8c3..4f58b88d693 100644
--- a/jstests/sharding/migration_move_chunk_after_receive.js
+++ b/jstests/sharding/migration_move_chunk_after_receive.js
@@ -20,10 +20,10 @@ var testColl = testDB.TestColl;
// Create 3 chunks with one document each and move them so that 0 is on shard0, 1 is on shard1,
// etc.
-assert.writeOK(testColl.insert({Key: 0, Value: 'Value'}));
-assert.writeOK(testColl.insert({Key: 100, Value: 'Value'}));
-assert.writeOK(testColl.insert({Key: 101, Value: 'Value'}));
-assert.writeOK(testColl.insert({Key: 200, Value: 'Value'}));
+assert.commandWorked(testColl.insert({Key: 0, Value: 'Value'}));
+assert.commandWorked(testColl.insert({Key: 100, Value: 'Value'}));
+assert.commandWorked(testColl.insert({Key: 101, Value: 'Value'}));
+assert.commandWorked(testColl.insert({Key: 200, Value: 'Value'}));
assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 100}}));
assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 101}}));
diff --git a/jstests/sharding/migration_sets_fromMigrate_flag.js b/jstests/sharding/migration_sets_fromMigrate_flag.js
index 73ee2dea163..8ce1cfc77ca 100644
--- a/jstests/sharding/migration_sets_fromMigrate_flag.js
+++ b/jstests/sharding/migration_sets_fromMigrate_flag.js
@@ -54,7 +54,7 @@ jsTest.log('Inserting 5 docs into donor shard, ensuring one orphan on the recipi
// Insert just one document into the collection and fail a migration after the cloning step in
// order to get an orphan onto the recipient shard with the correct UUID for the collection.
-assert.writeOK(coll.insert({_id: 2}));
+assert.commandWorked(coll.insert({_id: 2}));
assert.eq(1, donorColl.count());
assert.commandWorked(
recipient.adminCommand({configureFailPoint: "failMigrationLeaveOrphans", mode: "alwaysOn"}));
@@ -65,10 +65,10 @@ assert.commandWorked(
recipient.adminCommand({configureFailPoint: "failMigrationLeaveOrphans", mode: "off"}));
// Insert the remaining documents into the collection.
-assert.writeOK(coll.insert({_id: 0}));
-assert.writeOK(coll.insert({_id: 1}));
-assert.writeOK(coll.insert({_id: 3}));
-assert.writeOK(coll.insert({_id: 4}));
+assert.commandWorked(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 3}));
+assert.commandWorked(coll.insert({_id: 4}));
assert.eq(5, donorColl.count());
/**
@@ -100,8 +100,8 @@ waitForMigrateStep(recipient, migrateStepNames.cloned);
jsTest.log('Update 1 doc and delete 1 doc on donor within the currently migrating chunk...');
-assert.writeOK(coll.update({_id: 3}, {_id: 3, a: "updated doc"}));
-assert.writeOK(coll.remove({_id: 4}));
+assert.commandWorked(coll.update({_id: 3}, {_id: 3, a: "updated doc"}));
+assert.commandWorked(coll.remove({_id: 4}));
/**
* Finish migration. Unpause recipient migration, wait for it to collect
diff --git a/jstests/sharding/migration_with_source_ops.js b/jstests/sharding/migration_with_source_ops.js
index 91c7a460196..76dfbb9b239 100644
--- a/jstests/sharding/migration_with_source_ops.js
+++ b/jstests/sharding/migration_with_source_ops.js
@@ -53,9 +53,9 @@ assert.commandWorked(admin.runCommand({split: ns, middle: {a: 20}}));
// 10 documents in each chunk on the donor
jsTest.log('Inserting 20 docs into donor shard, 10 in each chunk...');
for (var i = 0; i < 10; ++i)
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
for (var i = 20; i < 30; ++i)
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
assert.eq(20, coll.count());
/**
@@ -99,15 +99,15 @@ var joinMoveChunk = moveChunkParallel(
waitForMigrateStep(recipient, migrateStepNames.cloned);
jsTest.log('Deleting 5 docs from each of the migrating chunk and the remaining chunk...');
-assert.writeOK(coll.remove({$and: [{a: {$gte: 5}}, {a: {$lt: 25}}]}));
+assert.commandWorked(coll.remove({$and: [{a: {$gte: 5}}, {a: {$lt: 25}}]}));
jsTest.log('Inserting 1 in the migrating chunk range and 1 in the remaining chunk range...');
-assert.writeOK(coll.insert({a: 10}));
-assert.writeOK(coll.insert({a: 30}));
+assert.commandWorked(coll.insert({a: 10}));
+assert.commandWorked(coll.insert({a: 30}));
jsTest.log('Updating 1 in the migrating chunk range and 1 in the remaining chunk range...');
-assert.writeOK(coll.update({a: 0}, {a: 0, updatedData: "updated"}));
-assert.writeOK(coll.update({a: 25}, {a: 25, updatedData: "updated"}));
+assert.commandWorked(coll.update({a: 0}, {a: 0, updatedData: "updated"}));
+assert.commandWorked(coll.update({a: 25}, {a: 25, updatedData: "updated"}));
/**
* Finish migration. Unpause recipient migration, wait for it to collect
diff --git a/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js b/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
index 012324e9946..133a1d3f84b 100644
--- a/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
+++ b/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
@@ -16,7 +16,7 @@ var recoveryDoc = {
minOpTimeUpdaters: 2
};
-assert.writeOK(st.shard0.getDB('admin').system.version.insert(recoveryDoc));
+assert.commandWorked(st.shard0.getDB('admin').system.version.insert(recoveryDoc));
// Make sure test is setup correctly.
var minOpTimeRecoveryDoc =
diff --git a/jstests/sharding/missing_key.js b/jstests/sharding/missing_key.js
index 14078cbff24..e6b04623fad 100644
--- a/jstests/sharding/missing_key.js
+++ b/jstests/sharding/missing_key.js
@@ -8,8 +8,8 @@ var st = new ShardingTest({shards: 1});
var db = st.s.getDB('testDb');
var coll = db.testColl;
-assert.writeOK(coll.insert({x: 1, z: 1}));
-assert.writeOK(coll.insert({y: 1, z: 1}));
+assert.commandWorked(coll.insert({x: 1, z: 1}));
+assert.commandWorked(coll.insert({y: 1, z: 1}));
assert.commandWorked(db.adminCommand({enableSharding: 'testDb'}));
diff --git a/jstests/sharding/mongos_no_detect_sharding.js b/jstests/sharding/mongos_no_detect_sharding.js
index 6dc458c2ae7..ae6aa1643d1 100644
--- a/jstests/sharding/mongos_no_detect_sharding.js
+++ b/jstests/sharding/mongos_no_detect_sharding.js
@@ -11,7 +11,7 @@ print("Creating unsharded connection...");
var mongos2 = st._mongos[1];
var coll = mongos2.getCollection("test.foo");
-assert.writeOK(coll.insert({i: 0}));
+assert.commandWorked(coll.insert({i: 0}));
print("Sharding collection...");
@@ -30,7 +30,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({i: i + 1});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
st.printShardingStatus(true);
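// The same substitution covers bulk writes: execute() returns a
// BulkWriteResult, which assert.commandWorked() accepts just as
// assert.writeOK() did. A sketch with a hypothetical second batch ('bulk2'
// and 'j' are illustrative names):
var bulk2 = coll.initializeUnorderedBulkOp();
for (var j = 0; j < 100; j++) {
    bulk2.insert({i: j + 101});
}
var bulkRes = bulk2.execute();
assert.commandWorked(bulkRes);
assert.eq(100, bulkRes.nInserted);  // per-op counts are still reported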
diff --git a/jstests/sharding/mongos_query_comment.js b/jstests/sharding/mongos_query_comment.js
index ccb10d16824..a17500758ea 100644
--- a/jstests/sharding/mongos_query_comment.js
+++ b/jstests/sharding/mongos_query_comment.js
@@ -28,7 +28,7 @@ const shardColl = shardDB.test;
const collNS = mongosColl.getFullName();
for (let i = 0; i < 5; ++i) {
- assert.writeOK(mongosColl.insert({_id: i, a: i}));
+ assert.commandWorked(mongosColl.insert({_id: i, a: i}));
}
// The profiler will be used to verify that comments are present on the shard.
diff --git a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
index 25f18ef1f92..fa03a0310ba 100644
--- a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
@@ -52,8 +52,8 @@ var collSharded = mongos.getCollection("fooSharded.barSharded");
var collUnsharded = mongos.getCollection("fooUnsharded.barUnsharded");
// Create the unsharded database with shard0 primary
-assert.writeOK(collUnsharded.insert({some: "doc"}));
-assert.writeOK(collUnsharded.remove({}));
+assert.commandWorked(collUnsharded.insert({some: "doc"}));
+assert.commandWorked(collUnsharded.remove({}));
assert.commandWorked(
admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));
@@ -110,9 +110,9 @@ var mongosConnNew = null;
var wc = {writeConcern: {w: 2, wtimeout: 60000}};
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
jsTest.log("Stopping primary of third shard...");
@@ -126,15 +126,15 @@ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne(
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
jsTest.log("Testing idle connection with third primary down...");
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
@@ -150,11 +150,11 @@ mongosConnNew = authDBUsers(new Mongo(mongos.host));
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
gc(); // Clean up new connections
@@ -175,15 +175,15 @@ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne(
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}, wc));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
jsTest.log("Testing idle connection with second primary down...");
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}, wc));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
@@ -202,11 +202,11 @@ mongosConnNew.setSlaveOk();
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}, wc));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
gc(); // Clean up new connections
diff --git a/jstests/sharding/mongos_rs_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
index d41759de5db..b6fffb409e5 100644
--- a/jstests/sharding/mongos_rs_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
@@ -29,8 +29,8 @@ var collSharded = mongos.getCollection("fooSharded.barSharded");
var collUnsharded = mongos.getCollection("fooUnsharded.barUnsharded");
// Create the unsharded database
-assert.writeOK(collUnsharded.insert({some: "doc"}));
-assert.writeOK(collUnsharded.remove({}));
+assert.commandWorked(collUnsharded.insert({some: "doc"}));
+assert.commandWorked(collUnsharded.remove({}));
assert.commandWorked(
admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));
@@ -69,9 +69,9 @@ var mongosConnNew = null;
var wc = {writeConcern: {w: 2, wtimeout: 60000}};
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
jsTest.log("Stopping primary of third shard...");
@@ -85,15 +85,15 @@ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne(
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
jsTest.log("Testing idle connection with third primary down...");
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
@@ -109,11 +109,11 @@ mongosConnNew = new Mongo(mongos.host);
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = new Mongo(mongos.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
mongosConnNew = new Mongo(mongos.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
mongosConnNew = new Mongo(mongos.host);
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
gc(); // Clean up new connections
@@ -173,16 +173,16 @@ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne(
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
// Writes
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}, wc));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
jsTest.log("Testing idle connection with second primary down...");
// Writes
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}, wc));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
// Reads with read prefs
mongosConnIdle.setSlaveOk();
@@ -325,11 +325,11 @@ gc(); // Clean up new connections incrementally to compensate for slow win32 ma
// Writes
mongosConnNew = new Mongo(mongos.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
mongosConnNew = new Mongo(mongos.host);
assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}, wc));
mongosConnNew = new Mongo(mongos.host);
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
gc(); // Clean up new connections
diff --git a/jstests/sharding/mongos_shard_failure_tolerance.js b/jstests/sharding/mongos_shard_failure_tolerance.js
index 7d4560b5ee6..479ca437f97 100644
--- a/jstests/sharding/mongos_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_shard_failure_tolerance.js
@@ -32,8 +32,8 @@ assert.commandWorked(
admin.runCommand({moveChunk: collSharded.toString(), find: {_id: 0}, to: st.shard1.shardName}));
// Create the unsharded database
-assert.writeOK(collUnsharded.insert({some: "doc"}));
-assert.writeOK(collUnsharded.remove({}));
+assert.commandWorked(collUnsharded.insert({some: "doc"}));
+assert.commandWorked(collUnsharded.remove({}));
st.ensurePrimaryShard(collUnsharded.getDB().toString(), st.shard0.shardName);
//
@@ -46,9 +46,9 @@ var mongosConnActive = new Mongo(st.s0.host);
var mongosConnIdle = null;
var mongosConnNew = null;
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}));
jsTest.log("Stopping third shard...");
@@ -62,15 +62,15 @@ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne(
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}));
jsTest.log("Testing idle connection...");
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}));
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}));
+assert.commandWorked(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
@@ -86,11 +86,11 @@ mongosConnNew = new Mongo(st.s0.host);
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}));
mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}));
mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}));
+assert.commandWorked(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}));
gc(); // Clean up new connections
@@ -104,16 +104,16 @@ jsTest.log("Testing active connection...");
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}));
assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}));
jsTest.log("Testing idle connection...");
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}));
assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}));
+assert.commandWorked(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
@@ -127,13 +127,13 @@ mongosConnNew = new Mongo(st.s0.host);
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}));
mongosConnNew = new Mongo(st.s0.host);
assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}));
mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}));
+assert.commandWorked(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}));
st.stop();
})();
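// The failure-tolerance files pair the two assertion helpers deliberately; a
// sketch reusing the names above ({_id: +/-8} are hypothetical documents): a
// write routed to a healthy shard must succeed, while one routed to the
// stopped shard must return a write error rather than hang.
assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -8}));
assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 8}));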
diff --git a/jstests/sharding/mongos_validate_writes.js b/jstests/sharding/mongos_validate_writes.js
index 66b71aa12c3..f920992799c 100644
--- a/jstests/sharding/mongos_validate_writes.js
+++ b/jstests/sharding/mongos_validate_writes.js
@@ -37,7 +37,7 @@ coll.ensureIndex({b: 1});
st.shardColl(coll, {b: 1}, {b: 0}, {b: 1}, coll.getDB(), true);
// Make sure that we can successfully insert, even though we have stale state
-assert.writeOK(staleCollA.insert({b: "b"}));
+assert.commandWorked(staleCollA.insert({b: "b"}));
// Make sure we unsuccessfully insert with old info
assert.writeError(staleCollB.insert({a: "a"}));
@@ -48,7 +48,7 @@ coll.ensureIndex({c: 1});
st.shardColl(coll, {c: 1}, {c: 0}, {c: 1}, coll.getDB(), true);
// Make sure we can successfully upsert, even though we have stale state
-assert.writeOK(staleCollA.update({c: "c"}, {c: "c"}, true));
+assert.commandWorked(staleCollA.update({c: "c"}, {c: "c"}, true));
// Make sure we unsuccessfully upsert with old info
assert.writeError(staleCollB.update({b: "b"}, {b: "b"}, true));
@@ -59,9 +59,9 @@ coll.ensureIndex({d: 1});
st.shardColl(coll, {d: 1}, {d: 0}, {d: 1}, coll.getDB(), true);
// Make sure we can successfully update, even though we have stale state
-assert.writeOK(coll.insert({d: "d"}));
+assert.commandWorked(coll.insert({d: "d"}));
-assert.writeOK(staleCollA.update({d: "d"}, {$set: {x: "x"}}, false, false));
+assert.commandWorked(staleCollA.update({d: "d"}, {$set: {x: "x"}}, false, false));
assert.eq(staleCollA.findOne().x, "x");
// Make sure we unsuccessfully update with old info
@@ -76,9 +76,9 @@ st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
st.shardColl(coll, {e: 1}, {e: 0}, {e: 1}, coll.getDB(), true);
// Make sure we can successfully remove, even though we have stale state
-assert.writeOK(coll.insert({e: "e"}));
+assert.commandWorked(coll.insert({e: "e"}));
-assert.writeOK(staleCollA.remove({e: "e"}, true));
+assert.commandWorked(staleCollA.remove({e: "e"}, true));
assert.eq(null, staleCollA.findOne());
// Make sure we unsuccessfully remove with old info
diff --git a/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js b/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js
index 5bece4f1c76..16536ea4bb1 100644
--- a/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js
+++ b/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js
@@ -20,10 +20,10 @@ st.ensurePrimaryShard(dbName, donor.shardName);
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
jsTest.log("Insert a document with {_id: 0} into " + ns + " through mongos");
-assert.writeOK(st.s.getCollection(ns).insert({_id: 0}));
+assert.commandWorked(st.s.getCollection(ns).insert({_id: 0}));
jsTest.log("Insert a document with {_id: 1} into " + ns + " directly on the recipient");
-assert.writeOK(recipient.getCollection(ns).insert({_id: 1}));
+assert.commandWorked(recipient.getCollection(ns).insert({_id: 1}));
jsTest.log("Check that the UUID on the recipient differs from the UUID on the donor");
const recipientUUIDBefore =
diff --git a/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js b/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
index c7602b4f644..1c6cc248d7a 100644
--- a/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
+++ b/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
@@ -93,7 +93,7 @@ var tests = [
for (let i = 0; i < 10; i++) {
bulk.insert({x: 10});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
},
checkRetryResult: function(result, retryResult) {
checkFindAndModifyResult(result, retryResult);
diff --git a/jstests/sharding/move_chunk_open_cursors.js b/jstests/sharding/move_chunk_open_cursors.js
index 1b15fb198cf..312f8143048 100644
--- a/jstests/sharding/move_chunk_open_cursors.js
+++ b/jstests/sharding/move_chunk_open_cursors.js
@@ -15,7 +15,7 @@ let bulk = coll.initializeUnorderedBulkOp();
for (let i = 0; i < nDocs; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Make sure we know which shard will host the data to begin.
st.ensurePrimaryShard(dbName, st.shard0.shardName);
diff --git a/jstests/sharding/move_chunk_remove_with_write_retryability.js b/jstests/sharding/move_chunk_remove_with_write_retryability.js
index c417710f462..e493bea1632 100644
--- a/jstests/sharding/move_chunk_remove_with_write_retryability.js
+++ b/jstests/sharding/move_chunk_remove_with_write_retryability.js
@@ -36,7 +36,7 @@ var setup = function(coll) {
bulk.insert({x: 10});
bulk.insert({x: 20});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
};
var checkRetryResult = function(result, retryResult) {
assert.eq(result.ok, retryResult.ok);
diff --git a/jstests/sharding/move_primary_clone_test.js b/jstests/sharding/move_primary_clone_test.js
index 81811e2e993..404562e7cd1 100644
--- a/jstests/sharding/move_primary_clone_test.js
+++ b/jstests/sharding/move_primary_clone_test.js
@@ -115,8 +115,8 @@ function createCollections(sharded) {
assert.commandWorked(db.createCollection('bar', barOptions));
for (let i = 0; i < 3; i++) {
- assert.writeOK(db.foo.insert({a: i}));
- assert.writeOK(db.bar.insert({a: i}));
+ assert.commandWorked(db.foo.insert({a: i}));
+ assert.commandWorked(db.bar.insert({a: i}));
}
assert.eq(3, db.foo.count());
assert.eq(3, db.bar.count());
diff --git a/jstests/sharding/movechunk_include.js b/jstests/sharding/movechunk_include.js
index f4c1c2f00a6..653326e2ab6 100644
--- a/jstests/sharding/movechunk_include.js
+++ b/jstests/sharding/movechunk_include.js
@@ -32,7 +32,7 @@ function setupMoveChunkTest(shardOptions) {
bulk.insert({_id: num++, s: str});
data += str.length;
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
// Make sure there are chunks to move
for (var i = 0; i < 10; ++i) {
diff --git a/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js b/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
index 3a03a485dc9..8c0a9d81a3c 100644
--- a/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
+++ b/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
@@ -23,7 +23,7 @@ assert.commandWorked(mongos.adminCommand({shardCollection: 'TestDB.TestColl', ke
var coll = mongos.getDB('TestDB').TestColl;
// We have one chunk initially
-assert.writeOK(coll.insert({Key: 0, Value: 'Test value'}));
+assert.commandWorked(coll.insert({Key: 0, Value: 'Test value'}));
pauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
diff --git a/jstests/sharding/movechunk_parallel.js b/jstests/sharding/movechunk_parallel.js
index 4c486e64f89..ca16d4caa8b 100644
--- a/jstests/sharding/movechunk_parallel.js
+++ b/jstests/sharding/movechunk_parallel.js
@@ -18,10 +18,10 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key
var coll = st.s0.getDB('TestDB').TestColl;
// Create 4 chunks initially
-assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
-assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
-assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
-assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
+assert.commandWorked(coll.insert({Key: 1, Value: 'Test value 1'}));
+assert.commandWorked(coll.insert({Key: 10, Value: 'Test value 10'}));
+assert.commandWorked(coll.insert({Key: 20, Value: 'Test value 20'}));
+assert.commandWorked(coll.insert({Key: 30, Value: 'Test value 30'}));
assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 10}));
assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20}));
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
index b3d4bd7a9c3..f1531142bd0 100644
--- a/jstests/sharding/mrShardedOutput.js
+++ b/jstests/sharding/mrShardedOutput.js
@@ -40,7 +40,7 @@ var bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numBatch; ++i) {
bulk.insert({a: numDocs + i, y: str, i: numDocs + i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
numDocs += numBatch;
@@ -100,7 +100,7 @@ bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numBatch; ++i) {
bulk.insert({a: numDocs + i, y: str, i: numDocs + i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("No errors on insert batch.");
numDocs += numBatch;
diff --git a/jstests/sharding/mr_and_agg_versioning.js b/jstests/sharding/mr_and_agg_versioning.js
index bb129b2c6b7..e0cb7c94d26 100644
--- a/jstests/sharding/mr_and_agg_versioning.js
+++ b/jstests/sharding/mr_and_agg_versioning.js
@@ -26,7 +26,7 @@ var bulk = st.s.getCollection(collName).initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
bulk.insert({_id: i, key: (i % numKeys), value: i % numKeys});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Add orphaned documents directly to the shards to ensure they are properly filtered out.
st.shard0.getCollection(collName).insert({_id: 0, key: 0, value: 0});
diff --git a/jstests/sharding/mr_shard_version.js b/jstests/sharding/mr_shard_version.js
index 52622b4ce66..195444c97f0 100644
--- a/jstests/sharding/mr_shard_version.js
+++ b/jstests/sharding/mr_shard_version.js
@@ -16,7 +16,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
bulk.insert({_id: i, key: "" + (i % numKeys), value: i % numKeys});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(numDocs, coll.find().itcount());
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
index fad2106aad5..796ef9e3a7e 100644
--- a/jstests/sharding/multi_mongos2.js
+++ b/jstests/sharding/multi_mongos2.js
@@ -12,7 +12,7 @@ assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {num:
st.configRS.awaitLastOpCommitted();
// "test.existing" - unsharded
-assert.writeOK(st.s0.getDB('test').existing.insert({_id: 1}));
+assert.commandWorked(st.s0.getDB('test').existing.insert({_id: 1}));
assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
@@ -27,7 +27,7 @@ assert.eq(1, st.s0.getDB('test').existing.count({_id: 1})); // SERVER-2828
assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
// Test stats
-assert.writeOK(st.s0.getDB('test').existing2.insert({_id: 1}));
+assert.commandWorked(st.s0.getDB('test').existing2.insert({_id: 1}));
assert.eq(1, st.s0.getDB('test').existing2.count({_id: 1}));
assert.eq(1, st.s1.getDB('test').existing2.count({_id: 1}));
@@ -43,10 +43,10 @@ assert.commandWorked(st.s1.adminCommand({split: "test.existing2", middle: {_id:
}
// Test admin commands
-assert.writeOK(st.s0.getDB('test').existing3.insert({_id: 1}));
+assert.commandWorked(st.s0.getDB('test').existing3.insert({_id: 1}));
assert.eq(1, st.s0.getDB('test').existing3.count({_id: 1}));
assert.eq(1, st.s1.getDB('test').existing3.count({_id: 1}));
-assert.writeOK(st.s1.adminCommand({shardcollection: "test.existing3", key: {_id: 1}}));
+assert.commandWorked(st.s1.adminCommand({shardcollection: "test.existing3", key: {_id: 1}}));
st.configRS.awaitLastOpCommitted();
assert.commandWorked(st.s1.adminCommand({split: "test.existing3", middle: {_id: 5}}));
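// The shardcollection change above is arguably a fix rather than a rename:
// adminCommand() returns a plain command reply, not a WriteResult, so
// assert.commandWorked(), which checks the reply's ok field, is the
// appropriate helper. A harmless illustration using a read-only command:
var reply = st.s1.adminCommand({listShards: 1});
assert.commandWorked(reply);  // asserts reply.ok === 1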
diff --git a/jstests/sharding/multi_mongos2a.js b/jstests/sharding/multi_mongos2a.js
index 7e2dce7c8b0..8650046fe78 100644
--- a/jstests/sharding/multi_mongos2a.js
+++ b/jstests/sharding/multi_mongos2a.js
@@ -9,7 +9,7 @@ st.ensurePrimaryShard('test', st.shard1.shardName);
assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
-assert.writeOK(st.s0.getDB('test').existing.insert({_id: 1}));
+assert.commandWorked(st.s0.getDB('test').existing.insert({_id: 1}));
assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
diff --git a/jstests/sharding/multi_shard_transaction_without_majority_reads.js b/jstests/sharding/multi_shard_transaction_without_majority_reads.js
index 8ddb69a665d..e694f5d4c7a 100644
--- a/jstests/sharding/multi_shard_transaction_without_majority_reads.js
+++ b/jstests/sharding/multi_shard_transaction_without_majority_reads.js
@@ -15,22 +15,22 @@ st.ensurePrimaryShard('TestDB', st.shard0.shardName);
assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
const coll = st.s0.getDB('TestDB').TestColl;
-assert.writeOK(coll.insert({_id: -1, x: 0}));
-assert.writeOK(coll.insert({_id: 1, x: 0}));
+assert.commandWorked(coll.insert({_id: -1, x: 0}));
+assert.commandWorked(coll.insert({_id: 1, x: 0}));
assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {_id: 1}}));
assert.commandWorked(
st.s0.adminCommand({moveChunk: 'TestDB.TestColl', find: {_id: 1}, to: st.shard1.shardName}));
-assert.writeOK(coll.update({_id: -1}, {$inc: {x: 1}}));
-assert.writeOK(coll.update({_id: 1}, {$inc: {x: 1}}));
+assert.commandWorked(coll.update({_id: -1}, {$inc: {x: 1}}));
+assert.commandWorked(coll.update({_id: 1}, {$inc: {x: 1}}));
const session = st.s0.startSession();
const sessionColl = session.getDatabase('TestDB').TestColl;
session.startTransaction();
-assert.writeOK(sessionColl.update({_id: -1}, {$inc: {x: 1}}));
-assert.writeOK(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
+assert.commandWorked(sessionColl.update({_id: -1}, {$inc: {x: 1}}));
+assert.commandWorked(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
assert.commandFailedWithCode(session.commitTransaction_forTesting(),
ErrorCodes.ReadConcernMajorityNotEnabled);
diff --git a/jstests/sharding/multi_write_target.js b/jstests/sharding/multi_write_target.js
index 90330f43cc0..14d1e0d6a39 100644
--- a/jstests/sharding/multi_write_target.js
+++ b/jstests/sharding/multi_write_target.js
@@ -23,13 +23,13 @@ assert.commandWorked(
jsTest.log("Testing multi-update...");
// Put data on all shards
-assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
-assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
-assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: 100, x: 1}));
+assert.commandWorked(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
+assert.commandWorked(st.s0.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
+assert.commandWorked(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: 100, x: 1}));
// Non-multi-update doesn't work without shard key
assert.writeError(coll.update({x: 1}, {$set: {updated: true}}, {multi: false}));
-assert.writeOK(coll.update({x: 1}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(coll.update({x: 1}, {$set: {updated: true}}, {multi: true}));
// Ensure update goes to *all* shards
assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updated: true}));
@@ -38,7 +38,7 @@ assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({updated: true
// _id update works, and goes to all shards even on the stale mongos
var staleColl = st.s1.getCollection('foo.bar');
-assert.writeOK(staleColl.update({_id: 0}, {$set: {updatedById: true}}, {multi: false}));
+assert.commandWorked(staleColl.update({_id: 0}, {$set: {updatedById: true}}, {multi: false}));
// Ensure _id update goes to *all* shards
assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updatedById: true}));
@@ -49,7 +49,7 @@ jsTest.log("Testing multi-delete...");
// non-multi-delete doesn't work without shard key
assert.writeError(coll.remove({x: 1}, {justOne: true}));
-assert.writeOK(coll.remove({x: 1}, {justOne: false}));
+assert.commandWorked(coll.remove({x: 1}, {justOne: false}));
// Ensure delete goes to *all* shards
assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
@@ -57,12 +57,12 @@ assert.eq(null, st.shard1.getCollection(coll.toString()).findOne({x: 1}));
assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({x: 1}));
// Put more on all shards
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
-assert.writeOK(st.shard1.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
+assert.commandWorked(st.shard0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
+assert.commandWorked(st.shard1.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
// Data not in chunks
-assert.writeOK(st.shard2.getCollection(coll.toString()).insert({_id: 0, x: 1}));
+assert.commandWorked(st.shard2.getCollection(coll.toString()).insert({_id: 0, x: 1}));
-assert.writeOK(coll.remove({_id: 0}, {justOne: true}));
+assert.commandWorked(coll.remove({_id: 0}, {justOne: true}));
// Ensure _id delete goes to *all* shards
assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
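// A compact restatement of the targeting rule this file exercises (field 'f'
// is hypothetical): without the shard key or an exact _id, single-document
// writes are rejected, while the multi / not-justOne forms broadcast to all
// shards and succeed.
assert.writeError(coll.update({x: 1}, {$set: {f: 1}}, {multi: false}));
assert.commandWorked(coll.update({x: 1}, {$set: {f: 1}}, {multi: true}));
assert.writeError(coll.remove({x: 1}, {justOne: true}));
assert.commandWorked(coll.remove({x: 1}, {justOne: false}));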
diff --git a/jstests/sharding/oplog_document_key.js b/jstests/sharding/oplog_document_key.js
index d138457e4f0..3830f41686c 100644
--- a/jstests/sharding/oplog_document_key.js
+++ b/jstests/sharding/oplog_document_key.js
@@ -19,28 +19,28 @@ assert.commandWorked(db.adminCommand({shardcollection: 'test.byX', key: {x: 1}})
assert.commandWorked(db.adminCommand({shardcollection: 'test.byXId', key: {x: 1, _id: 1}}));
assert.commandWorked(db.adminCommand({shardcollection: 'test.byIdX', key: {_id: 1, x: 1}}));
-assert.writeOK(db.un.insert({_id: 10, x: 50, y: 60}));
-assert.writeOK(db.un.insert({_id: 30, x: 70, y: 80}));
+assert.commandWorked(db.un.insert({_id: 10, x: 50, y: 60}));
+assert.commandWorked(db.un.insert({_id: 30, x: 70, y: 80}));
-assert.writeOK(db.byId.insert({_id: 11, x: 51, y: 61}));
-assert.writeOK(db.byId.insert({_id: 31, x: 71, y: 81}));
+assert.commandWorked(db.byId.insert({_id: 11, x: 51, y: 61}));
+assert.commandWorked(db.byId.insert({_id: 31, x: 71, y: 81}));
-assert.writeOK(db.byX.insert({_id: 12, x: 52, y: 62}));
-assert.writeOK(db.byX.insert({_id: 32, x: 72, y: 82}));
+assert.commandWorked(db.byX.insert({_id: 12, x: 52, y: 62}));
+assert.commandWorked(db.byX.insert({_id: 32, x: 72, y: 82}));
-assert.writeOK(db.byXId.insert({_id: 13, x: 53, y: 63}));
-assert.writeOK(db.byXId.insert({_id: 33, x: 73, y: 83}));
+assert.commandWorked(db.byXId.insert({_id: 13, x: 53, y: 63}));
+assert.commandWorked(db.byXId.insert({_id: 33, x: 73, y: 83}));
-assert.writeOK(db.byIdX.insert({_id: 14, x: 54, y: 64}));
-assert.writeOK(db.byIdX.insert({_id: 34, x: 74, y: 84}));
+assert.commandWorked(db.byIdX.insert({_id: 14, x: 54, y: 64}));
+assert.commandWorked(db.byIdX.insert({_id: 34, x: 74, y: 84}));
var oplog = st.rs0.getPrimary().getDB('local').oplog.rs;
////////////////////////////////////////////////////////////////////////
jsTest.log("Test update command on 'un'");
-assert.writeOK(db.un.update({_id: 10, x: 50}, {$set: {y: 70}})); // in place
-assert.writeOK(db.un.update({_id: 30, x: 70}, {y: 75})); // replacement
+assert.commandWorked(db.un.update({_id: 10, x: 50}, {$set: {y: 70}})); // in place
+assert.commandWorked(db.un.update({_id: 30, x: 70}, {y: 75})); // replacement
// unsharded, only _id appears in o2:
@@ -53,8 +53,8 @@ assert.eq(b.o2, {_id: 30});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test update command on 'byId'");
-assert.writeOK(db.byId.update({_id: 11}, {$set: {y: 71}})); // in place
-assert.writeOK(db.byId.update({_id: 31}, {x: 71, y: 76})); // replacement
+assert.commandWorked(db.byId.update({_id: 11}, {$set: {y: 71}})); // in place
+assert.commandWorked(db.byId.update({_id: 31}, {x: 71, y: 76})); // replacement
// sharded by {_id: 1}: only _id appears in o2:
@@ -67,8 +67,8 @@ assert.eq(b.o2, {_id: 31});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test update command on 'byX'");
-assert.writeOK(db.byX.update({x: 52}, {$set: {y: 72}})); // in place
-assert.writeOK(db.byX.update({x: 72}, {x: 72, y: 77})); // replacement
+assert.commandWorked(db.byX.update({x: 52}, {$set: {y: 72}})); // in place
+assert.commandWorked(db.byX.update({x: 72}, {x: 72, y: 77})); // replacement
// sharded by {x: 1}: x appears in o2, followed by _id:
@@ -81,8 +81,8 @@ assert.eq(b.o2, {x: 72, _id: 32});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test update command on 'byXId'");
-assert.writeOK(db.byXId.update({_id: 13, x: 53}, {$set: {y: 73}})); // in place
-assert.writeOK(db.byXId.update({_id: 33, x: 73}, {x: 73, y: 78})); // replacement
+assert.commandWorked(db.byXId.update({_id: 13, x: 53}, {$set: {y: 73}})); // in place
+assert.commandWorked(db.byXId.update({_id: 33, x: 73}, {x: 73, y: 78})); // replacement
// sharded by {x: 1, _id: 1}: x appears in o2, followed by _id:
@@ -95,8 +95,8 @@ assert.eq(b.o2, {x: 73, _id: 33});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test update command on 'byIdX'");
-assert.writeOK(db.byIdX.update({_id: 14, x: 54}, {$set: {y: 74}})); // in place
-assert.writeOK(db.byIdX.update({_id: 34, x: 74}, {x: 74, y: 79})); // replacement
+assert.commandWorked(db.byIdX.update({_id: 14, x: 54}, {$set: {y: 74}})); // in place
+assert.commandWorked(db.byIdX.update({_id: 34, x: 74}, {x: 74, y: 79})); // replacement
// sharded by {_id: 1, x: 1}: _id appears in o2, followed by x:
@@ -109,8 +109,8 @@ assert.eq(b.o2, {_id: 34, x: 74});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test remove command: 'un'");
-assert.writeOK(db.un.remove({_id: 10}));
-assert.writeOK(db.un.remove({_id: 30}));
+assert.commandWorked(db.un.remove({_id: 10}));
+assert.commandWorked(db.un.remove({_id: 30}));
a = oplog.findOne({ns: 'test.un', op: 'd', 'o._id': 10});
assert.eq(a.o, {_id: 10});
@@ -120,8 +120,8 @@ assert.eq(b.o, {_id: 30});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test remove command: 'byX'");
-assert.writeOK(db.byX.remove({_id: 12}));
-assert.writeOK(db.byX.remove({_id: 32}));
+assert.commandWorked(db.byX.remove({_id: 12}));
+assert.commandWorked(db.byX.remove({_id: 32}));
a = oplog.findOne({ns: 'test.byX', op: 'd', 'o._id': 12});
assert.eq(a.o, {x: 52, _id: 12});
@@ -131,8 +131,8 @@ assert.eq(b.o, {x: 72, _id: 32});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test remove command: 'byXId'");
-assert.writeOK(db.byXId.remove({_id: 13}));
-assert.writeOK(db.byXId.remove({_id: 33}));
+assert.commandWorked(db.byXId.remove({_id: 13}));
+assert.commandWorked(db.byXId.remove({_id: 33}));
a = oplog.findOne({ns: 'test.byXId', op: 'd', 'o._id': 13});
assert.eq(a.o, {x: 53, _id: 13});
@@ -142,8 +142,8 @@ assert.eq(b.o, {x: 73, _id: 33});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test remove command: 'byIdX'");
-assert.writeOK(db.byIdX.remove({_id: 14}));
-assert.writeOK(db.byIdX.remove({_id: 34}));
+assert.commandWorked(db.byIdX.remove({_id: 14}));
+assert.commandWorked(db.byIdX.remove({_id: 34}));
a = oplog.findOne({ns: 'test.byIdX', op: 'd', 'o._id': 14});
assert.eq(a.o, {_id: 14, x: 54});
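// What these oplog assertions establish: the 'o' field of a delete entry (and
// the 'o2' of an update) records the document key, whose field order follows
// the collection's shard key pattern, with _id included. A sketch reusing the
// test's names ('entry' is a hypothetical variable):
var entry = oplog.findOne({ns: 'test.byIdX', op: 'd', 'o._id': 14});
assert.eq(entry.o, {_id: 14, x: 54});  // {_id: 1, x: 1} key: _id first, then x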
diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js
index c02e708fd53..1be6fa06f0d 100644
--- a/jstests/sharding/parallel.js
+++ b/jstests/sharding/parallel.js
@@ -25,7 +25,7 @@ s.startBalancer();
var bulk = db.foo.initializeUnorderedBulkOp();
for (i = 0; i < N; i++)
bulk.insert({_id: i});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
var doCommand = function(dbname, cmd) {
x = benchRun({
diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index b1a730db297..b0e37e91bba 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -32,7 +32,7 @@ for (i = 0; i < 100; i++) {
bulk.insert({num: i, str: longStr});
bulk.insert({num: i + 100, x: i, str: longStr});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// no usable index yet, should throw
assert.throws(function() {
@@ -43,13 +43,13 @@ assert.throws(function() {
assert.commandWorked(coll.ensureIndex({num: 1, x: 1}));
// usable index, but doc with empty 'num' value, so still should throw
-assert.writeOK(coll.insert({x: -5}));
+assert.commandWorked(coll.insert({x: -5}));
assert.throws(function() {
s.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}});
});
// remove the bad doc. now should finally succeed
-assert.writeOK(coll.remove({x: -5}));
+assert.commandWorked(coll.remove({x: -5}));
assert.commandWorked(s.s0.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}}));
// make sure extra index is not created
@@ -167,7 +167,7 @@ for (i = 0; i < 3; i++) {
bulk.insert({skey: 0, extra: i, superfluous: j});
}
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
// split on that key, and check it makes 2 chunks
var splitRes = admin.runCommand({split: coll2 + "", middle: {skey: 0}});
diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js
index 288d6abe694..87d4e81a315 100644
--- a/jstests/sharding/presplit.js
+++ b/jstests/sharding/presplit.js
@@ -19,7 +19,7 @@ while (inserted < (20 * 1024 * 1024)) {
bulk.insert({_id: num++, s: bigString});
inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Make sure that there's only one chunk holding all the data.
s.printChunks();
diff --git a/jstests/sharding/primary_config_server_blackholed_from_mongos.js b/jstests/sharding/primary_config_server_blackholed_from_mongos.js
index 674dc1f9235..1b2dfbc15b5 100644
--- a/jstests/sharding/primary_config_server_blackholed_from_mongos.js
+++ b/jstests/sharding/primary_config_server_blackholed_from_mongos.js
@@ -16,7 +16,7 @@ var bulk = testDB.ShardedColl.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
const configPrimary = st.configRS.getPrimary();
const admin = configPrimary.getDB("admin");
@@ -49,7 +49,7 @@ assert.writeError(
jsTest.log('Doing CRUD operations on the sharded collection');
assert.eq(1000, testDB.ShardedColl.find().itcount());
-assert.writeOK(testDB.ShardedColl.insert({_id: 1000}));
+assert.commandWorked(testDB.ShardedColl.insert({_id: 1000}));
assert.eq(1001, testDB.ShardedColl.find().count());
jsTest.log('Doing read operations on a config server collection');
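// Sketch of the asymmetry this test demonstrates, reusing the names above
// ({_id: 1001} is a hypothetical document): with the config primary
// blackholed, routed CRUD against cached routing info keeps working, while
// operations that must write config metadata fail.
assert.commandWorked(testDB.ShardedColl.insert({_id: 1001}));
assert.eq(1002, testDB.ShardedColl.find().count());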
diff --git a/jstests/sharding/printShardingStatus.js b/jstests/sharding/printShardingStatus.js
index 18bc8bdea6e..c33ca3a1ac5 100644
--- a/jstests/sharding/printShardingStatus.js
+++ b/jstests/sharding/printShardingStatus.js
@@ -102,7 +102,7 @@ config.getCollectionInfos().forEach(function(c) {
assert.commandWorked(configCopy.createCollection(c.name, c.options));
// Clone the docs.
config.getCollection(c.name).find().hint({_id: 1}).forEach(function(d) {
- assert.writeOK(configCopy.getCollection(c.name).insert(d));
+ assert.commandWorked(configCopy.getCollection(c.name).insert(d));
});
// Build the indexes.
config.getCollection(c.name).getIndexes().forEach(function(i) {
@@ -179,11 +179,11 @@ function testCollDetails(args) {
assert.commandWorked(admin.runCommand(cmdObj));
if (args.hasOwnProperty("unique")) {
- assert.writeOK(mongos.getDB("config").collections.update({_id: collName},
- {$set: {"unique": args.unique}}));
+ assert.commandWorked(mongos.getDB("config").collections.update(
+ {_id: collName}, {$set: {"unique": args.unique}}));
}
if (args.hasOwnProperty("noBalance")) {
- assert.writeOK(mongos.getDB("config").collections.update(
+ assert.commandWorked(mongos.getDB("config").collections.update(
{_id: collName}, {$set: {"noBalance": args.noBalance}}));
}
@@ -217,7 +217,7 @@ function testCollDetails(args) {
mongos.getCollection(collName).drop();
} catch (e) {
// Ignore drop errors because they are from the illegal values in the collection entry
- assert.writeOK(mongos.getDB("config").collections.remove({_id: collName}));
+ assert.commandWorked(mongos.getDB("config").collections.remove({_id: collName}));
}
testCollDetailsNum++;
diff --git a/jstests/sharding/query_config.js b/jstests/sharding/query_config.js
index 65739b2b9a5..d6755814229 100644
--- a/jstests/sharding/query_config.js
+++ b/jstests/sharding/query_config.js
@@ -190,7 +190,7 @@ var queryConfigChunks = function(st) {
// Setup.
assert.commandWorked(st.s.adminCommand({shardcollection: testColl.getFullName(), key: {e: 1}}));
for (var i = 0; i < testCollData.length; i++) {
- assert.writeOK(testColl.insert(testCollData[i]));
+ assert.commandWorked(testColl.insert(testCollData[i]));
}
assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 2}}));
assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 6}}));
@@ -256,7 +256,7 @@ var queryUserCreated = function(database) {
// Setup.
dropCollectionIfExists(userColl);
for (var i = 0; i < userCollData.length; i++) {
- assert.writeOK(userColl.insert(userCollData[i]));
+ assert.commandWorked(userColl.insert(userCollData[i]));
}
assert.commandWorked(userColl.createIndex({s: 1}));
diff --git a/jstests/sharding/query_sharded.js b/jstests/sharding/query_sharded.js
index 92ef5f19678..b79cff55cb1 100644
--- a/jstests/sharding/query_sharded.js
+++ b/jstests/sharding/query_sharded.js
@@ -11,7 +11,7 @@ var coll = mongos.getCollection("foo.bar");
//
// Ensure we can't use exhaust option through mongos
coll.remove({});
-assert.writeOK(coll.insert({a: 'b'}));
+assert.commandWorked(coll.insert({a: 'b'}));
var query = coll.find({});
assert.neq(null, query.next());
query = coll.find({}).addOption(DBQuery.Option.exhaust);
@@ -23,7 +23,7 @@ assert.throws(function() {
//
// Ensure we can't trick mongos by inserting exhaust option on a command through mongos
coll.remove({});
-assert.writeOK(coll.insert({a: 'b'}));
+assert.commandWorked(coll.insert({a: 'b'}));
var cmdColl = mongos.getCollection(coll.getDB().toString() + ".$cmd");
var cmdQuery = cmdColl.find({ping: 1}).limit(1);
assert.commandWorked(cmdQuery.next());
diff --git a/jstests/sharding/read_pref_multi_mongos_stale_config.js b/jstests/sharding/read_pref_multi_mongos_stale_config.js
index b451b976d39..b3c6ab6bc42 100644
--- a/jstests/sharding/read_pref_multi_mongos_stale_config.js
+++ b/jstests/sharding/read_pref_multi_mongos_stale_config.js
@@ -24,8 +24,8 @@ var toShard = configDB2.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
assert.commandWorked(testDB2.adminCommand({moveChunk: 'test.user', to: toShard, find: {x: 50}}));
// Insert a document into each chunk
-assert.writeOK(testDB2.user.insert({x: 30}));
-assert.writeOK(testDB2.user.insert({x: 130}));
+assert.commandWorked(testDB2.user.insert({x: 30}));
+assert.commandWorked(testDB2.user.insert({x: 130}));
// The testDB1 mongos does not know the chunk has been moved, and will retry
var cursor = testDB1.user.find({x: 30}).readPref('primary');
diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js
index 40326f50fec..512719b08b6 100644
--- a/jstests/sharding/recovering_slaveok.js
+++ b/jstests/sharding/recovering_slaveok.js
@@ -30,16 +30,16 @@ var collSOk = mongosSOK.getCollection("" + coll);
var rsA = shardTest.rs0;
var rsB = shardTest.rs1;
-assert.writeOK(rsA.getPrimary().getDB("test_a").dummy.insert({x: 1}));
-assert.writeOK(rsB.getPrimary().getDB("test_b").dummy.insert({x: 1}));
+assert.commandWorked(rsA.getPrimary().getDB("test_a").dummy.insert({x: 1}));
+assert.commandWorked(rsB.getPrimary().getDB("test_b").dummy.insert({x: 1}));
rsA.awaitReplication();
rsB.awaitReplication();
print("1: initial insert");
-assert.writeOK(coll.save({_id: -1, a: "a", date: new Date()}));
-assert.writeOK(coll.save({_id: 1, b: "b", date: new Date()}));
+assert.commandWorked(coll.save({_id: -1, a: "a", date: new Date()}));
+assert.commandWorked(coll.save({_id: 1, b: "b", date: new Date()}));
print("2: shard collection");
diff --git a/jstests/sharding/refine_collection_shard_key_basic.js b/jstests/sharding/refine_collection_shard_key_basic.js
index 1b10759142d..a698649a3be 100644
--- a/jstests/sharding/refine_collection_shard_key_basic.js
+++ b/jstests/sharding/refine_collection_shard_key_basic.js
@@ -33,7 +33,7 @@ function enableShardingAndShardColl(keyDoc) {
function dropAndRecreateColl(keyDoc) {
assert.commandWorked(mongos.getDB(kDbName).runCommand({drop: kCollName}));
- assert.writeOK(mongos.getCollection(kNsName).insert(keyDoc));
+ assert.commandWorked(mongos.getCollection(kNsName).insert(keyDoc));
}
function dropAndReshardColl(keyDoc) {
@@ -81,8 +81,8 @@ function setupCRUDBeforeRefine() {
const sessionDB = session.getDatabase(kDbName);
// The documents below will be read after refineCollectionShardKey to verify data integrity.
- assert.writeOK(sessionDB.getCollection(kCollName).insert({a: 5, b: 5, c: 5, d: 5}));
- assert.writeOK(sessionDB.getCollection(kCollName).insert({a: 10, b: 10, c: 10, d: 10}));
+ assert.commandWorked(sessionDB.getCollection(kCollName).insert({a: 5, b: 5, c: 5, d: 5}));
+ assert.commandWorked(sessionDB.getCollection(kCollName).insert({a: 10, b: 10, c: 10, d: 10}));
}
function validateCRUDAfterRefine() {
@@ -104,17 +104,17 @@ function validateCRUDAfterRefine() {
ErrorCodes.ShardKeyNotFound);
assert.writeErrorWithCode(sessionDB.getCollection(kCollName).insert({a: -1, b: -1}),
ErrorCodes.ShardKeyNotFound);
- assert.writeOK(sessionDB.getCollection(kCollName).insert({a: 1, b: 1, c: 1, d: 1}));
- assert.writeOK(sessionDB.getCollection(kCollName).insert({a: -1, b: -1, c: -1, d: -1}));
+ assert.commandWorked(sessionDB.getCollection(kCollName).insert({a: 1, b: 1, c: 1, d: 1}));
+ assert.commandWorked(sessionDB.getCollection(kCollName).insert({a: -1, b: -1, c: -1, d: -1}));
// The full shard key is required when updating documents.
assert.writeErrorWithCode(
sessionDB.getCollection(kCollName).update({a: 1, b: 1}, {$set: {b: 2}}), 31025);
assert.writeErrorWithCode(
sessionDB.getCollection(kCollName).update({a: -1, b: -1}, {$set: {b: 2}}), 31025);
- assert.writeOK(
+ assert.commandWorked(
sessionDB.getCollection(kCollName).update({a: 1, b: 1, c: 1, d: 1}, {$set: {b: 2}}));
- assert.writeOK(
+ assert.commandWorked(
sessionDB.getCollection(kCollName).update({a: -1, b: -1, c: -1, d: -1}, {$set: {b: 4}}));
assert.eq(2, sessionDB.getCollection(kCollName).findOne({a: 1}).b);
@@ -131,10 +131,12 @@ function validateCRUDAfterRefine() {
ErrorCodes.ShardKeyNotFound);
assert.writeErrorWithCode(sessionDB.getCollection(kCollName).remove({a: -1, b: -1}, true),
ErrorCodes.ShardKeyNotFound);
- assert.writeOK(sessionDB.getCollection(kCollName).remove({a: 1, b: 2, c: 1, d: 1}, true));
- assert.writeOK(sessionDB.getCollection(kCollName).remove({a: -1, b: 4, c: -1, d: -1}, true));
- assert.writeOK(sessionDB.getCollection(kCollName).remove({a: 5, b: 5, c: 5, d: 5}, true));
- assert.writeOK(sessionDB.getCollection(kCollName).remove({a: 10, b: 10, c: 10, d: 10}, true));
+ assert.commandWorked(sessionDB.getCollection(kCollName).remove({a: 1, b: 2, c: 1, d: 1}, true));
+ assert.commandWorked(
+ sessionDB.getCollection(kCollName).remove({a: -1, b: 4, c: -1, d: -1}, true));
+ assert.commandWorked(sessionDB.getCollection(kCollName).remove({a: 5, b: 5, c: 5, d: 5}, true));
+ assert.commandWorked(
+ sessionDB.getCollection(kCollName).remove({a: 10, b: 10, c: 10, d: 10}, true));
assert.eq(null, sessionDB.getCollection(kCollName).findOne());
}
@@ -259,7 +261,7 @@ assert.commandFailedWithCode(
mongos.adminCommand({refineCollectionShardKey: kNsName, key: {_id: 1, aKey: 1}}),
ErrorCodes.NamespaceNotFound);
-assert.writeOK(mongos.getCollection(kNsName).insert({aKey: 1}));
+assert.commandWorked(mongos.getCollection(kNsName).insert({aKey: 1}));
// Should fail because namespace 'db.foo' is not sharded. NOTE: This NamespaceNotSharded error
// is thrown in RefineCollectionShardKeyCommand by 'getShardedCollectionRoutingInfoWithRefresh'.
@@ -378,7 +380,7 @@ assert.commandFailedWithCode(
// Should fail because only a multikey index exists for new shard key {_id: 1, aKey: 1}.
dropAndReshardColl({_id: 1});
assert.commandWorked(mongos.getCollection(kNsName).createIndex({_id: 1, aKey: 1}));
-assert.writeOK(mongos.getCollection(kNsName).insert({aKey: [1, 2, 3, 4, 5]}));
+assert.commandWorked(mongos.getCollection(kNsName).insert({aKey: [1, 2, 3, 4, 5]}));
assert.commandFailedWithCode(
mongos.adminCommand({refineCollectionShardKey: kNsName, key: {_id: 1, aKey: 1}}),
@@ -408,7 +410,7 @@ validateConfigChangelog(1);
// shard key {_id: 1, aKey: 1}.
dropAndReshardColl({_id: 1});
assert.commandWorked(mongos.getCollection(kNsName).createIndex({_id: 1, aKey: 1}));
-assert.writeOK(mongos.getCollection(kNsName).insert({_id: 12345}));
+assert.commandWorked(mongos.getCollection(kNsName).insert({_id: 12345}));
assert.commandFailedWithCode(
mongos.adminCommand({refineCollectionShardKey: kNsName, key: {_id: 1, aKey: 1}}),
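The validateCRUDAfterRefine hunks above capture the sharp edge of refineCollectionShardKey: once the key grows from {a: 1, b: 1} to {a: 1, b: 1, c: 1, d: 1}, single-document updates and deletes must spell out the full key. A condensed sketch of that contract, reusing the sessionDB and kCollName names from the test:

// A prefix-only filter can no longer target a single document after the refine.
assert.writeErrorWithCode(
    sessionDB.getCollection(kCollName).update({a: 1, b: 1}, {$set: {b: 2}}),
    31025);  // "full shard key required"
// Naming every refined key field restores targeting.
assert.commandWorked(
    sessionDB.getCollection(kCollName).update({a: 1, b: 1, c: 1, d: 1}, {$set: {b: 2}}));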
diff --git a/jstests/sharding/refine_collection_shard_key_jumbo.js b/jstests/sharding/refine_collection_shard_key_jumbo.js
index 9abee48c7fe..2d198f30179 100644
--- a/jstests/sharding/refine_collection_shard_key_jumbo.js
+++ b/jstests/sharding/refine_collection_shard_key_jumbo.js
@@ -25,7 +25,7 @@ function generateJumboChunk() {
bulk.insert({x: x, y: i, big: big});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}
function runBalancer() {
diff --git a/jstests/sharding/regex_targeting.js b/jstests/sharding/regex_targeting.js
index df836cd8ef0..63dc78a61e2 100644
--- a/jstests/sharding/regex_targeting.js
+++ b/jstests/sharding/regex_targeting.js
@@ -70,84 +70,84 @@ assert.writeError(collHashed.insert({_id: /regex value/, hash: 0}));
//
//
// (For now) we can insert a regex shard key
-assert.writeOK(collSharded.insert({a: /regex value/}));
-assert.writeOK(collCompound.insert({a: /regex value/, b: "other value"}));
-assert.writeOK(collNested.insert({a: {b: /regex value/}}));
-assert.writeOK(collHashed.insert({hash: /regex value/}));
+assert.commandWorked(collSharded.insert({a: /regex value/}));
+assert.commandWorked(collCompound.insert({a: /regex value/, b: "other value"}));
+assert.commandWorked(collNested.insert({a: {b: /regex value/}}));
+assert.commandWorked(collHashed.insert({hash: /regex value/}));
//
//
// Query by regex should hit all matching keys, across all shards if applicable
coll.remove({});
-assert.writeOK(coll.insert({a: "abcde-0"}));
-assert.writeOK(coll.insert({a: "abcde-1"}));
-assert.writeOK(coll.insert({a: /abcde.*/}));
+assert.commandWorked(coll.insert({a: "abcde-0"}));
+assert.commandWorked(coll.insert({a: "abcde-1"}));
+assert.commandWorked(coll.insert({a: /abcde.*/}));
assert.eq(coll.find().itcount(), coll.find({a: /abcde.*/}).itcount());
collSharded.remove({});
-assert.writeOK(collSharded.insert({a: "abcde-0"}));
-assert.writeOK(collSharded.insert({a: "abcde-1"}));
-assert.writeOK(collSharded.insert({a: /abcde.*/}));
+assert.commandWorked(collSharded.insert({a: "abcde-0"}));
+assert.commandWorked(collSharded.insert({a: "abcde-1"}));
+assert.commandWorked(collSharded.insert({a: /abcde.*/}));
assert.eq(collSharded.find().itcount(), collSharded.find({a: /abcde.*/}).itcount());
collCompound.remove({});
-assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
-assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
-assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.commandWorked(collCompound.insert({a: "abcde-0", b: 0}));
+assert.commandWorked(collCompound.insert({a: "abcde-1", b: 0}));
+assert.commandWorked(collCompound.insert({a: /abcde.*/, b: 0}));
assert.eq(collCompound.find().itcount(), collCompound.find({a: /abcde.*/}).itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
-assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
-assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-0"}}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-1"}}));
+assert.commandWorked(collNested.insert({a: {b: /abcde.*/}}));
assert.eq(collNested.find().itcount(), collNested.find({'a.b': /abcde.*/}).itcount());
collHashed.remove({});
while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
+ assert.commandWorked(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
}
-assert.writeOK(collHashed.insert({hash: /abcde.*/}));
+assert.commandWorked(collHashed.insert({hash: /abcde.*/}));
assert.eq(collHashed.find().itcount(), collHashed.find({hash: /abcde.*/}).itcount());
//
//
// Update by regex should hit all matching keys, across all shards if applicable
coll.remove({});
-assert.writeOK(coll.insert({a: "abcde-0"}));
-assert.writeOK(coll.insert({a: "abcde-1"}));
-assert.writeOK(coll.insert({a: /abcde.*/}));
-assert.writeOK(coll.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(coll.insert({a: "abcde-0"}));
+assert.commandWorked(coll.insert({a: "abcde-1"}));
+assert.commandWorked(coll.insert({a: /abcde.*/}));
+assert.commandWorked(coll.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
assert.eq(coll.find().itcount(), coll.find({updated: true}).itcount());
collSharded.remove({});
-assert.writeOK(collSharded.insert({a: "abcde-0"}));
-assert.writeOK(collSharded.insert({a: "abcde-1"}));
-assert.writeOK(collSharded.insert({a: /abcde.*/}));
-assert.writeOK(collSharded.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(collSharded.insert({a: "abcde-0"}));
+assert.commandWorked(collSharded.insert({a: "abcde-1"}));
+assert.commandWorked(collSharded.insert({a: /abcde.*/}));
+assert.commandWorked(collSharded.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
assert.eq(collSharded.find().itcount(), collSharded.find({updated: true}).itcount());
collCompound.remove({});
-assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
-assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
-assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
-assert.writeOK(collCompound.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(collCompound.insert({a: "abcde-0", b: 0}));
+assert.commandWorked(collCompound.insert({a: "abcde-1", b: 0}));
+assert.commandWorked(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.commandWorked(collCompound.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
assert.eq(collCompound.find().itcount(), collCompound.find({updated: true}).itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
-assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
-assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
-assert.writeOK(collNested.update({'a.b': /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-0"}}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-1"}}));
+assert.commandWorked(collNested.insert({a: {b: /abcde.*/}}));
+assert.commandWorked(collNested.update({'a.b': /abcde.*/}, {$set: {updated: true}}, {multi: true}));
assert.eq(collNested.find().itcount(), collNested.find({updated: true}).itcount());
collHashed.remove({});
while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
+ assert.commandWorked(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
}
-assert.writeOK(collHashed.insert({hash: /abcde.*/}));
-assert.writeOK(collHashed.update({hash: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(collHashed.insert({hash: /abcde.*/}));
+assert.commandWorked(collHashed.update({hash: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
assert.eq(collHashed.find().itcount(), collHashed.find({updated: true}).itcount());
collSharded.remove({});
@@ -219,40 +219,40 @@ assert.commandFailedWithCode(collNested.update({c: 1}, {a: {b: /abcde.*/}}, {ups
//
// Remove by regex should hit all matching keys, across all shards if applicable
coll.remove({});
-assert.writeOK(coll.insert({a: "abcde-0"}));
-assert.writeOK(coll.insert({a: "abcde-1"}));
-assert.writeOK(coll.insert({a: /abcde.*/}));
-assert.writeOK(coll.remove({a: /abcde.*/}));
+assert.commandWorked(coll.insert({a: "abcde-0"}));
+assert.commandWorked(coll.insert({a: "abcde-1"}));
+assert.commandWorked(coll.insert({a: /abcde.*/}));
+assert.commandWorked(coll.remove({a: /abcde.*/}));
assert.eq(0, coll.find({}).itcount());
collSharded.remove({});
-assert.writeOK(collSharded.insert({a: "abcde-0"}));
-assert.writeOK(collSharded.insert({a: "abcde-1"}));
-assert.writeOK(collSharded.insert({a: /abcde.*/}));
-assert.writeOK(collSharded.remove({a: /abcde.*/}));
+assert.commandWorked(collSharded.insert({a: "abcde-0"}));
+assert.commandWorked(collSharded.insert({a: "abcde-1"}));
+assert.commandWorked(collSharded.insert({a: /abcde.*/}));
+assert.commandWorked(collSharded.remove({a: /abcde.*/}));
assert.eq(0, collSharded.find({}).itcount());
collCompound.remove({});
-assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
-assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
-assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
-assert.writeOK(collCompound.remove({a: /abcde.*/}));
+assert.commandWorked(collCompound.insert({a: "abcde-0", b: 0}));
+assert.commandWorked(collCompound.insert({a: "abcde-1", b: 0}));
+assert.commandWorked(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.commandWorked(collCompound.remove({a: /abcde.*/}));
assert.eq(0, collCompound.find({}).itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
-assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
-assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
-assert.writeOK(collNested.remove({'a.b': /abcde.*/}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-0"}}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-1"}}));
+assert.commandWorked(collNested.insert({a: {b: /abcde.*/}}));
+assert.commandWorked(collNested.remove({'a.b': /abcde.*/}));
assert.eq(0, collNested.find({}).itcount());
collHashed.remove({});
while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
+ assert.commandWorked(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
}
-assert.writeOK(collHashed.insert({hash: /abcde.*/}));
-assert.writeOK(collHashed.remove({hash: /abcde.*/}));
+assert.commandWorked(collHashed.insert({hash: /abcde.*/}));
+assert.commandWorked(collHashed.remove({hash: /abcde.*/}));
assert.eq(0, collHashed.find({}).itcount());
//
@@ -260,23 +260,24 @@ assert.eq(0, collHashed.find({}).itcount());
// Query/Update/Remove by nested regex is different depending on how the nested regex is
// specified
coll.remove({});
-assert.writeOK(coll.insert({a: {b: "abcde-0"}}));
-assert.writeOK(coll.insert({a: {b: "abcde-1"}}));
-assert.writeOK(coll.insert({a: {b: /abcde.*/}}));
+assert.commandWorked(coll.insert({a: {b: "abcde-0"}}));
+assert.commandWorked(coll.insert({a: {b: "abcde-1"}}));
+assert.commandWorked(coll.insert({a: {b: /abcde.*/}}));
assert.eq(1, coll.find({a: {b: /abcde.*/}}).itcount());
-assert.writeOK(coll.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(coll.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
assert.eq(1, coll.find({updated: true}).itcount());
-assert.writeOK(coll.remove({a: {b: /abcde.*/}}));
+assert.commandWorked(coll.remove({a: {b: /abcde.*/}}));
assert.eq(2, coll.find().itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
-assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
-assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-0"}}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-1"}}));
+assert.commandWorked(collNested.insert({a: {b: /abcde.*/}}));
assert.eq(1, collNested.find({a: {b: /abcde.*/}}).itcount());
-assert.writeOK(collNested.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(
+ collNested.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
assert.eq(1, collNested.find({updated: true}).itcount());
-assert.writeOK(collNested.remove({a: {b: /abcde.*/}}));
+assert.commandWorked(collNested.remove({a: {b: /abcde.*/}}));
assert.eq(2, collNested.find().itcount());
st.stop();
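The nested-regex assertions above rest on a query-language distinction worth spelling out: {a: {b: /abcde.*/}} is an exact match against the stored value, while {'a.b': /abcde.*/} evaluates the regex against a.b (and an identical stored regex also matches). A minimal sketch, assuming a hypothetical unsharded scratch collection:

const c = db.regex_demo;  // hypothetical scratch collection
c.drop();
assert.commandWorked(c.insert({a: {b: "abcde-0"}}));
assert.commandWorked(c.insert({a: {b: /abcde.*/}}));
// Exact-match form: only the document storing that literal regex qualifies.
assert.eq(1, c.find({a: {b: /abcde.*/}}).itcount());
// Dotted form: matching strings qualify, and so does the identical stored regex.
assert.eq(2, c.find({'a.b': /abcde.*/}).itcount());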
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index eb7418b76ed..ee216f724bc 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -90,7 +90,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 300; i++) {
bulk.insert({i: i % 10, str: str});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(300, coll.find().itcount());
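remove2.js shows the bulk-write shape this patch touches in dozens of files: build an unordered bulk op, then assert on the BulkWriteResult that execute() returns. A minimal sketch, assuming a hypothetical scratch collection:

const bulk = db.bulk_demo.initializeUnorderedBulkOp();
for (let i = 0; i < 100; i++) {
    bulk.insert({i: i});
}
// execute() also takes an optional write concern, e.g. bulk.execute({w: 3}).
const res = assert.commandWorked(bulk.execute());
assert.eq(100, res.nInserted);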
diff --git a/jstests/sharding/rename.js b/jstests/sharding/rename.js
index bcd37cdf570..8597a4ae6c9 100644
--- a/jstests/sharding/rename.js
+++ b/jstests/sharding/rename.js
@@ -11,14 +11,14 @@ var s = new ShardingTest({shards: 2, mongos: 1, rs: {oplogSize: 10}});
var db = s.getDB("test");
var replTest = s.rs0;
-assert.writeOK(db.foo.insert({_id: 1}));
+assert.commandWorked(db.foo.insert({_id: 1}));
db.foo.renameCollection('bar');
assert.isnull(db.getLastError(), '1.0');
assert.eq(db.bar.findOne(), {_id: 1}, '1.1');
assert.eq(db.bar.count(), 1, '1.2');
assert.eq(db.foo.count(), 0, '1.3');
-assert.writeOK(db.foo.insert({_id: 2}));
+assert.commandWorked(db.foo.insert({_id: 2}));
db.foo.renameCollection('bar', true);
assert.isnull(db.getLastError(), '2.0');
assert.eq(db.bar.findOne(), {_id: 2}, '2.1');
@@ -50,7 +50,7 @@ assert.commandFailed(primary.getDB('test').bar.renameCollection('shardedColl', d
jsTest.log("Testing write concern (1)");
-assert.writeOK(db.foo.insert({_id: 3}));
+assert.commandWorked(db.foo.insert({_id: 3}));
db.foo.renameCollection('bar', true);
var ans = db.runCommand({getLastError: 1, w: 3});
@@ -75,7 +75,7 @@ let liveSlaves = replTest._slaves.filter(function(node) {
replTest.awaitSecondaryNodes(null, liveSlaves);
awaitRSClientHosts(s.s, replTest.getPrimary(), {ok: true, ismaster: true}, replTest.name);
-assert.writeOK(db.foo.insert({_id: 4}));
+assert.commandWorked(db.foo.insert({_id: 4}));
assert.commandWorked(db.foo.renameCollection('bar', true));
ans = db.runCommand({getLastError: 1, w: 3, wtimeout: 5000});
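rename.js still verifies replication with the legacy getLastError command; the rest of this patch leans on per-operation write concerns, which fold the same guarantee into the write itself. A minimal sketch of the modern equivalent, assuming a 3-node replica set behind 'db':

// Instead of a bare insert followed by {getLastError: 1, w: 3, wtimeout: 5000}:
assert.commandWorked(db.foo.insert({_id: 4}, {writeConcern: {w: 3, wtimeout: 5000}}));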
diff --git a/jstests/sharding/rename_across_mongos.js b/jstests/sharding/rename_across_mongos.js
index de2fa50bcea..0c378c5054a 100644
--- a/jstests/sharding/rename_across_mongos.js
+++ b/jstests/sharding/rename_across_mongos.js
@@ -10,7 +10,7 @@ st.s1.getDB(dbName).dropDatabase();
// Create collection on first mongos and insert a document
assert.commandWorked(st.s0.getDB(dbName).runCommand({create: 'CollNameBeforeRename'}));
-assert.writeOK(st.s0.getDB(dbName).CollNameBeforeRename.insert({Key: 1, Value: 1}));
+assert.commandWorked(st.s0.getDB(dbName).CollNameBeforeRename.insert({Key: 1, Value: 1}));
if (st.configRS) {
// Ensure that the second mongos will see the newly created database metadata when
diff --git a/jstests/sharding/replication_with_undefined_shard_key.js b/jstests/sharding/replication_with_undefined_shard_key.js
index 2da48889a4c..cc8f0e89f4a 100644
--- a/jstests/sharding/replication_with_undefined_shard_key.js
+++ b/jstests/sharding/replication_with_undefined_shard_key.js
@@ -15,15 +15,15 @@ assert.commandWorked(mongosDB.adminCommand({
}));
// Insert a document with a literal undefined value.
-assert.writeOK(mongosColl.insert({x: undefined}));
+assert.commandWorked(mongosColl.insert({x: undefined}));
jsTestLog("Doing writes that generate oplog entries including undefined document key");
-assert.writeOK(mongosColl.update(
+assert.commandWorked(mongosColl.update(
{},
{$set: {a: 1}},
{multi: true, writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMs}}));
-assert.writeOK(
+assert.commandWorked(
mongosColl.remove({}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMs}}));
st.stop();
diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js
index 453c4d980f1..402cc7f9016 100644
--- a/jstests/sharding/replmonitor_bad_seed.js
+++ b/jstests/sharding/replmonitor_bad_seed.js
@@ -43,7 +43,7 @@ replTest.awaitSecondaryNodes();
awaitRSClientHosts(st.s0, replTest.nodes, {ok: true});
replTest.awaitNodesAgreeOnPrimary();
-assert.writeOK(st.s0.getDB('test').user.insert({x: 1}));
+assert.commandWorked(st.s0.getDB('test').user.insert({x: 1}));
st.stop();
})();
diff --git a/jstests/sharding/resume_change_stream.js b/jstests/sharding/resume_change_stream.js
index 19c53012fda..0c4c788484f 100644
--- a/jstests/sharding/resume_change_stream.js
+++ b/jstests/sharding/resume_change_stream.js
@@ -54,20 +54,20 @@ function testResume(mongosColl, collToWatch) {
{moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
// Write a document to each chunk.
- assert.writeOK(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
let changeStream = cst.startWatchingChanges(
{pipeline: [{$changeStream: {}}], collection: collToWatch, includeToken: true});
// We awaited the replication of the first writes, so the change stream shouldn't return
// them.
- assert.writeOK(mongosColl.update({_id: -1}, {$set: {updated: true}}));
+ assert.commandWorked(mongosColl.update({_id: -1}, {$set: {updated: true}}));
// Record current time to resume a change stream later in the test.
const resumeTimeFirstUpdate = mongosDB.runCommand({isMaster: 1}).$clusterTime.clusterTime;
- assert.writeOK(mongosColl.update({_id: 1}, {$set: {updated: true}}));
+ assert.commandWorked(mongosColl.update({_id: 1}, {$set: {updated: true}}));
// Test that we see the two writes, and remember their resume tokens.
let next = cst.getOneChange(changeStream);
@@ -82,8 +82,8 @@ function testResume(mongosColl, collToWatch) {
// Write some additional documents, then test that it's possible to resume after the first
// update.
- assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
changeStream = cst.startWatchingChanges({
pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdateOnShard0}}],
@@ -115,7 +115,7 @@ function testResume(mongosColl, collToWatch) {
while (!oplogIsRolledOver()) {
let idVal = 100 + (i++);
- assert.writeOK(
+ assert.commandWorked(
mongosColl.insert({_id: idVal, long_str: largeStr}, {writeConcern: {w: "majority"}}));
sleep(100);
}
@@ -165,23 +165,23 @@ function testResume(mongosColl, collToWatch) {
// Insert test documents.
for (let counter = 0; counter < numberOfDocs / 5; ++counter) {
- assert.writeOK(mongosColl.insert({_id: "abcd" + counter, shardKey: counter * 5 + 0},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "Abcd" + counter, shardKey: counter * 5 + 1},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "aBcd" + counter, shardKey: counter * 5 + 2},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "abCd" + counter, shardKey: counter * 5 + 3},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "abcD" + counter, shardKey: counter * 5 + 4},
- {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: "abcd" + counter, shardKey: counter * 5 + 0},
+ {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: "Abcd" + counter, shardKey: counter * 5 + 1},
+ {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: "aBcd" + counter, shardKey: counter * 5 + 2},
+ {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: "abCd" + counter, shardKey: counter * 5 + 3},
+ {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: "abcD" + counter, shardKey: counter * 5 + 4},
+ {writeConcern: {w: "majority"}}));
}
let allChangesCursor = cst.startWatchingChanges(
{pipeline: [{$changeStream: {}}], collection: collToWatch, includeToken: true});
// Perform the multi-update that will induce timestamp collisions
- assert.writeOK(mongosColl.update({}, {$set: {updated: true}}, {multi: true}));
+ assert.commandWorked(mongosColl.update({}, {$set: {updated: true}}, {multi: true}));
// Loop over documents and open inner change streams resuming from a specified position.
// Note we skip the last document as it does not have the next document so we would
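The resume logic exercised above reduces to saving the _id (resume token) of a returned change and handing it back when reopening the stream. A minimal sketch using the shell's watch() helper, assuming 'coll' is any watchable collection:

let cs = coll.watch();
assert.commandWorked(coll.insert({_id: 1}));
assert.soon(() => cs.hasNext());
const resumeToken = cs.next()._id;  // opaque token identifying this event
cs.close();
// A stream resumed from the token reports events strictly after that one.
cs = coll.watch([], {resumeAfter: resumeToken});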
diff --git a/jstests/sharding/resume_change_stream_from_stale_mongos.js b/jstests/sharding/resume_change_stream_from_stale_mongos.js
index fbc8bd904bb..d4d935eb6a5 100644
--- a/jstests/sharding/resume_change_stream_from_stale_mongos.js
+++ b/jstests/sharding/resume_change_stream_from_stale_mongos.js
@@ -34,8 +34,8 @@ st.ensurePrimaryShard(firstMongosDB.getName(), st.rs0.getURL());
// record a resume token after the first chunk migration.
let changeStream = firstMongosColl.aggregate([{$changeStream: {}}]);
-assert.writeOK(firstMongosColl.insert({_id: -1}));
-assert.writeOK(firstMongosColl.insert({_id: 1}));
+assert.commandWorked(firstMongosColl.insert({_id: -1}));
+assert.commandWorked(firstMongosColl.insert({_id: 1}));
for (let nextId of [-1, 1]) {
assert.soon(() => changeStream.hasNext());
@@ -54,8 +54,8 @@ assert.commandWorked(firstMongosDB.adminCommand(
{moveChunk: firstMongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
// Then do one insert to each shard.
-assert.writeOK(firstMongosColl.insert({_id: -2}));
-assert.writeOK(firstMongosColl.insert({_id: 2}));
+assert.commandWorked(firstMongosColl.insert({_id: -2}));
+assert.commandWorked(firstMongosColl.insert({_id: 2}));
// The change stream should see all the inserts after internally re-establishing cursors after
// the chunk split.
@@ -69,8 +69,8 @@ for (let nextId of [-2, 2]) {
}
// Do some writes that occur on each shard after the resume token.
-assert.writeOK(firstMongosColl.insert({_id: -3}));
-assert.writeOK(firstMongosColl.insert({_id: 3}));
+assert.commandWorked(firstMongosColl.insert({_id: -3}));
+assert.commandWorked(firstMongosColl.insert({_id: 3}));
// Now try to resume the change stream using a stale mongos which believes the collection is
// unsharded. The first mongos should use the shard versioning protocol to discover that the
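Token-based resume is not the only path: resume_change_stream.js above records a cluster time (resumeTimeFirstUpdate) and restarts from it, for which the aggregation option is startAtOperationTime. A minimal sketch, assuming 'coll' is a collection on MongoDB 4.0+:

// Capture the current cluster time from any command reply.
const startTime = db.runCommand({isMaster: 1}).$clusterTime.clusterTime;
assert.commandWorked(coll.insert({_id: "after-the-timestamp"}));
// The stream replays every change at or after the recorded timestamp.
const cs = coll.watch([], {startAtOperationTime: startTime});
assert.soon(() => cs.hasNext());
assert.eq("after-the-timestamp", cs.next().documentKey._id);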
diff --git a/jstests/sharding/resume_change_stream_on_subset_of_shards.js b/jstests/sharding/resume_change_stream_on_subset_of_shards.js
index b914a310e82..6176ead3664 100644
--- a/jstests/sharding/resume_change_stream_on_subset_of_shards.js
+++ b/jstests/sharding/resume_change_stream_on_subset_of_shards.js
@@ -40,13 +40,13 @@ assert.commandWorked(mongosDB.adminCommand(
let changeStream = mongosColl.watch();
// ... then do one write to produce a resume token...
-assert.writeOK(mongosColl.insert({_id: -2}));
+assert.commandWorked(mongosColl.insert({_id: -2}));
assert.soon(() => changeStream.hasNext());
const resumeToken = changeStream.next()._id;
// ... followed by one write to each chunk for testing purposes, i.e. shards 0 and 1.
-assert.writeOK(mongosColl.insert({_id: -1}));
-assert.writeOK(mongosColl.insert({_id: 1}));
+assert.commandWorked(mongosColl.insert({_id: -1}));
+assert.commandWorked(mongosColl.insert({_id: 1}));
// The change stream should see all the inserts after establishing cursors on all shards.
for (let nextId of [-1, 1]) {
@@ -58,7 +58,7 @@ for (let nextId of [-1, 1]) {
}
// Insert another document after storing the resume token.
-assert.writeOK(mongosColl.insert({_id: 2}));
+assert.commandWorked(mongosColl.insert({_id: 2}));
// Resume the change stream and verify that it correctly sees the next insert. This is meant
// to test resuming a change stream when not all shards are aware that the collection exists,
diff --git a/jstests/sharding/retryable_writes.js b/jstests/sharding/retryable_writes.js
index d35172edf3a..9da8f40effa 100644
--- a/jstests/sharding/retryable_writes.js
+++ b/jstests/sharding/retryable_writes.js
@@ -141,8 +141,8 @@ function runTests(mainConn, priConn) {
initialStatus = priConn.adminCommand({serverStatus: 1});
verifyServerStatusFields(initialStatus);
- assert.writeOK(testDBMain.user.insert({_id: 40, x: 1}));
- assert.writeOK(testDBMain.user.insert({_id: 50, y: 1}));
+ assert.commandWorked(testDBMain.user.insert({_id: 40, x: 1}));
+ assert.commandWorked(testDBMain.user.insert({_id: 50, y: 1}));
assert.eq(2, testDBPri.user.find({x: 1}).itcount());
assert.eq(2, testDBPri.user.find({y: 1}).itcount());
@@ -294,8 +294,8 @@ function runTests(mainConn, priConn) {
initialStatus = priConn.adminCommand({serverStatus: 1});
verifyServerStatusFields(initialStatus);
- assert.writeOK(testDBMain.user.insert({_id: 70, f: 1}));
- assert.writeOK(testDBMain.user.insert({_id: 80, f: 1}));
+ assert.commandWorked(testDBMain.user.insert({_id: 70, f: 1}));
+ assert.commandWorked(testDBMain.user.insert({_id: 80, f: 1}));
cmd = {
findAndModify: 'user',
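retryable_writes.js drives inserts and findAndModify statements like the ones above through a logical session so a retried statement is applied exactly once. A minimal sketch of the client-side setup, assuming 'conn' is a connection to a mongos or replica set:

const session = conn.startSession({retryWrites: true});
const sessionDB = session.getDatabase('test');
// The shell attaches lsid and txnNumber; a network-level retry cannot double-apply.
assert.commandWorked(sessionDB.user.insert({_id: 40, x: 1}));
session.endSession();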
diff --git a/jstests/sharding/return_partial_shards_down.js b/jstests/sharding/return_partial_shards_down.js
index edf537d4ed1..cd9b405d4f4 100644
--- a/jstests/sharding/return_partial_shards_down.js
+++ b/jstests/sharding/return_partial_shards_down.js
@@ -40,7 +40,7 @@ st.printShardingStatus();
var inserts = [{_id: -1}, {_id: 1}, {_id: 1000}];
collOneShard.insert(inserts);
-assert.writeOK(collAllShards.insert(inserts));
+assert.commandWorked(collAllShards.insert(inserts));
var returnPartialFlag = 1 << 7;
diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js
index 08a7c3a017a..e1954ce0c8a 100644
--- a/jstests/sharding/safe_secondary_reads_drop_recreate.js
+++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js
@@ -72,7 +72,7 @@ let testCases = {
addShardToZone: {skip: "primary only"},
aggregate: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
checkResults: function(res) {
@@ -107,7 +107,7 @@ let testCases = {
convertToCapped: {skip: "primary only"},
count: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {count: coll, query: {x: 1}},
checkResults: function(res) {
@@ -128,8 +128,8 @@ let testCases = {
delete: {skip: "primary only"},
distinct: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {distinct: coll, key: "x"},
checkResults: function(res) {
@@ -156,7 +156,7 @@ let testCases = {
filemd5: {skip: "does not return user data"},
find: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {find: coll, filter: {x: 1}},
checkResults: function(res) {
@@ -207,8 +207,8 @@ let testCases = {
makeSnapshot: {skip: "does not return user data"},
mapReduce: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {
mapReduce: coll,
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
index 78ade52128e..3853784a980 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
@@ -79,7 +79,7 @@ let testCases = {
addShardToZone: {skip: "primary only"},
aggregate: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
checkResults: function(res) {
@@ -120,7 +120,7 @@ let testCases = {
convertToCapped: {skip: "primary only"},
count: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {count: coll, query: {x: 1}},
checkResults: function(res) {
@@ -147,8 +147,8 @@ let testCases = {
delete: {skip: "primary only"},
distinct: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {distinct: coll, key: "x"},
checkResults: function(res) {
@@ -180,7 +180,7 @@ let testCases = {
filemd5: {skip: "does not return user data"},
find: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {find: coll, filter: {x: 1}},
checkResults: function(res) {
@@ -237,8 +237,8 @@ let testCases = {
makeSnapshot: {skip: "does not return user data"},
mapReduce: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {
mapReduce: coll,
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
index 541124a7048..8557f2a8b53 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
@@ -72,7 +72,7 @@ let testCases = {
addShardToZone: {skip: "primary only"},
aggregate: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
checkResults: function(res) {
@@ -108,7 +108,7 @@ let testCases = {
convertToCapped: {skip: "primary only"},
count: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {count: coll, query: {x: 1}},
checkResults: function(res) {
@@ -130,8 +130,8 @@ let testCases = {
delete: {skip: "primary only"},
distinct: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {distinct: coll, key: "x"},
checkResults: function(res) {
@@ -158,7 +158,7 @@ let testCases = {
filemd5: {skip: "does not return user data"},
find: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {find: coll, filter: {x: 1}},
checkResults: function(res) {
@@ -210,8 +210,8 @@ let testCases = {
makeSnapshot: {skip: "does not return user data"},
mapReduce: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {
mapReduce: coll,
diff --git a/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js b/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js
index 6c8b150aebb..af7d221d17a 100644
--- a/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js
+++ b/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js
@@ -25,7 +25,7 @@ let freshMongos = st.s0;
let staleMongos = st.s1;
jsTest.log("do insert from stale mongos to make it load the routing table before the move");
-assert.writeOK(staleMongos.getCollection(ns).insert({x: 1}));
+assert.commandWorked(staleMongos.getCollection(ns).insert({x: 1}));
jsTest.log("do moveChunk from fresh mongos");
assert.commandWorked(freshMongos.adminCommand({
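The causal-consistency flavor of this test only trusts secondary reads once they reflect the session's own writes. A minimal sketch of that pattern, assuming 'st' is the ShardingTest above:

const session = st.s.startSession({causalConsistency: true});
const sessionColl = session.getDatabase('test').getCollection('user');
assert.commandWorked(sessionColl.insert({x: 2}));
// The session forwards afterClusterTime, so this secondary read waits until
// the insert is visible on the chosen secondary.
assert.eq(1, sessionColl.find({x: 2}).readPref('secondary').itcount());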
diff --git a/jstests/sharding/secondary_shard_versioning.js b/jstests/sharding/secondary_shard_versioning.js
index 94e49c09a5d..a5c684dccb1 100644
--- a/jstests/sharding/secondary_shard_versioning.js
+++ b/jstests/sharding/secondary_shard_versioning.js
@@ -20,7 +20,7 @@ let freshMongos = st.s0;
let staleMongos = st.s1;
jsTest.log("do insert from stale mongos to make it load the routing table before the move");
-assert.writeOK(staleMongos.getDB('test').foo.insert({x: 1}));
+assert.commandWorked(staleMongos.getDB('test').foo.insert({x: 1}));
jsTest.log("do moveChunk from fresh mongos");
assert.commandWorked(freshMongos.adminCommand({
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index f54a70aad7c..b4c7c0842d8 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -7,9 +7,9 @@
var s = new ShardingTest({shards: 2});
var db = s.getDB("test");
-assert.writeOK(db.foo.insert({num: 1, name: "eliot"}));
-assert.writeOK(db.foo.insert({num: 2, name: "sara"}));
-assert.writeOK(db.foo.insert({num: -1, name: "joe"}));
+assert.commandWorked(db.foo.insert({num: 1, name: "eliot"}));
+assert.commandWorked(db.foo.insert({num: 2, name: "sara"}));
+assert.commandWorked(db.foo.insert({num: -1, name: "joe"}));
assert.commandWorked(db.foo.ensureIndex({num: 1}));
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index fd8d8657af6..2bcb9f99843 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -28,9 +28,9 @@ assert.eq(2, s.config.chunks.count({"ns": "test.foo"}), "should be 2 shards");
var chunks = s.config.chunks.find({"ns": "test.foo"}).toArray();
assert.eq(chunks[0].shard, chunks[1].shard, "server should be the same after a split");
-assert.writeOK(db.foo.save({num: 1, name: "eliot"}));
-assert.writeOK(db.foo.save({num: 2, name: "sara"}));
-assert.writeOK(db.foo.save({num: -1, name: "joe"}));
+assert.commandWorked(db.foo.save({num: 1, name: "eliot"}));
+assert.commandWorked(db.foo.save({num: 2, name: "sara"}));
+assert.commandWorked(db.foo.save({num: -1, name: "joe"}));
assert.eq(
3, s.getPrimaryShard("test").getDB("test").foo.find().length(), "not right directly to db A");
@@ -64,15 +64,15 @@ assert.neq(chunks[0].shard, chunks[1].shard, "servers should NOT be the same aft
placeCheck(3);
// Test that inserts go to the right server/shard
-assert.writeOK(db.foo.save({num: 3, name: "bob"}));
+assert.commandWorked(db.foo.save({num: 3, name: "bob"}));
assert.eq(1, primary.foo.find().length(), "after move insert go wrong place?");
assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
-assert.writeOK(db.foo.save({num: -2, name: "funny man"}));
+assert.commandWorked(db.foo.save({num: -2, name: "funny man"}));
assert.eq(2, primary.foo.find().length(), "after move insert go wrong place?");
assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
-assert.writeOK(db.foo.save({num: 0, name: "funny guy"}));
+assert.commandWorked(db.foo.save({num: 0, name: "funny guy"}));
assert.eq(2, primary.foo.find().length(), "boundary A");
assert.eq(4, secondary.foo.find().length(), "boundary B");
@@ -197,7 +197,7 @@ placeCheck(8);
printAll();
var total = db.foo.find().count();
-var res = assert.writeOK(db.foo.update({}, {$inc: {x: 1}}, false, true));
+var res = assert.commandWorked(db.foo.update({}, {$inc: {x: 1}}, false, true));
printAll();
assert.eq(total, res.nModified, res.toString());
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index d0957a1c45d..74dcb30c25e 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -75,7 +75,7 @@ function doCounts(name, total, onlyItCounts) {
}
var total = doCounts("before wrong save");
-assert.writeOK(secondary.insert({_id: 111, num: -3}));
+assert.commandWorked(secondary.insert({_id: 111, num: -3}));
doCounts("after wrong save", total, true);
e = a.find().explain("executionStats").executionStats;
assert.eq(3, e.nReturned, "ex1");
diff --git a/jstests/sharding/shard7.js b/jstests/sharding/shard7.js
index 20122b60e24..094688d6e37 100644
--- a/jstests/sharding/shard7.js
+++ b/jstests/sharding/shard7.js
@@ -41,9 +41,9 @@ assert.eq(0, aggregate.toArray().length);
c.save({a: null, b: null});
c.save({a: 1, b: 1});
-assert.writeOK(c.remove(unsatisfiable));
+assert.commandWorked(c.remove(unsatisfiable));
assert.eq(2, c.count());
-assert.writeOK(c.update(unsatisfiable, {$set: {c: 1}}, false, true));
+assert.commandWorked(c.update(unsatisfiable, {$set: {c: 1}}, false, true));
assert.eq(2, c.count());
assert.eq(0, c.count({c: 1}));
diff --git a/jstests/sharding/shard_aware_init.js b/jstests/sharding/shard_aware_init.js
index 5ed9e129a4d..ea5f350483b 100644
--- a/jstests/sharding/shard_aware_init.js
+++ b/jstests/sharding/shard_aware_init.js
@@ -79,7 +79,7 @@ var runTest = function(mongodConn, configConnStr, awaitVersionUpdate) {
};
// Simulate the upsert that is performed by a config server on addShard.
- assert.writeOK(mongodConn.getDB('admin').system.version.update(
+ assert.commandWorked(mongodConn.getDB('admin').system.version.update(
{
_id: shardIdentityDoc._id,
shardName: shardIdentityDoc.shardName,
diff --git a/jstests/sharding/shard_aware_init_secondaries.js b/jstests/sharding/shard_aware_init_secondaries.js
index a1387592212..bc919924a37 100644
--- a/jstests/sharding/shard_aware_init_secondaries.js
+++ b/jstests/sharding/shard_aware_init_secondaries.js
@@ -37,7 +37,7 @@ var shardIdentityQuery = {
var shardIdentityUpdate = {
$set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}
};
-assert.writeOK(priConn.getDB('admin').system.version.update(
+assert.commandWorked(priConn.getDB('admin').system.version.update(
shardIdentityQuery, shardIdentityUpdate, {upsert: true, writeConcern: {w: 2}}));
var secConn = replTest.getSecondary();
diff --git a/jstests/sharding/shard_aware_primary_failover.js b/jstests/sharding/shard_aware_primary_failover.js
index 9e7f572c3e9..d26c4e34a1c 100644
--- a/jstests/sharding/shard_aware_primary_failover.js
+++ b/jstests/sharding/shard_aware_primary_failover.js
@@ -37,7 +37,7 @@ var shardIdentityQuery = {
var shardIdentityUpdate = {
$set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}
};
-assert.writeOK(primaryConn.getDB('admin').system.version.update(
+assert.commandWorked(primaryConn.getDB('admin').system.version.update(
shardIdentityQuery, shardIdentityUpdate, {upsert: true, writeConcern: {w: 'majority'}}));
replTest.stopMaster();
diff --git a/jstests/sharding/shard_collection_basic.js b/jstests/sharding/shard_collection_basic.js
index f417cdc4165..aa9be219496 100644
--- a/jstests/sharding/shard_collection_basic.js
+++ b/jstests/sharding/shard_collection_basic.js
@@ -55,7 +55,7 @@ function getIndexSpecByName(coll, indexName) {
// Fail if db is not sharded.
assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
// Fail if sharding is not enabled on the db.
assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
@@ -71,7 +71,7 @@ assert.commandFailed(mongos.adminCommand({shardCollection: 'foo', key: "aaa"}));
assert.commandFailed(
mongos.getDB('test').runCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
// Can't shard if key is not specified.
assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo'}));
@@ -119,28 +119,28 @@ testAndClenaupWithKeyNoIndexOK({_id: 'hashed'});
testAndClenaupWithKeyNoIndexOK({a: 1});
// Can't shard a collection with data and no index on the shard key.
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
testAndClenaupWithKeyNoIndexFailed({a: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
testAndClenaupWithKeyOK({a: 1});
// Shard by a hashed key.
testAndClenaupWithKeyNoIndexOK({a: 'hashed'});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
testAndClenaupWithKeyOK({a: 'hashed'});
// Shard by a compound key.
testAndClenaupWithKeyNoIndexOK({x: 1, y: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
testAndClenaupWithKeyNoIndexFailed({x: 1, y: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
testAndClenaupWithKeyOK({x: 1, y: 1});
testAndClenaupWithKeyNoIndexFailed({x: 'hashed', y: 1});
@@ -152,21 +152,21 @@ testAndClenaupWithKeyOK({'z.x': 'hashed'});
// Can't shard by a multikey.
assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
testAndClenaupWithKeyNoIndexFailed({a: 1});
assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1, b: 1}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
testAndClenaupWithKeyOK({a: 'hashed'});
// Can't shard by parallel arrays.
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: [1, 2, 3, 4, 5]}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: [1, 2, 3, 4, 5]}));
testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
@@ -260,7 +260,7 @@ assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key
// shard key as a prefix has a non-simple collation.
mongos.getDB(kDbName).foo.drop();
assert.commandWorked(mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 'foo'}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 'foo'}));
// This index will inherit the collection's default collation.
assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
assert.commandFailed(mongos.adminCommand(
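shard_collection_basic.js walks the index prerequisites of shardCollection: the key needs a supporting index, and that index must not be multikey. A condensed sketch of the happy path, reusing 'mongos' and kDbName from the test and assuming a fresh collection:

assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
// Succeeds: a non-multikey index with the shard key as a prefix exists.
assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));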
diff --git a/jstests/sharding/shard_collection_existing_zones.js b/jstests/sharding/shard_collection_existing_zones.js
index 8030b40ee9a..eb7343f221c 100644
--- a/jstests/sharding/shard_collection_existing_zones.js
+++ b/jstests/sharding/shard_collection_existing_zones.js
@@ -130,7 +130,7 @@ function testNonemptyZonedCollection() {
[{min: {x: 0}, max: {x: 10}}, {min: {x: 10}, max: {x: 20}}, {min: {x: 20}, max: {x: 40}}];
for (let i = 0; i < 40; i++) {
- assert.writeOK(testColl.insert({x: i}));
+ assert.commandWorked(testColl.insert({x: i}));
}
assert.commandWorked(testColl.createIndex(shardKey));
diff --git a/jstests/sharding/shard_collection_verify_initial_chunks.js b/jstests/sharding/shard_collection_verify_initial_chunks.js
index 65c5897371e..38e3d4d67a6 100644
--- a/jstests/sharding/shard_collection_verify_initial_chunks.js
+++ b/jstests/sharding/shard_collection_verify_initial_chunks.js
@@ -27,7 +27,7 @@ assert.commandFailed(mongos.adminCommand(
{shardCollection: 'TestDB.RangeCollEmpty', key: {aKey: 1}, numInitialChunks: 6}));
// Unsupported: Hashed sharding + numInitialChunks + non-empty collection
-assert.writeOK(db.HashedCollNotEmpty.insert({aKey: 1}));
+assert.commandWorked(db.HashedCollNotEmpty.insert({aKey: 1}));
assert.commandWorked(db.HashedCollNotEmpty.createIndex({aKey: "hashed"}));
assert.commandFailed(mongos.adminCommand(
{shardCollection: 'TestDB.HashedCollNotEmpty', key: {aKey: "hashed"}, numInitialChunks: 6}));
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index 8a5c19d1eb9..6e3242647f8 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -17,7 +17,7 @@ var bulk = db.data.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
bulk.insert({_id: i, s: bigString});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
var avgObjSize = db.data.stats().avgObjSize;
var dataSize = db.data.stats().size;
diff --git a/jstests/sharding/shard_existing_coll_chunk_count.js b/jstests/sharding/shard_existing_coll_chunk_count.js
index 91a6abca2ee..7ee54444976 100644
--- a/jstests/sharding/shard_existing_coll_chunk_count.js
+++ b/jstests/sharding/shard_existing_coll_chunk_count.js
@@ -52,7 +52,7 @@ var runCase = function(opts) {
for (; i < limit; i++) {
bulk.insert({i, pad});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
// Create shard key index.
assert.commandWorked(coll.createIndex({i: 1}));
@@ -132,7 +132,7 @@ runCase({
// Lower chunksize to 1MB, and restart the mongod for it to take. We also
// need to restart mongos for the case of the last-stable suite where the
// shard is also last-stable.
-assert.writeOK(
+assert.commandWorked(
s.getDB("config").getCollection("settings").update({_id: "chunksize"}, {$set: {value: 1}}, {
upsert: true
}));
diff --git a/jstests/sharding/shard_identity_rollback.js b/jstests/sharding/shard_identity_rollback.js
index fd437c2ea1b..fe5a24c3e96 100644
--- a/jstests/sharding/shard_identity_rollback.js
+++ b/jstests/sharding/shard_identity_rollback.js
@@ -37,7 +37,7 @@ var shardIdentityDoc = {
clusterId: ObjectId()
};
-assert.writeOK(priConn.getDB('admin').system.version.update(
+assert.commandWorked(priConn.getDB('admin').system.version.update(
{_id: 'shardIdentity'}, shardIdentityDoc, {upsert: true}));
// Ensure sharding state on the primary was initialized
@@ -71,7 +71,7 @@ restartServerReplication(secondaries);
// Wait for a new healthy primary
var newPriConn = replTest.getPrimary();
assert.neq(priConn, newPriConn);
-assert.writeOK(newPriConn.getDB('test').foo.insert({a: 1}, {writeConcern: {w: 'majority'}}));
+assert.commandWorked(newPriConn.getDB('test').foo.insert({a: 1}, {writeConcern: {w: 'majority'}}));
// Restart the original primary so it triggers a rollback of the shardIdentity insert.
jsTest.log("Restarting original primary");
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index a068da936fb..d5ee0becf29 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -34,7 +34,7 @@ var bulk = testDBReplSet1.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
bulk.insert({x: i, text: textString});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Get connection to mongos for the cluster
var mongosConn = shardingTest.s;
@@ -50,7 +50,7 @@ assert.commandWorked(mongosConn.getDB('admin').runCommand(
{shardcollection: testDBName + '.' + testCollName, key: {x: 1}}));
// Test case where GLE should return an error
-assert.writeOK(testDB.foo.insert({_id: 'a', x: 1}));
+assert.commandWorked(testDB.foo.insert({_id: 'a', x: 1}));
assert.writeError(testDB.foo.insert({_id: 'a', x: 1}, {writeConcern: {w: 2, wtimeout: 30000}}));
// Add more data
@@ -58,7 +58,7 @@ bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = numDocs; i < 2 * numDocs; i++) {
bulk.insert({x: i, text: textString});
}
-assert.writeOK(bulk.execute({w: replNodes, wtimeout: 30000}));
+assert.commandWorked(bulk.execute({w: replNodes, wtimeout: 30000}));
// Take down two nodes and make sure slaveOk reads still work
var primary = replSet1._master;
diff --git a/jstests/sharding/shard_keycount.js b/jstests/sharding/shard_keycount.js
index 3076dde5b7e..29a6ee2fc02 100644
--- a/jstests/sharding/shard_keycount.js
+++ b/jstests/sharding/shard_keycount.js
@@ -29,12 +29,12 @@ var coll = db.getCollection(collName);
// Split chunk again
assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
-assert.writeOK(coll.update({_id: 3}, {_id: 3}));
+assert.commandWorked(coll.update({_id: 3}, {_id: 3}));
// Split chunk again
assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
-assert.writeOK(coll.update({_id: 3}, {_id: 3}));
+assert.commandWorked(coll.update({_id: 3}, {_id: 3}));
// Split chunk again
assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
diff --git a/jstests/sharding/shard_kill_and_pooling.js b/jstests/sharding/shard_kill_and_pooling.js
index 13715d62ddc..93d43b0aa98 100644
--- a/jstests/sharding/shard_kill_and_pooling.js
+++ b/jstests/sharding/shard_kill_and_pooling.js
@@ -22,7 +22,7 @@ for (var test = 0; test < 2; test++) {
var coll = mongos.getCollection("foo.bar");
var db = coll.getDB();
- assert.writeOK(coll.insert({hello: "world"}));
+ assert.commandWorked(coll.insert({hello: "world"}));
jsTest.log("Creating new connections...");
diff --git a/jstests/sharding/sharded_limit_batchsize.js b/jstests/sharding/sharded_limit_batchsize.js
index e7f1f589ca9..9113b7dd8ba 100644
--- a/jstests/sharding/sharded_limit_batchsize.js
+++ b/jstests/sharding/sharded_limit_batchsize.js
@@ -101,14 +101,14 @@ assert.commandWorked(db.adminCommand(
// Write 20 documents which all go to the primary shard in the unsharded collection.
for (var i = 1; i <= 10; ++i) {
// These go to shard 1.
- assert.writeOK(shardedCol.insert({_id: i, x: i}));
+ assert.commandWorked(shardedCol.insert({_id: i, x: i}));
// These go to shard 0.
- assert.writeOK(shardedCol.insert({_id: -i, x: -i}));
+ assert.commandWorked(shardedCol.insert({_id: -i, x: -i}));
// These go to shard 0 inside the non-sharded collection.
- assert.writeOK(unshardedCol.insert({_id: i, x: i}));
- assert.writeOK(unshardedCol.insert({_id: -i, x: -i}));
+ assert.commandWorked(unshardedCol.insert({_id: i, x: i}));
+ assert.commandWorked(unshardedCol.insert({_id: -i, x: -i}));
}
//
diff --git a/jstests/sharding/sharded_profile.js b/jstests/sharding/sharded_profile.js
index c38b178f73a..2f2289182e5 100644
--- a/jstests/sharding/sharded_profile.js
+++ b/jstests/sharding/sharded_profile.js
@@ -23,7 +23,7 @@ var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
var inserts = [{_id: 0}, {_id: 1}, {_id: 2}];
-assert.writeOK(st.s1.getCollection(coll.toString()).insert(inserts));
+assert.commandWorked(st.s1.getCollection(coll.toString()).insert(inserts));
profileEntry = profileColl.findOne();
assert.neq(null, profileEntry);
diff --git a/jstests/sharding/sharding_balance1.js b/jstests/sharding/sharding_balance1.js
index f07708d2d23..d6e0384d7f7 100644
--- a/jstests/sharding/sharding_balance1.js
+++ b/jstests/sharding/sharding_balance1.js
@@ -20,7 +20,7 @@ while (inserted < (20 * 1024 * 1024)) {
bulk.insert({_id: num++, s: bigString});
inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/sharding_balance2.js
index 5dd2eabebfb..50365ace129 100644
--- a/jstests/sharding/sharding_balance2.js
+++ b/jstests/sharding/sharding_balance2.js
@@ -34,7 +34,7 @@ while (inserted < (40 * 1024 * 1024)) {
bulk.insert({_id: num++, s: bigString});
inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
assert.gt(s.config.chunks.count({"ns": "test.foo"}), 10);
@@ -60,7 +60,7 @@ while (getShardSize(shardConn) < maxSizeBytes) {
for (var x = 0; x < 20; x++) {
localBulk.insert({x: x, val: bigString});
}
- assert.writeOK(localBulk.execute());
+ assert.commandWorked(localBulk.execute());
// Force the storage engine to flush files to disk so shardSize will get updated.
assert.commandWorked(shardConn.getDB('admin').runCommand({fsync: 1}));
diff --git a/jstests/sharding/sharding_balance3.js b/jstests/sharding/sharding_balance3.js
index d51f5d41d32..4600861ede5 100644
--- a/jstests/sharding/sharding_balance3.js
+++ b/jstests/sharding/sharding_balance3.js
@@ -31,7 +31,7 @@ while (inserted < (40 * 1024 * 1024)) {
bulk.insert({_id: num++, s: bigString});
inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
index e97a6366120..57dfe723830 100644
--- a/jstests/sharding/sharding_balance4.js
+++ b/jstests/sharding/sharding_balance4.js
@@ -62,7 +62,7 @@ for (i = 0; i < N; i++) {
for (i = 0; i < N * 9; i++) {
doUpdate(bulk, false);
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
for (var i = 0; i < 50; i++) {
s.printChunks("test.foo");
@@ -130,7 +130,7 @@ function diff1() {
}
}
- assert.writeOK(res);
+ assert.commandWorked(res);
});
} else {
consecutiveNoProgressMadeErrors = 0;
diff --git a/jstests/sharding/sharding_migrate_cursor1.js b/jstests/sharding/sharding_migrate_cursor1.js
index f196381528e..3bac25b5e34 100644
--- a/jstests/sharding/sharding_migrate_cursor1.js
+++ b/jstests/sharding/sharding_migrate_cursor1.js
@@ -34,7 +34,7 @@ var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
bulk.insert({_id: i, s: bigString});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
diff --git a/jstests/sharding/sharding_multiple_ns_rs.js b/jstests/sharding/sharding_multiple_ns_rs.js
index cd4a70fda15..96584ab161d 100644
--- a/jstests/sharding/sharding_multiple_ns_rs.js
+++ b/jstests/sharding/sharding_multiple_ns_rs.js
@@ -22,8 +22,8 @@ for (var i = 0; i < 100; i++) {
bulk.insert({_id: i, x: i});
bulk2.insert({_id: i, x: i});
}
-assert.writeOK(bulk.execute());
-assert.writeOK(bulk2.execute());
+assert.commandWorked(bulk.execute());
+assert.commandWorked(bulk2.execute());
s.splitAt("test.foo", {_id: 50});
diff --git a/jstests/sharding/sharding_rs1.js b/jstests/sharding/sharding_rs1.js
index af021bf9741..d15f97bafd6 100644
--- a/jstests/sharding/sharding_rs1.js
+++ b/jstests/sharding/sharding_rs1.js
@@ -23,7 +23,7 @@ while (insertedBytes < (10 * 1024 * 1024)) {
bulk.insert({_id: num++, s: bigString, x: Math.random()});
insertedBytes += bigString.length;
}
-assert.writeOK(bulk.execute({w: 3}));
+assert.commandWorked(bulk.execute({w: 3}));
assert.commandWorked(s.s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
index 2b35cf695e4..d2a6f0e2b74 100644
--- a/jstests/sharding/sharding_rs2.js
+++ b/jstests/sharding/sharding_rs2.js
@@ -129,7 +129,7 @@ for (var i = 0; i < 100; i++) {
continue;
bulk.insert({x: i});
}
-assert.writeOK(bulk.execute({w: 3}));
+assert.commandWorked(bulk.execute({w: 3}));
// Counts pass along the connection's options - the connection is slaveOk'd - so we need to wait
// for replication for this and future tests to pass
@@ -225,7 +225,7 @@ rs.getSecondaries().forEach(function(secondary) {
// Modify data only on the primary replica of the primary shard.
// { x: 60 } goes to the shard of "rs", which is the primary shard.
-assert.writeOK(ts.insert({primaryOnly: true, x: 60}));
+assert.commandWorked(ts.insert({primaryOnly: true, x: 60}));
// Read from the secondary through mongos; the doc is not there due to replication delay or fsync.
// But we can guarantee not to read from primary.
assert.eq(0, ts.find({primaryOnly: true, x: 60}).itcount());
@@ -234,7 +234,7 @@ rs.getSecondaries().forEach(function(secondary) {
secondary.getDB("test").fsyncUnlock();
});
// Clean up the data
-assert.writeOK(ts.remove({primaryOnly: true, x: 60}, {writeConcern: {w: 3}}));
+assert.commandWorked(ts.remove({primaryOnly: true, x: 60}, {writeConcern: {w: 3}}));
for (var i = 0; i < 10; i++) {
m = new Mongo(s.s.name);
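
Write-concern arguments ride along unchanged, whether passed to execute() or to the collection helpers. An illustrative sketch (names assumed, not taken from this patch):

var coll = db.getSiblingDB("test").foo;
assert.commandWorked(coll.insert({primaryOnly: true, x: 60}));                   // default write concern
assert.commandWorked(coll.remove({primaryOnly: true}, {writeConcern: {w: 3}}));  // waits for 3 nodes
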
diff --git a/jstests/sharding/sharding_statistics_server_status.js b/jstests/sharding/sharding_statistics_server_status.js
index 575a84f2152..54421d568d9 100644
--- a/jstests/sharding/sharding_statistics_server_status.js
+++ b/jstests/sharding/sharding_statistics_server_status.js
@@ -87,7 +87,7 @@ incrementStatsAndCheckServerShardStats(stats[0], stats[1], numDocsInserted);
// Insert docs and then move chunk again from shard1 to shard0.
for (let i = 0; i < numDocsToInsert; ++i) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
++numDocsInserted;
}
assert.commandWorked(mongos.adminCommand(
diff --git a/jstests/sharding/shards_and_config_return_last_committed_optime.js b/jstests/sharding/shards_and_config_return_last_committed_optime.js
index 780090d9f67..2268240a52d 100644
--- a/jstests/sharding/shards_and_config_return_last_committed_optime.js
+++ b/jstests/sharding/shards_and_config_return_last_committed_optime.js
@@ -116,7 +116,7 @@ let secondary = st.rs0.getSecondary();
st.rs0.awaitLastOpCommitted();
stopServerReplication(secondary);
-assert.writeOK(primary.getDB("test").foo.insert({x: 1}, {writeConcern: {w: 1}}));
+assert.commandWorked(primary.getDB("test").foo.insert({x: 1}, {writeConcern: {w: 1}}));
// Sharded collection.
assertReturnsLastCommittedOpTime(primary.getDB("test"), "foo", "sharding-aware shard primary");
@@ -141,7 +141,7 @@ secondary = st.configRS.getSecondary();
st.configRS.awaitLastOpCommitted();
stopServerReplication(secondary);
-assert.writeOK(primary.getDB("config").foo.insert({x: 1}, {writeConcern: {w: 1}}));
+assert.commandWorked(primary.getDB("config").foo.insert({x: 1}, {writeConcern: {w: 1}}));
assertReturnsLastCommittedOpTime(primary.getDB("test"), "foo", "config server primary");
assertReturnsLastCommittedOpTime(secondary.getDB("test"), "foo", "config server secondary");
diff --git a/jstests/sharding/snapshot_cursor_commands_mongos.js b/jstests/sharding/snapshot_cursor_commands_mongos.js
index a853cc10942..e71fffdfdc4 100644
--- a/jstests/sharding/snapshot_cursor_commands_mongos.js
+++ b/jstests/sharding/snapshot_cursor_commands_mongos.js
@@ -199,7 +199,7 @@ function runTest(testScenario, {useCausalConsistency, commands, collName}) {
// Insert an 11th document which should not be visible to the snapshot cursor. This
// write is performed outside of the session.
- assert.writeOK(mainDb[collName].insert({_id: 10}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mainDb[collName].insert({_id: 10}, {writeConcern: {w: "majority"}}));
verifyInvalidGetMoreAttempts(mainDb, collName, cursorId, lsid, txnNumber);
@@ -247,7 +247,7 @@ function runTest(testScenario, {useCausalConsistency, commands, collName}) {
assert.eq(11, res.cursor.firstBatch.length);
// Remove the 11th document to preserve the collection for the next command.
- assert.writeOK(mainDb[collName].remove({_id: 10}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mainDb[collName].remove({_id: 10}, {writeConcern: {w: "majority"}}));
assert.commandWorked(session.commitTransaction_forTesting());
session.endSession();
diff --git a/jstests/sharding/split_with_force.js b/jstests/sharding/split_with_force.js
index 7a40714c35c..ba2af4a6ed2 100644
--- a/jstests/sharding/split_with_force.js
+++ b/jstests/sharding/split_with_force.js
@@ -24,7 +24,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < (250 * 1000) + 10; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("Insert a bunch of data into the rest of the collection...");
@@ -32,7 +32,7 @@ bulk = coll.initializeUnorderedBulkOp();
for (var i = 1; i <= (250 * 1000); i++) {
bulk.insert({_id: -i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("Get split points of the chunk using force : true...");
diff --git a/jstests/sharding/split_with_force_small.js b/jstests/sharding/split_with_force_small.js
index 8e281dcbe20..f45dda6892e 100644
--- a/jstests/sharding/split_with_force_small.js
+++ b/jstests/sharding/split_with_force_small.js
@@ -27,7 +27,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 1024; i++) {
bulk.insert({_id: -(i + 1)});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("Insert 32 docs into the high chunk of a collection");
@@ -35,7 +35,7 @@ bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 32; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("Split off MaxKey chunk...");
diff --git a/jstests/sharding/stale_mongos_updates_and_removes.js b/jstests/sharding/stale_mongos_updates_and_removes.js
index 85eb196e93e..44343e53a63 100644
--- a/jstests/sharding/stale_mongos_updates_and_removes.js
+++ b/jstests/sharding/stale_mongos_updates_and_removes.js
@@ -27,8 +27,8 @@ function resetCollection() {
assert.commandWorked(staleMongos.adminCommand({shardCollection: collNS, key: {x: 1}}));
for (let i = 0; i < numShardKeys; i++) {
- assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
- assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
+ assert.commandWorked(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
+ assert.commandWorked(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
}
// Make sure data has replicated to all config servers so freshMongos finds a sharded
@@ -116,7 +116,7 @@ function checkAllRemoveQueries(makeMongosStaleFunc) {
function doRemove(query, multiOption, makeMongosStaleFunc) {
makeMongosStaleFunc();
- assert.writeOK(staleMongos.getCollection(collNS).remove(query, multiOption));
+ assert.commandWorked(staleMongos.getCollection(collNS).remove(query, multiOption));
if (multiOption.justOne) {
// A total of one document should have been removed from the collection.
assert.eq(numDocs - 1, staleMongos.getCollection(collNS).find().itcount());
@@ -159,7 +159,7 @@ function checkAllUpdateQueries(makeMongosStaleFunc) {
function doUpdate(query, update, multiOption, makeMongosStaleFunc) {
makeMongosStaleFunc();
- assert.writeOK(staleMongos.getCollection(collNS).update(query, update, multiOption));
+ assert.commandWorked(staleMongos.getCollection(collNS).update(query, update, multiOption));
if (multiOption.multi) {
// All documents matching the query should have been updated.
assert.eq(staleMongos.getCollection(collNS).find(query).itcount(),
diff --git a/jstests/sharding/stale_version_write.js b/jstests/sharding/stale_version_write.js
index 1183e369b2e..065218712bd 100644
--- a/jstests/sharding/stale_version_write.js
+++ b/jstests/sharding/stale_version_write.js
@@ -10,10 +10,10 @@ var mongosB = st.s1;
jsTest.log("Adding new collections...");
var collA = mongosA.getCollection(jsTestName() + ".coll");
-assert.writeOK(collA.insert({hello: "world"}));
+assert.commandWorked(collA.insert({hello: "world"}));
var collB = mongosB.getCollection("" + collA);
-assert.writeOK(collB.insert({hello: "world"}));
+assert.commandWorked(collB.insert({hello: "world"}));
jsTest.log("Enabling sharding...");
@@ -26,7 +26,7 @@ collA.findOne();
jsTest.log("Trigger shard version mismatch...");
-assert.writeOK(collB.insert({goodbye: "world"}));
+assert.commandWorked(collB.insert({goodbye: "world"}));
print("Inserted...");
diff --git a/jstests/sharding/startup_with_all_configs_down.js b/jstests/sharding/startup_with_all_configs_down.js
index 21fd233944c..31d84293d6b 100644
--- a/jstests/sharding/startup_with_all_configs_down.js
+++ b/jstests/sharding/startup_with_all_configs_down.js
@@ -29,7 +29,7 @@ var st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
jsTestLog("Setting up initial data");
for (var i = 0; i < 100; i++) {
- assert.writeOK(st.s.getDB('test').foo.insert({_id: i}));
+ assert.commandWorked(st.s.getDB('test').foo.insert({_id: i}));
}
assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js
index 8354b00114f..0f9f00a667c 100644
--- a/jstests/sharding/stats.js
+++ b/jstests/sharding/stats.js
@@ -37,7 +37,7 @@ s.adminCommand({
var bulk = db.foo.initializeUnorderedBulkOp();
for (i = 0; i < N; i++)
bulk.insert({_id: i});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Flush all writes to disk since some of the stats are dependent on state on disk (like
// totalIndexSize).
diff --git a/jstests/sharding/test_stacked_migration_cleanup.js b/jstests/sharding/test_stacked_migration_cleanup.js
index b85a188938d..86441a17cd6 100644
--- a/jstests/sharding/test_stacked_migration_cleanup.js
+++ b/jstests/sharding/test_stacked_migration_cleanup.js
@@ -29,7 +29,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 10 * 1000; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("Opening a mongod cursor...");
@@ -53,7 +53,7 @@ bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < numChunks; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
sleep(10 * 1000);
diff --git a/jstests/sharding/time_zone_info_mongos.js b/jstests/sharding/time_zone_info_mongos.js
index 73b59b16f7a..5ed55fdf879 100644
--- a/jstests/sharding/time_zone_info_mongos.js
+++ b/jstests/sharding/time_zone_info_mongos.js
@@ -48,8 +48,8 @@ assert.commandWorked(mongosDB.adminCommand(
{moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
// Write a document containing a 'date' field to each chunk.
-assert.writeOK(mongosColl.insert({_id: -1, date: ISODate("2017-11-13T12:00:00.000+0000")}));
-assert.writeOK(mongosColl.insert({_id: 1, date: ISODate("2017-11-13T03:00:00.000+0600")}));
+assert.commandWorked(mongosColl.insert({_id: -1, date: ISODate("2017-11-13T12:00:00.000+0000")}));
+assert.commandWorked(mongosColl.insert({_id: 1, date: ISODate("2017-11-13T03:00:00.000+0600")}));
// Constructs a pipeline which splits the 'date' field into its constituent parts on mongoD,
// reassembles the original date on mongoS, and verifies that the two match. All timezone
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index bbf930c9b8f..b648e2afcab 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -10,7 +10,7 @@ function shardSetup(shardConfig, dbName, collName) {
var db = st.getDB(dbName);
// Set the balancer mode to only balance on autoSplit
- assert.writeOK(st.s.getDB('config').settings.update(
+ assert.commandWorked(st.s.getDB('config').settings.update(
{_id: 'balancer'},
{'$unset': {stopped: ''}, '$set': {mode: 'autoSplitOnly'}},
{writeConcern: {w: 'majority'}}));
@@ -108,7 +108,7 @@ function runTest(test) {
}
}
- assert.writeOK(configDB.tags.remove({ns: db + "." + collName}));
+ assert.commandWorked(configDB.tags.remove({ns: db + "." + collName}));
// End of test cleanup
}
diff --git a/jstests/sharding/trace_missing_docs_test.js b/jstests/sharding/trace_missing_docs_test.js
index 329ad529ac3..ab7b1c058a8 100644
--- a/jstests/sharding/trace_missing_docs_test.js
+++ b/jstests/sharding/trace_missing_docs_test.js
@@ -23,9 +23,9 @@ var testDocMissing = function(useReplicaSet) {
coll.ensureIndex({sk: 1});
assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {sk: 1}}));
- assert.writeOK(coll.insert({_id: 12345, sk: 67890, hello: "world"}));
- assert.writeOK(coll.update({_id: 12345}, {$set: {baz: 'biz'}}));
- assert.writeOK(coll.update({sk: 67890}, {$set: {baz: 'boz'}}));
+ assert.commandWorked(coll.insert({_id: 12345, sk: 67890, hello: "world"}));
+ assert.commandWorked(coll.update({_id: 12345}, {$set: {baz: 'biz'}}));
+ assert.commandWorked(coll.update({sk: 67890}, {$set: {baz: 'boz'}}));
assert.commandWorked(admin.runCommand(
{moveChunk: coll + "", find: {sk: 0}, to: st.shard1.shardName, _waitForDelete: true}));
diff --git a/jstests/sharding/transactions_causal_consistency.js b/jstests/sharding/transactions_causal_consistency.js
index e2f6a9aed58..e67fd331bd1 100644
--- a/jstests/sharding/transactions_causal_consistency.js
+++ b/jstests/sharding/transactions_causal_consistency.js
@@ -29,8 +29,10 @@ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
// Verifies transactions using causal consistency read all causally prior operations.
function runTest(st, readConcern) {
@@ -69,7 +71,7 @@ function runTest(st, readConcern) {
// Clean up for the next iteration.
assert.commandWorked(
st.s.adminCommand({moveChunk: ns, find: docToInsert, to: st.shard1.shardName}));
- assert.writeOK(sessionDB[collName].remove(docToInsert));
+ assert.commandWorked(sessionDB[collName].remove(docToInsert));
}
const kAllowedReadConcernLevels = ["local", "majority", "snapshot"];
diff --git a/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js b/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js
index 11a2c39997f..b87b5b25e9a 100644
--- a/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js
+++ b/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js
@@ -27,8 +27,8 @@ const session = st.s.startSession();
const unshardedCollDB = session.getDatabase(unshardedDbName);
const shardedCollDB = session.getDatabase(shardedDbName);
-assert.writeOK(unshardedCollDB[unshardedCollName].insert({_id: "jack"}));
-assert.writeOK(shardedCollDB[shardedCollName].insert({_id: "jack"}));
+assert.commandWorked(unshardedCollDB[unshardedCollName].insert({_id: "jack"}));
+assert.commandWorked(shardedCollDB[shardedCollName].insert({_id: "jack"}));
// Reload metadata to avoid stale config or stale database version errors.
flushRoutersAndRefreshShardMetadata(st, {ns: shardedNs, dbNames: [unshardedDbName]});
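
Where the longer helper name pushes a statement past the formatter's column limit, the hunks rewrap the call; both forms below are equivalent. A sketch assuming a ShardingTest fixture:

var coll = st.s.getDB(dbName)[collName];  // illustrative fixture names
assert.commandWorked(coll.insert({_id: -1}, {writeConcern: {w: "majority"}}));
assert.commandWorked(
    coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
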
diff --git a/jstests/sharding/transactions_implicit_abort.js b/jstests/sharding/transactions_implicit_abort.js
index 003b6e4cefe..09845a80493 100644
--- a/jstests/sharding/transactions_implicit_abort.js
+++ b/jstests/sharding/transactions_implicit_abort.js
@@ -15,8 +15,10 @@ const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
// Set up a sharded collection with one chunk on each shard.
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
diff --git a/jstests/sharding/transactions_multi_writes.js b/jstests/sharding/transactions_multi_writes.js
index e4c8b43cd95..f24adef1dc7 100644
--- a/jstests/sharding/transactions_multi_writes.js
+++ b/jstests/sharding/transactions_multi_writes.js
@@ -30,9 +30,9 @@ assert.commandWorked(st.s.adminCommand({split: ns, middle: {skey: 10}}));
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {skey: 5}, to: st.shard1.shardName}));
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {skey: 15}, to: st.shard2.shardName}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 3, counter: 0, skey: 15}));
+assert.commandWorked(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
+assert.commandWorked(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
+assert.commandWorked(st.s.getDB(dbName)[collName].insert({_id: 3, counter: 0, skey: 15}));
// Runs the given multi-write and asserts a manually inserted orphan document is not affected.
// The write is assumed to target chunks [min, 0) and [0, 10), which begin on shard0 and shard1,
@@ -58,7 +58,8 @@ function runTest(st, session, writeCmd, staleRouter) {
}
const orphanShardDB = st[orphanShardName].getPrimary().getDB(dbName);
- assert.writeOK(orphanShardDB[collName].insert(orphanDoc, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(
+ orphanShardDB[collName].insert(orphanDoc, {writeConcern: {w: "majority"}}));
// Start a transaction with majority read concern to ensure the orphan will be visible if
// its shard is targeted and send the multi-write.
@@ -98,13 +99,13 @@ function runTest(st, session, writeCmd, staleRouter) {
// Reset the database state for the next iteration.
if (isUpdate) {
- assert.writeOK(sessionDB[collName].update({}, {$set: {counter: 0}}, {multi: true}));
+ assert.commandWorked(sessionDB[collName].update({}, {$set: {counter: 0}}, {multi: true}));
} else { // isDelete
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
+ assert.commandWorked(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
+ assert.commandWorked(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
}
- assert.writeOK(orphanShardDB[collName].remove({skey: orphanDoc.skey}));
+ assert.commandWorked(orphanShardDB[collName].remove({skey: orphanDoc.skey}));
if (staleRouter) {
// Move the chunk back with the main router so it isn't stale.
diff --git a/jstests/sharding/transactions_read_concerns.js b/jstests/sharding/transactions_read_concerns.js
index af2c24b2b02..17fee3c6404 100644
--- a/jstests/sharding/transactions_read_concerns.js
+++ b/jstests/sharding/transactions_read_concerns.js
@@ -17,8 +17,10 @@ const st = new ShardingTest({shards: 2, config: 1});
// Set up a sharded collection with 2 chunks, one on each shard.
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
@@ -65,7 +67,7 @@ function runTest(st, readConcern, sessionOptions) {
assert.commandWorked(session.commitTransaction_forTesting());
// Clean up for the next iteration.
- assert.writeOK(sessionDB[collName].remove({_id: 5}));
+ assert.commandWorked(sessionDB[collName].remove({_id: 5}));
}
// Specifying no read concern level is allowed and should not compute a global snapshot.
diff --git a/jstests/sharding/transactions_reject_writes_for_moved_chunks.js b/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
index 12c7fa1fab3..f02c181c3d4 100644
--- a/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
+++ b/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
@@ -26,8 +26,10 @@ const st = new ShardingTest({shards: 3, mongos: 1, config: 1});
// Set up one sharded collection with 2 chunks, both on the primary shard.
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
@@ -155,8 +157,8 @@ function runTest(testCase, moveChunkBack) {
assert.commandWorked(
st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
}
- assert.writeOK(sessionColl.remove({}));
- assert.writeOK(sessionColl.insert([{_id: 5}, {_id: -5}]));
+ assert.commandWorked(sessionColl.remove({}));
+ assert.commandWorked(sessionColl.insert([{_id: 5}, {_id: -5}]));
}
kCommandTestCases.forEach(testCase => runTest(testCase, false /*moveChunkBack*/));
diff --git a/jstests/sharding/transactions_snapshot_errors_first_statement.js b/jstests/sharding/transactions_snapshot_errors_first_statement.js
index 3b0f5f74953..ed503f89c25 100644
--- a/jstests/sharding/transactions_snapshot_errors_first_statement.js
+++ b/jstests/sharding/transactions_snapshot_errors_first_statement.js
@@ -71,7 +71,7 @@ function runTest(st, collName, numShardsToError, errorCode, isSharded) {
// Clean up after insert to avoid duplicate key errors.
if (commandName === "insert") {
- assert.writeOK(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
+ assert.commandWorked(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
}
//
@@ -89,7 +89,7 @@ function runTest(st, collName, numShardsToError, errorCode, isSharded) {
// Clean up after insert to avoid duplicate key errors.
if (commandName === "insert") {
- assert.writeOK(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
+ assert.commandWorked(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
}
//
@@ -118,7 +118,8 @@ enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
jsTestLog("Unsharded transaction");
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
for (let errorCode of kSnapshotErrors) {
@@ -132,7 +133,8 @@ st.ensurePrimaryShard(dbName, st.shard0.shardName);
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
jsTestLog("One shard sharded transaction");
diff --git a/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js b/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
index d3ac5f6e15c..87a29e271fa 100644
--- a/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
+++ b/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
@@ -76,7 +76,8 @@ enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
jsTestLog("Unsharded transaction");
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
// Single shard case simulates the storage engine discarding an in-use snapshot.
@@ -91,7 +92,8 @@ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
// Set up 2 chunks, [minKey, 10), [10, maxKey), each with one document (includes the document
// already inserted).
assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
jsTestLog("One shard transaction");
diff --git a/jstests/sharding/transactions_stale_database_version_errors.js b/jstests/sharding/transactions_stale_database_version_errors.js
index e92aed58b9c..8507030ba4c 100644
--- a/jstests/sharding/transactions_stale_database_version_errors.js
+++ b/jstests/sharding/transactions_stale_database_version_errors.js
@@ -15,7 +15,8 @@ enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
// Set up two unsharded collections in different databases with shard0 as their primary.
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
@@ -45,7 +46,8 @@ session.startTransaction();
const dbName2 = "test2";
const sessionDB2 = session.getDatabase(dbName2);
-assert.writeOK(st.s.getDB(dbName2)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName2)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: dbName2}));
st.ensurePrimaryShard(dbName2, st.shard1.shardName);
@@ -70,7 +72,7 @@ assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.N
const otherDbName = "other_test";
const otherCollName = "bar";
-assert.writeOK(
+assert.commandWorked(
st.s.getDB(otherDbName)[otherCollName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: otherDbName}));
st.ensurePrimaryShard(otherDbName, st.shard0.shardName);
@@ -80,7 +82,7 @@ const sessionOtherDB = session.getDatabase(otherDbName);
// Advance the router's cached last committed opTime for Shard0, so it chooses a read timestamp
// after the collection is created on shard1, to avoid SnapshotUnavailable.
assert.commandWorked(sessionOtherDB.runCommand({find: otherCollName})); // Not database versioned.
-assert.writeOK(sessionDB[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(sessionDB[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
session.startTransaction();
diff --git a/jstests/sharding/transactions_stale_shard_version_errors.js b/jstests/sharding/transactions_stale_shard_version_errors.js
index 3bc71a01083..2ff76e94b0c 100644
--- a/jstests/sharding/transactions_stale_shard_version_errors.js
+++ b/jstests/sharding/transactions_stale_shard_version_errors.js
@@ -34,8 +34,10 @@ assert.commandWorked(st.rs2.getPrimary().adminCommand(
// Shard two collections in the same database, each with 2 chunks, [minKey, 0), [0, maxKey),
// with one document each, all on Shard0.
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
@@ -48,9 +50,10 @@ expectChunks(st, ns, [2, 0, 0]);
const otherCollName = "bar";
const otherNs = dbName + "." + otherCollName;
-assert.writeOK(
+assert.commandWorked(
st.s.getDB(dbName)[otherCollName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(st.s.getDB(dbName)[otherCollName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[otherCollName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({shardCollection: otherNs, key: {_id: 1}}));
assert.commandWorked(st.s.adminCommand({split: otherNs, middle: {_id: 0}}));
diff --git a/jstests/sharding/transactions_target_at_point_in_time.js b/jstests/sharding/transactions_target_at_point_in_time.js
index 3cdfb4b49fe..1e1de688828 100644
--- a/jstests/sharding/transactions_target_at_point_in_time.js
+++ b/jstests/sharding/transactions_target_at_point_in_time.js
@@ -28,8 +28,10 @@ const st = new ShardingTest({shards: 3, mongos: 1, config: 1});
// Set up one sharded collection with 2 chunks, both on the primary shard.
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
diff --git a/jstests/sharding/transactions_view_resolution.js b/jstests/sharding/transactions_view_resolution.js
index 1a8224ee089..25e4c083e55 100644
--- a/jstests/sharding/transactions_view_resolution.js
+++ b/jstests/sharding/transactions_view_resolution.js
@@ -23,7 +23,7 @@ const unshardedViewName = "unsharded_view";
const viewOnShardedViewName = "sharded_view_view";
function setUpUnshardedCollectionAndView(st, session, primaryShard) {
- assert.writeOK(st.s.getDB(unshardedDbName)[unshardedCollName].insert(
+ assert.commandWorked(st.s.getDB(unshardedDbName)[unshardedCollName].insert(
{_id: 1, x: "unsharded"}, {writeConcern: {w: "majority"}}));
st.ensurePrimaryShard(unshardedDbName, primaryShard);
@@ -37,9 +37,9 @@ function setUpUnshardedCollectionAndView(st, session, primaryShard) {
function setUpShardedCollectionAndView(st, session, primaryShard) {
const ns = shardedDbName + "." + shardedCollName;
- assert.writeOK(st.s.getDB(shardedDbName)[shardedCollName].insert(
+ assert.commandWorked(st.s.getDB(shardedDbName)[shardedCollName].insert(
{_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(shardedDbName)[shardedCollName].insert(
+ assert.commandWorked(st.s.getDB(shardedDbName)[shardedCollName].insert(
{_id: 1}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: shardedDbName}));
st.ensurePrimaryShard(shardedDbName, primaryShard);
@@ -270,7 +270,7 @@ function assertAggResultEqInTransaction(coll, pipeline, expected) {
// is supported.
const lookupDbName = "dbForLookup";
const lookupCollName = "collForLookup";
-assert.writeOK(
+assert.commandWorked(
st.s.getDB(lookupDbName)[lookupCollName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
const lookupColl = session.getDatabase(unshardedDbName)[unshardedCollName];
diff --git a/jstests/sharding/transactions_writes_not_retryable.js b/jstests/sharding/transactions_writes_not_retryable.js
index 99dc2155469..7c33eab52cb 100644
--- a/jstests/sharding/transactions_writes_not_retryable.js
+++ b/jstests/sharding/transactions_writes_not_retryable.js
@@ -97,7 +97,8 @@ const sessionDB = session.getDatabase(dbName);
// Unsharded.
jsTestLog("Testing against unsharded collection");
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
kCmdTestCases.forEach(cmdTestCase => {
runTest(st, session, sessionDB, cmdTestCase.name, cmdTestCase.command, false /*isSharded*/);
diff --git a/jstests/sharding/txn_recover_decision_using_recovery_router.js b/jstests/sharding/txn_recover_decision_using_recovery_router.js
index d148c0fdfbf..47e0f835305 100644
--- a/jstests/sharding/txn_recover_decision_using_recovery_router.js
+++ b/jstests/sharding/txn_recover_decision_using_recovery_router.js
@@ -245,7 +245,8 @@ txnNumber++;
const recoveryToken = startNewMultiShardWriteTransaction();
assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
-assert.writeOK(st.rs1.getPrimary().getDB("config").transactions.remove({}, false /* justOne */));
+assert.commandWorked(
+ st.rs1.getPrimary().getDB("config").transactions.remove({}, false /* justOne */));
assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
ErrorCodes.NoSuchTransaction);
diff --git a/jstests/sharding/txn_writes_during_movechunk.js b/jstests/sharding/txn_writes_during_movechunk.js
index 357ea22e14e..8ad9237f24c 100644
--- a/jstests/sharding/txn_writes_during_movechunk.js
+++ b/jstests/sharding/txn_writes_during_movechunk.js
@@ -13,8 +13,8 @@ st.ensurePrimaryShard('test', st.shard0.shardName);
assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
let coll = st.s.getDB('test').user;
-assert.writeOK(coll.insert({_id: 'updateMe'}));
-assert.writeOK(coll.insert({_id: 'deleteMe'}));
+assert.commandWorked(coll.insert({_id: 'updateMe'}));
+assert.commandWorked(coll.insert({_id: 'deleteMe'}));
pauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
diff --git a/jstests/sharding/unique_index_on_shardservers.js b/jstests/sharding/unique_index_on_shardservers.js
index 4ee9bb007d1..2af5b7a418d 100644
--- a/jstests/sharding/unique_index_on_shardservers.js
+++ b/jstests/sharding/unique_index_on_shardservers.js
@@ -10,7 +10,8 @@ let rs = st.rs0;
// Create `test.coll` and add some indexes on it, covering the default index version as well as
// explicit v=1 and v=2, in both unique and standard variants.
-assert.writeOK(mongos.getDB("test").coll.insert({_id: 1, a: 1, b: 1, c: 1, d: 1, e: 1, f: 1}));
+assert.commandWorked(
+ mongos.getDB("test").coll.insert({_id: 1, a: 1, b: 1, c: 1, d: 1, e: 1, f: 1}));
assert.commandWorked(mongos.getDB("test").coll.createIndex({a: 1}, {"v": 1}));
assert.commandWorked(mongos.getDB("test").coll.createIndex({b: 1}, {"v": 1, "unique": true}));
assert.commandWorked(mongos.getDB("test").coll.createIndex({c: 1}, {"v": 2}));
diff --git a/jstests/sharding/unowned_doc_filtering.js b/jstests/sharding/unowned_doc_filtering.js
index 5a337aaa454..3567b9c4dda 100644
--- a/jstests/sharding/unowned_doc_filtering.js
+++ b/jstests/sharding/unowned_doc_filtering.js
@@ -25,7 +25,7 @@ var inserts = [];
for (var i = 0; i < 100; i++) {
inserts.push({x: i});
}
-assert.writeOK(testDB.foo.insert(inserts));
+assert.commandWorked(testDB.foo.insert(inserts));
assert.commandWorked(testDB.adminCommand({split: 'test.foo', find: {x: 50}}));
assert.commandWorked(
diff --git a/jstests/sharding/unsharded_collection_targetting.js b/jstests/sharding/unsharded_collection_targetting.js
index 5393a212ae4..4ae771e3d6e 100644
--- a/jstests/sharding/unsharded_collection_targetting.js
+++ b/jstests/sharding/unsharded_collection_targetting.js
@@ -22,11 +22,11 @@ st.ensurePrimaryShard(mongosDB.getName(), st.rs1.getURL());
// about the location of the collection before the move.
const mongos2DB = st.s1.getDB(testName);
const mongos2Coll = mongos2DB[testName];
-assert.writeOK(mongos2Coll.insert({_id: 0, a: 0}));
+assert.commandWorked(mongos2Coll.insert({_id: 0, a: 0}));
st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-assert.writeOK(mongos2Coll.insert({_id: 1, a: 0}));
+assert.commandWorked(mongos2Coll.insert({_id: 1, a: 0}));
st.stop();
})();
diff --git a/jstests/sharding/update_immutable_fields.js b/jstests/sharding/update_immutable_fields.js
index 96bf4f454dc..73c3d460403 100644
--- a/jstests/sharding/update_immutable_fields.js
+++ b/jstests/sharding/update_immutable_fields.js
@@ -43,18 +43,18 @@ shard0Coll.remove({});
assert.writeError(shard0Coll.save({_id: 3}));
// Full shard key in save
-assert.writeOK(shard0Coll.save({_id: 1, a: 1}));
+assert.commandWorked(shard0Coll.save({_id: 1, a: 1}));
// Full shard key on replacement (basically the same as above)
shard0Coll.remove({});
-assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}, true));
+assert.commandWorked(shard0Coll.update({_id: 1}, {a: 1}, true));
// Full shard key after $set
shard0Coll.remove({});
-assert.writeOK(shard0Coll.update({_id: 1}, {$set: {a: 1}}, true));
+assert.commandWorked(shard0Coll.update({_id: 1}, {$set: {a: 1}}, true));
// Update existing doc (replacement), same shard key value
-assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}));
+assert.commandWorked(shard0Coll.update({_id: 1}, {a: 1}));
// Update existing doc ($set), same shard key value
assert.commandWorked(shard0Coll.update({_id: 1}, {$set: {a: 1}}));
@@ -74,7 +74,7 @@ assert.writeError(shard0Coll.update({_id: 1}, {$unset: {a: 1}}));
// Error due to removing all the embedded fields.
shard0Coll.remove({});
-assert.writeOK(shard0Coll.save({_id: 2, a: {c: 1, b: 1}}));
+assert.commandWorked(shard0Coll.save({_id: 2, a: {c: 1, b: 1}}));
assert.writeError(shard0Coll.update({}, {$unset: {"a.c": 1}}));
assert.writeError(shard0Coll.update({}, {$unset: {"a.b": 1, "a.c": 1}}));
diff --git a/jstests/sharding/update_sharded.js b/jstests/sharding/update_sharded.js
index ea1939bfd72..c0466216647 100644
--- a/jstests/sharding/update_sharded.js
+++ b/jstests/sharding/update_sharded.js
@@ -51,7 +51,7 @@ for (let i = 0; i < 2; i++) {
assert.writeError(coll.update({_id: 1, key: 1}, {$set: {key: 2}}));
assert.eq(coll.findOne({_id: 1}).key, 1, 'key unchanged');
- assert.writeOK(coll.update({_id: 1, key: 1}, {$set: {foo: 2}}));
+ assert.commandWorked(coll.update({_id: 1, key: 1}, {$set: {foo: 2}}));
coll.update({key: 17}, {$inc: {x: 5}}, true);
assert.eq(5, coll.findOne({key: 17}).x, "up1");
@@ -60,12 +60,12 @@ for (let i = 0; i < 2; i++) {
assert.eq(5, coll.findOne({key: 18}).x, "up2");
// Make sure we can extract exact _id from certain queries
- assert.writeOK(coll.update({_id: ObjectId()}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$or: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$and: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({_id: ObjectId()}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({_id: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({_id: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({$or: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({$and: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({_id: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
// Invalid extraction of exact _id from query
assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
@@ -77,12 +77,12 @@ for (let i = 0; i < 2; i++) {
assert.writeError(coll.update({'_id.x': ObjectId()}, {$set: {x: 1}}, {multi: false}));
// Make sure we can extract exact shard key from certain queries
- assert.writeOK(coll.update({key: ObjectId()}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({key: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({key: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({key: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$or: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$and: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({key: ObjectId()}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({key: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({key: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({key: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({$or: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({$and: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
// Invalid extraction of exact key from query
assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
@@ -93,16 +93,20 @@ for (let i = 0; i < 2; i++) {
assert[hashedKey ? "writeError" : "writeOK"](
coll.update({key: {$gt: 0}}, {$set: {x: 1}}, {multi: false}));
// Note: {key:-1} and {key:-2} fall on shard0 for both hashed and ascending shard keys.
- assert.writeOK(coll.update({$or: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$and: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(
+ coll.update({$or: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(
+ coll.update({$and: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
// In cases where an inexact query does target multiple shards, a single update is rejected.
assert.writeError(coll.update({key: {$gt: MinKey}}, {$set: {x: 1}}, {multi: false}));
assert.writeError(coll.update({$or: [{key: -10}, {key: 10}]}, {$set: {x: 1}}, {multi: false}));
// Make sure failed shard key or _id extraction doesn't affect the other
- assert.writeOK(coll.update({'_id.x': ObjectId(), key: 1}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: ObjectId(), 'key.x': 1}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(
+ coll.update({'_id.x': ObjectId(), key: 1}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(
+ coll.update({_id: ObjectId(), 'key.x': 1}, {$set: {x: 1}}, {multi: false}));
}
s.stop();
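
The update_sharded.js hunks keep the test's contract intact: a {multi: false} write against a sharded collection must be routable from an exact _id or full shard key. Compressed from the assertions above, with 'key' as the shard key:

assert.commandWorked(coll.update({key: ObjectId()}, {$set: {x: 1}}, {multi: false}));  // exact key
assert.writeError(coll.update({key: {$gt: MinKey}}, {$set: {x: 1}}, {multi: false}));  // inexact, multi-shard
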
diff --git a/jstests/sharding/upsert_sharded.js b/jstests/sharding/upsert_sharded.js
index 32a59b9a586..eb92f0c41b4 100644
--- a/jstests/sharding/upsert_sharded.js
+++ b/jstests/sharding/upsert_sharded.js
@@ -20,7 +20,7 @@ var upsertedResult = function(query, expr) {
};
var upsertedField = function(query, expr, fieldName) {
- assert.writeOK(upsertedResult(query, expr));
+ assert.commandWorked(upsertedResult(query, expr));
return coll.findOne()[fieldName];
};
diff --git a/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js b/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js
index 2d6b4c57020..8b49447bed6 100644
--- a/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js
+++ b/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js
@@ -22,7 +22,7 @@ for (let i = 0; i < 3; i++) {
// It shouldn't matter whether the collection existed on the shard already or not; test
// both cases.
if (i === 0) {
- assert.writeOK(st.s.getDB(db).getCollection(coll).insert({x: 1}));
+ assert.commandWorked(st.s.getDB(db).getCollection(coll).insert({x: 1}));
}
assert.commandWorked(st.s.adminCommand({shardCollection: nss, key: {_id: 1}}));
diff --git a/jstests/sharding/validate_collection.js b/jstests/sharding/validate_collection.js
index 0584c2a8c63..647d5c95378 100644
--- a/jstests/sharding/validate_collection.js
+++ b/jstests/sharding/validate_collection.js
@@ -19,12 +19,12 @@ var s = st.s;
var testDb = st.getDB('test');
function setup() {
- assert.writeOK(testDb.test.insert({_id: 0}));
- assert.writeOK(testDb.test.insert({_id: 1}));
+ assert.commandWorked(testDb.test.insert({_id: 0}));
+ assert.commandWorked(testDb.test.insert({_id: 1}));
- assert.writeOK(testDb.dummy.insert({_id: 0}));
- assert.writeOK(testDb.dummy.insert({_id: 1}));
- assert.writeOK(testDb.dummy.insert({_id: 2}));
+ assert.commandWorked(testDb.dummy.insert({_id: 0}));
+ assert.commandWorked(testDb.dummy.insert({_id: 1}));
+ assert.commandWorked(testDb.dummy.insert({_id: 2}));
}
function validate(valid) {
diff --git a/jstests/sharding/view_rewrite.js b/jstests/sharding/view_rewrite.js
index e0177f84b80..dae49dc2b0b 100644
--- a/jstests/sharding/view_rewrite.js
+++ b/jstests/sharding/view_rewrite.js
@@ -41,7 +41,7 @@ assert.commandWorked(
mongosDB.adminCommand({moveChunk: coll.getFullName(), find: {a: 5}, to: "view_rewrite-rs1"}));
for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
assert.commandWorked(mongosDB.createView("view", coll.getName(), []));
diff --git a/jstests/sharding/views.js b/jstests/sharding/views.js
index 876406902a6..48b724a6a99 100644
--- a/jstests/sharding/views.js
+++ b/jstests/sharding/views.js
@@ -58,7 +58,7 @@ assert.commandWorked(
db.adminCommand({moveChunk: coll.getFullName(), find: {a: 25}, to: st.shard1.shardName}));
for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
assert.commandWorked(db.createView("view", coll.getName(), [{$match: {a: {$gte: 4}}}]));
diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js
index 0c808102bf3..197d29ccc90 100644
--- a/jstests/sharding/write_cmd_auto_split.js
+++ b/jstests/sharding/write_cmd_auto_split.js
@@ -22,7 +22,7 @@ assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());
// a max chunk size of 1MB we'd expect the autosplitter to split this into
// at least 3 chunks
for (var x = 0; x < 3100; x++) {
- assert.writeOK(testDB.runCommand(
+ assert.commandWorked(testDB.runCommand(
{insert: 'insert', documents: [{x: x, v: doc1k}], ordered: false, writeConcern: {w: 1}}));
}
@@ -41,7 +41,7 @@ assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key:
assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
for (var x = 0; x < 2100; x++) {
- assert.writeOK(testDB.runCommand({
+ assert.commandWorked(testDB.runCommand({
update: 'update',
updates: [{q: {x: x}, u: {x: x, v: doc1k}, upsert: true}],
ordered: false,
@@ -62,7 +62,7 @@ assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key:
assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
for (var x = 0; x < 1100; x++) {
- assert.writeOK(testDB.runCommand({
+ assert.commandWorked(testDB.runCommand({
delete: 'delete',
deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
ordered: false,
@@ -94,7 +94,7 @@ for (var x = 0; x < 2100; x += 400) {
docs.push({x: (x + y), v: doc1k});
}
- assert.writeOK(testDB.runCommand(
+ assert.commandWorked(testDB.runCommand(
{insert: 'insert', documents: docs, ordered: false, writeConcern: {w: 1}}));
}
@@ -118,7 +118,7 @@ for (var x = 0; x < 2100; x += 400) {
docs.push({q: {x: id}, u: {x: id, v: doc1k}, upsert: true});
}
- assert.writeOK(
+ assert.commandWorked(
testDB.runCommand({update: 'update', updates: docs, ordered: false, writeConcern: {w: 1}}));
}
@@ -142,7 +142,7 @@ for (var x = 0; x < 2100; x += 400) {
docs.push({q: {x: id, v: doc1k}, top: 0});
}
- assert.writeOK(testDB.runCommand({
+ assert.commandWorked(testDB.runCommand({
delete: 'delete',
deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
ordered: false,
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js
index dcd3b93fb71..1278d6c6dfe 100644
--- a/jstests/sharding/zbigMapReduce.js
+++ b/jstests/sharding/zbigMapReduce.js
@@ -38,7 +38,7 @@ for (var j = 0; j < 100; j++) {
bulk.insert({i: idInc++, val: valInc++, y: str});
}
}
-assert.writeOK(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
+assert.commandWorked(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
jsTest.log("Documents inserted, doing double-checks of insert...");
@@ -126,7 +126,7 @@ for (var j = 0; j < 100; j++) {
for (i = 0; i < 512; i++) {
bulk.insert({i: idInc++, val: valInc++, y: str});
}
- assert.writeOK(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
+ assert.commandWorked(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
}
jsTest.log("No errors...");
diff --git a/jstests/sharding/zero_shard_version.js b/jstests/sharding/zero_shard_version.js
index 1d4ccbdb3d6..9a330271347 100644
--- a/jstests/sharding/zero_shard_version.js
+++ b/jstests/sharding/zero_shard_version.js
@@ -25,7 +25,7 @@ var checkShardMajorVersion = function(conn, expectedVersion) {
// mongos0: 1|0|a
var testDB_s1 = st.s1.getDB('test');
-assert.writeOK(testDB_s1.user.insert({x: 1}));
+assert.commandWorked(testDB_s1.user.insert({x: 1}));
assert.commandWorked(
testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
@@ -62,7 +62,7 @@ assert.neq(null, testDB_s3.user.findOne({x: 1}));
// mongos versions: s0, s2, s3: 2|0|a
testDB_s1.user.drop();
-assert.writeOK(testDB_s1.user.insert({x: 10}));
+assert.commandWorked(testDB_s1.user.insert({x: 10}));
// shard0: 0|0|0
// shard1: 0|0|0
diff --git a/jstests/slow1/conc_update.js b/jstests/slow1/conc_update.js
index 083b333b281..34398d91f3f 100644
--- a/jstests/slow1/conc_update.js
+++ b/jstests/slow1/conc_update.js
@@ -13,7 +13,7 @@ var bulk = db.conc.initializeUnorderedBulkOp();
for (var i = 0; i < NRECORDS; i++) {
bulk.insert({x: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
print("making an index (this will take a while)");
db.conc.ensureIndex({x: 1});
@@ -24,11 +24,11 @@ const updater = startParallelShell(
"db = db.getSisterDB('concurrency');\
db.concflag.insert({ inprog: true });\
sleep(20);\
- assert.writeOK(db.conc.update({}, \
+ assert.commandWorked(db.conc.update({}, \
{ $inc: { x: " +
NRECORDS +
"}}, false, true)); \
- assert.writeOK(db.concflag.update({}, { inprog: false }));");
+ assert.commandWorked(db.concflag.update({}, { inprog: false }));");
assert.soon(function() {
var x = db.concflag.findOne();
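
Assertions embedded in startParallelShell code strings receive the same substitution, escapes included. A minimal standalone sketch (database and collection names illustrative):

var join = startParallelShell(
    "assert.commandWorked(db.getSiblingDB('concurrency').concflag.update({}, {inprog: false}));");
join();
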
diff --git a/jstests/slow1/initial_sync_many_dbs.js b/jstests/slow1/initial_sync_many_dbs.js
index 03eea70525e..c042c8190d8 100644
--- a/jstests/slow1/initial_sync_many_dbs.js
+++ b/jstests/slow1/initial_sync_many_dbs.js
@@ -31,7 +31,7 @@ for (var i = 0; i < num_dbs; i++) {
var collname = name + '_coll' + j;
var coll = primary.getDB(dbname)[collname];
for (var k = 0; k < num_docs; k++) {
- assert.writeOK(coll.insert({_id: k}));
+ assert.commandWorked(coll.insert({_id: k}));
}
}
}
diff --git a/jstests/slow1/mr_during_migrate.js b/jstests/slow1/mr_during_migrate.js
index 06d79c46e92..8bd8392154b 100644
--- a/jstests/slow1/mr_during_migrate.js
+++ b/jstests/slow1/mr_during_migrate.js
@@ -21,7 +21,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
bulk.insert({_id: i, data: data});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Make sure everything got inserted
assert.eq(numDocs, coll.find().itcount());
diff --git a/jstests/slow1/sharding_multiple_collections.js b/jstests/slow1/sharding_multiple_collections.js
index f6570d08a69..428554bc1c1 100644
--- a/jstests/slow1/sharding_multiple_collections.js
+++ b/jstests/slow1/sharding_multiple_collections.js
@@ -22,8 +22,8 @@ for (i = 0; i < N; i++) {
bulk.insert({_id: i, s: S});
bulk2.insert({_id: i, s: S, s2: S});
}
-assert.writeOK(bulk.execute());
-assert.writeOK(bulk2.execute());
+assert.commandWorked(bulk.execute());
+assert.commandWorked(bulk2.execute());
s.printShardingStatus();
diff --git a/jstests/ssl/initial_sync1_x509.js b/jstests/ssl/initial_sync1_x509.js
index 6c6932f049c..554c6328ca5 100644
--- a/jstests/ssl/initial_sync1_x509.js
+++ b/jstests/ssl/initial_sync1_x509.js
@@ -53,7 +53,7 @@ function runInitialSyncTest() {
for (var i = 0; i < 100; i++) {
bulk.insert({date: new Date(), x: i, str: "all the talk on the market"});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
print("total in foo: " + foo.bar.count());
print("4. Make sure synced");
@@ -65,7 +65,7 @@ function runInitialSyncTest() {
for (var i = 0; i < 100; i++) {
bulk.insert({date: new Date(), x: i, str: "all the talk on the market"});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
print("6. Everyone happy eventually");
replTest.awaitReplication(300000);
diff --git a/jstests/ssl/libs/ssl_helpers.js b/jstests/ssl/libs/ssl_helpers.js
index e7ef4c01388..d41fc6b7f10 100644
--- a/jstests/ssl/libs/ssl_helpers.js
+++ b/jstests/ssl/libs/ssl_helpers.js
@@ -85,8 +85,8 @@ function testShardedLookup(shardingTest) {
barBulk.insert({_id: i});
lookupShouldReturn.push({_id: i, bar_docs: [{_id: i}]});
}
- assert.writeOK(fooBulk.execute());
- assert.writeOK(barBulk.execute());
+ assert.commandWorked(fooBulk.execute());
+ assert.commandWorked(barBulk.execute());
var docs =
lookupdb.foo
@@ -154,7 +154,7 @@ function mixedShardTest(options1, options2, shouldSucceed) {
for (var i = 0; i < 128; i++) {
bulk.insert({_id: i, string: bigstr});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
assert.eq(128, db1.col.count(), "error retrieving documents from cluster");
// Split chunk to make it small enough to move
diff --git a/jstests/ssl/sharding_with_x509.js b/jstests/ssl/sharding_with_x509.js
index 326d0167585..d52d1e20d17 100644
--- a/jstests/ssl/sharding_with_x509.js
+++ b/jstests/ssl/sharding_with_x509.js
@@ -44,7 +44,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < toInsert; i++) {
bulk.insert({my: "test", data: "to", insert: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
print("starting updating phase");
@@ -55,7 +55,7 @@ for (var i = 0; i < toUpdate; i++) {
var id = coll.findOne({insert: i})._id;
bulk.find({insert: i, _id: id}).update({$inc: {counter: 1}});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
print("starting deletion");
@@ -65,7 +65,7 @@ bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < toDelete; i++) {
bulk.find({insert: i}).removeOne();
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Make sure the right amount of data is there
assert.eq(coll.find({my: 'test'}).itcount(), toInsert / 2);
diff --git a/jstests/ssl/ssl_fragment.js b/jstests/ssl/ssl_fragment.js
index cad36a47226..4e15262e57f 100644
--- a/jstests/ssl/ssl_fragment.js
+++ b/jstests/ssl/ssl_fragment.js
@@ -14,7 +14,7 @@ function runTest(conn) {
}
const ssl_frag = conn.getCollection('test.ssl_frag');
- assert.writeOK(ssl_frag.insert({_id: "large_str", foo: s}));
+ assert.commandWorked(ssl_frag.insert({_id: "large_str", foo: s}));
const read = ssl_frag.find({_id: "large_str"}).toArray()[0].foo;
assert.eq(s, read, "Did not receive value written");
diff --git a/jstests/ssl/upgrade_noauth_to_x509_ssl.js b/jstests/ssl/upgrade_noauth_to_x509_ssl.js
index f533047d1b7..17b6ab30f21 100644
--- a/jstests/ssl/upgrade_noauth_to_x509_ssl.js
+++ b/jstests/ssl/upgrade_noauth_to_x509_ssl.js
@@ -34,7 +34,7 @@ var testDB = rstConn1.getDB(dbName);
assert.commandWorked(rstConn1.adminCommand(
{createUser: 'root', pwd: 'root', roles: ['root'], writeConcern: {w: 3}}));
-assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
+assert.commandWorked(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
assert.eq(1, testDB.a.count(), 'Error interacting with replSet');
print('=== UPGRADE transition to x509/allowSSL -> transition to x509/preferSSL ===');
@@ -43,7 +43,7 @@ rst.nodes.forEach(function(node) {
});
rst.awaitSecondaryNodes();
testDB = rst.getPrimary().getDB(dbName);
-assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
+assert.commandWorked(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
assert.eq(2, testDB.a.count(), 'Error interacting with replSet');
print('=== UPGRADE transition to x509/preferSSL -> x509/requireSSL ===');
@@ -51,7 +51,7 @@ rst.upgradeSet(x509RequireSSL, 'root', 'root');
// upgradeSet leaves its connections logged in as root
testDB = rst.getPrimary().getDB(dbName);
-assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
+assert.commandWorked(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
assert.eq(3, testDB.a.count(), 'Error interacting with replSet');
rst.stopSet();
diff --git a/jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js b/jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js
index 79330a27c27..90004a956fc 100644
--- a/jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js
+++ b/jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js
@@ -27,7 +27,7 @@ rst.startSet();
rst.initiate();
var testDB = rst.getPrimary().getDB(dbName);
-assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
+assert.commandWorked(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
assert.eq(1, testDB.a.find().itcount(), 'Error interacting with replSet');
print('=== UPGRADE no-auth/no-ssl -> transition to X509/allowSSL ===');
@@ -35,7 +35,7 @@ rst.upgradeSet(transitionToX509AllowSSL);
// Connect to the new primary
testDB = rst.getPrimary().getDB(dbName);
-assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
+assert.commandWorked(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
assert.eq(2, testDB.a.find().itcount(), 'Error interacting with replSet');
rst.stopSet();
diff --git a/jstests/sslSpecial/x509_cluster_auth_rollover.js b/jstests/sslSpecial/x509_cluster_auth_rollover.js
index ab106be3679..404ee8e0cca 100644
--- a/jstests/sslSpecial/x509_cluster_auth_rollover.js
+++ b/jstests/sslSpecial/x509_cluster_auth_rollover.js
@@ -59,7 +59,7 @@ const rolloverConfig = function(newConfig) {
assert.soon(() => {
let primary = rst.getPrimary();
assert.commandWorked(primary.getDB("admin").runCommand({isMaster: 1}));
- assert.writeOK(primary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
+ assert.commandWorked(primary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
// Start a shell that connects to the server with the current CA/cert configuration
// and ensure that it's able to connect and authenticate with x509.
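
One subtlety with the hunk above: in the mongo shell, assert.soon retries only when its callback returns a falsy value and lets exceptions escape, so if the migrated assert.commandWorked throws on a transient failure it aborts the wait instead of retrying. assert.soonNoExcept is the variant that turns a throw into another retry; a hedged sketch of that form (illustrative, not part of this patch):

    assert.soonNoExcept(() => {
        var primary = rst.getPrimary();
        assert.commandWorked(primary.getDB('test').a.insert({a: 1}));
        return true;  // reached only once the write succeeds
    });
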
diff --git a/jstests/tool/dumprestoreWithNoOptions.js b/jstests/tool/dumprestoreWithNoOptions.js
index e6fdbd87ca8..68c69289670 100644
--- a/jstests/tool/dumprestoreWithNoOptions.js
+++ b/jstests/tool/dumprestoreWithNoOptions.js
@@ -29,7 +29,7 @@ for (var opt in options) {
cappedOptions[opt],
'invalid option:' + tojson(options) + " " + tojson(cappedOptions));
}
-assert.writeOK(db.capped.insert({x: 1}));
+assert.commandWorked(db.capped.insert({x: 1}));
// Full dump/restore
@@ -57,7 +57,7 @@ var cappedOptions = db.capped.exists().options;
for (var opt in options) {
assert.eq(options[opt], cappedOptions[opt], 'invalid option');
}
-assert.writeOK(db.capped.insert({x: 1}));
+assert.commandWorked(db.capped.insert({x: 1}));
dumppath = t.ext + "noOptionsSingleDump/";
mkdir(dumppath);
@@ -88,7 +88,7 @@ for (var opt in options) {
assert.eq(options[opt], cappedOptions[opt], 'invalid option');
}
-assert.writeOK(db.capped.insert({x: 1}));
+assert.commandWorked(db.capped.insert({x: 1}));
dumppath = t.ext + "noOptionsSingleColDump/";
mkdir(dumppath);
diff --git a/jstests/tool/exportimport_bigarray.js b/jstests/tool/exportimport_bigarray.js
index cd3219a70cd..87142b4ea0d 100644
--- a/jstests/tool/exportimport_bigarray.js
+++ b/jstests/tool/exportimport_bigarray.js
@@ -25,7 +25,7 @@ var bulk = src.initializeUnorderedBulkOp();
for (i = 0; i < numDocs; ++i) {
bulk.insert({x: bigString});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
data = 'data/exportimport_array_test.json';
diff --git a/jstests/tool/tool_replset.js b/jstests/tool/tool_replset.js
index be65c11118c..11c1aceae42 100644
--- a/jstests/tool/tool_replset.js
+++ b/jstests/tool/tool_replset.js
@@ -31,7 +31,7 @@ replTest.initiate(config);
var master = replTest.getPrimary();
assert.eq(nodes[0], master, "incorrect master elected");
for (var i = 0; i < 100; i++) {
- assert.writeOK(master.getDB("foo").bar.insert({a: i}));
+ assert.commandWorked(master.getDB("foo").bar.insert({a: i}));
}
replTest.awaitReplication();
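
tool_replset.js issues 100 individually asserted single-document inserts. A functionally equivalent form, shown only as a sketch (one batched round trip instead of 100, same data, not a change this patch makes):

    var bulk = master.getDB("foo").bar.initializeUnorderedBulkOp();
    for (var i = 0; i < 100; i++) {
        bulk.insert({a: i});  // queued client-side; nothing is sent yet
    }
    assert.commandWorked(bulk.execute());  // single batched write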