author:    James Wahlin <james.wahlin@mongodb.com>  2019-08-14 13:52:59 +0000
committer: evergreen <evergreen@mongodb.com>  2019-08-14 13:52:59 +0000
commit:    39c3a5d77b976e131d37476f2e7255d6058f5093 (patch)
tree:      01cc28719f215b17196ec913f475cd8efda9b37d /jstests/sharding
parent:    69d0dd1dc4fb1f78d21c47aa5dd82aa9077b69eb (diff)
SERVER-42773 Replace uses of the assert.writeOK() JavaScript assertion with assert.commandWorked()
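
The two assertions check different result shapes: assert.writeOK() inspects the WriteResult (or BulkWriteResult) returned by shell CRUD helpers for write errors and write concern errors, while assert.commandWorked() verifies that a command reply reports ok: 1. The swap therefore relies on assert.commandWorked() also accepting write results, which is the premise of this change. A minimal sketch of the before/after pattern (collection name is illustrative):

// Before: write-specific assertion on the WriteResult returned by insert().
assert.writeOK(db.coll.insert({x: 1}));
// After: the general-purpose assertion at the same call site. It throws
// if the result carries a write error, a write concern error, or ok: 0.
assert.commandWorked(db.coll.insert({x: 1}));
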
Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/accurate_count_with_predicate.js | 4
-rw-r--r--  jstests/sharding/addshard1.js | 6
-rw-r--r--  jstests/sharding/addshard2.js | 2
-rw-r--r--  jstests/sharding/addshard5.js | 2
-rw-r--r--  jstests/sharding/agg_project_limit_pipe_split.js | 2
-rw-r--r--  jstests/sharding/agg_sort.js | 4
-rw-r--r--  jstests/sharding/aggregates_during_balancing.js | 4
-rw-r--r--  jstests/sharding/aggregation_currentop.js | 2
-rw-r--r--  jstests/sharding/aggregations_in_session.js | 2
-rw-r--r--  jstests/sharding/all_config_servers_blackholed_from_mongos.js | 2
-rw-r--r--  jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js | 6
-rw-r--r--  jstests/sharding/allow_partial_results.js | 2
-rw-r--r--  jstests/sharding/array_shard_key.js | 20
-rw-r--r--  jstests/sharding/auth.js | 6
-rw-r--r--  jstests/sharding/authCommands.js | 5
-rw-r--r--  jstests/sharding/auth_repl.js | 2
-rw-r--r--  jstests/sharding/auth_slaveok_routing.js | 2
-rw-r--r--  jstests/sharding/authmr.js | 4
-rw-r--r--  jstests/sharding/authwhere.js | 4
-rw-r--r--  jstests/sharding/auto_rebalance_parallel.js | 8
-rw-r--r--  jstests/sharding/auto_rebalance_parallel_replica_sets.js | 8
-rw-r--r--  jstests/sharding/autodiscover_config_rs_from_secondary.js | 2
-rw-r--r--  jstests/sharding/autosplit.js | 2
-rw-r--r--  jstests/sharding/autosplit_heuristics.js | 2
-rw-r--r--  jstests/sharding/autosplit_with_balancer.js | 2
-rw-r--r--  jstests/sharding/balance_repl.js | 2
-rw-r--r--  jstests/sharding/balancer_window.js | 23
-rw-r--r--  jstests/sharding/basic_drop_coll.js | 6
-rw-r--r--  jstests/sharding/basic_split.js | 4
-rw-r--r--  jstests/sharding/batch_write_command_sharded.js | 6
-rw-r--r--  jstests/sharding/bulk_insert.js | 22
-rw-r--r--  jstests/sharding/bulk_shard_insert.js | 2
-rw-r--r--  jstests/sharding/change_stream_chunk_migration.js | 40
-rw-r--r--  jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js | 2
-rw-r--r--  jstests/sharding/change_stream_lookup_single_shard_cluster.js | 2
-rw-r--r--  jstests/sharding/change_stream_metadata_notifications.js | 10
-rw-r--r--  jstests/sharding/change_stream_read_preference.js | 12
-rw-r--r--  jstests/sharding/change_stream_show_migration_events.js | 40
-rw-r--r--  jstests/sharding/change_stream_update_lookup_collation.js | 16
-rw-r--r--  jstests/sharding/change_stream_update_lookup_read_concern.js | 6
-rw-r--r--  jstests/sharding/change_streams.js | 32
-rw-r--r--  jstests/sharding/change_streams_establishment_finds_new_shards.js | 4
-rw-r--r--  jstests/sharding/change_streams_primary_shard_unaware.js | 10
-rw-r--r--  jstests/sharding/change_streams_shards_start_in_sync.js | 6
-rw-r--r--  jstests/sharding/change_streams_unsharded_becomes_sharded.js | 20
-rw-r--r--  jstests/sharding/change_streams_whole_db.js | 22
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_hashed.js | 2
-rw-r--r--  jstests/sharding/clone_catalog_data.js | 4
-rw-r--r--  jstests/sharding/coll_epoch_test1.js | 6
-rw-r--r--  jstests/sharding/coll_epoch_test2.js | 11
-rw-r--r--  jstests/sharding/collation_lookup.js | 8
-rw-r--r--  jstests/sharding/collation_targeting.js | 74
-rw-r--r--  jstests/sharding/collation_targeting_inherited.js | 70
-rw-r--r--  jstests/sharding/config_rs_no_primary.js | 2
-rw-r--r--  jstests/sharding/convert_to_and_from_sharded.js | 12
-rw-r--r--  jstests/sharding/count_config_servers.js | 2
-rw-r--r--  jstests/sharding/count_slaveok.js | 2
-rw-r--r--  jstests/sharding/covered_shard_key_indexes.js | 10
-rw-r--r--  jstests/sharding/create_idx_empty_primary.js | 2
-rw-r--r--  jstests/sharding/cursor1.js | 2
-rw-r--r--  jstests/sharding/cursor_cleanup.js | 4
-rw-r--r--  jstests/sharding/cursor_timeout.js | 2
-rw-r--r--  jstests/sharding/cursor_valid_after_shard_stepdown.js | 4
-rw-r--r--  jstests/sharding/delete_during_migrate.js | 2
-rw-r--r--  jstests/sharding/diffservers1.js | 6
-rw-r--r--  jstests/sharding/drop_sharded_db.js | 2
-rw-r--r--  jstests/sharding/empty_doc_results.js | 2
-rw-r--r--  jstests/sharding/enable_sharding_basic.js | 6
-rw-r--r--  jstests/sharding/enforce_zone_policy.js | 2
-rw-r--r--  jstests/sharding/error_during_agg_getmore.js | 4
-rw-r--r--  jstests/sharding/error_propagation.js | 4
-rw-r--r--  jstests/sharding/exact_shard_key_target.js | 20
-rw-r--r--  jstests/sharding/explainFind_stale_mongos.js | 2
-rw-r--r--  jstests/sharding/explain_agg_read_pref.js | 2
-rw-r--r--  jstests/sharding/features1.js | 14
-rw-r--r--  jstests/sharding/features3.js | 2
-rw-r--r--  jstests/sharding/find_getmore_cmd.js | 12
-rw-r--r--  jstests/sharding/findandmodify1.js | 2
-rw-r--r--  jstests/sharding/forget_mr_temp_ns.js | 2
-rw-r--r--  jstests/sharding/fts_score_sort_sharded.js | 8
-rw-r--r--  jstests/sharding/geo_near_sharded.js | 2
-rw-r--r--  jstests/sharding/geo_near_sort.js | 8
-rw-r--r--  jstests/sharding/graph_lookup.js | 4
-rw-r--r--  jstests/sharding/idhack_sharded.js | 6
-rw-r--r--  jstests/sharding/implicit_db_creation.js | 4
-rw-r--r--  jstests/sharding/in_memory_sort_limit.js | 2
-rw-r--r--  jstests/sharding/index1.js | 2
-rw-r--r--  jstests/sharding/inserts_consistent.js | 6
-rw-r--r--  jstests/sharding/invalid_system_views_sharded_collection.js | 14
-rw-r--r--  jstests/sharding/json_schema.js | 12
-rw-r--r--  jstests/sharding/jumbo1.js | 2
-rw-r--r--  jstests/sharding/key_many.js | 3
-rw-r--r--  jstests/sharding/kill_pinned_cursor.js | 2
-rw-r--r--  jstests/sharding/killop.js | 2
-rw-r--r--  jstests/sharding/lagged_config_secondary.js | 4
-rw-r--r--  jstests/sharding/large_chunk.js | 2
-rw-r--r--  jstests/sharding/large_skip_one_shard.js | 2
-rw-r--r--  jstests/sharding/linearizable_read_concern.js | 2
-rw-r--r--  jstests/sharding/listDatabases.js | 10
-rw-r--r--  jstests/sharding/localhostAuthBypass.js | 6
-rw-r--r--  jstests/sharding/lookup.js | 76
-rw-r--r--  jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js | 6
-rw-r--r--  jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js | 4
-rw-r--r--  jstests/sharding/lookup_change_stream_post_image_id_shard_key.js | 16
-rw-r--r--  jstests/sharding/lookup_mongod_unaware.js | 18
-rw-r--r--  jstests/sharding/lookup_stale_mongos.js | 24
-rw-r--r--  jstests/sharding/mapReduce_inSharded.js | 6
-rw-r--r--  jstests/sharding/mapReduce_inSharded_outSharded.js | 2
-rw-r--r--  jstests/sharding/mapReduce_nonSharded.js | 2
-rw-r--r--  jstests/sharding/mapReduce_outSharded.js | 2
-rw-r--r--  jstests/sharding/mapReduce_outSharded_checkUUID.js | 6
-rw-r--r--  jstests/sharding/max_time_ms_sharded.js | 2
-rw-r--r--  jstests/sharding/merge_chunks_compound_shard_key.js | 10
-rw-r--r--  jstests/sharding/merge_chunks_test.js | 8
-rw-r--r--  jstests/sharding/migrateBig.js | 7
-rw-r--r--  jstests/sharding/migrateBig_balancer.js | 2
-rw-r--r--  jstests/sharding/migrate_overwrite_id.js | 4
-rw-r--r--  jstests/sharding/migration_critical_section_concurrency.js | 14
-rw-r--r--  jstests/sharding/migration_ignore_interrupts_1.js | 6
-rw-r--r--  jstests/sharding/migration_ignore_interrupts_2.js | 2
-rw-r--r--  jstests/sharding/migration_ignore_interrupts_3.js | 4
-rw-r--r--  jstests/sharding/migration_ignore_interrupts_4.js | 8
-rw-r--r--  jstests/sharding/migration_move_chunk_after_receive.js | 8
-rw-r--r--  jstests/sharding/migration_sets_fromMigrate_flag.js | 14
-rw-r--r--  jstests/sharding/migration_with_source_ops.js | 14
-rw-r--r--  jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js | 2
-rw-r--r--  jstests/sharding/missing_key.js | 4
-rw-r--r--  jstests/sharding/mongos_no_detect_sharding.js | 4
-rw-r--r--  jstests/sharding/mongos_query_comment.js | 2
-rw-r--r--  jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js | 40
-rw-r--r--  jstests/sharding/mongos_rs_shard_failure_tolerance.js | 40
-rw-r--r--  jstests/sharding/mongos_shard_failure_tolerance.js | 40
-rw-r--r--  jstests/sharding/mongos_validate_writes.js | 12
-rw-r--r--  jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js | 4
-rw-r--r--  jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js | 2
-rw-r--r--  jstests/sharding/move_chunk_open_cursors.js | 2
-rw-r--r--  jstests/sharding/move_chunk_remove_with_write_retryability.js | 2
-rw-r--r--  jstests/sharding/move_primary_clone_test.js | 4
-rw-r--r--  jstests/sharding/movechunk_include.js | 2
-rw-r--r--  jstests/sharding/movechunk_interrupt_at_primary_stepdown.js | 2
-rw-r--r--  jstests/sharding/movechunk_parallel.js | 8
-rw-r--r--  jstests/sharding/mrShardedOutput.js | 4
-rw-r--r--  jstests/sharding/mr_and_agg_versioning.js | 2
-rw-r--r--  jstests/sharding/mr_shard_version.js | 2
-rw-r--r--  jstests/sharding/multi_mongos2.js | 8
-rw-r--r--  jstests/sharding/multi_mongos2a.js | 2
-rw-r--r--  jstests/sharding/multi_shard_transaction_without_majority_reads.js | 12
-rw-r--r--  jstests/sharding/multi_write_target.js | 20
-rw-r--r--  jstests/sharding/oplog_document_key.js | 56
-rw-r--r--  jstests/sharding/parallel.js | 2
-rw-r--r--  jstests/sharding/prefix_shard_key.js | 8
-rw-r--r--  jstests/sharding/presplit.js | 2
-rw-r--r--  jstests/sharding/primary_config_server_blackholed_from_mongos.js | 4
-rw-r--r--  jstests/sharding/printShardingStatus.js | 10
-rw-r--r--  jstests/sharding/query_config.js | 4
-rw-r--r--  jstests/sharding/query_sharded.js | 4
-rw-r--r--  jstests/sharding/read_pref_multi_mongos_stale_config.js | 4
-rw-r--r--  jstests/sharding/recovering_slaveok.js | 8
-rw-r--r--  jstests/sharding/refine_collection_shard_key_basic.js | 30
-rw-r--r--  jstests/sharding/refine_collection_shard_key_jumbo.js | 2
-rw-r--r--  jstests/sharding/regex_targeting.js | 133
-rw-r--r--  jstests/sharding/remove2.js | 2
-rw-r--r--  jstests/sharding/rename.js | 8
-rw-r--r--  jstests/sharding/rename_across_mongos.js | 2
-rw-r--r--  jstests/sharding/replication_with_undefined_shard_key.js | 6
-rw-r--r--  jstests/sharding/replmonitor_bad_seed.js | 2
-rw-r--r--  jstests/sharding/resume_change_stream.js | 36
-rw-r--r--  jstests/sharding/resume_change_stream_from_stale_mongos.js | 12
-rw-r--r--  jstests/sharding/resume_change_stream_on_subset_of_shards.js | 8
-rw-r--r--  jstests/sharding/retryable_writes.js | 8
-rw-r--r--  jstests/sharding/return_partial_shards_down.js | 2
-rw-r--r--  jstests/sharding/safe_secondary_reads_drop_recreate.js | 14
-rw-r--r--  jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js | 14
-rw-r--r--  jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js | 14
-rw-r--r--  jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js | 2
-rw-r--r--  jstests/sharding/secondary_shard_versioning.js | 2
-rw-r--r--  jstests/sharding/shard1.js | 6
-rw-r--r--  jstests/sharding/shard2.js | 14
-rw-r--r--  jstests/sharding/shard3.js | 2
-rw-r--r--  jstests/sharding/shard7.js | 4
-rw-r--r--  jstests/sharding/shard_aware_init.js | 2
-rw-r--r--  jstests/sharding/shard_aware_init_secondaries.js | 2
-rw-r--r--  jstests/sharding/shard_aware_primary_failover.js | 2
-rw-r--r--  jstests/sharding/shard_collection_basic.js | 28
-rw-r--r--  jstests/sharding/shard_collection_existing_zones.js | 2
-rw-r--r--  jstests/sharding/shard_collection_verify_initial_chunks.js | 2
-rw-r--r--  jstests/sharding/shard_existing.js | 2
-rw-r--r--  jstests/sharding/shard_existing_coll_chunk_count.js | 4
-rw-r--r--  jstests/sharding/shard_identity_rollback.js | 4
-rw-r--r--  jstests/sharding/shard_insert_getlasterror_w2.js | 6
-rw-r--r--  jstests/sharding/shard_keycount.js | 4
-rw-r--r--  jstests/sharding/shard_kill_and_pooling.js | 2
-rw-r--r--  jstests/sharding/sharded_limit_batchsize.js | 8
-rw-r--r--  jstests/sharding/sharded_profile.js | 2
-rw-r--r--  jstests/sharding/sharding_balance1.js | 2
-rw-r--r--  jstests/sharding/sharding_balance2.js | 4
-rw-r--r--  jstests/sharding/sharding_balance3.js | 2
-rw-r--r--  jstests/sharding/sharding_balance4.js | 4
-rw-r--r--  jstests/sharding/sharding_migrate_cursor1.js | 2
-rw-r--r--  jstests/sharding/sharding_multiple_ns_rs.js | 4
-rw-r--r--  jstests/sharding/sharding_rs1.js | 2
-rw-r--r--  jstests/sharding/sharding_rs2.js | 6
-rw-r--r--  jstests/sharding/sharding_statistics_server_status.js | 2
-rw-r--r--  jstests/sharding/shards_and_config_return_last_committed_optime.js | 4
-rw-r--r--  jstests/sharding/snapshot_cursor_commands_mongos.js | 4
-rw-r--r--  jstests/sharding/split_with_force.js | 4
-rw-r--r--  jstests/sharding/split_with_force_small.js | 4
-rw-r--r--  jstests/sharding/stale_mongos_updates_and_removes.js | 8
-rw-r--r--  jstests/sharding/stale_version_write.js | 6
-rw-r--r--  jstests/sharding/startup_with_all_configs_down.js | 2
-rw-r--r--  jstests/sharding/stats.js | 2
-rw-r--r--  jstests/sharding/test_stacked_migration_cleanup.js | 4
-rw-r--r--  jstests/sharding/time_zone_info_mongos.js | 4
-rw-r--r--  jstests/sharding/top_chunk_autosplit.js | 4
-rw-r--r--  jstests/sharding/trace_missing_docs_test.js | 6
-rw-r--r--  jstests/sharding/transactions_causal_consistency.js | 8
-rw-r--r--  jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js | 4
-rw-r--r--  jstests/sharding/transactions_implicit_abort.js | 6
-rw-r--r--  jstests/sharding/transactions_multi_writes.js | 17
-rw-r--r--  jstests/sharding/transactions_read_concerns.js | 8
-rw-r--r--  jstests/sharding/transactions_reject_writes_for_moved_chunks.js | 10
-rw-r--r--  jstests/sharding/transactions_snapshot_errors_first_statement.js | 10
-rw-r--r--  jstests/sharding/transactions_snapshot_errors_subsequent_statements.js | 6
-rw-r--r--  jstests/sharding/transactions_stale_database_version_errors.js | 10
-rw-r--r--  jstests/sharding/transactions_stale_shard_version_errors.js | 11
-rw-r--r--  jstests/sharding/transactions_target_at_point_in_time.js | 6
-rw-r--r--  jstests/sharding/transactions_view_resolution.js | 8
-rw-r--r--  jstests/sharding/transactions_writes_not_retryable.js | 3
-rw-r--r--  jstests/sharding/txn_recover_decision_using_recovery_router.js | 3
-rw-r--r--  jstests/sharding/txn_writes_during_movechunk.js | 4
-rw-r--r--  jstests/sharding/unique_index_on_shardservers.js | 3
-rw-r--r--  jstests/sharding/unowned_doc_filtering.js | 2
-rw-r--r--  jstests/sharding/unsharded_collection_targetting.js | 4
-rw-r--r--  jstests/sharding/update_immutable_fields.js | 10
-rw-r--r--  jstests/sharding/update_sharded.js | 38
-rw-r--r--  jstests/sharding/upsert_sharded.js | 2
-rw-r--r--  jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js | 2
-rw-r--r--  jstests/sharding/validate_collection.js | 10
-rw-r--r--  jstests/sharding/view_rewrite.js | 2
-rw-r--r--  jstests/sharding/views.js | 2
-rw-r--r--  jstests/sharding/write_cmd_auto_split.js | 12
-rw-r--r--  jstests/sharding/zbigMapReduce.js | 4
-rw-r--r--  jstests/sharding/zero_shard_version.js | 4
243 files changed, 1052 insertions(+), 1013 deletions(-)
diff --git a/jstests/sharding/accurate_count_with_predicate.js b/jstests/sharding/accurate_count_with_predicate.js
index bb440e88873..4b5fcef4e2e 100644
--- a/jstests/sharding/accurate_count_with_predicate.js
+++ b/jstests/sharding/accurate_count_with_predicate.js
@@ -26,13 +26,13 @@ st.shardColl(shard0Coll.getName(), {x: 1}, {x: middle}, {x: middle + 1}, "test",
// Insert some docs.
for (let i = 0; i < num; i++) {
- assert.writeOK(st.getDB("test").slowcount.insert(getNthDocument(i)));
+ assert.commandWorked(st.getDB("test").slowcount.insert(getNthDocument(i)));
}
// Insert some orphan documents to shard 0. These are just documents outside the range
// which shard 0 owns.
for (let i = middle + 1; i < middle + 3; i++) {
- assert.writeOK(shard0Coll.insert(getNthDocument(i)));
+ assert.commandWorked(shard0Coll.insert(getNthDocument(i)));
}
// Run a count on the whole collection. The orphaned documents on shard 0 shouldn't be double
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index b676cb474e7..dfa3ef3b904 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -10,7 +10,7 @@ var db1 = conn1.getDB("testDB");
var numObjs = 3;
for (var i = 0; i < numObjs; i++) {
- assert.writeOK(db1.foo.save({a: i}));
+ assert.commandWorked(db1.foo.save({a: i}));
}
var configDB = s.s.getDB('config');
@@ -29,10 +29,10 @@ assert.eq(1024, newShardDoc.maxSize);
var conn2 = MongoRunner.runMongod({'shardsvr': ""});
var db2 = conn2.getDB("otherDB");
-assert.writeOK(db2.foo.save({a: 1}));
+assert.commandWorked(db2.foo.save({a: 1}));
var db3 = conn2.getDB("testDB");
-assert.writeOK(db3.foo.save({a: 1}));
+assert.commandWorked(db3.foo.save({a: 1}));
s.config.databases.find().forEach(printjson);
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
index 7fb1ab2efe1..64d5300c3c0 100644
--- a/jstests/sharding/addshard2.js
+++ b/jstests/sharding/addshard2.js
@@ -184,7 +184,7 @@ if (res.primary != addShardRes.shardAdded) {
assert.commandWorked(st.s.adminCommand({movePrimary: 'test', to: addShardRes.shardAdded}));
}
-assert.writeOK(st.s.getDB('test').foo.insert({x: 1}));
+assert.commandWorked(st.s.getDB('test').foo.insert({x: 1}));
assert.neq(null, rst5.getPrimary().getDB('test').foo.findOne());
assert.commandWorked(st.s.getDB('test').runCommand({dropDatabase: 1}));
diff --git a/jstests/sharding/addshard5.js b/jstests/sharding/addshard5.js
index 31d2c10f505..7a2b6866c8c 100644
--- a/jstests/sharding/addshard5.js
+++ b/jstests/sharding/addshard5.js
@@ -17,7 +17,7 @@ st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
assert.commandWorked(mongos.adminCommand({shardCollection: coll + '', key: {_id: 1}}));
// Insert one document
-assert.writeOK(coll.insert({hello: 'world'}));
+assert.commandWorked(coll.insert({hello: 'world'}));
// Migrate the collection to and from shard1 so shard0 loads the shard1 host
assert.commandWorked(mongos.adminCommand(
diff --git a/jstests/sharding/agg_project_limit_pipe_split.js b/jstests/sharding/agg_project_limit_pipe_split.js
index f17148a0877..7f5c7a51951 100644
--- a/jstests/sharding/agg_project_limit_pipe_split.js
+++ b/jstests/sharding/agg_project_limit_pipe_split.js
@@ -14,7 +14,7 @@ const bulkOp = coll.initializeOrderedBulkOp();
for (let i = 0; i < 400; ++i) {
bulkOp.insert({x: i, y: ["a", "b", "c"], z: Math.floor(i / 12)});
}
-assert.writeOK(bulkOp.execute());
+assert.commandWorked(bulkOp.execute());
let agg = coll.aggregate([
{$match: {$or: [{z: 9}, {z: 10}]}},
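
Bulk writes follow the same pattern: initializeOrderedBulkOp().execute() returns a BulkWriteResult rather than a raw command reply, so call sites like the one above depend on assert.commandWorked() handling bulk results as well. A sketch under that assumption, with an illustrative collection name:

// execute() returns a BulkWriteResult summarizing every batched write.
const exampleBulk = db.example.initializeOrderedBulkOp();  // 'example' is illustrative
for (let i = 0; i < 100; ++i) {
    exampleBulk.insert({_id: i});
}
assert.commandWorked(exampleBulk.execute());  // fails the test if any write errored
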
diff --git a/jstests/sharding/agg_sort.js b/jstests/sharding/agg_sort.js
index 0ee78631ec0..45ea86b0d97 100644
--- a/jstests/sharding/agg_sort.js
+++ b/jstests/sharding/agg_sort.js
@@ -31,7 +31,7 @@ const bulkOp = coll.initializeOrderedBulkOp();
for (var i = 0; i < nDocs; ++i) {
bulkOp.insert({_id: i, x: Math.floor(i / 2), y: yValues[i]});
}
-assert.writeOK(bulkOp.execute());
+assert.commandWorked(bulkOp.execute());
// Split the data into 3 chunks
assert.commandWorked(shardingTest.s0.adminCommand({split: coll.getFullName(), middle: {_id: 3}}));
@@ -141,7 +141,7 @@ const textColl = db.sharded_agg_sort_text;
assert.commandWorked(
shardingTest.s0.adminCommand({shardCollection: textColl.getFullName(), key: {_id: 1}}));
-assert.writeOK(textColl.insert([
+assert.commandWorked(textColl.insert([
{_id: 0, text: "apple"},
{_id: 1, text: "apple orange banana apple"},
{_id: 2, text: "apple orange"},
diff --git a/jstests/sharding/aggregates_during_balancing.js b/jstests/sharding/aggregates_during_balancing.js
index e0681798327..06db4cb6955 100644
--- a/jstests/sharding/aggregates_during_balancing.js
+++ b/jstests/sharding/aggregates_during_balancing.js
@@ -46,7 +46,7 @@ for (i = 0; i < nItems; ++i) {
filler: "0123456789012345678901234567890123456789"
});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTestLog('a project and group in shards, result combined in mongos');
var a1 = db.ts1
@@ -200,7 +200,7 @@ assert.eq(db.ts1.find().sort({_id: 1}).toArray(), outCollection.find().sort({_id
assert.commandFailed(
db.runCommand({aggregate: outCollection.getName(), pipeline: [{$out: db.ts1.getName()}]}));
-assert.writeOK(db.literal.save({dollar: false}));
+assert.commandWorked(db.literal.save({dollar: false}));
result =
db.literal
diff --git a/jstests/sharding/aggregation_currentop.js b/jstests/sharding/aggregation_currentop.js
index 4973b4f2d3f..a3a9d9c32e2 100644
--- a/jstests/sharding/aggregation_currentop.js
+++ b/jstests/sharding/aggregation_currentop.js
@@ -90,7 +90,7 @@ createUsers(mongosConn);
assert(clusterAdminDB.auth("admin", "pwd"));
for (let i = 0; i < 5; i++) {
- assert.writeOK(clusterTestDB.test.insert({_id: i, a: i}));
+ assert.commandWorked(clusterTestDB.test.insert({_id: i, a: i}));
}
st.ensurePrimaryShard(clusterTestDB.getName(), shardRS.name);
diff --git a/jstests/sharding/aggregations_in_session.js b/jstests/sharding/aggregations_in_session.js
index 456decee662..03c124a3f50 100644
--- a/jstests/sharding/aggregations_in_session.js
+++ b/jstests/sharding/aggregations_in_session.js
@@ -21,7 +21,7 @@ const mongosColl = session.getDatabase("test")[jsTestName()];
// merging on a mongod - otherwise the entire pipeline will be forwarded without a split and
// without a $mergeCursors stage.
st.shardColl(mongosColl, {_id: 1}, {_id: 1}, {_id: 1});
-assert.writeOK(mongosColl.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
+assert.commandWorked(mongosColl.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
// This assertion will reproduce the hang described in SERVER-33660.
assert.eq(
diff --git a/jstests/sharding/all_config_servers_blackholed_from_mongos.js b/jstests/sharding/all_config_servers_blackholed_from_mongos.js
index 53d6e435dd1..8fb040e60f7 100644
--- a/jstests/sharding/all_config_servers_blackholed_from_mongos.js
+++ b/jstests/sharding/all_config_servers_blackholed_from_mongos.js
@@ -23,7 +23,7 @@ assert.commandWorked(testDB.adminCommand({enableSharding: 'BlackHoleDB'}));
assert.commandWorked(
testDB.adminCommand({shardCollection: testDB.ShardedColl.getFullName(), key: {_id: 1}}));
-assert.writeOK(testDB.ShardedColl.insert({a: 1}));
+assert.commandWorked(testDB.ShardedColl.insert({a: 1}));
jsTest.log('Making all the config servers appear as a blackhole to mongos');
st._configServers.forEach(function(configSvr) {
diff --git a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
index 68745172568..19b19c724d3 100644
--- a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
+++ b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
@@ -13,7 +13,7 @@ var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
jsTest.log('Config nodes up: 3 of 3, shard nodes up: 2 of 2: ' +
'Insert test data to work with');
-assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+assert.commandWorked(st.s0.getDB('TestDB').TestColl.update(
{_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
assert.eq([{_id: 0, count: 1}], st.s0.getDB('TestDB').TestColl.find().toArray());
@@ -21,7 +21,7 @@ jsTest.log('Config nodes up: 2 of 3, shard nodes up: 2 of 2: ' +
'Inserts and queries must work');
st.configRS.stop(0);
st.restartMongos(0);
-assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+assert.commandWorked(st.s0.getDB('TestDB').TestColl.update(
{_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
assert.eq([{_id: 0, count: 2}], st.s0.getDB('TestDB').TestColl.find().toArray());
@@ -29,7 +29,7 @@ jsTest.log('Config nodes up: 1 of 3, shard nodes up: 2 of 2: ' +
'Inserts and queries must work');
st.configRS.stop(1);
st.restartMongos(0);
-assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+assert.commandWorked(st.s0.getDB('TestDB').TestColl.update(
{_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
diff --git a/jstests/sharding/allow_partial_results.js b/jstests/sharding/allow_partial_results.js
index 7ecbbb1dc7b..d5b2a18fa0a 100644
--- a/jstests/sharding/allow_partial_results.js
+++ b/jstests/sharding/allow_partial_results.js
@@ -22,7 +22,7 @@ let bulk = coll.initializeUnorderedBulkOp();
for (let i = -50; i < 50; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("Create a sharded collection with one chunk on each of the two shards.");
st.ensurePrimaryShard(dbName, st.shard0.shardName);
diff --git a/jstests/sharding/array_shard_key.js b/jstests/sharding/array_shard_key.js
index 2eb7dd102b6..dd6686f68a9 100644
--- a/jstests/sharding/array_shard_key.js
+++ b/jstests/sharding/array_shard_key.js
@@ -23,11 +23,11 @@ assert.writeError(coll.insert({i: [1, 2]}));
assert.writeError(coll.insert({_id: [1, 2], i: 3}));
// Insert an object with valid array key
-assert.writeOK(coll.insert({i: 1}));
+assert.commandWorked(coll.insert({i: 1}));
// Update the value with valid other field
value = coll.findOne({i: 1});
-assert.writeOK(coll.update(value, {$set: {j: 2}}));
+assert.commandWorked(coll.update(value, {$set: {j: 2}}));
// Update the value with invalid other fields
value = coll.findOne({i: 1});
@@ -39,7 +39,7 @@ assert.writeError(coll.update(value, Object.merge(value, {i: [3, 4]}), false, tr
// Multi-update the value with other fields (won't work, but no error)
value = coll.findOne({i: 1});
-assert.writeOK(coll.update(Object.merge(value, {i: [1, 1]}), {$set: {k: 4}}, false, true));
+assert.commandWorked(coll.update(Object.merge(value, {i: [1, 1]}), {$set: {k: 4}}, false, true));
// Query the value with other fields (won't work, but no error)
value = coll.findOne({i: 1});
@@ -51,29 +51,29 @@ coll.remove(Object.extend(value, {i: [1, 2, 3, 4]}));
// Can't remove using multikey, but shouldn't error
value = coll.findOne({i: 1});
-assert.writeOK(coll.remove(Object.extend(value, {i: [1, 2, 3, 4, 5]})));
+assert.commandWorked(coll.remove(Object.extend(value, {i: [1, 2, 3, 4, 5]})));
assert.eq(coll.find().itcount(), 1);
value = coll.findOne({i: 1});
-assert.writeOK(coll.remove(Object.extend(value, {i: 1})));
+assert.commandWorked(coll.remove(Object.extend(value, {i: 1})));
assert.eq(coll.find().itcount(), 0);
coll.ensureIndex({_id: 1, i: 1, j: 1});
// Can insert document that will make index into a multi-key as long as it's not part of shard
// key.
coll.remove({});
-assert.writeOK(coll.insert({i: 1, j: [1, 2]}));
+assert.commandWorked(coll.insert({i: 1, j: [1, 2]}));
assert.eq(coll.find().itcount(), 1);
// Same is true for updates.
coll.remove({});
coll.insert({_id: 1, i: 1});
-assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}));
+assert.commandWorked(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}));
assert.eq(coll.find().itcount(), 1);
// Same for upserts.
coll.remove({});
-assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}, true));
+assert.commandWorked(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}, true));
assert.eq(coll.find().itcount(), 1);
printjson("Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey");
@@ -82,7 +82,7 @@ printjson("Sharding-then-inserting-multikey tested, now trying inserting-then-sh
var coll = mongos.getCollection("" + coll + "2");
for (var i = 0; i < 10; i++) {
// TODO : does not check weird cases like [ i, i ]
- assert.writeOK(coll.insert({i: [i, i + 1]}));
+ assert.commandWorked(coll.insert({i: [i, i + 1]}));
}
coll.ensureIndex({_id: 1, i: 1});
@@ -99,7 +99,7 @@ st.printShardingStatus();
var coll = mongos.getCollection("" + coll + "3");
for (var i = 0; i < 10; i++) {
// TODO : does not check weird cases like [ i, i ]
- assert.writeOK(coll.insert({i: i}));
+ assert.commandWorked(coll.insert({i: i}));
}
coll.ensureIndex({_id: 1, i: 1});
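
Note that the assert.writeError() calls in this file are deliberately left untouched: they assert that a write is rejected (here, because a shard key value may not be an array), which is the inverse of the positive assertions being replaced. The distinction, sketched from the hunks above:

// A write expected to fail keeps the negative assertion.
assert.writeError(coll.insert({i: [1, 2]}));  // array shard key is invalid
// A write expected to succeed now uses the positive assertion.
assert.commandWorked(coll.insert({i: 1}));
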
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index 61b5c273315..5131c512271 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -67,7 +67,7 @@ s.getDB(adminUser.db)
login(adminUser);
// Set the chunk size, disable the secondary throttle (so the test doesn't run so slow)
-assert.writeOK(
+assert.commandWorked(
s.getDB("config").settings.update({_id: "balancer"},
{$set: {"_secondaryThrottle": false, "_waitForDelete": true}},
{upsert: true}));
@@ -151,7 +151,7 @@ login(testUser);
assert.eq(s.getDB("test").foo.findOne(), null);
print("insert try 2");
-assert.writeOK(s.getDB("test").foo.insert({x: 1}));
+assert.commandWorked(s.getDB("test").foo.insert({x: 1}));
assert.eq(1, s.getDB("test").foo.find().itcount(), tojson(result));
logout(testUser);
@@ -181,7 +181,7 @@ var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
for (i = 0; i < num; i++) {
bulk.insert({_id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market"});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
s.startBalancer(60000);
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index 8afe4facc2e..2692bc9bc42 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -74,7 +74,7 @@ for (var i = 0; i < 100; i++) {
for (var j = 0; j < 10; j++) {
bulk.insert({i: i, j: j, str: str});
}
- assert.writeOK(bulk.execute({w: "majority"}));
+ assert.commandWorked(bulk.execute({w: "majority"}));
// Split the chunk we just inserted so that we have something to balance.
assert.commandWorked(st.splitFind("test.foo", {i: i, j: 0}));
}
@@ -82,7 +82,8 @@ for (var i = 0; i < 100; i++) {
assert.eq(expectedDocs, testDB.foo.count());
// Wait for the balancer to start back up
-assert.writeOK(configDB.settings.update({_id: 'balancer'}, {$set: {_waitForDelete: true}}, true));
+assert.commandWorked(
+ configDB.settings.update({_id: 'balancer'}, {$set: {_waitForDelete: true}}, true));
st.startBalancer();
// Make sure we've done at least some splitting, so the balancer will work
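
When a write carries an explicit write concern, as with bulk.execute({w: "majority"}) above, assert.commandWorked() also fails on a reported writeConcernError; a test that needs to tolerate write concern failures would use the shell's separate assert.commandWorkedIgnoringWriteConcernErrors() helper instead. A sketch of the stricter form used here, reusing the bulk op pattern from the hunk above:

// Fails the test if any batch errors or if the majority write concern
// cannot be satisfied within the timeout.
assert.commandWorked(bulk.execute({w: "majority", wtimeout: 30000}));
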
diff --git a/jstests/sharding/auth_repl.js b/jstests/sharding/auth_repl.js
index 0c8e976bd48..5605225b446 100644
--- a/jstests/sharding/auth_repl.js
+++ b/jstests/sharding/auth_repl.js
@@ -36,7 +36,7 @@ priTestDB.createUser({user: 'a', pwd: 'a', roles: jsTest.basicUserRoles},
assert.eq(1, testDB.auth('a', 'a'));
jsTest.log('Sending an authorized query that should be ok');
-assert.writeOK(testColl.insert({x: 1}, {writeConcern: {w: nodeCount}}));
+assert.commandWorked(testColl.insert({x: 1}, {writeConcern: {w: nodeCount}}));
conn.setSlaveOk(true);
doc = testColl.findOne();
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index 9aa9bc8db84..480d0c4318b 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -75,7 +75,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var x = 0; x < 20; x++) {
bulk.insert({v: x, k: 10});
}
-assert.writeOK(bulk.execute({w: nodeCount}));
+assert.commandWorked(bulk.execute({w: nodeCount}));
/* Although mongos never caches query results, try to do a different query
* everytime just to be sure.
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index 0d1fb713c97..9b0fbe240ce 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -20,11 +20,11 @@ var test1User = {
};
function assertRemove(collection, pattern) {
- assert.writeOK(collection.remove(pattern));
+ assert.commandWorked(collection.remove(pattern));
}
function assertInsert(collection, obj) {
- assert.writeOK(collection.insert(obj));
+ assert.commandWorked(collection.insert(obj));
}
// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index 3d60fb2ccca..e9b338801bf 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -20,11 +20,11 @@ var test1Reader = {
};
function assertRemove(collection, pattern) {
- assert.writeOK(collection.remove(pattern));
+ assert.commandWorked(collection.remove(pattern));
}
function assertInsert(collection, obj) {
- assert.writeOK(collection.insert(obj));
+ assert.commandWorked(collection.insert(obj));
}
// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
diff --git a/jstests/sharding/auto_rebalance_parallel.js b/jstests/sharding/auto_rebalance_parallel.js
index ef6af0d57c5..03e5755ec0c 100644
--- a/jstests/sharding/auto_rebalance_parallel.js
+++ b/jstests/sharding/auto_rebalance_parallel.js
@@ -17,10 +17,10 @@ function prepareCollectionForBalance(collName) {
var coll = st.s0.getCollection(collName);
// Create 4 chunks initially and ensure they get balanced within 1 balancer round
- assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
- assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
- assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
- assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
+ assert.commandWorked(coll.insert({Key: 1, Value: 'Test value 1'}));
+ assert.commandWorked(coll.insert({Key: 10, Value: 'Test value 10'}));
+ assert.commandWorked(coll.insert({Key: 20, Value: 'Test value 20'}));
+ assert.commandWorked(coll.insert({Key: 30, Value: 'Test value 30'}));
assert.commandWorked(st.splitAt(collName, {Key: 10}));
assert.commandWorked(st.splitAt(collName, {Key: 20}));
diff --git a/jstests/sharding/auto_rebalance_parallel_replica_sets.js b/jstests/sharding/auto_rebalance_parallel_replica_sets.js
index 0be9549f3cd..8bcbd63813a 100644
--- a/jstests/sharding/auto_rebalance_parallel_replica_sets.js
+++ b/jstests/sharding/auto_rebalance_parallel_replica_sets.js
@@ -13,10 +13,10 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key
var coll = st.s0.getDB('TestDB').TestColl;
// Create 4 chunks initially and ensure they get balanced within 1 balancer round
-assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
-assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
-assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
-assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
+assert.commandWorked(coll.insert({Key: 1, Value: 'Test value 1'}));
+assert.commandWorked(coll.insert({Key: 10, Value: 'Test value 10'}));
+assert.commandWorked(coll.insert({Key: 20, Value: 'Test value 20'}));
+assert.commandWorked(coll.insert({Key: 30, Value: 'Test value 30'}));
assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 10}));
assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20}));
diff --git a/jstests/sharding/autodiscover_config_rs_from_secondary.js b/jstests/sharding/autodiscover_config_rs_from_secondary.js
index 390d9bb7aa6..b9e87eeae06 100644
--- a/jstests/sharding/autodiscover_config_rs_from_secondary.js
+++ b/jstests/sharding/autodiscover_config_rs_from_secondary.js
@@ -34,7 +34,7 @@ var seedList = rst.name + "/" + rst.nodes[1].host; // node 1 is guaranteed to n
// perform writes to the config servers.
var mongos = MongoRunner.runMongos({configdb: seedList});
var admin = mongos.getDB('admin');
- assert.writeOK(admin.foo.insert({a: 1}));
+ assert.commandWorked(admin.foo.insert({a: 1}));
assert.eq(1, admin.foo.findOne().a);
MongoRunner.stopMongos(mongos);
}
diff --git a/jstests/sharding/autosplit.js b/jstests/sharding/autosplit.js
index 58dbe7ece1e..39e05e9daae 100644
--- a/jstests/sharding/autosplit.js
+++ b/jstests/sharding/autosplit.js
@@ -38,7 +38,7 @@ function insertDocsAndWaitForSplit(numDocs) {
for (; i < curMaxKey + numDocs; i++) {
bulk.insert({num: i, s: bigString});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
waitForOngoingChunkSplits(s);
diff --git a/jstests/sharding/autosplit_heuristics.js b/jstests/sharding/autosplit_heuristics.js
index 1777a82678a..95ac0835935 100644
--- a/jstests/sharding/autosplit_heuristics.js
+++ b/jstests/sharding/autosplit_heuristics.js
@@ -66,7 +66,7 @@ if (!isDebugBuild) {
// Insert enough docs to trigger splits into all chunks
for (var i = 0; i < totalInserts; i++) {
- assert.writeOK(coll.insert({_id: i % numChunks + (i / totalInserts), pad: pad}));
+ assert.commandWorked(coll.insert({_id: i % numChunks + (i / totalInserts), pad: pad}));
// Splitting is asynchronous so we should wait after each insert
// for autosplitting to happen
waitForOngoingChunkSplits(st);
diff --git a/jstests/sharding/autosplit_with_balancer.js b/jstests/sharding/autosplit_with_balancer.js
index 0372ca09b9a..b68a9e58289 100644
--- a/jstests/sharding/autosplit_with_balancer.js
+++ b/jstests/sharding/autosplit_with_balancer.js
@@ -23,7 +23,7 @@ for (var j = 0; j < 30; j++) {
bulk.insert({num: i, s: bigString});
i++;
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}));
}
diff --git a/jstests/sharding/balance_repl.js b/jstests/sharding/balance_repl.js
index fdc0d15509c..dc16fe7e46e 100644
--- a/jstests/sharding/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -30,7 +30,7 @@ var bulk = s.s0.getDB('TestDB').TestColl.initializeUnorderedBulkOp();
for (var i = 0; i < 2100; i++) {
bulk.insert({_id: i, x: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.commandWorked(s.s0.adminCommand({enablesharding: 'TestDB'}));
s.ensurePrimaryShard('TestDB', s.shard0.shardName);
diff --git a/jstests/sharding/balancer_window.js b/jstests/sharding/balancer_window.js
index ee2d55b1345..50ab8325802 100644
--- a/jstests/sharding/balancer_window.js
+++ b/jstests/sharding/balancer_window.js
@@ -56,16 +56,17 @@ var shard0Chunks = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shard
var startDate = new Date();
var hourMinStart = new HourAndMinute(startDate.getHours(), startDate.getMinutes());
-assert.writeOK(configDB.settings.update({_id: 'balancer'},
- {
- $set: {
- activeWindow: {
- start: hourMinStart.addHour(-2).toString(),
- stop: hourMinStart.addHour(-1).toString()
- },
- }
- },
- true));
+assert.commandWorked(
+ configDB.settings.update({_id: 'balancer'},
+ {
+ $set: {
+ activeWindow: {
+ start: hourMinStart.addHour(-2).toString(),
+ stop: hourMinStart.addHour(-1).toString()
+ },
+ }
+ },
+ true));
st.startBalancer();
st.waitForBalancer(true, 60000);
@@ -73,7 +74,7 @@ st.waitForBalancer(true, 60000);
var shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
assert.eq(shard0Chunks, shard0ChunksAfter);
-assert.writeOK(configDB.settings.update(
+assert.commandWorked(configDB.settings.update(
{_id: 'balancer'},
{
$set: {
diff --git a/jstests/sharding/basic_drop_coll.js b/jstests/sharding/basic_drop_coll.js
index b7fda388e34..e457dc5c9fd 100644
--- a/jstests/sharding/basic_drop_coll.js
+++ b/jstests/sharding/basic_drop_coll.js
@@ -11,7 +11,7 @@ var testDB = st.s.getDB('test');
// Test dropping an unsharded collection.
-assert.writeOK(testDB.bar.insert({x: 1}));
+assert.commandWorked(testDB.bar.insert({x: 1}));
assert.neq(null, testDB.bar.findOne({x: 1}));
assert.commandWorked(testDB.runCommand({drop: 'bar'}));
@@ -29,8 +29,8 @@ assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zon
assert.commandWorked(st.s.adminCommand(
{updateZoneKeyRange: 'test.user', min: {_id: 0}, max: {_id: 10}, zone: 'foo'}));
-assert.writeOK(testDB.user.insert({_id: 10}));
-assert.writeOK(testDB.user.insert({_id: -10}));
+assert.commandWorked(testDB.user.insert({_id: 10}));
+assert.commandWorked(testDB.user.insert({_id: -10}));
assert.neq(null, st.shard0.getDB('test').user.findOne({_id: -10}));
assert.neq(null, st.shard1.getDB('test').user.findOne({_id: 10}));
diff --git a/jstests/sharding/basic_split.js b/jstests/sharding/basic_split.js
index 00a442ac353..fcab7365aad 100644
--- a/jstests/sharding/basic_split.js
+++ b/jstests/sharding/basic_split.js
@@ -46,7 +46,7 @@ var bulk = testDB.user.initializeUnorderedBulkOp();
for (var x = -1200; x < 1200; x++) {
bulk.insert({_id: x, val: kiloDoc});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
@@ -89,7 +89,7 @@ bulk = testDB.compound.initializeUnorderedBulkOp();
for (x = -1200; x < 1200; x++) {
bulk.insert({x: x, y: x, val: kiloDoc});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount());
assert.commandWorked(configDB.adminCommand(
diff --git a/jstests/sharding/batch_write_command_sharded.js b/jstests/sharding/batch_write_command_sharded.js
index 60b848dd6de..854e1364531 100644
--- a/jstests/sharding/batch_write_command_sharded.js
+++ b/jstests/sharding/batch_write_command_sharded.js
@@ -131,7 +131,7 @@ var oldChunks = config.chunks.find().toArray();
var staleMongos = MongoRunner.runMongos({configdb: configConnStr});
brokenColl = staleMongos.getCollection(brokenColl.toString());
-assert.writeOK(brokenColl.insert({hello: "world"}));
+assert.commandWorked(brokenColl.insert({hello: "world"}));
// Modify the chunks to make shards at a higher version
@@ -140,9 +140,9 @@ assert.commandWorked(
// Rewrite the old chunks back to the config server
-assert.writeOK(config.chunks.remove({}));
+assert.commandWorked(config.chunks.remove({}));
for (var i = 0; i < oldChunks.length; i++) {
- assert.writeOK(config.chunks.insert(oldChunks[i]));
+ assert.commandWorked(config.chunks.insert(oldChunks[i]));
}
// Ensure that the inserts have propagated to all secondary nodes
diff --git a/jstests/sharding/bulk_insert.js b/jstests/sharding/bulk_insert.js
index a2162771492..00e03fe8019 100644
--- a/jstests/sharding/bulk_insert.js
+++ b/jstests/sharding/bulk_insert.js
@@ -14,7 +14,7 @@ var collUn = mongos.getCollection(jsTestName() + ".collUnsharded");
var collDi = st.shard0.getCollection(jsTestName() + ".collDirect");
jsTest.log('Checking write to config collections...');
-assert.writeOK(admin.TestColl.insert({SingleDoc: 1}));
+assert.commandWorked(admin.TestColl.insert({SingleDoc: 1}));
jsTest.log("Setting up collections...");
@@ -33,9 +33,9 @@ assert.commandWorked(admin.runCommand(
{moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
var resetColls = function() {
- assert.writeOK(collSh.remove({}));
- assert.writeOK(collUn.remove({}));
- assert.writeOK(collDi.remove({}));
+ assert.commandWorked(collSh.remove({}));
+ assert.commandWorked(collUn.remove({}));
+ assert.commandWorked(collDi.remove({}));
};
var isDupKeyError = function(err) {
@@ -54,13 +54,13 @@ jsTest.log("Bulk insert (no ContinueOnError) to single shard...");
resetColls();
var inserts = [{ukey: 0}, {ukey: 1}];
-assert.writeOK(collSh.insert(inserts));
+assert.commandWorked(collSh.insert(inserts));
assert.eq(2, collSh.find().itcount());
-assert.writeOK(collUn.insert(inserts));
+assert.commandWorked(collUn.insert(inserts));
assert.eq(2, collUn.find().itcount());
-assert.writeOK(collDi.insert(inserts));
+assert.commandWorked(collDi.insert(inserts));
assert.eq(2, collDi.find().itcount());
jsTest.log("Bulk insert (no COE) with mongos error...");
@@ -107,13 +107,13 @@ jsTest.log("Bulk insert (no COE) on second shard...");
resetColls();
var inserts = [{ukey: 0}, {ukey: -1}];
-assert.writeOK(collSh.insert(inserts));
+assert.commandWorked(collSh.insert(inserts));
assert.eq(2, collSh.find().itcount());
-assert.writeOK(collUn.insert(inserts));
+assert.commandWorked(collUn.insert(inserts));
assert.eq(2, collUn.find().itcount());
-assert.writeOK(collDi.insert(inserts));
+assert.commandWorked(collDi.insert(inserts));
assert.eq(2, collDi.find().itcount());
jsTest.log("Bulk insert to second shard (no COE) with mongos error...");
@@ -245,7 +245,7 @@ assert.commandWorked(admin.runCommand(
assert.commandWorked(admin.runCommand(
{moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
-assert.writeOK(staleCollSh.insert(inserts));
+assert.commandWorked(staleCollSh.insert(inserts));
//
// Test when the legacy batch exceeds the BSON object size limit
diff --git a/jstests/sharding/bulk_shard_insert.js b/jstests/sharding/bulk_shard_insert.js
index 4f3a4626818..67726754282 100644
--- a/jstests/sharding/bulk_shard_insert.js
+++ b/jstests/sharding/bulk_shard_insert.js
@@ -76,7 +76,7 @@ while (docsInserted < numDocs) {
docsInserted++;
}
- assert.writeOK(coll.insert(bulk));
+ assert.commandWorked(coll.insert(bulk));
if (docsInserted % 10000 == 0) {
print("Inserted " + docsInserted + " documents.");
diff --git a/jstests/sharding/change_stream_chunk_migration.js b/jstests/sharding/change_stream_chunk_migration.js
index a4e74ed3efd..272353befb8 100644
--- a/jstests/sharding/change_stream_chunk_migration.js
+++ b/jstests/sharding/change_stream_chunk_migration.js
@@ -45,8 +45,8 @@ assert.commandWorked(
mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
// Insert two documents.
-assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
// Split the collection into two chunks: [MinKey, 10) and [10, MaxKey].
assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
@@ -67,8 +67,8 @@ for (let id of [0, 20]) {
}
// Insert into both the chunks.
-assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
// Split again, and move a second chunk to the first shard. The new chunks are:
// [MinKey, 0), [0, 10), and [10, MaxKey].
@@ -82,9 +82,9 @@ assert.commandWorked(mongos.adminCommand({
}));
// Insert again, into all three chunks.
-assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
// Make sure we can see all the inserts, without any 'retryNeeded' entries.
for (let nextExpectedId of [1, 21, -2, 2, 22]) {
@@ -101,9 +101,9 @@ assert(!changeStream.hasNext());
// Insert into all three chunks.
jsTestLog("Insert into all three chunks");
-assert.writeOK(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
jsTestLog("Move the [Minkey, 0) chunk to shard 1.");
assert.commandWorked(mongos.adminCommand({
@@ -114,9 +114,9 @@ assert.commandWorked(mongos.adminCommand({
}));
// Insert again, into all three chunks.
-assert.writeOK(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
// Make sure we can see all the inserts, without any 'retryNeeded' entries.
for (let nextExpectedId of [-3, 3, 23, -4, 4, 24]) {
@@ -133,25 +133,25 @@ assert.commandWorked(mongos.adminCommand({addShard: newShard.getURL(), name: "ne
// At this point, there haven't been any migrations to that shard; check that the changeStream
// works normally.
-assert.writeOK(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
for (let nextExpectedId of [-5, 5, 25]) {
assert.soon(() => changeStream.hasNext());
assert.eq(changeStream.next().documentKey, {_id: nextExpectedId});
}
-assert.writeOK(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
// Now migrate a chunk to the new shard and verify the stream continues to return results
// from both before and after the migration.
jsTestLog("Migrating [10, MaxKey] chunk to new shard.");
assert.commandWorked(mongos.adminCommand(
{moveChunk: mongosColl.getFullName(), find: {_id: 20}, to: "newShard", _waitForDelete: true}));
-assert.writeOK(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
for (let nextExpectedId of [16, -6, 6, 26]) {
assert.soon(() => changeStream.hasNext());
diff --git a/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js b/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
index 5d854fdf44c..d2be5e2d406 100644
--- a/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
+++ b/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
@@ -159,7 +159,7 @@ for (let shardDB of [shard0DB, shard1DB]) {
// Write a document to shard0, and confirm that - despite the fact that shard1 is still idle - a
// getMore with a high maxTimeMS returns the document before this timeout expires.
csCursorId = reopenChangeStream(csCursorId);
-assert.writeOK(mongosColl.insert({_id: -1}));
+assert.commandWorked(mongosColl.insert({_id: -1}));
startTime = (new Date()).getTime();
const csResult = assert.commandWorked(mongosDB.runCommand(
{getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: thirtyMins}));
diff --git a/jstests/sharding/change_stream_lookup_single_shard_cluster.js b/jstests/sharding/change_stream_lookup_single_shard_cluster.js
index 53fed919125..19fd090918c 100644
--- a/jstests/sharding/change_stream_lookup_single_shard_cluster.js
+++ b/jstests/sharding/change_stream_lookup_single_shard_cluster.js
@@ -36,7 +36,7 @@ const mongosColl = mongosDB[jsTestName()];
assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
assert.commandWorked(
mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-assert.writeOK(mongosColl.insert({_id: 1}));
+assert.commandWorked(mongosColl.insert({_id: 1}));
// Verify that the pipeline splits and merges on mongoS despite only targeting a single shard.
const explainPlan = assert.commandWorked(
diff --git a/jstests/sharding/change_stream_metadata_notifications.js b/jstests/sharding/change_stream_metadata_notifications.js
index 48138d089ec..4fd28583dd9 100644
--- a/jstests/sharding/change_stream_metadata_notifications.js
+++ b/jstests/sharding/change_stream_metadata_notifications.js
@@ -45,15 +45,15 @@ assert.commandWorked(mongosDB.adminCommand(
{moveChunk: mongosColl.getFullName(), find: {shardKey: 1}, to: st.rs1.getURL()}));
// Write a document to each chunk.
-assert.writeOK(mongosColl.insert({shardKey: -1, _id: -1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({shardKey: 1, _id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({shardKey: -1, _id: -1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({shardKey: 1, _id: 1}, {writeConcern: {w: "majority"}}));
let changeStream = mongosColl.watch();
// We awaited the replication of the first writes, so the change stream shouldn't return them.
-assert.writeOK(mongosColl.update({shardKey: -1, _id: -1}, {$set: {updated: true}}));
-assert.writeOK(mongosColl.update({shardKey: 1, _id: 1}, {$set: {updated: true}}));
-assert.writeOK(mongosColl.insert({shardKey: 2, _id: 2}));
+assert.commandWorked(mongosColl.update({shardKey: -1, _id: -1}, {$set: {updated: true}}));
+assert.commandWorked(mongosColl.update({shardKey: 1, _id: 1}, {$set: {updated: true}}));
+assert.commandWorked(mongosColl.insert({shardKey: 2, _id: 2}));
// Drop the collection and test that we return a "drop" entry, followed by an "invalidate"
// entry.
diff --git a/jstests/sharding/change_stream_read_preference.js b/jstests/sharding/change_stream_read_preference.js
index 1c4129e9952..41cd75ee901 100644
--- a/jstests/sharding/change_stream_read_preference.js
+++ b/jstests/sharding/change_stream_read_preference.js
@@ -60,16 +60,16 @@ for (let rs of [st.rs0, st.rs1]) {
}
// Write a document to each chunk.
-assert.writeOK(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
// Test that change streams go to the primary by default.
let changeStreamComment = "change stream against primary";
const primaryStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}],
{comment: changeStreamComment});
-assert.writeOK(mongosColl.update({_id: -1}, {$set: {updated: true}}));
-assert.writeOK(mongosColl.update({_id: 1}, {$set: {updated: true}}));
+assert.commandWorked(mongosColl.update({_id: -1}, {$set: {updated: true}}));
+assert.commandWorked(mongosColl.update({_id: 1}, {$set: {updated: true}}));
assert.soon(() => primaryStream.hasNext());
assert.eq(primaryStream.next().fullDocument, {_id: -1, updated: true});
@@ -100,8 +100,8 @@ const secondaryStream =
mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}],
{comment: changeStreamComment, $readPreference: {mode: "secondary"}});
-assert.writeOK(mongosColl.update({_id: -1}, {$set: {updatedCount: 2}}));
-assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
+assert.commandWorked(mongosColl.update({_id: -1}, {$set: {updatedCount: 2}}));
+assert.commandWorked(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
assert.soon(() => secondaryStream.hasNext());
assert.eq(secondaryStream.next().fullDocument, {_id: -1, updated: true, updatedCount: 2});
diff --git a/jstests/sharding/change_stream_show_migration_events.js b/jstests/sharding/change_stream_show_migration_events.js
index c07e059e4d1..570a8039a8c 100644
--- a/jstests/sharding/change_stream_show_migration_events.js
+++ b/jstests/sharding/change_stream_show_migration_events.js
@@ -72,8 +72,8 @@ assert.commandWorked(
mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
// Insert two documents.
-assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
// Split the collection into two chunks: [MinKey, 10) and [10, MaxKey].
assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
@@ -100,8 +100,8 @@ checkEvents(changeStreamShardZero, shardZeroEventsAfterNewShard);
checkEvents(changeStreamShardOne, shardOneEvents);
// Insert into both the chunks.
-assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
// Split again, and move a second chunk to the first shard. The new chunks are:
// [MinKey, 0), [0, 10), and [10, MaxKey].
@@ -115,9 +115,9 @@ assert.commandWorked(mongos.adminCommand({
}));
// Insert again, into all three chunks.
-assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
var shardZeroEvents = [
makeEvent(1, "insert"),
@@ -146,9 +146,9 @@ assert(!changeStreamShardOne.hasNext());
// Insert into all three chunks.
jsTestLog("Insert into all three chunks");
-assert.writeOK(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
jsTestLog("Move the [Minkey, 0) chunk to shard 1.");
assert.commandWorked(mongos.adminCommand({
@@ -159,9 +159,9 @@ assert.commandWorked(mongos.adminCommand({
}));
// Insert again, into all three chunks.
-assert.writeOK(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
// Check that each change stream returns the expected events.
shardZeroEvents = [
@@ -194,9 +194,9 @@ const changeStreamNewShard = newShard.getPrimary().getCollection('test.chunk_mig
// At this point, there haven't been any migrations to that shard; check that the changeStream
// works normally.
-assert.writeOK(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
shardOneEvents = [
makeEvent(-5, "insert"),
@@ -208,16 +208,16 @@ assert(!changeStreamShardZero.hasNext(), "Do not expect any results");
checkEvents(changeStreamShardOne, shardOneEvents);
assert(!changeStreamNewShard.hasNext(), "Do not expect any results yet");
-assert.writeOK(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
// Now migrate a chunk to the new shard and verify the stream continues to return results
// from both before and after the migration.
jsTestLog("Migrating [10, MaxKey] chunk to new shard.");
assert.commandWorked(mongos.adminCommand(
{moveChunk: mongosColl.getFullName(), find: {_id: 20}, to: "newShard", _waitForDelete: true}));
-assert.writeOK(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
let shardOneEventsBeforeNewShard = [
makeEvent(16, "insert"),
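The migration hunks in this file all follow one rhythm: move a chunk, write into it with majority write concern, then drain the per-shard streams. A hedged sketch using the file's own makeEvent()/checkEvents() helpers, with a hypothetical expectation (mongos, mongosColl, and changeStreamNewShard mirror the handles used above):
// Hedged sketch; the expected-event list is an assumption, not the file's.
assert.commandWorked(mongos.adminCommand(
    {moveChunk: mongosColl.getFullName(), find: {_id: 20}, to: "newShard", _waitForDelete: true}));
assert.commandWorked(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
checkEvents(changeStreamNewShard, [makeEvent(26, "insert")]);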
diff --git a/jstests/sharding/change_stream_update_lookup_collation.js b/jstests/sharding/change_stream_update_lookup_collation.js
index 9c13f4afac8..643f2869b46 100644
--- a/jstests/sharding/change_stream_update_lookup_collation.js
+++ b/jstests/sharding/change_stream_update_lookup_collation.js
@@ -72,10 +72,10 @@ assert.gte(bsonWoCompare({shardKey: "abc"}, {shardKey: "aBC"}), 1);
// know the update lookup will use both the _id and the shard key, and we want to make sure it
// is only targeting a single shard. Also note that _id is a string, since we want to make sure
// the _id index can only be used if we are using the collection's default collation.
-assert.writeOK(mongosColl.insert({_id: "abc_1", shardKey: "ABC"}));
-assert.writeOK(mongosColl.insert({_id: "abc_2", shardKey: "ABC"}));
-assert.writeOK(mongosColl.insert({_id: "abc_1", shardKey: "abc"}));
-assert.writeOK(mongosColl.insert({_id: "abc_2", shardKey: "abc"}));
+assert.commandWorked(mongosColl.insert({_id: "abc_1", shardKey: "ABC"}));
+assert.commandWorked(mongosColl.insert({_id: "abc_2", shardKey: "ABC"}));
+assert.commandWorked(mongosColl.insert({_id: "abc_1", shardKey: "abc"}));
+assert.commandWorked(mongosColl.insert({_id: "abc_2", shardKey: "abc"}));
// Verify that the post-change lookup uses the simple collation to target to a single shard,
// then uses the collection-default collation to perform the lookup on the shard.
@@ -128,10 +128,10 @@ const strengthOneCollation = {
// Insert some documents that might be confused with existing documents under the change
// stream's collation, but should not be confused during the update lookup.
-assert.writeOK(mongosColl.insert({_id: "abç_1", shardKey: "ABÇ"}));
-assert.writeOK(mongosColl.insert({_id: "abç_2", shardKey: "ABÇ"}));
-assert.writeOK(mongosColl.insert({_id: "abç_1", shardKey: "abç"}));
-assert.writeOK(mongosColl.insert({_id: "abç_2", shardKey: "abç"}));
+assert.commandWorked(mongosColl.insert({_id: "abç_1", shardKey: "ABÇ"}));
+assert.commandWorked(mongosColl.insert({_id: "abç_2", shardKey: "ABÇ"}));
+assert.commandWorked(mongosColl.insert({_id: "abç_1", shardKey: "abç"}));
+assert.commandWorked(mongosColl.insert({_id: "abç_2", shardKey: "abç"}));
assert.eq(mongosColl.find({shardKey: "abc"}).collation(strengthOneCollation).itcount(), 8);
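These inserts only make sense next to the strength-1 collation whose definition the hunk header truncates; a hedged sketch of a definition matching the test's intent (the exact locale used in the file is an assumption):
// Hedged sketch; strength: 1 ignores case and diacritics, so "abc", "ABC",
// "abç", and "ABÇ" all collide, and the eight documents above all match:
const strengthOneCollation = {locale: "en_US", strength: 1};
assert.eq(mongosColl.find({shardKey: "abc"}).collation(strengthOneCollation).itcount(), 8);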
diff --git a/jstests/sharding/change_stream_update_lookup_read_concern.js b/jstests/sharding/change_stream_update_lookup_read_concern.js
index 03b9ec86738..3dc1fb47bfd 100644
--- a/jstests/sharding/change_stream_update_lookup_read_concern.js
+++ b/jstests/sharding/change_stream_update_lookup_read_concern.js
@@ -63,7 +63,7 @@ assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})
assert.commandWorked(
mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-assert.writeOK(mongosColl.insert({_id: 1}));
+assert.commandWorked(mongosColl.insert({_id: 1}));
rst.awaitReplication();
// Make sure reads with read preference tag 'closestSecondary' go to the tagged secondary.
@@ -88,7 +88,7 @@ const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updat
comment: changeStreamComment,
$readPreference: {mode: "nearest", tags: [{tag: "closestSecondary"}]}
});
-assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 1}}));
+assert.commandWorked(mongosColl.update({_id: 1}, {$set: {updatedCount: 1}}));
assert.soon(() => changeStream.hasNext());
let latestChange = changeStream.next();
assert.eq(latestChange.operationType, "update");
@@ -160,7 +160,7 @@ profilerHasSingleMatchingEntryOrThrow({
// the new, lagged secondary. Even though it's lagged, the lookup should use 'afterClusterTime'
// to ensure it does not return until the node can see the change it's looking up.
stopServerReplication(newClosestSecondary);
-assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
+assert.commandWorked(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
// Since we stopped replication, we expect the update lookup to block indefinitely until
// replication is resumed, so we resume it from a parallel shell while this thread is blocked
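For reference, the stop/resume pair around this update is a hedged sketch of the write_concern_util.js helpers the test relies on; the parallel-shell body itself is not shown in this hunk:
// Hedged sketch, assuming jstests/libs/write_concern_util.js:
load("jstests/libs/write_concern_util.js");
stopServerReplication(newClosestSecondary);      // as in the hunk above
// ... the blocked update lookup runs here ...
restartServerReplication(newClosestSecondary);   // done from a parallel shell in the real test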
diff --git a/jstests/sharding/change_streams.js b/jstests/sharding/change_streams.js
index 08c075c1e18..83cfa09abd1 100644
--- a/jstests/sharding/change_streams.js
+++ b/jstests/sharding/change_streams.js
@@ -77,14 +77,14 @@ function runTest(collName, shardKey) {
makeShardKey(1) /* move to shard 1 */);
// Write a document to each chunk.
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1)));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1)));
+ assert.commandWorked(mongosColl.insert(makeShardKeyDocument(-1)));
+ assert.commandWorked(mongosColl.insert(makeShardKeyDocument(1)));
let changeStream = mongosColl.aggregate([{$changeStream: {}}]);
// Test that a change stream can see inserts on shard 0.
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1000)));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1000)));
+ assert.commandWorked(mongosColl.insert(makeShardKeyDocument(1000)));
+ assert.commandWorked(mongosColl.insert(makeShardKeyDocument(-1000)));
assert.soon(() => changeStream.hasNext(), "expected to be able to see the first insert");
assertChangeStreamEventEq(changeStream.next(), {
@@ -96,7 +96,7 @@ function runTest(collName, shardKey) {
// Because the periodic noop writer is disabled, do another write to shard 0 in order to
// advance that shard's clock and enable the stream to return the earlier write to shard 1
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1001)));
+ assert.commandWorked(mongosColl.insert(makeShardKeyDocument(1001)));
assert.soon(() => changeStream.hasNext(), "expected to be able to see the second insert");
assertChangeStreamEventEq(changeStream.next(), {
@@ -122,11 +122,11 @@ function runTest(collName, shardKey) {
changeStream.close();
jsTestLog('Testing multi-update change streams with shard key ' + shardKey);
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(10, {a: 0, b: 0})));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-10, {a: 0, b: 0})));
+ assert.commandWorked(mongosColl.insert(makeShardKeyDocument(10, {a: 0, b: 0})));
+ assert.commandWorked(mongosColl.insert(makeShardKeyDocument(-10, {a: 0, b: 0})));
changeStream = mongosColl.aggregate([{$changeStream: {}}]);
- assert.writeOK(mongosColl.update({a: 0}, {$set: {b: 2}}, {multi: true}));
+ assert.commandWorked(mongosColl.update({a: 0}, {$set: {b: 2}}, {multi: true}));
assert.soon(() => changeStream.hasNext());
assertChangeStreamEventEq(changeStream.next(), {
@@ -154,11 +154,11 @@ function runTest(collName, shardKey) {
assert.commandWorked(
st.s0.adminCommand({setParameter: 1, internalQueryProhibitMergingOnMongoS: false}));
- assert.writeOK(mongosColl.remove({}));
+ assert.commandWorked(mongosColl.remove({}));
// We awaited the replication of the first write, so the change stream shouldn't return it.
// Use { w: "majority" } to deal with journaling correctly, even though we only have one
// node.
- assert.writeOK(
+ assert.commandWorked(
mongosColl.insert(makeShardKeyDocument(0, {a: 1}), {writeConcern: {w: "majority"}}));
changeStream = mongosColl.aggregate([{$changeStream: {}}]);
@@ -188,15 +188,19 @@ function runTest(collName, shardKey) {
makeShardKey(1) /* move to shard 1 */);
// Write one document to each chunk.
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1), {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1), {writeConcern: {w: "majority"}}));
+ assert.commandWorked(
+ mongosColl.insert(makeShardKeyDocument(-1), {writeConcern: {w: "majority"}}));
+ assert.commandWorked(
+ mongosColl.insert(makeShardKeyDocument(1), {writeConcern: {w: "majority"}}));
changeStream = mongosColl.aggregate([{$changeStream: {}}]);
assert(!changeStream.hasNext());
// Store a valid resume token before dropping the collection, to be used later in the test
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-2), {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(2), {writeConcern: {w: "majority"}}));
+ assert.commandWorked(
+ mongosColl.insert(makeShardKeyDocument(-2), {writeConcern: {w: "majority"}}));
+ assert.commandWorked(
+ mongosColl.insert(makeShardKeyDocument(2), {writeConcern: {w: "majority"}}));
assert.soon(() => changeStream.hasNext());
const resumeToken = changeStream.next()._id;
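The token captured here is later replayed through the standard resumeAfter option of $changeStream; a minimal sketch of that replay (the assertions the test makes after resuming are not shown in this hunk):
// Hedged sketch; resumeAfter is core $changeStream syntax.
const resumedStream = mongosColl.aggregate([{$changeStream: {resumeAfter: resumeToken}}]);
assert.soon(() => resumedStream.hasNext());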
diff --git a/jstests/sharding/change_streams_establishment_finds_new_shards.js b/jstests/sharding/change_streams_establishment_finds_new_shards.js
index 146fc166d50..8f2393f99ee 100644
--- a/jstests/sharding/change_streams_establishment_finds_new_shards.js
+++ b/jstests/sharding/change_streams_establishment_finds_new_shards.js
@@ -68,8 +68,8 @@ assert(!changeStream.hasNext(), "Do not expect any results yet");
awaitNewShard();
// Insert two documents in different shards.
-assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
// Expect to see them both.
for (let id of [0, 20]) {
diff --git a/jstests/sharding/change_streams_primary_shard_unaware.js b/jstests/sharding/change_streams_primary_shard_unaware.js
index b325f770585..de89b928998 100644
--- a/jstests/sharding/change_streams_primary_shard_unaware.js
+++ b/jstests/sharding/change_streams_primary_shard_unaware.js
@@ -58,7 +58,7 @@ assert.commandWorked(mongosDB.createCollection(testName));
// triggering a refresh when a change stream is established through mongos2.
const mongos2DB = st.s2.getDB(testName);
const mongos2Coll = mongos2DB[testName];
-assert.writeOK(mongos2Coll.insert({_id: 0, a: 0}));
+assert.commandWorked(mongos2Coll.insert({_id: 0, a: 0}));
// Create index on the shard key.
assert.commandWorked(mongos2Coll.createIndex({a: 1}));
@@ -92,7 +92,7 @@ assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
// Insert a doc and verify that the primary shard is now aware that the collection is sharded.
-assert.writeOK(mongosColl.insert({_id: 1, a: 1}));
+assert.commandWorked(mongosColl.insert({_id: 1, a: 1}));
assert.eq(true, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
// Verify that both cursors are able to pick up an inserted document.
@@ -123,10 +123,10 @@ assert.commandWorked(mongosDB.adminCommand({
}));
// Update the document on the primary shard.
-assert.writeOK(mongosColl.update({_id: 1, a: 1}, {$set: {b: 1}}));
+assert.commandWorked(mongosColl.update({_id: 1, a: 1}, {$set: {b: 1}}));
// Insert another document to each shard.
-assert.writeOK(mongosColl.insert({_id: -2, a: -2}));
-assert.writeOK(mongosColl.insert({_id: 2, a: 2}));
+assert.commandWorked(mongosColl.insert({_id: -2, a: -2}));
+assert.commandWorked(mongosColl.insert({_id: 2, a: 2}));
// Verify that both cursors pick up the first inserted doc regardless of the moveChunk
// operation.
diff --git a/jstests/sharding/change_streams_shards_start_in_sync.js b/jstests/sharding/change_streams_shards_start_in_sync.js
index 3928913a1bb..7b493800dcc 100644
--- a/jstests/sharding/change_streams_shards_start_in_sync.js
+++ b/jstests/sharding/change_streams_shards_start_in_sync.js
@@ -98,17 +98,17 @@ function waitForShardCursor(rs) {
// Make sure the shard 0 $changeStream cursor is established before doing the first writes.
waitForShardCursor(st.rs0);
-assert.writeOK(mongosColl.insert({_id: -1000}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -1000}, {writeConcern: {w: "majority"}}));
// This write to shard 1 occurs before the $changeStream cursor on shard 1 is open, because the
// mongos where the $changeStream is running is disconnected from shard 1.
-assert.writeOK(mongosColl.insert({_id: 1001}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: 1001}, {writeConcern: {w: "majority"}}));
jsTestLog("Reconnecting");
st.rs1.getPrimary().reconnect(st.s1);
waitForShardCursor(st.rs1);
-assert.writeOK(mongosColl.insert({_id: -1002}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosColl.insert({_id: -1002}, {writeConcern: {w: "majority"}}));
waitForShell();
st.stop();
})();
diff --git a/jstests/sharding/change_streams_unsharded_becomes_sharded.js b/jstests/sharding/change_streams_unsharded_becomes_sharded.js
index c28e19c9520..9ab4b1901fa 100644
--- a/jstests/sharding/change_streams_unsharded_becomes_sharded.js
+++ b/jstests/sharding/change_streams_unsharded_becomes_sharded.js
@@ -57,8 +57,8 @@ function testUnshardedBecomesSharded(collToWatch) {
// Verify that the cursor picks up documents inserted while the collection is unsharded. The
// 'documentKey' at this point is simply the _id field.
- assert.writeOK(mongosColl.insert({_id: 0, x: 0}));
- assert.writeOK(mongosCollOther.insert({_id: 0, y: 0}));
+ assert.commandWorked(mongosColl.insert({_id: 0, x: 0}));
+ assert.commandWorked(mongosCollOther.insert({_id: 0, y: 0}));
const [preShardCollectionChange] = cst.assertNextChangesEqual({
cursor: cursor,
expectedChanges: [{
@@ -100,8 +100,8 @@ function testUnshardedBecomesSharded(collToWatch) {
// documents. The 'documentKey' field should now include the shard key, even before a
// 'kNewShardDetected' operation has been generated by the migration of a chunk to a new
// shard.
- assert.writeOK(mongosColl.insert({_id: 1, x: 1}));
- assert.writeOK(mongosCollOther.insert({_id: 1, y: 1}));
+ assert.commandWorked(mongosColl.insert({_id: 1, x: 1}));
+ assert.commandWorked(mongosCollOther.insert({_id: 1, y: 1}));
cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [postShardCollectionChanges[0]]});
// Move the [minKey, 0) chunk to shard1.
@@ -119,8 +119,8 @@ function testUnshardedBecomesSharded(collToWatch) {
}));
// Make sure the change stream cursor sees a document inserted on the recipient shard.
- assert.writeOK(mongosColl.insert({_id: -1, x: -1}));
- assert.writeOK(mongosCollOther.insert({_id: -1, y: -1}));
+ assert.commandWorked(mongosColl.insert({_id: -1, x: -1}));
+ assert.commandWorked(mongosCollOther.insert({_id: -1, y: -1}));
cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [postShardCollectionChanges[1]]});
// Confirm that we can resume the stream on the sharded collection using the token generated
@@ -145,8 +145,8 @@ function testUnshardedBecomesSharded(collToWatch) {
// Insert a couple documents to shard1, creating a scenario where the getMore to shard0 will
// indicate that the change stream is invalidated yet shard1 will still have data to return.
- assert.writeOK(mongosColl.insert({_id: -2, x: -2}));
- assert.writeOK(mongosColl.insert({_id: -3, x: -3}));
+ assert.commandWorked(mongosColl.insert({_id: -2, x: -2}));
+ assert.commandWorked(mongosColl.insert({_id: -3, x: -3}));
// Drop and recreate the collection.
mongosColl.drop();
@@ -156,8 +156,8 @@ function testUnshardedBecomesSharded(collToWatch) {
// Shard the collection on a different shard key and ensure that each shard has a chunk.
st.shardColl(mongosColl.getName(), {z: 1}, {z: 0}, {z: -1}, mongosDB.getName());
- assert.writeOK(mongosColl.insert({_id: -1, z: -1}));
- assert.writeOK(mongosColl.insert({_id: 1, z: 1}));
+ assert.commandWorked(mongosColl.insert({_id: -1, z: -1}));
+ assert.commandWorked(mongosColl.insert({_id: 1, z: 1}));
// Verify that the change stream picks up the inserts; however, the shard key is missing
// since the collection has been dropped and recreated.
diff --git a/jstests/sharding/change_streams_whole_db.js b/jstests/sharding/change_streams_whole_db.js
index 322be4a19b4..baefc8107c3 100644
--- a/jstests/sharding/change_streams_whole_db.js
+++ b/jstests/sharding/change_streams_whole_db.js
@@ -35,7 +35,7 @@ let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collecti
assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
// Test that the change stream returns operations on the unsharded test collection.
-assert.writeOK(mongosColl.insert({_id: 0}));
+assert.commandWorked(mongosColl.insert({_id: 0}));
let expected = {
documentKey: {_id: 0},
fullDocument: {_id: 0},
@@ -52,8 +52,8 @@ const mongosCollShardedOnX = mongosDB[jsTestName() + "_sharded_on_x"];
st.shardColl(mongosCollShardedOnX.getName(), {x: 1}, {x: 0}, {x: 1}, mongosDB.getName());
// Write a document to each chunk.
-assert.writeOK(mongosCollShardedOnX.insert({_id: 0, x: -1}));
-assert.writeOK(mongosCollShardedOnX.insert({_id: 1, x: 1}));
+assert.commandWorked(mongosCollShardedOnX.insert({_id: 0, x: -1}));
+assert.commandWorked(mongosCollShardedOnX.insert({_id: 1, x: 1}));
// Verify that the change stream returns both inserts.
expected = [
@@ -74,8 +74,8 @@ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
// Now send inserts to both the sharded and unsharded collections, and verify that the change
// stream returns them in order.
-assert.writeOK(mongosCollShardedOnX.insert({_id: 2, x: 2}));
-assert.writeOK(mongosColl.insert({_id: 1}));
+assert.commandWorked(mongosCollShardedOnX.insert({_id: 2, x: 2}));
+assert.commandWorked(mongosColl.insert({_id: 1}));
// Verify that the change stream returns both inserts.
expected = [
@@ -106,8 +106,8 @@ st.shardColl(mongosCollShardedCompound.getName(),
mongosDB.getName());
// Write a document to each chunk.
-assert.writeOK(mongosCollShardedCompound.insert({_id: 0, y: -1, x: 0}));
-assert.writeOK(mongosCollShardedCompound.insert({_id: 1, y: 1, x: 0}));
+assert.commandWorked(mongosCollShardedCompound.insert({_id: 0, y: -1, x: 0}));
+assert.commandWorked(mongosCollShardedCompound.insert({_id: 1, y: 1, x: 0}));
// Verify that the change stream returns both inserts.
expected = [
@@ -128,9 +128,9 @@ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
// Send inserts to all 3 collections and verify that the results contain the correct
// documentKeys and are in the correct order.
-assert.writeOK(mongosCollShardedOnX.insert({_id: 3, x: 3}));
-assert.writeOK(mongosColl.insert({_id: 3}));
-assert.writeOK(mongosCollShardedCompound.insert({_id: 2, x: 0, y: -2}));
+assert.commandWorked(mongosCollShardedOnX.insert({_id: 3, x: 3}));
+assert.commandWorked(mongosColl.insert({_id: 3}));
+assert.commandWorked(mongosCollShardedCompound.insert({_id: 2, x: 0, y: -2}));
// Verify that the change stream returns all three inserts.
expected = [
@@ -160,7 +160,7 @@ const resumeTokenBeforeDrop = results[0]._id;
// Write one more document to the collection that will be dropped, to be returned after
// resuming.
-assert.writeOK(mongosCollShardedOnX.insert({_id: 4, x: 4}));
+assert.commandWorked(mongosCollShardedOnX.insert({_id: 4, x: 4}));
// Drop the collection, invalidating the open change stream.
assertDropCollection(mongosDB, mongosCollShardedOnX.getName());
diff --git a/jstests/sharding/cleanup_orphaned_cmd_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
index bf996dda39b..90c6c6cb0f1 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
@@ -42,7 +42,7 @@ for (var s = 0; s < 2; s++) {
var bulk = shardColl.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++)
bulk.insert({_id: i});
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}
assert.eq(200,
diff --git a/jstests/sharding/clone_catalog_data.js b/jstests/sharding/clone_catalog_data.js
index 3daed3214c4..94d3ee8fd8f 100644
--- a/jstests/sharding/clone_catalog_data.js
+++ b/jstests/sharding/clone_catalog_data.js
@@ -26,8 +26,8 @@
// Create some test documents and put them in each collection.
[{a: 1, b: 2, c: 4}, {a: 2, b: 4, c: 8}, {a: 3, b: 6, c: 12}].forEach(d => {
- assert.writeOK(testDB.coll1.insert(d));
- assert.writeOK(testDB.coll2.insert(d));
+ assert.commandWorked(testDB.coll1.insert(d));
+ assert.commandWorked(testDB.coll2.insert(d));
});
// Create indexes on each collection.
diff --git a/jstests/sharding/coll_epoch_test1.js b/jstests/sharding/coll_epoch_test1.js
index d995ee19ab6..88d7c4fc3da 100644
--- a/jstests/sharding/coll_epoch_test1.js
+++ b/jstests/sharding/coll_epoch_test1.js
@@ -34,7 +34,7 @@ var bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({_id: i, test: "a"});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(100, staleMongos.getCollection(coll + "").find({test: "a"}).itcount());
assert(coll.drop());
@@ -56,7 +56,7 @@ bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({notId: i, test: "b"});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(100, staleMongos.getCollection(coll + "").find({test: "b"}).itcount());
assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a"]}}).itcount());
@@ -74,7 +74,7 @@ bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({test: "c"});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(100, staleMongos.getCollection(coll + "").find({test: "c"}).itcount());
assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a", "b"]}}).itcount());
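The bulk builders in this file return a BulkWriteResult rather than a WriteResult, and assert.commandWorked() accepts that too; a minimal sketch of the pattern these hunks touch:
// Hedged sketch, mirroring the loop above:
var bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
    bulk.insert({_id: i, test: "a"});
}
assert.commandWorked(bulk.execute());  // BulkWriteResult, formerly assert.writeOK()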
diff --git a/jstests/sharding/coll_epoch_test2.js b/jstests/sharding/coll_epoch_test2.js
index 0033e59f57d..e01513adb04 100644
--- a/jstests/sharding/coll_epoch_test2.js
+++ b/jstests/sharding/coll_epoch_test2.js
@@ -32,7 +32,7 @@ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-assert.writeOK(coll.insert({hello: "world"}));
+assert.commandWorked(coll.insert({hello: "world"}));
jsTest.log("Sharding collection across multiple shards...");
@@ -86,7 +86,7 @@ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++)
bulk.insert({_id: i});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
res = admin.runCommand({split: coll + "", middle: {_id: 200}});
assert.commandWorked(res);
@@ -112,17 +112,18 @@ assert.neq(null, readMongos.getCollection(coll + "").findOne({_id: 1}));
jsTest.log("Checking update...");
// Ensure that updating an element finds the right location
-assert.writeOK(updateMongos.getCollection(coll + "").update({_id: 1}, {$set: {updated: true}}));
+assert.commandWorked(
+ updateMongos.getCollection(coll + "").update({_id: 1}, {$set: {updated: true}}));
assert.neq(null, coll.findOne({updated: true}));
jsTest.log("Checking insert...");
// Ensure that inserting an element finds the right shard
-assert.writeOK(insertMongos.getCollection(coll + "").insert({_id: 101}));
+assert.commandWorked(insertMongos.getCollection(coll + "").insert({_id: 101}));
assert.neq(null, coll.findOne({_id: 101}));
jsTest.log("Checking remove...");
// Ensure that removing an element finds the right shard, verified by the mongos doing the sharding
-assert.writeOK(removeMongos.getCollection(coll + "").remove({_id: 2}));
+assert.commandWorked(removeMongos.getCollection(coll + "").remove({_id: 2}));
assert.eq(null, coll.findOne({_id: 2}));
coll.drop();
diff --git a/jstests/sharding/collation_lookup.js b/jstests/sharding/collation_lookup.js
index f9388cf9aa3..f5e928e8374 100644
--- a/jstests/sharding/collation_lookup.js
+++ b/jstests/sharding/collation_lookup.js
@@ -367,11 +367,11 @@ const withoutDefaultCollationColl = mongosDB[testName + "_without_default"];
assert.commandWorked(
mongosDB.createCollection(withDefaultCollationColl.getName(), caseInsensitive));
-assert.writeOK(withDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
+assert.commandWorked(withDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
-assert.writeOK(withoutDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
-assert.writeOK(withoutDefaultCollationColl.insert({_id: "uppercase", str: "ABC"}));
-assert.writeOK(withoutDefaultCollationColl.insert({_id: "unmatched", str: "def"}));
+assert.commandWorked(withoutDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
+assert.commandWorked(withoutDefaultCollationColl.insert({_id: "uppercase", str: "ABC"}));
+assert.commandWorked(withoutDefaultCollationColl.insert({_id: "unmatched", str: "def"}));
//
// Sharded collection with default collation and unsharded collection without a default
diff --git a/jstests/sharding/collation_targeting.js b/jstests/sharding/collation_targeting.js
index c58396eaa80..3fb27342ca4 100644
--- a/jstests/sharding/collation_targeting.js
+++ b/jstests/sharding/collation_targeting.js
@@ -46,10 +46,10 @@ var a_1 = {_id: 0, a: 1, geo: {type: "Point", coordinates: [0, 0]}};
var a_100 = {_id: 1, a: 100, geo: {type: "Point", coordinates: [0, 0]}};
var a_FOO = {_id: 2, a: "FOO", geo: {type: "Point", coordinates: [0, 0]}};
var a_foo = {_id: 3, a: "foo", geo: {type: "Point", coordinates: [0, 0]}};
-assert.writeOK(coll.insert(a_1));
-assert.writeOK(coll.insert(a_100));
-assert.writeOK(coll.insert(a_FOO));
-assert.writeOK(coll.insert(a_foo));
+assert.commandWorked(coll.insert(a_1));
+assert.commandWorked(coll.insert(a_100));
+assert.commandWorked(coll.insert(a_FOO));
+assert.commandWorked(coll.insert(a_foo));
// Aggregate.
@@ -245,33 +245,33 @@ assert.eq(1,
// Test a remove command on strings with non-simple collation. This should be scatter-gather.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.remove({a: "foo"}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(2, writeRes.nRemoved);
explain = coll.explain().remove({a: "foo"}, {collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_FOO));
- assert.writeOK(coll.insert(a_foo));
+ assert.commandWorked(coll.insert(a_FOO));
+ assert.commandWorked(coll.insert(a_foo));
}
// Test a remove command on strings with simple collation. This should be single-shard.
writeRes = coll.remove({a: "foo"});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = coll.explain().remove({a: "foo"});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-assert.writeOK(coll.insert(a_foo));
+assert.commandWorked(coll.insert(a_foo));
// Test a remove command on numbers with non-simple collation. This should be single-shard.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.remove({a: 100}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = coll.explain().remove({a: 100}, {collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_100));
+ assert.commandWorked(coll.insert(a_100));
}
// A single remove (justOne: true) must be single-shard or an exact-ID query. A query is
@@ -287,23 +287,23 @@ if (testDB.getMongo().writeMode() === "commands") {
// Single remove on string shard key with simple collation should succeed, because it is
// single-shard.
writeRes = coll.remove({a: "foo"}, {justOne: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = coll.explain().remove({a: "foo"}, {justOne: true});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-assert.writeOK(coll.insert(a_foo));
+assert.commandWorked(coll.insert(a_foo));
// Single remove on number shard key with non-simple collation should succeed, because it is
// single-shard.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.remove({a: 100}, {justOne: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = coll.explain().remove({a: 100}, {justOne: true, collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_100));
+ assert.commandWorked(coll.insert(a_100));
}
// Single remove on string _id with non-collection-default collation should fail, because it is
@@ -314,17 +314,17 @@ if (testDB.getMongo().writeMode() === "commands") {
// Single remove on string _id with collection-default collation should succeed, because it is
// an exact-ID query.
-assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+assert.commandWorked(coll.insert({_id: "foo", a: "bar"}));
writeRes = coll.remove({_id: "foo"}, {justOne: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
// Single remove on string _id with collection-default collation explicitly given should
// succeed, because it is an exact-ID query.
if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+ assert.commandWorked(coll.insert({_id: "foo", a: "bar"}));
writeRes = coll.remove({_id: "foo"}, {justOne: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
}
@@ -332,9 +332,9 @@ if (testDB.getMongo().writeMode() === "commands") {
// is an exact-ID query.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.remove({_id: a_100._id}, {justOne: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
- assert.writeOK(coll.insert(a_100));
+ assert.commandWorked(coll.insert(a_100));
}
// Update.
@@ -342,7 +342,7 @@ if (testDB.getMongo().writeMode() === "commands") {
// Test an update command on strings with non-simple collation. This should be scatter-gather.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(2, writeRes.nMatched);
explain = coll.explain().update(
{a: "foo"}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
@@ -352,7 +352,7 @@ if (testDB.getMongo().writeMode() === "commands") {
// Test an update command on strings with simple collation. This should be single-shard.
writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = coll.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true});
assert.commandWorked(explain);
@@ -361,7 +361,7 @@ assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
// Test an update command on numbers with non-simple collation. This should be single-shard.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.update({a: 100}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain =
coll.explain().update({a: 100}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
@@ -382,7 +382,7 @@ if (testDB.getMongo().writeMode() === "commands") {
// Single update on string shard key with simple collation should succeed, because it is
// single-shard.
writeRes = coll.update({a: "foo"}, {$set: {b: 1}});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = coll.explain().update({a: "foo"}, {$set: {b: 1}});
assert.commandWorked(explain);
@@ -392,7 +392,7 @@ assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
// single-shard.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.update({a: 100}, {$set: {b: 1}}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = coll.explain().update({a: 100}, {$set: {b: 1}}, {collation: caseInsensitive});
assert.commandWorked(explain);
@@ -402,34 +402,34 @@ if (testDB.getMongo().writeMode() === "commands") {
// Single update on string _id with non-collection-default collation should fail, because it is
// not an exact-ID query.
if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+ assert.commandWorked(coll.insert({_id: "foo", a: "bar"}));
assert.writeError(coll.update({_id: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive}));
- assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
+ assert.commandWorked(coll.remove({_id: "foo"}, {justOne: true}));
}
// Single update on string _id with collection-default collation should succeed, because it is
// an exact-ID query.
-assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+assert.commandWorked(coll.insert({_id: "foo", a: "bar"}));
writeRes = coll.update({_id: "foo"}, {$set: {b: 1}});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
-assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
+assert.commandWorked(coll.remove({_id: "foo"}, {justOne: true}));
// Single update on string _id with collection-default collation explicitly given should
// succeed, because it is an exact-ID query.
if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+ assert.commandWorked(coll.insert({_id: "foo", a: "bar"}));
writeRes = coll.update({_id: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
- assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
+ assert.commandWorked(coll.remove({_id: "foo"}, {justOne: true}));
}
// Single update on number _id with non-collection-default collation should succeed, because it
// is an exact-ID query.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.update({_id: a_foo._id}, {$set: {b: 1}}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
}
@@ -443,7 +443,7 @@ if (testDB.getMongo().writeMode() === "commands") {
// Upsert on strings with simple collation should succeed, because it is single-shard.
writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = coll.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true});
assert.commandWorked(explain);
@@ -453,7 +453,7 @@ assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
if (testDB.getMongo().writeMode() === "commands") {
writeRes = coll.update(
{a: 100}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = coll.explain().update(
{a: 100}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive});
diff --git a/jstests/sharding/collation_targeting_inherited.js b/jstests/sharding/collation_targeting_inherited.js
index 676dadbc972..c4e86fb1833 100644
--- a/jstests/sharding/collation_targeting_inherited.js
+++ b/jstests/sharding/collation_targeting_inherited.js
@@ -53,10 +53,10 @@ var a_1 = {_id: 0, a: 1, geo: {type: "Point", coordinates: [0, 0]}};
var a_100 = {_id: 1, a: 100, geo: {type: "Point", coordinates: [0, 0]}};
var a_FOO = {_id: 2, a: "FOO", geo: {type: "Point", coordinates: [0, 0]}};
var a_foo = {_id: 3, a: "foo", geo: {type: "Point", coordinates: [0, 0]}};
-assert.writeOK(collCaseInsensitive.insert(a_1));
-assert.writeOK(collCaseInsensitive.insert(a_100));
-assert.writeOK(collCaseInsensitive.insert(a_FOO));
-assert.writeOK(collCaseInsensitive.insert(a_foo));
+assert.commandWorked(collCaseInsensitive.insert(a_1));
+assert.commandWorked(collCaseInsensitive.insert(a_100));
+assert.commandWorked(collCaseInsensitive.insert(a_FOO));
+assert.commandWorked(collCaseInsensitive.insert(a_foo));
// Aggregate.
@@ -264,34 +264,34 @@ assert.eq(1,
// Test a remove command on strings with non-simple collation inherited from collection default.
// This should be scatter-gather.
writeRes = collCaseInsensitive.remove({a: "foo"});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(2, writeRes.nRemoved);
explain = collCaseInsensitive.explain().remove({a: "foo"});
assert.commandWorked(explain);
assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-assert.writeOK(collCaseInsensitive.insert(a_FOO));
-assert.writeOK(collCaseInsensitive.insert(a_foo));
+assert.commandWorked(collCaseInsensitive.insert(a_FOO));
+assert.commandWorked(collCaseInsensitive.insert(a_foo));
// Test a remove command on strings with simple collation. This should be single-shard.
if (testDB.getMongo().writeMode() === "commands") {
writeRes = collCaseInsensitive.remove({a: "foo"}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = collCaseInsensitive.explain().remove({a: "foo"}, {collation: {locale: "simple"}});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(collCaseInsensitive.insert(a_foo));
+ assert.commandWorked(collCaseInsensitive.insert(a_foo));
}
// Test a remove command on numbers with non-simple collation inherited from collection default.
// This should be single-shard.
writeRes = collCaseInsensitive.remove({a: 100});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = collCaseInsensitive.explain().remove({a: 100});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-assert.writeOK(collCaseInsensitive.insert(a_100));
+assert.commandWorked(collCaseInsensitive.insert(a_100));
// A single remove (justOne: true) must be single-shard or an exact-ID query. A query is
// exact-ID if it contains an equality on _id and either has the collection default collation or
@@ -306,24 +306,24 @@ assert.writeError(collCaseInsensitive.remove({a: "foo"}, {justOne: true}));
if (testDB.getMongo().writeMode() === "commands") {
writeRes =
collCaseInsensitive.remove({a: "foo"}, {justOne: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = collCaseInsensitive.explain().remove({a: "foo"},
{justOne: true, collation: {locale: "simple"}});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(collCaseInsensitive.insert(a_foo));
+ assert.commandWorked(collCaseInsensitive.insert(a_foo));
}
// Single remove on number shard key with non-simple collation inherited from collection default
// should succeed, because it is single-shard.
writeRes = collCaseInsensitive.remove({a: 100}, {justOne: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
explain = collCaseInsensitive.explain().remove({a: 100}, {justOne: true});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-assert.writeOK(collCaseInsensitive.insert(a_100));
+assert.commandWorked(collCaseInsensitive.insert(a_100));
// Single remove on string _id with non-collection-default collation should fail, because it is
// not an exact-ID query.
@@ -332,18 +332,18 @@ assert.writeError(
// Single remove on string _id with collection-default collation should succeed, because it is
// an exact-ID query.
-assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
+assert.commandWorked(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
writeRes = collCaseInsensitive.remove({_id: "foo"}, {justOne: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
// Single remove on string _id with collection-default collation explicitly given should
// succeed, because it is an exact-ID query.
if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
+ assert.commandWorked(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
writeRes =
collCaseInsensitive.remove({_id: "foo"}, {justOne: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
}
@@ -351,16 +351,16 @@ if (testDB.getMongo().writeMode() === "commands") {
// is an exact-ID query.
writeRes =
collCaseInsensitive.remove({_id: a_100._id}, {justOne: true, collation: {locale: "simple"}});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nRemoved);
-assert.writeOK(collCaseInsensitive.insert(a_100));
+assert.commandWorked(collCaseInsensitive.insert(a_100));
// Update.
// Test an update command on strings with non-simple collation inherited from collection
// default. This should be scatter-gather.
writeRes = collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {multi: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(2, writeRes.nMatched);
explain = collCaseInsensitive.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true});
assert.commandWorked(explain);
@@ -370,7 +370,7 @@ assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
if (testDB.getMongo().writeMode() === "commands") {
writeRes = collCaseInsensitive.update(
{a: "foo"}, {$set: {b: 1}}, {multi: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = collCaseInsensitive.explain().update(
{a: "foo"}, {$set: {b: 1}}, {multi: true, collation: {locale: "simple"}});
@@ -381,7 +381,7 @@ if (testDB.getMongo().writeMode() === "commands") {
// Test an update command on numbers with non-simple collation inherited from collection
// default. This should be single-shard.
writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}}, {multi: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}}, {multi: true});
assert.commandWorked(explain);
@@ -401,7 +401,7 @@ assert.writeError(collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}));
if (testDB.getMongo().writeMode() === "commands") {
writeRes =
collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = collCaseInsensitive.explain().update(
{a: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
@@ -412,7 +412,7 @@ if (testDB.getMongo().writeMode() === "commands") {
// Single update on number shard key with non-simple collation inherited from collection default
// should succeed, because it is single-shard.
writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}});
assert.commandWorked(explain);
@@ -427,21 +427,21 @@ if (testDB.getMongo().writeMode() === "commands") {
// Single update on string _id with collection-default collation should succeed, because it is
// an exact-ID query.
-assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
+assert.commandWorked(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
writeRes = collCaseInsensitive.update({_id: "foo"}, {$set: {b: 1}});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
-assert.writeOK(collCaseInsensitive.remove({_id: "foo"}, {justOne: true}));
+assert.commandWorked(collCaseInsensitive.remove({_id: "foo"}, {justOne: true}));
// Single update on string _id with collection-default collation explicitly given should
// succeed, because it is an exact-ID query.
if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
+ assert.commandWorked(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
writeRes =
collCaseInsensitive.update({_id: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
- assert.writeOK(collCaseInsensitive.remove({_id: "foo"}, {justOne: true}));
+ assert.commandWorked(collCaseInsensitive.remove({_id: "foo"}, {justOne: true}));
}
// Single update on number _id with non-collection-default collation inherited from collection
@@ -449,7 +449,7 @@ if (testDB.getMongo().writeMode() === "commands") {
if (testDB.getMongo().writeMode() === "commands") {
writeRes = collCaseInsensitive.update(
{_id: a_foo._id}, {$set: {b: 1}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
}
@@ -464,7 +464,7 @@ assert.writeError(
if (testDB.getMongo().writeMode() === "commands") {
writeRes = collCaseInsensitive.update(
{a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
+ assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain = collCaseInsensitive.explain().update(
{a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: {locale: "simple"}});
@@ -475,7 +475,7 @@ if (testDB.getMongo().writeMode() === "commands") {
// Upsert on numbers with non-simple collation inherited from collection default should succeed,
// because it is single-shard.
writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}}, {multi: true, upsert: true});
-assert.writeOK(writeRes);
+assert.commandWorked(writeRes);
assert.eq(1, writeRes.nMatched);
explain =
collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}}, {multi: true, upsert: true});
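Nearly every write in this file is paired with an explain() that counts the shards targeted; a hedged sketch of that check, reusing the handles above:
// Hedged sketch: numeric shard-key values compare the same under any
// collation, so this multi-update should target exactly one shard.
var explainRes = collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}}, {multi: true});
assert.commandWorked(explainRes);
assert.eq(1, explainRes.queryPlanner.winningPlan.shards.length);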
diff --git a/jstests/sharding/config_rs_no_primary.js b/jstests/sharding/config_rs_no_primary.js
index 6b7c7155a6e..41d74869930 100644
--- a/jstests/sharding/config_rs_no_primary.js
+++ b/jstests/sharding/config_rs_no_primary.js
@@ -35,7 +35,7 @@ var testOps = function(mongos) {
jsTestLog("Doing ops that don't require metadata writes and thus should succeed against: " +
mongos);
var initialCount = mongos.getDB('test').foo.count();
- assert.writeOK(mongos.getDB('test').foo.insert({a: 1}));
+ assert.commandWorked(mongos.getDB('test').foo.insert({a: 1}));
assert.eq(initialCount + 1, mongos.getDB('test').foo.count());
assert.throws(function() {
diff --git a/jstests/sharding/convert_to_and_from_sharded.js b/jstests/sharding/convert_to_and_from_sharded.js
index 15da2e0cc73..9076e803331 100644
--- a/jstests/sharding/convert_to_and_from_sharded.js
+++ b/jstests/sharding/convert_to_and_from_sharded.js
@@ -18,13 +18,13 @@ var checkBasicCRUD = function(coll) {
var doc = coll.findOne({_id: 'marker', y: {$exists: false}});
assert.neq(null, doc);
- assert.writeOK(coll.update({_id: 'marker'}, {$set: {y: 2}}));
+ assert.commandWorked(coll.update({_id: 'marker'}, {$set: {y: 2}}));
assert.eq(2, coll.findOne({_id: 'marker'}).y);
- assert.writeOK(coll.remove({_id: 'marker'}));
+ assert.commandWorked(coll.remove({_id: 'marker'}));
assert.eq(null, coll.findOne({_id: 'marker'}));
- assert.writeOK(coll.insert({_id: 'marker'}, {writeConcern: {w: NUM_NODES}}));
+ assert.commandWorked(coll.insert({_id: 'marker'}, {writeConcern: {w: NUM_NODES}}));
assert.eq('marker', coll.findOne({_id: 'marker'})._id);
};
@@ -49,10 +49,10 @@ if (jsTestOptions().shardMixedBinVersions) {
replShard.awaitReplication();
}
-assert.writeOK(priConn.getDB('test').unsharded.insert({_id: 'marker'}));
+assert.commandWorked(priConn.getDB('test').unsharded.insert({_id: 'marker'}));
checkBasicCRUD(priConn.getDB('test').unsharded);
-assert.writeOK(priConn.getDB('test').sharded.insert({_id: 'marker'}));
+assert.commandWorked(priConn.getDB('test').sharded.insert({_id: 'marker'}));
checkBasicCRUD(priConn.getDB('test').sharded);
for (var x = 0; x < NUM_NODES; x++) {
@@ -76,7 +76,7 @@ checkBasicCRUD(st.s.getDB('test').unsharded);
checkBasicCRUD(st.s.getDB('test').sharded);
for (x = 0; x < 4; x++) {
- assert.writeOK(st.s.getDB('test').sharded.insert({_id: x}));
+ assert.commandWorked(st.s.getDB('test').sharded.insert({_id: x}));
assert.commandWorked(st.s.adminCommand({split: 'test.sharded', middle: {_id: x}}));
}
diff --git a/jstests/sharding/count_config_servers.js b/jstests/sharding/count_config_servers.js
index ff7cff2c698..7df3c4b8843 100644
--- a/jstests/sharding/count_config_servers.js
+++ b/jstests/sharding/count_config_servers.js
@@ -17,7 +17,7 @@ var configDB = st.config;
var coll = configDB.test;
for (var x = 0; x < 10; x++) {
- assert.writeOK(coll.insert({v: x}));
+ assert.commandWorked(coll.insert({v: x}));
}
if (st.configRS) {
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
index 596509c1c2d..a0357de1f81 100644
--- a/jstests/sharding/count_slaveok.js
+++ b/jstests/sharding/count_slaveok.js
@@ -24,7 +24,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 300; i++) {
bulk.insert({i: i % 10});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
var connA = conn;
var connB = new Mongo(st.s.host);
diff --git a/jstests/sharding/covered_shard_key_indexes.js b/jstests/sharding/covered_shard_key_indexes.js
index b68c4bf1bca..2926fea3b1b 100644
--- a/jstests/sharding/covered_shard_key_indexes.js
+++ b/jstests/sharding/covered_shard_key_indexes.js
@@ -22,7 +22,7 @@ assert.commandWorked(
st.shard0.adminCommand({setParameter: 1, logComponentVerbosity: {query: {verbosity: 5}}}));
// Insert some data
-assert.writeOK(coll.insert({_id: true, a: true, b: true}));
+assert.commandWorked(coll.insert({_id: true, a: true, b: true}));
// Index without shard key query - not covered
assert.commandWorked(coll.ensureIndex({a: 1}));
@@ -49,7 +49,7 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {_id:
st.printShardingStatus();
// Insert some data
-assert.writeOK(coll.insert({_id: true, a: true, b: true}));
+assert.commandWorked(coll.insert({_id: true, a: true, b: true}));
// Index without shard key query - not covered
assert.commandWorked(coll.ensureIndex({a: 1}));
@@ -67,7 +67,7 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {a: 1,
st.printShardingStatus();
// Insert some data
-assert.writeOK(coll.insert({_id: true, a: true, b: true, c: true, d: true}));
+assert.commandWorked(coll.insert({_id: true, a: true, b: true, c: true, d: true}));
// Index without shard key query - not covered
assert.commandWorked(coll.ensureIndex({c: 1}));
@@ -101,7 +101,7 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {'a.b'
st.printShardingStatus();
// Insert some data
-assert.writeOK(coll.insert({_id: true, a: {b: true}, c: true}));
+assert.commandWorked(coll.insert({_id: true, a: {b: true}, c: true}));
// Index without shard key query - not covered
assert.commandWorked(coll.ensureIndex({c: 1}));
@@ -124,7 +124,7 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {a: 1}
st.printShardingStatus();
// Insert some bad data manually on the shard
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: "bad data", c: true}));
+assert.commandWorked(st.shard0.getCollection(coll.toString()).insert({_id: "bad data", c: true}));
// Index without shard key query - not covered but succeeds
assert.commandWorked(coll.ensureIndex({c: 1}));
diff --git a/jstests/sharding/create_idx_empty_primary.js b/jstests/sharding/create_idx_empty_primary.js
index f11ffd13f2a..86b81983a43 100644
--- a/jstests/sharding/create_idx_empty_primary.js
+++ b/jstests/sharding/create_idx_empty_primary.js
@@ -15,7 +15,7 @@ assert.commandWorked(testDB.adminCommand({shardcollection: 'test.user', key: {_i
assert.commandWorked(
testDB.adminCommand({movechunk: 'test.user', find: {_id: 0}, to: st.shard0.shardName}));
-assert.writeOK(testDB.user.insert({_id: 0}));
+assert.commandWorked(testDB.user.insert({_id: 0}));
var res = testDB.user.ensureIndex({i: 1});
assert.commandWorked(res);
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index 6b66c9cf130..b6070673ae8 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -20,7 +20,7 @@ var bulk = db.foo.initializeUnorderedBulkOp();
for (i = 0; i < numObjs; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(1,
s.config.chunks.count({"ns": "test.foo"}),
"test requires collection to have one chunk initially");
diff --git a/jstests/sharding/cursor_cleanup.js b/jstests/sharding/cursor_cleanup.js
index 741c7f48b3c..e78052322c7 100644
--- a/jstests/sharding/cursor_cleanup.js
+++ b/jstests/sharding/cursor_cleanup.js
@@ -27,8 +27,8 @@ for (var i = -150; i < 150; i++) {
bulk.insert({_id: i});
bulk2.insert({_id: i});
}
-assert.writeOK(bulk.execute());
-assert.writeOK(bulk2.execute());
+assert.commandWorked(bulk.execute());
+assert.commandWorked(bulk2.execute());
jsTest.log("Open a cursor to a sharded and unsharded collection.");
diff --git a/jstests/sharding/cursor_timeout.js b/jstests/sharding/cursor_timeout.js
index 7c43fd8f99a..316005dacad 100644
--- a/jstests/sharding/cursor_timeout.js
+++ b/jstests/sharding/cursor_timeout.js
@@ -67,7 +67,7 @@ assert.commandWorked(adminDB.runCommand({
}));
for (let x = 0; x < 20; x++) {
- assert.writeOK(routerColl.insert({x: x}));
+ assert.commandWorked(routerColl.insert({x: x}));
}
// Open both a normal and a no-timeout cursor on mongos. Batch size is 1 to ensure that
diff --git a/jstests/sharding/cursor_valid_after_shard_stepdown.js b/jstests/sharding/cursor_valid_after_shard_stepdown.js
index c26de68b8c6..52452809915 100644
--- a/jstests/sharding/cursor_valid_after_shard_stepdown.js
+++ b/jstests/sharding/cursor_valid_after_shard_stepdown.js
@@ -16,8 +16,8 @@ var db = st.s0.getDB('TestDB');
var coll = db.TestColl;
// Insert documents for the test
-assert.writeOK(coll.insert({x: 1, value: 'Test value 1'}));
-assert.writeOK(coll.insert({x: 2, value: 'Test value 2'}));
+assert.commandWorked(coll.insert({x: 1, value: 'Test value 1'}));
+assert.commandWorked(coll.insert({x: 2, value: 'Test value 2'}));
// Establish a cursor on the primary (by not using slaveOk read)
var findCursor = assert.commandWorked(db.runCommand({find: 'TestColl', batchSize: 1})).cursor;
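
As the cursor line above illustrates, assert.commandWorked() hands back the object it validated, so it can be chained straight into property access on a command response. A minimal sketch, names hypothetical:

// runCommand returns a raw response document; commandWorked verifies ok: 1
// and returns that same document.
var cursor = assert.commandWorked(db.runCommand({find: 'someColl', batchSize: 1})).cursor;
printjson(cursor.firstBatch);
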
diff --git a/jstests/sharding/delete_during_migrate.js b/jstests/sharding/delete_during_migrate.js
index 87b13519678..108097bf808 100644
--- a/jstests/sharding/delete_during_migrate.js
+++ b/jstests/sharding/delete_during_migrate.js
@@ -25,7 +25,7 @@ var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 200000; i++) {
bulk.insert({a: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// enable sharding of the collection. Only 1 chunk.
t.ensureIndex({a: 1});
diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js
index 26347ec8330..3c9499b73aa 100644
--- a/jstests/sharding/diffservers1.js
+++ b/jstests/sharding/diffservers1.js
@@ -6,9 +6,9 @@ var s = new ShardingTest({shards: 2});
assert.eq(2, s.config.shards.count(), "server count wrong");
var test1 = s.getDB("test1").foo;
-assert.writeOK(test1.insert({a: 1}));
-assert.writeOK(test1.insert({a: 2}));
-assert.writeOK(test1.insert({a: 3}));
+assert.commandWorked(test1.insert({a: 1}));
+assert.commandWorked(test1.insert({a: 2}));
+assert.commandWorked(test1.insert({a: 3}));
assert.eq(3, test1.count());
assert.commandFailed(s.s0.adminCommand({addshard: "sdd$%", maxTimeMS: 60000}), "Bad hostname");
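
The patch only rewrites success assertions; the negative helpers keep their distinct roles, as the addshard line above shows. assert.writeError() still expects a write to fail, and assert.commandFailed() expects an ok: 0 command response. A minimal illustration, names hypothetical:

// A duplicate _id should surface as a write error, not a command failure.
assert.commandWorked(db.someColl.insert({_id: 7}));
assert.writeError(db.someColl.insert({_id: 7}));
// A malformed host should fail at the command level.
assert.commandFailed(db.adminCommand({addshard: "bad$hostname"}));
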
diff --git a/jstests/sharding/drop_sharded_db.js b/jstests/sharding/drop_sharded_db.js
index 9de2ecb6d4a..a10c2cba5aa 100644
--- a/jstests/sharding/drop_sharded_db.js
+++ b/jstests/sharding/drop_sharded_db.js
@@ -29,7 +29,7 @@ for (var i = 0; i < numColls; i++) {
}
// Insert a document to an unsharded collection and make sure that the document is there.
-assert.writeOK(dbA.unsharded.insert({dummy: 1}));
+assert.commandWorked(dbA.unsharded.insert({dummy: 1}));
var shardName = config.databases.findOne({_id: dbA.getName()}).primary;
var shardHostConn = new Mongo(config.shards.findOne({_id: shardName}).host);
var dbAOnShard = shardHostConn.getDB(dbA.getName());
diff --git a/jstests/sharding/empty_doc_results.js b/jstests/sharding/empty_doc_results.js
index 65fe1cccd7f..0d2f8a49cb6 100644
--- a/jstests/sharding/empty_doc_results.js
+++ b/jstests/sharding/empty_doc_results.js
@@ -23,7 +23,7 @@ for (var i = -50; i < 50; i++) {
var doc = {};
if (i >= 0)
doc.positiveId = true;
- assert.writeOK(coll.insert(doc));
+ assert.commandWorked(coll.insert(doc));
}
//
diff --git a/jstests/sharding/enable_sharding_basic.js b/jstests/sharding/enable_sharding_basic.js
index d185ff11b6e..046b4f6e520 100644
--- a/jstests/sharding/enable_sharding_basic.js
+++ b/jstests/sharding/enable_sharding_basic.js
@@ -37,7 +37,7 @@ assert.commandWorked(st.s0.adminCommand({enableSharding: 'db'}));
assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
// Verify config.databases metadata.
-assert.writeOK(st.s0.getDB('unsharded').foo.insert({aKey: "aValue"}));
+assert.commandWorked(st.s0.getDB('unsharded').foo.insert({aKey: "aValue"}));
assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, false);
assert.commandWorked(st.s0.adminCommand({enableSharding: 'unsharded'}));
assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, true);
@@ -46,8 +46,8 @@ assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitione
assert.commandFailed(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
assert.commandFailed(st.s1.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
-assert.writeOK(st.s0.getDB('TestDB').TestColl.insert({_id: 0}));
-assert.writeOK(st.s1.getDB('TestDB').TestColl.insert({_id: 1}));
+assert.commandWorked(st.s0.getDB('TestDB').TestColl.insert({_id: 0}));
+assert.commandWorked(st.s1.getDB('TestDB').TestColl.insert({_id: 1}));
// Calling 'enableSharding' on one mongos and 'shardCollection' through another must work
assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
diff --git a/jstests/sharding/enforce_zone_policy.js b/jstests/sharding/enforce_zone_policy.js
index 259d05ff716..259b8443fee 100644
--- a/jstests/sharding/enforce_zone_policy.js
+++ b/jstests/sharding/enforce_zone_policy.js
@@ -15,7 +15,7 @@ var bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < 9; i++) {
bulk.insert({_id: i, x: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));
diff --git a/jstests/sharding/error_during_agg_getmore.js b/jstests/sharding/error_during_agg_getmore.js
index 74933437c16..04aaef554f4 100644
--- a/jstests/sharding/error_during_agg_getmore.js
+++ b/jstests/sharding/error_during_agg_getmore.js
@@ -26,8 +26,8 @@ assert.commandWorked(mongosDB.adminCommand(
{moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
// Write a document to each chunk.
-assert.writeOK(mongosColl.insert({_id: -1}));
-assert.writeOK(mongosColl.insert({_id: 1}));
+assert.commandWorked(mongosColl.insert({_id: -1}));
+assert.commandWorked(mongosColl.insert({_id: 1}));
// Delay messages between shard 1 and the mongos, long enough that shard 1's responses will
// likely arrive after the response from shard 0, but not so long that the background cluster
diff --git a/jstests/sharding/error_propagation.js b/jstests/sharding/error_propagation.js
index 7fe4822e295..5845581a5f1 100644
--- a/jstests/sharding/error_propagation.js
+++ b/jstests/sharding/error_propagation.js
@@ -10,11 +10,11 @@ var st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 3}});
var db = st.getDB('test');
db.setSlaveOk(true);
-assert.writeOK(db.foo.insert({a: 1}, {writeConcern: {w: 3}}));
+assert.commandWorked(db.foo.insert({a: 1}, {writeConcern: {w: 3}}));
assert.commandWorked(db.runCommand(
{aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}}));
-assert.writeOK(db.foo.insert({a: [1, 2]}, {writeConcern: {w: 3}}));
+assert.commandWorked(db.foo.insert({a: [1, 2]}, {writeConcern: {w: 3}}));
var res = db.runCommand(
{aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}});
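
Writes issued with a write concern, as in this test, are covered as well: if the result carries a writeConcernError, assert.commandWorked() should treat that as a failure too (the shell also provides assert.commandWorkedIgnoringWriteConcernErrors for tests that want to tolerate it). A minimal sketch assuming a three-node replica set shard, as above:

// Fails the assertion unless the insert is acknowledged by all 3 nodes.
assert.commandWorked(db.foo.insert({a: 1}, {writeConcern: {w: 3}}));
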
diff --git a/jstests/sharding/exact_shard_key_target.js b/jstests/sharding/exact_shard_key_target.js
index 7ff31a97dda..d0670c20172 100644
--- a/jstests/sharding/exact_shard_key_target.js
+++ b/jstests/sharding/exact_shard_key_target.js
@@ -26,10 +26,10 @@ st.printShardingStatus();
//
// JustOne remove
coll.remove({});
-assert.writeOK(coll.insert({_id: 1, a: {b: -1}}));
-assert.writeOK(coll.insert({_id: 2, a: {b: 1}}));
+assert.commandWorked(coll.insert({_id: 1, a: {b: -1}}));
+assert.commandWorked(coll.insert({_id: 2, a: {b: 1}}));
// Need orphaned data to see the impact
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 3, a: {b: 1}}));
+assert.commandWorked(st.shard0.getCollection(coll.toString()).insert({_id: 3, a: {b: 1}}));
assert.eq(1, coll.remove({a: {b: 1}}, {justOne: true}).nRemoved);
assert.eq(2,
st.shard0.getCollection(coll.toString()).count() +
@@ -38,10 +38,10 @@ assert.eq(2,
//
// Non-multi update
coll.remove({});
-assert.writeOK(coll.insert({_id: 1, a: {b: 1}}));
-assert.writeOK(coll.insert({_id: 2, a: {b: -1}}));
+assert.commandWorked(coll.insert({_id: 1, a: {b: 1}}));
+assert.commandWorked(coll.insert({_id: 2, a: {b: -1}}));
// Need orphaned data to see the impact
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 3, a: {b: 1}}));
+assert.commandWorked(st.shard0.getCollection(coll.toString()).insert({_id: 3, a: {b: 1}}));
assert.eq(1, coll.update({a: {b: 1}}, {$set: {updated: true}}, {multi: false}).nMatched);
assert.eq(1,
st.shard0.getCollection(coll.toString()).count({updated: true}) +
@@ -50,8 +50,8 @@ assert.eq(1,
//
// Successive upserts (replacement-style)
coll.remove({});
-assert.writeOK(coll.update({a: {b: 1}}, {a: {b: 1}}, {upsert: true}));
-assert.writeOK(coll.update({a: {b: 1}}, {a: {b: 1}}, {upsert: true}));
+assert.commandWorked(coll.update({a: {b: 1}}, {a: {b: 1}}, {upsert: true}));
+assert.commandWorked(coll.update({a: {b: 1}}, {a: {b: 1}}, {upsert: true}));
assert.eq(1,
st.shard0.getCollection(coll.toString()).count() +
st.shard1.getCollection(coll.toString()).count());
@@ -59,8 +59,8 @@ assert.eq(1,
//
// Successive upserts ($op-style)
coll.remove({});
-assert.writeOK(coll.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true}));
-assert.writeOK(coll.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true}));
+assert.commandWorked(coll.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true}));
+assert.commandWorked(coll.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true}));
assert.eq(1,
st.shard0.getCollection(coll.toString()).count() +
st.shard1.getCollection(coll.toString()).count());
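
Because the assertion does not consume its argument, the WriteResult fields stay available afterwards, so the nMatched and count checks in these hunks keep working unchanged. A minimal sketch of the upsert pattern, collection name hypothetical:

var res = db.someColl.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true});
assert.commandWorked(res);
// First run upserts a new document; a rerun matches the existing one.
assert.eq(1, res.nUpserted + res.nMatched);
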
diff --git a/jstests/sharding/explainFind_stale_mongos.js b/jstests/sharding/explainFind_stale_mongos.js
index 93a5d1489cc..7cf3024a545 100644
--- a/jstests/sharding/explainFind_stale_mongos.js
+++ b/jstests/sharding/explainFind_stale_mongos.js
@@ -15,7 +15,7 @@ let staleMongos = st.s0;
let freshMongos = st.s1;
jsTest.log("Make the stale mongos load a cache entry for db " + dbName + " once");
-assert.writeOK(staleMongos.getDB(dbName).getCollection(collName).insert({_id: 1}));
+assert.commandWorked(staleMongos.getDB(dbName).getCollection(collName).insert({_id: 1}));
jsTest.log("Call shardCollection on " + ns + " from the fresh mongos");
assert.commandWorked(freshMongos.adminCommand({enableSharding: dbName}));
diff --git a/jstests/sharding/explain_agg_read_pref.js b/jstests/sharding/explain_agg_read_pref.js
index 0e774e4d8a8..63e4f3362f7 100644
--- a/jstests/sharding/explain_agg_read_pref.js
+++ b/jstests/sharding/explain_agg_read_pref.js
@@ -41,7 +41,7 @@ const rs1Primary = st.rs1.getPrimary();
const rs1Secondary = st.rs1.getSecondary();
for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
//
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index e92e9c4d713..92e5ff5b468 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -19,8 +19,8 @@ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 10}}));
assert.commandWorked(s.s0.adminCommand(
{movechunk: "test.foo", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name}));
-assert.writeOK(db.foo.insert({num: 5}));
-assert.writeOK(db.foo.save({num: 15}));
+assert.commandWorked(db.foo.insert({num: 5}));
+assert.commandWorked(db.foo.save({num: 15}));
let a = s.rs0.getPrimary().getDB("test");
let b = s.rs1.getPrimary().getDB("test");
@@ -66,8 +66,8 @@ assert.commandWorked(s.s0.adminCommand({split: "test.foo4", middle: {num: 10}}))
assert.commandWorked(s.s0.adminCommand(
{movechunk: "test.foo4", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name}));
-assert.writeOK(db.foo4.insert({num: 5}));
-assert.writeOK(db.foo4.insert({num: 15}));
+assert.commandWorked(db.foo4.insert({num: 5}));
+assert.commandWorked(db.foo4.insert({num: 15}));
assert.eq(1, a.foo4.count(), "ua1");
assert.eq(1, b.foo4.count(), "ub1");
@@ -79,7 +79,7 @@ assert(a.foo4.getIndexes()[1].unique, "ua3");
assert(b.foo4.getIndexes()[1].unique, "ub3");
assert.eq(2, db.foo4.count(), "uc1");
-assert.writeOK(db.foo4.insert({num: 7}));
+assert.commandWorked(db.foo4.insert({num: 7}));
assert.eq(3, db.foo4.count(), "uc2");
assert.writeError(db.foo4.insert({num: 7}));
assert.eq(3, db.foo4.count(), "uc4");
@@ -106,12 +106,12 @@ assert(db.foo5.isCapped(), "cb1");
assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo5", key: {num: 1}}));
// ---- can't shard non-empty collection without index -----
-assert.writeOK(db.foo8.insert({a: 1}));
+assert.commandWorked(db.foo8.insert({a: 1}));
assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo8", key: {a: 1}}),
"non-empty collection");
// ---- can't shard non-empty collection with null values in shard key ----
-assert.writeOK(db.foo9.insert({b: 1}));
+assert.commandWorked(db.foo9.insert({b: 1}));
assert.commandWorked(db.foo9.createIndex({a: 1}));
assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo9", key: {a: 1}}),
"entry with null value");
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index 0ccf500804f..5edad528114 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -37,7 +37,7 @@ var bulk = dbForTest.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
var x = dbForTest.foo.stats();
diff --git a/jstests/sharding/find_getmore_cmd.js b/jstests/sharding/find_getmore_cmd.js
index d711c7cda53..3591d938e07 100644
--- a/jstests/sharding/find_getmore_cmd.js
+++ b/jstests/sharding/find_getmore_cmd.js
@@ -15,12 +15,12 @@ var db = st.s.getDB("test");
var coll = db.getCollection("find_getmore_cmd");
coll.drop();
-assert.writeOK(coll.insert({_id: -9, a: 4, b: "foo foo"}));
-assert.writeOK(coll.insert({_id: -5, a: 8}));
-assert.writeOK(coll.insert({_id: -1, a: 10, b: "foo"}));
-assert.writeOK(coll.insert({_id: 1, a: 5}));
-assert.writeOK(coll.insert({_id: 5, a: 20, b: "foo foo foo"}));
-assert.writeOK(coll.insert({_id: 9, a: 3}));
+assert.commandWorked(coll.insert({_id: -9, a: 4, b: "foo foo"}));
+assert.commandWorked(coll.insert({_id: -5, a: 8}));
+assert.commandWorked(coll.insert({_id: -1, a: 10, b: "foo"}));
+assert.commandWorked(coll.insert({_id: 1, a: 5}));
+assert.commandWorked(coll.insert({_id: 5, a: 20, b: "foo foo foo"}));
+assert.commandWorked(coll.insert({_id: 9, a: 3}));
assert.commandWorked(coll.ensureIndex({b: "text"}));
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
index 001a9a386d7..75698ec45a0 100644
--- a/jstests/sharding/findandmodify1.js
+++ b/jstests/sharding/findandmodify1.js
@@ -35,7 +35,7 @@ var bulk = db.sharded_coll.initializeUnorderedBulkOp();
for (var i = 0; i < numObjs; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Put two docs in each chunk (avoid the split in 0, since there are no docs less than 0)
for (var i = 2; i < numObjs; i += 2) {
diff --git a/jstests/sharding/forget_mr_temp_ns.js b/jstests/sharding/forget_mr_temp_ns.js
index fd950bcf43c..7a9d7845d6b 100644
--- a/jstests/sharding/forget_mr_temp_ns.js
+++ b/jstests/sharding/forget_mr_temp_ns.js
@@ -13,7 +13,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 10; i++) {
bulk.insert({_id: i, even: (i % 2 == 0)});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
var map = function() {
emit(this.even, 1);
diff --git a/jstests/sharding/fts_score_sort_sharded.js b/jstests/sharding/fts_score_sort_sharded.js
index 2145e987558..e49703821f3 100644
--- a/jstests/sharding/fts_score_sort_sharded.js
+++ b/jstests/sharding/fts_score_sort_sharded.js
@@ -22,10 +22,10 @@ assert.commandWorked(
//
// Insert documents into collection and create text index.
//
-assert.writeOK(coll.insert({_id: 1, a: "pizza"}));
-assert.writeOK(coll.insert({_id: -1, a: "pizza pizza"}));
-assert.writeOK(coll.insert({_id: 2, a: "pizza pizza pizza"}));
-assert.writeOK(coll.insert({_id: -2, a: "pizza pizza pizza pizza"}));
+assert.commandWorked(coll.insert({_id: 1, a: "pizza"}));
+assert.commandWorked(coll.insert({_id: -1, a: "pizza pizza"}));
+assert.commandWorked(coll.insert({_id: 2, a: "pizza pizza pizza"}));
+assert.commandWorked(coll.insert({_id: -2, a: "pizza pizza pizza pizza"}));
assert.commandWorked(coll.ensureIndex({a: "text"}));
//
diff --git a/jstests/sharding/geo_near_sharded.js b/jstests/sharding/geo_near_sharded.js
index 714876e7fda..1d7247b3771 100644
--- a/jstests/sharding/geo_near_sharded.js
+++ b/jstests/sharding/geo_near_sharded.js
@@ -36,7 +36,7 @@ function test(st, db, sharded, indexType) {
var lng = 180 - Random.rand() * 360;
bulk.insert({rand: Math.random(), loc: [lng, lat]});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
assert.eq(db[coll].count(), numPts);
assert.commandWorked(db[coll].ensureIndex({loc: indexType}));
diff --git a/jstests/sharding/geo_near_sort.js b/jstests/sharding/geo_near_sort.js
index e2f0292904e..8c3a19465c7 100644
--- a/jstests/sharding/geo_near_sort.js
+++ b/jstests/sharding/geo_near_sort.js
@@ -46,10 +46,10 @@ const doc2 = {
a: "aa",
b: 0
};
-assert.writeOK(coll.insert(docMinus2));
-assert.writeOK(coll.insert(docMinus1));
-assert.writeOK(coll.insert(doc1));
-assert.writeOK(coll.insert(doc2));
+assert.commandWorked(coll.insert(docMinus2));
+assert.commandWorked(coll.insert(docMinus1));
+assert.commandWorked(coll.insert(doc1));
+assert.commandWorked(coll.insert(doc2));
function testSortOrders(query, indexSpec) {
assert.commandWorked(coll.createIndex(indexSpec));
diff --git a/jstests/sharding/graph_lookup.js b/jstests/sharding/graph_lookup.js
index 4678ba2f9a9..90922f815a2 100644
--- a/jstests/sharding/graph_lookup.js
+++ b/jstests/sharding/graph_lookup.js
@@ -9,8 +9,8 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: "test.foo", key: {_id:
let db = st.s0.getDB("test");
-assert.writeOK(db.foo.insert([{}, {}, {}, {}]));
-assert.writeOK(db.bar.insert({_id: 1, x: 1}));
+assert.commandWorked(db.foo.insert([{}, {}, {}, {}]));
+assert.commandWorked(db.bar.insert({_id: 1, x: 1}));
const res = db.foo
.aggregate([{
diff --git a/jstests/sharding/idhack_sharded.js b/jstests/sharding/idhack_sharded.js
index 6b9716ea608..b11e2b0e579 100644
--- a/jstests/sharding/idhack_sharded.js
+++ b/jstests/sharding/idhack_sharded.js
@@ -17,7 +17,7 @@ assert.commandWorked(coll.getDB().adminCommand(
// Test that idhack queries with projections that remove the shard key return correct results.
// SERVER-14032.
//
-assert.writeOK(coll.insert({_id: 1, x: 1, y: 1}));
+assert.commandWorked(coll.insert({_id: 1, x: 1, y: 1}));
assert.eq(1, coll.find().itcount());
assert.eq(1, coll.find({_id: 1}, {x: 0}).itcount());
assert.eq(1, coll.find({_id: 1}, {y: 1}).itcount());
@@ -26,8 +26,8 @@ coll.remove({});
//
// Test that idhack queries with covered projections do not return orphan documents. SERVER-14034.
//
-assert.writeOK(st.shard0.getCollection(coll.getFullName()).insert({_id: 1, x: 1}));
-assert.writeOK(st.shard1.getCollection(coll.getFullName()).insert({_id: 1, x: 1}));
+assert.commandWorked(st.shard0.getCollection(coll.getFullName()).insert({_id: 1, x: 1}));
+assert.commandWorked(st.shard1.getCollection(coll.getFullName()).insert({_id: 1, x: 1}));
assert.eq(2, coll.count());
assert.eq(1, coll.find().itcount());
assert.eq(1, coll.find({_id: 1}, {_id: 1}).itcount());
diff --git a/jstests/sharding/implicit_db_creation.js b/jstests/sharding/implicit_db_creation.js
index 0f45dbb94ae..03c460bdeff 100644
--- a/jstests/sharding/implicit_db_creation.js
+++ b/jstests/sharding/implicit_db_creation.js
@@ -15,14 +15,14 @@ var testDB = st.s.getDB('test');
assert.eq(null, testDB.user.findOne());
assert.eq(null, configDB.databases.findOne({_id: 'test'}));
-assert.writeOK(testDB.user.insert({x: 1}));
+assert.commandWorked(testDB.user.insert({x: 1}));
var testDBDoc = configDB.databases.findOne();
assert.eq('test', testDBDoc._id, tojson(testDBDoc));
// Test that inserting to another collection in the same database will not modify the existing
// config.databases entry.
-assert.writeOK(testDB.bar.insert({y: 1}));
+assert.commandWorked(testDB.bar.insert({y: 1}));
assert.eq(testDBDoc, configDB.databases.findOne());
st.s.adminCommand({enableSharding: 'foo'});
diff --git a/jstests/sharding/in_memory_sort_limit.js b/jstests/sharding/in_memory_sort_limit.js
index a017b463037..d93392537af 100644
--- a/jstests/sharding/in_memory_sort_limit.js
+++ b/jstests/sharding/in_memory_sort_limit.js
@@ -25,7 +25,7 @@ var bulkOp = mongosCol.initializeOrderedBulkOp();
for (var i = 0; i < 12800; i++) {
bulkOp.insert({x: i, str: filler});
}
-assert.writeOK(bulkOp.execute());
+assert.commandWorked(bulkOp.execute());
var passLimit = 2000;
var failLimit = 4000;
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
index b440c535230..67c4c3ceeb6 100644
--- a/jstests/sharding/index1.js
+++ b/jstests/sharding/index1.js
@@ -12,7 +12,7 @@ for (var i = 0; i < 22; i++) {
for (var j = 0; j < 300; j++) {
bulk.insert({num: j, x: 1});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
if (i == 0) {
s.adminCommand({enablesharding: "" + coll._db});
diff --git a/jstests/sharding/inserts_consistent.js b/jstests/sharding/inserts_consistent.js
index 4bea7d95474..79bfcecfcf5 100644
--- a/jstests/sharding/inserts_consistent.js
+++ b/jstests/sharding/inserts_consistent.js
@@ -31,13 +31,13 @@ jsTest.log("Inserting docs that needs to be retried...");
var nextId = -1;
for (var i = 0; i < 2; i++) {
printjson("Inserting " + nextId);
- assert.writeOK(collB.insert({_id: nextId--, hello: "world"}));
+ assert.commandWorked(collB.insert({_id: nextId--, hello: "world"}));
}
jsTest.log("Inserting doc which successfully goes through...");
// Do second write
-assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
+assert.commandWorked(collB.insert({_id: nextId--, goodbye: "world"}));
// Assert that write went through
assert.eq(coll.find().itcount(), 3);
@@ -60,7 +60,7 @@ printjson(adminB.runCommand({flushRouterConfig: 1}));
jsTest.log("Inserting second doc which successfully goes through...");
// Do second write
-assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
+assert.commandWorked(collB.insert({_id: nextId--, goodbye: "world"}));
jsTest.log("All docs written this time!");
diff --git a/jstests/sharding/invalid_system_views_sharded_collection.js b/jstests/sharding/invalid_system_views_sharded_collection.js
index 899d4482987..62189edcffc 100644
--- a/jstests/sharding/invalid_system_views_sharded_collection.js
+++ b/jstests/sharding/invalid_system_views_sharded_collection.js
@@ -26,10 +26,10 @@ function runTest(st, badViewDefinition) {
assert.commandWorked(viewsCollection.createIndex({x: 1}));
const unshardedColl = db.getCollection("unshardedColl");
- assert.writeOK(unshardedColl.insert({b: "boo"}));
+ assert.commandWorked(unshardedColl.insert({b: "boo"}));
- assert.writeOK(db.system.views.insert(badViewDefinition),
- "failed to insert " + tojson(badViewDefinition));
+ assert.commandWorked(db.system.views.insert(badViewDefinition),
+ "failed to insert " + tojson(badViewDefinition));
// Test that a command involving views properly fails with a views-specific error code.
assert.commandFailedWithCode(
@@ -44,12 +44,12 @@ function runTest(st, badViewDefinition) {
" in system.views";
}
- assert.writeOK(viewsCollection.insert({y: "baz", a: 5}), makeErrorMessage("insert"));
+ assert.commandWorked(viewsCollection.insert({y: "baz", a: 5}), makeErrorMessage("insert"));
- assert.writeOK(viewsCollection.update({y: "baz"}, {$set: {y: "qux"}}),
- makeErrorMessage("update"));
+ assert.commandWorked(viewsCollection.update({y: "baz"}, {$set: {y: "qux"}}),
+ makeErrorMessage("update"));
- assert.writeOK(viewsCollection.remove({y: "baz"}), makeErrorMessage("remove"));
+ assert.commandWorked(viewsCollection.remove({y: "baz"}), makeErrorMessage("remove"));
assert.commandWorked(
db.runCommand(
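
As the system.views hunk shows, assert.commandWorked() takes the same optional second argument that assert.writeOK() did: a custom message to report if the assertion fails. Sketch, with a hypothetical collection:

var doc = {x: 1};
assert.commandWorked(db.someColl.insert(doc), "failed to insert " + tojson(doc));
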
diff --git a/jstests/sharding/json_schema.js b/jstests/sharding/json_schema.js
index 5a4a68102b7..b2a1ff21c77 100644
--- a/jstests/sharding/json_schema.js
+++ b/jstests/sharding/json_schema.js
@@ -30,10 +30,10 @@ assert.commandWorked(testDB.adminCommand(
{moveChunk: coll.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
// Write one document into each of the chunks.
-assert.writeOK(coll.insert({_id: -150, a: 1}));
-assert.writeOK(coll.insert({_id: -50, a: 10}));
-assert.writeOK(coll.insert({_id: 50, a: "str"}));
-assert.writeOK(coll.insert({_id: 150}));
+assert.commandWorked(coll.insert({_id: -150, a: 1}));
+assert.commandWorked(coll.insert({_id: -50, a: 10}));
+assert.commandWorked(coll.insert({_id: 50, a: "str"}));
+assert.commandWorked(coll.insert({_id: 150}));
// Test that $jsonSchema in a find command returns the correct results.
assert.eq(4, coll.find({$jsonSchema: {}}).itcount());
@@ -46,7 +46,7 @@ let res = coll.update(
{$jsonSchema: {properties: {_id: {type: "number", minimum: 100}, a: {type: "number"}}}},
{$inc: {a: 1}},
{multi: true});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(1, res.nModified);
const schema = {
@@ -54,7 +54,7 @@ const schema = {
required: ["_id"]
};
res = coll.update({$jsonSchema: schema}, {$set: {b: 1}}, {multi: true});
-assert.writeOK(res);
+assert.commandWorked(res);
assert.eq(1, res.nModified);
// Test that $jsonSchema works correctly in a findAndModify command.
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index fe8e04e7492..f26b0f36268 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -24,7 +24,7 @@ for (; x < 1500; x++) {
bulk.insert({x: x, big: big});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
s.printShardingStatus(true);
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index e0d19e8874b..86ac72100bd 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -216,7 +216,8 @@ for (var i = 0; i < types.length; i++) {
assert.eq(1, c.find({xx: {$exists: true}}).count(), curT.name + " xx 2 ");
assert.eq(curT.values[3], getKey(c.findOne({xx: 17})), curT.name + " xx 3 ");
- assert.writeOK(c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}}, {upsert: true}));
+ assert.commandWorked(
+ c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}}, {upsert: true}));
assert.commandWorked(c.ensureIndex({_id: 1}));
diff --git a/jstests/sharding/kill_pinned_cursor.js b/jstests/sharding/kill_pinned_cursor.js
index 1c19626ab77..fcf9d0fa96c 100644
--- a/jstests/sharding/kill_pinned_cursor.js
+++ b/jstests/sharding/kill_pinned_cursor.js
@@ -27,7 +27,7 @@ let coll = mongosDB.jstest_kill_pinned_cursor;
coll.drop();
for (let i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
}
st.shardColl(coll, {_id: 1}, {_id: 5}, {_id: 6}, kDBName, false);
diff --git a/jstests/sharding/killop.js b/jstests/sharding/killop.js
index 7f2e4d23173..bedc712eb3d 100644
--- a/jstests/sharding/killop.js
+++ b/jstests/sharding/killop.js
@@ -9,7 +9,7 @@ const conn = st.s;
const db = conn.getDB("killOp");
const coll = db.test;
-assert.writeOK(db.getCollection(coll.getName()).insert({x: 1}));
+assert.commandWorked(db.getCollection(coll.getName()).insert({x: 1}));
const kFailPointName = "waitInFindBeforeMakingBatch";
assert.commandWorked(conn.adminCommand({"configureFailPoint": kFailPointName, "mode": "alwaysOn"}));
diff --git a/jstests/sharding/lagged_config_secondary.js b/jstests/sharding/lagged_config_secondary.js
index 35e38722edb..7fecb777de5 100644
--- a/jstests/sharding/lagged_config_secondary.js
+++ b/jstests/sharding/lagged_config_secondary.js
@@ -22,13 +22,13 @@ var configSecondaryList = st.configRS.getSecondaries();
var configSecondaryToKill = configSecondaryList[0];
var delayedConfigSecondary = configSecondaryList[1];
-assert.writeOK(testDB.user.insert({_id: 1}));
+assert.commandWorked(testDB.user.insert({_id: 1}));
delayedConfigSecondary.getDB('admin').adminCommand(
{configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
// Do one metadata write in order to bump the optime on mongos
-assert.writeOK(st.getDB('config').TestConfigColl.insert({TestKey: 'Test value'}));
+assert.commandWorked(st.getDB('config').TestConfigColl.insert({TestKey: 'Test value'}));
st.configRS.stopMaster();
MongoRunner.stopMongod(configSecondaryToKill);
diff --git a/jstests/sharding/large_chunk.js b/jstests/sharding/large_chunk.js
index c3df1b4baf2..eb5bc916196 100644
--- a/jstests/sharding/large_chunk.js
+++ b/jstests/sharding/large_chunk.js
@@ -33,7 +33,7 @@ while (inserted < (400 * 1024 * 1024)) {
bulk.insert({_id: num++, s: bigString});
inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
diff --git a/jstests/sharding/large_skip_one_shard.js b/jstests/sharding/large_skip_one_shard.js
index e1f717a5f5a..95637ee0a50 100644
--- a/jstests/sharding/large_skip_one_shard.js
+++ b/jstests/sharding/large_skip_one_shard.js
@@ -18,7 +18,7 @@ assert(admin.runCommand({moveChunk: collSharded + "", find: {_id: 0}, to: st.sha
function testSelectWithSkip(coll) {
for (var i = -100; i < 100; i++) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
}
// Run a query which only requires 5 results from a single shard
diff --git a/jstests/sharding/linearizable_read_concern.js b/jstests/sharding/linearizable_read_concern.js
index 9b3ac62acce..d83362a898c 100644
--- a/jstests/sharding/linearizable_read_concern.js
+++ b/jstests/sharding/linearizable_read_concern.js
@@ -91,7 +91,7 @@ jsTestLog("Testing linearizable read from primaries.");
// Execute a linearizable read from primaries (targeting both shards) which should succeed.
st.s.setReadPref("primary");
-var res = assert.writeOK(testDB.runReadCommand({
+var res = assert.commandWorked(testDB.runReadCommand({
find: collName,
sort: {x: 1},
filter: dualShardQueryFilter,
diff --git a/jstests/sharding/listDatabases.js b/jstests/sharding/listDatabases.js
index ce13ea5871d..fe34dbe0aa7 100644
--- a/jstests/sharding/listDatabases.js
+++ b/jstests/sharding/listDatabases.js
@@ -31,9 +31,9 @@ var dbEntryCheck = function(dbEntry, onConfig) {
// Non-config-server db checks.
{
- assert.writeOK(mongos.getDB("blah").foo.insert({_id: 1}));
- assert.writeOK(mongos.getDB("foo").foo.insert({_id: 1}));
- assert.writeOK(mongos.getDB("raw").foo.insert({_id: 1}));
+ assert.commandWorked(mongos.getDB("blah").foo.insert({_id: 1}));
+ assert.commandWorked(mongos.getDB("foo").foo.insert({_id: 1}));
+ assert.commandWorked(mongos.getDB("raw").foo.insert({_id: 1}));
res = mongos.adminCommand("listDatabases");
dbArray = res.databases;
@@ -53,8 +53,8 @@ var dbEntryCheck = function(dbEntry, onConfig) {
// Admin and config are always reported on the config shard.
{
- assert.writeOK(mongos.getDB("admin").test.insert({_id: 1}));
- assert.writeOK(mongos.getDB("config").test.insert({_id: 1}));
+ assert.commandWorked(mongos.getDB("admin").test.insert({_id: 1}));
+ assert.commandWorked(mongos.getDB("config").test.insert({_id: 1}));
res = mongos.adminCommand("listDatabases");
dbArray = res.databases;
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index 55b7548d6db..c2f9a8a75ad 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -135,9 +135,9 @@ var assertCanRunCommands = function(mongo, st) {
// this will throw if it fails
test.system.users.findOne();
- assert.writeOK(test.foo.save({_id: 0}));
- assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}}));
- assert.writeOK(test.foo.remove({_id: 0}));
+ assert.commandWorked(test.foo.save({_id: 0}));
+ assert.commandWorked(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.commandWorked(test.foo.remove({_id: 0}));
// Multi-shard
test.foo.mapReduce(
diff --git a/jstests/sharding/lookup.js b/jstests/sharding/lookup.js
index 3c0364bd6a4..82a8c63624b 100644
--- a/jstests/sharding/lookup.js
+++ b/jstests/sharding/lookup.js
@@ -42,13 +42,13 @@ function runTest(coll, from, thirdColl, fourthColl) {
assert.commandWorked(thirdColl.remove({}));
assert.commandWorked(fourthColl.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: null}));
- assert.writeOK(coll.insert({_id: 2}));
+ assert.commandWorked(coll.insert({_id: 0, a: 1}));
+ assert.commandWorked(coll.insert({_id: 1, a: null}));
+ assert.commandWorked(coll.insert({_id: 2}));
- assert.writeOK(from.insert({_id: 0, b: 1}));
- assert.writeOK(from.insert({_id: 1, b: null}));
- assert.writeOK(from.insert({_id: 2}));
+ assert.commandWorked(from.insert({_id: 0, b: 1}));
+ assert.commandWorked(from.insert({_id: 1, b: null}));
+ assert.commandWorked(from.insert({_id: 2}));
//
// Basic functionality.
@@ -258,17 +258,17 @@ function runTest(coll, from, thirdColl, fourthColl) {
//
assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: null}));
- assert.writeOK(coll.insert({_id: 2}));
- assert.writeOK(coll.insert({_id: 3, a: {c: 1}}));
+ assert.commandWorked(coll.insert({_id: 0, a: 1}));
+ assert.commandWorked(coll.insert({_id: 1, a: null}));
+ assert.commandWorked(coll.insert({_id: 2}));
+ assert.commandWorked(coll.insert({_id: 3, a: {c: 1}}));
assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, b: 1}));
- assert.writeOK(from.insert({_id: 1, b: null}));
- assert.writeOK(from.insert({_id: 2}));
- assert.writeOK(from.insert({_id: 3, b: {c: 1}}));
- assert.writeOK(from.insert({_id: 4, b: {c: 2}}));
+ assert.commandWorked(from.insert({_id: 0, b: 1}));
+ assert.commandWorked(from.insert({_id: 1, b: null}));
+ assert.commandWorked(from.insert({_id: 2}));
+ assert.commandWorked(from.insert({_id: 3, b: {c: 1}}));
+ assert.commandWorked(from.insert({_id: 4, b: {c: 2}}));
// Once without a dotted field.
let pipeline = [{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}];
@@ -293,11 +293,11 @@ function runTest(coll, from, thirdColl, fourthColl) {
// With an $unwind stage.
assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: {b: 1}}));
- assert.writeOK(coll.insert({_id: 1}));
+ assert.commandWorked(coll.insert({_id: 0, a: {b: 1}}));
+ assert.commandWorked(coll.insert({_id: 1}));
assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, target: 1}));
+ assert.commandWorked(from.insert({_id: 0, target: 1}));
pipeline = [
{
@@ -331,11 +331,11 @@ function runTest(coll, from, thirdColl, fourthColl) {
// This must only do an equality match rather than treating the value as a regex.
assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: /a regex/}));
+ assert.commandWorked(coll.insert({_id: 0, a: /a regex/}));
assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, b: /a regex/}));
- assert.writeOK(from.insert({_id: 1, b: "string that matches /a regex/"}));
+ assert.commandWorked(from.insert({_id: 0, b: /a regex/}));
+ assert.commandWorked(from.insert({_id: 1, b: "string that matches /a regex/"}));
pipeline = [
{
@@ -356,11 +356,11 @@ function runTest(coll, from, thirdColl, fourthColl) {
// Basic array corresponding to multiple documents.
assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [0, 1, 2]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [0, 1, 2]}));
assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0}));
- assert.writeOK(from.insert({_id: 1}));
+ assert.commandWorked(from.insert({_id: 0}));
+ assert.commandWorked(from.insert({_id: 1}));
pipeline = [
{
@@ -377,11 +377,11 @@ function runTest(coll, from, thirdColl, fourthColl) {
// Basic array corresponding to a single document.
assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [1]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [1]}));
assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0}));
- assert.writeOK(from.insert({_id: 1}));
+ assert.commandWorked(from.insert({_id: 0}));
+ assert.commandWorked(from.insert({_id: 1}));
pipeline = [
{
@@ -398,14 +398,14 @@ function runTest(coll, from, thirdColl, fourthColl) {
// Array containing regular expressions.
assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [/a regex/, /^x/]}));
- assert.writeOK(coll.insert({_id: 1, a: [/^x/]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [/a regex/, /^x/]}));
+ assert.commandWorked(coll.insert({_id: 1, a: [/^x/]}));
assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, b: "should not match a regex"}));
- assert.writeOK(from.insert({_id: 1, b: "xxxx"}));
- assert.writeOK(from.insert({_id: 2, b: /a regex/}));
- assert.writeOK(from.insert({_id: 3, b: /^x/}));
+ assert.commandWorked(from.insert({_id: 0, b: "should not match a regex"}));
+ assert.commandWorked(from.insert({_id: 1, b: "xxxx"}));
+ assert.commandWorked(from.insert({_id: 2, b: /a regex/}));
+ assert.commandWorked(from.insert({_id: 3, b: /^x/}));
pipeline = [
{
@@ -425,13 +425,13 @@ function runTest(coll, from, thirdColl, fourthColl) {
// 'localField' references a field within an array of sub-objects.
assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [{b: 1}, {b: 2}]}));
+ assert.commandWorked(coll.insert({_id: 0, a: [{b: 1}, {b: 2}]}));
assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0}));
- assert.writeOK(from.insert({_id: 1}));
- assert.writeOK(from.insert({_id: 2}));
- assert.writeOK(from.insert({_id: 3}));
+ assert.commandWorked(from.insert({_id: 0}));
+ assert.commandWorked(from.insert({_id: 1}));
+ assert.commandWorked(from.insert({_id: 2}));
+ assert.commandWorked(from.insert({_id: 3}));
pipeline = [
{
diff --git a/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
index 9d71a70e135..4f010f1bb86 100644
--- a/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
+++ b/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
@@ -64,8 +64,8 @@ function shardKeyFromId(id) {
// Do some writes.
for (let id = 0; id < nDocs; ++id) {
const documentKey = Object.merge({_id: id}, shardKeyFromId(id));
- assert.writeOK(mongosColl.insert(documentKey));
- assert.writeOK(mongosColl.update(documentKey, {$set: {updatedCount: 1}}));
+ assert.commandWorked(mongosColl.insert(documentKey));
+ assert.commandWorked(mongosColl.update(documentKey, {$set: {updatedCount: 1}}));
}
[changeStreamSingleColl, changeStreamWholeDb].forEach(function(changeStream) {
@@ -89,7 +89,7 @@ for (let id = 0; id < nDocs; ++id) {
// migrated.
for (let id = 0; id < nDocs; ++id) {
const documentKey = Object.merge({_id: id}, shardKeyFromId(id));
- assert.writeOK(mongosColl.update(documentKey, {$set: {updatedCount: 2}}));
+ assert.commandWorked(mongosColl.update(documentKey, {$set: {updatedCount: 2}}));
}
// Move the upper chunk back to shard 0.
diff --git a/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
index 058a92c6832..422fb652d4a 100644
--- a/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
+++ b/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
@@ -55,8 +55,8 @@ const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updat
// Write enough documents that we likely have some on each shard.
const nDocs = 1000;
for (let id = 0; id < nDocs; ++id) {
- assert.writeOK(mongosColl.insert({_id: id, shardKey: id}));
- assert.writeOK(mongosColl.update({shardKey: id}, {$set: {updatedCount: 1}}));
+ assert.commandWorked(mongosColl.insert({_id: id, shardKey: id}));
+ assert.commandWorked(mongosColl.update({shardKey: id}, {$set: {updatedCount: 1}}));
}
for (let id = 0; id < nDocs; ++id) {
diff --git a/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
index f6235d1082c..d512e9ff66c 100644
--- a/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
+++ b/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
@@ -43,16 +43,16 @@ assert.commandWorked(mongosDB.adminCommand(
{moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
// Write a document to each chunk.
-assert.writeOK(mongosColl.insert({_id: -1}));
-assert.writeOK(mongosColl.insert({_id: 1}));
+assert.commandWorked(mongosColl.insert({_id: -1}));
+assert.commandWorked(mongosColl.insert({_id: 1}));
const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
// Do some writes.
-assert.writeOK(mongosColl.insert({_id: 1000}));
-assert.writeOK(mongosColl.insert({_id: -1000}));
-assert.writeOK(mongosColl.update({_id: 1000}, {$set: {updatedCount: 1}}));
-assert.writeOK(mongosColl.update({_id: -1000}, {$set: {updatedCount: 1}}));
+assert.commandWorked(mongosColl.insert({_id: 1000}));
+assert.commandWorked(mongosColl.insert({_id: -1000}));
+assert.commandWorked(mongosColl.update({_id: 1000}, {$set: {updatedCount: 1}}));
+assert.commandWorked(mongosColl.update({_id: -1000}, {$set: {updatedCount: 1}}));
for (let nextId of [1000, -1000]) {
assert.soon(() => changeStream.hasNext());
@@ -72,8 +72,8 @@ for (let nextId of [1000, -1000]) {
// Test that the change stream can still see the updated post image, even if a chunk is
// migrated.
-assert.writeOK(mongosColl.update({_id: 1000}, {$set: {updatedCount: 2}}));
-assert.writeOK(mongosColl.update({_id: -1000}, {$set: {updatedCount: 2}}));
+assert.commandWorked(mongosColl.update({_id: 1000}, {$set: {updatedCount: 2}}));
+assert.commandWorked(mongosColl.update({_id: -1000}, {$set: {updatedCount: 2}}));
// Split the [0, MaxKey) chunk into 2: [0, 500), [500, MaxKey).
assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 500}}));
diff --git a/jstests/sharding/lookup_mongod_unaware.js b/jstests/sharding/lookup_mongod_unaware.js
index 2750425205e..56a4beafee9 100644
--- a/jstests/sharding/lookup_mongod_unaware.js
+++ b/jstests/sharding/lookup_mongod_unaware.js
@@ -73,16 +73,16 @@ const expectedResults = [
assert.commandWorked(mongos0DB.adminCommand({enableSharding: mongos0DB.getName()}));
st.ensurePrimaryShard(mongos0DB.getName(), st.shard0.shardName);
-assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
-assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
+assert.commandWorked(mongos0LocalColl.insert({_id: 0, a: 1}));
+assert.commandWorked(mongos0LocalColl.insert({_id: 1, a: null}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 1, b: null}));
// Send writes through mongos1 such that it's aware of the collections and believes they are
// unsharded.
-assert.writeOK(mongos1LocalColl.insert({_id: 2}));
-assert.writeOK(mongos1ForeignColl.insert({_id: 2}));
+assert.commandWorked(mongos1LocalColl.insert({_id: 2}));
+assert.commandWorked(mongos1ForeignColl.insert({_id: 2}));
//
// Test unsharded local and sharded foreign collections, with the primary shard unaware that
@@ -161,9 +161,9 @@ assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), [
// Recreate the foreign collection as unsharded.
mongos0ForeignColl.drop();
-assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 2}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 1, b: null}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 2}));
// Verify $lookup results through the fresh mongos.
restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
diff --git a/jstests/sharding/lookup_stale_mongos.js b/jstests/sharding/lookup_stale_mongos.js
index f1e71280a18..2c74af07b28 100644
--- a/jstests/sharding/lookup_stale_mongos.js
+++ b/jstests/sharding/lookup_stale_mongos.js
@@ -40,16 +40,16 @@ const expectedResults = [
assert.commandWorked(mongos0DB.adminCommand({enableSharding: mongos0DB.getName()}));
st.ensurePrimaryShard(mongos0DB.getName(), st.shard0.shardName);
-assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
-assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
+assert.commandWorked(mongos0LocalColl.insert({_id: 0, a: 1}));
+assert.commandWorked(mongos0LocalColl.insert({_id: 1, a: null}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 1, b: null}));
// Send writes through mongos1 such that it's aware of the collections and believes they are
// unsharded.
-assert.writeOK(mongos1LocalColl.insert({_id: 2}));
-assert.writeOK(mongos1ForeignColl.insert({_id: 2}));
+assert.commandWorked(mongos1LocalColl.insert({_id: 2}));
+assert.commandWorked(mongos1ForeignColl.insert({_id: 2}));
//
// Test unsharded local and sharded foreign collections, with mongos unaware that the foreign
@@ -106,9 +106,9 @@ assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
// Recreate the foreign collection as unsharded through mongos0.
mongos0ForeignColl.drop();
-assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
-assert.writeOK(mongos0ForeignColl.insert({_id: 2}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 1, b: null}));
+assert.commandWorked(mongos0ForeignColl.insert({_id: 2}));
// Issue a $lookup through mongos1, which is unaware that the foreign collection is now
// unsharded.
@@ -121,9 +121,9 @@ assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
// Recreate the local collection as unsharded through mongos0.
mongos0LocalColl.drop();
-assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
-assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
-assert.writeOK(mongos0LocalColl.insert({_id: 2}));
+assert.commandWorked(mongos0LocalColl.insert({_id: 0, a: 1}));
+assert.commandWorked(mongos0LocalColl.insert({_id: 1, a: null}));
+assert.commandWorked(mongos0LocalColl.insert({_id: 2}));
// Issue a $lookup through mongos1, which is unaware that the local collection is now
// unsharded.
diff --git a/jstests/sharding/mapReduce_inSharded.js b/jstests/sharding/mapReduce_inSharded.js
index b51b0111a1e..9abe9c922c2 100644
--- a/jstests/sharding/mapReduce_inSharded.js
+++ b/jstests/sharding/mapReduce_inSharded.js
@@ -25,7 +25,7 @@ for (var j = 0; j < 100; j++) {
bulk.insert({j: j, i: i});
}
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function map() {
emit(this.i, 1);
@@ -74,9 +74,9 @@ verifyOutput(out);
// Ensure that the collation option is propagated to the shards. This uses a case-insensitive
// collation, and the query seeding the mapReduce should only match the document if the
// collation is passed along to the shards.
-assert.writeOK(db.srcSharded.remove({}));
+assert.commandWorked(db.srcSharded.remove({}));
assert.eq(db.srcSharded.find().itcount(), 0);
-assert.writeOK(db.srcSharded.insert({i: 0, j: 0, str: "FOO"}));
+assert.commandWorked(db.srcSharded.insert({i: 0, j: 0, str: "FOO"}));
out = db.srcSharded.mapReduce(
map,
reduce,
diff --git a/jstests/sharding/mapReduce_inSharded_outSharded.js b/jstests/sharding/mapReduce_inSharded_outSharded.js
index 7a8730d2c4d..e8d2c44b94e 100644
--- a/jstests/sharding/mapReduce_inSharded_outSharded.js
+++ b/jstests/sharding/mapReduce_inSharded_outSharded.js
@@ -26,7 +26,7 @@ for (var j = 0; j < 100; j++) {
bulk.insert({j: j, i: i});
}
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function map() {
emit(this.i, 1);
diff --git a/jstests/sharding/mapReduce_nonSharded.js b/jstests/sharding/mapReduce_nonSharded.js
index 07da267d132..004db315f97 100644
--- a/jstests/sharding/mapReduce_nonSharded.js
+++ b/jstests/sharding/mapReduce_nonSharded.js
@@ -21,7 +21,7 @@ for (j = 0; j < 100; j++) {
bulk.insert({j: j, i: i});
}
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function map() {
emit(this.i, 1);
diff --git a/jstests/sharding/mapReduce_outSharded.js b/jstests/sharding/mapReduce_outSharded.js
index eeb88371a7e..60a6ab8c0d0 100644
--- a/jstests/sharding/mapReduce_outSharded.js
+++ b/jstests/sharding/mapReduce_outSharded.js
@@ -21,7 +21,7 @@ for (j = 0; j < 100; j++) {
bulk.insert({j: j, i: i});
}
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function map() {
emit(this.i, 1);
diff --git a/jstests/sharding/mapReduce_outSharded_checkUUID.js b/jstests/sharding/mapReduce_outSharded_checkUUID.js
index 25a499c4bed..5e4386da1d2 100644
--- a/jstests/sharding/mapReduce_outSharded_checkUUID.js
+++ b/jstests/sharding/mapReduce_outSharded_checkUUID.js
@@ -34,7 +34,7 @@ for (var j = 0; j < 100; j++) {
bulk.insert({j: j, i: i});
}
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
function map() {
emit(this.i, 1);
@@ -69,8 +69,8 @@ assertCollectionNotOnShard(st.shard0.getDB("mrShard"), "outSharded");
assert.commandWorked(admin.runCommand({split: "mrShard.outSharded", middle: {"_id": 2000}}));
assert.commandWorked(admin.runCommand(
{moveChunk: "mrShard.outSharded", find: {"_id": 2000}, to: st.shard0.shardName}));
-assert.writeOK(st.s.getCollection("mrShard.outSharded").insert({_id: 1000}));
-assert.writeOK(st.s.getCollection("mrShard.outSharded").insert({_id: 2001}));
+assert.commandWorked(st.s.getCollection("mrShard.outSharded").insert({_id: 1000}));
+assert.commandWorked(st.s.getCollection("mrShard.outSharded").insert({_id: 2001}));
origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "outSharded", sharded: true}});
diff --git a/jstests/sharding/max_time_ms_sharded.js b/jstests/sharding/max_time_ms_sharded.js
index 16c56658a5d..8579149cb36 100644
--- a/jstests/sharding/max_time_ms_sharded.js
+++ b/jstests/sharding/max_time_ms_sharded.js
@@ -53,7 +53,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = -nDocsPerShard; i < nDocsPerShard; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(nDocsPerShard, shards[0].getCollection(coll.getFullName()).count());
assert.eq(nDocsPerShard, shards[1].getCollection(coll.getFullName()).count());
diff --git a/jstests/sharding/merge_chunks_compound_shard_key.js b/jstests/sharding/merge_chunks_compound_shard_key.js
index 3472073f4c5..4eb965329c2 100644
--- a/jstests/sharding/merge_chunks_compound_shard_key.js
+++ b/jstests/sharding/merge_chunks_compound_shard_key.js
@@ -56,11 +56,11 @@ assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 2, y: 0}}))
assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 2, y: 1}}));
jsTest.log("Insert some data into each of the chunk ranges.");
-assert.writeOK(coll.insert({x: -1, y: 2}));
-assert.writeOK(coll.insert({x: 0, y: 2}));
-assert.writeOK(coll.insert({x: 1, y: 2}));
-assert.writeOK(coll.insert({x: 2, y: 1}));
-assert.writeOK(coll.insert({x: 2, y: 3}));
+assert.commandWorked(coll.insert({x: -1, y: 2}));
+assert.commandWorked(coll.insert({x: 0, y: 2}));
+assert.commandWorked(coll.insert({x: 1, y: 2}));
+assert.commandWorked(coll.insert({x: 2, y: 1}));
+assert.commandWorked(coll.insert({x: 2, y: 3}));
// Chunks after merge:
// (MinKey, { x: 0, y: 1 })
diff --git a/jstests/sharding/merge_chunks_test.js b/jstests/sharding/merge_chunks_test.js
index 3166f47113e..d4f74c26fb6 100644
--- a/jstests/sharding/merge_chunks_test.js
+++ b/jstests/sharding/merge_chunks_test.js
@@ -36,10 +36,10 @@ assert.commandWorked(
st.printShardingStatus();
// Insert some data into each of the consolidated ranges
-assert.writeOK(coll.insert({_id: 0}));
-assert.writeOK(coll.insert({_id: 10}));
-assert.writeOK(coll.insert({_id: 40}));
-assert.writeOK(coll.insert({_id: 110}));
+assert.commandWorked(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 10}));
+assert.commandWorked(coll.insert({_id: 40}));
+assert.commandWorked(coll.insert({_id: 110}));
var staleCollection = staleMongos.getCollection(coll + "");
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index e525a909fea..60306797dfd 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -3,7 +3,8 @@
var s = new ShardingTest({name: "migrateBig", shards: 2, other: {chunkSize: 1}});
-assert.writeOK(s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true));
+assert.commandWorked(
+ s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true));
assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', s.shard1.shardName);
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
@@ -19,7 +20,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var x = 0; x < 100; x++) {
bulk.insert({x: x, big: big});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 30}}));
assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 66}}));
@@ -35,7 +36,7 @@ print("direct : " + direct);
var directDB = direct.getDB("test");
for (var done = 0; done < 2 * 1024 * 1024; done += big.length) {
- assert.writeOK(directDB.foo.insert({x: 50 + Math.random(), big: big}));
+ assert.commandWorked(directDB.foo.insert({x: 50 + Math.random(), big: big}));
}
s.printShardingStatus();
diff --git a/jstests/sharding/migrateBig_balancer.js b/jstests/sharding/migrateBig_balancer.js
index 13195b61b65..b9df583c56a 100644
--- a/jstests/sharding/migrateBig_balancer.js
+++ b/jstests/sharding/migrateBig_balancer.js
@@ -36,7 +36,7 @@ for (var i = 0; i < 40; i++) {
bulk.insert({data: dataObj});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(40, coll.count(), "prep1");
assert.commandWorked(admin.runCommand({shardcollection: "" + coll, key: {_id: 1}}));
diff --git a/jstests/sharding/migrate_overwrite_id.js b/jstests/sharding/migrate_overwrite_id.js
index 8060a2de8b4..97f8aa5c218 100644
--- a/jstests/sharding/migrate_overwrite_id.js
+++ b/jstests/sharding/migrate_overwrite_id.js
@@ -19,8 +19,8 @@ var id = 12345;
jsTest.log("Inserting a document with id : 12345 into both shards with diff shard key...");
-assert.writeOK(coll.insert({_id: id, skey: -1}));
-assert.writeOK(coll.insert({_id: id, skey: 1}));
+assert.commandWorked(coll.insert({_id: id, skey: -1}));
+assert.commandWorked(coll.insert({_id: id, skey: 1}));
printjson(st.shard0.getCollection(coll + "").find({_id: id}).toArray());
printjson(st.shard1.getCollection(coll + "").find({_id: id}).toArray());
diff --git a/jstests/sharding/migration_critical_section_concurrency.js b/jstests/sharding/migration_critical_section_concurrency.js
index e98f1f05262..db9f6c7b749 100644
--- a/jstests/sharding/migration_critical_section_concurrency.js
+++ b/jstests/sharding/migration_critical_section_concurrency.js
@@ -18,15 +18,15 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll0', key: {
assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll0', middle: {Key: 0}}));
var coll0 = testDB.Coll0;
-assert.writeOK(coll0.insert({Key: -1, Value: '-1'}));
-assert.writeOK(coll0.insert({Key: 1, Value: '1'}));
+assert.commandWorked(coll0.insert({Key: -1, Value: '-1'}));
+assert.commandWorked(coll0.insert({Key: 1, Value: '1'}));
assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll1', key: {Key: 1}}));
assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll1', middle: {Key: 0}}));
var coll1 = testDB.Coll1;
-assert.writeOK(coll1.insert({Key: -1, Value: '-1'}));
-assert.writeOK(coll1.insert({Key: 1, Value: '1'}));
+assert.commandWorked(coll1.insert({Key: -1, Value: '-1'}));
+assert.commandWorked(coll1.insert({Key: 1, Value: '1'}));
// Ensure that coll0 has chunks on both shards so we can test queries against both donor and
// recipient for Coll1's migration below
@@ -44,8 +44,8 @@ waitForMoveChunkStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
// Ensure that all operations for 'Coll0', which is not being migrated are not stalled
assert.eq(1, coll0.find({Key: {$lte: -1}}).itcount());
assert.eq(1, coll0.find({Key: {$gte: 1}}).itcount());
-assert.writeOK(coll0.insert({Key: -2, Value: '-2'}));
-assert.writeOK(coll0.insert({Key: 2, Value: '2'}));
+assert.commandWorked(coll0.insert({Key: -2, Value: '-2'}));
+assert.commandWorked(coll0.insert({Key: 2, Value: '2'}));
assert.eq(2, coll0.find({Key: {$lte: -1}}).itcount());
assert.eq(2, coll0.find({Key: {$gte: 1}}).itcount());
@@ -56,7 +56,7 @@ assert.eq(1, coll1.find({Key: {$gte: 1}}).itcount());
// Ensure that all operations for non-sharded collections are not stalled
var collUnsharded = testDB.CollUnsharded;
assert.eq(0, collUnsharded.find({}).itcount());
-assert.writeOK(collUnsharded.insert({TestKey: 0, Value: 'Zero'}));
+assert.commandWorked(collUnsharded.insert({TestKey: 0, Value: 'Zero'}));
assert.eq(1, collUnsharded.find({}).itcount());
unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
diff --git a/jstests/sharding/migration_ignore_interrupts_1.js b/jstests/sharding/migration_ignore_interrupts_1.js
index 0272a204661..d093f0a8f0b 100644
--- a/jstests/sharding/migration_ignore_interrupts_1.js
+++ b/jstests/sharding/migration_ignore_interrupts_1.js
@@ -23,9 +23,9 @@ st.ensurePrimaryShard(dbName, st.shard0.shardName);
assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 0}}));
assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 10}}));
-assert.writeOK(coll1.insert({a: -10}));
-assert.writeOK(coll1.insert({a: 0}));
-assert.writeOK(coll1.insert({a: 10}));
+assert.commandWorked(coll1.insert({a: -10}));
+assert.commandWorked(coll1.insert({a: 0}));
+assert.commandWorked(coll1.insert({a: 10}));
assert.eq(3, shard0Coll1.find().itcount());
assert.eq(0, shard1Coll1.find().itcount());
assert.eq(0, shard2Coll1.find().itcount());
diff --git a/jstests/sharding/migration_ignore_interrupts_2.js b/jstests/sharding/migration_ignore_interrupts_2.js
index b60fa50ccf2..f5147659c96 100644
--- a/jstests/sharding/migration_ignore_interrupts_2.js
+++ b/jstests/sharding/migration_ignore_interrupts_2.js
@@ -17,7 +17,7 @@ var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbNa
assert.commandWorked(admin.runCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
-assert.writeOK(coll1.insert({a: 0}));
+assert.commandWorked(coll1.insert({a: 0}));
assert.eq(1, shard0Coll1.find().itcount());
assert.eq(0, shard1Coll1.find().itcount());
assert.eq(1, coll1.find().itcount());
diff --git a/jstests/sharding/migration_ignore_interrupts_3.js b/jstests/sharding/migration_ignore_interrupts_3.js
index e48159b77b8..9474643c60d 100644
--- a/jstests/sharding/migration_ignore_interrupts_3.js
+++ b/jstests/sharding/migration_ignore_interrupts_3.js
@@ -25,14 +25,14 @@ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
-assert.writeOK(coll1.insert({a: 0}));
+assert.commandWorked(coll1.insert({a: 0}));
assert.eq(1, shard0Coll1.find().itcount());
assert.eq(0, shard1Coll1.find().itcount());
assert.eq(0, shard2Coll1.find().itcount());
assert.eq(1, coll1.find().itcount());
assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
-assert.writeOK(coll2.insert({a: 0}));
+assert.commandWorked(coll2.insert({a: 0}));
assert.eq(1, shard0Coll2.find().itcount());
assert.eq(0, shard1Coll2.find().itcount());
assert.eq(0, shard2Coll2.find().itcount());
diff --git a/jstests/sharding/migration_ignore_interrupts_4.js b/jstests/sharding/migration_ignore_interrupts_4.js
index bc692a9897c..3d4ad25be63 100644
--- a/jstests/sharding/migration_ignore_interrupts_4.js
+++ b/jstests/sharding/migration_ignore_interrupts_4.js
@@ -25,14 +25,14 @@ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
-assert.writeOK(coll1.insert({a: 0}));
+assert.commandWorked(coll1.insert({a: 0}));
assert.eq(1, shard0Coll1.find().itcount());
assert.eq(0, shard1Coll1.find().itcount());
assert.eq(0, shard2Coll1.find().itcount());
assert.eq(1, coll1.find().itcount());
assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
-assert.writeOK(coll2.insert({a: 0}));
+assert.commandWorked(coll2.insert({a: 0}));
assert.eq(1, shard0Coll2.find().itcount());
assert.eq(0, shard1Coll2.find().itcount());
assert.eq(0, shard2Coll2.find().itcount());
@@ -79,8 +79,8 @@ joinMoveChunk = moveChunkParallel(
waitForMigrateStep(shard2, migrateStepNames.cloned);
// Populate donor (shard0) xfermods log.
-assert.writeOK(coll2.insert({a: 1}));
-assert.writeOK(coll2.insert({a: 2}));
+assert.commandWorked(coll2.insert({a: 1}));
+assert.commandWorked(coll2.insert({a: 2}));
assert.eq(3, coll2.find().itcount(), "Failed to insert documents into coll2.");
assert.eq(3, shard0Coll2.find().itcount());
diff --git a/jstests/sharding/migration_move_chunk_after_receive.js b/jstests/sharding/migration_move_chunk_after_receive.js
index fe28af0d8c3..4f58b88d693 100644
--- a/jstests/sharding/migration_move_chunk_after_receive.js
+++ b/jstests/sharding/migration_move_chunk_after_receive.js
@@ -20,10 +20,10 @@ var testColl = testDB.TestColl;
// Create 3 chunks with one document each and move them so that 0 is on shard0, 1 is on shard1,
// etc.
-assert.writeOK(testColl.insert({Key: 0, Value: 'Value'}));
-assert.writeOK(testColl.insert({Key: 100, Value: 'Value'}));
-assert.writeOK(testColl.insert({Key: 101, Value: 'Value'}));
-assert.writeOK(testColl.insert({Key: 200, Value: 'Value'}));
+assert.commandWorked(testColl.insert({Key: 0, Value: 'Value'}));
+assert.commandWorked(testColl.insert({Key: 100, Value: 'Value'}));
+assert.commandWorked(testColl.insert({Key: 101, Value: 'Value'}));
+assert.commandWorked(testColl.insert({Key: 200, Value: 'Value'}));
assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 100}}));
assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 101}}));
diff --git a/jstests/sharding/migration_sets_fromMigrate_flag.js b/jstests/sharding/migration_sets_fromMigrate_flag.js
index 73ee2dea163..8ce1cfc77ca 100644
--- a/jstests/sharding/migration_sets_fromMigrate_flag.js
+++ b/jstests/sharding/migration_sets_fromMigrate_flag.js
@@ -54,7 +54,7 @@ jsTest.log('Inserting 5 docs into donor shard, ensuring one orphan on the recipi
// Insert just one document into the collection and fail a migration after the cloning step in
// order to get an orphan onto the recipient shard with the correct UUID for the collection.
-assert.writeOK(coll.insert({_id: 2}));
+assert.commandWorked(coll.insert({_id: 2}));
assert.eq(1, donorColl.count());
assert.commandWorked(
recipient.adminCommand({configureFailPoint: "failMigrationLeaveOrphans", mode: "alwaysOn"}));
@@ -65,10 +65,10 @@ assert.commandWorked(
recipient.adminCommand({configureFailPoint: "failMigrationLeaveOrphans", mode: "off"}));
// Insert the remaining documents into the collection.
-assert.writeOK(coll.insert({_id: 0}));
-assert.writeOK(coll.insert({_id: 1}));
-assert.writeOK(coll.insert({_id: 3}));
-assert.writeOK(coll.insert({_id: 4}));
+assert.commandWorked(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 1}));
+assert.commandWorked(coll.insert({_id: 3}));
+assert.commandWorked(coll.insert({_id: 4}));
assert.eq(5, donorColl.count());
/**
@@ -100,8 +100,8 @@ waitForMigrateStep(recipient, migrateStepNames.cloned);
jsTest.log('Update 1 doc and delete 1 doc on donor within the currently migrating chunk...');
-assert.writeOK(coll.update({_id: 3}, {_id: 3, a: "updated doc"}));
-assert.writeOK(coll.remove({_id: 4}));
+assert.commandWorked(coll.update({_id: 3}, {_id: 3, a: "updated doc"}));
+assert.commandWorked(coll.remove({_id: 4}));
/**
* Finish migration. Unpause recipient migration, wait for it to collect
diff --git a/jstests/sharding/migration_with_source_ops.js b/jstests/sharding/migration_with_source_ops.js
index 91c7a460196..76dfbb9b239 100644
--- a/jstests/sharding/migration_with_source_ops.js
+++ b/jstests/sharding/migration_with_source_ops.js
@@ -53,9 +53,9 @@ assert.commandWorked(admin.runCommand({split: ns, middle: {a: 20}}));
// 10 documents in each chunk on the donor
jsTest.log('Inserting 20 docs into donor shard, 10 in each chunk....');
for (var i = 0; i < 10; ++i)
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
for (var i = 20; i < 30; ++i)
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
assert.eq(20, coll.count());
/**
@@ -99,15 +99,15 @@ var joinMoveChunk = moveChunkParallel(
waitForMigrateStep(recipient, migrateStepNames.cloned);
jsTest.log('Deleting 5 docs from each chunk (the migrating chunk and the remaining chunk)...');
-assert.writeOK(coll.remove({$and: [{a: {$gte: 5}}, {a: {$lt: 25}}]}));
+assert.commandWorked(coll.remove({$and: [{a: {$gte: 5}}, {a: {$lt: 25}}]}));
jsTest.log('Inserting 1 in the migrating chunk range and 1 in the remaining chunk range...');
-assert.writeOK(coll.insert({a: 10}));
-assert.writeOK(coll.insert({a: 30}));
+assert.commandWorked(coll.insert({a: 10}));
+assert.commandWorked(coll.insert({a: 30}));
jsTest.log('Updating 1 in the migrating chunk range and 1 in the remaining chunk range...');
-assert.writeOK(coll.update({a: 0}, {a: 0, updatedData: "updated"}));
-assert.writeOK(coll.update({a: 25}, {a: 25, updatedData: "updated"}));
+assert.commandWorked(coll.update({a: 0}, {a: 0, updatedData: "updated"}));
+assert.commandWorked(coll.update({a: 25}, {a: 25, updatedData: "updated"}));
/**
* Finish migration. Unpause recipient migration, wait for it to collect
diff --git a/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js b/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
index 012324e9946..133a1d3f84b 100644
--- a/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
+++ b/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
@@ -16,7 +16,7 @@ var recoveryDoc = {
minOpTimeUpdaters: 2
};
-assert.writeOK(st.shard0.getDB('admin').system.version.insert(recoveryDoc));
+assert.commandWorked(st.shard0.getDB('admin').system.version.insert(recoveryDoc));
// Make sure test is setup correctly.
var minOpTimeRecoveryDoc =
diff --git a/jstests/sharding/missing_key.js b/jstests/sharding/missing_key.js
index 14078cbff24..e6b04623fad 100644
--- a/jstests/sharding/missing_key.js
+++ b/jstests/sharding/missing_key.js
@@ -8,8 +8,8 @@ var st = new ShardingTest({shards: 1});
var db = st.s.getDB('testDb');
var coll = db.testColl;
-assert.writeOK(coll.insert({x: 1, z: 1}));
-assert.writeOK(coll.insert({y: 1, z: 1}));
+assert.commandWorked(coll.insert({x: 1, z: 1}));
+assert.commandWorked(coll.insert({y: 1, z: 1}));
assert.commandWorked(db.adminCommand({enableSharding: 'testDb'}));
diff --git a/jstests/sharding/mongos_no_detect_sharding.js b/jstests/sharding/mongos_no_detect_sharding.js
index 6dc458c2ae7..ae6aa1643d1 100644
--- a/jstests/sharding/mongos_no_detect_sharding.js
+++ b/jstests/sharding/mongos_no_detect_sharding.js
@@ -11,7 +11,7 @@ print("Creating unsharded connection...");
var mongos2 = st._mongos[1];
var coll = mongos2.getCollection("test.foo");
-assert.writeOK(coll.insert({i: 0}));
+assert.commandWorked(coll.insert({i: 0}));
print("Sharding collection...");
@@ -30,7 +30,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({i: i + 1});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
st.printShardingStatus(true);
diff --git a/jstests/sharding/mongos_query_comment.js b/jstests/sharding/mongos_query_comment.js
index ccb10d16824..a17500758ea 100644
--- a/jstests/sharding/mongos_query_comment.js
+++ b/jstests/sharding/mongos_query_comment.js
@@ -28,7 +28,7 @@ const shardColl = shardDB.test;
const collNS = mongosColl.getFullName();
for (let i = 0; i < 5; ++i) {
- assert.writeOK(mongosColl.insert({_id: i, a: i}));
+ assert.commandWorked(mongosColl.insert({_id: i, a: i}));
}
// The profiler will be used to verify that comments are present on the shard.
diff --git a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
index 25f18ef1f92..fa03a0310ba 100644
--- a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
@@ -52,8 +52,8 @@ var collSharded = mongos.getCollection("fooSharded.barSharded");
var collUnsharded = mongos.getCollection("fooUnsharded.barUnsharded");
// Create the unsharded database with shard0 primary
-assert.writeOK(collUnsharded.insert({some: "doc"}));
-assert.writeOK(collUnsharded.remove({}));
+assert.commandWorked(collUnsharded.insert({some: "doc"}));
+assert.commandWorked(collUnsharded.remove({}));
assert.commandWorked(
admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));
@@ -110,9 +110,9 @@ var mongosConnNew = null;
var wc = {writeConcern: {w: 2, wtimeout: 60000}};
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
jsTest.log("Stopping primary of third shard...");
@@ -126,15 +126,15 @@ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne(
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
jsTest.log("Testing idle connection with third primary down...");
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
@@ -150,11 +150,11 @@ mongosConnNew = authDBUsers(new Mongo(mongos.host));
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
gc(); // Clean up new connections
@@ -175,15 +175,15 @@ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne(
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}, wc));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
jsTest.log("Testing idle connection with second primary down...");
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}, wc));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
@@ -202,11 +202,11 @@ mongosConnNew.setSlaveOk();
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}, wc));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
gc(); // Clean up new connections
diff --git a/jstests/sharding/mongos_rs_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
index d41759de5db..b6fffb409e5 100644
--- a/jstests/sharding/mongos_rs_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
@@ -29,8 +29,8 @@ var collSharded = mongos.getCollection("fooSharded.barSharded");
var collUnsharded = mongos.getCollection("fooUnsharded.barUnsharded");
// Create the unsharded database
-assert.writeOK(collUnsharded.insert({some: "doc"}));
-assert.writeOK(collUnsharded.remove({}));
+assert.commandWorked(collUnsharded.insert({some: "doc"}));
+assert.commandWorked(collUnsharded.remove({}));
assert.commandWorked(
admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));
@@ -69,9 +69,9 @@ var mongosConnNew = null;
var wc = {writeConcern: {w: 2, wtimeout: 60000}};
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
jsTest.log("Stopping primary of third shard...");
@@ -85,15 +85,15 @@ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne(
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
jsTest.log("Testing idle connection with third primary down...");
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
@@ -109,11 +109,11 @@ mongosConnNew = new Mongo(mongos.host);
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = new Mongo(mongos.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
mongosConnNew = new Mongo(mongos.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
mongosConnNew = new Mongo(mongos.host);
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
gc(); // Clean up new connections
@@ -173,16 +173,16 @@ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne(
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
// Writes
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}, wc));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
jsTest.log("Testing idle connection with second primary down...");
// Writes
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}, wc));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
+assert.commandWorked(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
// Reads with read prefs
mongosConnIdle.setSlaveOk();
@@ -325,11 +325,11 @@ gc(); // Clean up new connections incrementally to compensate for slow win32 ma
// Writes
mongosConnNew = new Mongo(mongos.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
mongosConnNew = new Mongo(mongos.host);
assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}, wc));
mongosConnNew = new Mongo(mongos.host);
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
+assert.commandWorked(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
gc(); // Clean up new connections
diff --git a/jstests/sharding/mongos_shard_failure_tolerance.js b/jstests/sharding/mongos_shard_failure_tolerance.js
index 7d4560b5ee6..479ca437f97 100644
--- a/jstests/sharding/mongos_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_shard_failure_tolerance.js
@@ -32,8 +32,8 @@ assert.commandWorked(
admin.runCommand({moveChunk: collSharded.toString(), find: {_id: 0}, to: st.shard1.shardName}));
// Create the unsharded database
-assert.writeOK(collUnsharded.insert({some: "doc"}));
-assert.writeOK(collUnsharded.remove({}));
+assert.commandWorked(collUnsharded.insert({some: "doc"}));
+assert.commandWorked(collUnsharded.remove({}));
st.ensurePrimaryShard(collUnsharded.getDB().toString(), st.shard0.shardName);
//
@@ -46,9 +46,9 @@ var mongosConnActive = new Mongo(st.s0.host);
var mongosConnIdle = null;
var mongosConnNew = null;
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}));
jsTest.log("Stopping third shard...");
@@ -62,15 +62,15 @@ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne(
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}));
jsTest.log("Testing idle connection...");
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}));
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}));
+assert.commandWorked(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
@@ -86,11 +86,11 @@ mongosConnNew = new Mongo(st.s0.host);
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}));
mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}));
mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}));
+assert.commandWorked(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}));
gc(); // Clean up new connections
@@ -104,16 +104,16 @@ jsTest.log("Testing active connection...");
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}));
+assert.commandWorked(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}));
assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}));
+assert.commandWorked(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}));
jsTest.log("Testing idle connection...");
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}));
+assert.commandWorked(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}));
assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}));
+assert.commandWorked(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
@@ -127,13 +127,13 @@ mongosConnNew = new Mongo(st.s0.host);
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}));
+assert.commandWorked(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}));
mongosConnNew = new Mongo(st.s0.host);
assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}));
mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}));
+assert.commandWorked(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}));
st.stop();
})();
diff --git a/jstests/sharding/mongos_validate_writes.js b/jstests/sharding/mongos_validate_writes.js
index 66b71aa12c3..f920992799c 100644
--- a/jstests/sharding/mongos_validate_writes.js
+++ b/jstests/sharding/mongos_validate_writes.js
@@ -37,7 +37,7 @@ coll.ensureIndex({b: 1});
st.shardColl(coll, {b: 1}, {b: 0}, {b: 1}, coll.getDB(), true);
// Make sure that we can successfully insert, even though we have stale state
-assert.writeOK(staleCollA.insert({b: "b"}));
+assert.commandWorked(staleCollA.insert({b: "b"}));
// Make sure we unsuccessfully insert with old info
assert.writeError(staleCollB.insert({a: "a"}));
@@ -48,7 +48,7 @@ coll.ensureIndex({c: 1});
st.shardColl(coll, {c: 1}, {c: 0}, {c: 1}, coll.getDB(), true);
// Make sure we can successfully upsert, even though we have stale state
-assert.writeOK(staleCollA.update({c: "c"}, {c: "c"}, true));
+assert.commandWorked(staleCollA.update({c: "c"}, {c: "c"}, true));
// Make sure we unsuccessfully upsert with old info
assert.writeError(staleCollB.update({b: "b"}, {b: "b"}, true));
@@ -59,9 +59,9 @@ coll.ensureIndex({d: 1});
st.shardColl(coll, {d: 1}, {d: 0}, {d: 1}, coll.getDB(), true);
// Make sure we can successfully update, even though we have stale state
-assert.writeOK(coll.insert({d: "d"}));
+assert.commandWorked(coll.insert({d: "d"}));
-assert.writeOK(staleCollA.update({d: "d"}, {$set: {x: "x"}}, false, false));
+assert.commandWorked(staleCollA.update({d: "d"}, {$set: {x: "x"}}, false, false));
assert.eq(staleCollA.findOne().x, "x");
// Make sure we unsuccessfully update with old info
@@ -76,9 +76,9 @@ st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
st.shardColl(coll, {e: 1}, {e: 0}, {e: 1}, coll.getDB(), true);
// Make sure we can successfully remove, even though we have stale state
-assert.writeOK(coll.insert({e: "e"}));
+assert.commandWorked(coll.insert({e: "e"}));
-assert.writeOK(staleCollA.remove({e: "e"}, true));
+assert.commandWorked(staleCollA.remove({e: "e"}, true));
assert.eq(null, staleCollA.findOne());
// Make sure we unsuccessfully remove with old info
diff --git a/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js b/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js
index 5bece4f1c76..16536ea4bb1 100644
--- a/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js
+++ b/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js
@@ -20,10 +20,10 @@ st.ensurePrimaryShard(dbName, donor.shardName);
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
jsTest.log("Insert a document with {_id: 0} into " + ns + " through mongos");
-assert.writeOK(st.s.getCollection(ns).insert({_id: 0}));
+assert.commandWorked(st.s.getCollection(ns).insert({_id: 0}));
jsTest.log("Insert a document with {_id: 1} into " + ns + " directly on the recipient");
-assert.writeOK(recipient.getCollection(ns).insert({_id: 1}));
+assert.commandWorked(recipient.getCollection(ns).insert({_id: 1}));
jsTest.log("Check that the UUID on the recipient differs from the UUID on the donor");
const recipientUUIDBefore =
diff --git a/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js b/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
index c7602b4f644..1c6cc248d7a 100644
--- a/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
+++ b/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
@@ -93,7 +93,7 @@ var tests = [
for (let i = 0; i < 10; i++) {
bulk.insert({x: 10});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
},
checkRetryResult: function(result, retryResult) {
checkFindAndModifyResult(result, retryResult);
diff --git a/jstests/sharding/move_chunk_open_cursors.js b/jstests/sharding/move_chunk_open_cursors.js
index 1b15fb198cf..312f8143048 100644
--- a/jstests/sharding/move_chunk_open_cursors.js
+++ b/jstests/sharding/move_chunk_open_cursors.js
@@ -15,7 +15,7 @@ let bulk = coll.initializeUnorderedBulkOp();
for (let i = 0; i < nDocs; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Make sure we know which shard will host the data to begin.
st.ensurePrimaryShard(dbName, st.shard0.shardName);
diff --git a/jstests/sharding/move_chunk_remove_with_write_retryability.js b/jstests/sharding/move_chunk_remove_with_write_retryability.js
index c417710f462..e493bea1632 100644
--- a/jstests/sharding/move_chunk_remove_with_write_retryability.js
+++ b/jstests/sharding/move_chunk_remove_with_write_retryability.js
@@ -36,7 +36,7 @@ var setup = function(coll) {
bulk.insert({x: 10});
bulk.insert({x: 20});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
};
var checkRetryResult = function(result, retryResult) {
assert.eq(result.ok, retryResult.ok);
diff --git a/jstests/sharding/move_primary_clone_test.js b/jstests/sharding/move_primary_clone_test.js
index 81811e2e993..404562e7cd1 100644
--- a/jstests/sharding/move_primary_clone_test.js
+++ b/jstests/sharding/move_primary_clone_test.js
@@ -115,8 +115,8 @@ function createCollections(sharded) {
assert.commandWorked(db.createCollection('bar', barOptions));
for (let i = 0; i < 3; i++) {
- assert.writeOK(db.foo.insert({a: i}));
- assert.writeOK(db.bar.insert({a: i}));
+ assert.commandWorked(db.foo.insert({a: i}));
+ assert.commandWorked(db.bar.insert({a: i}));
}
assert.eq(3, db.foo.count());
assert.eq(3, db.bar.count());
diff --git a/jstests/sharding/movechunk_include.js b/jstests/sharding/movechunk_include.js
index f4c1c2f00a6..653326e2ab6 100644
--- a/jstests/sharding/movechunk_include.js
+++ b/jstests/sharding/movechunk_include.js
@@ -32,7 +32,7 @@ function setupMoveChunkTest(shardOptions) {
bulk.insert({_id: num++, s: str});
data += str.length;
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
// Make sure there are chunks to move
for (var i = 0; i < 10; ++i) {
diff --git a/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js b/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
index 3a03a485dc9..8c0a9d81a3c 100644
--- a/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
+++ b/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
@@ -23,7 +23,7 @@ assert.commandWorked(mongos.adminCommand({shardCollection: 'TestDB.TestColl', ke
var coll = mongos.getDB('TestDB').TestColl;
// We have one chunk initially
-assert.writeOK(coll.insert({Key: 0, Value: 'Test value'}));
+assert.commandWorked(coll.insert({Key: 0, Value: 'Test value'}));
pauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
diff --git a/jstests/sharding/movechunk_parallel.js b/jstests/sharding/movechunk_parallel.js
index 4c486e64f89..ca16d4caa8b 100644
--- a/jstests/sharding/movechunk_parallel.js
+++ b/jstests/sharding/movechunk_parallel.js
@@ -18,10 +18,10 @@ assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key
var coll = st.s0.getDB('TestDB').TestColl;
// Create 4 chunks initially
-assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
-assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
-assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
-assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
+assert.commandWorked(coll.insert({Key: 1, Value: 'Test value 1'}));
+assert.commandWorked(coll.insert({Key: 10, Value: 'Test value 10'}));
+assert.commandWorked(coll.insert({Key: 20, Value: 'Test value 20'}));
+assert.commandWorked(coll.insert({Key: 30, Value: 'Test value 30'}));
assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 10}));
assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20}));
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
index b3d4bd7a9c3..f1531142bd0 100644
--- a/jstests/sharding/mrShardedOutput.js
+++ b/jstests/sharding/mrShardedOutput.js
@@ -40,7 +40,7 @@ var bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numBatch; ++i) {
bulk.insert({a: numDocs + i, y: str, i: numDocs + i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
numDocs += numBatch;
@@ -100,7 +100,7 @@ bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numBatch; ++i) {
bulk.insert({a: numDocs + i, y: str, i: numDocs + i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("No errors on insert batch.");
numDocs += numBatch;
diff --git a/jstests/sharding/mr_and_agg_versioning.js b/jstests/sharding/mr_and_agg_versioning.js
index bb129b2c6b7..e0cb7c94d26 100644
--- a/jstests/sharding/mr_and_agg_versioning.js
+++ b/jstests/sharding/mr_and_agg_versioning.js
@@ -26,7 +26,7 @@ var bulk = st.s.getCollection(collName).initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
bulk.insert({_id: i, key: (i % numKeys), value: i % numKeys});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Add orphaned documents directly to the shards to ensure they are properly filtered out.
st.shard0.getCollection(collName).insert({_id: 0, key: 0, value: 0});
diff --git a/jstests/sharding/mr_shard_version.js b/jstests/sharding/mr_shard_version.js
index 52622b4ce66..195444c97f0 100644
--- a/jstests/sharding/mr_shard_version.js
+++ b/jstests/sharding/mr_shard_version.js
@@ -16,7 +16,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
bulk.insert({_id: i, key: "" + (i % numKeys), value: i % numKeys});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(numDocs, coll.find().itcount());
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
index fad2106aad5..796ef9e3a7e 100644
--- a/jstests/sharding/multi_mongos2.js
+++ b/jstests/sharding/multi_mongos2.js
@@ -12,7 +12,7 @@ assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {num:
st.configRS.awaitLastOpCommitted();
// "test.existing" - unsharded
-assert.writeOK(st.s0.getDB('test').existing.insert({_id: 1}));
+assert.commandWorked(st.s0.getDB('test').existing.insert({_id: 1}));
assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
@@ -27,7 +27,7 @@ assert.eq(1, st.s0.getDB('test').existing.count({_id: 1})); // SERVER-2828
assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
// Test stats
-assert.writeOK(st.s0.getDB('test').existing2.insert({_id: 1}));
+assert.commandWorked(st.s0.getDB('test').existing2.insert({_id: 1}));
assert.eq(1, st.s0.getDB('test').existing2.count({_id: 1}));
assert.eq(1, st.s1.getDB('test').existing2.count({_id: 1}));
@@ -43,10 +43,10 @@ assert.commandWorked(st.s1.adminCommand({split: "test.existing2", middle: {_id:
}
// Test admin commands
-assert.writeOK(st.s0.getDB('test').existing3.insert({_id: 1}));
+assert.commandWorked(st.s0.getDB('test').existing3.insert({_id: 1}));
assert.eq(1, st.s0.getDB('test').existing3.count({_id: 1}));
assert.eq(1, st.s1.getDB('test').existing3.count({_id: 1}));
-assert.writeOK(st.s1.adminCommand({shardcollection: "test.existing3", key: {_id: 1}}));
+assert.commandWorked(st.s1.adminCommand({shardcollection: "test.existing3", key: {_id: 1}}));
st.configRS.awaitLastOpCommitted();
assert.commandWorked(st.s1.adminCommand({split: "test.existing3", middle: {_id: 5}}));
diff --git a/jstests/sharding/multi_mongos2a.js b/jstests/sharding/multi_mongos2a.js
index 7e2dce7c8b0..8650046fe78 100644
--- a/jstests/sharding/multi_mongos2a.js
+++ b/jstests/sharding/multi_mongos2a.js
@@ -9,7 +9,7 @@ st.ensurePrimaryShard('test', st.shard1.shardName);
assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
-assert.writeOK(st.s0.getDB('test').existing.insert({_id: 1}));
+assert.commandWorked(st.s0.getDB('test').existing.insert({_id: 1}));
assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
diff --git a/jstests/sharding/multi_shard_transaction_without_majority_reads.js b/jstests/sharding/multi_shard_transaction_without_majority_reads.js
index 8ddb69a665d..e694f5d4c7a 100644
--- a/jstests/sharding/multi_shard_transaction_without_majority_reads.js
+++ b/jstests/sharding/multi_shard_transaction_without_majority_reads.js
@@ -15,22 +15,22 @@ st.ensurePrimaryShard('TestDB', st.shard0.shardName);
assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
const coll = st.s0.getDB('TestDB').TestColl;
-assert.writeOK(coll.insert({_id: -1, x: 0}));
-assert.writeOK(coll.insert({_id: 1, x: 0}));
+assert.commandWorked(coll.insert({_id: -1, x: 0}));
+assert.commandWorked(coll.insert({_id: 1, x: 0}));
assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {_id: 1}}));
assert.commandWorked(
st.s0.adminCommand({moveChunk: 'TestDB.TestColl', find: {_id: 1}, to: st.shard1.shardName}));
-assert.writeOK(coll.update({_id: -1}, {$inc: {x: 1}}));
-assert.writeOK(coll.update({_id: 1}, {$inc: {x: 1}}));
+assert.commandWorked(coll.update({_id: -1}, {$inc: {x: 1}}));
+assert.commandWorked(coll.update({_id: 1}, {$inc: {x: 1}}));
const session = st.s0.startSession();
const sessionColl = session.getDatabase('TestDB').TestColl;
session.startTransaction();
-assert.writeOK(sessionColl.update({_id: -1}, {$inc: {x: 1}}));
-assert.writeOK(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
+assert.commandWorked(sessionColl.update({_id: -1}, {$inc: {x: 1}}));
+assert.commandWorked(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
assert.commandFailedWithCode(session.commitTransaction_forTesting(),
ErrorCodes.ReadConcernMajorityNotEnabled);
diff --git a/jstests/sharding/multi_write_target.js b/jstests/sharding/multi_write_target.js
index 90330f43cc0..14d1e0d6a39 100644
--- a/jstests/sharding/multi_write_target.js
+++ b/jstests/sharding/multi_write_target.js
@@ -23,13 +23,13 @@ assert.commandWorked(
jsTest.log("Testing multi-update...");
// Put data on all shards
-assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
-assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
-assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: 100, x: 1}));
+assert.commandWorked(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
+assert.commandWorked(st.s0.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
+assert.commandWorked(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: 100, x: 1}));
// Non-multi-update doesn't work without shard key
assert.writeError(coll.update({x: 1}, {$set: {updated: true}}, {multi: false}));
-assert.writeOK(coll.update({x: 1}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(coll.update({x: 1}, {$set: {updated: true}}, {multi: true}));
// Ensure update goes to *all* shards
assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updated: true}));
@@ -38,7 +38,7 @@ assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({updated: true
// _id update works, and goes to all shards even on the stale mongos
var staleColl = st.s1.getCollection('foo.bar');
-assert.writeOK(staleColl.update({_id: 0}, {$set: {updatedById: true}}, {multi: false}));
+assert.commandWorked(staleColl.update({_id: 0}, {$set: {updatedById: true}}, {multi: false}));
// Ensure _id update goes to *all* shards
assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updatedById: true}));
@@ -49,7 +49,7 @@ jsTest.log("Testing multi-delete...");
// non-multi-delete doesn't work without shard key
assert.writeError(coll.remove({x: 1}, {justOne: true}));
-assert.writeOK(coll.remove({x: 1}, {justOne: false}));
+assert.commandWorked(coll.remove({x: 1}, {justOne: false}));
// Ensure delete goes to *all* shards
assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
@@ -57,12 +57,12 @@ assert.eq(null, st.shard1.getCollection(coll.toString()).findOne({x: 1}));
assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({x: 1}));
// Put more on all shards
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
-assert.writeOK(st.shard1.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
+assert.commandWorked(st.shard0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
+assert.commandWorked(st.shard1.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
// Data not in chunks
-assert.writeOK(st.shard2.getCollection(coll.toString()).insert({_id: 0, x: 1}));
+assert.commandWorked(st.shard2.getCollection(coll.toString()).insert({_id: 0, x: 1}));
-assert.writeOK(coll.remove({_id: 0}, {justOne: true}));
+assert.commandWorked(coll.remove({_id: 0}, {justOne: true}));
// Ensure _id delete goes to *all* shards
assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
diff --git a/jstests/sharding/oplog_document_key.js b/jstests/sharding/oplog_document_key.js
index d138457e4f0..3830f41686c 100644
--- a/jstests/sharding/oplog_document_key.js
+++ b/jstests/sharding/oplog_document_key.js
@@ -19,28 +19,28 @@ assert.commandWorked(db.adminCommand({shardcollection: 'test.byX', key: {x: 1}})
assert.commandWorked(db.adminCommand({shardcollection: 'test.byXId', key: {x: 1, _id: 1}}));
assert.commandWorked(db.adminCommand({shardcollection: 'test.byIdX', key: {_id: 1, x: 1}}));
-assert.writeOK(db.un.insert({_id: 10, x: 50, y: 60}));
-assert.writeOK(db.un.insert({_id: 30, x: 70, y: 80}));
+assert.commandWorked(db.un.insert({_id: 10, x: 50, y: 60}));
+assert.commandWorked(db.un.insert({_id: 30, x: 70, y: 80}));
-assert.writeOK(db.byId.insert({_id: 11, x: 51, y: 61}));
-assert.writeOK(db.byId.insert({_id: 31, x: 71, y: 81}));
+assert.commandWorked(db.byId.insert({_id: 11, x: 51, y: 61}));
+assert.commandWorked(db.byId.insert({_id: 31, x: 71, y: 81}));
-assert.writeOK(db.byX.insert({_id: 12, x: 52, y: 62}));
-assert.writeOK(db.byX.insert({_id: 32, x: 72, y: 82}));
+assert.commandWorked(db.byX.insert({_id: 12, x: 52, y: 62}));
+assert.commandWorked(db.byX.insert({_id: 32, x: 72, y: 82}));
-assert.writeOK(db.byXId.insert({_id: 13, x: 53, y: 63}));
-assert.writeOK(db.byXId.insert({_id: 33, x: 73, y: 83}));
+assert.commandWorked(db.byXId.insert({_id: 13, x: 53, y: 63}));
+assert.commandWorked(db.byXId.insert({_id: 33, x: 73, y: 83}));
-assert.writeOK(db.byIdX.insert({_id: 14, x: 54, y: 64}));
-assert.writeOK(db.byIdX.insert({_id: 34, x: 74, y: 84}));
+assert.commandWorked(db.byIdX.insert({_id: 14, x: 54, y: 64}));
+assert.commandWorked(db.byIdX.insert({_id: 34, x: 74, y: 84}));
var oplog = st.rs0.getPrimary().getDB('local').oplog.rs;
////////////////////////////////////////////////////////////////////////
jsTest.log("Test update command on 'un'");
-assert.writeOK(db.un.update({_id: 10, x: 50}, {$set: {y: 70}})); // in place
-assert.writeOK(db.un.update({_id: 30, x: 70}, {y: 75})); // replacement
+assert.commandWorked(db.un.update({_id: 10, x: 50}, {$set: {y: 70}})); // in place
+assert.commandWorked(db.un.update({_id: 30, x: 70}, {y: 75})); // replacement
// unsharded, only _id appears in o2:
@@ -53,8 +53,8 @@ assert.eq(b.o2, {_id: 30});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test update command on 'byId'");
-assert.writeOK(db.byId.update({_id: 11}, {$set: {y: 71}})); // in place
-assert.writeOK(db.byId.update({_id: 31}, {x: 71, y: 76})); // replacement
+assert.commandWorked(db.byId.update({_id: 11}, {$set: {y: 71}})); // in place
+assert.commandWorked(db.byId.update({_id: 31}, {x: 71, y: 76})); // replacement
// sharded by {_id: 1}: only _id appears in o2:
@@ -67,8 +67,8 @@ assert.eq(b.o2, {_id: 31});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test update command on 'byX'");
-assert.writeOK(db.byX.update({x: 52}, {$set: {y: 72}})); // in place
-assert.writeOK(db.byX.update({x: 72}, {x: 72, y: 77})); // replacement
+assert.commandWorked(db.byX.update({x: 52}, {$set: {y: 72}})); // in place
+assert.commandWorked(db.byX.update({x: 72}, {x: 72, y: 77})); // replacement
// sharded by {x: 1}: x appears in o2, followed by _id:
@@ -81,8 +81,8 @@ assert.eq(b.o2, {x: 72, _id: 32});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test update command on 'byXId'");
-assert.writeOK(db.byXId.update({_id: 13, x: 53}, {$set: {y: 73}})); // in place
-assert.writeOK(db.byXId.update({_id: 33, x: 73}, {x: 73, y: 78})); // replacement
+assert.commandWorked(db.byXId.update({_id: 13, x: 53}, {$set: {y: 73}})); // in place
+assert.commandWorked(db.byXId.update({_id: 33, x: 73}, {x: 73, y: 78})); // replacement
// sharded by {x: 1, _id: 1}: x appears in o2, followed by _id:
@@ -95,8 +95,8 @@ assert.eq(b.o2, {x: 73, _id: 33});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test update command on 'byIdX'");
-assert.writeOK(db.byIdX.update({_id: 14, x: 54}, {$set: {y: 74}})); // in place
-assert.writeOK(db.byIdX.update({_id: 34, x: 74}, {x: 74, y: 79})); // replacement
+assert.commandWorked(db.byIdX.update({_id: 14, x: 54}, {$set: {y: 74}})); // in place
+assert.commandWorked(db.byIdX.update({_id: 34, x: 74}, {x: 74, y: 79})); // replacement
// sharded by {_id: 1, x: 1}: _id appears in o2, followed by x:
@@ -109,8 +109,8 @@ assert.eq(b.o2, {_id: 34, x: 74});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test remove command: 'un'");
-assert.writeOK(db.un.remove({_id: 10}));
-assert.writeOK(db.un.remove({_id: 30}));
+assert.commandWorked(db.un.remove({_id: 10}));
+assert.commandWorked(db.un.remove({_id: 30}));
a = oplog.findOne({ns: 'test.un', op: 'd', 'o._id': 10});
assert.eq(a.o, {_id: 10});
@@ -120,8 +120,8 @@ assert.eq(b.o, {_id: 30});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test remove command: 'byX'");
-assert.writeOK(db.byX.remove({_id: 12}));
-assert.writeOK(db.byX.remove({_id: 32}));
+assert.commandWorked(db.byX.remove({_id: 12}));
+assert.commandWorked(db.byX.remove({_id: 32}));
a = oplog.findOne({ns: 'test.byX', op: 'd', 'o._id': 12});
assert.eq(a.o, {x: 52, _id: 12});
@@ -131,8 +131,8 @@ assert.eq(b.o, {x: 72, _id: 32});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test remove command: 'byXId'");
-assert.writeOK(db.byXId.remove({_id: 13}));
-assert.writeOK(db.byXId.remove({_id: 33}));
+assert.commandWorked(db.byXId.remove({_id: 13}));
+assert.commandWorked(db.byXId.remove({_id: 33}));
a = oplog.findOne({ns: 'test.byXId', op: 'd', 'o._id': 13});
assert.eq(a.o, {x: 53, _id: 13});
@@ -142,8 +142,8 @@ assert.eq(b.o, {x: 73, _id: 33});
////////////////////////////////////////////////////////////////////////
jsTest.log("Test remove command: 'byIdX'");
-assert.writeOK(db.byIdX.remove({_id: 14}));
-assert.writeOK(db.byIdX.remove({_id: 34}));
+assert.commandWorked(db.byIdX.remove({_id: 14}));
+assert.commandWorked(db.byIdX.remove({_id: 34}));
a = oplog.findOne({ns: 'test.byIdX', op: 'd', 'o._id': 14});
assert.eq(a.o, {_id: 14, x: 54});
diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js
index c02e708fd53..1be6fa06f0d 100644
--- a/jstests/sharding/parallel.js
+++ b/jstests/sharding/parallel.js
@@ -25,7 +25,7 @@ s.startBalancer();
var bulk = db.foo.initializeUnorderedBulkOp();
for (i = 0; i < N; i++)
bulk.insert({_id: i});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
var doCommand = function(dbname, cmd) {
x = benchRun({
diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index b1a730db297..b0e37e91bba 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -32,7 +32,7 @@ for (i = 0; i < 100; i++) {
bulk.insert({num: i, str: longStr});
bulk.insert({num: i + 100, x: i, str: longStr});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// no usable index yet, should throw
assert.throws(function() {
@@ -43,13 +43,13 @@ assert.throws(function() {
assert.commandWorked(coll.ensureIndex({num: 1, x: 1}));
// usable index, but doc with empty 'num' value, so still should throw
-assert.writeOK(coll.insert({x: -5}));
+assert.commandWorked(coll.insert({x: -5}));
assert.throws(function() {
s.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}});
});
// remove the bad doc. now should finally succeed
-assert.writeOK(coll.remove({x: -5}));
+assert.commandWorked(coll.remove({x: -5}));
assert.commandWorked(s.s0.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}}));
// make sure extra index is not created
@@ -167,7 +167,7 @@ for (i = 0; i < 3; i++) {
bulk.insert({skey: 0, extra: i, superfluous: j});
}
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
// split on that key, and check it makes 2 chunks
var splitRes = admin.runCommand({split: coll2 + "", middle: {skey: 0}});
diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js
index 288d6abe694..87d4e81a315 100644
--- a/jstests/sharding/presplit.js
+++ b/jstests/sharding/presplit.js
@@ -19,7 +19,7 @@ while (inserted < (20 * 1024 * 1024)) {
bulk.insert({_id: num++, s: bigString});
inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Make sure that there's only one chunk holding all the data.
s.printChunks();
diff --git a/jstests/sharding/primary_config_server_blackholed_from_mongos.js b/jstests/sharding/primary_config_server_blackholed_from_mongos.js
index 674dc1f9235..1b2dfbc15b5 100644
--- a/jstests/sharding/primary_config_server_blackholed_from_mongos.js
+++ b/jstests/sharding/primary_config_server_blackholed_from_mongos.js
@@ -16,7 +16,7 @@ var bulk = testDB.ShardedColl.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
const configPrimary = st.configRS.getPrimary();
const admin = configPrimary.getDB("admin");
@@ -49,7 +49,7 @@ assert.writeError(
jsTest.log('Doing CRUD operations on the sharded collection');
assert.eq(1000, testDB.ShardedColl.find().itcount());
-assert.writeOK(testDB.ShardedColl.insert({_id: 1000}));
+assert.commandWorked(testDB.ShardedColl.insert({_id: 1000}));
assert.eq(1001, testDB.ShardedColl.find().count());
jsTest.log('Doing read operations on a config server collection');
diff --git a/jstests/sharding/printShardingStatus.js b/jstests/sharding/printShardingStatus.js
index 18bc8bdea6e..c33ca3a1ac5 100644
--- a/jstests/sharding/printShardingStatus.js
+++ b/jstests/sharding/printShardingStatus.js
@@ -102,7 +102,7 @@ config.getCollectionInfos().forEach(function(c) {
assert.commandWorked(configCopy.createCollection(c.name, c.options));
// Clone the docs.
config.getCollection(c.name).find().hint({_id: 1}).forEach(function(d) {
- assert.writeOK(configCopy.getCollection(c.name).insert(d));
+ assert.commandWorked(configCopy.getCollection(c.name).insert(d));
});
// Build the indexes.
config.getCollection(c.name).getIndexes().forEach(function(i) {
@@ -179,11 +179,11 @@ function testCollDetails(args) {
assert.commandWorked(admin.runCommand(cmdObj));
if (args.hasOwnProperty("unique")) {
- assert.writeOK(mongos.getDB("config").collections.update({_id: collName},
- {$set: {"unique": args.unique}}));
+ assert.commandWorked(mongos.getDB("config").collections.update(
+ {_id: collName}, {$set: {"unique": args.unique}}));
}
if (args.hasOwnProperty("noBalance")) {
- assert.writeOK(mongos.getDB("config").collections.update(
+ assert.commandWorked(mongos.getDB("config").collections.update(
{_id: collName}, {$set: {"noBalance": args.noBalance}}));
}
@@ -217,7 +217,7 @@ function testCollDetails(args) {
mongos.getCollection(collName).drop();
} catch (e) {
// Ignore drop errors because they are from the illegal values in the collection entry
- assert.writeOK(mongos.getDB("config").collections.remove({_id: collName}));
+ assert.commandWorked(mongos.getDB("config").collections.remove({_id: collName}));
}
testCollDetailsNum++;
diff --git a/jstests/sharding/query_config.js b/jstests/sharding/query_config.js
index 65739b2b9a5..d6755814229 100644
--- a/jstests/sharding/query_config.js
+++ b/jstests/sharding/query_config.js
@@ -190,7 +190,7 @@ var queryConfigChunks = function(st) {
// Setup.
assert.commandWorked(st.s.adminCommand({shardcollection: testColl.getFullName(), key: {e: 1}}));
for (var i = 0; i < testCollData.length; i++) {
- assert.writeOK(testColl.insert(testCollData[i]));
+ assert.commandWorked(testColl.insert(testCollData[i]));
}
assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 2}}));
assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 6}}));
@@ -256,7 +256,7 @@ var queryUserCreated = function(database) {
// Setup.
dropCollectionIfExists(userColl);
for (var i = 0; i < userCollData.length; i++) {
- assert.writeOK(userColl.insert(userCollData[i]));
+ assert.commandWorked(userColl.insert(userCollData[i]));
}
assert.commandWorked(userColl.createIndex({s: 1}));
diff --git a/jstests/sharding/query_sharded.js b/jstests/sharding/query_sharded.js
index 92ef5f19678..b79cff55cb1 100644
--- a/jstests/sharding/query_sharded.js
+++ b/jstests/sharding/query_sharded.js
@@ -11,7 +11,7 @@ var coll = mongos.getCollection("foo.bar");
//
// Ensure we can't use exhaust option through mongos
coll.remove({});
-assert.writeOK(coll.insert({a: 'b'}));
+assert.commandWorked(coll.insert({a: 'b'}));
var query = coll.find({});
assert.neq(null, query.next());
query = coll.find({}).addOption(DBQuery.Option.exhaust);
@@ -23,7 +23,7 @@ assert.throws(function() {
//
// Ensure we can't trick mongos by inserting exhaust option on a command through mongos
coll.remove({});
-assert.writeOK(coll.insert({a: 'b'}));
+assert.commandWorked(coll.insert({a: 'b'}));
var cmdColl = mongos.getCollection(coll.getDB().toString() + ".$cmd");
var cmdQuery = cmdColl.find({ping: 1}).limit(1);
assert.commandWorked(cmdQuery.next());
diff --git a/jstests/sharding/read_pref_multi_mongos_stale_config.js b/jstests/sharding/read_pref_multi_mongos_stale_config.js
index b451b976d39..b3c6ab6bc42 100644
--- a/jstests/sharding/read_pref_multi_mongos_stale_config.js
+++ b/jstests/sharding/read_pref_multi_mongos_stale_config.js
@@ -24,8 +24,8 @@ var toShard = configDB2.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
assert.commandWorked(testDB2.adminCommand({moveChunk: 'test.user', to: toShard, find: {x: 50}}));
// Insert a document into each chunk
-assert.writeOK(testDB2.user.insert({x: 30}));
-assert.writeOK(testDB2.user.insert({x: 130}));
+assert.commandWorked(testDB2.user.insert({x: 30}));
+assert.commandWorked(testDB2.user.insert({x: 130}));
// The testDB1 mongos does not know the chunk has been moved, and will retry
var cursor = testDB1.user.find({x: 30}).readPref('primary');
diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js
index 40326f50fec..512719b08b6 100644
--- a/jstests/sharding/recovering_slaveok.js
+++ b/jstests/sharding/recovering_slaveok.js
@@ -30,16 +30,16 @@ var collSOk = mongosSOK.getCollection("" + coll);
var rsA = shardTest.rs0;
var rsB = shardTest.rs1;
-assert.writeOK(rsA.getPrimary().getDB("test_a").dummy.insert({x: 1}));
-assert.writeOK(rsB.getPrimary().getDB("test_b").dummy.insert({x: 1}));
+assert.commandWorked(rsA.getPrimary().getDB("test_a").dummy.insert({x: 1}));
+assert.commandWorked(rsB.getPrimary().getDB("test_b").dummy.insert({x: 1}));
rsA.awaitReplication();
rsB.awaitReplication();
print("1: initial insert");
-assert.writeOK(coll.save({_id: -1, a: "a", date: new Date()}));
-assert.writeOK(coll.save({_id: 1, b: "b", date: new Date()}));
+assert.commandWorked(coll.save({_id: -1, a: "a", date: new Date()}));
+assert.commandWorked(coll.save({_id: 1, b: "b", date: new Date()}));
print("2: shard collection");
diff --git a/jstests/sharding/refine_collection_shard_key_basic.js b/jstests/sharding/refine_collection_shard_key_basic.js
index 1b10759142d..a698649a3be 100644
--- a/jstests/sharding/refine_collection_shard_key_basic.js
+++ b/jstests/sharding/refine_collection_shard_key_basic.js
@@ -33,7 +33,7 @@ function enableShardingAndShardColl(keyDoc) {
function dropAndRecreateColl(keyDoc) {
assert.commandWorked(mongos.getDB(kDbName).runCommand({drop: kCollName}));
- assert.writeOK(mongos.getCollection(kNsName).insert(keyDoc));
+ assert.commandWorked(mongos.getCollection(kNsName).insert(keyDoc));
}
function dropAndReshardColl(keyDoc) {
@@ -81,8 +81,8 @@ function setupCRUDBeforeRefine() {
const sessionDB = session.getDatabase(kDbName);
// The documents below will be read after refineCollectionShardKey to verify data integrity.
- assert.writeOK(sessionDB.getCollection(kCollName).insert({a: 5, b: 5, c: 5, d: 5}));
- assert.writeOK(sessionDB.getCollection(kCollName).insert({a: 10, b: 10, c: 10, d: 10}));
+ assert.commandWorked(sessionDB.getCollection(kCollName).insert({a: 5, b: 5, c: 5, d: 5}));
+ assert.commandWorked(sessionDB.getCollection(kCollName).insert({a: 10, b: 10, c: 10, d: 10}));
}
function validateCRUDAfterRefine() {
@@ -104,17 +104,17 @@ function validateCRUDAfterRefine() {
ErrorCodes.ShardKeyNotFound);
assert.writeErrorWithCode(sessionDB.getCollection(kCollName).insert({a: -1, b: -1}),
ErrorCodes.ShardKeyNotFound);
- assert.writeOK(sessionDB.getCollection(kCollName).insert({a: 1, b: 1, c: 1, d: 1}));
- assert.writeOK(sessionDB.getCollection(kCollName).insert({a: -1, b: -1, c: -1, d: -1}));
+ assert.commandWorked(sessionDB.getCollection(kCollName).insert({a: 1, b: 1, c: 1, d: 1}));
+ assert.commandWorked(sessionDB.getCollection(kCollName).insert({a: -1, b: -1, c: -1, d: -1}));
// The full shard key is required when updating documents.
assert.writeErrorWithCode(
sessionDB.getCollection(kCollName).update({a: 1, b: 1}, {$set: {b: 2}}), 31025);
assert.writeErrorWithCode(
sessionDB.getCollection(kCollName).update({a: -1, b: -1}, {$set: {b: 2}}), 31025);
- assert.writeOK(
+ assert.commandWorked(
sessionDB.getCollection(kCollName).update({a: 1, b: 1, c: 1, d: 1}, {$set: {b: 2}}));
- assert.writeOK(
+ assert.commandWorked(
sessionDB.getCollection(kCollName).update({a: -1, b: -1, c: -1, d: -1}, {$set: {b: 4}}));
assert.eq(2, sessionDB.getCollection(kCollName).findOne({a: 1}).b);
@@ -131,10 +131,12 @@ function validateCRUDAfterRefine() {
ErrorCodes.ShardKeyNotFound);
assert.writeErrorWithCode(sessionDB.getCollection(kCollName).remove({a: -1, b: -1}, true),
ErrorCodes.ShardKeyNotFound);
- assert.writeOK(sessionDB.getCollection(kCollName).remove({a: 1, b: 2, c: 1, d: 1}, true));
- assert.writeOK(sessionDB.getCollection(kCollName).remove({a: -1, b: 4, c: -1, d: -1}, true));
- assert.writeOK(sessionDB.getCollection(kCollName).remove({a: 5, b: 5, c: 5, d: 5}, true));
- assert.writeOK(sessionDB.getCollection(kCollName).remove({a: 10, b: 10, c: 10, d: 10}, true));
+ assert.commandWorked(sessionDB.getCollection(kCollName).remove({a: 1, b: 2, c: 1, d: 1}, true));
+ assert.commandWorked(
+ sessionDB.getCollection(kCollName).remove({a: -1, b: 4, c: -1, d: -1}, true));
+ assert.commandWorked(sessionDB.getCollection(kCollName).remove({a: 5, b: 5, c: 5, d: 5}, true));
+ assert.commandWorked(
+ sessionDB.getCollection(kCollName).remove({a: 10, b: 10, c: 10, d: 10}, true));
assert.eq(null, sessionDB.getCollection(kCollName).findOne());
}
@@ -259,7 +261,7 @@ assert.commandFailedWithCode(
mongos.adminCommand({refineCollectionShardKey: kNsName, key: {_id: 1, aKey: 1}}),
ErrorCodes.NamespaceNotFound);
-assert.writeOK(mongos.getCollection(kNsName).insert({aKey: 1}));
+assert.commandWorked(mongos.getCollection(kNsName).insert({aKey: 1}));
// Should fail because namespace 'db.foo' is not sharded. NOTE: This NamespaceNotSharded error
// is thrown in RefineCollectionShardKeyCommand by 'getShardedCollectionRoutingInfoWithRefresh'.
@@ -378,7 +380,7 @@ assert.commandFailedWithCode(
// Should fail because only a multikey index exists for new shard key {_id: 1, aKey: 1}.
dropAndReshardColl({_id: 1});
assert.commandWorked(mongos.getCollection(kNsName).createIndex({_id: 1, aKey: 1}));
-assert.writeOK(mongos.getCollection(kNsName).insert({aKey: [1, 2, 3, 4, 5]}));
+assert.commandWorked(mongos.getCollection(kNsName).insert({aKey: [1, 2, 3, 4, 5]}));
assert.commandFailedWithCode(
mongos.adminCommand({refineCollectionShardKey: kNsName, key: {_id: 1, aKey: 1}}),
@@ -408,7 +410,7 @@ validateConfigChangelog(1);
// shard key {_id: 1, aKey: 1}.
dropAndReshardColl({_id: 1});
assert.commandWorked(mongos.getCollection(kNsName).createIndex({_id: 1, aKey: 1}));
-assert.writeOK(mongos.getCollection(kNsName).insert({_id: 12345}));
+assert.commandWorked(mongos.getCollection(kNsName).insert({_id: 12345}));
assert.commandFailedWithCode(
mongos.adminCommand({refineCollectionShardKey: kNsName, key: {_id: 1, aKey: 1}}),
diff --git a/jstests/sharding/refine_collection_shard_key_jumbo.js b/jstests/sharding/refine_collection_shard_key_jumbo.js
index 9abee48c7fe..2d198f30179 100644
--- a/jstests/sharding/refine_collection_shard_key_jumbo.js
+++ b/jstests/sharding/refine_collection_shard_key_jumbo.js
@@ -25,7 +25,7 @@ function generateJumboChunk() {
bulk.insert({x: x, y: i, big: big});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
}
function runBalancer() {
diff --git a/jstests/sharding/regex_targeting.js b/jstests/sharding/regex_targeting.js
index df836cd8ef0..63dc78a61e2 100644
--- a/jstests/sharding/regex_targeting.js
+++ b/jstests/sharding/regex_targeting.js
@@ -70,84 +70,84 @@ assert.writeError(collHashed.insert({_id: /regex value/, hash: 0}));
//
//
// (For now) we can insert a regex shard key
-assert.writeOK(collSharded.insert({a: /regex value/}));
-assert.writeOK(collCompound.insert({a: /regex value/, b: "other value"}));
-assert.writeOK(collNested.insert({a: {b: /regex value/}}));
-assert.writeOK(collHashed.insert({hash: /regex value/}));
+assert.commandWorked(collSharded.insert({a: /regex value/}));
+assert.commandWorked(collCompound.insert({a: /regex value/, b: "other value"}));
+assert.commandWorked(collNested.insert({a: {b: /regex value/}}));
+assert.commandWorked(collHashed.insert({hash: /regex value/}));
//
//
// Query by regex should hit all matching keys, across all shards if applicable
coll.remove({});
-assert.writeOK(coll.insert({a: "abcde-0"}));
-assert.writeOK(coll.insert({a: "abcde-1"}));
-assert.writeOK(coll.insert({a: /abcde.*/}));
+assert.commandWorked(coll.insert({a: "abcde-0"}));
+assert.commandWorked(coll.insert({a: "abcde-1"}));
+assert.commandWorked(coll.insert({a: /abcde.*/}));
assert.eq(coll.find().itcount(), coll.find({a: /abcde.*/}).itcount());
collSharded.remove({});
-assert.writeOK(collSharded.insert({a: "abcde-0"}));
-assert.writeOK(collSharded.insert({a: "abcde-1"}));
-assert.writeOK(collSharded.insert({a: /abcde.*/}));
+assert.commandWorked(collSharded.insert({a: "abcde-0"}));
+assert.commandWorked(collSharded.insert({a: "abcde-1"}));
+assert.commandWorked(collSharded.insert({a: /abcde.*/}));
assert.eq(collSharded.find().itcount(), collSharded.find({a: /abcde.*/}).itcount());
collCompound.remove({});
-assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
-assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
-assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.commandWorked(collCompound.insert({a: "abcde-0", b: 0}));
+assert.commandWorked(collCompound.insert({a: "abcde-1", b: 0}));
+assert.commandWorked(collCompound.insert({a: /abcde.*/, b: 0}));
assert.eq(collCompound.find().itcount(), collCompound.find({a: /abcde.*/}).itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
-assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
-assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-0"}}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-1"}}));
+assert.commandWorked(collNested.insert({a: {b: /abcde.*/}}));
assert.eq(collNested.find().itcount(), collNested.find({'a.b': /abcde.*/}).itcount());
collHashed.remove({});
while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
+ assert.commandWorked(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
}
-assert.writeOK(collHashed.insert({hash: /abcde.*/}));
+assert.commandWorked(collHashed.insert({hash: /abcde.*/}));
assert.eq(collHashed.find().itcount(), collHashed.find({hash: /abcde.*/}).itcount());
//
//
// Update by regex should hit all matching keys, across all shards if applicable
coll.remove({});
-assert.writeOK(coll.insert({a: "abcde-0"}));
-assert.writeOK(coll.insert({a: "abcde-1"}));
-assert.writeOK(coll.insert({a: /abcde.*/}));
-assert.writeOK(coll.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(coll.insert({a: "abcde-0"}));
+assert.commandWorked(coll.insert({a: "abcde-1"}));
+assert.commandWorked(coll.insert({a: /abcde.*/}));
+assert.commandWorked(coll.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
assert.eq(coll.find().itcount(), coll.find({updated: true}).itcount());
collSharded.remove({});
-assert.writeOK(collSharded.insert({a: "abcde-0"}));
-assert.writeOK(collSharded.insert({a: "abcde-1"}));
-assert.writeOK(collSharded.insert({a: /abcde.*/}));
-assert.writeOK(collSharded.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(collSharded.insert({a: "abcde-0"}));
+assert.commandWorked(collSharded.insert({a: "abcde-1"}));
+assert.commandWorked(collSharded.insert({a: /abcde.*/}));
+assert.commandWorked(collSharded.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
assert.eq(collSharded.find().itcount(), collSharded.find({updated: true}).itcount());
collCompound.remove({});
-assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
-assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
-assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
-assert.writeOK(collCompound.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(collCompound.insert({a: "abcde-0", b: 0}));
+assert.commandWorked(collCompound.insert({a: "abcde-1", b: 0}));
+assert.commandWorked(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.commandWorked(collCompound.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
assert.eq(collCompound.find().itcount(), collCompound.find({updated: true}).itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
-assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
-assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
-assert.writeOK(collNested.update({'a.b': /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-0"}}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-1"}}));
+assert.commandWorked(collNested.insert({a: {b: /abcde.*/}}));
+assert.commandWorked(collNested.update({'a.b': /abcde.*/}, {$set: {updated: true}}, {multi: true}));
assert.eq(collNested.find().itcount(), collNested.find({updated: true}).itcount());
collHashed.remove({});
while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
+ assert.commandWorked(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
}
-assert.writeOK(collHashed.insert({hash: /abcde.*/}));
-assert.writeOK(collHashed.update({hash: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(collHashed.insert({hash: /abcde.*/}));
+assert.commandWorked(collHashed.update({hash: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
assert.eq(collHashed.find().itcount(), collHashed.find({updated: true}).itcount());
collSharded.remove({});
@@ -219,40 +219,40 @@ assert.commandFailedWithCode(collNested.update({c: 1}, {a: {b: /abcde.*/}}, {ups
//
// Remove by regex should hit all matching keys, across all shards if applicable
coll.remove({});
-assert.writeOK(coll.insert({a: "abcde-0"}));
-assert.writeOK(coll.insert({a: "abcde-1"}));
-assert.writeOK(coll.insert({a: /abcde.*/}));
-assert.writeOK(coll.remove({a: /abcde.*/}));
+assert.commandWorked(coll.insert({a: "abcde-0"}));
+assert.commandWorked(coll.insert({a: "abcde-1"}));
+assert.commandWorked(coll.insert({a: /abcde.*/}));
+assert.commandWorked(coll.remove({a: /abcde.*/}));
assert.eq(0, coll.find({}).itcount());
collSharded.remove({});
-assert.writeOK(collSharded.insert({a: "abcde-0"}));
-assert.writeOK(collSharded.insert({a: "abcde-1"}));
-assert.writeOK(collSharded.insert({a: /abcde.*/}));
-assert.writeOK(collSharded.remove({a: /abcde.*/}));
+assert.commandWorked(collSharded.insert({a: "abcde-0"}));
+assert.commandWorked(collSharded.insert({a: "abcde-1"}));
+assert.commandWorked(collSharded.insert({a: /abcde.*/}));
+assert.commandWorked(collSharded.remove({a: /abcde.*/}));
assert.eq(0, collSharded.find({}).itcount());
collCompound.remove({});
-assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
-assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
-assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
-assert.writeOK(collCompound.remove({a: /abcde.*/}));
+assert.commandWorked(collCompound.insert({a: "abcde-0", b: 0}));
+assert.commandWorked(collCompound.insert({a: "abcde-1", b: 0}));
+assert.commandWorked(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.commandWorked(collCompound.remove({a: /abcde.*/}));
assert.eq(0, collCompound.find({}).itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
-assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
-assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
-assert.writeOK(collNested.remove({'a.b': /abcde.*/}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-0"}}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-1"}}));
+assert.commandWorked(collNested.insert({a: {b: /abcde.*/}}));
+assert.commandWorked(collNested.remove({'a.b': /abcde.*/}));
assert.eq(0, collNested.find({}).itcount());
collHashed.remove({});
while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
+ assert.commandWorked(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
}
-assert.writeOK(collHashed.insert({hash: /abcde.*/}));
-assert.writeOK(collHashed.remove({hash: /abcde.*/}));
+assert.commandWorked(collHashed.insert({hash: /abcde.*/}));
+assert.commandWorked(collHashed.remove({hash: /abcde.*/}));
assert.eq(0, collHashed.find({}).itcount());
//
@@ -260,23 +260,24 @@ assert.eq(0, collHashed.find({}).itcount());
// Query/Update/Remove by nested regex is different depending on how the nested regex is
// specified
coll.remove({});
-assert.writeOK(coll.insert({a: {b: "abcde-0"}}));
-assert.writeOK(coll.insert({a: {b: "abcde-1"}}));
-assert.writeOK(coll.insert({a: {b: /abcde.*/}}));
+assert.commandWorked(coll.insert({a: {b: "abcde-0"}}));
+assert.commandWorked(coll.insert({a: {b: "abcde-1"}}));
+assert.commandWorked(coll.insert({a: {b: /abcde.*/}}));
assert.eq(1, coll.find({a: {b: /abcde.*/}}).itcount());
-assert.writeOK(coll.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(coll.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
assert.eq(1, coll.find({updated: true}).itcount());
-assert.writeOK(coll.remove({a: {b: /abcde.*/}}));
+assert.commandWorked(coll.remove({a: {b: /abcde.*/}}));
assert.eq(2, coll.find().itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
-assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
-assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-0"}}));
+assert.commandWorked(collNested.insert({a: {b: "abcde-1"}}));
+assert.commandWorked(collNested.insert({a: {b: /abcde.*/}}));
assert.eq(1, collNested.find({a: {b: /abcde.*/}}).itcount());
-assert.writeOK(collNested.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
+assert.commandWorked(
+ collNested.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
assert.eq(1, collNested.find({updated: true}).itcount());
-assert.writeOK(collNested.remove({a: {b: /abcde.*/}}));
+assert.commandWorked(collNested.remove({a: {b: /abcde.*/}}));
assert.eq(2, collNested.find().itcount());
st.stop();
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index eb7418b76ed..ee216f724bc 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -90,7 +90,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 300; i++) {
bulk.insert({i: i % 10, str: str});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.eq(300, coll.find().itcount());
diff --git a/jstests/sharding/rename.js b/jstests/sharding/rename.js
index bcd37cdf570..8597a4ae6c9 100644
--- a/jstests/sharding/rename.js
+++ b/jstests/sharding/rename.js
@@ -11,14 +11,14 @@ var s = new ShardingTest({shards: 2, mongos: 1, rs: {oplogSize: 10}});
var db = s.getDB("test");
var replTest = s.rs0;
-assert.writeOK(db.foo.insert({_id: 1}));
+assert.commandWorked(db.foo.insert({_id: 1}));
db.foo.renameCollection('bar');
assert.isnull(db.getLastError(), '1.0');
assert.eq(db.bar.findOne(), {_id: 1}, '1.1');
assert.eq(db.bar.count(), 1, '1.2');
assert.eq(db.foo.count(), 0, '1.3');
-assert.writeOK(db.foo.insert({_id: 2}));
+assert.commandWorked(db.foo.insert({_id: 2}));
db.foo.renameCollection('bar', true);
assert.isnull(db.getLastError(), '2.0');
assert.eq(db.bar.findOne(), {_id: 2}, '2.1');
@@ -50,7 +50,7 @@ assert.commandFailed(primary.getDB('test').bar.renameCollection('shardedColl', d
jsTest.log("Testing write concern (1)");
-assert.writeOK(db.foo.insert({_id: 3}));
+assert.commandWorked(db.foo.insert({_id: 3}));
db.foo.renameCollection('bar', true);
var ans = db.runCommand({getLastError: 1, w: 3});
@@ -75,7 +75,7 @@ let liveSlaves = replTest._slaves.filter(function(node) {
replTest.awaitSecondaryNodes(null, liveSlaves);
awaitRSClientHosts(s.s, replTest.getPrimary(), {ok: true, ismaster: true}, replTest.name);
-assert.writeOK(db.foo.insert({_id: 4}));
+assert.commandWorked(db.foo.insert({_id: 4}));
assert.commandWorked(db.foo.renameCollection('bar', true));
ans = db.runCommand({getLastError: 1, w: 3, wtimeout: 5000});
diff --git a/jstests/sharding/rename_across_mongos.js b/jstests/sharding/rename_across_mongos.js
index de2fa50bcea..0c378c5054a 100644
--- a/jstests/sharding/rename_across_mongos.js
+++ b/jstests/sharding/rename_across_mongos.js
@@ -10,7 +10,7 @@ st.s1.getDB(dbName).dropDatabase();
// Create collection on first mongos and insert a document
assert.commandWorked(st.s0.getDB(dbName).runCommand({create: 'CollNameBeforeRename'}));
-assert.writeOK(st.s0.getDB(dbName).CollNameBeforeRename.insert({Key: 1, Value: 1}));
+assert.commandWorked(st.s0.getDB(dbName).CollNameBeforeRename.insert({Key: 1, Value: 1}));
if (st.configRS) {
// Ensure that the second mongos will see the newly created database metadata when
diff --git a/jstests/sharding/replication_with_undefined_shard_key.js b/jstests/sharding/replication_with_undefined_shard_key.js
index 2da48889a4c..cc8f0e89f4a 100644
--- a/jstests/sharding/replication_with_undefined_shard_key.js
+++ b/jstests/sharding/replication_with_undefined_shard_key.js
@@ -15,15 +15,15 @@ assert.commandWorked(mongosDB.adminCommand({
}));
// Insert a document with a literal undefined value.
-assert.writeOK(mongosColl.insert({x: undefined}));
+assert.commandWorked(mongosColl.insert({x: undefined}));
jsTestLog("Doing writes that generate oplog entries including undefined document key");
-assert.writeOK(mongosColl.update(
+assert.commandWorked(mongosColl.update(
{},
{$set: {a: 1}},
{multi: true, writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMs}}));
-assert.writeOK(
+assert.commandWorked(
mongosColl.remove({}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMs}}));
st.stop();
diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js
index 453c4d980f1..402cc7f9016 100644
--- a/jstests/sharding/replmonitor_bad_seed.js
+++ b/jstests/sharding/replmonitor_bad_seed.js
@@ -43,7 +43,7 @@ replTest.awaitSecondaryNodes();
awaitRSClientHosts(st.s0, replTest.nodes, {ok: true});
replTest.awaitNodesAgreeOnPrimary();
-assert.writeOK(st.s0.getDB('test').user.insert({x: 1}));
+assert.commandWorked(st.s0.getDB('test').user.insert({x: 1}));
st.stop();
})();
diff --git a/jstests/sharding/resume_change_stream.js b/jstests/sharding/resume_change_stream.js
index 19c53012fda..0c4c788484f 100644
--- a/jstests/sharding/resume_change_stream.js
+++ b/jstests/sharding/resume_change_stream.js
@@ -54,20 +54,20 @@ function testResume(mongosColl, collToWatch) {
{moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
// Write a document to each chunk.
- assert.writeOK(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
let changeStream = cst.startWatchingChanges(
{pipeline: [{$changeStream: {}}], collection: collToWatch, includeToken: true});
// We awaited the replication of the first writes, so the change stream shouldn't return
// them.
- assert.writeOK(mongosColl.update({_id: -1}, {$set: {updated: true}}));
+ assert.commandWorked(mongosColl.update({_id: -1}, {$set: {updated: true}}));
// Record current time to resume a change stream later in the test.
const resumeTimeFirstUpdate = mongosDB.runCommand({isMaster: 1}).$clusterTime.clusterTime;
- assert.writeOK(mongosColl.update({_id: 1}, {$set: {updated: true}}));
+ assert.commandWorked(mongosColl.update({_id: 1}, {$set: {updated: true}}));
// Test that we see the two writes, and remember their resume tokens.
let next = cst.getOneChange(changeStream);
@@ -82,8 +82,8 @@ function testResume(mongosColl, collToWatch) {
// Write some additional documents, then test that it's possible to resume after the first
// update.
- assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
changeStream = cst.startWatchingChanges({
pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdateOnShard0}}],
@@ -115,7 +115,7 @@ function testResume(mongosColl, collToWatch) {
while (!oplogIsRolledOver()) {
let idVal = 100 + (i++);
- assert.writeOK(
+ assert.commandWorked(
mongosColl.insert({_id: idVal, long_str: largeStr}, {writeConcern: {w: "majority"}}));
sleep(100);
}
@@ -165,23 +165,23 @@ function testResume(mongosColl, collToWatch) {
// Insert test documents.
for (let counter = 0; counter < numberOfDocs / 5; ++counter) {
- assert.writeOK(mongosColl.insert({_id: "abcd" + counter, shardKey: counter * 5 + 0},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "Abcd" + counter, shardKey: counter * 5 + 1},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "aBcd" + counter, shardKey: counter * 5 + 2},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "abCd" + counter, shardKey: counter * 5 + 3},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "abcD" + counter, shardKey: counter * 5 + 4},
- {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: "abcd" + counter, shardKey: counter * 5 + 0},
+ {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: "Abcd" + counter, shardKey: counter * 5 + 1},
+ {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: "aBcd" + counter, shardKey: counter * 5 + 2},
+ {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: "abCd" + counter, shardKey: counter * 5 + 3},
+ {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mongosColl.insert({_id: "abcD" + counter, shardKey: counter * 5 + 4},
+ {writeConcern: {w: "majority"}}));
}
let allChangesCursor = cst.startWatchingChanges(
{pipeline: [{$changeStream: {}}], collection: collToWatch, includeToken: true});
// Perform the multi-update that will induce timestamp collisions
- assert.writeOK(mongosColl.update({}, {$set: {updated: true}}, {multi: true}));
+ assert.commandWorked(mongosColl.update({}, {$set: {updated: true}}, {multi: true}));
// Loop over documents and open inner change streams resuming from a specified position.
// Note we skip the last document as it does not have the next document so we would
diff --git a/jstests/sharding/resume_change_stream_from_stale_mongos.js b/jstests/sharding/resume_change_stream_from_stale_mongos.js
index fbc8bd904bb..d4d935eb6a5 100644
--- a/jstests/sharding/resume_change_stream_from_stale_mongos.js
+++ b/jstests/sharding/resume_change_stream_from_stale_mongos.js
@@ -34,8 +34,8 @@ st.ensurePrimaryShard(firstMongosDB.getName(), st.rs0.getURL());
// record a resume token after the first chunk migration.
let changeStream = firstMongosColl.aggregate([{$changeStream: {}}]);
-assert.writeOK(firstMongosColl.insert({_id: -1}));
-assert.writeOK(firstMongosColl.insert({_id: 1}));
+assert.commandWorked(firstMongosColl.insert({_id: -1}));
+assert.commandWorked(firstMongosColl.insert({_id: 1}));
for (let nextId of [-1, 1]) {
assert.soon(() => changeStream.hasNext());
@@ -54,8 +54,8 @@ assert.commandWorked(firstMongosDB.adminCommand(
{moveChunk: firstMongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
// Then do one insert to each shard.
-assert.writeOK(firstMongosColl.insert({_id: -2}));
-assert.writeOK(firstMongosColl.insert({_id: 2}));
+assert.commandWorked(firstMongosColl.insert({_id: -2}));
+assert.commandWorked(firstMongosColl.insert({_id: 2}));
// The change stream should see all the inserts after internally re-establishing cursors after
// the chunk split.
@@ -69,8 +69,8 @@ for (let nextId of [-2, 2]) {
}
// Do some writes that occur on each shard after the resume token.
-assert.writeOK(firstMongosColl.insert({_id: -3}));
-assert.writeOK(firstMongosColl.insert({_id: 3}));
+assert.commandWorked(firstMongosColl.insert({_id: -3}));
+assert.commandWorked(firstMongosColl.insert({_id: 3}));
// Now try to resume the change stream using a stale mongos which believes the collection is
// unsharded. The first mongos should use the shard versioning protocol to discover that the
diff --git a/jstests/sharding/resume_change_stream_on_subset_of_shards.js b/jstests/sharding/resume_change_stream_on_subset_of_shards.js
index b914a310e82..6176ead3664 100644
--- a/jstests/sharding/resume_change_stream_on_subset_of_shards.js
+++ b/jstests/sharding/resume_change_stream_on_subset_of_shards.js
@@ -40,13 +40,13 @@ assert.commandWorked(mongosDB.adminCommand(
let changeStream = mongosColl.watch();
// ... then do one write to produce a resume token...
-assert.writeOK(mongosColl.insert({_id: -2}));
+assert.commandWorked(mongosColl.insert({_id: -2}));
assert.soon(() => changeStream.hasNext());
const resumeToken = changeStream.next()._id;
// ... followed by one write to each chunk for testing purposes, i.e. shards 0 and 1.
-assert.writeOK(mongosColl.insert({_id: -1}));
-assert.writeOK(mongosColl.insert({_id: 1}));
+assert.commandWorked(mongosColl.insert({_id: -1}));
+assert.commandWorked(mongosColl.insert({_id: 1}));
// The change stream should see all the inserts after establishing cursors on all shards.
for (let nextId of [-1, 1]) {
@@ -58,7 +58,7 @@ for (let nextId of [-1, 1]) {
}
// Insert another document after storing the resume token.
-assert.writeOK(mongosColl.insert({_id: 2}));
+assert.commandWorked(mongosColl.insert({_id: 2}));
// Resume the change stream and verify that it correctly sees the next insert. This is meant
// to test resuming a change stream when not all shards are aware that the collection exists,
diff --git a/jstests/sharding/retryable_writes.js b/jstests/sharding/retryable_writes.js
index d35172edf3a..9da8f40effa 100644
--- a/jstests/sharding/retryable_writes.js
+++ b/jstests/sharding/retryable_writes.js
@@ -141,8 +141,8 @@ function runTests(mainConn, priConn) {
initialStatus = priConn.adminCommand({serverStatus: 1});
verifyServerStatusFields(initialStatus);
- assert.writeOK(testDBMain.user.insert({_id: 40, x: 1}));
- assert.writeOK(testDBMain.user.insert({_id: 50, y: 1}));
+ assert.commandWorked(testDBMain.user.insert({_id: 40, x: 1}));
+ assert.commandWorked(testDBMain.user.insert({_id: 50, y: 1}));
assert.eq(2, testDBPri.user.find({x: 1}).itcount());
assert.eq(2, testDBPri.user.find({y: 1}).itcount());
@@ -294,8 +294,8 @@ function runTests(mainConn, priConn) {
initialStatus = priConn.adminCommand({serverStatus: 1});
verifyServerStatusFields(initialStatus);
- assert.writeOK(testDBMain.user.insert({_id: 70, f: 1}));
- assert.writeOK(testDBMain.user.insert({_id: 80, f: 1}));
+ assert.commandWorked(testDBMain.user.insert({_id: 70, f: 1}));
+ assert.commandWorked(testDBMain.user.insert({_id: 80, f: 1}));
cmd = {
findAndModify: 'user',
diff --git a/jstests/sharding/return_partial_shards_down.js b/jstests/sharding/return_partial_shards_down.js
index edf537d4ed1..cd9b405d4f4 100644
--- a/jstests/sharding/return_partial_shards_down.js
+++ b/jstests/sharding/return_partial_shards_down.js
@@ -40,7 +40,7 @@ st.printShardingStatus();
var inserts = [{_id: -1}, {_id: 1}, {_id: 1000}];
collOneShard.insert(inserts);
-assert.writeOK(collAllShards.insert(inserts));
+assert.commandWorked(collAllShards.insert(inserts));
var returnPartialFlag = 1 << 7;
diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js
index 08a7c3a017a..e1954ce0c8a 100644
--- a/jstests/sharding/safe_secondary_reads_drop_recreate.js
+++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js
@@ -72,7 +72,7 @@ let testCases = {
addShardToZone: {skip: "primary only"},
aggregate: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
checkResults: function(res) {
@@ -107,7 +107,7 @@ let testCases = {
convertToCapped: {skip: "primary only"},
count: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {count: coll, query: {x: 1}},
checkResults: function(res) {
@@ -128,8 +128,8 @@ let testCases = {
delete: {skip: "primary only"},
distinct: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {distinct: coll, key: "x"},
checkResults: function(res) {
@@ -156,7 +156,7 @@ let testCases = {
filemd5: {skip: "does not return user data"},
find: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {find: coll, filter: {x: 1}},
checkResults: function(res) {
@@ -207,8 +207,8 @@ let testCases = {
makeSnapshot: {skip: "does not return user data"},
mapReduce: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {
mapReduce: coll,
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
index 78ade52128e..3853784a980 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
@@ -79,7 +79,7 @@ let testCases = {
addShardToZone: {skip: "primary only"},
aggregate: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
checkResults: function(res) {
@@ -120,7 +120,7 @@ let testCases = {
convertToCapped: {skip: "primary only"},
count: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {count: coll, query: {x: 1}},
checkResults: function(res) {
@@ -147,8 +147,8 @@ let testCases = {
delete: {skip: "primary only"},
distinct: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {distinct: coll, key: "x"},
checkResults: function(res) {
@@ -180,7 +180,7 @@ let testCases = {
filemd5: {skip: "does not return user data"},
find: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {find: coll, filter: {x: 1}},
checkResults: function(res) {
@@ -237,8 +237,8 @@ let testCases = {
makeSnapshot: {skip: "does not return user data"},
mapReduce: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {
mapReduce: coll,
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
index 541124a7048..8557f2a8b53 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
@@ -72,7 +72,7 @@ let testCases = {
addShardToZone: {skip: "primary only"},
aggregate: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
checkResults: function(res) {
@@ -108,7 +108,7 @@ let testCases = {
convertToCapped: {skip: "primary only"},
count: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {count: coll, query: {x: 1}},
checkResults: function(res) {
@@ -130,8 +130,8 @@ let testCases = {
delete: {skip: "primary only"},
distinct: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {distinct: coll, key: "x"},
checkResults: function(res) {
@@ -158,7 +158,7 @@ let testCases = {
filemd5: {skip: "does not return user data"},
find: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {find: coll, filter: {x: 1}},
checkResults: function(res) {
@@ -210,8 +210,8 @@ let testCases = {
makeSnapshot: {skip: "does not return user data"},
mapReduce: {
setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.commandWorked(mongosConn.getCollection(nss).insert({x: 1}));
},
command: {
mapReduce: coll,
diff --git a/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js b/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js
index 6c8b150aebb..af7d221d17a 100644
--- a/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js
+++ b/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js
@@ -25,7 +25,7 @@ let freshMongos = st.s0;
let staleMongos = st.s1;
jsTest.log("do insert from stale mongos to make it load the routing table before the move");
-assert.writeOK(staleMongos.getCollection(ns).insert({x: 1}));
+assert.commandWorked(staleMongos.getCollection(ns).insert({x: 1}));
jsTest.log("do moveChunk from fresh mongos");
assert.commandWorked(freshMongos.adminCommand({
diff --git a/jstests/sharding/secondary_shard_versioning.js b/jstests/sharding/secondary_shard_versioning.js
index 94e49c09a5d..a5c684dccb1 100644
--- a/jstests/sharding/secondary_shard_versioning.js
+++ b/jstests/sharding/secondary_shard_versioning.js
@@ -20,7 +20,7 @@ let freshMongos = st.s0;
let staleMongos = st.s1;
jsTest.log("do insert from stale mongos to make it load the routing table before the move");
-assert.writeOK(staleMongos.getDB('test').foo.insert({x: 1}));
+assert.commandWorked(staleMongos.getDB('test').foo.insert({x: 1}));
jsTest.log("do moveChunk from fresh mongos");
assert.commandWorked(freshMongos.adminCommand({
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index f54a70aad7c..b4c7c0842d8 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -7,9 +7,9 @@
var s = new ShardingTest({shards: 2});
var db = s.getDB("test");
-assert.writeOK(db.foo.insert({num: 1, name: "eliot"}));
-assert.writeOK(db.foo.insert({num: 2, name: "sara"}));
-assert.writeOK(db.foo.insert({num: -1, name: "joe"}));
+assert.commandWorked(db.foo.insert({num: 1, name: "eliot"}));
+assert.commandWorked(db.foo.insert({num: 2, name: "sara"}));
+assert.commandWorked(db.foo.insert({num: -1, name: "joe"}));
assert.commandWorked(db.foo.ensureIndex({num: 1}));
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index fd8d8657af6..2bcb9f99843 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -28,9 +28,9 @@ assert.eq(2, s.config.chunks.count({"ns": "test.foo"}), "should be 2 shards");
var chunks = s.config.chunks.find({"ns": "test.foo"}).toArray();
assert.eq(chunks[0].shard, chunks[1].shard, "server should be the same after a split");
-assert.writeOK(db.foo.save({num: 1, name: "eliot"}));
-assert.writeOK(db.foo.save({num: 2, name: "sara"}));
-assert.writeOK(db.foo.save({num: -1, name: "joe"}));
+assert.commandWorked(db.foo.save({num: 1, name: "eliot"}));
+assert.commandWorked(db.foo.save({num: 2, name: "sara"}));
+assert.commandWorked(db.foo.save({num: -1, name: "joe"}));
assert.eq(
3, s.getPrimaryShard("test").getDB("test").foo.find().length(), "not right directly to db A");
@@ -64,15 +64,15 @@ assert.neq(chunks[0].shard, chunks[1].shard, "servers should NOT be the same aft
placeCheck(3);
// Test inserts go to right server/shard
-assert.writeOK(db.foo.save({num: 3, name: "bob"}));
+assert.commandWorked(db.foo.save({num: 3, name: "bob"}));
assert.eq(1, primary.foo.find().length(), "after move insert go wrong place?");
assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
-assert.writeOK(db.foo.save({num: -2, name: "funny man"}));
+assert.commandWorked(db.foo.save({num: -2, name: "funny man"}));
assert.eq(2, primary.foo.find().length(), "after move insert go wrong place?");
assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
-assert.writeOK(db.foo.save({num: 0, name: "funny guy"}));
+assert.commandWorked(db.foo.save({num: 0, name: "funny guy"}));
assert.eq(2, primary.foo.find().length(), "boundary A");
assert.eq(4, secondary.foo.find().length(), "boundary B");
@@ -197,7 +197,7 @@ placeCheck(8);
printAll();
var total = db.foo.find().count();
-var res = assert.writeOK(db.foo.update({}, {$inc: {x: 1}}, false, true));
+var res = assert.commandWorked(db.foo.update({}, {$inc: {x: 1}}, false, true));
printAll();
assert.eq(total, res.nModified, res.toString());
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index d0957a1c45d..74dcb30c25e 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -75,7 +75,7 @@ function doCounts(name, total, onlyItCounts) {
}
var total = doCounts("before wrong save");
-assert.writeOK(secondary.insert({_id: 111, num: -3}));
+assert.commandWorked(secondary.insert({_id: 111, num: -3}));
doCounts("after wrong save", total, true);
e = a.find().explain("executionStats").executionStats;
assert.eq(3, e.nReturned, "ex1");
diff --git a/jstests/sharding/shard7.js b/jstests/sharding/shard7.js
index 20122b60e24..094688d6e37 100644
--- a/jstests/sharding/shard7.js
+++ b/jstests/sharding/shard7.js
@@ -41,9 +41,9 @@ assert.eq(0, aggregate.toArray().length);
c.save({a: null, b: null});
c.save({a: 1, b: 1});
-assert.writeOK(c.remove(unsatisfiable));
+assert.commandWorked(c.remove(unsatisfiable));
assert.eq(2, c.count());
-assert.writeOK(c.update(unsatisfiable, {$set: {c: 1}}, false, true));
+assert.commandWorked(c.update(unsatisfiable, {$set: {c: 1}}, false, true));
assert.eq(2, c.count());
assert.eq(0, c.count({c: 1}));
diff --git a/jstests/sharding/shard_aware_init.js b/jstests/sharding/shard_aware_init.js
index 5ed9e129a4d..ea5f350483b 100644
--- a/jstests/sharding/shard_aware_init.js
+++ b/jstests/sharding/shard_aware_init.js
@@ -79,7 +79,7 @@ var runTest = function(mongodConn, configConnStr, awaitVersionUpdate) {
};
// Simulate the upsert that is performed by a config server on addShard.
- assert.writeOK(mongodConn.getDB('admin').system.version.update(
+ assert.commandWorked(mongodConn.getDB('admin').system.version.update(
{
_id: shardIdentityDoc._id,
shardName: shardIdentityDoc.shardName,
diff --git a/jstests/sharding/shard_aware_init_secondaries.js b/jstests/sharding/shard_aware_init_secondaries.js
index a1387592212..bc919924a37 100644
--- a/jstests/sharding/shard_aware_init_secondaries.js
+++ b/jstests/sharding/shard_aware_init_secondaries.js
@@ -37,7 +37,7 @@ var shardIdentityQuery = {
var shardIdentityUpdate = {
$set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}
};
-assert.writeOK(priConn.getDB('admin').system.version.update(
+assert.commandWorked(priConn.getDB('admin').system.version.update(
shardIdentityQuery, shardIdentityUpdate, {upsert: true, writeConcern: {w: 2}}));
var secConn = replTest.getSecondary();
diff --git a/jstests/sharding/shard_aware_primary_failover.js b/jstests/sharding/shard_aware_primary_failover.js
index 9e7f572c3e9..d26c4e34a1c 100644
--- a/jstests/sharding/shard_aware_primary_failover.js
+++ b/jstests/sharding/shard_aware_primary_failover.js
@@ -37,7 +37,7 @@ var shardIdentityQuery = {
var shardIdentityUpdate = {
$set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}
};
-assert.writeOK(primaryConn.getDB('admin').system.version.update(
+assert.commandWorked(primaryConn.getDB('admin').system.version.update(
shardIdentityQuery, shardIdentityUpdate, {upsert: true, writeConcern: {w: 'majority'}}));
replTest.stopMaster();
diff --git a/jstests/sharding/shard_collection_basic.js b/jstests/sharding/shard_collection_basic.js
index f417cdc4165..aa9be219496 100644
--- a/jstests/sharding/shard_collection_basic.js
+++ b/jstests/sharding/shard_collection_basic.js
@@ -55,7 +55,7 @@ function getIndexSpecByName(coll, indexName) {
// Fail if db is not sharded.
assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
// Fail if db is not sharding enabled.
assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
@@ -71,7 +71,7 @@ assert.commandFailed(mongos.adminCommand({shardCollection: 'foo', key: "aaa"}));
assert.commandFailed(
mongos.getDB('test').runCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
// Can't shard if key is not specified.
assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo'}));
@@ -119,28 +119,28 @@ testAndClenaupWithKeyNoIndexOK({_id: 'hashed'});
testAndClenaupWithKeyNoIndexOK({a: 1});
// Cant shard collection with data and no index on the shard key.
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
testAndClenaupWithKeyNoIndexFailed({a: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
testAndClenaupWithKeyOK({a: 1});
// Shard by a hashed key.
testAndClenaupWithKeyNoIndexOK({a: 'hashed'});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
testAndClenaupWithKeyOK({a: 'hashed'});
// Shard by a compound key.
testAndClenaupWithKeyNoIndexOK({x: 1, y: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
testAndClenaupWithKeyNoIndexFailed({x: 1, y: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
testAndClenaupWithKeyOK({x: 1, y: 1});
testAndClenaupWithKeyNoIndexFailed({x: 'hashed', y: 1});
@@ -152,21 +152,21 @@ testAndClenaupWithKeyOK({'z.x': 'hashed'});
// Can't shard by a multikey.
assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
testAndClenaupWithKeyNoIndexFailed({a: 1});
assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1, b: 1}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
testAndClenaupWithKeyOK({a: 'hashed'});
// Cant shard by a parallel arrays.
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: [1, 2, 3, 4, 5]}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: [1, 2, 3, 4, 5]}));
testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
@@ -260,7 +260,7 @@ assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key
// shard key as a prefix has a non-simple collation.
mongos.getDB(kDbName).foo.drop();
assert.commandWorked(mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 'foo'}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 'foo'}));
// This index will inherit the collection's default collation.
assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
assert.commandFailed(mongos.adminCommand(
diff --git a/jstests/sharding/shard_collection_existing_zones.js b/jstests/sharding/shard_collection_existing_zones.js
index 8030b40ee9a..eb7343f221c 100644
--- a/jstests/sharding/shard_collection_existing_zones.js
+++ b/jstests/sharding/shard_collection_existing_zones.js
@@ -130,7 +130,7 @@ function testNonemptyZonedCollection() {
[{min: {x: 0}, max: {x: 10}}, {min: {x: 10}, max: {x: 20}}, {min: {x: 20}, max: {x: 40}}];
for (let i = 0; i < 40; i++) {
- assert.writeOK(testColl.insert({x: i}));
+ assert.commandWorked(testColl.insert({x: i}));
}
assert.commandWorked(testColl.createIndex(shardKey));
diff --git a/jstests/sharding/shard_collection_verify_initial_chunks.js b/jstests/sharding/shard_collection_verify_initial_chunks.js
index 65c5897371e..38e3d4d67a6 100644
--- a/jstests/sharding/shard_collection_verify_initial_chunks.js
+++ b/jstests/sharding/shard_collection_verify_initial_chunks.js
@@ -27,7 +27,7 @@ assert.commandFailed(mongos.adminCommand(
{shardCollection: 'TestDB.RangeCollEmpty', key: {aKey: 1}, numInitialChunks: 6}));
// Unsupported: Hashed sharding + numInitialChunks + non-empty collection
-assert.writeOK(db.HashedCollNotEmpty.insert({aKey: 1}));
+assert.commandWorked(db.HashedCollNotEmpty.insert({aKey: 1}));
assert.commandWorked(db.HashedCollNotEmpty.createIndex({aKey: "hashed"}));
assert.commandFailed(mongos.adminCommand(
{shardCollection: 'TestDB.HashedCollNotEmpty', key: {aKey: "hashed"}, numInitialChunks: 6}));
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index 8a5c19d1eb9..6e3242647f8 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -17,7 +17,7 @@ var bulk = db.data.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
bulk.insert({_id: i, s: bigString});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
var avgObjSize = db.data.stats().avgObjSize;
var dataSize = db.data.stats().size;
diff --git a/jstests/sharding/shard_existing_coll_chunk_count.js b/jstests/sharding/shard_existing_coll_chunk_count.js
index 91a6abca2ee..7ee54444976 100644
--- a/jstests/sharding/shard_existing_coll_chunk_count.js
+++ b/jstests/sharding/shard_existing_coll_chunk_count.js
@@ -52,7 +52,7 @@ var runCase = function(opts) {
for (; i < limit; i++) {
bulk.insert({i, pad});
}
- assert.writeOK(bulk.execute());
+ assert.commandWorked(bulk.execute());
// Create shard key index.
assert.commandWorked(coll.createIndex({i: 1}));
@@ -132,7 +132,7 @@ runCase({
// Lower chunksize to 1MB, and restart the mongod for it to take. We also
// need to restart mongos for the case of the last-stable suite where the
// shard is also last-stable.
-assert.writeOK(
+assert.commandWorked(
s.getDB("config").getCollection("settings").update({_id: "chunksize"}, {$set: {value: 1}}, {
upsert: true
}));
diff --git a/jstests/sharding/shard_identity_rollback.js b/jstests/sharding/shard_identity_rollback.js
index fd437c2ea1b..fe5a24c3e96 100644
--- a/jstests/sharding/shard_identity_rollback.js
+++ b/jstests/sharding/shard_identity_rollback.js
@@ -37,7 +37,7 @@ var shardIdentityDoc = {
clusterId: ObjectId()
};
-assert.writeOK(priConn.getDB('admin').system.version.update(
+assert.commandWorked(priConn.getDB('admin').system.version.update(
{_id: 'shardIdentity'}, shardIdentityDoc, {upsert: true}));
// Ensure sharding state on the primary was initialized
@@ -71,7 +71,7 @@ restartServerReplication(secondaries);
// Wait for a new healthy primary
var newPriConn = replTest.getPrimary();
assert.neq(priConn, newPriConn);
-assert.writeOK(newPriConn.getDB('test').foo.insert({a: 1}, {writeConcern: {w: 'majority'}}));
+assert.commandWorked(newPriConn.getDB('test').foo.insert({a: 1}, {writeConcern: {w: 'majority'}}));
// Restart the original primary so it triggers a rollback of the shardIdentity insert.
jsTest.log("Restarting original primary");
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index a068da936fb..d5ee0becf29 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -34,7 +34,7 @@ var bulk = testDBReplSet1.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
bulk.insert({x: i, text: textString});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Get connection to mongos for the cluster
var mongosConn = shardingTest.s;
@@ -50,7 +50,7 @@ assert.commandWorked(mongosConn.getDB('admin').runCommand(
{shardcollection: testDBName + '.' + testCollName, key: {x: 1}}));
// Test case where GLE should return an error
-assert.writeOK(testDB.foo.insert({_id: 'a', x: 1}));
+assert.commandWorked(testDB.foo.insert({_id: 'a', x: 1}));
assert.writeError(testDB.foo.insert({_id: 'a', x: 1}, {writeConcern: {w: 2, wtimeout: 30000}}));
// Add more data
@@ -58,7 +58,7 @@ bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = numDocs; i < 2 * numDocs; i++) {
bulk.insert({x: i, text: textString});
}
-assert.writeOK(bulk.execute({w: replNodes, wtimeout: 30000}));
+assert.commandWorked(bulk.execute({w: replNodes, wtimeout: 30000}));
// Take down two nodes and make sure slaveOk reads still work
var primary = replSet1._master;
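Note the asymmetry the commit preserves, visible in the hunk above: writes expected to succeed move to assert.commandWorked(), while writes expected to fail (here, a duplicate _id) keep assert.writeError(). A short illustration against a hypothetical collection:

// 'dupes' is a hypothetical collection; the _id value is illustrative.
var coll = db.getSiblingDB("test").dupes;
assert.commandWorked(coll.insert({_id: "a", x: 1}));  // first insert succeeds
assert.writeError(coll.insert({_id: "a", x: 1}));     // duplicate key must fail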
diff --git a/jstests/sharding/shard_keycount.js b/jstests/sharding/shard_keycount.js
index 3076dde5b7e..29a6ee2fc02 100644
--- a/jstests/sharding/shard_keycount.js
+++ b/jstests/sharding/shard_keycount.js
@@ -29,12 +29,12 @@ var coll = db.getCollection(collName);
// Split chunk again
assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
-assert.writeOK(coll.update({_id: 3}, {_id: 3}));
+assert.commandWorked(coll.update({_id: 3}, {_id: 3}));
// Split chunk again
assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
-assert.writeOK(coll.update({_id: 3}, {_id: 3}));
+assert.commandWorked(coll.update({_id: 3}, {_id: 3}));
// Split chunk again
assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
diff --git a/jstests/sharding/shard_kill_and_pooling.js b/jstests/sharding/shard_kill_and_pooling.js
index 13715d62ddc..93d43b0aa98 100644
--- a/jstests/sharding/shard_kill_and_pooling.js
+++ b/jstests/sharding/shard_kill_and_pooling.js
@@ -22,7 +22,7 @@ for (var test = 0; test < 2; test++) {
var coll = mongos.getCollection("foo.bar");
var db = coll.getDB();
- assert.writeOK(coll.insert({hello: "world"}));
+ assert.commandWorked(coll.insert({hello: "world"}));
jsTest.log("Creating new connections...");
diff --git a/jstests/sharding/sharded_limit_batchsize.js b/jstests/sharding/sharded_limit_batchsize.js
index e7f1f589ca9..9113b7dd8ba 100644
--- a/jstests/sharding/sharded_limit_batchsize.js
+++ b/jstests/sharding/sharded_limit_batchsize.js
@@ -101,14 +101,14 @@ assert.commandWorked(db.adminCommand(
// Write 20 documents which all go to the primary shard in the unsharded collection.
for (var i = 1; i <= 10; ++i) {
// These go to shard 1.
- assert.writeOK(shardedCol.insert({_id: i, x: i}));
+ assert.commandWorked(shardedCol.insert({_id: i, x: i}));
// These go to shard 0.
- assert.writeOK(shardedCol.insert({_id: -i, x: -i}));
+ assert.commandWorked(shardedCol.insert({_id: -i, x: -i}));
// These go to shard 0 inside the non-sharded collection.
- assert.writeOK(unshardedCol.insert({_id: i, x: i}));
- assert.writeOK(unshardedCol.insert({_id: -i, x: -i}));
+ assert.commandWorked(unshardedCol.insert({_id: i, x: i}));
+ assert.commandWorked(unshardedCol.insert({_id: -i, x: -i}));
}
//
diff --git a/jstests/sharding/sharded_profile.js b/jstests/sharding/sharded_profile.js
index c38b178f73a..2f2289182e5 100644
--- a/jstests/sharding/sharded_profile.js
+++ b/jstests/sharding/sharded_profile.js
@@ -23,7 +23,7 @@ var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
var inserts = [{_id: 0}, {_id: 1}, {_id: 2}];
-assert.writeOK(st.s1.getCollection(coll.toString()).insert(inserts));
+assert.commandWorked(st.s1.getCollection(coll.toString()).insert(inserts));
profileEntry = profileColl.findOne();
assert.neq(null, profileEntry);
diff --git a/jstests/sharding/sharding_balance1.js b/jstests/sharding/sharding_balance1.js
index f07708d2d23..d6e0384d7f7 100644
--- a/jstests/sharding/sharding_balance1.js
+++ b/jstests/sharding/sharding_balance1.js
@@ -20,7 +20,7 @@ while (inserted < (20 * 1024 * 1024)) {
bulk.insert({_id: num++, s: bigString});
inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/sharding_balance2.js
index 5dd2eabebfb..50365ace129 100644
--- a/jstests/sharding/sharding_balance2.js
+++ b/jstests/sharding/sharding_balance2.js
@@ -34,7 +34,7 @@ while (inserted < (40 * 1024 * 1024)) {
bulk.insert({_id: num++, s: bigString});
inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
assert.gt(s.config.chunks.count({"ns": "test.foo"}), 10);
@@ -60,7 +60,7 @@ while (getShardSize(shardConn) < maxSizeBytes) {
for (var x = 0; x < 20; x++) {
localBulk.insert({x: x, val: bigString});
}
- assert.writeOK(localBulk.execute());
+ assert.commandWorked(localBulk.execute());
// Force the storage engine to flush files to disk so shardSize will get updated.
assert.commandWorked(shardConn.getDB('admin').runCommand({fsync: 1}));
diff --git a/jstests/sharding/sharding_balance3.js b/jstests/sharding/sharding_balance3.js
index d51f5d41d32..4600861ede5 100644
--- a/jstests/sharding/sharding_balance3.js
+++ b/jstests/sharding/sharding_balance3.js
@@ -31,7 +31,7 @@ while (inserted < (40 * 1024 * 1024)) {
bulk.insert({_id: num++, s: bigString});
inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
index e97a6366120..57dfe723830 100644
--- a/jstests/sharding/sharding_balance4.js
+++ b/jstests/sharding/sharding_balance4.js
@@ -62,7 +62,7 @@ for (i = 0; i < N; i++) {
for (i = 0; i < N * 9; i++) {
doUpdate(bulk, false);
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
for (var i = 0; i < 50; i++) {
s.printChunks("test.foo");
@@ -130,7 +130,7 @@ function diff1() {
}
}
- assert.writeOK(res);
+ assert.commandWorked(res);
});
} else {
consecutiveNoProgressMadeErrors = 0;
diff --git a/jstests/sharding/sharding_migrate_cursor1.js b/jstests/sharding/sharding_migrate_cursor1.js
index f196381528e..3bac25b5e34 100644
--- a/jstests/sharding/sharding_migrate_cursor1.js
+++ b/jstests/sharding/sharding_migrate_cursor1.js
@@ -34,7 +34,7 @@ var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
bulk.insert({_id: i, s: bigString});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
diff --git a/jstests/sharding/sharding_multiple_ns_rs.js b/jstests/sharding/sharding_multiple_ns_rs.js
index cd4a70fda15..96584ab161d 100644
--- a/jstests/sharding/sharding_multiple_ns_rs.js
+++ b/jstests/sharding/sharding_multiple_ns_rs.js
@@ -22,8 +22,8 @@ for (var i = 0; i < 100; i++) {
bulk.insert({_id: i, x: i});
bulk2.insert({_id: i, x: i});
}
-assert.writeOK(bulk.execute());
-assert.writeOK(bulk2.execute());
+assert.commandWorked(bulk.execute());
+assert.commandWorked(bulk2.execute());
s.splitAt("test.foo", {_id: 50});
diff --git a/jstests/sharding/sharding_rs1.js b/jstests/sharding/sharding_rs1.js
index af021bf9741..d15f97bafd6 100644
--- a/jstests/sharding/sharding_rs1.js
+++ b/jstests/sharding/sharding_rs1.js
@@ -23,7 +23,7 @@ while (insertedBytes < (10 * 1024 * 1024)) {
bulk.insert({_id: num++, s: bigString, x: Math.random()});
insertedBytes += bigString.length;
}
-assert.writeOK(bulk.execute({w: 3}));
+assert.commandWorked(bulk.execute({w: 3}));
assert.commandWorked(s.s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
index 2b35cf695e4..d2a6f0e2b74 100644
--- a/jstests/sharding/sharding_rs2.js
+++ b/jstests/sharding/sharding_rs2.js
@@ -129,7 +129,7 @@ for (var i = 0; i < 100; i++) {
continue;
bulk.insert({x: i});
}
-assert.writeOK(bulk.execute({w: 3}));
+assert.commandWorked(bulk.execute({w: 3}));
// Counts pass the options of the connection - which is slaveOk'd, so we need to wait for
// replication for this and future tests to pass
@@ -225,7 +225,7 @@ rs.getSecondaries().forEach(function(secondary) {
// Modify data only on the primary replica of the primary shard.
// { x: 60 } goes to the shard of "rs", which is the primary shard.
-assert.writeOK(ts.insert({primaryOnly: true, x: 60}));
+assert.commandWorked(ts.insert({primaryOnly: true, x: 60}));
// Read from secondary through mongos, the doc is not there due to replication delay or fsync.
// But we can guarantee not to read from primary.
assert.eq(0, ts.find({primaryOnly: true, x: 60}).itcount());
@@ -234,7 +234,7 @@ rs.getSecondaries().forEach(function(secondary) {
secondary.getDB("test").fsyncUnlock();
});
// Clean up the data
-assert.writeOK(ts.remove({primaryOnly: true, x: 60}, {writeConcern: {w: 3}}));
+assert.commandWorked(ts.remove({primaryOnly: true, x: 60}, {writeConcern: {w: 3}}));
for (var i = 0; i < 10; i++) {
m = new Mongo(s.s.name);
diff --git a/jstests/sharding/sharding_statistics_server_status.js b/jstests/sharding/sharding_statistics_server_status.js
index 575a84f2152..54421d568d9 100644
--- a/jstests/sharding/sharding_statistics_server_status.js
+++ b/jstests/sharding/sharding_statistics_server_status.js
@@ -87,7 +87,7 @@ incrementStatsAndCheckServerShardStats(stats[0], stats[1], numDocsInserted);
// Insert docs and then move chunk again from shard1 to shard0.
for (let i = 0; i < numDocsToInsert; ++i) {
- assert.writeOK(coll.insert({_id: i}));
+ assert.commandWorked(coll.insert({_id: i}));
++numDocsInserted;
}
assert.commandWorked(mongos.adminCommand(
diff --git a/jstests/sharding/shards_and_config_return_last_committed_optime.js b/jstests/sharding/shards_and_config_return_last_committed_optime.js
index 780090d9f67..2268240a52d 100644
--- a/jstests/sharding/shards_and_config_return_last_committed_optime.js
+++ b/jstests/sharding/shards_and_config_return_last_committed_optime.js
@@ -116,7 +116,7 @@ let secondary = st.rs0.getSecondary();
st.rs0.awaitLastOpCommitted();
stopServerReplication(secondary);
-assert.writeOK(primary.getDB("test").foo.insert({x: 1}, {writeConcern: {w: 1}}));
+assert.commandWorked(primary.getDB("test").foo.insert({x: 1}, {writeConcern: {w: 1}}));
// Sharded collection.
assertReturnsLastCommittedOpTime(primary.getDB("test"), "foo", "sharding-aware shard primary");
@@ -141,7 +141,7 @@ secondary = st.configRS.getSecondary();
st.configRS.awaitLastOpCommitted();
stopServerReplication(secondary);
-assert.writeOK(primary.getDB("config").foo.insert({x: 1}, {writeConcern: {w: 1}}));
+assert.commandWorked(primary.getDB("config").foo.insert({x: 1}, {writeConcern: {w: 1}}));
assertReturnsLastCommittedOpTime(primary.getDB("test"), "foo", "config server primary");
assertReturnsLastCommittedOpTime(secondary.getDB("test"), "foo", "config server secondary");
diff --git a/jstests/sharding/snapshot_cursor_commands_mongos.js b/jstests/sharding/snapshot_cursor_commands_mongos.js
index a853cc10942..e71fffdfdc4 100644
--- a/jstests/sharding/snapshot_cursor_commands_mongos.js
+++ b/jstests/sharding/snapshot_cursor_commands_mongos.js
@@ -199,7 +199,7 @@ function runTest(testScenario, {useCausalConsistency, commands, collName}) {
// Insert an 11th document which should not be visible to the snapshot cursor. This
// write is performed outside of the session.
- assert.writeOK(mainDb[collName].insert({_id: 10}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mainDb[collName].insert({_id: 10}, {writeConcern: {w: "majority"}}));
verifyInvalidGetMoreAttempts(mainDb, collName, cursorId, lsid, txnNumber);
@@ -247,7 +247,7 @@ function runTest(testScenario, {useCausalConsistency, commands, collName}) {
assert.eq(11, res.cursor.firstBatch.length);
// Remove the 11th document to preserve the collection for the next command.
- assert.writeOK(mainDb[collName].remove({_id: 10}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(mainDb[collName].remove({_id: 10}, {writeConcern: {w: "majority"}}));
assert.commandWorked(session.commitTransaction_forTesting());
session.endSession();
diff --git a/jstests/sharding/split_with_force.js b/jstests/sharding/split_with_force.js
index 7a40714c35c..ba2af4a6ed2 100644
--- a/jstests/sharding/split_with_force.js
+++ b/jstests/sharding/split_with_force.js
@@ -24,7 +24,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < (250 * 1000) + 10; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("Insert a bunch of data into the rest of the collection...");
@@ -32,7 +32,7 @@ bulk = coll.initializeUnorderedBulkOp();
for (var i = 1; i <= (250 * 1000); i++) {
bulk.insert({_id: -i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("Get split points of the chunk using force : true...");
diff --git a/jstests/sharding/split_with_force_small.js b/jstests/sharding/split_with_force_small.js
index 8e281dcbe20..f45dda6892e 100644
--- a/jstests/sharding/split_with_force_small.js
+++ b/jstests/sharding/split_with_force_small.js
@@ -27,7 +27,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 1024; i++) {
bulk.insert({_id: -(i + 1)});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("Insert 32 docs into the high chunk of a collection");
@@ -35,7 +35,7 @@ bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 32; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("Split off MaxKey chunk...");
diff --git a/jstests/sharding/stale_mongos_updates_and_removes.js b/jstests/sharding/stale_mongos_updates_and_removes.js
index 85eb196e93e..44343e53a63 100644
--- a/jstests/sharding/stale_mongos_updates_and_removes.js
+++ b/jstests/sharding/stale_mongos_updates_and_removes.js
@@ -27,8 +27,8 @@ function resetCollection() {
assert.commandWorked(staleMongos.adminCommand({shardCollection: collNS, key: {x: 1}}));
for (let i = 0; i < numShardKeys; i++) {
- assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
- assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
+ assert.commandWorked(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
+ assert.commandWorked(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
}
// Make sure data has replicated to all config servers so freshMongos finds a sharded
@@ -116,7 +116,7 @@ function checkAllRemoveQueries(makeMongosStaleFunc) {
function doRemove(query, multiOption, makeMongosStaleFunc) {
makeMongosStaleFunc();
- assert.writeOK(staleMongos.getCollection(collNS).remove(query, multiOption));
+ assert.commandWorked(staleMongos.getCollection(collNS).remove(query, multiOption));
if (multiOption.justOne) {
// A total of one document should have been removed from the collection.
assert.eq(numDocs - 1, staleMongos.getCollection(collNS).find().itcount());
@@ -159,7 +159,7 @@ function checkAllUpdateQueries(makeMongosStaleFunc) {
function doUpdate(query, update, multiOption, makeMongosStaleFunc) {
makeMongosStaleFunc();
- assert.writeOK(staleMongos.getCollection(collNS).update(query, update, multiOption));
+ assert.commandWorked(staleMongos.getCollection(collNS).update(query, update, multiOption));
if (multiOption.multi) {
// All documents matching the query should have been updated.
assert.eq(staleMongos.getCollection(collNS).find(query).itcount(),
diff --git a/jstests/sharding/stale_version_write.js b/jstests/sharding/stale_version_write.js
index 1183e369b2e..065218712bd 100644
--- a/jstests/sharding/stale_version_write.js
+++ b/jstests/sharding/stale_version_write.js
@@ -10,10 +10,10 @@ var mongosB = st.s1;
jsTest.log("Adding new collections...");
var collA = mongosA.getCollection(jsTestName() + ".coll");
-assert.writeOK(collA.insert({hello: "world"}));
+assert.commandWorked(collA.insert({hello: "world"}));
var collB = mongosB.getCollection("" + collA);
-assert.writeOK(collB.insert({hello: "world"}));
+assert.commandWorked(collB.insert({hello: "world"}));
jsTest.log("Enabling sharding...");
@@ -26,7 +26,7 @@ collA.findOne();
jsTest.log("Trigger shard version mismatch...");
-assert.writeOK(collB.insert({goodbye: "world"}));
+assert.commandWorked(collB.insert({goodbye: "world"}));
print("Inserted...");
diff --git a/jstests/sharding/startup_with_all_configs_down.js b/jstests/sharding/startup_with_all_configs_down.js
index 21fd233944c..31d84293d6b 100644
--- a/jstests/sharding/startup_with_all_configs_down.js
+++ b/jstests/sharding/startup_with_all_configs_down.js
@@ -29,7 +29,7 @@ var st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
jsTestLog("Setting up initial data");
for (var i = 0; i < 100; i++) {
- assert.writeOK(st.s.getDB('test').foo.insert({_id: i}));
+ assert.commandWorked(st.s.getDB('test').foo.insert({_id: i}));
}
assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js
index 8354b00114f..0f9f00a667c 100644
--- a/jstests/sharding/stats.js
+++ b/jstests/sharding/stats.js
@@ -37,7 +37,7 @@ s.adminCommand({
var bulk = db.foo.initializeUnorderedBulkOp();
for (i = 0; i < N; i++)
bulk.insert({_id: i});
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
// Flush all writes to disk since some of the stats are dependent on state in disk (like
// totalIndexSize).
diff --git a/jstests/sharding/test_stacked_migration_cleanup.js b/jstests/sharding/test_stacked_migration_cleanup.js
index b85a188938d..86441a17cd6 100644
--- a/jstests/sharding/test_stacked_migration_cleanup.js
+++ b/jstests/sharding/test_stacked_migration_cleanup.js
@@ -29,7 +29,7 @@ var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 10 * 1000; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
jsTest.log("Opening a mongod cursor...");
@@ -53,7 +53,7 @@ bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < numChunks; i++) {
bulk.insert({_id: i});
}
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
sleep(10 * 1000);
diff --git a/jstests/sharding/time_zone_info_mongos.js b/jstests/sharding/time_zone_info_mongos.js
index 73b59b16f7a..5ed55fdf879 100644
--- a/jstests/sharding/time_zone_info_mongos.js
+++ b/jstests/sharding/time_zone_info_mongos.js
@@ -48,8 +48,8 @@ assert.commandWorked(mongosDB.adminCommand(
{moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
// Write a document containing a 'date' field to each chunk.
-assert.writeOK(mongosColl.insert({_id: -1, date: ISODate("2017-11-13T12:00:00.000+0000")}));
-assert.writeOK(mongosColl.insert({_id: 1, date: ISODate("2017-11-13T03:00:00.000+0600")}));
+assert.commandWorked(mongosColl.insert({_id: -1, date: ISODate("2017-11-13T12:00:00.000+0000")}));
+assert.commandWorked(mongosColl.insert({_id: 1, date: ISODate("2017-11-13T03:00:00.000+0600")}));
// Constructs a pipeline which splits the 'date' field into its constituent parts on mongoD,
// reassembles the original date on mongoS, and verifies that the two match. All timezone
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index bbf930c9b8f..b648e2afcab 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -10,7 +10,7 @@ function shardSetup(shardConfig, dbName, collName) {
var db = st.getDB(dbName);
// Set the balancer mode to only balance on autoSplit
- assert.writeOK(st.s.getDB('config').settings.update(
+ assert.commandWorked(st.s.getDB('config').settings.update(
{_id: 'balancer'},
{'$unset': {stopped: ''}, '$set': {mode: 'autoSplitOnly'}},
{writeConcern: {w: 'majority'}}));
@@ -108,7 +108,7 @@ function runTest(test) {
}
}
- assert.writeOK(configDB.tags.remove({ns: db + "." + collName}));
+ assert.commandWorked(configDB.tags.remove({ns: db + "." + collName}));
// End of test cleanup
}
diff --git a/jstests/sharding/trace_missing_docs_test.js b/jstests/sharding/trace_missing_docs_test.js
index 329ad529ac3..ab7b1c058a8 100644
--- a/jstests/sharding/trace_missing_docs_test.js
+++ b/jstests/sharding/trace_missing_docs_test.js
@@ -23,9 +23,9 @@ var testDocMissing = function(useReplicaSet) {
coll.ensureIndex({sk: 1});
assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {sk: 1}}));
- assert.writeOK(coll.insert({_id: 12345, sk: 67890, hello: "world"}));
- assert.writeOK(coll.update({_id: 12345}, {$set: {baz: 'biz'}}));
- assert.writeOK(coll.update({sk: 67890}, {$set: {baz: 'boz'}}));
+ assert.commandWorked(coll.insert({_id: 12345, sk: 67890, hello: "world"}));
+ assert.commandWorked(coll.update({_id: 12345}, {$set: {baz: 'biz'}}));
+ assert.commandWorked(coll.update({sk: 67890}, {$set: {baz: 'boz'}}));
assert.commandWorked(admin.runCommand(
{moveChunk: coll + "", find: {sk: 0}, to: st.shard1.shardName, _waitForDelete: true}));
diff --git a/jstests/sharding/transactions_causal_consistency.js b/jstests/sharding/transactions_causal_consistency.js
index e2f6a9aed58..e67fd331bd1 100644
--- a/jstests/sharding/transactions_causal_consistency.js
+++ b/jstests/sharding/transactions_causal_consistency.js
@@ -29,8 +29,10 @@ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
// Verifies transactions using causal consistency read all causally prior operations.
function runTest(st, readConcern) {
@@ -69,7 +71,7 @@ function runTest(st, readConcern) {
// Clean up for the next iteration.
assert.commandWorked(
st.s.adminCommand({moveChunk: ns, find: docToInsert, to: st.shard1.shardName}));
- assert.writeOK(sessionDB[collName].remove(docToInsert));
+ assert.commandWorked(sessionDB[collName].remove(docToInsert));
}
const kAllowedReadConcernLevels = ["local", "majority", "snapshot"];
diff --git a/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js b/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js
index 11a2c39997f..b87b5b25e9a 100644
--- a/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js
+++ b/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js
@@ -27,8 +27,8 @@ const session = st.s.startSession();
const unshardedCollDB = session.getDatabase(unshardedDbName);
const shardedCollDB = session.getDatabase(shardedDbName);
-assert.writeOK(unshardedCollDB[unshardedCollName].insert({_id: "jack"}));
-assert.writeOK(shardedCollDB[shardedCollName].insert({_id: "jack"}));
+assert.commandWorked(unshardedCollDB[unshardedCollName].insert({_id: "jack"}));
+assert.commandWorked(shardedCollDB[shardedCollName].insert({_id: "jack"}));
// Reload metadata to avoid stale config or stale database version errors.
flushRoutersAndRefreshShardMetadata(st, {ns: shardedNs, dbNames: [unshardedDbName]});
diff --git a/jstests/sharding/transactions_implicit_abort.js b/jstests/sharding/transactions_implicit_abort.js
index 003b6e4cefe..09845a80493 100644
--- a/jstests/sharding/transactions_implicit_abort.js
+++ b/jstests/sharding/transactions_implicit_abort.js
@@ -15,8 +15,10 @@ const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
// Set up a sharded collection with one chunk on each shard.
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
diff --git a/jstests/sharding/transactions_multi_writes.js b/jstests/sharding/transactions_multi_writes.js
index e4c8b43cd95..f24adef1dc7 100644
--- a/jstests/sharding/transactions_multi_writes.js
+++ b/jstests/sharding/transactions_multi_writes.js
@@ -30,9 +30,9 @@ assert.commandWorked(st.s.adminCommand({split: ns, middle: {skey: 10}}));
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {skey: 5}, to: st.shard1.shardName}));
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {skey: 15}, to: st.shard2.shardName}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 3, counter: 0, skey: 15}));
+assert.commandWorked(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
+assert.commandWorked(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
+assert.commandWorked(st.s.getDB(dbName)[collName].insert({_id: 3, counter: 0, skey: 15}));
// Runs the given multi-write and asserts a manually inserted orphan document is not affected.
// The write is assumed to target chunks [min, 0) and [0, 10), which begin on shard0 and shard1,
@@ -58,7 +58,8 @@ function runTest(st, session, writeCmd, staleRouter) {
}
const orphanShardDB = st[orphanShardName].getPrimary().getDB(dbName);
- assert.writeOK(orphanShardDB[collName].insert(orphanDoc, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(
+ orphanShardDB[collName].insert(orphanDoc, {writeConcern: {w: "majority"}}));
// Start a transaction with majority read concern to ensure the orphan will be visible if
// its shard is targeted and send the multi-write.
@@ -98,13 +99,13 @@ function runTest(st, session, writeCmd, staleRouter) {
// Reset the database state for the next iteration.
if (isUpdate) {
- assert.writeOK(sessionDB[collName].update({}, {$set: {counter: 0}}, {multi: true}));
+ assert.commandWorked(sessionDB[collName].update({}, {$set: {counter: 0}}, {multi: true}));
} else { // isDelete
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
+ assert.commandWorked(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
+ assert.commandWorked(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
}
- assert.writeOK(orphanShardDB[collName].remove({skey: orphanDoc.skey}));
+ assert.commandWorked(orphanShardDB[collName].remove({skey: orphanDoc.skey}));
if (staleRouter) {
// Move the chunk back with the main router so it isn't stale.
diff --git a/jstests/sharding/transactions_read_concerns.js b/jstests/sharding/transactions_read_concerns.js
index af2c24b2b02..17fee3c6404 100644
--- a/jstests/sharding/transactions_read_concerns.js
+++ b/jstests/sharding/transactions_read_concerns.js
@@ -17,8 +17,10 @@ const st = new ShardingTest({shards: 2, config: 1});
// Set up a sharded collection with 2 chunks, one on each shard.
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
@@ -65,7 +67,7 @@ function runTest(st, readConcern, sessionOptions) {
assert.commandWorked(session.commitTransaction_forTesting());
// Clean up for the next iteration.
- assert.writeOK(sessionDB[collName].remove({_id: 5}));
+ assert.commandWorked(sessionDB[collName].remove({_id: 5}));
}
// Specifying no read concern level is allowed and should not compute a global snapshot.
diff --git a/jstests/sharding/transactions_reject_writes_for_moved_chunks.js b/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
index 12c7fa1fab3..f02c181c3d4 100644
--- a/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
+++ b/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
@@ -26,8 +26,10 @@ const st = new ShardingTest({shards: 3, mongos: 1, config: 1});
// Set up one sharded collection with 2 chunks, both on the primary shard.
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
@@ -155,8 +157,8 @@ function runTest(testCase, moveChunkBack) {
assert.commandWorked(
st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
}
- assert.writeOK(sessionColl.remove({}));
- assert.writeOK(sessionColl.insert([{_id: 5}, {_id: -5}]));
+ assert.commandWorked(sessionColl.remove({}));
+ assert.commandWorked(sessionColl.insert([{_id: 5}, {_id: -5}]));
}
kCommandTestCases.forEach(testCase => runTest(testCase, false /*moveChunkBack*/));
diff --git a/jstests/sharding/transactions_snapshot_errors_first_statement.js b/jstests/sharding/transactions_snapshot_errors_first_statement.js
index 3b0f5f74953..ed503f89c25 100644
--- a/jstests/sharding/transactions_snapshot_errors_first_statement.js
+++ b/jstests/sharding/transactions_snapshot_errors_first_statement.js
@@ -71,7 +71,7 @@ function runTest(st, collName, numShardsToError, errorCode, isSharded) {
// Clean up after insert to avoid duplicate key errors.
if (commandName === "insert") {
- assert.writeOK(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
+ assert.commandWorked(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
}
//
@@ -89,7 +89,7 @@ function runTest(st, collName, numShardsToError, errorCode, isSharded) {
// Clean up after insert to avoid duplicate key errors.
if (commandName === "insert") {
- assert.writeOK(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
+ assert.commandWorked(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
}
//
@@ -118,7 +118,8 @@ enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
jsTestLog("Unsharded transaction");
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
for (let errorCode of kSnapshotErrors) {
@@ -132,7 +133,8 @@ st.ensurePrimaryShard(dbName, st.shard0.shardName);
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
jsTestLog("One shard sharded transaction");
diff --git a/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js b/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
index d3ac5f6e15c..87a29e271fa 100644
--- a/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
+++ b/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
@@ -76,7 +76,8 @@ enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
jsTestLog("Unsharded transaction");
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
// Single shard case simulates the storage engine discarding an in-use snapshot.
@@ -91,7 +92,8 @@ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
// Set up 2 chunks, [minKey, 10), [10, maxKey), each with one document (includes the document
// already inserted).
assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
jsTestLog("One shard transaction");
diff --git a/jstests/sharding/transactions_stale_database_version_errors.js b/jstests/sharding/transactions_stale_database_version_errors.js
index e92aed58b9c..8507030ba4c 100644
--- a/jstests/sharding/transactions_stale_database_version_errors.js
+++ b/jstests/sharding/transactions_stale_database_version_errors.js
@@ -15,7 +15,8 @@ enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
// Set up two unsharded collections in different databases with shard0 as their primary.
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
@@ -45,7 +46,8 @@ session.startTransaction();
const dbName2 = "test2";
const sessionDB2 = session.getDatabase(dbName2);
-assert.writeOK(st.s.getDB(dbName2)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName2)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: dbName2}));
st.ensurePrimaryShard(dbName2, st.shard1.shardName);
@@ -70,7 +72,7 @@ assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.N
const otherDbName = "other_test";
const otherCollName = "bar";
-assert.writeOK(
+assert.commandWorked(
st.s.getDB(otherDbName)[otherCollName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: otherDbName}));
st.ensurePrimaryShard(otherDbName, st.shard0.shardName);
@@ -80,7 +82,7 @@ const sessionOtherDB = session.getDatabase(otherDbName);
// Advance the router's cached last committed opTime for Shard0, so it chooses a read timestamp
// after the collection is created on shard1, to avoid SnapshotUnavailable.
assert.commandWorked(sessionOtherDB.runCommand({find: otherCollName})); // Not database versioned.
-assert.writeOK(sessionDB[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(sessionDB[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
session.startTransaction();
diff --git a/jstests/sharding/transactions_stale_shard_version_errors.js b/jstests/sharding/transactions_stale_shard_version_errors.js
index 3bc71a01083..2ff76e94b0c 100644
--- a/jstests/sharding/transactions_stale_shard_version_errors.js
+++ b/jstests/sharding/transactions_stale_shard_version_errors.js
@@ -34,8 +34,10 @@ assert.commandWorked(st.rs2.getPrimary().adminCommand(
// Shard two collections in the same database, each with 2 chunks, [minKey, 0), [0, maxKey),
// with one document each, all on Shard0.
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
@@ -48,9 +50,10 @@ expectChunks(st, ns, [2, 0, 0]);
const otherCollName = "bar";
const otherNs = dbName + "." + otherCollName;
-assert.writeOK(
+assert.commandWorked(
st.s.getDB(dbName)[otherCollName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(st.s.getDB(dbName)[otherCollName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[otherCollName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({shardCollection: otherNs, key: {_id: 1}}));
assert.commandWorked(st.s.adminCommand({split: otherNs, middle: {_id: 0}}));
diff --git a/jstests/sharding/transactions_target_at_point_in_time.js b/jstests/sharding/transactions_target_at_point_in_time.js
index 3cdfb4b49fe..1e1de688828 100644
--- a/jstests/sharding/transactions_target_at_point_in_time.js
+++ b/jstests/sharding/transactions_target_at_point_in_time.js
@@ -28,8 +28,10 @@ const st = new ShardingTest({shards: 3, mongos: 1, config: 1});
// Set up one sharded collection with 2 chunks, both on the primary shard.
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
diff --git a/jstests/sharding/transactions_view_resolution.js b/jstests/sharding/transactions_view_resolution.js
index 1a8224ee089..25e4c083e55 100644
--- a/jstests/sharding/transactions_view_resolution.js
+++ b/jstests/sharding/transactions_view_resolution.js
@@ -23,7 +23,7 @@ const unshardedViewName = "unsharded_view";
const viewOnShardedViewName = "sharded_view_view";
function setUpUnshardedCollectionAndView(st, session, primaryShard) {
- assert.writeOK(st.s.getDB(unshardedDbName)[unshardedCollName].insert(
+ assert.commandWorked(st.s.getDB(unshardedDbName)[unshardedCollName].insert(
{_id: 1, x: "unsharded"}, {writeConcern: {w: "majority"}}));
st.ensurePrimaryShard(unshardedDbName, primaryShard);
@@ -37,9 +37,9 @@ function setUpUnshardedCollectionAndView(st, session, primaryShard) {
function setUpShardedCollectionAndView(st, session, primaryShard) {
const ns = shardedDbName + "." + shardedCollName;
- assert.writeOK(st.s.getDB(shardedDbName)[shardedCollName].insert(
+ assert.commandWorked(st.s.getDB(shardedDbName)[shardedCollName].insert(
{_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(shardedDbName)[shardedCollName].insert(
+ assert.commandWorked(st.s.getDB(shardedDbName)[shardedCollName].insert(
{_id: 1}, {writeConcern: {w: "majority"}}));
assert.commandWorked(st.s.adminCommand({enableSharding: shardedDbName}));
st.ensurePrimaryShard(shardedDbName, primaryShard);
@@ -270,7 +270,7 @@ function assertAggResultEqInTransaction(coll, pipeline, expected) {
// is supported.
const lookupDbName = "dbForLookup";
const lookupCollName = "collForLookup";
-assert.writeOK(
+assert.commandWorked(
st.s.getDB(lookupDbName)[lookupCollName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
const lookupColl = session.getDatabase(unshardedDbName)[unshardedCollName];
diff --git a/jstests/sharding/transactions_writes_not_retryable.js b/jstests/sharding/transactions_writes_not_retryable.js
index 99dc2155469..7c33eab52cb 100644
--- a/jstests/sharding/transactions_writes_not_retryable.js
+++ b/jstests/sharding/transactions_writes_not_retryable.js
@@ -97,7 +97,8 @@ const sessionDB = session.getDatabase(dbName);
// Unsharded.
jsTestLog("Testing against unsharded collection");
-assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
kCmdTestCases.forEach(cmdTestCase => {
runTest(st, session, sessionDB, cmdTestCase.name, cmdTestCase.command, false /*isSharded*/);
diff --git a/jstests/sharding/txn_recover_decision_using_recovery_router.js b/jstests/sharding/txn_recover_decision_using_recovery_router.js
index d148c0fdfbf..47e0f835305 100644
--- a/jstests/sharding/txn_recover_decision_using_recovery_router.js
+++ b/jstests/sharding/txn_recover_decision_using_recovery_router.js
@@ -245,7 +245,8 @@ txnNumber++;
const recoveryToken = startNewMultiShardWriteTransaction();
assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
-assert.writeOK(st.rs1.getPrimary().getDB("config").transactions.remove({}, false /* justOne */));
+assert.commandWorked(
+ st.rs1.getPrimary().getDB("config").transactions.remove({}, false /* justOne */));
assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
ErrorCodes.NoSuchTransaction);
diff --git a/jstests/sharding/txn_writes_during_movechunk.js b/jstests/sharding/txn_writes_during_movechunk.js
index 357ea22e14e..8ad9237f24c 100644
--- a/jstests/sharding/txn_writes_during_movechunk.js
+++ b/jstests/sharding/txn_writes_during_movechunk.js
@@ -13,8 +13,8 @@ st.ensurePrimaryShard('test', st.shard0.shardName);
assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
let coll = st.s.getDB('test').user;
-assert.writeOK(coll.insert({_id: 'updateMe'}));
-assert.writeOK(coll.insert({_id: 'deleteMe'}));
+assert.commandWorked(coll.insert({_id: 'updateMe'}));
+assert.commandWorked(coll.insert({_id: 'deleteMe'}));
pauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
diff --git a/jstests/sharding/unique_index_on_shardservers.js b/jstests/sharding/unique_index_on_shardservers.js
index 4ee9bb007d1..2af5b7a418d 100644
--- a/jstests/sharding/unique_index_on_shardservers.js
+++ b/jstests/sharding/unique_index_on_shardservers.js
@@ -10,7 +10,8 @@ let rs = st.rs0;
// Create `test.coll` and add some indexes on it:
// with index versions as default, v=1 and v=2; both unique and standard types
-assert.writeOK(mongos.getDB("test").coll.insert({_id: 1, a: 1, b: 1, c: 1, d: 1, e: 1, f: 1}));
+assert.commandWorked(
+ mongos.getDB("test").coll.insert({_id: 1, a: 1, b: 1, c: 1, d: 1, e: 1, f: 1}));
assert.commandWorked(mongos.getDB("test").coll.createIndex({a: 1}, {"v": 1}));
assert.commandWorked(mongos.getDB("test").coll.createIndex({b: 1}, {"v": 1, "unique": true}));
assert.commandWorked(mongos.getDB("test").coll.createIndex({c: 1}, {"v": 2}));
diff --git a/jstests/sharding/unowned_doc_filtering.js b/jstests/sharding/unowned_doc_filtering.js
index 5a337aaa454..3567b9c4dda 100644
--- a/jstests/sharding/unowned_doc_filtering.js
+++ b/jstests/sharding/unowned_doc_filtering.js
@@ -25,7 +25,7 @@ var inserts = [];
for (var i = 0; i < 100; i++) {
inserts.push({x: i});
}
-assert.writeOK(testDB.foo.insert(inserts));
+assert.commandWorked(testDB.foo.insert(inserts));
assert.commandWorked(testDB.adminCommand({split: 'test.foo', find: {x: 50}}));
assert.commandWorked(
diff --git a/jstests/sharding/unsharded_collection_targetting.js b/jstests/sharding/unsharded_collection_targetting.js
index 5393a212ae4..4ae771e3d6e 100644
--- a/jstests/sharding/unsharded_collection_targetting.js
+++ b/jstests/sharding/unsharded_collection_targetting.js
@@ -22,11 +22,11 @@ st.ensurePrimaryShard(mongosDB.getName(), st.rs1.getURL());
// about the location of the collection before the move.
const mongos2DB = st.s1.getDB(testName);
const mongos2Coll = mongos2DB[testName];
-assert.writeOK(mongos2Coll.insert({_id: 0, a: 0}));
+assert.commandWorked(mongos2Coll.insert({_id: 0, a: 0}));
st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-assert.writeOK(mongos2Coll.insert({_id: 1, a: 0}));
+assert.commandWorked(mongos2Coll.insert({_id: 1, a: 0}));
st.stop();
})();
diff --git a/jstests/sharding/update_immutable_fields.js b/jstests/sharding/update_immutable_fields.js
index 96bf4f454dc..73c3d460403 100644
--- a/jstests/sharding/update_immutable_fields.js
+++ b/jstests/sharding/update_immutable_fields.js
@@ -43,18 +43,18 @@ shard0Coll.remove({});
assert.writeError(shard0Coll.save({_id: 3}));
// Full shard key in save
-assert.writeOK(shard0Coll.save({_id: 1, a: 1}));
+assert.commandWorked(shard0Coll.save({_id: 1, a: 1}));
// Full shard key on replacement (basically the same as above)
shard0Coll.remove({});
-assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}, true));
+assert.commandWorked(shard0Coll.update({_id: 1}, {a: 1}, true));
// Full shard key after $set
shard0Coll.remove({});
-assert.writeOK(shard0Coll.update({_id: 1}, {$set: {a: 1}}, true));
+assert.commandWorked(shard0Coll.update({_id: 1}, {$set: {a: 1}}, true));
// Update existing doc (replacement), same shard key value
-assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}));
+assert.commandWorked(shard0Coll.update({_id: 1}, {a: 1}));
// Update existing doc ($set), same shard key value
assert.commandWorked(shard0Coll.update({_id: 1}, {$set: {a: 1}}));
@@ -74,7 +74,7 @@ assert.writeError(shard0Coll.update({_id: 1}, {$unset: {a: 1}}));
// Error due to removing all the embedded fields.
shard0Coll.remove({});
-assert.writeOK(shard0Coll.save({_id: 2, a: {c: 1, b: 1}}));
+assert.commandWorked(shard0Coll.save({_id: 2, a: {c: 1, b: 1}}));
assert.writeError(shard0Coll.update({}, {$unset: {"a.c": 1}}));
assert.writeError(shard0Coll.update({}, {$unset: {"a.b": 1, "a.c": 1}}));
diff --git a/jstests/sharding/update_sharded.js b/jstests/sharding/update_sharded.js
index ea1939bfd72..c0466216647 100644
--- a/jstests/sharding/update_sharded.js
+++ b/jstests/sharding/update_sharded.js
@@ -51,7 +51,7 @@ for (let i = 0; i < 2; i++) {
assert.writeError(coll.update({_id: 1, key: 1}, {$set: {key: 2}}));
assert.eq(coll.findOne({_id: 1}).key, 1, 'key unchanged');
- assert.writeOK(coll.update({_id: 1, key: 1}, {$set: {foo: 2}}));
+ assert.commandWorked(coll.update({_id: 1, key: 1}, {$set: {foo: 2}}));
coll.update({key: 17}, {$inc: {x: 5}}, true);
assert.eq(5, coll.findOne({key: 17}).x, "up1");
@@ -60,12 +60,12 @@ for (let i = 0; i < 2; i++) {
assert.eq(5, coll.findOne({key: 18}).x, "up2");
// Make sure we can extract exact _id from certain queries
- assert.writeOK(coll.update({_id: ObjectId()}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$or: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$and: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({_id: ObjectId()}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({_id: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({_id: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({$or: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({$and: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({_id: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
// Invalid extraction of exact _id from query
assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
@@ -77,12 +77,12 @@ for (let i = 0; i < 2; i++) {
assert.writeError(coll.update({'_id.x': ObjectId()}, {$set: {x: 1}}, {multi: false}));
// Make sure we can extract exact shard key from certain queries
- assert.writeOK(coll.update({key: ObjectId()}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({key: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({key: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({key: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$or: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$and: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({key: ObjectId()}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({key: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({key: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({key: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({$or: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(coll.update({$and: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
// Invalid extraction of exact key from query
assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
@@ -93,16 +93,20 @@ for (let i = 0; i < 2; i++) {
assert[hashedKey ? "writeError" : "writeOK"](
coll.update({key: {$gt: 0}}, {$set: {x: 1}}, {multi: false}));
// Note: {key:-1} and {key:-2} fall on shard0 for both hashed and ascending shardkeys.
- assert.writeOK(coll.update({$or: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$and: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(
+ coll.update({$or: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(
+ coll.update({$and: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
// In cases where an inexact query does target multiple shards, single update is rejected.
assert.writeError(coll.update({key: {$gt: MinKey}}, {$set: {x: 1}}, {multi: false}));
assert.writeError(coll.update({$or: [{key: -10}, {key: 10}]}, {$set: {x: 1}}, {multi: false}));
// Make sure failed shard key or _id extraction doesn't affect the other
- assert.writeOK(coll.update({'_id.x': ObjectId(), key: 1}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: ObjectId(), 'key.x': 1}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(
+ coll.update({'_id.x': ObjectId(), key: 1}, {$set: {x: 1}}, {multi: false}));
+ assert.commandWorked(
+ coll.update({_id: ObjectId(), 'key.x': 1}, {$set: {x: 1}}, {multi: false}));
}
s.stop();
diff --git a/jstests/sharding/upsert_sharded.js b/jstests/sharding/upsert_sharded.js
index 32a59b9a586..eb92f0c41b4 100644
--- a/jstests/sharding/upsert_sharded.js
+++ b/jstests/sharding/upsert_sharded.js
@@ -20,7 +20,7 @@ var upsertedResult = function(query, expr) {
};
var upsertedField = function(query, expr, fieldName) {
- assert.writeOK(upsertedResult(query, expr));
+ assert.commandWorked(upsertedResult(query, expr));
return coll.findOne()[fieldName];
};
diff --git a/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js b/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js
index 2d6b4c57020..8b49447bed6 100644
--- a/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js
+++ b/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js
@@ -22,7 +22,7 @@ for (let i = 0; i < 3; i++) {
// It shouldn't matter whether the collection existed on the shard already or not; test
// both cases.
if (i === 0) {
- assert.writeOK(st.s.getDB(db).getCollection(coll).insert({x: 1}));
+ assert.commandWorked(st.s.getDB(db).getCollection(coll).insert({x: 1}));
}
assert.commandWorked(st.s.adminCommand({shardCollection: nss, key: {_id: 1}}));
diff --git a/jstests/sharding/validate_collection.js b/jstests/sharding/validate_collection.js
index 0584c2a8c63..647d5c95378 100644
--- a/jstests/sharding/validate_collection.js
+++ b/jstests/sharding/validate_collection.js
@@ -19,12 +19,12 @@ var s = st.s;
var testDb = st.getDB('test');
function setup() {
- assert.writeOK(testDb.test.insert({_id: 0}));
- assert.writeOK(testDb.test.insert({_id: 1}));
+ assert.commandWorked(testDb.test.insert({_id: 0}));
+ assert.commandWorked(testDb.test.insert({_id: 1}));
- assert.writeOK(testDb.dummy.insert({_id: 0}));
- assert.writeOK(testDb.dummy.insert({_id: 1}));
- assert.writeOK(testDb.dummy.insert({_id: 2}));
+ assert.commandWorked(testDb.dummy.insert({_id: 0}));
+ assert.commandWorked(testDb.dummy.insert({_id: 1}));
+ assert.commandWorked(testDb.dummy.insert({_id: 2}));
}
function validate(valid) {
diff --git a/jstests/sharding/view_rewrite.js b/jstests/sharding/view_rewrite.js
index e0177f84b80..dae49dc2b0b 100644
--- a/jstests/sharding/view_rewrite.js
+++ b/jstests/sharding/view_rewrite.js
@@ -41,7 +41,7 @@ assert.commandWorked(
mongosDB.adminCommand({moveChunk: coll.getFullName(), find: {a: 5}, to: "view_rewrite-rs1"}));
for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
assert.commandWorked(mongosDB.createView("view", coll.getName(), []));
diff --git a/jstests/sharding/views.js b/jstests/sharding/views.js
index 876406902a6..48b724a6a99 100644
--- a/jstests/sharding/views.js
+++ b/jstests/sharding/views.js
@@ -58,7 +58,7 @@ assert.commandWorked(
db.adminCommand({moveChunk: coll.getFullName(), find: {a: 25}, to: st.shard1.shardName}));
for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+ assert.commandWorked(coll.insert({a: i}));
}
assert.commandWorked(db.createView("view", coll.getName(), [{$match: {a: {$gte: 4}}}]));
diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js
index 0c808102bf3..197d29ccc90 100644
--- a/jstests/sharding/write_cmd_auto_split.js
+++ b/jstests/sharding/write_cmd_auto_split.js
@@ -22,7 +22,7 @@ assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());
// a max chunk size of 1MB we'd expect the autosplitter to split this into
// at least 3 chunks
for (var x = 0; x < 3100; x++) {
- assert.writeOK(testDB.runCommand(
+ assert.commandWorked(testDB.runCommand(
{insert: 'insert', documents: [{x: x, v: doc1k}], ordered: false, writeConcern: {w: 1}}));
}
@@ -41,7 +41,7 @@ assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key:
assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
for (var x = 0; x < 2100; x++) {
- assert.writeOK(testDB.runCommand({
+ assert.commandWorked(testDB.runCommand({
update: 'update',
updates: [{q: {x: x}, u: {x: x, v: doc1k}, upsert: true}],
ordered: false,
@@ -62,7 +62,7 @@ assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key:
assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
for (var x = 0; x < 1100; x++) {
- assert.writeOK(testDB.runCommand({
+ assert.commandWorked(testDB.runCommand({
delete: 'delete',
deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
ordered: false,
@@ -94,7 +94,7 @@ for (var x = 0; x < 2100; x += 400) {
docs.push({x: (x + y), v: doc1k});
}
- assert.writeOK(testDB.runCommand(
+ assert.commandWorked(testDB.runCommand(
{insert: 'insert', documents: docs, ordered: false, writeConcern: {w: 1}}));
}
@@ -118,7 +118,7 @@ for (var x = 0; x < 2100; x += 400) {
docs.push({q: {x: id}, u: {x: id, v: doc1k}, upsert: true});
}
- assert.writeOK(
+ assert.commandWorked(
testDB.runCommand({update: 'update', updates: docs, ordered: false, writeConcern: {w: 1}}));
}
@@ -142,7 +142,7 @@ for (var x = 0; x < 2100; x += 400) {
docs.push({q: {x: id, v: doc1k}, top: 0});
}
- assert.writeOK(testDB.runCommand({
+ assert.commandWorked(testDB.runCommand({
delete: 'delete',
deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
ordered: false,
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js
index dcd3b93fb71..1278d6c6dfe 100644
--- a/jstests/sharding/zbigMapReduce.js
+++ b/jstests/sharding/zbigMapReduce.js
@@ -38,7 +38,7 @@ for (var j = 0; j < 100; j++) {
bulk.insert({i: idInc++, val: valInc++, y: str});
}
}
-assert.writeOK(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
+assert.commandWorked(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
jsTest.log("Documents inserted, doing double-checks of insert...");
@@ -126,7 +126,7 @@ for (var j = 0; j < 100; j++) {
for (i = 0; i < 512; i++) {
bulk.insert({i: idInc++, val: valInc++, y: str});
}
- assert.writeOK(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
+ assert.commandWorked(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
}
jsTest.log("No errors...");
diff --git a/jstests/sharding/zero_shard_version.js b/jstests/sharding/zero_shard_version.js
index 1d4ccbdb3d6..9a330271347 100644
--- a/jstests/sharding/zero_shard_version.js
+++ b/jstests/sharding/zero_shard_version.js
@@ -25,7 +25,7 @@ var checkShardMajorVersion = function(conn, expectedVersion) {
// mongos0: 1|0|a
var testDB_s1 = st.s1.getDB('test');
-assert.writeOK(testDB_s1.user.insert({x: 1}));
+assert.commandWorked(testDB_s1.user.insert({x: 1}));
assert.commandWorked(
testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
@@ -62,7 +62,7 @@ assert.neq(null, testDB_s3.user.findOne({x: 1}));
// mongos versions: s0, s2, s3: 2|0|a
testDB_s1.user.drop();
-assert.writeOK(testDB_s1.user.insert({x: 10}));
+assert.commandWorked(testDB_s1.user.insert({x: 10}));
// shard0: 0|0|0
// shard1: 0|0|0