author    | James Wahlin <james.wahlin@mongodb.com> | 2019-08-14 13:52:59 +0000
committer | evergreen <evergreen@mongodb.com> | 2019-08-14 13:52:59 +0000
commit    | 39c3a5d77b976e131d37476f2e7255d6058f5093 (patch)
tree      | 01cc28719f215b17196ec913f475cd8efda9b37d /jstests/replsets
parent    | 69d0dd1dc4fb1f78d21c47aa5dd82aa9077b69eb (diff)
download  | mongo-39c3a5d77b976e131d37476f2e7255d6058f5093.tar.gz
SERVER-42773 Replace uses of the assert.writeOK() Javascript assertion with assert.commandWorked()
Diffstat (limited to 'jstests/replsets')
121 files changed, 417 insertions, 410 deletions
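The change applied across these files is mechanical: each shell write previously validated with assert.writeOK() is now validated with assert.commandWorked(), which serves as the single assertion helper for shell write results and command responses alike. A minimal before/after sketch of the substitution (the collection name and write concern below are illustrative, assuming a mongo shell jstest context):

    // Old form: assert.writeOK() checks a WriteResult/BulkWriteResult and throws on failure.
    assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: "majority"}}));

    // New form: assert.commandWorked() performs the equivalent check, so the same helper
    // is used for writes and commands throughout jstests/replsets.
    assert.commandWorked(db.foo.insert({x: 1}, {writeConcern: {w: "majority"}}));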
diff --git a/jstests/replsets/apply_batch_only_goes_forward.js b/jstests/replsets/apply_batch_only_goes_forward.js index a82ca0880cb..210bcc139af 100644 --- a/jstests/replsets/apply_batch_only_goes_forward.js +++ b/jstests/replsets/apply_batch_only_goes_forward.js @@ -43,10 +43,10 @@ var stepDownSecs = 30; var stepDownCmd = {replSetStepDown: stepDownSecs, force: true}; // Write op -assert.writeOK( +assert.commandWorked( mTest.foo.save({}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}})); replTest.waitForState(slave, ReplSetTest.State.SECONDARY); -assert.writeOK( +assert.commandWorked( mTest.foo.save({}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}})); // Set minvalid to something far in the future for the current primary, to simulate recovery. @@ -63,7 +63,7 @@ const minValidUpdate = { }; jsTestLog("Current minvalid is " + tojson(mMinvalid.findOne())); jsTestLog("Updating minValid to: " + tojson(minValidUpdate)); -printjson(assert.writeOK(mMinvalid.update( +printjson(assert.commandWorked(mMinvalid.update( {}, minValidUpdate, {upsert: true, writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}}))); @@ -80,7 +80,7 @@ replTest.awaitNodesAgreeOnPrimary(); // Slave is now master... Do a write to advance the optime on the primary so that it will be // considered as a sync source - this is more relevant to PV0 because we do not write a new // entry to the oplog on becoming primary. -assert.writeOK(replTest.getPrimary().getDB("test").foo.save( +assert.commandWorked(replTest.getPrimary().getDB("test").foo.save( {}, {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}})); // Sync source selection will log this message if it does not detect min valid in the sync diff --git a/jstests/replsets/apply_batches_totalMillis.js b/jstests/replsets/apply_batches_totalMillis.js index fd8b2872065..5470ac28173 100644 --- a/jstests/replsets/apply_batches_totalMillis.js +++ b/jstests/replsets/apply_batches_totalMillis.js @@ -21,7 +21,7 @@ function performBulkInsert(coll, key, num) { doc[key] = i; bulk.insert(doc); } - assert.writeOK(bulk.execute()); + assert.commandWorked(bulk.execute()); rst.awaitReplication(); } @@ -35,7 +35,7 @@ let secondary = rst.getSecondary(); let coll = primary.getDB(name)["foo"]; // Perform an initial write on the system and ensure steady state. 
-assert.writeOK(coll.insert({init: 0})); +assert.commandWorked(coll.insert({init: 0})); rst.awaitReplication(); let baseTime = getTotalMillis(secondary); diff --git a/jstests/replsets/apply_ops_create_view.js b/jstests/replsets/apply_ops_create_view.js index 9535790f5a9..8be1cdcc5bc 100644 --- a/jstests/replsets/apply_ops_create_view.js +++ b/jstests/replsets/apply_ops_create_view.js @@ -9,7 +9,7 @@ replTest.initiate(); const db = replTest.getPrimary().getDB('test'); assert.commandWorked(db.createCollection("bar")); -assert.writeOK(db.bar.insert({a: 1, b: "hi"})); +assert.commandWorked(db.bar.insert({a: 1, b: "hi"})); const cmd = { applyOps: [{op: "c", ns: db + ".$cmd", o: {create: "foo", viewOn: "bar"}}] diff --git a/jstests/replsets/apply_ops_idempotency.js b/jstests/replsets/apply_ops_idempotency.js index 16761c27bee..5ec1a6bf134 100644 --- a/jstests/replsets/apply_ops_idempotency.js +++ b/jstests/replsets/apply_ops_idempotency.js @@ -80,28 +80,28 @@ var getCollections = (mydb, prefixes) => prefixes.map((prefix) => mydb[prefix]); var tests = { crud: (mydb) => { let [x, y, z] = getCollections(mydb, ['x', 'y', 'z']); - assert.writeOK(x.insert({_id: 1})); - assert.writeOK(x.update({_id: 1}, {$set: {x: 1}})); - assert.writeOK(x.remove({_id: 1})); - - assert.writeOK(y.update({_id: 1}, {y: 1})); - assert.writeOK(y.insert({_id: 2, y: false, z: false})); - assert.writeOK(y.update({_id: 2}, {y: 2})); - - assert.writeOK(z.insert({_id: 1, z: 1})); - assert.writeOK(z.remove({_id: 1})); - assert.writeOK(z.insert({_id: 1})); - assert.writeOK(z.insert({_id: 2, z: 2})); + assert.commandWorked(x.insert({_id: 1})); + assert.commandWorked(x.update({_id: 1}, {$set: {x: 1}})); + assert.commandWorked(x.remove({_id: 1})); + + assert.commandWorked(y.update({_id: 1}, {y: 1})); + assert.commandWorked(y.insert({_id: 2, y: false, z: false})); + assert.commandWorked(y.update({_id: 2}, {y: 2})); + + assert.commandWorked(z.insert({_id: 1, z: 1})); + assert.commandWorked(z.remove({_id: 1})); + assert.commandWorked(z.insert({_id: 1})); + assert.commandWorked(z.insert({_id: 2, z: 2})); }, renameCollectionWithinDatabase: (mydb) => { let [x, y, z] = getCollections(mydb, ['x', 'y', 'z']); - assert.writeOK(x.insert({_id: 1, x: 1})); - assert.writeOK(y.insert({_id: 1, y: 1})); + assert.commandWorked(x.insert({_id: 1, x: 1})); + assert.commandWorked(y.insert({_id: 1, y: 1})); assert.commandWorked(x.renameCollection(z.getName())); - assert.writeOK(z.insert({_id: 2, x: 2})); - assert.writeOK(x.insert({_id: 2, x: false})); - assert.writeOK(y.insert({y: 2})); + assert.commandWorked(z.insert({_id: 2, x: 2})); + assert.commandWorked(x.insert({_id: 2, x: false})); + assert.commandWorked(y.insert({y: 2})); assert.commandWorked(y.renameCollection(x.getName(), true)); assert.commandWorked(z.renameCollection(y.getName())); @@ -129,14 +129,14 @@ var tests = { let otherdb = mydb.getSiblingDB(mydb + '_'); let [x, y] = getCollections(mydb, ['x', 'y']); let [z] = getCollections(otherdb, ['z']); - assert.writeOK(x.insert({_id: 1, x: 1})); - assert.writeOK(y.insert({_id: 1, y: 1})); + assert.commandWorked(x.insert({_id: 1, x: 1})); + assert.commandWorked(y.insert({_id: 1, y: 1})); assert.commandWorked( mydb.adminCommand({renameCollection: x.getFullName(), to: z.getFullName()})); - assert.writeOK(z.insert({_id: 2, x: 2})); - assert.writeOK(x.insert({_id: 2, x: false})); - assert.writeOK(y.insert({y: 2})); + assert.commandWorked(z.insert({_id: 2, x: 2})); + assert.commandWorked(x.insert({_id: 2, x: false})); + assert.commandWorked(y.insert({y: 
2})); assert.commandWorked(mydb.adminCommand( {renameCollection: y.getFullName(), to: x.getFullName(), dropTarget: true})); @@ -170,10 +170,10 @@ var tests = { createIndex: (mydb) => { let [x, y] = getCollections(mydb, ['x', 'y']); assert.commandWorked(x.createIndex({x: 1})); - assert.writeOK(x.insert({_id: 1, x: 1})); - assert.writeOK(y.insert({_id: 1, y: 1})); + assert.commandWorked(x.insert({_id: 1, x: 1})); + assert.commandWorked(y.insert({_id: 1, y: 1})); assert.commandWorked(y.createIndex({y: 1})); - assert.writeOK(y.insert({_id: 2, y: 2})); + assert.commandWorked(y.insert({_id: 2, y: 2})); }, }; diff --git a/jstests/replsets/apply_ops_lastop.js b/jstests/replsets/apply_ops_lastop.js index e1c9fdb1823..c6257fe907c 100644 --- a/jstests/replsets/apply_ops_lastop.js +++ b/jstests/replsets/apply_ops_lastop.js @@ -30,7 +30,7 @@ var badPreCondition = [{ns: 'foo.bar', q: {_id: 10, a: "aaa"}, res: {a: "aaa"}}] var majorityWriteConcern = {w: 'majority', wtimeout: 30000}; // Set up some data -assert.writeOK(coll.insert({x: 1})); // creating the collection so applyOps works +assert.commandWorked(coll.insert({x: 1})); // creating the collection so applyOps works assert.commandWorked( m1.getDB('foo').runCommand({applyOps: insertApplyOps, writeConcern: majorityWriteConcern})); var insertOp = m1.getDB('foo').getLastErrorObj('majority', 30000).lastOp; diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js index 42dc2638c28..4d599df4010 100644 --- a/jstests/replsets/auth1.js +++ b/jstests/replsets/auth1.js @@ -73,7 +73,7 @@ rs.awaitSecondaryNodes(); var mId = rs.getNodeId(master); var slave = rs._slaves[0]; assert.eq(1, master.getDB("admin").auth("foo", "bar")); -assert.writeOK(master.getDB("test").foo.insert( +assert.commandWorked(master.getDB("test").foo.insert( {x: 1}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}})); print("try some legal and illegal reads"); @@ -110,7 +110,7 @@ var bulk = master.getDB("test").foo.initializeUnorderedBulkOp(); for (var i = 0; i < 1000; i++) { bulk.insert({x: i, foo: "bar"}); } -assert.writeOK(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS})); +assert.commandWorked(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS})); print("fail over"); rs.stop(mId); @@ -123,7 +123,7 @@ bulk = master.getDB("test").foo.initializeUnorderedBulkOp(); for (var i = 0; i < 1000; i++) { bulk.insert({x: i, foo: "bar"}); } -assert.writeOK(bulk.execute({w: 2})); +assert.commandWorked(bulk.execute({w: 2})); print("resync"); rs.restart(mId, {"keyFile": key1_600}); diff --git a/jstests/replsets/auth_no_pri.js b/jstests/replsets/auth_no_pri.js index 179edf015d6..16a94763b04 100644 --- a/jstests/replsets/auth_no_pri.js +++ b/jstests/replsets/auth_no_pri.js @@ -14,7 +14,7 @@ master.getDB("admin").createUser({user: "admin", pwd: "pwd", roles: ["root"]}, { // Can authenticate replset connection when whole set is up. 
var conn = new Mongo(rs.getURL()); assert(conn.getDB('admin').auth('admin', 'pwd')); -assert.writeOK(conn.getDB('admin').foo.insert({a: 1}, {writeConcern: {w: NODE_COUNT}})); +assert.commandWorked(conn.getDB('admin').foo.insert({a: 1}, {writeConcern: {w: NODE_COUNT}})); // Make sure there is no primary rs.stop(0); diff --git a/jstests/replsets/await_replication_timeout.js b/jstests/replsets/await_replication_timeout.js index ce89a30c296..dad1a172b95 100644 --- a/jstests/replsets/await_replication_timeout.js +++ b/jstests/replsets/await_replication_timeout.js @@ -13,7 +13,7 @@ var testColl = testDB.getCollection(collName); // Insert a document and implicitly create the collection. let resetCollection = function(w) { - assert.writeOK( + assert.commandWorked( testColl.insert({_id: 0}, {writeConcern: {w: w, wtimeout: replTest.kDefaultTimeoutMS}})); assert.eq(1, testColl.find().itcount()); }; diff --git a/jstests/replsets/background_index.js b/jstests/replsets/background_index.js index 3b302644438..662abef771d 100644 --- a/jstests/replsets/background_index.js +++ b/jstests/replsets/background_index.js @@ -19,7 +19,7 @@ var coll = primary.getCollection("test.foo"); var adminDB = primary.getDB("admin"); for (var i = 0; i < 100; i++) { - assert.writeOK(coll.insert({_id: i, x: i * 3, str: "hello world"})); + assert.commandWorked(coll.insert({_id: i, x: i * 3, str: "hello world"})); } // Add a background index. diff --git a/jstests/replsets/capped_insert_order.js b/jstests/replsets/capped_insert_order.js index f1a63ea683e..91f94f9579b 100644 --- a/jstests/replsets/capped_insert_order.js +++ b/jstests/replsets/capped_insert_order.js @@ -28,7 +28,7 @@ var batch = masterColl.initializeOrderedBulkOp(); for (var i = 0; i < nDocuments; i++) { batch.insert({_id: i}); } -assert.writeOK(batch.execute()); +assert.commandWorked(batch.execute()); replTest.awaitReplication(); function checkCollection(coll) { diff --git a/jstests/replsets/catchup.js b/jstests/replsets/catchup.js index 9a837346c33..a5b6292fa95 100644 --- a/jstests/replsets/catchup.js +++ b/jstests/replsets/catchup.js @@ -63,7 +63,7 @@ function stopReplicationAndEnforceNewPrimaryToCatchUp() { var oldPrimary = rst.getPrimary(); stopServerReplication(oldSecondaries); for (var i = 0; i < 3; i++) { - assert.writeOK(oldPrimary.getDB("test").foo.insert({x: i})); + assert.commandWorked(oldPrimary.getDB("test").foo.insert({x: i})); } var latestOpOnOldPrimary = getLatestOp(oldPrimary); diff --git a/jstests/replsets/catchup_takeover_one_high_priority.js b/jstests/replsets/catchup_takeover_one_high_priority.js index 10c6b99307f..39beecd3dce 100644 --- a/jstests/replsets/catchup_takeover_one_high_priority.js +++ b/jstests/replsets/catchup_takeover_one_high_priority.js @@ -57,14 +57,14 @@ sleep(3000); var primary = replSet.getPrimary(); var writeConcern = {writeConcern: {w: 2, wtimeout: replSet.kDefaultTimeoutMS}}; -assert.writeOK(primary.getDB(name).bar.insert({y: 100}, writeConcern)); +assert.commandWorked(primary.getDB(name).bar.insert({y: 100}, writeConcern)); // Write something so that node 0 is ahead of node 1. 
stopServerReplication(nodes[1]); writeConcern = { writeConcern: {w: 1, wtimeout: replSet.kDefaultTimeoutMS} }; -assert.writeOK(primary.getDB(name).bar.insert({x: 100}, writeConcern)); +assert.commandWorked(primary.getDB(name).bar.insert({x: 100}, writeConcern)); nodes[2].reconnect(nodes[0]); nodes[2].reconnect(nodes[1]); diff --git a/jstests/replsets/catchup_takeover_two_nodes_ahead.js b/jstests/replsets/catchup_takeover_two_nodes_ahead.js index 31b78302329..28d60b2ba0a 100644 --- a/jstests/replsets/catchup_takeover_two_nodes_ahead.js +++ b/jstests/replsets/catchup_takeover_two_nodes_ahead.js @@ -31,14 +31,14 @@ replSet.awaitReplication(); stopServerReplication(nodes.slice(2, 5)); var primary = replSet.getPrimary(); var writeConcern = {writeConcern: {w: 2, wtimeout: replSet.kDefaultTimeoutMS}}; -assert.writeOK(primary.getDB(name).bar.insert({x: 100}, writeConcern)); +assert.commandWorked(primary.getDB(name).bar.insert({x: 100}, writeConcern)); // Write something so that node 0 is ahead of node 1. stopServerReplication(nodes[1]); writeConcern = { writeConcern: {w: 1, wtimeout: replSet.kDefaultTimeoutMS} }; -assert.writeOK(primary.getDB(name).bar.insert({y: 100}, writeConcern)); +assert.commandWorked(primary.getDB(name).bar.insert({y: 100}, writeConcern)); const initialPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1})); const initialNode2Status = assert.commandWorked(nodes[2].adminCommand({serverStatus: 1})); diff --git a/jstests/replsets/chaining_removal.js b/jstests/replsets/chaining_removal.js index 1569cad2f71..11ab2e9e43d 100644 --- a/jstests/replsets/chaining_removal.js +++ b/jstests/replsets/chaining_removal.js @@ -43,7 +43,7 @@ syncFrom(nodes[4], nodes[1], replTest); // write that should reach all nodes var timeout = ReplSetTest.kDefaultTimeoutMS; var options = {writeConcern: {w: numNodes, wtimeout: timeout}}; -assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options)); +assert.commandWorked(primary.getDB(name).foo.insert({x: 1}, options)); // Re-enable 'maxSyncSourceLagSecs' checking on sync source. assert.commandWorked(nodes[1].getDB('admin').runCommand( @@ -67,7 +67,7 @@ primary = replTest.getPrimary(); const liveSlaves = [nodes[1], nodes[2], nodes[3]]; replTest.awaitReplication(null, null, liveSlaves); options.writeConcern.w = 4; -assert.writeOK(primary.getDB(name).foo.insert({x: 2}, options)); +assert.commandWorked(primary.getDB(name).foo.insert({x: 2}, options)); replTest.stopSet(); }()); diff --git a/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js b/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js index 29beca07a26..9fa0cf055c0 100644 --- a/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js +++ b/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js @@ -46,9 +46,9 @@ assert.commandWorked(secondaryDB.adminCommand( stopServerReplication(secondary); jsTestLog("Do some writes on the primary."); -assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 1}})); -assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 2}})); -assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 3}})); +assert.commandWorked(primaryColl.update({_id: 0}, {$set: {v: 1}})); +assert.commandWorked(primaryColl.update({_id: 0}, {$set: {v: 2}})); +assert.commandWorked(primaryColl.update({_id: 0}, {$set: {v: 3}})); // Restart server replication on secondary and wait for the failpoint to be hit. 
jsTestLog("Restarting server replication on secondary."); diff --git a/jstests/replsets/clean_shutdown_oplog_state.js b/jstests/replsets/clean_shutdown_oplog_state.js index 35957ed44b3..33d68dc4887 100644 --- a/jstests/replsets/clean_shutdown_oplog_state.js +++ b/jstests/replsets/clean_shutdown_oplog_state.js @@ -34,7 +34,7 @@ primary.getCollection("test.coll").insert({_id: -1}); // Start a w:2 write that will block until replication is resumed. var waitForReplStart = startParallelShell(function() { - printjson(assert.writeOK( + printjson(assert.commandWorked( db.getCollection('side').insert({}, {writeConcern: {w: 2, wtimeout: 30 * 60 * 1000}}))); }, primary.host.split(':')[1]); @@ -43,7 +43,7 @@ var op = primary.getCollection("test.coll").initializeUnorderedBulkOp(); for (var i = 0; i < 1000 * 1000; i++) { op.insert({_id: i}); } -assert.writeOK(op.execute()); +assert.commandWorked(op.execute()); // Resume replication and wait for ops to start replicating, then do a clean shutdown on the // secondary. diff --git a/jstests/replsets/collate_id.js b/jstests/replsets/collate_id.js index 588c02e979a..e51a0ffcb88 100644 --- a/jstests/replsets/collate_id.js +++ b/jstests/replsets/collate_id.js @@ -51,8 +51,8 @@ for (var i = 0; i < 1000; i++) { } } - assert.writeOK(primaryColl.insert({_id: strId})); - assert.writeOK(primaryColl.remove({_id: strId})); + assert.commandWorked(primaryColl.insert({_id: strId})); + assert.commandWorked(primaryColl.remove({_id: strId})); } // Since the inserts and deletes happen in pairs, we should be left with an empty collection on diff --git a/jstests/replsets/dbhash_system_collections.js b/jstests/replsets/dbhash_system_collections.js index d3f7b83c323..9922bc1968f 100644 --- a/jstests/replsets/dbhash_system_collections.js +++ b/jstests/replsets/dbhash_system_collections.js @@ -10,14 +10,14 @@ var primary = rst.getPrimary(); var secondary = rst.getSecondary(); var testDB = primary.getDB('test'); -assert.writeOK(testDB.system.users.insert({users: 1})); -assert.writeOK(testDB.system.js.insert({js: 1})); +assert.commandWorked(testDB.system.users.insert({users: 1})); +assert.commandWorked(testDB.system.js.insert({js: 1})); var adminDB = primary.getDB('admin'); -assert.writeOK(adminDB.system.roles.insert({roles: 1})); -assert.writeOK(adminDB.system.version.insert({version: 1})); -assert.writeOK(adminDB.system.new_users.insert({new_users: 1})); -assert.writeOK(adminDB.system.backup_users.insert({backup_users: 1})); +assert.commandWorked(adminDB.system.roles.insert({roles: 1})); +assert.commandWorked(adminDB.system.version.insert({version: 1})); +assert.commandWorked(adminDB.system.new_users.insert({new_users: 1})); +assert.commandWorked(adminDB.system.backup_users.insert({backup_users: 1})); rst.awaitReplication(); diff --git a/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js b/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js index 2577744902e..676817cb31b 100644 --- a/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js +++ b/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js @@ -42,7 +42,7 @@ let bulk = testColl.initializeUnorderedBulkOp(); for (let i = 0; i < 2; ++i) { bulk.insert({_id: i}); } -assert.writeOK(bulk.execute()); +assert.commandWorked(bulk.execute()); jsTest.log("Setting up a prepared transaction..."); const session = primary.startSession(); diff --git a/jstests/replsets/drain.js b/jstests/replsets/drain.js index e1d008aebc0..7a8bdea6a53 100644 --- a/jstests/replsets/drain.js +++ 
b/jstests/replsets/drain.js @@ -32,7 +32,7 @@ var secondary = replSet.getSecondary(); var numDocuments = 20; var bulk = primary.getDB("foo").foo.initializeUnorderedBulkOp(); var bigString = Array(1024 * 1024).toString(); -assert.writeOK(primary.getDB("foo").foo.insert({big: bigString})); +assert.commandWorked(primary.getDB("foo").foo.insert({big: bigString})); replSet.awaitReplication(); assert.commandWorked( secondary.getDB("admin").runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}), @@ -42,7 +42,7 @@ var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffe for (var i = 1; i < numDocuments; ++i) { bulk.insert({big: bigString}); } -assert.writeOK(bulk.execute()); +assert.commandWorked(bulk.execute()); jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments); assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount()); @@ -101,7 +101,7 @@ assert.commandWorked( // Ensure new primary is writable jsTestLog('New primary should be writable after draining is complete'); -assert.writeOK(primary.getDB("foo").flag.insert({sentinel: 1})); +assert.commandWorked(primary.getDB("foo").flag.insert({sentinel: 1})); // Check for at least two entries. There was one prior to freezing op application on the // secondary and we cannot guarantee all writes reached the secondary's op queue prior to // shutting down the original primary. diff --git a/jstests/replsets/drop_collections_two_phase_rename_drop_target.js b/jstests/replsets/drop_collections_two_phase_rename_drop_target.js index d7b16cdc790..78f7436853e 100644 --- a/jstests/replsets/drop_collections_two_phase_rename_drop_target.js +++ b/jstests/replsets/drop_collections_two_phase_rename_drop_target.js @@ -57,8 +57,8 @@ assert.commandWorked(toColl.ensureIndex({a: 1}, {name: longIndexName})); assert.commandWorked(toColl.ensureIndex({b: 1}, {name: shortIndexName})); // Insert documents into both collections so that we can tell them apart. -assert.writeOK(fromColl.insert({_id: 'from'})); -assert.writeOK(toColl.insert({_id: 'to'})); +assert.commandWorked(fromColl.insert({_id: 'from'})); +assert.commandWorked(toColl.insert({_id: 'to'})); replTest.awaitReplication(); // Prevent renameCollection from being applied on the secondary so that we can examine the state diff --git a/jstests/replsets/drop_collections_two_phase_write_concern.js b/jstests/replsets/drop_collections_two_phase_write_concern.js index e7b60eb18fb..7a55c28d967 100644 --- a/jstests/replsets/drop_collections_two_phase_write_concern.js +++ b/jstests/replsets/drop_collections_two_phase_write_concern.js @@ -47,7 +47,7 @@ const writeConcernForSuccessfulOp = { w: 'majority', wtimeout: replTest.kDefaultTimeoutMS }; -assert.writeOK(collForInserts.insert({_id: 0}, {writeConcern: writeConcernForSuccessfulOp})); +assert.commandWorked(collForInserts.insert({_id: 0}, {writeConcern: writeConcernForSuccessfulOp})); // PREPARE collection drop. twoPhaseDropTest.prepareDropCollection(collName); @@ -80,7 +80,7 @@ try { // After the reaper is unblocked, an operation waiting on a majority write concern should run // complete successfully. -assert.writeOK(collForInserts.insert({_id: 3}, {writeConcern: writeConcernForSuccessfulOp})); +assert.commandWorked(collForInserts.insert({_id: 3}, {writeConcern: writeConcernForSuccessfulOp})); assert.eq(4, collForInserts.find().itcount()); // COMMIT collection drop. 
diff --git a/jstests/replsets/drop_databases_two_phase.js b/jstests/replsets/drop_databases_two_phase.js index 5a00ebe2e9e..9fc7e175592 100644 --- a/jstests/replsets/drop_databases_two_phase.js +++ b/jstests/replsets/drop_databases_two_phase.js @@ -65,7 +65,7 @@ var collNameToDrop = "collectionToDrop"; // Create the collection that will be dropped and let it replicate. var collToDrop = dbToDrop.getCollection(collNameToDrop); -assert.writeOK( +assert.commandWorked( collToDrop.insert({_id: 0}, {writeConcern: {w: 2, wtimeout: replTest.kDefaultTimeoutMS}})); assert.eq(1, collToDrop.find().itcount()); diff --git a/jstests/replsets/drop_oplog.js b/jstests/replsets/drop_oplog.js index a53da5ae483..2a64ef6c436 100644 --- a/jstests/replsets/drop_oplog.js +++ b/jstests/replsets/drop_oplog.js @@ -28,7 +28,7 @@ let renameOutput = localDB.oplog.rs.renameCollection("poison"); assert.eq(renameOutput.ok, 0); assert.eq(renameOutput.errmsg, "can't rename live oplog while replicating"); -assert.writeOK(localDB.foo.insert({a: 1})); +assert.commandWorked(localDB.foo.insert({a: 1})); renameOutput = localDB.foo.renameCollection("oplog.rs"); assert.eq(renameOutput.ok, 0); assert.eq(renameOutput.errmsg, "can't rename to live oplog while replicating"); diff --git a/jstests/replsets/emptycapped.js b/jstests/replsets/emptycapped.js index e15322935eb..462d6e16f58 100644 --- a/jstests/replsets/emptycapped.js +++ b/jstests/replsets/emptycapped.js @@ -12,7 +12,7 @@ var primaryAdminDB = rst.getPrimary().getDB('admin'); var secondaryTestDB = rst.getSecondary().getDB('test'); // Truncate a non-capped collection. -assert.writeOK(primaryTestDB.noncapped.insert({x: 1})); +assert.commandWorked(primaryTestDB.noncapped.insert({x: 1})); assert.commandWorked(primaryTestDB.runCommand({emptycapped: 'noncapped'})); assert.eq(primaryTestDB.noncapped.find().itcount(), 0, @@ -31,7 +31,7 @@ assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: 'nonexistent // Truncate a capped collection. assert.commandWorked(primaryTestDB.createCollection("capped", {capped: true, size: 4096})); -assert.writeOK(primaryTestDB.capped.insert({})); +assert.commandWorked(primaryTestDB.capped.insert({})); assert.eq(primaryTestDB.capped.find().itcount(), 1, "Expected 1 document to exist after an insert"); assert.commandWorked(primaryTestDB.runCommand({emptycapped: 'capped'})); assert.eq(primaryTestDB.capped.find().itcount(), @@ -49,7 +49,7 @@ assert.commandFailedWithCode(primaryLocalDB.runCommand({emptycapped: "oplog.rs"} // Test system collections, which cannot be truncated except system.profile. // Truncate the local system.js collection. 
-assert.writeOK(primaryTestDB.system.js.insert({_id: "mystring", value: "var root = this;"})); +assert.commandWorked(primaryTestDB.system.js.insert({_id: "mystring", value: "var root = this;"})); assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: "system.js"}), ErrorCodes.IllegalOperation); diff --git a/jstests/replsets/index_delete.js b/jstests/replsets/index_delete.js index 59486bb2932..09a877ec845 100644 --- a/jstests/replsets/index_delete.js +++ b/jstests/replsets/index_delete.js @@ -43,7 +43,7 @@ var bulk = masterDB[collName].initializeUnorderedBulkOp(); for (var i = 0; i < size; ++i) { bulk.insert({i: i, j: i, k: i}); } -assert.writeOK(bulk.execute()); +assert.commandWorked(bulk.execute()); jsTest.log("Creating index"); masterDB[collName].ensureIndex({i: 1}); diff --git a/jstests/replsets/index_restart_secondary.js b/jstests/replsets/index_restart_secondary.js index 0b391d5f8e2..d785a394ee3 100644 --- a/jstests/replsets/index_restart_secondary.js +++ b/jstests/replsets/index_restart_secondary.js @@ -42,7 +42,7 @@ if (conns[0].getDB('test').serverBuildInfo().bits !== 32) { for (var i = 0; i < size; ++i) { bulk.insert({i: i}); } - assert.writeOK(bulk.execute({w: "majority"})); + assert.commandWorked(bulk.execute({w: "majority"})); jsTest.log("Creating index"); masterDB.jstests_fgsec.ensureIndex({i: 1}); diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js index 92ace5d0252..0a536b4d601 100644 --- a/jstests/replsets/initial_sync1.js +++ b/jstests/replsets/initial_sync1.js @@ -39,7 +39,7 @@ var bulk = foo.bar.initializeUnorderedBulkOp(); for (var i = 0; i < 100; i++) { bulk.insert({date: new Date(), x: i, str: "all the talk on the market"}); } -assert.writeOK(bulk.execute()); +assert.commandWorked(bulk.execute()); print("total in foo: " + foo.bar.find().itcount()); print("4. Make sure synced"); @@ -97,7 +97,7 @@ bulk = foo.bar.initializeUnorderedBulkOp(); for (var i = 0; i < 100; i++) { bulk.insert({date: new Date(), x: i, str: "all the talk on the market"}); } -assert.writeOK(bulk.execute()); +assert.commandWorked(bulk.execute()); print("11. Everyone happy eventually"); replTest.awaitReplication(); diff --git a/jstests/replsets/initial_sync4.js b/jstests/replsets/initial_sync4.js index 504e7e737ab..80103839bfb 100644 --- a/jstests/replsets/initial_sync4.js +++ b/jstests/replsets/initial_sync4.js @@ -22,7 +22,7 @@ for (var i = 0; i < N; ++i) { bulk.insert({_id: i, x: i, a: {}}); } - assert.writeOK(bulk.execute()); + assert.commandWorked(bulk.execute()); jsTestLog("3. Make sure synced"); replTest.awaitReplication(); diff --git a/jstests/replsets/initial_sync_applier_error.js b/jstests/replsets/initial_sync_applier_error.js index 2bd65f51e12..51454c98ea9 100644 --- a/jstests/replsets/initial_sync_applier_error.js +++ b/jstests/replsets/initial_sync_applier_error.js @@ -24,7 +24,7 @@ replSet.initiate(); var primary = replSet.getPrimary(); var coll = primary.getDB('test').getCollection(name); -assert.writeOK(coll.insert({_id: 0, content: "hi"})); +assert.commandWorked(coll.insert({_id: 0, content: "hi"})); // Add a secondary node but make it hang after retrieving the last op on the source // but before copying databases. 
diff --git a/jstests/replsets/initial_sync_capped_index.js b/jstests/replsets/initial_sync_capped_index.js index a7c1a2a3de4..905c785d6e5 100644 --- a/jstests/replsets/initial_sync_capped_index.js +++ b/jstests/replsets/initial_sync_capped_index.js @@ -34,7 +34,7 @@ load("jstests/libs/check_log.js"); */ function overflowCappedColl(coll, docToInsert) { // Insert one document and save its _id. - assert.writeOK(coll.insert(docToInsert)); + assert.commandWorked(coll.insert(docToInsert)); var origFirstDocId = coll.findOne()["_id"]; // Detect overflow by seeing if the original first doc of the collection is still present. @@ -99,7 +99,7 @@ checkLog.contains( // additional documents. var docsToAppend = 2; for (var i = 0; i < docsToAppend; i++) { - assert.writeOK(primaryDB[cappedCollName].insert(largeDoc)); + assert.commandWorked(primaryDB[cappedCollName].insert(largeDoc)); } // Let the 'getMore' requests for the capped collection clone continue. diff --git a/jstests/replsets/initial_sync_document_validation.js b/jstests/replsets/initial_sync_document_validation.js index 79d06f75140..fad4601e797 100644 --- a/jstests/replsets/initial_sync_document_validation.js +++ b/jstests/replsets/initial_sync_document_validation.js @@ -15,7 +15,7 @@ var primary = replSet.getPrimary(); var secondary = replSet.getSecondary(); var coll = primary.getDB('test').getCollection(name); -assert.writeOK(coll.insert({_id: 0, x: 1})); +assert.commandWorked(coll.insert({_id: 0, x: 1})); assert.commandWorked(coll.runCommand("collMod", {"validator": {a: {$exists: true}}})); secondary = replSet.restart(secondary, {startClean: true}); diff --git a/jstests/replsets/initial_sync_drop_collection.js b/jstests/replsets/initial_sync_drop_collection.js index 6488f55e01e..72fa5b5d273 100644 --- a/jstests/replsets/initial_sync_drop_collection.js +++ b/jstests/replsets/initial_sync_drop_collection.js @@ -35,7 +35,7 @@ var nss = primaryColl.getFullName(); // the collection on the secondary is empty. function setupTest({failPoint, secondaryStartupParams}) { jsTestLog("Writing data to collection."); - assert.writeOK(primaryColl.insert([{_id: 1}, {_id: 2}])); + assert.commandWorked(primaryColl.insert([{_id: 1}, {_id: 2}])); jsTestLog("Restarting secondary with failPoint " + failPoint + " set for " + nss); secondaryStartupParams = secondaryStartupParams || {}; @@ -69,7 +69,7 @@ function finishTest({failPoint, secondaryStartupParams, expectedLog, waitForDrop if (createNew) { jsTestLog("Creating a new collection with the same name: " + primaryColl.getFullName()); - assert.writeOK(primaryColl.insert({_id: "not the same collection"})); + assert.commandWorked(primaryColl.insert({_id: "not the same collection"})); } jsTestLog("Allowing secondary to continue."); diff --git a/jstests/replsets/initial_sync_during_stepdown.js b/jstests/replsets/initial_sync_during_stepdown.js index 9d68ac69c49..fe5d1a79587 100644 --- a/jstests/replsets/initial_sync_during_stepdown.js +++ b/jstests/replsets/initial_sync_during_stepdown.js @@ -34,7 +34,7 @@ function setupTest({ secondaryStartupParams: secondaryStartupParams = {} }) { jsTestLog("Writing data to collection."); - assert.writeOK(primaryColl.insert([{_id: 1}, {_id: 2}])); + assert.commandWorked(primaryColl.insert([{_id: 1}, {_id: 2}])); jsTestLog("Stopping secondary."); rst.stop(secondary); @@ -145,7 +145,7 @@ checkLog.contains(secondary, "initialSyncHangAfterDataCloning fail point enabled // Insert more data so that these are replicated to secondary node via oplog fetcher. 
jsTestLog("Inserting more data on primary."); -assert.writeOK(primaryColl.insert([{_id: 3}, {_id: 4}])); +assert.commandWorked(primaryColl.insert([{_id: 3}, {_id: 4}])); // Insert is successful. So, enable fail point "waitWithPinnedCursorDuringGetMoreBatch" // such that it doesn't drop locks when getmore cmd waits inside the fail point block. diff --git a/jstests/replsets/initial_sync_fail_insert_once.js b/jstests/replsets/initial_sync_fail_insert_once.js index 0a1f0a11a8a..079b9d34358 100644 --- a/jstests/replsets/initial_sync_fail_insert_once.js +++ b/jstests/replsets/initial_sync_fail_insert_once.js @@ -15,7 +15,7 @@ var primary = replSet.getPrimary(); var secondary = replSet.getSecondary(); var coll = primary.getDB('test').getCollection(name); -assert.writeOK(coll.insert({_id: 0, x: 1}, {writeConcern: {w: 2}})); +assert.commandWorked(coll.insert({_id: 0, x: 1}, {writeConcern: {w: 2}})); jsTest.log("Enabling Failpoint failCollectionInserts on " + tojson(secondary)); assert.commandWorked(secondary.getDB("admin").adminCommand({ diff --git a/jstests/replsets/initial_sync_fcv.js b/jstests/replsets/initial_sync_fcv.js index af0a466848c..f805f602f06 100644 --- a/jstests/replsets/initial_sync_fcv.js +++ b/jstests/replsets/initial_sync_fcv.js @@ -24,7 +24,7 @@ const primary = rst.getPrimary(); const dbName = 'foo'; const collName = 'bar'; -assert.writeOK(primary.getDB(dbName).getCollection(collName).insert({a: 1})); +assert.commandWorked(primary.getDB(dbName).getCollection(collName).insert({a: 1})); function runInitialSync(cmd, initialFCV) { assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: initialFCV})); diff --git a/jstests/replsets/initial_sync_invalid_views.js b/jstests/replsets/initial_sync_invalid_views.js index a02498aaa40..5f00bc5e2a3 100644 --- a/jstests/replsets/initial_sync_invalid_views.js +++ b/jstests/replsets/initial_sync_invalid_views.js @@ -14,7 +14,7 @@ replSet.initiate(); let primary = replSet.getPrimary(); let coll = primary.getDB('test').foo; -assert.writeOK(coll.insert({a: 1})); +assert.commandWorked(coll.insert({a: 1})); // Add a secondary node but make it hang before copying databases. let secondary = replSet.add(); @@ -24,7 +24,7 @@ assert.commandWorked(secondary.getDB('admin').runCommand( {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'})); replSet.reInitiate(); -assert.writeOK(primary.getDB('test').system.views.insert({invalid: NumberLong(1000)})); +assert.commandWorked(primary.getDB('test').system.views.insert({invalid: NumberLong(1000)})); assert.commandWorked(secondary.getDB('admin').runCommand( {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'})); diff --git a/jstests/replsets/initial_sync_move_forward.js b/jstests/replsets/initial_sync_move_forward.js index 2561e16b0c1..534a2d8d72e 100644 --- a/jstests/replsets/initial_sync_move_forward.js +++ b/jstests/replsets/initial_sync_move_forward.js @@ -32,7 +32,7 @@ for (var i = 0; i < count - 2; ++i) { var longString = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; bulk.insert({_id: count - 2, x: count - 2, longString: longString}); bulk.insert({_id: count - 1, x: count - 1, longString: longString}); -assert.writeOK(bulk.execute()); +assert.commandWorked(bulk.execute()); // Create a unique index on {x: 1}. assert.commandWorked(masterColl.ensureIndex({x: 1}, {unique: true})); @@ -63,16 +63,16 @@ assert.soon(function() { // Delete {_id: count - 2} to make a hole. Grow {_id: 0} so that it moves into that hole. 
This // will cause the secondary to clone {_id: 0} again. // Change the value for 'x' so that we are not testing the uniqueness of 'x' in this case. -assert.writeOK(masterColl.remove({_id: 0, x: 0})); -assert.writeOK(masterColl.remove({_id: count - 2, x: count - 2})); -assert.writeOK(masterColl.insert({_id: 0, x: count, longString: longString})); +assert.commandWorked(masterColl.remove({_id: 0, x: 0})); +assert.commandWorked(masterColl.remove({_id: count - 2, x: count - 2})); +assert.commandWorked(masterColl.insert({_id: 0, x: count, longString: longString})); // Delete {_id: count - 1} to make a hole. Grow {x: 1} so that it moves into that hole. This // will cause the secondary to clone {x: 1} again. // Change the value for _id so that we are not testing the uniqueness of _id in this case. -assert.writeOK(masterColl.remove({_id: 1, x: 1})); -assert.writeOK(masterColl.remove({_id: count - 1, x: count - 1})); -assert.writeOK(masterColl.insert({_id: count, x: 1, longString: longString})); +assert.commandWorked(masterColl.remove({_id: 1, x: 1})); +assert.commandWorked(masterColl.remove({_id: count - 1, x: count - 1})); +assert.commandWorked(masterColl.insert({_id: count, x: 1, longString: longString})); // Resume initial sync. assert.commandWorked(secondary.adminCommand( diff --git a/jstests/replsets/initial_sync_oplog_hole.js b/jstests/replsets/initial_sync_oplog_hole.js index 190099cd571..a5bd24b96b3 100644 --- a/jstests/replsets/initial_sync_oplog_hole.js +++ b/jstests/replsets/initial_sync_oplog_hole.js @@ -35,7 +35,7 @@ TestData.testName = testName; TestData.collectionName = collName; jsTestLog("Writing data before oplog hole to collection."); -assert.writeOK(primaryColl.insert({_id: "a"})); +assert.commandWorked(primaryColl.insert({_id: "a"})); // Make sure it gets written out. assert.eq(primaryColl.find({_id: "a"}).itcount(), 1); @@ -55,7 +55,7 @@ checkLog.contains(primaryDB.getMongo(), "hangAfterCollectionInserts fail point enabled for " + primaryColl.getFullName()); jsTest.log("Create a write following the uncommitted write."); -assert.writeOK(primaryColl.insert({_id: "c"})); +assert.commandWorked(primaryColl.insert({_id: "c"})); // Make sure it gets written out. 
assert.eq(primaryColl.find({_id: "c"}).itcount(), 1); diff --git a/jstests/replsets/initial_sync_oplog_rollover.js b/jstests/replsets/initial_sync_oplog_rollover.js index 7ffe8c98dd4..268548f1f80 100644 --- a/jstests/replsets/initial_sync_oplog_rollover.js +++ b/jstests/replsets/initial_sync_oplog_rollover.js @@ -30,7 +30,7 @@ replSet.initiate(); var primary = replSet.getPrimary(); var coll = primary.getDB('test').foo; -assert.writeOK(coll.insert({a: 1})); +assert.commandWorked(coll.insert({a: 1})); function getFirstOplogEntry(conn) { return conn.getDB('local').oplog.rs.find().sort({$natural: 1}).limit(1)[0]; @@ -53,7 +53,7 @@ checkLog.contains(secondary, const largeStr = new Array(4 * 1024 * oplogSizeOnPrimary).join('aaaaaaaa'); var i = 0; while (bsonWoCompare(getFirstOplogEntry(primary), firstOplogEntry) === 0) { - assert.writeOK(coll.insert({a: 2, x: i++, long_str: largeStr})); + assert.commandWorked(coll.insert({a: 2, x: i++, long_str: largeStr})); sleep(100); } diff --git a/jstests/replsets/initial_sync_rename_collection.js b/jstests/replsets/initial_sync_rename_collection.js index 9c63d7c55b1..957dae9db8a 100644 --- a/jstests/replsets/initial_sync_rename_collection.js +++ b/jstests/replsets/initial_sync_rename_collection.js @@ -32,8 +32,8 @@ const collAcrossFinal_name = 'renamed_across'; // Create two collections on the same database. One will be renamed within the database // and the other will be renamed to a different database. -assert.writeOK(primary_db0[collRenameWithinDB_name].save({})); -assert.writeOK(primary_db0[collRenameAcrossDBs_name].save({})); +assert.commandWorked(primary_db0[collRenameWithinDB_name].save({})); +assert.commandWorked(primary_db0[collRenameAcrossDBs_name].save({})); jsTestLog('Waiting for replication'); rst.awaitReplication(); diff --git a/jstests/replsets/initial_sync_replSetGetStatus.js b/jstests/replsets/initial_sync_replSetGetStatus.js index 60fd36a9c77..7d325997328 100644 --- a/jstests/replsets/initial_sync_replSetGetStatus.js +++ b/jstests/replsets/initial_sync_replSetGetStatus.js @@ -18,8 +18,8 @@ replSet.initiate(); var primary = replSet.getPrimary(); var coll = primary.getDB('test').foo; -assert.writeOK(coll.insert({a: 1})); -assert.writeOK(coll.insert({a: 2})); +assert.commandWorked(coll.insert({a: 1})); +assert.commandWorked(coll.insert({a: 2})); // Add a secondary node but make it hang before copying databases. var secondary = replSet.add(); @@ -47,8 +47,8 @@ assert(!res.initialSyncStatus, assert.commandFailedWithCode(secondary.adminCommand({replSetGetStatus: 1, initialSync: "t"}), ErrorCodes.TypeMismatch); -assert.writeOK(coll.insert({a: 3})); -assert.writeOK(coll.insert({a: 4})); +assert.commandWorked(coll.insert({a: 3})); +assert.commandWorked(coll.insert({a: 4})); // Let initial sync continue working. 
assert.commandWorked(secondary.getDB('admin').runCommand( diff --git a/jstests/replsets/initial_sync_unsupported_auth_schema.js b/jstests/replsets/initial_sync_unsupported_auth_schema.js index bb3013b38a3..bff265a0653 100644 --- a/jstests/replsets/initial_sync_unsupported_auth_schema.js +++ b/jstests/replsets/initial_sync_unsupported_auth_schema.js @@ -16,7 +16,7 @@ function testInitialSyncAbortsWithUnsupportedAuthSchema(schema) { // invalid or outdated version var versionColl = rst.getPrimary().getDB('admin').system.version; var res = versionColl.insert(schema); - assert.writeOK(res); + assert.commandWorked(res); // Add another node to the replica set to allow an initial sync to occur var initSyncNode = rst.add({setParameter: 'numInitialSyncAttempts=1'}); @@ -63,7 +63,7 @@ function testInitialSyncAbortsWithExistingUserAndNoAuthSchema() { // a corresponding auth schema var userColl = rst.getPrimary().getDB('admin').system.users; var res = userColl.insert({}); - assert.writeOK(res); + assert.commandWorked(res); // Add another node to the replica set to allow an initial sync to occur var initSyncNode = rst.add({setParameter: 'numInitialSyncAttempts=1'}); diff --git a/jstests/replsets/initial_sync_uuid_not_found.js b/jstests/replsets/initial_sync_uuid_not_found.js index 0942ac1f54b..50cc9f6b11b 100644 --- a/jstests/replsets/initial_sync_uuid_not_found.js +++ b/jstests/replsets/initial_sync_uuid_not_found.js @@ -22,7 +22,7 @@ const primaryDB = primary.getDB('d'); const primaryColl = primaryDB.coll; jsTestLog('Create a collection (with a UUID) and insert a document.'); -assert.writeOK(primaryColl.insert({_id: 0})); +assert.commandWorked(primaryColl.insert({_id: 0})); const collInfo = primaryDB.getCollectionInfos({name: primaryColl.getName()})[0]; assert(collInfo.info.uuid, 'newly created collection expected to have a UUID: ' + tojson(collInfo)); @@ -48,7 +48,7 @@ function ResyncWithFailpoint(failpointName, failpointData) { jsTestLog('Remove collection on the primary and insert a new document, recreating it.'); assert(primaryColl.drop()); - assert.writeOK(primaryColl.insert({_id: 0}, {writeConcern: {w: 'majority'}})); + assert.commandWorked(primaryColl.insert({_id: 0}, {writeConcern: {w: 'majority'}})); const newCollInfo = primaryDB.getCollectionInfos({name: primaryColl.getName()})[0]; assert(collInfo.info.uuid, 'recreated collection expected to have a UUID: ' + tojson(collInfo)); assert.neq(collInfo.info.uuid, diff --git a/jstests/replsets/initial_sync_views.js b/jstests/replsets/initial_sync_views.js index ae202aff0e7..b9866272f28 100644 --- a/jstests/replsets/initial_sync_views.js +++ b/jstests/replsets/initial_sync_views.js @@ -16,7 +16,7 @@ replTest.initiate(); let primaryDB = replTest.getPrimary().getDB(testName); for (let i = 0; i < 10; ++i) { - assert.writeOK(primaryDB.coll.insert({a: i})); + assert.commandWorked(primaryDB.coll.insert({a: i})); } // Setup view. 
diff --git a/jstests/replsets/initial_sync_with_write_load.js b/jstests/replsets/initial_sync_with_write_load.js index 180487abe50..fc1164c6c43 100644 --- a/jstests/replsets/initial_sync_with_write_load.js +++ b/jstests/replsets/initial_sync_with_write_load.js @@ -35,7 +35,7 @@ assert(master == conns[0], "conns[0] assumed to be master"); assert(a_conn.host == master.host); // create an oplog entry with an insert -assert.writeOK( +assert.commandWorked( A.foo.insert({x: 1}, {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}})); replTest.stop(BID); @@ -44,7 +44,7 @@ var work = function() { print("starting loadgen"); var start = new Date().getTime(); - assert.writeOK(db.timeToStartTrigger.insert({_id: 1})); + assert.commandWorked(db.timeToStartTrigger.insert({_id: 1})); while (true) { for (x = 0; x < 100; x++) { diff --git a/jstests/replsets/interrupted_batch_insert.js b/jstests/replsets/interrupted_batch_insert.js index d8b6419398b..b5c9e62c18b 100644 --- a/jstests/replsets/interrupted_batch_insert.js +++ b/jstests/replsets/interrupted_batch_insert.js @@ -79,7 +79,7 @@ restartServerReplication(conns[2]); // Issue a write to the new primary. var collOnNewPrimary = replTest.nodes[1].getCollection(collName); -assert.writeOK(collOnNewPrimary.insert({singleDoc: 1}, {writeConcern: {w: "majority"}})); +assert.commandWorked(collOnNewPrimary.insert({singleDoc: 1}, {writeConcern: {w: "majority"}})); // Isolate node 1, forcing it to step down as primary, and reconnect node 0, allowing it to step // up again. diff --git a/jstests/replsets/last_vote.js b/jstests/replsets/last_vote.js index 62901259364..4b77dcbd334 100644 --- a/jstests/replsets/last_vote.js +++ b/jstests/replsets/last_vote.js @@ -37,7 +37,7 @@ function getLastVoteDoc(conn) { function setLastVoteDoc(conn, term, candidate) { var newLastVote = {term: term, candidateIndex: rst.getNodeId(candidate)}; - return assert.writeOK(conn.getCollection(lastVoteNS).update({}, newLastVote)); + return assert.commandWorked(conn.getCollection(lastVoteNS).update({}, newLastVote)); } function assertNodeHasLastVote(node, term, candidate) { diff --git a/jstests/replsets/lastop.js b/jstests/replsets/lastop.js index c1fa2ffb21f..1abcd15abeb 100644 --- a/jstests/replsets/lastop.js +++ b/jstests/replsets/lastop.js @@ -15,35 +15,35 @@ var m2 = new Mongo(primary.host); // Do a write with m1, then a write with m2, then a no-op write with m1. m1 should have a lastOp // of m2's write. 
-assert.writeOK(m1.getCollection("test.foo").insert({m1: 1})); +assert.commandWorked(m1.getCollection("test.foo").insert({m1: 1})); var firstOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; -assert.writeOK(m2.getCollection("test.foo").insert({m2: 99})); +assert.commandWorked(m2.getCollection("test.foo").insert({m2: 99})); var secondOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; // No-op update -assert.writeOK(m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 1}})); +assert.commandWorked(m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 1}})); var noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; assert.eq(noOp, secondOp); -assert.writeOK(m1.getCollection("test.foo").remove({m1: 1})); +assert.commandWorked(m1.getCollection("test.foo").remove({m1: 1})); var thirdOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; -assert.writeOK(m2.getCollection("test.foo").insert({m2: 98})); +assert.commandWorked(m2.getCollection("test.foo").insert({m2: 98})); var fourthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; // No-op delete -assert.writeOK(m1.getCollection("test.foo").remove({m1: 1})); +assert.commandWorked(m1.getCollection("test.foo").remove({m1: 1})); noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; assert.eq(noOp, fourthOp); // Dummy write, for a new lastOp. -assert.writeOK(m1.getCollection("test.foo").insert({m1: 99})); +assert.commandWorked(m1.getCollection("test.foo").insert({m1: 99})); var fifthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; -assert.writeOK(m2.getCollection("test.foo").insert({m2: 97})); +assert.commandWorked(m2.getCollection("test.foo").insert({m2: 97})); var sixthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; // No-op find-and-modify delete @@ -55,7 +55,7 @@ assert.eq(noOp, sixthOp); assert.commandWorked(m1.getCollection("test.foo").createIndex({x: 1})); var seventhOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; -assert.writeOK(m2.getCollection("test.foo").insert({m2: 96})); +assert.commandWorked(m2.getCollection("test.foo").insert({m2: 96})); var eighthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; // No-op create index. 
@@ -64,10 +64,10 @@ noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; assert.eq(noOp, eighthOp); -assert.writeOK(m1.getCollection("test.foo").insert({_id: 1, x: 1})); +assert.commandWorked(m1.getCollection("test.foo").insert({_id: 1, x: 1})); var ninthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; -assert.writeOK(m2.getCollection("test.foo").insert({m2: 991})); +assert.commandWorked(m2.getCollection("test.foo").insert({m2: 991})); var tenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; // update with immutable field error @@ -78,7 +78,7 @@ noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; assert.eq(noOp, tenthOp); -assert.writeOK(m2.getCollection("test.foo").insert({m2: 992})); +assert.commandWorked(m2.getCollection("test.foo").insert({m2: 992})); var eleventhOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; // find-and-modify immutable field error @@ -94,13 +94,13 @@ noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; assert.eq(noOp, eleventhOp); var bigString = new Array(3000).toString(); -assert.writeOK(m2.getCollection("test.foo").insert({m2: 994, m3: bigString})); +assert.commandWorked(m2.getCollection("test.foo").insert({m2: 994, m3: bigString})); // No-op insert -assert.writeOK(m1.getCollection("test.foo").insert({_id: 5, x: 5})); +assert.commandWorked(m1.getCollection("test.foo").insert({_id: 5, x: 5})); var thirteenthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; -assert.writeOK(m2.getCollection("test.foo").insert({m2: 991})); +assert.commandWorked(m2.getCollection("test.foo").insert({m2: 991})); var fourteenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; // Hits DuplicateKey error and fails insert -- no-op diff --git a/jstests/replsets/libs/election_handoff.js b/jstests/replsets/libs/election_handoff.js index f05e6b702d7..f29f55853bd 100644 --- a/jstests/replsets/libs/election_handoff.js +++ b/jstests/replsets/libs/election_handoff.js @@ -50,7 +50,7 @@ var ElectionHandoffTest = (function() { // Make sure all secondaries are ready before stepping down. We must additionally // make sure that the primary is aware that the secondaries are ready and caught up // to the primary's lastApplied, so we issue a dummy write and wait on its optime. - assert.writeOK(primary.getDB("test").secondariesMustBeCaughtUpToHere.insert( + assert.commandWorked(primary.getDB("test").secondariesMustBeCaughtUpToHere.insert( {"a": 1}, {writeConcern: {w: rst.nodes.length}})); rst.awaitNodesAgreeOnAppliedOpTime(); diff --git a/jstests/replsets/libs/rename_across_dbs.js b/jstests/replsets/libs/rename_across_dbs.js index fe42cab63b6..d32d6a11627 100644 --- a/jstests/replsets/libs/rename_across_dbs.js +++ b/jstests/replsets/libs/rename_across_dbs.js @@ -105,7 +105,7 @@ var RenameAcrossDatabasesTest = function(options) { // options.dropTarget is true. 
const dropTarget = options.dropTarget || false; if (dropTarget) { - assert.writeOK(targetColl.insert({_id: 1000, target: 1})); + assert.commandWorked(targetColl.insert({_id: 1000, target: 1})); assert.commandWorked(targetColl.createIndex({target: 1})); } @@ -116,7 +116,7 @@ var RenameAcrossDatabasesTest = function(options) { const numDocs = 10; _testLog('Inserting ' + numDocs + ' documents into source collection.'); for (let i = 0; i < numDocs; ++i) { - assert.writeOK(sourceColl.insert({_id: i, source: 1})); + assert.commandWorked(sourceColl.insert({_id: i, source: 1})); } const numNonIdIndexes = 3; _testLog('Creating ' + numNonIdIndexes + ' indexes.'); diff --git a/jstests/replsets/libs/rollback_test.js b/jstests/replsets/libs/rollback_test.js index 3abb424585b..478034a6ba8 100644 --- a/jstests/replsets/libs/rollback_test.js +++ b/jstests/replsets/libs/rollback_test.js @@ -309,7 +309,7 @@ function RollbackTest(name = "RollbackTest", replSet) { // Insert one document to ensure rollback will not be skipped. let dbName = "EnsureThereIsAtLeastOneOperationToRollback"; - assert.writeOK(curPrimary.getDB(dbName).ensureRollback.insert( + assert.commandWorked(curPrimary.getDB(dbName).ensureRollback.insert( {thisDocument: 'is inserted to ensure rollback is not skipped'})); log(`Isolating the primary ${curPrimary.host} so it will step down`); diff --git a/jstests/replsets/libs/secondary_reads_test.js b/jstests/replsets/libs/secondary_reads_test.js index 192421827f8..1d712fce05a 100644 --- a/jstests/replsets/libs/secondary_reads_test.js +++ b/jstests/replsets/libs/secondary_reads_test.js @@ -97,7 +97,7 @@ function SecondaryReadsTest(name = "secondary_reads_test") { this.stopReaders = function() { print("signaling readers to stop..."); assert.gt(readers.length, 0, "no readers to stop"); - assert.writeOK(primaryDB.getCollection(signalColl).insert({_id: testDoneId})); + assert.commandWorked(primaryDB.getCollection(signalColl).insert({_id: testDoneId})); for (let i = 0; i < readers.length; i++) { const await = readers[i]; await (); diff --git a/jstests/replsets/libs/tags.js b/jstests/replsets/libs/tags.js index 2f52516e4b3..e5861ee0bad 100644 --- a/jstests/replsets/libs/tags.js +++ b/jstests/replsets/libs/tags.js @@ -174,7 +174,7 @@ var TagsTest = function(options) { var writeConcern = { writeConcern: {w: expectedWritableNodesCount, wtimeout: replTest.kDefaultTimeoutMS} }; - assert.writeOK(primary.getDB('foo').bar.insert({x: 100}, writeConcern)); + assert.commandWorked(primary.getDB('foo').bar.insert({x: 100}, writeConcern)); jsTestLog('ensurePrimary - Successfully written a document to primary node (' + replTest.nodes[nodeId].host + ') using a write concern of w:' + expectedWritableNodesCount); @@ -218,7 +218,7 @@ var TagsTest = function(options) { jsTestLog('Non-existent write concern should be rejected.'); options = {writeConcern: {w: 'blahblah', wtimeout: ReplSetTest.kDefaultTimeoutMS}}; - assert.writeOK(primary.getDB('foo').bar.insert(doc)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc)); var result = assert.writeError(primary.getDB('foo').bar.insert(doc, options)); assert.neq(null, result.getWriteConcernError()); assert.eq(ErrorCodes.UnknownReplWriteConcern, @@ -227,7 +227,7 @@ var TagsTest = function(options) { jsTestLog('Write concern "3 or 4" should fail - 3 and 4 are not connected to the primary.'); var options = {writeConcern: {w: '3 or 4', wtimeout: failTimeout}}; - assert.writeOK(primary.getDB('foo').bar.insert(doc)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc)); 
result = primary.getDB('foo').bar.insert(doc, options); assert.neq(null, result.getWriteConcernError()); assert(result.getWriteConcernError().errInfo.wtimeout); @@ -240,12 +240,12 @@ var TagsTest = function(options) { jsTestLog('Write concern "3 or 4" should work - 4 is now connected to the primary ' + primary.host + ' via node 1 ' + replTest.nodes[1].host); options = {writeConcern: {w: '3 or 4', wtimeout: ReplSetTest.kDefaultTimeoutMS}}; - assert.writeOK(primary.getDB('foo').bar.insert(doc)); - assert.writeOK(primary.getDB('foo').bar.insert(doc, options)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc, options)); jsTestLog('Write concern "3 and 4" should fail - 3 is not connected to the primary.'); options = {writeConcern: {w: '3 and 4', wtimeout: failTimeout}}; - assert.writeOK(primary.getDB('foo').bar.insert(doc)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc)); result = assert.writeError(primary.getDB('foo').bar.insert(doc, options)); assert.neq(null, result.getWriteConcernError()); assert(result.getWriteConcernError().errInfo.wtimeout, @@ -259,24 +259,24 @@ var TagsTest = function(options) { jsTestLog('Write concern "3 and 4" should work - ' + 'nodes 3 and 4 are connected to primary via node 1.'); options = {writeConcern: {w: '3 and 4', wtimeout: ReplSetTest.kDefaultTimeoutMS}}; - assert.writeOK(primary.getDB('foo').bar.insert(doc)); - assert.writeOK(primary.getDB('foo').bar.insert(doc, options)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc, options)); jsTestLog('Write concern "2" - writes to primary only.'); options = {writeConcern: {w: '2', wtimeout: 0}}; - assert.writeOK(primary.getDB('foo').bar.insert(doc)); - assert.writeOK(primary.getDB('foo').bar.insert(doc, options)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc, options)); jsTestLog('Write concern "1 and 2"'); options = {writeConcern: {w: '1 and 2', wtimeout: 0}}; - assert.writeOK(primary.getDB('foo').bar.insert(doc)); - assert.writeOK(primary.getDB('foo').bar.insert(doc, options)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc, options)); jsTestLog('Write concern "2 dc and 3 server"'); primary = ensurePrimary(2, replTest.nodes.slice(0, 3), replTest.nodes.length); options = {writeConcern: {w: '2 dc and 3 server', wtimeout: ReplSetTest.kDefaultTimeoutMS}}; - assert.writeOK(primary.getDB('foo').bar.insert(doc)); - assert.writeOK(primary.getDB('foo').bar.insert(doc, options)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc, options)); jsTestLog('Bringing down current primary node 2 ' + primary.host + ' to allow node 1 ' + replTest.nodes[1].host + ' to become primary.'); @@ -295,13 +295,13 @@ var TagsTest = function(options) { jsTestLog('Write concern "3 and 4" should still work with new primary node 1 ' + primary.host); options = {writeConcern: {w: '3 and 4', wtimeout: ReplSetTest.kDefaultTimeoutMS}}; - assert.writeOK(primary.getDB('foo').bar.insert(doc)); - assert.writeOK(primary.getDB('foo').bar.insert(doc, options)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc, options)); jsTestLog('Write concern "2" should fail because node 2 ' + replTest.nodes[2].host + ' is 
down.'); options = {writeConcern: {w: '2', wtimeout: failTimeout}}; - assert.writeOK(primary.getDB('foo').bar.insert(doc)); + assert.commandWorked(primary.getDB('foo').bar.insert(doc)); result = assert.writeError(primary.getDB('foo').bar.insert(doc, options)); assert.neq(null, result.getWriteConcernError()); assert(result.getWriteConcernError().errInfo.wtimeout); diff --git a/jstests/replsets/linearizable_read_concern.js b/jstests/replsets/linearizable_read_concern.js index 5984577a4ed..58f4f633a6c 100644 --- a/jstests/replsets/linearizable_read_concern.js +++ b/jstests/replsets/linearizable_read_concern.js @@ -52,12 +52,12 @@ var primary = replTest.getPrimary(); var secondaries = replTest.getSecondaries(); // Do a write to have something to read. -assert.writeOK(primary.getDB("test").foo.insert( +assert.commandWorked(primary.getDB("test").foo.insert( {"number": 7}, {"writeConcern": {"w": "majority", "wtimeout": ReplSetTest.kDefaultTimeoutMS}})); jsTestLog("Testing linearizable readConcern parsing"); // This command is sent to the primary, and the primary is fully connected so it should work. -var goodRead = assert.writeOK(primary.getDB("test").runCommand( +var goodRead = assert.commandWorked(primary.getDB("test").runCommand( {'find': 'foo', readConcern: {level: "linearizable"}, "maxTimeMS": 60000})); assert.eq(goodRead.cursor.firstBatch[0].number, 7); diff --git a/jstests/replsets/localhostAuthBypass.js b/jstests/replsets/localhostAuthBypass.js index 5a0a1b95562..1470167021e 100644 --- a/jstests/replsets/localhostAuthBypass.js +++ b/jstests/replsets/localhostAuthBypass.js @@ -85,9 +85,9 @@ var assertCanRunCommands = function(mongo) { // will throw on failure test.system.users.findOne(); - assert.writeOK(test.foo.save({_id: 0})); - assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}})); - assert.writeOK(test.foo.remove({_id: 0})); + assert.commandWorked(test.foo.save({_id: 0})); + assert.commandWorked(test.foo.update({_id: 0}, {$set: {x: 20}})); + assert.commandWorked(test.foo.remove({_id: 0})); test.foo.mapReduce( function() { diff --git a/jstests/replsets/maintenance_non-blocking.js b/jstests/replsets/maintenance_non-blocking.js index 5581ffe3546..ffc2c374c21 100644 --- a/jstests/replsets/maintenance_non-blocking.js +++ b/jstests/replsets/maintenance_non-blocking.js @@ -20,7 +20,7 @@ doTest = function() { // save some records var len = 100; for (var i = 0; i < len; ++i) { - assert.writeOK(mColl.save({a: i})); + assert.commandWorked(mColl.save({a: i})); } print("******* replSetMaintenance called on secondary ************* "); @@ -31,7 +31,7 @@ doTest = function() { assert.eq(false, ismaster.secondary); print("******* writing to primary ************* "); - assert.writeOK(mColl.save({_id: -1})); + assert.commandWorked(mColl.save({_id: -1})); printjson(sDB.currentOp()); assert.neq(null, mColl.findOne()); diff --git a/jstests/replsets/mr_nonrepl_coll_in_local_db.js b/jstests/replsets/mr_nonrepl_coll_in_local_db.js index a6d6cad7e7a..5edce6d6a87 100644 --- a/jstests/replsets/mr_nonrepl_coll_in_local_db.js +++ b/jstests/replsets/mr_nonrepl_coll_in_local_db.js @@ -29,7 +29,7 @@ for (let i = 0; i < 1000; i++) { const array = Array.from({lengthToInsert: 10000}, _ => Math.floor(Math.random() * 100)); bulk.insert({arr: array}); } -assert.writeOK(bulk.execute()); +assert.commandWorked(bulk.execute()); // Run a simple map-reduce. 
const result = coll.mapReduce( diff --git a/jstests/replsets/noop_writes_wait_for_write_concern.js b/jstests/replsets/noop_writes_wait_for_write_concern.js index d5731c2b7c4..d8c2a970a0d 100644 --- a/jstests/replsets/noop_writes_wait_for_write_concern.js +++ b/jstests/replsets/noop_writes_wait_for_write_concern.js @@ -51,7 +51,7 @@ var commands = []; commands.push({ req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1}}]}, setupFunc: function() { - assert.writeOK(coll.insert({_id: 1})); + assert.commandWorked(coll.insert({_id: 1})); }, confirmFunc: function(res) { assert.commandWorkedIgnoringWriteConcernErrors(res); @@ -66,8 +66,8 @@ commands.push({ commands.push({ req: {update: collName, updates: [{q: {a: 1}, u: {b: 2}}]}, setupFunc: function() { - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.update({a: 1}, {b: 2})); + assert.commandWorked(coll.insert({a: 1})); + assert.commandWorked(coll.update({a: 1}, {b: 2})); }, confirmFunc: function(res) { assert.commandWorkedIgnoringWriteConcernErrors(res); @@ -82,8 +82,8 @@ commands.push({ commands.push({ req: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 2}}}]}, setupFunc: function() { - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.update({a: 1}, {$set: {b: 2}})); + assert.commandWorked(coll.insert({a: 1})); + assert.commandWorked(coll.update({a: 1}, {$set: {b: 2}})); }, confirmFunc: function(res) { assert.commandWorkedIgnoringWriteConcernErrors(res); @@ -97,8 +97,8 @@ commands.push({ commands.push({ req: {delete: collName, deletes: [{q: {a: 1}, limit: 1}]}, setupFunc: function() { - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.remove({a: 1})); + assert.commandWorked(coll.insert({a: 1})); + assert.commandWorked(coll.remove({a: 1})); }, confirmFunc: function(res) { assert.commandWorkedIgnoringWriteConcernErrors(res); @@ -110,7 +110,7 @@ commands.push({ commands.push({ req: {createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}, setupFunc: function() { - assert.writeOK(coll.insert({a: 1})); + assert.commandWorked(coll.insert({a: 1})); assert.commandWorkedIgnoringWriteConcernErrors( db.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]})); }, @@ -125,7 +125,7 @@ commands.push({ commands.push({ req: {findAndModify: collName, query: {a: 1}, update: {b: 2}}, setupFunc: function() { - assert.writeOK(coll.insert({a: 1})); + assert.commandWorked(coll.insert({a: 1})); assert.commandWorkedIgnoringWriteConcernErrors( db.runCommand({findAndModify: collName, query: {a: 1}, update: {b: 2}})); }, @@ -141,7 +141,7 @@ commands.push({ commands.push({ req: {findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}}, setupFunc: function() { - assert.writeOK(coll.insert({a: 1})); + assert.commandWorked(coll.insert({a: 1})); assert.commandWorkedIgnoringWriteConcernErrors( db.runCommand({findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}})); }, @@ -156,7 +156,7 @@ commands.push({ commands.push({ req: {dropDatabase: 1}, setupFunc: function() { - assert.writeOK(coll.insert({a: 1})); + assert.commandWorked(coll.insert({a: 1})); assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({dropDatabase: 1})); }, confirmFunc: function(res) { @@ -167,7 +167,7 @@ commands.push({ commands.push({ req: {drop: collName}, setupFunc: function() { - assert.writeOK(coll.insert({a: 1})); + assert.commandWorked(coll.insert({a: 1})); assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({drop: collName})); }, confirmFunc: function(res) { @@ -188,7 +188,7 @@ commands.push({ 
commands.push({ req: {insert: collName, documents: [{_id: 1}]}, setupFunc: function() { - assert.writeOK(coll.insert({_id: 1})); + assert.commandWorked(coll.insert({_id: 1})); }, confirmFunc: function(res) { assert.commandWorkedIgnoringWriteErrorsAndWriteConcernErrors(res); diff --git a/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js b/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js index e024a9853f7..4c295b38b73 100644 --- a/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js +++ b/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js @@ -39,7 +39,7 @@ function testFCVNoop(targetVersion) { replTest.stop(1); // Insert a document to ensure there is a last optime. - assert.writeOK(primary.getDB("test").foo.insert({x: 1})); + assert.commandWorked(primary.getDB("test").foo.insert({x: 1})); // We run the command on a different connection. If the the command were run on the same // connection, then the client last op for the noop write would be the last op of the diff --git a/jstests/replsets/opcounters_repl.js b/jstests/replsets/opcounters_repl.js index 5bf31a1f5ee..7ba6d802b89 100644 --- a/jstests/replsets/opcounters_repl.js +++ b/jstests/replsets/opcounters_repl.js @@ -66,21 +66,21 @@ assert.eq(diff.secondary.command, 1); // 2. Insert a document. diff = getOpCountersDiff(() => { - assert.writeOK(primaryColl.insert({_id: 0}, {writeConcern: {w: 2}})); + assert.commandWorked(primaryColl.insert({_id: 0}, {writeConcern: {w: 2}})); }); assert.eq(diff.primary.insert, 1); assert.eq(diff.secondary.insert, 1); // 3. Update a document. diff = getOpCountersDiff(() => { - assert.writeOK(primaryColl.update({_id: 0}, {$set: {a: 1}}, {writeConcern: {w: 2}})); + assert.commandWorked(primaryColl.update({_id: 0}, {$set: {a: 1}}, {writeConcern: {w: 2}})); }); assert.eq(diff.primary.update, 1); assert.eq(diff.secondary.update, 1); // 4. Delete a document. 
diff = getOpCountersDiff(() => { - assert.writeOK(primaryColl.remove({_id: 0}, {writeConcern: {w: 2}})); + assert.commandWorked(primaryColl.remove({_id: 0}, {writeConcern: {w: 2}})); }); assert.eq(diff.primary.delete, 1); assert.eq(diff.secondary.delete, 1); diff --git a/jstests/replsets/oplog_format.js b/jstests/replsets/oplog_format.js index d4ae2345f30..fc72b20d581 100644 --- a/jstests/replsets/oplog_format.js +++ b/jstests/replsets/oplog_format.js @@ -34,7 +34,7 @@ assertLastOplog({_id: 1}, null, "save -- setup "); var msg = "IncRewriteExistingField: $inc $set"; coll.save({_id: 1, a: 2}); assertLastOplog({_id: 1, a: 2}, {_id: 1}, "save " + msg); -var res = assert.writeOK(coll.update({}, {$inc: {a: 1}, $set: {b: 2}})); +var res = assert.commandWorked(coll.update({}, {$inc: {a: 1}, $set: {b: 2}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, a: 3, b: 2}, coll.findOne({}), msg); assertLastOplog({$v: 1, $set: {a: 3, b: 2}}, {_id: 1}, msg); @@ -42,7 +42,7 @@ assertLastOplog({$v: 1, $set: {a: 3, b: 2}}, {_id: 1}, msg); var msg = "IncRewriteNonExistingField: $inc $set"; coll.save({_id: 1, c: 0}); assertLastOplog({_id: 1, c: 0}, {_id: 1}, "save " + msg); -res = assert.writeOK(coll.update({}, {$inc: {a: 1}, $set: {b: 2}})); +res = assert.commandWorked(coll.update({}, {$inc: {a: 1}, $set: {b: 2}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, c: 0, a: 1, b: 2}, coll.findOne({}), msg); assertLastOplog({$v: 1, $set: {a: 1, b: 2}}, {_id: 1}, msg); @@ -50,7 +50,7 @@ assertLastOplog({$v: 1, $set: {a: 1, b: 2}}, {_id: 1}, msg); var msg = "TwoNestedPulls: two $pull"; coll.save({_id: 1, a: {b: [1, 2], c: [1, 2]}}); assertLastOplog({_id: 1, a: {b: [1, 2], c: [1, 2]}}, {_id: 1}, "save " + msg); -res = assert.writeOK(coll.update({}, {$pull: {'a.b': 2, 'a.c': 2}})); +res = assert.commandWorked(coll.update({}, {$pull: {'a.b': 2, 'a.c': 2}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, a: {b: [1], c: [1]}}, coll.findOne({}), msg); assertLastOplog({$v: 1, $set: {'a.b': [1], 'a.c': [1]}}, {_id: 1}, msg); @@ -58,7 +58,7 @@ assertLastOplog({$v: 1, $set: {'a.b': [1], 'a.c': [1]}}, {_id: 1}, msg); var msg = "MultiSets: two $set"; coll.save({_id: 1, a: 1, b: 1}); assertLastOplog({_id: 1, a: 1, b: 1}, {_id: 1}, "save " + msg); -res = assert.writeOK(coll.update({}, {$set: {a: 2, b: 2}})); +res = assert.commandWorked(coll.update({}, {$set: {a: 2, b: 2}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, a: 2, b: 2}, coll.findOne({}), msg); assertLastOplog({$v: 1, $set: {a: 2, b: 2}}, {_id: 1}, msg); @@ -68,36 +68,36 @@ assertLastOplog({$v: 1, $set: {a: 2, b: 2}}, {_id: 1}, msg); var msg = "bad single $set"; coll.save({_id: 1, a: 1}); assertLastOplog({_id: 1, a: 1}, {_id: 1}, "save " + msg); -res = assert.writeOK(coll.update({}, {$set: {a: 2}})); +res = assert.commandWorked(coll.update({}, {$set: {a: 2}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, a: 2}, coll.findOne({}), msg); assertLastOplog({$v: 1, $set: {a: 2}}, {_id: 1}, msg); var msg = "bad single $inc"; -res = assert.writeOK(coll.update({}, {$inc: {a: 1}})); +res = assert.commandWorked(coll.update({}, {$inc: {a: 1}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, a: 3}, coll.findOne({}), msg); 
assertLastOplog({$v: 1, $set: {a: 3}}, {_id: 1}, msg); var msg = "bad double $set"; -res = assert.writeOK(coll.update({}, {$set: {a: 2, b: 2}})); +res = assert.commandWorked(coll.update({}, {$set: {a: 2, b: 2}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, a: 2, b: 2}, coll.findOne({}), msg); assertLastOplog({$v: 1, $set: {a: 2, b: 2}}, {_id: 1}, msg); var msg = "bad save"; -assert.writeOK(coll.save({_id: 1, a: [2]})); +assert.commandWorked(coll.save({_id: 1, a: [2]})); assert.docEq({_id: 1, a: [2]}, coll.findOne({}), msg); assertLastOplog({_id: 1, a: [2]}, {_id: 1}, msg); var msg = "bad array $inc"; -res = assert.writeOK(coll.update({}, {$inc: {"a.0": 1}})); +res = assert.commandWorked(coll.update({}, {$inc: {"a.0": 1}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, a: [3]}, coll.findOne({}), msg); var lastTS = assertLastOplog({$v: 1, $set: {"a.0": 3}}, {_id: 1}, msg); var msg = "bad $setOnInsert"; -res = assert.writeOK(coll.update({}, {$setOnInsert: {a: -1}})); +res = assert.commandWorked(coll.update({}, {$setOnInsert: {a: -1}})); assert.eq(res.nMatched, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, a: [3]}, coll.findOne({}), msg); // No-op var otherTS = assertLastOplog({$v: 1, $set: {"a.0": 3}}, {_id: 1}, msg); // Nothing new @@ -107,7 +107,7 @@ coll.remove({}); assert.eq(coll.find().itcount(), 0, "collection not empty"); var msg = "bad $setOnInsert w/upsert"; -res = assert.writeOK(coll.update({}, {$setOnInsert: {a: 200}}, {upsert: true})); // upsert +res = assert.commandWorked(coll.update({}, {$setOnInsert: {a: 200}}, {upsert: true})); // upsert assert.eq(res.nUpserted, 1, "update failed for '" + msg + "': " + res.toString()); var id = res.getUpsertedId()._id; assert.docEq({_id: id, a: 200}, coll.findOne({}), msg); // No-op @@ -131,21 +131,22 @@ assertLastOplog({$set:{"a": [1,2,3]}}, {_id:1}, msg); // new format var msg = "bad array $push 2"; coll.save({_id: 1, a: "foo"}); -res = assert.writeOK(coll.update({}, {$push: {c: 18}})); +res = assert.commandWorked(coll.update({}, {$push: {c: 18}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, a: "foo", c: [18]}, coll.findOne({}), msg); assertLastOplog({$v: 1, $set: {"c": [18]}}, {_id: 1}, msg); var msg = "bad array $push $slice"; coll.save({_id: 1, a: {b: [18]}}); -res = assert.writeOK(coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [1, 2], $slice: -2}}})); +res = assert.commandWorked( + coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [1, 2], $slice: -2}}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, a: {b: [1, 2]}}, coll.findOne({}), msg); assertLastOplog({$v: 1, $set: {"a.b": [1, 2]}}, {_id: 1}, msg); var msg = "bad array $push $sort ($slice -100)"; coll.save({_id: 1, a: {b: [{c: 2}, {c: 1}]}}); -res = assert.writeOK( +res = assert.commandWorked( coll.update({}, {$push: {"a.b": {$each: [{c: -1}], $sort: {c: 1}, $slice: -100}}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, a: {b: [{c: -1}, {c: 1}, {c: 2}]}}, coll.findOne({}), msg); @@ -153,7 +154,7 @@ assertLastOplog({$v: 1, $set: {"a.b": [{c: -1}, {c: 1}, {c: 2}]}}, {_id: 1}, msg var msg = "bad array $push $slice $sort"; coll.save({_id: 1, a: [{b: 2}, {b: 1}]}); -res = assert.writeOK( +res = assert.commandWorked( coll.update({_id: {$gt: 0}}, 
{$push: {a: {$each: [{b: -1}], $slice: -2, $sort: {b: 1}}}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, a: [{b: 1}, {b: 2}]}, coll.findOne({}), msg); @@ -161,7 +162,7 @@ assertLastOplog({$v: 1, $set: {a: [{b: 1}, {b: 2}]}}, {_id: 1}, msg); var msg = "bad array $push $slice $sort first two"; coll.save({_id: 1, a: {b: [{c: 2}, {c: 1}]}}); -res = assert.writeOK( +res = assert.commandWorked( coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [{c: -1}], $slice: -2, $sort: {c: 1}}}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, a: {b: [{c: 1}, {c: 2}]}}, coll.findOne({}), msg); @@ -169,7 +170,7 @@ assertLastOplog({$v: 1, $set: {"a.b": [{c: 1}, {c: 2}]}}, {_id: 1}, msg); var msg = "bad array $push $slice $sort reversed first two"; coll.save({_id: 1, a: {b: [{c: 1}, {c: 2}]}}); -res = assert.writeOK( +res = assert.commandWorked( coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [{c: -1}], $slice: -2, $sort: {c: -1}}}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id: 1, a: {b: [{c: 1}, {c: -1}]}}, coll.findOne({}), msg); diff --git a/jstests/replsets/oplog_replay_on_startup_with_bad_op.js b/jstests/replsets/oplog_replay_on_startup_with_bad_op.js index 04f12dc7c1c..cf7911c248e 100644 --- a/jstests/replsets/oplog_replay_on_startup_with_bad_op.js +++ b/jstests/replsets/oplog_replay_on_startup_with_bad_op.js @@ -30,7 +30,7 @@ var lastTs = lastOplogDoc.ts; var newTs = Timestamp(lastTs.t + 1, 1); var term = lastOplogDoc.t; -assert.writeOK(oplog.insert({ +assert.commandWorked(oplog.insert({ ts: newTs, t: term, h: 1, @@ -56,8 +56,8 @@ var injectedMinValidDoc = { // This weird mechanism is the only way to bypass mongod's attempt to fill in null // Timestamps. var minValidColl = conn.getCollection('local.replset.minvalid'); -assert.writeOK(minValidColl.remove({})); -assert.writeOK(minValidColl.update({}, {$set: injectedMinValidDoc}, {upsert: true})); +assert.commandWorked(minValidColl.remove({})); +assert.commandWorked(minValidColl.update({}, {$set: injectedMinValidDoc}, {upsert: true})); assert.eq(minValidColl.findOne(), injectedMinValidDoc, "If the Timestamps differ, the server may be filling in the null timestamps"); diff --git a/jstests/replsets/oplog_term.js b/jstests/replsets/oplog_term.js index f21e01f4a98..b3de5b8c57d 100644 --- a/jstests/replsets/oplog_term.js +++ b/jstests/replsets/oplog_term.js @@ -12,7 +12,7 @@ replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 5 * 1000); // Default protocol version is 1 - 'term' field should present in oplog entry. 
var primary = replSet.getPrimary(); var collection = primary.getDB('test').getCollection(name); -assert.writeOK(collection.save({_id: 1})); +assert.commandWorked(collection.save({_id: 1})); var oplogEntry = getLatestOp(primary); assert(oplogEntry, 'unexpected empty oplog'); diff --git a/jstests/replsets/oplog_wallclock.js b/jstests/replsets/oplog_wallclock.js index a744c207d7e..47cf929e3ea 100644 --- a/jstests/replsets/oplog_wallclock.js +++ b/jstests/replsets/oplog_wallclock.js @@ -17,13 +17,13 @@ replSet.initiate(); var primary = replSet.getPrimary(); var collection = primary.getDB('test').getCollection(name); -assert.writeOK(collection.insert({_id: 1, val: 'x'})); +assert.commandWorked(collection.insert({_id: 1, val: 'x'})); assertLastOplogHasWT(primary, 'insert'); -assert.writeOK(collection.update({_id: 1}, {val: 'y'})); +assert.commandWorked(collection.update({_id: 1}, {val: 'y'})); assertLastOplogHasWT(primary, 'update'); -assert.writeOK(collection.remove({_id: 1})); +assert.commandWorked(collection.remove({_id: 1})); assertLastOplogHasWT(primary, 'remove'); replSet.stopSet(); diff --git a/jstests/replsets/optime.js b/jstests/replsets/optime.js index 867c6258441..be355209f3f 100644 --- a/jstests/replsets/optime.js +++ b/jstests/replsets/optime.js @@ -77,7 +77,7 @@ let initialReplStatusInfo = master.getDB('admin').runCommand({replSetGetStatus: // Do an insert to increment optime, but without rolling the oplog // latestOptime should be updated, but earliestOptime should be unchanged var options = {writeConcern: {w: replTest.nodes.length}}; -assert.writeOK(master.getDB('test').foo.insert({a: 1}, options)); +assert.commandWorked(master.getDB('test').foo.insert({a: 1}, options)); assert.soon(function() { return optimesAndWallTimesAreEqual(replTest, isPersistent); }); diff --git a/jstests/replsets/prepare_transaction_index_build.js b/jstests/replsets/prepare_transaction_index_build.js index aa5d53673e1..36c5533a08e 100644 --- a/jstests/replsets/prepare_transaction_index_build.js +++ b/jstests/replsets/prepare_transaction_index_build.js @@ -29,7 +29,7 @@ const bulk = testColl.initializeUnorderedBulkOp(); for (let i = 0; i < 10; ++i) { bulk.insert({x: i}); } -assert.writeOK(bulk.execute()); +assert.commandWorked(bulk.execute()); // activate failpoint to hang index build on secondary. 
secondary.getDB("admin").runCommand( diff --git a/jstests/replsets/read_after_optime.js b/jstests/replsets/read_after_optime.js index dad48acd925..ef2b736ab83 100644 --- a/jstests/replsets/read_after_optime.js +++ b/jstests/replsets/read_after_optime.js @@ -11,7 +11,7 @@ var config = replTest.getReplSetConfigFromNode(); var runTest = function(testDB, primaryConn) { var dbName = testDB.getName(); - assert.writeOK(primaryConn.getDB(dbName).user.insert({x: 1}, {writeConcern: {w: 2}})); + assert.commandWorked(primaryConn.getDB(dbName).user.insert({x: 1}, {writeConcern: {w: 2}})); var localDB = primaryConn.getDB('local'); diff --git a/jstests/replsets/read_committed.js b/jstests/replsets/read_committed.js index 79a9cd3b0fa..7260fec8698 100644 --- a/jstests/replsets/read_committed.js +++ b/jstests/replsets/read_committed.js @@ -24,27 +24,27 @@ const testCases = { insert: { prepareCollection: function(coll) {}, // No-op write: function(coll, writeConcern) { - assert.writeOK(coll.insert({_id: 1}, writeConcern)); + assert.commandWorked(coll.insert({_id: 1}, writeConcern)); }, expectedBefore: [], expectedAfter: [{_id: 1}], }, update: { prepareCollection: function(coll) { - assert.writeOK(coll.insert({_id: 1, state: 'before'}, majorityWriteConcern)); + assert.commandWorked(coll.insert({_id: 1, state: 'before'}, majorityWriteConcern)); }, write: function(coll, writeConcern) { - assert.writeOK(coll.update({_id: 1}, {$set: {state: 'after'}}, writeConcern)); + assert.commandWorked(coll.update({_id: 1}, {$set: {state: 'after'}}, writeConcern)); }, expectedBefore: [{_id: 1, state: 'before'}], expectedAfter: [{_id: 1, state: 'after'}], }, remove: { prepareCollection: function(coll) { - assert.writeOK(coll.insert({_id: 1}, majorityWriteConcern)); + assert.commandWorked(coll.insert({_id: 1}, majorityWriteConcern)); }, write: function(coll, writeConcern) { - assert.writeOK(coll.remove({_id: 1}, writeConcern)); + assert.commandWorked(coll.remove({_id: 1}, writeConcern)); }, expectedBefore: [{_id: 1}], expectedAfter: [], @@ -121,7 +121,7 @@ for (var testName in testCases) { var test = testCases[testName]; const setUpInitialState = function setUpInitialState() { - assert.writeOK(coll.remove({}, majorityWriteConcern)); + assert.commandWorked(coll.remove({}, majorityWriteConcern)); test.prepareCollection(coll); // Do some sanity checks. assert.eq(doDirtyRead(coll), test.expectedBefore); diff --git a/jstests/replsets/read_committed_after_rollback.js b/jstests/replsets/read_committed_after_rollback.js index 097c75c1564..68d7d138040 100644 --- a/jstests/replsets/read_committed_after_rollback.js +++ b/jstests/replsets/read_committed_after_rollback.js @@ -70,8 +70,8 @@ var oldPrimaryColl = oldPrimary.getCollection(collName); var newPrimaryColl = newPrimary.getCollection(collName); // Set up initial state. -assert.writeOK(oldPrimaryColl.insert({_id: 1, state: 'old'}, - {writeConcern: {w: 'majority', wtimeout: 30000}})); +assert.commandWorked(oldPrimaryColl.insert({_id: 1, state: 'old'}, + {writeConcern: {w: 'majority', wtimeout: 30000}})); assert.eq(doDirtyRead(oldPrimaryColl), 'old'); assert.eq(doCommittedRead(oldPrimaryColl), 'old'); assert.eq(doDirtyRead(newPrimaryColl), 'old'); @@ -86,7 +86,7 @@ oldPrimary.disconnect([newPrimary, pureSecondary]); assert.eq(doDirtyRead(newPrimaryColl), 'old'); // This write will only make it to oldPrimary and will never become committed. 
-assert.writeOK(oldPrimaryColl.save({_id: 1, state: 'INVALID'})); +assert.commandWorked(oldPrimaryColl.save({_id: 1, state: 'INVALID'})); assert.eq(doDirtyRead(oldPrimaryColl), 'INVALID'); assert.eq(doCommittedRead(oldPrimaryColl), 'old'); @@ -106,7 +106,7 @@ assert.soon(function() { // Stop applier on pureSecondary to ensure that writes to newPrimary won't become committed yet. assert.commandWorked( pureSecondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"})); -assert.writeOK(newPrimaryColl.save({_id: 1, state: 'new'})); +assert.commandWorked(newPrimaryColl.save({_id: 1, state: 'new'})); assert.eq(doDirtyRead(newPrimaryColl), 'new'); // Note that we still can't do a committed read from the new primary and reliably get anything, // since we never proved that it learned about the commit level from the old primary before @@ -135,12 +135,12 @@ assert.commandWorked( pureSecondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"})); // Do a write to the new primary so that the old primary can establish a sync source to learn // about the new commit. -assert.writeOK(newPrimary.getDB(name).unrelatedCollection.insert( +assert.commandWorked(newPrimary.getDB(name).unrelatedCollection.insert( {a: 1}, {writeConcern: {w: 'majority', wtimeout: replTest.kDefaultTimeoutMS}})); assert.eq(doCommittedRead(newPrimaryColl), 'new'); // Do another write to the new primary so that the old primary can be sure to receive the // new committed optime. -assert.writeOK(newPrimary.getDB(name).unrelatedCollection.insert( +assert.commandWorked(newPrimary.getDB(name).unrelatedCollection.insert( {a: 2}, {writeConcern: {w: 'majority', wtimeout: replTest.kDefaultTimeoutMS}})); assert.eq(doCommittedRead(oldPrimaryColl), 'new'); diff --git a/jstests/replsets/read_committed_stale_history.js b/jstests/replsets/read_committed_stale_history.js index f40841575f4..79564b62821 100644 --- a/jstests/replsets/read_committed_stale_history.js +++ b/jstests/replsets/read_committed_stale_history.js @@ -57,7 +57,7 @@ var primary = rst.getPrimary(); var secondaries = rst.getSecondaries(); assert.eq(nodes[0], primary); // Wait for all data bearing nodes to get up to date. -assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert( +assert.commandWorked(nodes[0].getDB(dbName).getCollection(collName).insert( {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}})); // Stop the secondaries from replicating. @@ -67,7 +67,7 @@ assert.commandWorked( nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'alwaysOn'})); jsTestLog("Do a write that won't ever reach a majority of nodes"); -assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert({a: 2})); +assert.commandWorked(nodes[0].getDB(dbName).getCollection(collName).insert({a: 2})); // Ensure that the write that was just done is not visible in the committed snapshot. checkDocNotCommitted(nodes[0], {a: 2}); @@ -90,7 +90,7 @@ restartServerReplication(secondaries); waitForPrimary(nodes[1]); jsTest.log("Do a write to the new primary"); -assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert( +assert.commandWorked(nodes[1].getDB(dbName).getCollection(collName).insert( {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}})); // Ensure the new primary still cannot see the write from the old primary. 
diff --git a/jstests/replsets/read_committed_with_catalog_changes.js b/jstests/replsets/read_committed_with_catalog_changes.js index 10c444a1ed9..7e2e054c526 100644 --- a/jstests/replsets/read_committed_with_catalog_changes.js +++ b/jstests/replsets/read_committed_with_catalog_changes.js @@ -38,10 +38,10 @@ load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority. const testCases = { createCollectionInExistingDB: { prepare: function(db) { - assert.writeOK(db.other.insert({_id: 1})); + assert.commandWorked(db.other.insert({_id: 1})); }, performOp: function(db) { - assert.writeOK(db.coll.insert({_id: 1})); + assert.commandWorked(db.coll.insert({_id: 1})); }, blockedCollections: ['coll'], unblockedCollections: ['other'], @@ -49,15 +49,15 @@ const testCases = { createCollectionInNewDB: { prepare: function(db) {}, performOp: function(db) { - assert.writeOK(db.coll.insert({_id: 1})); + assert.commandWorked(db.coll.insert({_id: 1})); }, blockedCollections: ['coll'], unblockedCollections: ['otherDoesNotExist'], // Only existent collections are blocked. }, dropCollection: { prepare: function(db) { - assert.writeOK(db.other.insert({_id: 1})); - assert.writeOK(db.coll.insert({_id: 1})); + assert.commandWorked(db.other.insert({_id: 1})); + assert.commandWorked(db.coll.insert({_id: 1})); }, performOp: function(db) { assert(db.coll.drop()); @@ -67,7 +67,7 @@ const testCases = { }, dropDB: { prepare: function(db) { - assert.writeOK(db.coll.insert({_id: 1})); + assert.commandWorked(db.coll.insert({_id: 1})); // Drop collection explicitly during the preparation phase while we are still able // to write to a majority. Otherwise, dropDatabase() will drop the collection // and wait for the collection drop to be replicated to a majority of the nodes. @@ -81,19 +81,19 @@ const testCases = { }, dropAndRecreateCollection: { prepare: function(db) { - assert.writeOK(db.other.insert({_id: 1})); - assert.writeOK(db.coll.insert({_id: 1})); + assert.commandWorked(db.other.insert({_id: 1})); + assert.commandWorked(db.coll.insert({_id: 1})); }, performOp: function(db) { assert(db.coll.drop()); - assert.writeOK(db.coll.insert({_id: 1})); + assert.commandWorked(db.coll.insert({_id: 1})); }, blockedCollections: ['coll'], unblockedCollections: ['other'], }, dropAndRecreateDB: { prepare: function(db) { - assert.writeOK(db.coll.insert({_id: 1})); + assert.commandWorked(db.coll.insert({_id: 1})); // Drop collection explicitly during the preparation phase while we are still able // to write to a majority. Otherwise, dropDatabase() will drop the collection // and wait for the collection drop to be replicated to a majority of the nodes. 
@@ -101,15 +101,15 @@ const testCases = { }, performOp: function(db) { assert.commandWorked(db.dropDatabase({w: 1})); - assert.writeOK(db.coll.insert({_id: 1})); + assert.commandWorked(db.coll.insert({_id: 1})); }, blockedCollections: ['coll'], unblockedCollections: ['otherDoesNotExist'], }, renameCollectionToNewName: { prepare: function(db) { - assert.writeOK(db.other.insert({_id: 1})); - assert.writeOK(db.from.insert({_id: 1})); + assert.commandWorked(db.other.insert({_id: 1})); + assert.commandWorked(db.from.insert({_id: 1})); }, performOp: function(db) { assert.commandWorked(db.from.renameCollection('coll')); @@ -119,9 +119,9 @@ const testCases = { }, renameCollectionToExistingName: { prepare: function(db) { - assert.writeOK(db.other.insert({_id: 1})); - assert.writeOK(db.from.insert({_id: 'from'})); - assert.writeOK(db.coll.insert({_id: 'coll'})); + assert.commandWorked(db.other.insert({_id: 1})); + assert.commandWorked(db.from.insert({_id: 'from'})); + assert.commandWorked(db.coll.insert({_id: 'coll'})); }, performOp: function(db) { assert.commandWorked(db.from.renameCollection('coll', true)); @@ -131,8 +131,8 @@ const testCases = { }, createIndexForeground: { prepare: function(db) { - assert.writeOK(db.other.insert({_id: 1})); - assert.writeOK(db.coll.insert({_id: 1})); + assert.commandWorked(db.other.insert({_id: 1})); + assert.commandWorked(db.coll.insert({_id: 1})); }, performOp: function(db) { assert.commandWorked(db.coll.ensureIndex({x: 1}, {background: false})); @@ -142,8 +142,8 @@ const testCases = { }, createIndexBackground: { prepare: function(db) { - assert.writeOK(db.other.insert({_id: 1})); - assert.writeOK(db.coll.insert({_id: 1})); + assert.commandWorked(db.other.insert({_id: 1})); + assert.commandWorked(db.coll.insert({_id: 1})); }, performOp: function(db) { assert.commandWorked(db.coll.ensureIndex({x: 1}, {background: true})); @@ -153,8 +153,8 @@ const testCases = { }, dropIndex: { prepare: function(db) { - assert.writeOK(db.other.insert({_id: 1})); - assert.writeOK(db.coll.insert({_id: 1})); + assert.commandWorked(db.other.insert({_id: 1})); + assert.commandWorked(db.coll.insert({_id: 1})); assert.commandWorked(db.coll.ensureIndex({x: 1})); }, performOp: function(db) { @@ -167,8 +167,8 @@ const testCases = { // Remaining cases are local-only operations. reIndex: { prepare: function(db) { - assert.writeOK(db.other.insert({_id: 1})); - assert.writeOK(db.coll.insert({_id: 1})); + assert.commandWorked(db.other.insert({_id: 1})); + assert.commandWorked(db.coll.insert({_id: 1})); assert.commandWorked(db.coll.ensureIndex({x: 1})); }, performOp: function(db) { @@ -181,8 +181,8 @@ const testCases = { compact: { // At least on WiredTiger, compact is fully inplace so it doesn't need to block readers. prepare: function(db) { - assert.writeOK(db.other.insert({_id: 1})); - assert.writeOK(db.coll.insert({_id: 1})); + assert.commandWorked(db.other.insert({_id: 1})); + assert.commandWorked(db.coll.insert({_id: 1})); assert.commandWorked(db.coll.ensureIndex({x: 1})); }, performOp: function(db) { @@ -249,7 +249,7 @@ var mainDB = primary.getDB('mainDB'); // This DB won't be used by any tests so it should always be unblocked. 
var otherDB = primary.getDB('otherDB'); var otherDBCollection = otherDB.collection; -assert.writeOK(otherDBCollection.insert( +assert.commandWorked(otherDBCollection.insert( {}, {writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}})); assertReadsSucceed(otherDBCollection); @@ -285,7 +285,7 @@ for (var testName in testCases) { // performing the operation. This will ensure that the operation happens after an // uncommitted write which prevents it from immediately being marked as committed. if (test.localOnly) { - assert.writeOK(otherDBCollection.insert({})); + assert.commandWorked(otherDBCollection.insert({})); } // Perform the op and ensure that blocked collections block and unblocked ones don't. diff --git a/jstests/replsets/read_majority_two_arbs.js b/jstests/replsets/read_majority_two_arbs.js index f49ebe71dd7..53ac988b6fb 100644 --- a/jstests/replsets/read_majority_two_arbs.js +++ b/jstests/replsets/read_majority_two_arbs.js @@ -62,7 +62,8 @@ function doCommittedRead() { } jsTest.log("doing write"); -assert.writeOK(t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 10 * 1000}})); +assert.commandWorked( + t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 10 * 1000}})); jsTest.log("doing read"); assert.eq(doDirtyRead(), 0); jsTest.log("doing committed read"); diff --git a/jstests/replsets/read_operations_during_step_down.js b/jstests/replsets/read_operations_during_step_down.js index 667e353d2fe..d6bdd779be3 100644 --- a/jstests/replsets/read_operations_during_step_down.js +++ b/jstests/replsets/read_operations_during_step_down.js @@ -28,7 +28,7 @@ TestData.dbName = dbName; TestData.collName = collName; jsTestLog("1. Do a document write"); -assert.writeOK( +assert.commandWorked( primaryColl.insert({_id: 0}, {"writeConcern": {"w": "majority"}})); rst.awaitReplication(); diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js index 8b789db3547..5c3114a0893 100644 --- a/jstests/replsets/replset2.js +++ b/jstests/replsets/replset2.js @@ -46,7 +46,7 @@ doTest = function(signal) { printjson(master.getDB("admin").runCommand("replSetGetStatus")); - assert.writeOK(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS})); + assert.commandWorked(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS})); print("replset2.js **** TEMP 1a ****"); @@ -66,7 +66,7 @@ doTest = function(signal) { print("replset2.js **** Try inserting a single record ****"); master.getDB(testDB).dropDatabase(); var options = {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}; - assert.writeOK(master.getDB(testDB).foo.insert({n: 1}, options)); + assert.commandWorked(master.getDB(testDB).foo.insert({n: 1}, options)); m1 = master.getDB(testDB).foo.findOne({n: 1}); printjson(m1); @@ -85,7 +85,7 @@ doTest = function(signal) { for (var n = 0; n < 1000; n++) { bulk.insert({n: n, data: bigData}); } - assert.writeOK(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS})); + assert.commandWorked(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS})); print("replset2.js **** V1 "); diff --git a/jstests/replsets/replset8.js b/jstests/replsets/replset8.js index 65f1be46e16..87eb6d6361c 100644 --- a/jstests/replsets/replset8.js +++ b/jstests/replsets/replset8.js @@ -29,7 +29,7 @@ for (var i = 0; i < doccount; ++i) { bulk.insert({_id: i, x: bigstring}); bigstring += "a"; } -var result = assert.writeOK(bulk.execute()); +var result = assert.commandWorked(bulk.execute()); jsTestLog('insert 0-' + (doccount - 1) + ' result: 
' + tojson(result)); assert.eq(doccount, result.nInserted); assert.eq(doccount + 1, mdc.find().itcount()); @@ -40,7 +40,7 @@ bulk = mdc.initializeUnorderedBulkOp(); for (i = doccount; i < doccount * 2; ++i) { bulk.insert({_id: i, x: i}); } -result = assert.writeOK(bulk.execute()); +result = assert.commandWorked(bulk.execute()); jsTestLog('insert ' + doccount + '-' + (doccount * 2 - 1) + ' result: ' + tojson(result)); assert.eq(doccount, result.nInserted); assert.eq(doccount * 2 + 1, mdc.find().itcount()); @@ -50,7 +50,7 @@ bulk = mdc.initializeUnorderedBulkOp(); for (i = 0; i < doccount; ++i) { bulk.find({_id: i}).remove(); } -result = assert.writeOK(bulk.execute()); +result = assert.commandWorked(bulk.execute()); jsTestLog('delete 0-' + (doccount - 1) + ' result: ' + tojson(result)); assert.eq(doccount, result.nRemoved); assert.eq(doccount + 1, mdc.find().itcount()); @@ -68,7 +68,7 @@ for (i = doccount * 2; i > doccount; --i) { bulk.find({_id: i}).update({$set: {x: bigstring}}); bigstring = bigstring.slice(0, -1); // remove last char } -result = assert.writeOK(bulk.execute({w: rt.nodes.length})); +result = assert.commandWorked(bulk.execute({w: rt.nodes.length})); jsTestLog('update ' + (doccount + 1) + '-' + (doccount * 2 - 1) + ' result: ' + tojson(result)); assert.eq(doccount - 1, result.nMatched); assert.eq(doccount - 1, result.nModified); diff --git a/jstests/replsets/replsetprio1.js b/jstests/replsets/replsetprio1.js index bb1c1f7dc76..9851e501f69 100644 --- a/jstests/replsets/replsetprio1.js +++ b/jstests/replsets/replsetprio1.js @@ -34,11 +34,11 @@ replTest.waitForState(nodes[1], ReplSetTest.State.PRIMARY); // do some writes on 1 var master = replTest.getPrimary(); for (var i = 0; i < 1000; i++) { - assert.writeOK(master.getDB("foo").bar.insert({i: i}, {writeConcern: {w: 'majority'}})); + assert.commandWorked(master.getDB("foo").bar.insert({i: i}, {writeConcern: {w: 'majority'}})); } for (i = 0; i < 1000; i++) { - assert.writeOK(master.getDB("bar").baz.insert({i: i}, {writeConcern: {w: 'majority'}})); + assert.commandWorked(master.getDB("bar").baz.insert({i: i}, {writeConcern: {w: 'majority'}})); } // bring 2 back up, 2 should wait until caught up and then become master diff --git a/jstests/replsets/restore_term.js b/jstests/replsets/restore_term.js index 072a22eb974..0414feb49af 100644 --- a/jstests/replsets/restore_term.js +++ b/jstests/replsets/restore_term.js @@ -31,7 +31,7 @@ var primaryColl = primary.getDB("test").coll; // Current term may be greater than 1 if election race happens. var firstSuccessfulTerm = getCurrentTerm(primary); assert.gte(firstSuccessfulTerm, 1); -assert.writeOK(primaryColl.insert({x: 1}, {writeConcern: {w: "majority"}})); +assert.commandWorked(primaryColl.insert({x: 1}, {writeConcern: {w: "majority"}})); assert.eq(getCurrentTerm(primary), firstSuccessfulTerm); // Check that the insert op has the initial term. 
diff --git a/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js b/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js index 54e826dcfeb..e4728529ebe 100644 --- a/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js +++ b/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js @@ -20,8 +20,8 @@ var priConn = replTest.getPrimary(); var db = priConn.getDB('TestDB'); var config = priConn.getDB('config'); -assert.writeOK(db.user.insert({_id: 0})); -assert.writeOK(db.user.insert({_id: 1})); +assert.commandWorked(db.user.insert({_id: 0})); +assert.commandWorked(db.user.insert({_id: 1})); const lsid1 = UUID(); const lsid2 = UUID(); @@ -48,7 +48,7 @@ assert.eq(1, config.transactions.find({'_id.id': lsid1}).itcount()); assert.eq(1, config.transactions.find({'_id.id': lsid2}).itcount()); // Invalidating lsid1 doesn't impact lsid2, but allows same statement to be executed again -assert.writeOK(config.transactions.remove({'_id.id': lsid1})); +assert.commandWorked(config.transactions.remove({'_id.id': lsid1})); assert.commandWorked(db.runCommand(cmdObj1)); assert.eq(2, db.user.find({_id: 0}).toArray()[0].x); assert.commandWorked(db.runCommand(cmdObj2)); @@ -65,8 +65,8 @@ assert.writeError(config.transactions.insert({_id: {UnknownField: 'Garbage'}})); // Ensure inserting an invalid session record manually without all the required fields causes // the session to not work anymore for retryable writes for that session, but not for any other const lsidManual = config.transactions.find({'_id.id': lsid1}).toArray()[0]._id; -assert.writeOK(config.transactions.remove({'_id.id': lsid1})); -assert.writeOK(config.transactions.insert({_id: lsidManual})); +assert.commandWorked(config.transactions.remove({'_id.id': lsid1})); +assert.commandWorked(config.transactions.insert({_id: lsidManual})); const lsid3 = UUID(); assert.commandWorked(db.runCommand({ diff --git a/jstests/replsets/retryable_writes_failover.js b/jstests/replsets/retryable_writes_failover.js index 2073e2fbded..7f3c16eee6d 100644 --- a/jstests/replsets/retryable_writes_failover.js +++ b/jstests/replsets/retryable_writes_failover.js @@ -128,8 +128,8 @@ let deleteCmd = { primary = replTest.getPrimary(); testDB = primary.getDB("test"); -assert.writeOK(testDB.foo.insert({_id: 40, x: 1})); -assert.writeOK(testDB.foo.insert({_id: 50, y: 1})); +assert.commandWorked(testDB.foo.insert({_id: 40, x: 1})); +assert.commandWorked(testDB.foo.insert({_id: 50, y: 1})); // Run the command on the primary and wait for replication. 
result = assert.commandWorked(testDB.runCommand(deleteCmd)); diff --git a/jstests/replsets/rollback_all_op_types.js b/jstests/replsets/rollback_all_op_types.js index 8ffc53f2faf..5eddbe528e4 100644 --- a/jstests/replsets/rollback_all_op_types.js +++ b/jstests/replsets/rollback_all_op_types.js @@ -40,23 +40,23 @@ let rollbackOps = { assert.commandWorked(db.createCollection(collName)); }, op: (db, collName) => { - assert.writeOK(db[collName].insert({_id: 0})); + assert.commandWorked(db[collName].insert({_id: 0})); } }], "update": [{ init: (db, collName) => { - assert.writeOK(db[collName].insert({_id: 0, val: 0})); + assert.commandWorked(db[collName].insert({_id: 0, val: 0})); }, op: (db, collName) => { - assert.writeOK(db[collName].update({_id: 0}, {val: 1})); + assert.commandWorked(db[collName].update({_id: 0}, {val: 1})); }, }], "delete": [{ init: (db, collName) => { - assert.writeOK(db[collName].insert({_id: 0})); + assert.commandWorked(db[collName].insert({_id: 0})); }, op: (db, collName) => { - assert.writeOK(db[collName].remove({_id: 0})); + assert.commandWorked(db[collName].remove({_id: 0})); }, }], "create": [{ diff --git a/jstests/replsets/rollback_collmods.js b/jstests/replsets/rollback_collmods.js index 6a741ec6174..7c96235f33c 100644 --- a/jstests/replsets/rollback_collmods.js +++ b/jstests/replsets/rollback_collmods.js @@ -29,10 +29,10 @@ function printCollectionOptions(rollbackTest, time) { // Operations that will be present on both nodes, before the common point. let CommonOps = (node) => { let testDb = node.getDB(dbName); - assert.writeOK(testDb[coll1Name].insert({a: 1, b: 1})); - assert.writeOK(testDb[coll2Name].insert({a: 2, b: 2})); - assert.writeOK(testDb[coll3Name].insert({a: 3, b: 3})); - assert.writeOK(testDb[coll4Name].insert({a: 4, b: 4})); + assert.commandWorked(testDb[coll1Name].insert({a: 1, b: 1})); + assert.commandWorked(testDb[coll2Name].insert({a: 2, b: 2})); + assert.commandWorked(testDb[coll3Name].insert({a: 3, b: 3})); + assert.commandWorked(testDb[coll4Name].insert({a: 4, b: 4})); // Start with no validation action. 
assert.commandWorked(testDb.runCommand({ diff --git a/jstests/replsets/rollback_creates_rollback_directory.js b/jstests/replsets/rollback_creates_rollback_directory.js index 907e81e1f8c..3cb47eb65a2 100644 --- a/jstests/replsets/rollback_creates_rollback_directory.js +++ b/jstests/replsets/rollback_creates_rollback_directory.js @@ -47,21 +47,21 @@ function runRollbackDirectoryTest(shouldCreateRollbackFiles) { }, "Arbiter failed to initialize."); var options = {writeConcern: {w: 2, wtimeout: replTest.kDefaultTimeoutMS}, upsert: true}; - assert.writeOK(A.foo.update({key: 'value1'}, {$set: {req: 'req'}}, options)); + assert.commandWorked(A.foo.update({key: 'value1'}, {$set: {req: 'req'}}, options)); var AID = replTest.getNodeId(a_conn); replTest.stop(AID); master = replTest.getPrimary(); assert(b_conn.host == master.host); options = {writeConcern: {w: 1, wtimeout: replTest.kDefaultTimeoutMS}, upsert: true}; - assert.writeOK(B.foo.update({key: 'value1'}, {$set: {res: 'res'}}, options)); + assert.commandWorked(B.foo.update({key: 'value1'}, {$set: {res: 'res'}}, options)); var BID = replTest.getNodeId(b_conn); replTest.stop(BID); replTest.restart(AID); master = replTest.getPrimary(); assert(a_conn.host == master.host); options = {writeConcern: {w: 1, wtimeout: replTest.kDefaultTimeoutMS}, upsert: true}; - assert.writeOK(A.foo.update({key: 'value2'}, {$set: {req: 'req'}}, options)); + assert.commandWorked(A.foo.update({key: 'value2'}, {$set: {req: 'req'}}, options)); replTest.restart(BID); // should rollback reconnect(B); diff --git a/jstests/replsets/rollback_crud_op_sequences.js b/jstests/replsets/rollback_crud_op_sequences.js index ce21957f45e..a2e89332141 100644 --- a/jstests/replsets/rollback_crud_op_sequences.js +++ b/jstests/replsets/rollback_crud_op_sequences.js @@ -58,15 +58,15 @@ var a = a_conn.getDB("foo"); var b = b_conn.getDB("foo"); // initial data for both nodes -assert.writeOK(a.bar.insert({q: 0})); -assert.writeOK(a.bar.insert({q: 1, a: "foo"})); -assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1})); -assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"})); -assert.writeOK(a.bar.insert({q: 40, a: 1})); -assert.writeOK(a.bar.insert({q: 40, a: 2})); -assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'})); +assert.commandWorked(a.bar.insert({q: 0})); +assert.commandWorked(a.bar.insert({q: 1, a: "foo"})); +assert.commandWorked(a.bar.insert({q: 2, a: "foo", x: 1})); +assert.commandWorked(a.bar.insert({q: 3, bb: 9, a: "foo"})); +assert.commandWorked(a.bar.insert({q: 40, a: 1})); +assert.commandWorked(a.bar.insert({q: 40, a: 2})); +assert.commandWorked(a.bar.insert({q: 70, txt: 'willremove'})); a.createCollection("kap", {capped: true, size: 5000}); -assert.writeOK(a.kap.insert({foo: 1})); +assert.commandWorked(a.kap.insert({foo: 1})); // going back to empty on capped is a special case and must be tested a.createCollection("kap2", {capped: true, size: 5501}); replTest.awaitReplication(); @@ -83,17 +83,17 @@ assert.soon(function() { }, "node B did not become master as expected", ReplSetTest.kDefaultTimeoutMS); // do operations on B and B alone, these will be rolled back -assert.writeOK(b.bar.insert({q: 4})); -assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true})); -assert.writeOK(b.bar.remove({q: 40})); // multi remove test -assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true})); +assert.commandWorked(b.bar.insert({q: 4})); +assert.commandWorked(b.bar.update({q: 3}, {q: 3, rb: true})); +assert.commandWorked(b.bar.remove({q: 40})); // multi remove test 
+assert.commandWorked(b.bar.update({q: 2}, {q: 39, rb: true})); // rolling back a delete will involve reinserting the item(s) -assert.writeOK(b.bar.remove({q: 1})); -assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}})); -assert.writeOK(b.kap.insert({foo: 2})); -assert.writeOK(b.kap2.insert({foo: 2})); +assert.commandWorked(b.bar.remove({q: 1})); +assert.commandWorked(b.bar.update({q: 0}, {$inc: {y: 1}})); +assert.commandWorked(b.kap.insert({foo: 2})); +assert.commandWorked(b.kap2.insert({foo: 2})); // create a collection (need to roll back the whole thing) -assert.writeOK(b.newcoll.insert({a: true})); +assert.commandWorked(b.newcoll.insert({a: true})); // create a new empty collection (need to roll back the whole thing) b.createCollection("abc"); @@ -117,9 +117,9 @@ assert.soon(function() { } }); assert.gte(a.bar.find().itcount(), 1, "count check"); -assert.writeOK(a.bar.insert({txt: 'foo'})); -assert.writeOK(a.bar.remove({q: 70})); -assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}})); +assert.commandWorked(a.bar.insert({txt: 'foo'})); +assert.commandWorked(a.bar.remove({q: 70})); +assert.commandWorked(a.bar.update({q: 0}, {$inc: {y: 33}})); // A is 1 2 3 7 8 // B is 1 2 3 4 5 6 diff --git a/jstests/replsets/rollback_ddl_op_sequences.js b/jstests/replsets/rollback_ddl_op_sequences.js index 79883eac336..d5a224fcbed 100644 --- a/jstests/replsets/rollback_ddl_op_sequences.js +++ b/jstests/replsets/rollback_ddl_op_sequences.js @@ -63,23 +63,23 @@ var a = a_conn.getDB("foo"); var b = b_conn.getDB("foo"); // initial data for both nodes -assert.writeOK(a.b.insert({x: 1})); +assert.commandWorked(a.b.insert({x: 1})); a.b.ensureIndex({x: 1}); -assert.writeOK(a.oldname.insert({y: 1})); -assert.writeOK(a.oldname.insert({y: 2})); +assert.commandWorked(a.oldname.insert({y: 1})); +assert.commandWorked(a.oldname.insert({y: 2})); a.oldname.ensureIndex({y: 1}, true); -assert.writeOK(a.bar.insert({q: 0})); -assert.writeOK(a.bar.insert({q: 1, a: "foo"})); -assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1})); -assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"})); -assert.writeOK(a.bar.insert({q: 40333333, a: 1})); +assert.commandWorked(a.bar.insert({q: 0})); +assert.commandWorked(a.bar.insert({q: 1, a: "foo"})); +assert.commandWorked(a.bar.insert({q: 2, a: "foo", x: 1})); +assert.commandWorked(a.bar.insert({q: 3, bb: 9, a: "foo"})); +assert.commandWorked(a.bar.insert({q: 40333333, a: 1})); for (var i = 0; i < 200; i++) { - assert.writeOK(a.bar.insert({i: i})); + assert.commandWorked(a.bar.insert({i: i})); } -assert.writeOK(a.bar.insert({q: 40, a: 2})); -assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'})); +assert.commandWorked(a.bar.insert({q: 40, a: 2})); +assert.commandWorked(a.bar.insert({q: 70, txt: 'willremove'})); a.createCollection("kap", {capped: true, size: 5000}); -assert.writeOK(a.kap.insert({foo: 1})); +assert.commandWorked(a.kap.insert({foo: 1})); replTest.awaitReplication(); // isolate A and wait for B to become master @@ -94,17 +94,17 @@ assert.soon(function() { }); // do operations on B and B alone, these will be rolled back -assert.writeOK(b.bar.insert({q: 4})); -assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true})); -assert.writeOK(b.bar.remove({q: 40})); // multi remove test -assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true})); +assert.commandWorked(b.bar.insert({q: 4})); +assert.commandWorked(b.bar.update({q: 3}, {q: 3, rb: true})); +assert.commandWorked(b.bar.remove({q: 40})); // multi remove test +assert.commandWorked(b.bar.update({q: 2}, {q: 39, rb: true})); // rolling back 
a delete will involve reinserting the item(s) -assert.writeOK(b.bar.remove({q: 1})); -assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}})); -assert.writeOK(b.kap.insert({foo: 2})); -assert.writeOK(b.kap2.insert({foo: 2})); +assert.commandWorked(b.bar.remove({q: 1})); +assert.commandWorked(b.bar.update({q: 0}, {$inc: {y: 1}})); +assert.commandWorked(b.kap.insert({foo: 2})); +assert.commandWorked(b.kap2.insert({foo: 2})); // create a collection (need to roll back the whole thing) -assert.writeOK(b.newcoll.insert({a: true})); +assert.commandWorked(b.newcoll.insert({a: true})); // create a new empty collection (need to roll back the whole thing) b.createCollection("abc"); // drop a collection - we'll need all its data back! @@ -119,8 +119,8 @@ assert(b.fooname.find().itcount() > 0, "count rename"); b.fooname.ensureIndex({q: 1}); // test roll back (drop) a whole database var abc = b.getSisterDB("abc"); -assert.writeOK(abc.foo.insert({x: 1})); -assert.writeOK(abc.bar.insert({y: 999})); +assert.commandWorked(abc.foo.insert({x: 1})); +assert.commandWorked(abc.bar.insert({y: 999})); // isolate B, bring A back into contact with the arbiter, then wait for A to become master // insert new data into A so that B will need to rollback when it reconnects to A @@ -142,9 +142,9 @@ assert.soon(function() { } }); assert(a.bar.find().itcount() >= 1, "count check"); -assert.writeOK(a.bar.insert({txt: 'foo'})); -assert.writeOK(a.bar.remove({q: 70})); -assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}})); +assert.commandWorked(a.bar.insert({txt: 'foo'})); +assert.commandWorked(a.bar.remove({q: 70})); +assert.commandWorked(a.bar.update({q: 0}, {$inc: {y: 33}})); // A is 1 2 3 7 8 // B is 1 2 3 4 5 6 diff --git a/jstests/replsets/rollback_drop_database.js b/jstests/replsets/rollback_drop_database.js index aa783cc9dd3..58eb31ae257 100644 --- a/jstests/replsets/rollback_drop_database.js +++ b/jstests/replsets/rollback_drop_database.js @@ -22,7 +22,7 @@ let rollbackNode = rollbackTest.getPrimary(); let syncSourceNode = rollbackTest.getSecondary(); // Perform initial insert (common operation). -assert.writeOK(rollbackNode.getDB(oldDbName)["beforeRollback"].insert({"num": 1})); +assert.commandWorked(rollbackNode.getDB(oldDbName)["beforeRollback"].insert({"num": 1})); // Set a failpoint on the original primary, so that it blocks after it commits the last // 'dropCollection' entry but before the 'dropDatabase' entry is logged. @@ -63,7 +63,7 @@ rollbackTest.transitionToSyncSourceOperationsBeforeRollback(); // Perform an insert on another database while interfacing with the new primary. // This is the sync source's divergent oplog entry. -assert.writeOK(syncSourceNode.getDB(newDbName)["afterRollback"].insert({"num": 2})); +assert.commandWorked(syncSourceNode.getDB(newDbName)["afterRollback"].insert({"num": 2})); rollbackTest.transitionToSyncSourceOperationsDuringRollback(); rollbackTest.transitionToSteadyStateOperations(); diff --git a/jstests/replsets/rollback_rename_collection_on_sync_source.js b/jstests/replsets/rollback_rename_collection_on_sync_source.js index be03faa94bf..fa227752e14 100644 --- a/jstests/replsets/rollback_rename_collection_on_sync_source.js +++ b/jstests/replsets/rollback_rename_collection_on_sync_source.js @@ -18,18 +18,18 @@ let doc2 = {x: 2}; let CommonOps = (node) => { // Insert a document that will exist on the sync source and rollback node. 
- assert.writeOK(node.getDB(dbName)[sourceCollName].insert(doc1)); + assert.commandWorked(node.getDB(dbName)[sourceCollName].insert(doc1)); }; let RollbackOps = (node) => { // Delete the document on rollback node so it will be refetched from sync source. - assert.writeOK(node.getDB(dbName)[sourceCollName].remove(doc1)); + assert.commandWorked(node.getDB(dbName)[sourceCollName].remove(doc1)); }; let SyncSourceOps = (node) => { // Rename the original collection on the sync source. assert.commandWorked(node.getDB(dbName)[sourceCollName].renameCollection(destCollName)); - assert.writeOK(node.getDB(dbName)[destCollName].insert(doc2)); + assert.commandWorked(node.getDB(dbName)[destCollName].insert(doc2)); }; // Set up Rollback Test. diff --git a/jstests/replsets/rollback_views.js b/jstests/replsets/rollback_views.js index a802eb81663..7f7537b25e6 100644 --- a/jstests/replsets/rollback_views.js +++ b/jstests/replsets/rollback_views.js @@ -68,7 +68,7 @@ let a1 = nodeA.getDB("test1"); let b1 = nodeB.getDB("test1"); // Initial data for both nodes. -assert.writeOK(a1.coll.insert([{_id: 1, x: 1}, {_id: 2, x: 2}])); +assert.commandWorked(a1.coll.insert([{_id: 1, x: 1}, {_id: 2, x: 2}])); // Wait for initial replication. replTest.awaitReplication(); @@ -80,16 +80,16 @@ assert.soon(() => replTest.getPrimary() == nodeB, "node B did not become primary // Do operations on B and B alone, these will be rolled back. // For the collection creation, first create a view with the same name, stressing rollback. -assert.writeOK(b1.coll.remove({x: 2})); +assert.commandWorked(b1.coll.remove({x: 2})); assert.commandWorked(b1.createView("x", "coll", [{$match: {x: 1}}])); let b2 = b1.getSiblingDB("test2"); -assert.writeOK(b2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}])); +assert.commandWorked(b2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}])); assert.commandWorked(b2.createView("y", "coll", [{$match: {y: 2}}])); let b3 = b1.getSiblingDB("test3"); assert.commandWorked(b3.createView("z", "coll", [])); -assert.writeOK(b3.system.views.remove({})); -assert.writeOK(b3.z.insert([{z: 1}, {z: 2}, {z: 3}])); -assert.writeOK(b3.z.remove({z: 1})); +assert.commandWorked(b3.system.views.remove({})); +assert.commandWorked(b3.z.insert([{z: 1}, {z: 2}, {z: 3}])); +assert.commandWorked(b3.z.remove({z: 1})); // Isolate B, bring A back into contact with the arbiter, then wait for A to become primary. // Insert new data into A, so that B will need to rollback when it reconnects to A. @@ -100,12 +100,12 @@ assert.soon(() => replTest.getPrimary() == nodeA, "nodeA did not become primary // A is now primary and will perform writes that must be copied by B after rollback. 
assert.eq(a1.coll.find().itcount(), 2, "expected two documents in test1.coll"); -assert.writeOK(a1.x.insert({_id: 3, x: "string in test1.x"})); +assert.commandWorked(a1.x.insert({_id: 3, x: "string in test1.x"})); let a2 = a1.getSiblingDB("test2"); assert.commandWorked(a2.createView("y", "coll", [{$match: {y: 2}}])); -assert.writeOK(a2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}])); +assert.commandWorked(a2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}])); let a3 = a1.getSiblingDB("test3"); -assert.writeOK(a3.coll.insert([{z: 1}, {z: 2}, {z: 3}])); +assert.commandWorked(a3.coll.insert([{z: 1}, {z: 2}, {z: 3}])); assert.commandWorked(a3.createView("z", "coll", [{$match: {z: 3}}])); // A is collections: test1.{coll,x}, test2.{coll,system.views}, test3.{coll,system.views} diff --git a/jstests/replsets/rollback_waits_for_bgindex_completion.js b/jstests/replsets/rollback_waits_for_bgindex_completion.js index e6433d558e6..857a90e1148 100644 --- a/jstests/replsets/rollback_waits_for_bgindex_completion.js +++ b/jstests/replsets/rollback_waits_for_bgindex_completion.js @@ -61,7 +61,7 @@ CommonOps(originalPrimary); // Insert a document so that there is an operation to rollback. const rollbackNode = rollbackTest.transitionToRollbackOperations(); -assert.writeOK(rollbackNode.getDB(dbName)["rollbackColl"].insert({x: 1})); +assert.commandWorked(rollbackNode.getDB(dbName)["rollbackColl"].insert({x: 1})); // Allow rollback to start. There are no sync source ops. rollbackTest.transitionToSyncSourceOperationsBeforeRollback(); diff --git a/jstests/replsets/rollback_with_socket_error_then_steady_state.js b/jstests/replsets/rollback_with_socket_error_then_steady_state.js index 713658e1b5f..b7437d63b41 100644 --- a/jstests/replsets/rollback_with_socket_error_then_steady_state.js +++ b/jstests/replsets/rollback_with_socket_error_then_steady_state.js @@ -41,7 +41,7 @@ jsTestLog("Make sure node 0 is primary."); stepUp(rst, nodes[0]); assert.eq(nodes[0], rst.getPrimary()); // Wait for all data bearing nodes to get up to date. -assert.writeOK(nodes[0].getCollection(collName).insert( +assert.commandWorked(nodes[0].getCollection(collName).insert( {a: counter++}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}})); jsTestLog("Create two partitions: [1] and [0,2,3,4]."); @@ -51,7 +51,7 @@ nodes[1].disconnect(nodes[3]); nodes[1].disconnect(nodes[4]); jsTestLog("Do a write that is replicated to [0,2,3,4]."); -assert.writeOK(nodes[0].getCollection(collName).insert( +assert.commandWorked(nodes[0].getCollection(collName).insert( {a: counter++}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}})); jsTestLog("Repartition to: [0,2] and [1,3,4]."); @@ -68,7 +68,7 @@ waitForState(nodes[1], ReplSetTest.State.PRIMARY); assert.eq(nodes[1], rst.getPrimary()); jsTestLog("Do a write to node 1 on the [1,3,4] side of the partition."); -assert.writeOK(nodes[1].getCollection(collName).insert({a: counter++})); +assert.commandWorked(nodes[1].getCollection(collName).insert({a: counter++})); // Turn on failpoint on node 2 to pause rollback before doing anything. 
assert.commandWorked( @@ -112,7 +112,7 @@ waitForState(nodes[0], ReplSetTest.State.PRIMARY); assert.eq(nodes[0], rst.getPrimary()); jsTestLog("w:2 write to node 0 (replicated to node 2)"); -assert.writeOK(nodes[0].getCollection(collName).insert( +assert.commandWorked(nodes[0].getCollection(collName).insert( {a: counter++}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}})); // At this point node 2 has failed rollback before making any durable changes, including writing diff --git a/jstests/replsets/rslib.js b/jstests/replsets/rslib.js index 209ed8172e1..6da89504ba9 100644 --- a/jstests/replsets/rslib.js +++ b/jstests/replsets/rslib.js @@ -42,7 +42,7 @@ syncFrom = function(syncingNode, desiredSyncSource, rst) { stopServerReplication(syncingNode); - assert.writeOK(rst.getPrimary().getDB(dummyName).getCollection(dummyName).insert({a: 1})); + assert.commandWorked(rst.getPrimary().getDB(dummyName).getCollection(dummyName).insert({a: 1})); // Wait for 'desiredSyncSource' to get the dummy write we just did so we know it's // definitely ahead of 'syncingNode' before we call replSetSyncFrom. assert.soonNoExcept(function() { diff --git a/jstests/replsets/secondary_as_sync_source.js b/jstests/replsets/secondary_as_sync_source.js index ec18cebff0d..1d12f08b037 100644 --- a/jstests/replsets/secondary_as_sync_source.js +++ b/jstests/replsets/secondary_as_sync_source.js @@ -21,7 +21,7 @@ function addTestDocuments(db) { for (var i = 0; i < size; ++i) { bulk.insert({i: i}); } - assert.writeOK(bulk.execute()); + assert.commandWorked(bulk.execute()); } let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2, useBridge: true}); diff --git a/jstests/replsets/server_status_metrics.js b/jstests/replsets/server_status_metrics.js index 7b271d197f4..e7784ba0c76 100644 --- a/jstests/replsets/server_status_metrics.js +++ b/jstests/replsets/server_status_metrics.js @@ -41,7 +41,7 @@ var primary = rt.getPrimary(); var testDB = primary.getDB("test"); assert.commandWorked(testDB.createCollection('a')); -assert.writeOK(testDB.b.insert({}, {writeConcern: {w: 2}})); +assert.commandWorked(testDB.b.insert({}, {writeConcern: {w: 2}})); var ss = secondary.getDB("test").serverStatus(); // The number of ops received and the number of ops applied are not guaranteed to be the same @@ -55,12 +55,12 @@ var bulk = testDB.a.initializeUnorderedBulkOp(); for (x = 0; x < 1000; x++) { bulk.insert({}); } -assert.writeOK(bulk.execute({w: 2})); +assert.commandWorked(bulk.execute({w: 2})); testSecondaryMetrics(secondary, 1000, secondaryBaseOplogOpsApplied, secondaryBaseOplogOpsReceived); var options = {writeConcern: {w: 2}, multi: true, upsert: true}; -assert.writeOK(testDB.a.update({}, {$set: {d: new Date()}}, options)); +assert.commandWorked(testDB.a.update({}, {$set: {d: new Date()}}, options)); testSecondaryMetrics(secondary, 2000, secondaryBaseOplogOpsApplied, secondaryBaseOplogOpsReceived); @@ -70,15 +70,15 @@ var startNum = testDB.serverStatus().metrics.getLastError.wtime.num; printjson(primary.getDB("test").serverStatus().metrics); -assert.writeOK(testDB.a.insert({x: 1}, {writeConcern: {w: 1, wtimeout: 5000}})); +assert.commandWorked(testDB.a.insert({x: 1}, {writeConcern: {w: 1, wtimeout: 5000}})); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.totalMillis, startMillis); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum); -assert.writeOK(testDB.a.insert({x: 1}, {writeConcern: {w: -11, wtimeout: 5000}})); +assert.commandWorked(testDB.a.insert({x: 1}, {writeConcern: {w: -11, 
wtimeout: 5000}})); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.totalMillis, startMillis); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum); -assert.writeOK(testDB.a.insert({x: 1}, {writeConcern: {w: 2, wtimeout: 5000}})); +assert.commandWorked(testDB.a.insert({x: 1}, {writeConcern: {w: 2, wtimeout: 5000}})); assert(testDB.serverStatus().metrics.getLastError.wtime.totalMillis >= startMillis); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum + 1); diff --git a/jstests/replsets/server_status_repl.js b/jstests/replsets/server_status_repl.js index c00fcc8818a..058fc14c5fa 100644 --- a/jstests/replsets/server_status_repl.js +++ b/jstests/replsets/server_status_repl.js @@ -9,7 +9,7 @@ var primary = rt.getPrimary(); var testDB = primary.getDB("test"); assert.commandWorked(testDB.createCollection('a')); -assert.writeOK(testDB.b.insert({}, {writeConcern: {w: 2}})); +assert.commandWorked(testDB.b.insert({}, {writeConcern: {w: 2}})); var ss = primary.getDB("test").serverStatus({repl: 1}); assert.neq(ss.repl.replicationProgress, null, tojson(ss.repl)); diff --git a/jstests/replsets/shutdown_primary.js b/jstests/replsets/shutdown_primary.js index bcaefe8c541..038049c6753 100644 --- a/jstests/replsets/shutdown_primary.js +++ b/jstests/replsets/shutdown_primary.js @@ -25,13 +25,13 @@ replTest.initiate(); var primary = replTest.getPrimary(); var testDB = primary.getDB(name); var timeout = ReplSetTest.kDefaultTimeoutMS; -assert.writeOK(testDB.foo.insert({x: 1}, {writeConcern: {w: 3, wtimeout: timeout}})); +assert.commandWorked(testDB.foo.insert({x: 1}, {writeConcern: {w: 3, wtimeout: timeout}})); jsTestLog("Blocking replication to secondaries."); stopReplicationOnSecondaries(replTest); jsTestLog("Executing write to primary."); -assert.writeOK(testDB.foo.insert({x: 2})); +assert.commandWorked(testDB.foo.insert({x: 2})); jsTestLog("Attempting to shut down primary."); assert.commandFailedWithCode(primary.adminCommand({shutdown: 1}), @@ -39,7 +39,7 @@ assert.commandFailedWithCode(primary.adminCommand({shutdown: 1}), "shut down did not fail with 'ExceededTimeLimit'"); jsTestLog("Verifying primary did not shut down."); -assert.writeOK(testDB.foo.insert({x: 3})); +assert.commandWorked(testDB.foo.insert({x: 3})); jsTestLog("Shutting down primary in a parallel shell"); var awaitShell = startParallelShell(function() { diff --git a/jstests/replsets/single_server_majority.js b/jstests/replsets/single_server_majority.js index c7c2ec862f4..039d8ccebeb 100644 --- a/jstests/replsets/single_server_majority.js +++ b/jstests/replsets/single_server_majority.js @@ -9,5 +9,5 @@ col = db.getCollection("single_server_majority"); col.drop(); // see if we can get a majority write on this single server -assert.writeOK(col.save({a: "test"}, {writeConcern: {w: 'majority'}})); +assert.commandWorked(col.save({a: "test"}, {writeConcern: {w: 'majority'}})); MongoRunner.stopMongod(mongod);
\ No newline at end of file diff --git a/jstests/replsets/slave_delay_clean_shutdown.js b/jstests/replsets/slave_delay_clean_shutdown.js index 2d3e75824da..8a68a2d0a58 100644 --- a/jstests/replsets/slave_delay_clean_shutdown.js +++ b/jstests/replsets/slave_delay_clean_shutdown.js @@ -26,7 +26,7 @@ rst.initiate(conf); var master = rst.getPrimary(); // Waits for PRIMARY state. // Push some ops through before setting slave delay. -assert.writeOK(master.getCollection(ns).insert([{}, {}, {}], {writeConcern: {w: 2}})); +assert.commandWorked(master.getCollection(ns).insert([{}, {}, {}], {writeConcern: {w: 2}})); // Set slaveDelay and wait for secondary to receive the change. conf = rst.getReplSetConfigFromNode(); @@ -40,7 +40,7 @@ sleep(2000); // The secondary apply loop only checks for slaveDelay changes onc var secondary = rst.getSecondary(); const lastOp = getLatestOp(secondary); -assert.writeOK(master.getCollection(ns).insert([{}, {}, {}])); +assert.commandWorked(master.getCollection(ns).insert([{}, {}, {}])); assert.soon(() => secondary.adminCommand('serverStatus').metrics.repl.buffer.count > 0, () => secondary.adminCommand('serverStatus').metrics.repl); assert.neq(getLatestOp(master), lastOp); diff --git a/jstests/replsets/slavedelay1.js b/jstests/replsets/slavedelay1.js index 98dc46bacc4..770c2430fdd 100644 --- a/jstests/replsets/slavedelay1.js +++ b/jstests/replsets/slavedelay1.js @@ -26,7 +26,7 @@ doTest = function(signal) { waitForAllMembers(master); // insert a record - assert.writeOK(master.foo.insert({x: 1}, {writeConcern: {w: 2}})); + assert.commandWorked(master.foo.insert({x: 1}, {writeConcern: {w: 2}})); var doc = master.foo.findOne(); assert.eq(doc.x, 1); @@ -65,7 +65,7 @@ doTest = function(signal) { master = reconfig(replTest, config); master = master.getSisterDB(name); - assert.writeOK(master.foo.insert( + assert.commandWorked(master.foo.insert( {_id: 123, x: 'foo'}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}})); for (var i = 0; i < 8; i++) { diff --git a/jstests/replsets/step_down_during_draining.js b/jstests/replsets/step_down_during_draining.js index 47c8ee2651a..b42ecd48d61 100644 --- a/jstests/replsets/step_down_during_draining.js +++ b/jstests/replsets/step_down_during_draining.js @@ -63,7 +63,7 @@ function stepUpNode(node) { // Do an initial insert to prevent the secondary from going into recovery var numDocuments = 20; var coll = primary.getDB("foo").foo; -assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}})); +assert.commandWorked(coll.insert({x: 0}, {writeConcern: {w: 3}})); replSet.awaitReplication(); // Enable fail point to stop replication. @@ -72,7 +72,7 @@ secondaries.forEach(enableFailPoint); var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count; for (var i = 1; i < numDocuments; ++i) { - assert.writeOK(coll.insert({x: i})); + assert.commandWorked(coll.insert({x: i})); } jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments); assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount()); @@ -124,7 +124,7 @@ assert.commandWorked( // Ensure new primary is writable. jsTestLog('New primary should be writable after draining is complete'); -assert.writeOK(secondary.getDB("foo").flag.insert({sentinel: 1})); +assert.commandWorked(secondary.getDB("foo").flag.insert({sentinel: 1})); // Check that all writes reached the secondary's op queue prior to // stepping down the original primary and got applied. 
assert.eq(secondary.getDB("foo").foo.find().itcount(), numDocuments); diff --git a/jstests/replsets/step_down_during_draining2.js b/jstests/replsets/step_down_during_draining2.js index 1e97f93865a..eb68e6ce0bf 100644 --- a/jstests/replsets/step_down_during_draining2.js +++ b/jstests/replsets/step_down_during_draining2.js @@ -67,7 +67,7 @@ function stepUpNode(node) { // Do an initial insert to prevent the secondary from going into recovery var numDocuments = 20; var coll = primary.getDB("foo").foo; -assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}})); +assert.commandWorked(coll.insert({x: 0}, {writeConcern: {w: 3}})); replSet.awaitReplication(); // Enable fail point to stop replication. @@ -76,7 +76,7 @@ secondaries.forEach(enableFailPoint); var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count; for (var i = 1; i < numDocuments; ++i) { - assert.writeOK(coll.insert({x: i})); + assert.commandWorked(coll.insert({x: i})); } jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments); assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount()); @@ -165,7 +165,7 @@ assert.soon(function() { }); jsTestLog('Ensure new primary is writable.'); -assert.writeOK(secondary.getDB("foo").flag.insert({sentinel: 1}, {writeConcern: {w: 3}})); +assert.commandWorked(secondary.getDB("foo").flag.insert({sentinel: 1}, {writeConcern: {w: 3}})); // Check that no writes were lost. assert.eq(secondary.getDB("foo").foo.find().itcount(), numDocuments); replSet.stopSet(); diff --git a/jstests/replsets/step_down_during_draining3.js b/jstests/replsets/step_down_during_draining3.js index 98c42955fc6..c3751ec136d 100644 --- a/jstests/replsets/step_down_during_draining3.js +++ b/jstests/replsets/step_down_during_draining3.js @@ -56,7 +56,7 @@ function stepUpNode(node) { // Do an initial insert to prevent the secondary from going into recovery var numDocuments = 20; var coll = primary.getDB("foo").foo; -assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}})); +assert.commandWorked(coll.insert({x: 0}, {writeConcern: {w: 3}})); replSet.awaitReplication(); // Enable fail point to stop replication. @@ -65,7 +65,7 @@ secondaries.forEach(enableFailPoint); var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count; for (var i = 1; i < numDocuments; ++i) { - assert.writeOK(coll.insert({x: i})); + assert.commandWorked(coll.insert({x: i})); } jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments); assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount()); diff --git a/jstests/replsets/stepdown.js b/jstests/replsets/stepdown.js index 6abdd335e92..7ce17186af4 100644 --- a/jstests/replsets/stepdown.js +++ b/jstests/replsets/stepdown.js @@ -28,7 +28,7 @@ var master = replTest.getPrimary(); // do a write print("\ndo a write"); -assert.writeOK(master.getDB("foo").bar.insert({x: 1})); +assert.commandWorked(master.getDB("foo").bar.insert({x: 1})); replTest.awaitReplication(); // In the event of any error, we have to unlock any nodes that we have fsyncLocked. 
@@ -61,7 +61,7 @@ try { for (var i = 0; i < 11; i++) { // do another write - assert.writeOK(master.getDB("foo").bar.insert({x: i})); + assert.commandWorked(master.getDB("foo").bar.insert({x: i})); } let res = assert.commandWorked(master.adminCommand({replSetGetStatus: 1})); diff --git a/jstests/replsets/stepdown3.js b/jstests/replsets/stepdown3.js index 508645cdf5a..5566e7379a1 100644 --- a/jstests/replsets/stepdown3.js +++ b/jstests/replsets/stepdown3.js @@ -20,7 +20,7 @@ replTest.awaitReplication(); // on the secondary (due to starting up), and we need to be within 10 seconds // to step down. var options = {writeConcern: {w: 2, wtimeout: 30000}}; -assert.writeOK(master.getDB("test").foo.insert({x: 2}, options)); +assert.commandWorked(master.getDB("test").foo.insert({x: 2}, options)); // lock secondary, to pause replication print("\nlock secondary"); var locked = replTest._slaves[0]; diff --git a/jstests/replsets/stepdown_catch_up_opt.js b/jstests/replsets/stepdown_catch_up_opt.js index 82c31b49a0a..718056d71c0 100644 --- a/jstests/replsets/stepdown_catch_up_opt.js +++ b/jstests/replsets/stepdown_catch_up_opt.js @@ -53,7 +53,7 @@ function disableFailPoint() { // shut down. try { jsTestLog('Write to primary to make secondary out of sync.'); - assert.writeOK(primary.getDB('test').foo.insert({i: 1}), 'Failed to insert document.'); + assert.commandWorked(primary.getDB('test').foo.insert({i: 1}), 'Failed to insert document.'); sleep(1000); // Secondary is now at least 1 second behind. diff --git a/jstests/replsets/stepdown_kill_other_ops.js b/jstests/replsets/stepdown_kill_other_ops.js index 06fc8de563f..00630104ea0 100644 --- a/jstests/replsets/stepdown_kill_other_ops.js +++ b/jstests/replsets/stepdown_kill_other_ops.js @@ -19,7 +19,7 @@ replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY); var primary = replSet.getPrimary(); assert.eq(primary.host, nodes[0], "primary assumed to be node 0"); -assert.writeOK(primary.getDB(name).foo.insert({x: 1}, {w: 2, wtimeout: 10000})); +assert.commandWorked(primary.getDB(name).foo.insert({x: 1}, {w: 2, wtimeout: 10000})); replSet.awaitReplication(); jsTestLog("Sleeping 30 seconds so the SECONDARY will be considered electable"); diff --git a/jstests/replsets/stepdown_killop.js b/jstests/replsets/stepdown_killop.js index 87d7d884a8b..e3446a5cdb5 100644 --- a/jstests/replsets/stepdown_killop.js +++ b/jstests/replsets/stepdown_killop.js @@ -35,7 +35,7 @@ assert.eq(primary.host, nodes[0], "primary assumed to be node 0"); // do a write then ask the PRIMARY to stepdown jsTestLog("Initiating stepdown"); -assert.writeOK(primary.getDB(name).foo.insert( +assert.commandWorked(primary.getDB(name).foo.insert( {myDoc: true, x: 1}, {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}})); var stepDownCmd = function() { var res = @@ -76,7 +76,7 @@ primary.getDB('admin').killOp(stepDownOpID); var exitCode = stepDowner(); assert.eq(0, exitCode); -assert.writeOK(primary.getDB(name).foo.remove({})); +assert.commandWorked(primary.getDB(name).foo.remove({})); restartServerReplication(secondary); replSet.stopSet(); })(); diff --git a/jstests/replsets/stepdown_long_wait_time.js b/jstests/replsets/stepdown_long_wait_time.js index 5958aa3a86c..6ece19ba068 100644 --- a/jstests/replsets/stepdown_long_wait_time.js +++ b/jstests/replsets/stepdown_long_wait_time.js @@ -33,7 +33,7 @@ stopServerReplication(secondary); jsTestLog("do a write then ask the PRIMARY to stepdown"); var options = {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}}; 
-assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options)); +assert.commandWorked(primary.getDB(name).foo.insert({x: 1}, options)); var stepDownCmd = function() { assert.commandWorked(db.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60})); diff --git a/jstests/replsets/stepdown_needs_electable_secondary.js b/jstests/replsets/stepdown_needs_electable_secondary.js index 4d2124cc831..cb92b5133d4 100644 --- a/jstests/replsets/stepdown_needs_electable_secondary.js +++ b/jstests/replsets/stepdown_needs_electable_secondary.js @@ -64,7 +64,8 @@ jsTestLog("Doing a write to primary."); var testDB = replTest.getPrimary().getDB('testdb'); var coll = testDB.stepdown_needs_electable_secondary; var timeout = ReplSetTest.kDefaultTimeoutMS; -assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 1, wtimeout: timeout}})); +assert.commandWorked( + coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 1, wtimeout: timeout}})); // Try to step down with only the primary caught up (1 node out of 5). // stepDown should fail. @@ -89,7 +90,8 @@ jsTestLog("Re-enabling writes to unelectable secondary: node #" + restartServerReplication(secondaryB_unelectable); // Wait for this secondary to catch up by issuing a write that must be replicated to 2 nodes -assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 2, wtimeout: timeout}})); +assert.commandWorked( + coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 2, wtimeout: timeout}})); // Try to step down and fail jsTestLog("Trying to step down primary with only 2 nodes out of 5 caught up."); @@ -103,7 +105,8 @@ jsTestLog("Re-enabling writes to unelectable secondary: node #" + restartServerReplication(secondaryC_unelectable); // Wait for this secondary to catch up by issuing a write that must be replicated to 3 nodes -assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 3, wtimeout: timeout}})); +assert.commandWorked( + coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 3, wtimeout: timeout}})); // Try to step down and fail jsTestLog("Trying to step down primary with a caught up majority that " + @@ -118,7 +121,8 @@ jsTestLog("Re-enabling writes to electable secondary: node #" + restartServerReplication(secondaryA_electable); // Wait for this secondary to catch up by issuing a write that must be replicated to 4 nodes -assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 4, wtimeout: timeout}})); +assert.commandWorked( + coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 4, wtimeout: timeout}})); // Try to step down. We expect success, so catch the exception thrown by 'replSetStepDown'. 
jsTestLog("Trying to step down primary with a caught up majority that " + diff --git a/jstests/replsets/stepdown_needs_majority.js b/jstests/replsets/stepdown_needs_majority.js index cb465fb3f30..ba50f56f1c2 100644 --- a/jstests/replsets/stepdown_needs_majority.js +++ b/jstests/replsets/stepdown_needs_majority.js @@ -62,7 +62,7 @@ stopReplicationOnSecondaries(replTest); // Write to the primary and attempt stepdown // jsTestLog("Issuing a write to the primary(" + primary.host + ") with write_concern:1"); -assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 1, wtimeout: timeout}})); +assert.commandWorked(coll.insert(dummy_doc, {writeConcern: {w: 1, wtimeout: timeout}})); jsTestLog("Trying to step down primary with only 1 node out of 5 caught up."); assertStepDownFailsWithExceededTimeLimit(primary); @@ -75,7 +75,7 @@ jsTestLog("Reenabling writes to one secondary (" + nodeIdStr(replTest, secondary restartServerReplication(secondaryA); jsTestLog("Issuing a write to the primary with write_concern:2"); -assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 2, wtimeout: timeout}})); +assert.commandWorked(coll.insert(dummy_doc, {writeConcern: {w: 2, wtimeout: timeout}})); jsTestLog("Trying to step down primary with only 2 nodes out of 5 caught up."); assertStepDownFailsWithExceededTimeLimit(primary); @@ -88,7 +88,7 @@ jsTestLog("Reenabling writes to another secondary (" + nodeIdStr(replTest, secon restartServerReplication(secondaryB); jsTestLog("Issuing a write to the primary with write_concern:3"); -assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 3, wtimeout: timeout}})); +assert.commandWorked(coll.insert(dummy_doc, {writeConcern: {w: 3, wtimeout: timeout}})); jsTestLog("Trying to step down primary with 3 nodes out of 5 caught up."); assertStepDownSucceeds(primary); diff --git a/jstests/replsets/stepup.js b/jstests/replsets/stepup.js index d4ce932a5bc..8c5b83ab586 100644 --- a/jstests/replsets/stepup.js +++ b/jstests/replsets/stepup.js @@ -27,14 +27,14 @@ assert.eq(primary, rst.getPrimary()); assert.commandWorked( secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'})); -assert.writeOK(primary.getDB("test").bar.insert({x: 2}, {writeConcern: {w: 1}})); +assert.commandWorked(primary.getDB("test").bar.insert({x: 2}, {writeConcern: {w: 1}})); res = secondary.adminCommand({replSetStepUp: 1}); assert.commandFailedWithCode(res, ErrorCodes.CommandFailed); assert.commandWorked( secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'})); // Wait for the secondary to catch up by replicating a doc to both nodes. -assert.writeOK(primary.getDB("test").bar.insert({x: 3}, {writeConcern: {w: "majority"}})); +assert.commandWorked(primary.getDB("test").bar.insert({x: 3}, {writeConcern: {w: "majority"}})); // Step up the secondary. Retry since the old primary may step down when we try to ask for its // vote. diff --git a/jstests/replsets/sync2.js b/jstests/replsets/sync2.js index 4a1053b04bb..281acafa667 100644 --- a/jstests/replsets/sync2.js +++ b/jstests/replsets/sync2.js @@ -39,13 +39,13 @@ var option = {writeConcern: {w: conns.length - 1, wtimeout: replTest.kDefaultTim // to bridging, it will not change sync sources and receive the write in time. This was not a // problem in 3.0 because the old version of mongobridge caused all the nodes to restart during // partitioning, forcing the set to rebuild the spanning tree. 
-assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option)); +assert.commandWorked(master.getDB("foo").bar.insert({x: 1}, option)); // 4 is connected to 3 conns[4].disconnect(conns[2]); conns[4].reconnect(conns[3]); -assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option)); +assert.commandWorked(master.getDB("foo").bar.insert({x: 1}, option)); replTest.stopSet(); }()); diff --git a/jstests/replsets/system_profile.js b/jstests/replsets/system_profile.js index 4e525d4fc93..97ba6d7abfd 100644 --- a/jstests/replsets/system_profile.js +++ b/jstests/replsets/system_profile.js @@ -18,13 +18,13 @@ var getLatestOp = function() { }; var primaryDB = rst.getPrimary().getDB('test'); -assert.writeOK(primaryDB.foo.insert({})); +assert.commandWorked(primaryDB.foo.insert({})); var op = getLatestOp(); // Enable profiling on the primary assert.commandWorked(primaryDB.runCommand({profile: 2})); assert.eq(op, getLatestOp(), "oplog entry created when profile was enabled"); -assert.writeOK(primaryDB.foo.insert({})); +assert.commandWorked(primaryDB.foo.insert({})); op = getLatestOp(); assert.commandWorked(primaryDB.runCommand({profile: 0})); assert.eq(op, getLatestOp(), "oplog entry created when profile was disabled"); @@ -36,7 +36,7 @@ assert.eq(op, getLatestOp(), "oplog entry created when system.profile was droppe assert.commandWorked(primaryDB.createCollection("system.profile", {capped: true, size: 1000})); assert.eq(op, getLatestOp(), "oplog entry created when system.profile was created"); assert.commandWorked(primaryDB.runCommand({profile: 2})); -assert.writeOK(primaryDB.foo.insert({})); +assert.commandWorked(primaryDB.foo.insert({})); op = getLatestOp(); assert.commandWorked(primaryDB.runCommand({profile: 0})); diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js index 361b6204c08..03d401412a2 100644 --- a/jstests/replsets/tags2.js +++ b/jstests/replsets/tags2.js @@ -34,7 +34,7 @@ var master = replTest.getPrimary(); var db = master.getDB("test"); var wtimeout = ReplSetTest.kDefaultTimeoutMS; -assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}})); +assert.commandWorked(db.foo.insert({x: 1}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}})); var nextVersion = replTest.getReplSetConfigFromNode().version + 1; conf.version = nextVersion; @@ -44,7 +44,7 @@ replTest.awaitReplication(); master = replTest.getPrimary(); var db = master.getDB("test"); -assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}})); +assert.commandWorked(db.foo.insert({x: 2}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}})); nextVersion++; conf.version = nextVersion; @@ -54,7 +54,7 @@ master.getDB("admin").runCommand({replSetReconfig: conf}); master = replTest.getPrimary(); var db = master.getDB("test"); -assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}})); +assert.commandWorked(db.foo.insert({x: 3}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}})); replTest.stopSet(); }()); diff --git a/jstests/replsets/tags_with_reconfig.js b/jstests/replsets/tags_with_reconfig.js index 6f28faf300a..7aadef7c5b8 100644 --- a/jstests/replsets/tags_with_reconfig.js +++ b/jstests/replsets/tags_with_reconfig.js @@ -36,10 +36,10 @@ var master = replTest.getPrimary(); var db = master.getDB("test"); // Insert a document with write concern : anydc -assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}})); +assert.commandWorked(db.foo.insert({x: 1}, {writeConcern: {w: 'anydc', wtimeout: 
wtimeout}})); // Insert a document with write concern : alldc -assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}})); +assert.commandWorked(db.foo.insert({x: 2}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}})); // Add a new tag to the replica set var config = master.getDB("local").system.replset.findOne(); @@ -64,10 +64,10 @@ master = replTest.getPrimary(); var db = master.getDB("test"); // Insert a document with write concern : anydc -assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}})); +assert.commandWorked(db.foo.insert({x: 3}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}})); // Insert a document with write concern : alldc -assert.writeOK(db.foo.insert({x: 4}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}})); +assert.commandWorked(db.foo.insert({x: 4}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}})); replTest.stopSet(); }()); diff --git a/jstests/replsets/temp_namespace.js b/jstests/replsets/temp_namespace.js index cc7bf462924..de276222b95 100644 --- a/jstests/replsets/temp_namespace.js +++ b/jstests/replsets/temp_namespace.js @@ -38,7 +38,7 @@ assert.commandWorked(masterDB.runCommand( assert.commandWorked(masterDB.runCommand( {applyOps: [{op: "c", ns: masterDB.getName() + ".$cmd", o: {create: "keep2", temp: 0}}]})); masterDB.runCommand({create: 'keep3'}); -assert.writeOK(masterDB.keep4.insert({}, {writeConcern: {w: 2}})); +assert.commandWorked(masterDB.keep4.insert({}, {writeConcern: {w: 2}})); // make sure they exist on primary and secondary function countCollection(mydb, nameFilter) { diff --git a/jstests/replsets/temp_namespace_restart_as_standalone.js b/jstests/replsets/temp_namespace_restart_as_standalone.js index e5061629c82..4fb4baefdad 100644 --- a/jstests/replsets/temp_namespace_restart_as_standalone.js +++ b/jstests/replsets/temp_namespace_restart_as_standalone.js @@ -96,7 +96,7 @@ rst.start(secondaryNodeId, {}, restart); // Verify that writes are replicated to the temporary collection and can successfully be applied // by the secondary after having restarted it. -assert.writeOK(primaryDB.temp_collection.insert( +assert.commandWorked(primaryDB.temp_collection.insert( {}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}})); rst.stopSet(); diff --git a/jstests/replsets/too_stale_secondary.js b/jstests/replsets/too_stale_secondary.js index ec71aa7bab1..c882bcc304e 100644 --- a/jstests/replsets/too_stale_secondary.js +++ b/jstests/replsets/too_stale_secondary.js @@ -55,7 +55,8 @@ function overflowOplog(conn, db, writeConcern) { // Keep inserting large documents until the oplog rolls over. 
const largeStr = new Array(32 * 1024).join('aaaaaaaa'); while (bsonWoCompare(getFirstOplogEntry(conn), firstOplogEntry) === 0) { - assert.writeOK(db[collName].insert({data: largeStr}, {writeConcern: {w: writeConcern}})); + assert.commandWorked( + db[collName].insert({data: largeStr}, {writeConcern: {w: writeConcern}})); } } @@ -109,7 +110,7 @@ var primary = replTest.getPrimary(); var primaryTestDB = primary.getDB(dbName); jsTestLog("1: Insert one document on the primary (Node 0) and ensure it is replicated."); -assert.writeOK(primaryTestDB[collName].insert({a: 1}, {writeConcern: {w: 3}})); +assert.commandWorked(primaryTestDB[collName].insert({a: 1}, {writeConcern: {w: 3}})); assert(!tooStale(replTest.nodes[2])); jsTestLog("2: Stop Node 2."); diff --git a/jstests/replsets/transaction_table_multi_statement_txn.js b/jstests/replsets/transaction_table_multi_statement_txn.js index 01fc3a577d5..c81c35e8bb0 100644 --- a/jstests/replsets/transaction_table_multi_statement_txn.js +++ b/jstests/replsets/transaction_table_multi_statement_txn.js @@ -26,8 +26,8 @@ replTest.awaitReplication(); const sessionId = session.getSessionId(); jsTestLog('Starting transaction on session ' + sessionId); session.startTransaction(); -assert.writeOK(coll.insert({_id: 0})); -assert.writeOK(coll.insert({_id: 1})); +assert.commandWorked(coll.insert({_id: 0})); +assert.commandWorked(coll.insert({_id: 1})); assert.commandWorked(session.commitTransaction_forTesting()); const opTime = session.getOperationTime(); const txnNum = session.getTxnNumber_forTesting(); diff --git a/jstests/replsets/transactions_during_step_down.js b/jstests/replsets/transactions_during_step_down.js index eb6aa6dad6e..c74062cb5e5 100644 --- a/jstests/replsets/transactions_during_step_down.js +++ b/jstests/replsets/transactions_during_step_down.js @@ -25,7 +25,7 @@ var primaryColl = db[collName]; var collNss = primaryColl.getFullName(); jsTestLog("Writing data to collection."); -assert.writeOK(primaryColl.insert({_id: 'readOp'}, {"writeConcern": {"w": 2}})); +assert.commandWorked(primaryColl.insert({_id: 'readOp'}, {"writeConcern": {"w": 2}})); TestData.dbName = dbName; TestData.collName = collName; @@ -120,7 +120,7 @@ testAbortOrCommitTxnFailsWithCode( {failPoint: "hangBeforeAbortingTxn", op: "session.abortTransaction_forTesting()"}); jsTestLog("Testing stepdown during running transaction in inactive state."); -TestData.cmd = "assert.writeOK(sessionColl.insert({_id: 'inactiveTxnOp'}))"; +TestData.cmd = "assert.commandWorked(sessionColl.insert({_id: 'inactiveTxnOp'}))"; // Do not start the transaction in parallel shell because when the parallel // shell work is done, implicit call to "endSessions" and "abortTransaction" // cmds are made. So, during step down we might not have any running diff --git a/jstests/replsets/write_concern_after_stepdown.js b/jstests/replsets/write_concern_after_stepdown.js index b54e62e8965..7c0cac74276 100644 --- a/jstests/replsets/write_concern_after_stepdown.js +++ b/jstests/replsets/write_concern_after_stepdown.js @@ -37,7 +37,7 @@ var primary = rst.getPrimary(); var secondaries = rst.getSecondaries(); assert.eq(nodes[0], primary); // Wait for all data bearing nodes to get up to date. -assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert( +assert.commandWorked(nodes[0].getDB(dbName).getCollection(collName).insert( {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}})); // Stop the secondaries from replicating. 
@@ -73,7 +73,7 @@ restartServerReplication(secondaries); waitForPrimary(nodes[1]); jsTest.log("Do a write to the new primary"); -assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert( +assert.commandWorked(nodes[1].getDB(dbName).getCollection(collName).insert( {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}})); jsTest.log("Reconnect the old primary to the rest of the nodes"); diff --git a/jstests/replsets/write_concern_after_stepdown_and_stepup.js b/jstests/replsets/write_concern_after_stepdown_and_stepup.js index daa143aa901..3cd88cfcf57 100644 --- a/jstests/replsets/write_concern_after_stepdown_and_stepup.js +++ b/jstests/replsets/write_concern_after_stepdown_and_stepup.js @@ -45,7 +45,7 @@ var primary = rst.getPrimary(); var secondaries = rst.getSecondaries(); assert.eq(nodes[0], primary); // Wait for all data bearing nodes to get up to date. -assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert( +assert.commandWorked(nodes[0].getDB(dbName).getCollection(collName).insert( {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}})); // Stop the secondaries from replicating. @@ -81,7 +81,7 @@ restartServerReplication(secondaries); waitForPrimary(nodes[1]); jsTest.log("Do a write to the new primary"); -assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert( +assert.commandWorked(nodes[1].getDB(dbName).getCollection(collName).insert( {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}})); jsTest.log("Reconnect the old primary to the rest of the nodes"); |
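
Every hunk above applies the same substitution: the WriteResult and BulkWriteResult objects returned by the shell write helpers (insert(), update(), remove(), save(), and bulk.execute()) are passed to assert.commandWorked() instead of the legacy assert.writeOK(). The jstest-style sketch below is illustrative only and not part of the commit; the database and collection names are hypothetical, and it assumes assert.commandWorked() accepts shell write results, which is what the hunks above rely on.

(function() {
'use strict';

// Illustrative sketch of the post-change pattern; mirrors the hunks above.
const rst = new ReplSetTest({nodes: 2});
rst.startSet();
rst.initiate();

// Hypothetical database and collection names.
const coll = rst.getPrimary().getDB('test').writeok_migration_example;

// Single write: the WriteResult (including one carrying an explicit write concern)
// is checked with assert.commandWorked() rather than assert.writeOK().
assert.commandWorked(coll.insert({x: 1}, {writeConcern: {w: 2}}));

// Bulk write: the BulkWriteResult from bulk.execute() is checked the same way.
const bulk = coll.initializeUnorderedBulkOp();
bulk.insert({x: 2});
assert.commandWorked(bulk.execute());

rst.stopSet();
})();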