Diffstat (limited to 'jstests/noPassthrough')
91 files changed, 204 insertions(+), 203 deletions(-)
diff --git a/jstests/noPassthrough/aggregation_cursor_invalidations.js b/jstests/noPassthrough/aggregation_cursor_invalidations.js index 7192e9595bc..07155ff7135 100644 --- a/jstests/noPassthrough/aggregation_cursor_invalidations.js +++ b/jstests/noPassthrough/aggregation_cursor_invalidations.js @@ -43,13 +43,13 @@ function setup() { sourceCollection.drop(); foreignCollection.drop(); for (let i = 0; i < numMatches; ++i) { - assert.writeOK(sourceCollection.insert({_id: i, local: i})); + assert.commandWorked(sourceCollection.insert({_id: i, local: i})); // We want to be able to pause a $lookup stage in a state where it has returned some but // not all of the results for a single lookup, so we need to insert at least // 'numMatches' matches for each source document. for (let j = 0; j < numMatches; ++j) { - assert.writeOK(foreignCollection.insert({_id: numMatches * i + j, foreign: i})); + assert.commandWorked(foreignCollection.insert({_id: numMatches * i + j, foreign: i})); } } } @@ -271,14 +271,14 @@ assert.commandWorked(testDB.runCommand( {create: sourceCollection.getName(), capped: true, size: maxCappedSizeBytes, max: maxNumDocs})); // Fill up about half of the collection. for (let i = 0; i < maxNumDocs / 2; ++i) { - assert.writeOK(sourceCollection.insert({_id: i})); + assert.commandWorked(sourceCollection.insert({_id: i})); } // Start an aggregation. assert.gt(maxNumDocs / 2, batchSize); res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch)); // Insert enough to force a truncation. for (let i = maxNumDocs / 2; i < 2 * maxNumDocs; ++i) { - assert.writeOK(sourceCollection.insert({_id: i})); + assert.commandWorked(sourceCollection.insert({_id: i})); } assert.eq(maxNumDocs, sourceCollection.count()); assert.commandFailedWithCode( diff --git a/jstests/noPassthrough/aggregation_zero_batchsize.js b/jstests/noPassthrough/aggregation_zero_batchsize.js index d143c75ede6..68ecfe24455 100644 --- a/jstests/noPassthrough/aggregation_zero_batchsize.js +++ b/jstests/noPassthrough/aggregation_zero_batchsize.js @@ -34,7 +34,7 @@ const bulk = coll.initializeUnorderedBulkOp(); for (let i = 0; i < nDocs; i++) { bulk.insert({_id: i, stringField: "string"}); } -assert.writeOK(bulk.execute()); +assert.commandWorked(bulk.execute()); res = assert.commandWorked( testDB.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 0}})); diff --git a/jstests/noPassthrough/apply_ops_mode.js b/jstests/noPassthrough/apply_ops_mode.js index 385cf0d532b..2376ed1c30f 100644 --- a/jstests/noPassthrough/apply_ops_mode.js +++ b/jstests/noPassthrough/apply_ops_mode.js @@ -13,7 +13,7 @@ var db = standalone.getDB("test"); var coll = db.getCollection("apply_ops_mode1"); coll.drop(); -assert.writeOK(coll.insert({_id: 1})); +assert.commandWorked(coll.insert({_id: 1})); // ------------ Testing normal updates --------------- @@ -42,7 +42,7 @@ assert.eq(coll.count({x: 1}), 1); coll = db.getCollection("apply_ops_mode2"); coll.drop(); updateOp.ns = coll.getFullName(); -assert.writeOK(coll.insert({_id: 1})); +assert.commandWorked(coll.insert({_id: 1})); // Test default succeeds in 'InitialSync' mode. 
assert.commandWorked(db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "InitialSync"})); diff --git a/jstests/noPassthrough/atomic_rename_collection.js b/jstests/noPassthrough/atomic_rename_collection.js index a6f39c1c40f..621abd43d39 100644 --- a/jstests/noPassthrough/atomic_rename_collection.js +++ b/jstests/noPassthrough/atomic_rename_collection.js @@ -27,8 +27,8 @@ const tests = [ ]; tests.forEach((test) => { test.source.drop(); - assert.writeOK(test.source.insert({})); - assert.writeOK(test.target.insert({})); + assert.commandWorked(test.source.insert({})); + assert.commandWorked(test.target.insert({})); let ts = local.oplog.rs.find().sort({$natural: -1}).limit(1).next().ts; let cmd = { diff --git a/jstests/noPassthrough/auto_retry_on_network_error.js b/jstests/noPassthrough/auto_retry_on_network_error.js index 1c5f8465ebb..fa20b1d61c1 100644 --- a/jstests/noPassthrough/auto_retry_on_network_error.js +++ b/jstests/noPassthrough/auto_retry_on_network_error.js @@ -68,7 +68,7 @@ assert.commandWorked(db.runCommandWithMetadata({find: collName}, {}).commandRepl // Retryable write commands that can be retried succeed. failNextCommand(db, "insert"); -assert.writeOK(db[collName].insert({x: 1})); +assert.commandWorked(db[collName].insert({x: 1})); failNextCommand(db, "insert"); assert.commandWorked(db.runCommandWithMetadata({ diff --git a/jstests/noPassthrough/change_stream_failover.js b/jstests/noPassthrough/change_stream_failover.js index b8ec132fdd8..ca14d36ea0a 100644 --- a/jstests/noPassthrough/change_stream_failover.js +++ b/jstests/noPassthrough/change_stream_failover.js @@ -35,9 +35,9 @@ for (let key of Object.keys(ChangeStreamWatchMode)) { // Be sure we can read from the change stream. Use {w: "majority"} so that we're still // guaranteed to be able to read after the failover. - assert.writeOK(coll.insert({_id: 0}, {writeConcern: {w: "majority"}})); - assert.writeOK(coll.insert({_id: 1}, {writeConcern: {w: "majority"}})); - assert.writeOK(coll.insert({_id: 2}, {writeConcern: {w: "majority"}})); + assert.commandWorked(coll.insert({_id: 0}, {writeConcern: {w: "majority"}})); + assert.commandWorked(coll.insert({_id: 1}, {writeConcern: {w: "majority"}})); + assert.commandWorked(coll.insert({_id: 2}, {writeConcern: {w: "majority"}})); const firstChange = cst.getOneChange(changeStream); assert.docEq(firstChange.fullDocument, {_id: 0}); diff --git a/jstests/noPassthrough/change_streams_require_majority_read_concern.js b/jstests/noPassthrough/change_streams_require_majority_read_concern.js index 6fdc4c2ee37..8bae0bcc287 100644 --- a/jstests/noPassthrough/change_streams_require_majority_read_concern.js +++ b/jstests/noPassthrough/change_streams_require_majority_read_concern.js @@ -49,7 +49,7 @@ function assertNextBatchIsEmpty(cursor) { // Test read concerns other than "majority" are not supported. const primaryColl = db.foo; -assert.writeOK(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}})); +assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}})); let res = primaryColl.runCommand({ aggregate: primaryColl.getName(), pipeline: [{$changeStream: {}}], @@ -81,7 +81,7 @@ let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collecti assert.eq(cursor.firstBatch.length, 0); // Insert a document on the primary only. 
-assert.writeOK(primaryColl.insert({_id: 2}, {writeConcern: {w: 1}})); +assert.commandWorked(primaryColl.insert({_id: 2}, {writeConcern: {w: 1}})); assertNextBatchIsEmpty(cursor); // Restart data replicaiton and wait until the new write becomes visible. diff --git a/jstests/noPassthrough/change_streams_update_lookup_collation.js b/jstests/noPassthrough/change_streams_update_lookup_collation.js index 996ce0e2c98..c6ed67b6dae 100644 --- a/jstests/noPassthrough/change_streams_update_lookup_collation.js +++ b/jstests/noPassthrough/change_streams_update_lookup_collation.js @@ -37,9 +37,9 @@ assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensi // Insert some documents that have similar _ids, but differ by case and diacritics. These _ids // would all match the collation on the strengthOneChangeStream, but should not be confused // during the update lookup using the strength 2 collection default collation. -assert.writeOK(coll.insert({_id: "abc", x: "abc"})); -assert.writeOK(coll.insert({_id: "abç", x: "ABC"})); -assert.writeOK(coll.insert({_id: "åbC", x: "AbÇ"})); +assert.commandWorked(coll.insert({_id: "abc", x: "abc"})); +assert.commandWorked(coll.insert({_id: "abç", x: "ABC"})); +assert.commandWorked(coll.insert({_id: "åbC", x: "AbÇ"})); const changeStreamDefaultCollation = coll.aggregate( [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {"fullDocument.x": "abc"}}], @@ -54,7 +54,7 @@ const strengthOneChangeStream = coll.aggregate( [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {"fullDocument.x": "abc"}}], {collation: strengthOneCollation}); -assert.writeOK(coll.update({_id: "abc"}, {$set: {updated: true}})); +assert.commandWorked(coll.update({_id: "abc"}, {$set: {updated: true}})); // Track the number of _id index usages to prove that the update lookup uses the _id index (and // therefore is using the correct collation for the lookup). @@ -72,7 +72,7 @@ assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 1); assert.docEq(strengthOneChangeStream.next().fullDocument, {_id: "abc", x: "abc", updated: true}); assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 2); -assert.writeOK(coll.update({_id: "abç"}, {$set: {updated: true}})); +assert.commandWorked(coll.update({_id: "abç"}, {$set: {updated: true}})); assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 3); // Again, both cursors should produce a document describing this update. 
@@ -83,7 +83,7 @@ assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 4); assert.docEq(strengthOneChangeStream.next().fullDocument, {_id: "abç", x: "ABC", updated: true}); assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 5); -assert.writeOK(coll.update({_id: "åbC"}, {$set: {updated: true}})); +assert.commandWorked(coll.update({_id: "åbC"}, {$set: {updated: true}})); assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 6); // Both $changeStream stages will see this update and both will look up the full document using diff --git a/jstests/noPassthrough/characterize_index_builds_on_restart.js b/jstests/noPassthrough/characterize_index_builds_on_restart.js index 35b0c7c9a7c..d618a249635 100644 --- a/jstests/noPassthrough/characterize_index_builds_on_restart.js +++ b/jstests/noPassthrough/characterize_index_builds_on_restart.js @@ -67,7 +67,7 @@ function addTestDocuments(db) { for (var i = 0; i < size; ++i) { bulk.insert({i: i, j: i * i, k: 1}); } - assert.writeOK(bulk.execute()); + assert.commandWorked(bulk.execute()); } function startIndexBuildOnSecondaryAndLeaveUnfinished(primaryDB, writeConcern, secondaryDB) { diff --git a/jstests/noPassthrough/client_metadata_log.js b/jstests/noPassthrough/client_metadata_log.js index 419a19a9ebb..d2662d8905d 100644 --- a/jstests/noPassthrough/client_metadata_log.js +++ b/jstests/noPassthrough/client_metadata_log.js @@ -7,7 +7,7 @@ let checkLog = function(conn) { let coll = conn.getCollection("test.foo"); - assert.writeOK(coll.insert({_id: 1})); + assert.commandWorked(coll.insert({_id: 1})); print(`Checking ${conn.fullOptions.logFile} for client metadata message`); let log = cat(conn.fullOptions.logFile); diff --git a/jstests/noPassthrough/client_metadata_slowlog.js b/jstests/noPassthrough/client_metadata_slowlog.js index aab419023fe..ca4a2da4cf9 100644 --- a/jstests/noPassthrough/client_metadata_slowlog.js +++ b/jstests/noPassthrough/client_metadata_slowlog.js @@ -8,7 +8,7 @@ let conn = MongoRunner.runMongod({useLogFiles: true}); assert.neq(null, conn, 'mongod was unable to start up'); let coll = conn.getCollection("test.foo"); -assert.writeOK(coll.insert({_id: 1})); +assert.commandWorked(coll.insert({_id: 1})); // Do a really slow query beyond the 100ms threshold let count = coll.count({ diff --git a/jstests/noPassthrough/commands_handle_kill.js b/jstests/noPassthrough/commands_handle_kill.js index 6811bf77ec2..cdc96579830 100644 --- a/jstests/noPassthrough/commands_handle_kill.js +++ b/jstests/noPassthrough/commands_handle_kill.js @@ -34,7 +34,7 @@ function setupCollection() { for (let i = 0; i < nDocs; i++) { bulk.insert({_id: i, a: i}); } - assert.writeOK(bulk.execute()); + assert.commandWorked(bulk.execute()); assert.commandWorked(coll.createIndex({a: 1})); } diff --git a/jstests/noPassthrough/commands_preserve_exec_error_code.js b/jstests/noPassthrough/commands_preserve_exec_error_code.js index 621fa72411e..3d0d1136f92 100644 --- a/jstests/noPassthrough/commands_preserve_exec_error_code.js +++ b/jstests/noPassthrough/commands_preserve_exec_error_code.js @@ -11,7 +11,7 @@ const db = mongod.getDB("test"); const coll = db.commands_preserve_exec_error_code; coll.drop(); -assert.writeOK(coll.insert([{_id: 0}, {_id: 1}, {_id: 2}])); +assert.commandWorked(coll.insert([{_id: 0}, {_id: 1}, {_id: 2}])); assert.commandWorked(coll.createIndex({geo: "2d"})); assert.commandWorked( diff --git a/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js b/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js index 
2183e6da600..1fe50111a8e 100644 --- a/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js +++ b/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js @@ -46,7 +46,7 @@ let coll = testDB.security_501; coll.drop(); for (let i = 0; i < 100; i++) { - assert.writeOK(coll.insert({_id: i})); + assert.commandWorked(coll.insert({_id: i})); } // Create our two users. diff --git a/jstests/noPassthrough/currentop_includes_await_time.js b/jstests/noPassthrough/currentop_includes_await_time.js index 5a5dee2f5ce..45506e05726 100644 --- a/jstests/noPassthrough/currentop_includes_await_time.js +++ b/jstests/noPassthrough/currentop_includes_await_time.js @@ -17,7 +17,7 @@ const coll = testDB.currentop_includes_await_time; coll.drop(); assert.commandWorked(testDB.createCollection(coll.getName(), {capped: true, size: 1024})); -assert.writeOK(coll.insert({_id: 1})); +assert.commandWorked(coll.insert({_id: 1})); let cmdRes = assert.commandWorked( testDB.runCommand({find: coll.getName(), tailable: true, awaitData: true})); @@ -46,7 +46,7 @@ assert.soon(function() { // A capped insertion should unblock the getMore, allowing the test to complete before the // getMore's awaitData time expires. -assert.writeOK(coll.insert({_id: 2})); +assert.commandWorked(coll.insert({_id: 2})); cleanupShell(); MongoRunner.stopMongod(conn); diff --git a/jstests/noPassthrough/currentop_query.js b/jstests/noPassthrough/currentop_query.js index 15e655d568a..82dcc421de3 100644 --- a/jstests/noPassthrough/currentop_query.js +++ b/jstests/noPassthrough/currentop_query.js @@ -71,7 +71,7 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) { dropAndRecreateTestCollection(); for (let i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({_id: i, a: i})); + assert.commandWorked(coll.insert({_id: i, a: i})); } const isLocalMongosCurOp = (FixtureHelpers.isMongos(testDB) && localOps); @@ -280,8 +280,8 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) { }, { test: function(db) { - assert.writeOK(db.currentop_query.remove({a: 2, $comment: "currentop_query"}, - {collation: {locale: "fr"}})); + assert.commandWorked(db.currentop_query.remove( + {a: 2, $comment: "currentop_query"}, {collation: {locale: "fr"}})); }, operation: "remove", planSummary: "COLLSCAN", @@ -294,7 +294,7 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) { }, { test: function(db) { - assert.writeOK( + assert.commandWorked( db.currentop_query.update({a: 1, $comment: "currentop_query"}, {$inc: {b: 1}}, {collation: {locale: "fr"}, multi: true})); @@ -372,7 +372,7 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) { // dropAndRecreateTestCollection(); for (let i = 0; i < 10; ++i) { - assert.writeOK(coll.insert({a: i})); + assert.commandWorked(coll.insert({a: i})); } const originatingCommands = { @@ -493,7 +493,7 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) { */ function runTruncationTests() { dropAndRecreateTestCollection(); - assert.writeOK(coll.insert({a: 1})); + assert.commandWorked(coll.insert({a: 1})); // When the currentOp command serializes the query object as a string, individual string // values inside it are truncated at 150 characters. 
To test "total length" truncation diff --git a/jstests/noPassthrough/durable_view_catalog.js b/jstests/noPassthrough/durable_view_catalog.js index 23de01b4b30..f6888b2f830 100644 --- a/jstests/noPassthrough/durable_view_catalog.js +++ b/jstests/noPassthrough/durable_view_catalog.js @@ -51,7 +51,7 @@ let listedViews = assert.sameMembers(listedViews, expectedViews, "persisted view definitions not correctly loaded"); // Insert an invalid view definition directly into system.views to bypass normal validation. -assert.writeOK(viewsDB.system.views.insert({_id: "badView", pipeline: "badType"})); +assert.commandWorked(viewsDB.system.views.insert({_id: "badView", pipeline: "badType"})); // Skip collection validation during stopMongod if invalid views exists. TestData.skipValidationOnInvalidViewDefinitions = true; @@ -78,7 +78,7 @@ assert.commandFailedWithCode(viewsDB.runCommand({listCollections: 1}), // Manually remove the invalid view definition from system.views, and then verify that view // operations work successfully without requiring a server restart. -assert.writeOK(viewsDB.system.views.remove({_id: "badView"})); +assert.commandWorked(viewsDB.system.views.remove({_id: "badView"})); assert.commandWorked(viewsDB.runCommand({find: "view2"})); assert.commandWorked(viewsDB.runCommand({create: "view4", viewOn: "collection"})); assert.commandWorked(viewsDB.runCommand({collMod: "view2", viewOn: "view4"})); diff --git a/jstests/noPassthrough/feature_compatibility_version.js b/jstests/noPassthrough/feature_compatibility_version.js index 4f7cd42f450..319b253fa99 100644 --- a/jstests/noPassthrough/feature_compatibility_version.js +++ b/jstests/noPassthrough/feature_compatibility_version.js @@ -16,21 +16,21 @@ checkFCV(adminDB, latestFCV); // Updating the featureCompatibilityVersion document changes the featureCompatibilityVersion // server parameter. 
-assert.writeOK(adminDB.system.version.update({_id: "featureCompatibilityVersion"}, - {$set: {version: lastStableFCV}})); +assert.commandWorked(adminDB.system.version.update({_id: "featureCompatibilityVersion"}, + {$set: {version: lastStableFCV}})); checkFCV(adminDB, lastStableFCV); -assert.writeOK( +assert.commandWorked( adminDB.system.version.update({_id: "featureCompatibilityVersion"}, {$set: {version: lastStableFCV, targetVersion: latestFCV}})); checkFCV(adminDB, lastStableFCV, latestFCV); -assert.writeOK( +assert.commandWorked( adminDB.system.version.update({_id: "featureCompatibilityVersion"}, {$set: {version: lastStableFCV, targetVersion: lastStableFCV}})); checkFCV(adminDB, lastStableFCV, lastStableFCV); -assert.writeOK( +assert.commandWorked( adminDB.system.version.update({_id: "featureCompatibilityVersion"}, {$set: {version: latestFCV}, $unset: {targetVersion: true}})); checkFCV(adminDB, latestFCV); diff --git a/jstests/noPassthrough/filemd5_kill_during_yield.js b/jstests/noPassthrough/filemd5_kill_during_yield.js index e2f74bcb1ce..20e717d0b6c 100644 --- a/jstests/noPassthrough/filemd5_kill_during_yield.js +++ b/jstests/noPassthrough/filemd5_kill_during_yield.js @@ -8,8 +8,8 @@ const conn = MongoRunner.runMongod(); assert.neq(null, conn); const db = conn.getDB("test"); db.fs.chunks.drop(); -assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "64string")})); -assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 1, data: new BinData(0, "test")})); +assert.commandWorked(db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "64string")})); +assert.commandWorked(db.fs.chunks.insert({files_id: 1, n: 1, data: new BinData(0, "test")})); db.fs.chunks.ensureIndex({files_id: 1, n: 1}); const kFailPointName = "waitInFilemd5DuringManualYield"; diff --git a/jstests/noPassthrough/geo_full.js b/jstests/noPassthrough/geo_full.js index 7ffd8e90c50..87cc6661587 100644 --- a/jstests/noPassthrough/geo_full.js +++ b/jstests/noPassthrough/geo_full.js @@ -394,7 +394,7 @@ for (var test = 0; test < numTests; test++) { doc._id = i; bulk.insert(doc); } - assert.writeOK(bulk.execute()); + assert.commandWorked(bulk.execute()); var indexDoc = {"locs.loc": "2d"}; randIndexAdditions(indexDoc); diff --git a/jstests/noPassthrough/geo_mnypts_plus_fields.js b/jstests/noPassthrough/geo_mnypts_plus_fields.js index 9f402db0d16..467c46e3698 100644 --- a/jstests/noPassthrough/geo_mnypts_plus_fields.js +++ b/jstests/noPassthrough/geo_mnypts_plus_fields.js @@ -39,7 +39,7 @@ for (var fields = 1; fields < maxFields; fields++) { bulk.insert(doc); } - assert.writeOK(bulk.execute()); + assert.commandWorked(bulk.execute()); // Create the query for the additional fields const queryFields = {}; diff --git a/jstests/noPassthrough/global_operation_latency_histogram.js b/jstests/noPassthrough/global_operation_latency_histogram.js index 2f103e70a96..2045391c71c 100644 --- a/jstests/noPassthrough/global_operation_latency_histogram.js +++ b/jstests/noPassthrough/global_operation_latency_histogram.js @@ -31,13 +31,13 @@ function checkHistogramDiff(reads, writes, commands) { // Insert var numRecords = 100; for (var i = 0; i < numRecords; i++) { - assert.writeOK(testColl.insert({_id: i})); + assert.commandWorked(testColl.insert({_id: i})); } lastHistogram = checkHistogramDiff(0, numRecords, 0); // Update for (var i = 0; i < numRecords; i++) { - assert.writeOK(testColl.update({_id: i}, {x: i})); + assert.commandWorked(testColl.update({_id: i}, {x: i})); } lastHistogram = checkHistogramDiff(0, numRecords, 0); 
@@ -68,13 +68,13 @@ lastHistogram = checkHistogramDiff(0, 0, numRecords - 1); // Remove for (var i = 0; i < numRecords; i++) { - assert.writeOK(testColl.remove({_id: i})); + assert.commandWorked(testColl.remove({_id: i})); } lastHistogram = checkHistogramDiff(0, numRecords, 0); // Upsert for (var i = 0; i < numRecords; i++) { - assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1})); + assert.commandWorked(testColl.update({_id: i}, {x: i}, {upsert: 1})); } lastHistogram = checkHistogramDiff(0, numRecords, 0); diff --git a/jstests/noPassthrough/ignore_notablescan.js b/jstests/noPassthrough/ignore_notablescan.js index 255b646f757..8a0730b79ef 100644 --- a/jstests/noPassthrough/ignore_notablescan.js +++ b/jstests/noPassthrough/ignore_notablescan.js @@ -14,11 +14,11 @@ function runTests(ServerType) { const primaryDB = session.getDatabase(dbName); // Implicitly create the collection outside of the transaction. - assert.writeOK(primaryDB.getCollection(collName).insert({x: 1})); + assert.commandWorked(primaryDB.getCollection(collName).insert({x: 1})); // Run a transaction so the 'config.transactions' collection is implicitly created. session.startTransaction(); - assert.writeOK(primaryDB.getCollection(collName).insert({x: 2})); + assert.commandWorked(primaryDB.getCollection(collName).insert({x: 2})); assert.commandWorked(session.commitTransaction_forTesting()); // Run a predicate query that would fail if we did not ignore the 'notablescan' flag. diff --git a/jstests/noPassthrough/implicit_sessions.js b/jstests/noPassthrough/implicit_sessions.js index f0bb9d972f9..315f78cd5f1 100644 --- a/jstests/noPassthrough/implicit_sessions.js +++ b/jstests/noPassthrough/implicit_sessions.js @@ -69,7 +69,7 @@ function runTest() { const testDB = conn.getDB("test"); const coll = testDB.getCollection("foo"); const implicitId = inspectCommandForSessionId(function() { - assert.writeOK(coll.insert({x: 1})); + assert.commandWorked(coll.insert({x: 1})); }, {shouldIncludeId: true}); // Unacknowledged writes have no session id. @@ -88,43 +88,43 @@ function runTest() { // Further commands run on the same database should reuse the implicit session. inspectCommandForSessionId(function() { - assert.writeOK(coll.insert({x: 1})); + assert.commandWorked(coll.insert({x: 1})); }, {shouldIncludeId: true, expectedId: implicitId}); // New collections from the same database should inherit the implicit session. const collTwo = testDB.getCollection("bar"); inspectCommandForSessionId(function() { - assert.writeOK(collTwo.insert({x: 1})); + assert.commandWorked(collTwo.insert({x: 1})); }, {shouldIncludeId: true, expectedId: implicitId}); // Sibling databases should inherit the implicit session. let siblingColl = testDB.getSiblingDB("foo").getCollection("bar"); inspectCommandForSessionId(function() { - assert.writeOK(siblingColl.insert({x: 1})); + assert.commandWorked(siblingColl.insert({x: 1})); }, {shouldIncludeId: true, expectedId: implicitId}); // A new database from the same connection should inherit the implicit session. const newCollSameConn = conn.getDB("testTwo").getCollection("foo"); inspectCommandForSessionId(function() { - assert.writeOK(newCollSameConn.insert({x: 1})); + assert.commandWorked(newCollSameConn.insert({x: 1})); }, {shouldIncludeId: true, expectedId: implicitId}); // A new database from a new connection should use a different implicit session. 
const newCollNewConn = new Mongo(conn.host).getDB("test").getCollection("foo"); inspectCommandForSessionId(function() { - assert.writeOK(newCollNewConn.insert({x: 1})); + assert.commandWorked(newCollNewConn.insert({x: 1})); }, {shouldIncludeId: true, differentFromId: implicitId}); // The original implicit session should still live on the first database. inspectCommandForSessionId(function() { - assert.writeOK(coll.insert({x: 1})); + assert.commandWorked(coll.insert({x: 1})); }, {shouldIncludeId: true, expectedId: implicitId}); // Databases created from an explicit session should override any implicit sessions. const session = conn.startSession(); const sessionColl = session.getDatabase("test").getCollection("foo"); const explicitId = inspectCommandForSessionId(function() { - assert.writeOK(sessionColl.insert({x: 1})); + assert.commandWorked(sessionColl.insert({x: 1})); }, {shouldIncludeId: true, differentFromId: implicitId}); assert(bsonBinaryEqual(session.getSessionId(), explicitId), @@ -137,14 +137,14 @@ function runTest() { // The original implicit session should still live on the first database. inspectCommandForSessionId(function() { - assert.writeOK(coll.insert({x: 1})); + assert.commandWorked(coll.insert({x: 1})); }, {shouldIncludeId: true, expectedId: implicitId}); // New databases on the same connection as the explicit session should still inherit the // original implicit session. const newCollSameConnAfter = conn.getDB("testThree").getCollection("foo"); inspectCommandForSessionId(function() { - assert.writeOK(newCollSameConnAfter.insert({x: 1})); + assert.commandWorked(newCollSameConnAfter.insert({x: 1})); }, {shouldIncludeId: true, expectedId: implicitId}); session.endSession(); @@ -158,13 +158,13 @@ function runTestTransitionToDisabled() { // Existing implicit sessions should be erased when the disable flag is set. const coll = conn.getDB("test").getCollection("foo"); const implicitId = inspectCommandForSessionId(function() { - assert.writeOK(coll.insert({x: 1})); + assert.commandWorked(coll.insert({x: 1})); }, {shouldIncludeId: true}); TestData.disableImplicitSessions = true; inspectCommandForSessionId(function() { - assert.writeOK(coll.insert({x: 1})); + assert.commandWorked(coll.insert({x: 1})); }, {shouldIncludeId: false}); // After the flag is unset, databases using existing connections with implicit sessions will @@ -173,30 +173,30 @@ function runTestTransitionToDisabled() { TestData.disableImplicitSessions = false; inspectCommandForSessionId(function() { - assert.writeOK(coll.insert({x: 1})); + assert.commandWorked(coll.insert({x: 1})); }, {shouldIncludeId: true, expectedId: implicitId}); const newColl = conn.getDB("test").getCollection("foo"); inspectCommandForSessionId(function() { - assert.writeOK(newColl.insert({x: 1})); + assert.commandWorked(newColl.insert({x: 1})); }, {shouldIncludeId: true, expectedId: implicitId}); const newCollNewConn = new Mongo(conn.host).getDB("test").getCollection("foo"); inspectCommandForSessionId(function() { - assert.writeOK(newCollNewConn.insert({x: 1})); + assert.commandWorked(newCollNewConn.insert({x: 1})); }, {shouldIncludeId: true, differentFromId: implicitId}); // Explicit sessions should not be affected by the disable flag being set. 
const session = conn.startSession(); const sessionColl = session.getDatabase("test").getCollection("foo"); const explicitId = inspectCommandForSessionId(function() { - assert.writeOK(sessionColl.insert({x: 1})); + assert.commandWorked(sessionColl.insert({x: 1})); }, {shouldIncludeId: true}); TestData.disableImplicitSessions = true; inspectCommandForSessionId(function() { - assert.writeOK(sessionColl.insert({x: 1})); + assert.commandWorked(sessionColl.insert({x: 1})); }, {shouldIncludeId: true, expectedId: explicitId}); session.endSession(); @@ -210,14 +210,14 @@ function runTestDisabled() { // Commands run without an explicit session should not use an implicit one. const coll = conn.getDB("test").getCollection("foo"); inspectCommandForSessionId(function() { - assert.writeOK(coll.insert({x: 1})); + assert.commandWorked(coll.insert({x: 1})); }, {shouldIncludeId: false}); // Explicit sessions should still include session ids. const session = conn.startSession(); const sessionColl = session.getDatabase("test").getCollection("foo"); inspectCommandForSessionId(function() { - assert.writeOK(sessionColl.insert({x: 1})); + assert.commandWorked(sessionColl.insert({x: 1})); }, {shouldIncludeId: true}); // Commands run in a parallel shell inherit the disable flag. @@ -225,7 +225,7 @@ function runTestDisabled() { const awaitShell = startParallelShell(function() { const parallelColl = db.getCollection("foo"); TestData.inspectCommandForSessionId(function() { - assert.writeOK(parallelColl.insert({x: 1})); + assert.commandWorked(parallelColl.insert({x: 1})); }, {shouldIncludeId: false}); }, conn.port); awaitShell(); diff --git a/jstests/noPassthrough/index_killop_standalone.js b/jstests/noPassthrough/index_killop_standalone.js index 6692eda9e31..be4a3aff1e1 100644 --- a/jstests/noPassthrough/index_killop_standalone.js +++ b/jstests/noPassthrough/index_killop_standalone.js @@ -11,7 +11,7 @@ assert.neq(null, conn, "mongod was unable to start up"); const testDB = conn.getDB("test"); assert.commandWorked(testDB.dropDatabase()); -assert.writeOK(testDB.test.insert({a: 1})); +assert.commandWorked(testDB.test.insert({a: 1})); const coll = testDB.test; // Test that building an index with 'options' can be aborted using killop. diff --git a/jstests/noPassthrough/index_partial_no_explain_cmds.js b/jstests/noPassthrough/index_partial_no_explain_cmds.js index f1295e5531c..9d0dc8eb246 100644 --- a/jstests/noPassthrough/index_partial_no_explain_cmds.js +++ b/jstests/noPassthrough/index_partial_no_explain_cmds.js @@ -10,8 +10,8 @@ coll.drop(); assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}})); -assert.writeOK(coll.insert({_id: 1, x: 5, a: 2})); // Not in index. -assert.writeOK(coll.insert({_id: 2, x: 6, a: 1})); // In index. +assert.commandWorked(coll.insert({_id: 1, x: 5, a: 2})); // Not in index. +assert.commandWorked(coll.insert({_id: 2, x: 6, a: 1})); // In index. // Verify we will throw if the partial index can't be used. 
assert.throws(function() { diff --git a/jstests/noPassthrough/index_stepdown_after_init.js b/jstests/noPassthrough/index_stepdown_after_init.js index 4289d18ec8a..d104cd4693c 100644 --- a/jstests/noPassthrough/index_stepdown_after_init.js +++ b/jstests/noPassthrough/index_stepdown_after_init.js @@ -39,7 +39,7 @@ if (!enableIndexBuildsCoordinator) { return; } -assert.writeOK(coll.insert({a: 1})); +assert.commandWorked(coll.insert({a: 1})); assert.commandWorked(primary.adminCommand( {configureFailPoint: 'hangAfterInitializingIndexBuild', mode: 'alwaysOn'})); diff --git a/jstests/noPassthrough/index_stepdown_during_scan.js b/jstests/noPassthrough/index_stepdown_during_scan.js index e8d003506e9..9628318c208 100644 --- a/jstests/noPassthrough/index_stepdown_during_scan.js +++ b/jstests/noPassthrough/index_stepdown_during_scan.js @@ -27,7 +27,7 @@ const primary = rst.getPrimary(); const testDB = primary.getDB('test'); const coll = testDB.getCollection('test'); -assert.writeOK(coll.insert({a: 1})); +assert.commandWorked(coll.insert({a: 1})); IndexBuildTest.pauseIndexBuilds(primary); diff --git a/jstests/noPassthrough/index_version_v2.js b/jstests/noPassthrough/index_version_v2.js index 886c7c39590..16000891cc9 100644 --- a/jstests/noPassthrough/index_version_v2.js +++ b/jstests/noPassthrough/index_version_v2.js @@ -57,7 +57,7 @@ indexSpec = getIndexSpecByName(testDB.collation, "withCollation"); assert.eq(2, indexSpec.v, tojson(indexSpec)); // Test that indexing decimal data succeeds. -assert.writeOK(testDB.decimal.insert({_id: new NumberDecimal("42")})); +assert.commandWorked(testDB.decimal.insert({_id: new NumberDecimal("42")})); // // Index version v=1 @@ -97,7 +97,7 @@ if (storageEnginesUsingKeyString.has(storageEngine)) { assert.writeErrorWithCode(testDB.decimal.insert({num: new NumberDecimal("42")}), ErrorCodes.UnsupportedFormat); } else { - assert.writeOK(testDB.decimal.insert({num: new NumberDecimal("42")})); + assert.commandWorked(testDB.decimal.insert({num: new NumberDecimal("42")})); } // diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js index 1b06b881e30..c2bdf180be3 100644 --- a/jstests/noPassthrough/indexbg1.js +++ b/jstests/noPassthrough/indexbg1.js @@ -53,7 +53,7 @@ while (1) { // if indexing finishes before we can run checks, try indexing w/ m for (var i = 0; i < size; ++i) { bulk.insert({i: i}); } - assert.writeOK(bulk.execute()); + assert.commandWorked(bulk.execute()); assert.eq(size, t.count()); bgIndexBuildPid = doParallel(fullName + ".ensureIndex( {i:1}, {background:true} )"); @@ -76,14 +76,14 @@ while (1) { // if indexing finishes before we can run checks, try indexing w/ m assert(ex.executionStats.totalKeysExamined < 1000, "took too long to find 100: " + tojson(ex)); - assert.writeOK(t.remove({i: 40}, true)); // table scan - assert.writeOK(t.update({i: 10}, {i: -10})); // should scan 10 + assert.commandWorked(t.remove({i: 40}, true)); // table scan + assert.commandWorked(t.update({i: 10}, {i: -10})); // should scan 10 var id = t.find().hint({$natural: -1}).next()._id; - assert.writeOK(t.update({_id: id}, {i: -2})); - assert.writeOK(t.save({i: -50})); - assert.writeOK(t.save({i: size + 2})); + assert.commandWorked(t.update({_id: id}, {i: -2})); + assert.commandWorked(t.save({i: -50})); + assert.commandWorked(t.save({i: size + 2})); assert.eq(size + 1, t.count()); diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js index c7a119048ec..ec873eccf7e 100644 --- a/jstests/noPassthrough/indexbg2.js +++ 
b/jstests/noPassthrough/indexbg2.js @@ -58,7 +58,7 @@ let turnFailPointOff = function(failPointName) { // for the duration of the build. let failOnExistingDuplicateValue = function(coll) { let duplicateKey = 0; - assert.writeOK(coll.save({i: duplicateKey})); + assert.commandWorked(coll.save({i: duplicateKey})); let bgIndexBuildPid = indexBuild(); waitProgram(bgIndexBuildPid); @@ -81,7 +81,7 @@ let failOnInsertedDuplicateValue = function(coll) { jsTestLog("Waiting to hang before index build of i=" + duplicateKey); checkLog.contains(conn, "Hanging before index build of i=" + duplicateKey); - assert.writeOK(coll.save({i: duplicateKey})); + assert.commandWorked(coll.save({i: duplicateKey})); } finally { turnFailPointOff("hangBeforeIndexBuildOf"); } @@ -135,7 +135,7 @@ let doTest = function() { coll.drop(); for (let i = 0; i < size; ++i) { - assert.writeOK(coll.save({i: i})); + assert.commandWorked(coll.save({i: i})); } assert.eq(size, coll.count()); assert.eq(1, coll.getIndexes().length, "_id index should already exist"); diff --git a/jstests/noPassthrough/indexbg_drop.js b/jstests/noPassthrough/indexbg_drop.js index 6ee8e47a54d..ff192c9f3d4 100644 --- a/jstests/noPassthrough/indexbg_drop.js +++ b/jstests/noPassthrough/indexbg_drop.js @@ -45,7 +45,7 @@ var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp(); for (i = 0; i < size; ++i) { bulk.insert({i: Random.rand()}); } -assert.writeOK(bulk.execute({w: 2, wtimeout: replTest.kDefaultTimeoutMS})); +assert.commandWorked(bulk.execute({w: 2, wtimeout: replTest.kDefaultTimeoutMS})); assert.commandWorked( secondDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"})); diff --git a/jstests/noPassthrough/indexbg_killop_apply_ops.js b/jstests/noPassthrough/indexbg_killop_apply_ops.js index dc35b1d0b98..0a41b18749c 100644 --- a/jstests/noPassthrough/indexbg_killop_apply_ops.js +++ b/jstests/noPassthrough/indexbg_killop_apply_ops.js @@ -29,7 +29,7 @@ const primary = rst.getPrimary(); const testDB = primary.getDB('test'); const coll = testDB.getCollection('test'); -assert.writeOK(coll.insert({a: 1})); +assert.commandWorked(coll.insert({a: 1})); IndexBuildTest.pauseIndexBuilds(primary); diff --git a/jstests/noPassthrough/indexbg_killop_primary.js b/jstests/noPassthrough/indexbg_killop_primary.js index 27042c77b5b..cad36b1f688 100644 --- a/jstests/noPassthrough/indexbg_killop_primary.js +++ b/jstests/noPassthrough/indexbg_killop_primary.js @@ -26,7 +26,7 @@ const primary = rst.getPrimary(); const testDB = primary.getDB('test'); const coll = testDB.getCollection('test'); -assert.writeOK(coll.insert({a: 1})); +assert.commandWorked(coll.insert({a: 1})); IndexBuildTest.pauseIndexBuilds(primary); diff --git a/jstests/noPassthrough/indexbg_killop_primary_after_init.js b/jstests/noPassthrough/indexbg_killop_primary_after_init.js index 7cd5c318380..2fe700533fc 100644 --- a/jstests/noPassthrough/indexbg_killop_primary_after_init.js +++ b/jstests/noPassthrough/indexbg_killop_primary_after_init.js @@ -39,7 +39,7 @@ if (!enableIndexBuildsCoordinator) { return; } -assert.writeOK(coll.insert({a: 1})); +assert.commandWorked(coll.insert({a: 1})); assert.commandWorked(primary.adminCommand( {configureFailPoint: 'hangAfterInitializingIndexBuild', mode: 'alwaysOn'})); diff --git a/jstests/noPassthrough/indexbg_killop_secondary.js b/jstests/noPassthrough/indexbg_killop_secondary.js index 261d65788de..f83077de565 100644 --- a/jstests/noPassthrough/indexbg_killop_secondary.js +++ b/jstests/noPassthrough/indexbg_killop_secondary.js 
@@ -26,7 +26,7 @@ const primary = rst.getPrimary(); const testDB = primary.getDB('test'); const coll = testDB.getCollection('test'); -assert.writeOK(coll.insert({a: 1})); +assert.commandWorked(coll.insert({a: 1})); const secondary = rst.getSecondary(); IndexBuildTest.pauseIndexBuilds(secondary); diff --git a/jstests/noPassthrough/indexbg_shutdown.js b/jstests/noPassthrough/indexbg_shutdown.js index 88007a29e1a..7907780140c 100644 --- a/jstests/noPassthrough/indexbg_shutdown.js +++ b/jstests/noPassthrough/indexbg_shutdown.js @@ -47,7 +47,7 @@ var bulk = masterColl.initializeUnorderedBulkOp(); for (var i = 0; i < size; ++i) { bulk.insert({i: i, j: i * i}); } -assert.writeOK(bulk.execute()); +assert.commandWorked(bulk.execute()); IndexBuildTest.pauseIndexBuilds(second); diff --git a/jstests/noPassthrough/initial_sync_wt_cache_full.js b/jstests/noPassthrough/initial_sync_wt_cache_full.js index 90d19a172ab..fb47472a674 100644 --- a/jstests/noPassthrough/initial_sync_wt_cache_full.js +++ b/jstests/noPassthrough/initial_sync_wt_cache_full.js @@ -34,7 +34,7 @@ const numDocs = 2; const minDocSizeMB = 10; for (let i = 0; i < numDocs; ++i) { - assert.writeOK( + assert.commandWorked( coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)}, {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}})); } @@ -57,7 +57,7 @@ checkLog.contains(secondary, 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled'); for (let i = 0; i < numDocs; ++i) { for (let j = 0; j < numUpdates; ++j) { - assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}})); + assert.commandWorked(coll.update({_id: i}, {$inc: {i: 1}})); } } diff --git a/jstests/noPassthrough/js_protection.js b/jstests/noPassthrough/js_protection.js index 7783488a663..2937e488143 100644 --- a/jstests/noPassthrough/js_protection.js +++ b/jstests/noPassthrough/js_protection.js @@ -62,7 +62,7 @@ function assertNoStoredWhere() { {$set: {y: 100}}, false, true); - assert.writeOK(res); + assert.commandWorked(res); doc = t.findOne({name: "testdoc"}); assert.neq(null, doc); diff --git a/jstests/noPassthrough/js_protection_roundtrip.js b/jstests/noPassthrough/js_protection_roundtrip.js index 5c0c0b4da10..0abef97ebc2 100644 --- a/jstests/noPassthrough/js_protection_roundtrip.js +++ b/jstests/noPassthrough/js_protection_roundtrip.js @@ -48,7 +48,7 @@ var result = t.insert({ return "yes"; } }); -assert.writeOK(result); +assert.commandWorked(result); testFunctionUnmarshall(true, withJavaScriptProtection); testFunctionUnmarshall(false, withoutJavaScriptProtection); diff --git a/jstests/noPassthrough/killop.js b/jstests/noPassthrough/killop.js index 53f14b1f838..50445470323 100644 --- a/jstests/noPassthrough/killop.js +++ b/jstests/noPassthrough/killop.js @@ -12,7 +12,7 @@ const collName = "test"; function runTest(conn, shardConn) { const db = conn.getDB(dbName); assert.commandWorked(db.dropDatabase()); - assert.writeOK(db.getCollection(collName).insert({x: 1})); + assert.commandWorked(db.getCollection(collName).insert({x: 1})); assert.commandWorked( shardConn.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1})); diff --git a/jstests/noPassthrough/latency_includes_lock_acquisition_time.js b/jstests/noPassthrough/latency_includes_lock_acquisition_time.js index 5b1757188e7..1532954f96a 100644 --- a/jstests/noPassthrough/latency_includes_lock_acquisition_time.js +++ b/jstests/noPassthrough/latency_includes_lock_acquisition_time.js @@ -45,7 +45,7 @@ function runTests() { // lines, we are just verifying that the log 
line appears, which implies that the recorded // latency exceeds slowms. runWithWait(hangMillis, function() { - assert.writeOK(testColl.insert({a: 1})); + assert.commandWorked(testColl.insert({a: 1})); }); let profileEntry; if (conn.writeMode() === "commands") { @@ -66,7 +66,7 @@ function runTests() { // Test that update profiler/logs include lock acquisition time. runWithWait(hangMillis, function() { - assert.writeOK(testColl.update({}, {$set: {b: 1}})); + assert.commandWorked(testColl.update({}, {$set: {b: 1}})); }); profileEntry = getLatestProfilerEntry(testDB, { ns: testColl.getFullName(), @@ -87,7 +87,7 @@ function runTests() { checkLog.contains(conn, "find { find: \"lock_acquisition_time\""); // Test that getMore profiler/logs include lock acquisition time. - assert.writeOK(testColl.insert([{a: 2}, {a: 3}])); + assert.commandWorked(testColl.insert([{a: 2}, {a: 3}])); runWithWait(hangMillis, function() { // Include a batchSize in order to ensure that a getMore is issued. assert.eq(3, testColl.find().batchSize(2).itcount()); @@ -98,7 +98,7 @@ function runTests() { }); assert.gte(profileEntry.millis, hangMillis - padding); checkLog.contains(conn, "originatingCommand: { find: \"lock_acquisition_time\""); - assert.writeOK(testColl.remove({a: {$gt: 1}})); + assert.commandWorked(testColl.remove({a: {$gt: 1}})); // Test that aggregate profiler/logs include lock acquisition time. runWithWait(hangMillis, function() { @@ -135,7 +135,7 @@ function runTests() { // Test that delete profiler/logs include lock acquisition time. runWithWait(hangMillis, function() { - assert.writeOK(testColl.remove({b: 1})); + assert.commandWorked(testColl.remove({b: 1})); }); profileEntry = getLatestProfilerEntry(testDB, { ns: testColl.getFullName(), diff --git a/jstests/noPassthrough/libs/backup_restore.js b/jstests/noPassthrough/libs/backup_restore.js index 37411b9d061..b09eb6ea937 100644 --- a/jstests/noPassthrough/libs/backup_restore.js +++ b/jstests/noPassthrough/libs/backup_restore.js @@ -83,20 +83,20 @@ var BackupRestoreTest = function(options) { doc: largeValue.substring(0, match % largeValue.length), }); } - assert.writeOK(bulk.execute(writeConcern)); + assert.commandWorked(bulk.execute(writeConcern)); } else if (op < 0.4) { // 20% of the operations: update docs. var updateOpts = {upsert: true, multi: true, writeConcern: writeConcern}; - assert.writeOK(coll.update({x: {$gte: match}}, - {$inc: {x: baseNum}, $set: {n: 'hello'}}, - updateOpts)); + assert.commandWorked(coll.update({x: {$gte: match}}, + {$inc: {x: baseNum}, $set: {n: 'hello'}}, + updateOpts)); } else if (op < 0.9) { // 50% of the operations: find matchings docs. // itcount() consumes the cursor coll.find({x: {$gte: match}}).itcount(); } else { // 10% of the operations: remove matching docs. - assert.writeOK( + assert.commandWorked( coll.remove({x: {$gte: match}}, {writeConcern: writeConcern})); } } catch (e) { @@ -393,7 +393,7 @@ var BackupRestoreTest = function(options) { jsTestLog('Inserting single document into primary ' + primary.host + ' with writeConcern w:' + rst.nodes.length); - var writeResult = assert.writeOK(primary.getDB("test").foo.insert( + var writeResult = assert.commandWorked(primary.getDB("test").foo.insert( {}, {writeConcern: {w: rst.nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}})); // Stop set. 
diff --git a/jstests/noPassthrough/libs/concurrent_rename.js b/jstests/noPassthrough/libs/concurrent_rename.js index 79d1a0074c3..11d6d8a925b 100644 --- a/jstests/noPassthrough/libs/concurrent_rename.js +++ b/jstests/noPassthrough/libs/concurrent_rename.js @@ -4,7 +4,7 @@ function doRenames(dbName, collName, otherName) { const repeatRename = 200; // Signal to the parent shell that the parallel shell has started. - assert.writeOK(db.await_data.insert({_id: "signal parent shell"})); + assert.commandWorked(db.await_data.insert({_id: "signal parent shell"})); let renameDB = db.getSiblingDB(dbName); for (let i = 0; i < repeatRename; i++) { // Rename the collection back and forth. @@ -12,5 +12,5 @@ function doRenames(dbName, collName, otherName) { assert.commandWorked(renameDB[otherName].renameCollection(collName)); } // Signal to the parent shell that the renames have completed. - assert.writeOK(db.await_data.insert({_id: "rename has ended"})); + assert.commandWorked(db.await_data.insert({_id: "rename has ended"})); } diff --git a/jstests/noPassthrough/list_indexes_with_build_uuids.js b/jstests/noPassthrough/list_indexes_with_build_uuids.js index a52b58578a5..102f6560c1e 100644 --- a/jstests/noPassthrough/list_indexes_with_build_uuids.js +++ b/jstests/noPassthrough/list_indexes_with_build_uuids.js @@ -19,7 +19,7 @@ function addTestDocuments(db) { for (var i = 0; i < size; ++i) { bulk.insert({i: i, j: i * i}); } - assert.writeOK(bulk.execute()); + assert.commandWorked(bulk.execute()); } let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2}); diff --git a/jstests/noPassthrough/log_find_getmore.js b/jstests/noPassthrough/log_find_getmore.js index dc7f6d83c91..a3a2ecf7d7d 100644 --- a/jstests/noPassthrough/log_find_getmore.js +++ b/jstests/noPassthrough/log_find_getmore.js @@ -39,7 +39,7 @@ const coll = testDB.test; assert.commandWorked(testDB.dropDatabase()); for (let i = 1; i <= 10; ++i) { - assert.writeOK(coll.insert({a: i})); + assert.commandWorked(coll.insert({a: i})); } assert.commandWorked(coll.createIndex({a: 1})); diff --git a/jstests/noPassthrough/logical_session_cache_find_getmore.js b/jstests/noPassthrough/logical_session_cache_find_getmore.js index 4857443d032..c8e0f5bfbae 100644 --- a/jstests/noPassthrough/logical_session_cache_find_getmore.js +++ b/jstests/noPassthrough/logical_session_cache_find_getmore.js @@ -6,8 +6,8 @@ TestData.disableImplicitSessions = true; var conn = MongoRunner.runMongod({setParameter: {maxSessions: 2}}); var testDB = conn.getDB("test"); -assert.writeOK(testDB.foo.insert({data: 1})); -assert.writeOK(testDB.foo.insert({data: 2})); +assert.commandWorked(testDB.foo.insert({data: 1})); +assert.commandWorked(testDB.foo.insert({data: 2})); for (var i = 0; i < 2; i++) { var session = conn.startSession(); diff --git a/jstests/noPassthrough/logical_session_cursor_checks.js b/jstests/noPassthrough/logical_session_cursor_checks.js index 5664fb1ef28..f2af849ab68 100644 --- a/jstests/noPassthrough/logical_session_cursor_checks.js +++ b/jstests/noPassthrough/logical_session_cursor_checks.js @@ -16,8 +16,8 @@ function runFixture(Fixture) { admin.logout(); data.auth("user0", "password"); - assert.writeOK(data.test.insert({name: "first", data: 1})); - assert.writeOK(data.test.insert({name: "second", data: 2})); + assert.commandWorked(data.test.insert({name: "first", data: 1})); + assert.commandWorked(data.test.insert({name: "second", data: 2})); // Test that getMore works correctly on the same session. 
{ diff --git a/jstests/noPassthrough/max_bson_depth_parameter.js b/jstests/noPassthrough/max_bson_depth_parameter.js index bd39676bb98..3aef0995d34 100644 --- a/jstests/noPassthrough/max_bson_depth_parameter.js +++ b/jstests/noPassthrough/max_bson_depth_parameter.js @@ -20,8 +20,8 @@ assert.commandFailedWithCode( "Expected server to reject command for exceeding the nesting depth limit"); // Confirm depth limits for $lookup. -assert.writeOK(testDB.coll1.insert({_id: 1})); -assert.writeOK(testDB.coll2.insert({_id: 1})); +assert.commandWorked(testDB.coll1.insert({_id: 1})); +assert.commandWorked(testDB.coll2.insert({_id: 1})); assert.commandWorked(testDB.runCommand({ aggregate: "coll1", diff --git a/jstests/noPassthrough/minvalid.js b/jstests/noPassthrough/minvalid.js index 462016a2e73..0691908fbb2 100644 --- a/jstests/noPassthrough/minvalid.js +++ b/jstests/noPassthrough/minvalid.js @@ -22,7 +22,7 @@ var lastOp = local.oplog.rs.find().sort({$natural: -1}).limit(1).next(); printjson(lastOp); print("3: change minvalid"); -assert.writeOK(local.replset.minvalid.update( +assert.commandWorked(local.replset.minvalid.update( {}, {$set: {ts: new Timestamp(lastOp.ts.t, lastOp.ts.i + 1)}}, {upsert: true})); printjson(local.replset.minvalid.findOne()); diff --git a/jstests/noPassthrough/minvalid2.js b/jstests/noPassthrough/minvalid2.js index b5f29a8a97c..5a731f0ffb7 100644 --- a/jstests/noPassthrough/minvalid2.js +++ b/jstests/noPassthrough/minvalid2.js @@ -54,7 +54,7 @@ print("2: shut down slave"); replTest.stop(slaveId); print("3: write to master"); -assert.writeOK(mdb.foo.insert({a: 1001}, {writeConcern: {w: 1}})); +assert.commandWorked(mdb.foo.insert({a: 1001}, {writeConcern: {w: 1}})); print("4: modify master's minvalid"); var local = master.getDB("local"); diff --git a/jstests/noPassthrough/noncapped_oplog_creation.js b/jstests/noPassthrough/noncapped_oplog_creation.js index 577074e1bb9..9d2f0865fdb 100644 --- a/jstests/noPassthrough/noncapped_oplog_creation.js +++ b/jstests/noPassthrough/noncapped_oplog_creation.js @@ -28,7 +28,7 @@ assert.writeError(localDB.oplog.rs.insert({})); assert.commandFailed(localDB.runCommand({godinsert: 'oplog.$main', obj: {}})); // Test that creating a non-capped oplog collection fails when using $out. 
-assert.writeOK(localDB.input.insert({})); +assert.commandWorked(localDB.input.insert({})); assert.commandFailed(localDB.runCommand({ aggregate: 'input', pipeline: [{$out: 'oplog.aggregation'}], diff --git a/jstests/noPassthrough/ns1.js b/jstests/noPassthrough/ns1.js index 63c7baacb0f..2a9a1659244 100644 --- a/jstests/noPassthrough/ns1.js +++ b/jstests/noPassthrough/ns1.js @@ -8,7 +8,7 @@ const check = function(n, isNew) { var coll = mydb["x" + n]; if (isNew) { assert.eq(0, coll.count(), "pop a: " + n); - assert.writeOK(coll.insert({_id: n})); + assert.commandWorked(coll.insert({_id: n})); } assert.eq(1, coll.count(), "pop b: " + n); assert.eq(n, coll.findOne()._id, "pop c: " + n); diff --git a/jstests/noPassthrough/predictive_connpool.js b/jstests/noPassthrough/predictive_connpool.js index d92d1ba9a2f..2ed10ecfac5 100644 --- a/jstests/noPassthrough/predictive_connpool.js +++ b/jstests/noPassthrough/predictive_connpool.js @@ -131,9 +131,9 @@ function walkThroughBehavior({primaryFollows, secondaryFollows}) { dropConnections(); } -assert.writeOK(mongos.test.insert({x: 1})); -assert.writeOK(mongos.test.insert({x: 2})); -assert.writeOK(mongos.test.insert({x: 3})); +assert.commandWorked(mongos.test.insert({x: 1})); +assert.commandWorked(mongos.test.insert({x: 2})); +assert.commandWorked(mongos.test.insert({x: 3})); st.rs0.awaitReplication(); jsTestLog("Following disabled"); diff --git a/jstests/noPassthrough/profile_agg_multiple_batches.js b/jstests/noPassthrough/profile_agg_multiple_batches.js index 6d21e254bde..62a2f713a93 100644 --- a/jstests/noPassthrough/profile_agg_multiple_batches.js +++ b/jstests/noPassthrough/profile_agg_multiple_batches.js @@ -21,7 +21,7 @@ const coll = testDB.getCollection("coll"); testDB.setProfilingLevel(2); for (let i = 0; i < 10; ++i) { - assert.writeOK(coll.insert({a: i, b: i})); + assert.commandWorked(coll.insert({a: i, b: i})); } assert.commandWorked(coll.createIndex({a: 1})); diff --git a/jstests/noPassthrough/query_yield_reset_timer.js b/jstests/noPassthrough/query_yield_reset_timer.js index cd7d9cf7d16..5af89432957 100644 --- a/jstests/noPassthrough/query_yield_reset_timer.js +++ b/jstests/noPassthrough/query_yield_reset_timer.js @@ -33,7 +33,7 @@ assert.commandWorked(coll.getDB().adminCommand({ // timing-based yield (incorrect accounting for timing-based yields was the cause for // SERVER-21341). for (var i = 0; i < 40; ++i) { - assert.writeOK(coll.insert({})); + assert.commandWorked(coll.insert({})); } var explainRes = coll.find().explain("executionStats"); // We expect 4 yields, but we throw in a fudge factor of 2 for test reliability. We also can diff --git a/jstests/noPassthrough/readConcern_snapshot_mongos.js b/jstests/noPassthrough/readConcern_snapshot_mongos.js index ab346a12937..472da1af4de 100644 --- a/jstests/noPassthrough/readConcern_snapshot_mongos.js +++ b/jstests/noPassthrough/readConcern_snapshot_mongos.js @@ -30,7 +30,7 @@ let testDB = st.getDB(dbName); let coll = testDB.coll; // Insert data to create the collection. -assert.writeOK(testDB[collName].insert({x: 1})); +assert.commandWorked(testDB[collName].insert({x: 1})); flushRoutersAndRefreshShardMetadata(st, {ns: dbName + "." 
+ collName, dbNames: [dbName]}); diff --git a/jstests/noPassthrough/read_majority.js b/jstests/noPassthrough/read_majority.js index 3e03b8124ae..fb1425dea97 100644 --- a/jstests/noPassthrough/read_majority.js +++ b/jstests/noPassthrough/read_majority.js @@ -102,7 +102,7 @@ function testReadConcernLevel(level) { var snapshot2 = assert.commandWorked(db.adminCommand("makeSnapshot")).name; for (var i = 0; i < 10; i++) { - assert.writeOK(t.insert({_id: i, version: 3})); + assert.commandWorked(t.insert({_id: i, version: 3})); } assertNoSnapshotAvailableForReadConcernLevel(); @@ -111,7 +111,7 @@ function testReadConcernLevel(level) { assertNoSnapshotAvailableForReadConcernLevel(); - assert.writeOK(t.update({}, {$set: {version: 4}}, false, true)); + assert.commandWorked(t.update({}, {$set: {version: 4}}, false, true)); var snapshot4 = assert.commandWorked(db.adminCommand("makeSnapshot")).name; // Collection didn't exist in snapshot 1. @@ -171,7 +171,7 @@ function testReadConcernLevel(level) { assert.eq(getCursorForReadConcernLevel().itcount(), 10); // Reindex bumps the min snapshot. - assert.writeOK(t.bump.insert({a: 1})); // Bump timestamp. + assert.commandWorked(t.bump.insert({a: 1})); // Bump timestamp. t.reIndex(); assertNoSnapshotAvailableForReadConcernLevel(); newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name; diff --git a/jstests/noPassthrough/read_majority_reads.js b/jstests/noPassthrough/read_majority_reads.js index f76363a0b28..065db43f426 100644 --- a/jstests/noPassthrough/read_majority_reads.js +++ b/jstests/noPassthrough/read_majority_reads.js @@ -129,15 +129,15 @@ function runTests(coll, mongodConnection) { var getCursor = cursorTestCases[testName]; // Setup initial state. - assert.writeOK(coll.remove({})); - assert.writeOK(coll.save({_id: 1, state: 'before', point: [0, 0]})); + assert.commandWorked(coll.remove({})); + assert.commandWorked(coll.save({_id: 1, state: 'before', point: [0, 0]})); setCommittedSnapshot(makeSnapshot()); // Check initial conditions. assert.eq(getCursor(coll).next().state, 'before'); // Change state without making it committed. - assert.writeOK(coll.save({_id: 1, state: 'after', point: [0, 0]})); + assert.commandWorked(coll.save({_id: 1, state: 'after', point: [0, 0]})); // Cursor still sees old state. assert.eq(getCursor(coll).next().state, 'before'); @@ -163,15 +163,15 @@ function runTests(coll, mongodConnection) { var expectedAfter = nonCursorTestCases[testName].expectedAfter; // Setup initial state. - assert.writeOK(coll.remove({})); - assert.writeOK(coll.save({_id: 1, state: 'before', point: [0, 0]})); + assert.commandWorked(coll.remove({})); + assert.commandWorked(coll.save({_id: 1, state: 'before', point: [0, 0]})); setCommittedSnapshot(makeSnapshot()); // Check initial conditions. assert.eq(getResult(coll), expectedBefore); // Change state without making it committed. - assert.writeOK(coll.save({_id: 1, state: 'after', point: [0, 0]})); + assert.commandWorked(coll.save({_id: 1, state: 'after', point: [0, 0]})); // Cursor still sees old state. 
assert.eq(getResult(coll), expectedBefore); diff --git a/jstests/noPassthrough/recovery_wt_cache_full.js b/jstests/noPassthrough/recovery_wt_cache_full.js index 7d7dc171296..72e36a13eb1 100644 --- a/jstests/noPassthrough/recovery_wt_cache_full.js +++ b/jstests/noPassthrough/recovery_wt_cache_full.js @@ -41,7 +41,7 @@ const numDocs = 2; const minDocSizeMB = 10; for (let i = 0; i < numDocs; ++i) { - assert.writeOK( + assert.commandWorked( coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)}, {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}})); } @@ -64,7 +64,7 @@ jsTestLog('Writing ' + numUpdates + ' updates to ' + numDocs + ' documents on secondary after disabling snapshots.'); for (let i = 0; i < numDocs; ++i) { for (let j = 0; j < numUpdates; ++j) { - assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}})); + assert.commandWorked(coll.update({_id: i}, {$inc: {i: 1}})); } } diff --git a/jstests/noPassthrough/replica_set_connection_getmore.js b/jstests/noPassthrough/replica_set_connection_getmore.js index e7167fbd5eb..a27ff808878 100644 --- a/jstests/noPassthrough/replica_set_connection_getmore.js +++ b/jstests/noPassthrough/replica_set_connection_getmore.js @@ -28,7 +28,7 @@ coll.drop(); // Insert several document so that we can use a cursor to fetch them in multiple batches. var res = coll.insert([{}, {}, {}, {}, {}]); -assert.writeOK(res); +assert.commandWorked(res); assert.eq(5, res.nInserted); // Wait for the secondary to catch up because we're going to try and do reads from it. diff --git a/jstests/noPassthrough/rollback_wt_cache_full.js b/jstests/noPassthrough/rollback_wt_cache_full.js index 6ea271b1dba..f7733a0110b 100644 --- a/jstests/noPassthrough/rollback_wt_cache_full.js +++ b/jstests/noPassthrough/rollback_wt_cache_full.js @@ -59,7 +59,7 @@ let CommonOps = (node) => { jsTestLog('Inserting ' + numDocs + ' documents of ' + minDocSizeMB + ' MB each into ' + collName + '.'); for (let i = 0; i < numDocs; ++i) { - assert.writeOK( + assert.commandWorked( coll.save({_id: i, a: 0, x: largeString}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}})); } @@ -72,7 +72,7 @@ let RollbackOps = (node) => { jsTestLog('Updating ' + numDocs + ' documents on the primary. 
    for (let i = 0; i < numDocs; ++i) {
-        assert.writeOK(coll.update({_id: i}, {$inc: {a: 1}}));
+        assert.commandWorked(coll.update({_id: i}, {$inc: {a: 1}}));
     }
 };
diff --git a/jstests/noPassthrough/set_step_params.js b/jstests/noPassthrough/set_step_params.js
index d3fbe5deb02..dcde5d4c0d7 100644
--- a/jstests/noPassthrough/set_step_params.js
+++ b/jstests/noPassthrough/set_step_params.js
@@ -122,9 +122,9 @@ function runSubTest(name, fun) {
     updateSetParameters(stepParams);
 }
 
-assert.writeOK(mongosDB.test.insert({x: 1}));
-assert.writeOK(mongosDB.test.insert({x: 2}));
-assert.writeOK(mongosDB.test.insert({x: 3}));
+assert.commandWorked(mongosDB.test.insert({x: 1}));
+assert.commandWorked(mongosDB.test.insert({x: 2}));
+assert.commandWorked(mongosDB.test.insert({x: 3}));
 st.rs0.awaitReplication();
 
 runSubTest("MinSize", function() {
diff --git a/jstests/noPassthrough/shell_can_use_read_concern.js b/jstests/noPassthrough/shell_can_use_read_concern.js
index f3d567960e0..73facc09214 100644
--- a/jstests/noPassthrough/shell_can_use_read_concern.js
+++ b/jstests/noPassthrough/shell_can_use_read_concern.js
@@ -97,7 +97,7 @@ function runTests({withSession}) {
     {
         testCommandCanBeCausallyConsistent(function() {
-            assert.writeOK(coll.insert([{}, {}, {}, {}, {}]));
+            assert.commandWorked(coll.insert([{}, {}, {}, {}, {}]));
         }, {expectedSession: withSession, expectedAfterClusterTime: false});
 
         testCommandCanBeCausallyConsistent(function() {
diff --git a/jstests/noPassthrough/shell_cmd_assertions.js b/jstests/noPassthrough/shell_cmd_assertions.js
index 4bc800663f8..85d40386964 100644
--- a/jstests/noPassthrough/shell_cmd_assertions.js
+++ b/jstests/noPassthrough/shell_cmd_assertions.js
@@ -25,7 +25,7 @@ const sampleWriteConcernError = {
 
 function setup() {
     db.coll.drop();
-    assert.writeOK(db.coll.insert({_id: 1}));
+    assert.commandWorked(db.coll.insert({_id: 1}));
 }
 
 // Raw command responses.
diff --git a/jstests/noPassthrough/shell_gossip_cluster_time.js b/jstests/noPassthrough/shell_gossip_cluster_time.js
index 119ba1e23dc..dc46a1173a7 100644
--- a/jstests/noPassthrough/shell_gossip_cluster_time.js
+++ b/jstests/noPassthrough/shell_gossip_cluster_time.js
@@ -70,7 +70,7 @@ assert(session2.getClusterTime() === undefined,
 
 // Advance the clusterTime outside of either of the sessions.
 testCommandGossipedWithClusterTime(function() {
-    assert.writeOK(coll.insert({}));
+    assert.commandWorked(coll.insert({}));
 }, primary.getClusterTime());
 
 assert(session1.getClusterTime() === undefined,
@@ -82,14 +82,14 @@ assert(session2.getClusterTime() === undefined,
        // since session1 hasn't been used yet.
 testCommandGossipedWithClusterTime(function() {
     const coll = session1.getDatabase("test").mycoll;
-    assert.writeOK(coll.insert({}));
+    assert.commandWorked(coll.insert({}));
 }, primary.getClusterTime());
 
 assert.eq(session1.getClusterTime(), primary.getClusterTime());
 
 testCommandGossipedWithClusterTime(function() {
     const coll = session1.getDatabase("test").mycoll;
-    assert.writeOK(coll.insert({}));
+    assert.commandWorked(coll.insert({}));
 }, session1.getClusterTime());
 
 assert(session2.getClusterTime() === undefined,
@@ -105,7 +105,7 @@ assert(primary.getClusterTime() === undefined,
 session2.advanceClusterTime(session1.getClusterTime());
 testCommandGossipedWithClusterTime(function() {
     const coll = session2.getDatabase("test").mycoll;
-    assert.writeOK(coll.insert({}));
+    assert.commandWorked(coll.insert({}));
 }, session2.getClusterTime());
 
 assert.eq(session2.getClusterTime(), primary.getClusterTime());
@@ -120,7 +120,7 @@ assert(primary.getClusterTime() === undefined,
 primary.advanceClusterTime(session1.getClusterTime());
 testCommandGossipedWithClusterTime(function() {
     const coll = session2.getDatabase("test").mycoll;
-    assert.writeOK(coll.insert({}));
+    assert.commandWorked(coll.insert({}));
 }, session2.getClusterTime());
 
 rst.stopSet();
diff --git a/jstests/noPassthrough/shell_retry_writes_uri.js b/jstests/noPassthrough/shell_retry_writes_uri.js
index bb591438280..56f23981adc 100644
--- a/jstests/noPassthrough/shell_retry_writes_uri.js
+++ b/jstests/noPassthrough/shell_retry_writes_uri.js
@@ -66,14 +66,14 @@ function runShellScript(uri, cmdArgs, insertShouldHaveTxnNumber, shellFn) {
 // Tests --retryWrites command line parameter.
 runShellScript(mongoUri, ["--retryWrites"], true, function flagWorks() {
     assert(db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be true");
-    assert.writeOK(db.coll.insert({}), "cannot insert");
+    assert.commandWorked(db.coll.insert({}), "cannot insert");
 });
 
 // The uri param should override --retryWrites.
 runShellScript(
     mongoUri + "?retryWrites=false", ["--retryWrites"], false, function flagOverridenByUri() {
         assert(!db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be false");
-        assert.writeOK(db.coll.insert({}), "cannot insert");
+        assert.commandWorked(db.coll.insert({}), "cannot insert");
     });
 
 // Even if initial connection has retryWrites=false in uri, new connections should not be
@@ -83,7 +83,7 @@ runShellScript(
         let connUri = db.getMongo().host;  // does not have ?retryWrites=false.
         let sess = new Mongo(connUri).startSession();
         assert(sess.getOptions().shouldRetryWrites(), "retryWrites should be true");
-        assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+        assert.commandWorked(sess.getDatabase("test").coll.insert({}), "cannot insert");
     });
 
 // Unless that uri also specifies retryWrites.
@@ -92,7 +92,7 @@ runShellScript(
        let connUri = "mongodb://" + db.getMongo().host + "/test?retryWrites=false";
         let sess = new Mongo(connUri).startSession();
         assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
-        assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+        assert.commandWorked(sess.getDatabase("test").coll.insert({}), "cannot insert");
     });
 
 // Session options should override --retryWrites as well.
@@ -100,19 +100,19 @@ runShellScript(mongoUri, ["--retryWrites"], false, function flagOverridenByOpts(
     let connUri = "mongodb://" + db.getMongo().host + "/test";
     let sess = new Mongo(connUri).startSession({retryWrites: false});
     assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
-    assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+    assert.commandWorked(sess.getDatabase("test").coll.insert({}), "cannot insert");
 });
 
 // Test uri retryWrites parameter.
 runShellScript(mongoUri + "?retryWrites=true", [], true, function uriTrueWorks() {
     assert(db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be true");
-    assert.writeOK(db.coll.insert({}), "cannot insert");
+    assert.commandWorked(db.coll.insert({}), "cannot insert");
 });
 
 // Test that uri retryWrites=false works.
 runShellScript(mongoUri + "?retryWrites=false", [], false, function uriFalseWorks() {
     assert(!db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be false");
-    assert.writeOK(db.coll.insert({}), "cannot insert");
+    assert.commandWorked(db.coll.insert({}), "cannot insert");
 });
 
 // Test SessionOptions retryWrites option.
@@ -120,7 +120,7 @@ runShellScript(mongoUri, [], true, function sessOptTrueWorks() {
     let connUri = "mongodb://" + db.getMongo().host + "/test";
     let sess = new Mongo(connUri).startSession({retryWrites: true});
     assert(sess.getOptions().shouldRetryWrites(), "retryWrites should be true");
-    assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+    assert.commandWorked(sess.getDatabase("test").coll.insert({}), "cannot insert");
 });
 
 // Test that SessionOptions retryWrites:false works.
@@ -128,14 +128,14 @@ runShellScript(mongoUri, [], false, function sessOptFalseWorks() {
     let connUri = "mongodb://" + db.getMongo().host + "/test";
     let sess = new Mongo(connUri).startSession({retryWrites: false});
     assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
-    assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+    assert.commandWorked(sess.getDatabase("test").coll.insert({}), "cannot insert");
 });
 
 // Test that session option overrides uri option.
 runShellScript(mongoUri + "?retryWrites=true", [], false, function sessOptOverridesUri() {
     let sess = db.getMongo().startSession({retryWrites: false});
     assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
-    assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+    assert.commandWorked(sess.getDatabase("test").coll.insert({}), "cannot insert");
 });
 
 rst.stopSet();
diff --git a/jstests/noPassthrough/snapshot_reads.js b/jstests/noPassthrough/snapshot_reads.js
index 9c82a24af7e..8dd75ad75fd 100644
--- a/jstests/noPassthrough/snapshot_reads.js
+++ b/jstests/noPassthrough/snapshot_reads.js
@@ -58,7 +58,7 @@ function runTest({useCausalConsistency, establishCursorCmd, readConcern}) {
     // Insert an 11th document which should not be visible to the snapshot cursor. This write is
     // performed outside of the session.
-    assert.writeOK(primaryDB.coll.insert({_id: 10}, {writeConcern: {w: "majority"}}));
+    assert.commandWorked(primaryDB.coll.insert({_id: 10}, {writeConcern: {w: "majority"}}));
 
     // Fetch the first 5 documents.
    res = assert.commandWorked(
diff --git a/jstests/noPassthrough/socket_disconnect_kills.js b/jstests/noPassthrough/socket_disconnect_kills.js
index 3d6eb513b24..ec6c613cd1c 100644
--- a/jstests/noPassthrough/socket_disconnect_kills.js
+++ b/jstests/noPassthrough/socket_disconnect_kills.js
@@ -129,9 +129,9 @@ function runCommand(cmd) {
 function runTests(client) {
     let admin = client.getDB("admin");
 
-    assert.writeOK(client.getDB(testName).test.insert({x: 1}));
-    assert.writeOK(client.getDB(testName).test.insert({x: 2}));
-    assert.writeOK(client.getDB(testName).test.insert({x: 3}));
+    assert.commandWorked(client.getDB(testName).test.insert({x: 1}));
+    assert.commandWorked(client.getDB(testName).test.insert({x: 2}));
+    assert.commandWorked(client.getDB(testName).test.insert({x: 3}));
 
     [[checkClosedEarly, runCommand({find: "test", filter: {}})],
      [
diff --git a/jstests/noPassthrough/step_down_during_drop_database.js b/jstests/noPassthrough/step_down_during_drop_database.js
index 5480605b1c3..e31d876ad6a 100644
--- a/jstests/noPassthrough/step_down_during_drop_database.js
+++ b/jstests/noPassthrough/step_down_during_drop_database.js
@@ -26,7 +26,7 @@ var bulk = testDB.getCollection(collName).initializeUnorderedBulkOp();
 for (var i = 0; i < size; ++i) {
     bulk.insert({i: i});
 }
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
 replSet.awaitReplication();
 
 const failpoint = "dropDatabaseHangAfterAllCollectionsDrop";
diff --git a/jstests/noPassthrough/stepdown_query.js b/jstests/noPassthrough/stepdown_query.js
index 4e8cc001840..f52bd3c875c 100644
--- a/jstests/noPassthrough/stepdown_query.js
+++ b/jstests/noPassthrough/stepdown_query.js
@@ -29,7 +29,7 @@ function runTest(host, rst, waitForPrimary) {
     var conn = new Mongo(host);
     var coll = conn.getDB(dbName).getCollection(collName);
     assert(!coll.exists());
-    assert.writeOK(coll.insert([{}, {}, {}, {}, {}]));
+    assert.commandWorked(coll.insert([{}, {}, {}, {}, {}]));
     var cursor = coll.find().batchSize(2);
     // Retrieve the first batch of results.
     cursor.next();
diff --git a/jstests/noPassthrough/sync_write.js b/jstests/noPassthrough/sync_write.js
index a4c0d1ebe38..a76d86ec7f0 100644
--- a/jstests/noPassthrough/sync_write.js
+++ b/jstests/noPassthrough/sync_write.js
@@ -19,7 +19,7 @@ assert.neq(null, conn, 'mongod was unable to start up');
 
 // Now connect to the mongod, do a journaled write and abruptly stop the server.
 var testDB = conn.getDB('test');
-assert.writeOK(testDB.synced.insert({synced: true}, {writeConcern: {j: true}}));
+assert.commandWorked(testDB.synced.insert({synced: true}, {writeConcern: {j: true}}));
 MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
 
 // Restart the mongod.
diff --git a/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js b/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
index 0d29b065e7b..ef604f4d887 100644
--- a/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
+++ b/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
@@ -10,7 +10,7 @@ const st = new ShardingTest({shards: 2});
 const db = st.s.getDB("test");
 const coll = db.capped;
 assert.commandWorked(db.runCommand({create: "capped", capped: true, size: 1024}));
-assert.writeOK(coll.insert({}));
+assert.commandWorked(coll.insert({}));
 
 const findResult = assert.commandWorked(
     db.runCommand({find: "capped", filter: {}, tailable: true, awaitData: true}));
diff --git a/jstests/noPassthrough/transaction_reaper.js b/jstests/noPassthrough/transaction_reaper.js
index 5f0536f0d7e..58198741da9 100644
--- a/jstests/noPassthrough/transaction_reaper.js
+++ b/jstests/noPassthrough/transaction_reaper.js
@@ -90,7 +90,7 @@ function Fixture(impl) {
     for (var i = 0; i < nSessions; i++) {
         // make a session and get it to the collection
         var session = this.sessions[i];
-        assert.writeOK(session.getDatabase("test").test.save({a: 1}));
+        assert.commandWorked(session.getDatabase("test").test.save({a: 1}));
     }
 
     // Ensure a write flushes a transaction
diff --git a/jstests/noPassthrough/ttl_capped.js b/jstests/noPassthrough/ttl_capped.js
index c9eabbc0df7..662e5781e03 100644
--- a/jstests/noPassthrough/ttl_capped.js
+++ b/jstests/noPassthrough/ttl_capped.js
@@ -43,7 +43,8 @@ for (var i = 0; i < numCollectionsToCreate; i++) {
 
     // Insert a single document with a 'date' field that is already expired according to the
     // index definition.
-    assert.writeOK(testDB[collName].insert({date: new Date(now - expireAfterSeconds * 1000)}));
+    assert.commandWorked(
+        testDB[collName].insert({date: new Date(now - expireAfterSeconds * 1000)}));
 }
 
 // Increase the verbosity of the TTL monitor's output.
diff --git a/jstests/noPassthrough/ttl_partial_index.js b/jstests/noPassthrough/ttl_partial_index.js
index af4c9c1a7fb..61cc5e6aff1 100644
--- a/jstests/noPassthrough/ttl_partial_index.js
+++ b/jstests/noPassthrough/ttl_partial_index.js
@@ -12,8 +12,8 @@ assert.commandWorked(coll.ensureIndex(
     {x: 1}, {expireAfterSeconds: 0, partialFilterExpression: {z: {$exists: true}}}));
 
 var now = new Date();
-assert.writeOK(coll.insert({x: now, z: 2}));
-assert.writeOK(coll.insert({x: now}));
+assert.commandWorked(coll.insert({x: now, z: 2}));
+assert.commandWorked(coll.insert({x: now}));
 
 // Wait for the TTL monitor to run at least twice (in case we weren't finished setting up our
 // collection when it ran the first time).
diff --git a/jstests/noPassthrough/txn_override_causal_consistency.js b/jstests/noPassthrough/txn_override_causal_consistency.js
index ac7c9758c96..ef5605a1b2d 100644
--- a/jstests/noPassthrough/txn_override_causal_consistency.js
+++ b/jstests/noPassthrough/txn_override_causal_consistency.js
@@ -102,7 +102,7 @@ function inspectFirstCommandForAfterClusterTime(conn, cmdName, isCausal, expectR
 
 function testInsert(conn, isCausal, expectRetry) {
     inspectFirstCommandForAfterClusterTime(conn, "insert", isCausal, expectRetry, (coll) => {
-        assert.writeOK(coll.insert({x: 1}));
+        assert.commandWorked(coll.insert({x: 1}));
     });
 }
 
diff --git a/jstests/noPassthrough/unsupported_change_stream_deployments.js b/jstests/noPassthrough/unsupported_change_stream_deployments.js
index c342341da69..6a4d3dcd1c6 100644
--- a/jstests/noPassthrough/unsupported_change_stream_deployments.js
+++ b/jstests/noPassthrough/unsupported_change_stream_deployments.js
@@ -32,7 +32,7 @@ function assertChangeStreamNotSupportedOnConnection(conn) {
 const conn = MongoRunner.runMongod({enableMajorityReadConcern: ""});
 assert.neq(null, conn, "mongod was unable to start up");
 // $changeStream cannot run on a non-existent database.
-assert.writeOK(conn.getDB("test").ensure_db_exists.insert({}));
+assert.commandWorked(conn.getDB("test").ensure_db_exists.insert({}));
 assertChangeStreamNotSupportedOnConnection(conn);
 assert.eq(0, MongoRunner.stopMongod(conn));
 
diff --git a/jstests/noPassthrough/update_post_image_validation.js b/jstests/noPassthrough/update_post_image_validation.js
index ad78227a09b..0b2c2d93a4b 100644
--- a/jstests/noPassthrough/update_post_image_validation.js
+++ b/jstests/noPassthrough/update_post_image_validation.js
@@ -9,7 +9,7 @@ const testDB = conn.getDB("test");
 
 // Test validation of elements added to an array that is represented in a "deserialized" format
 // in mutablebson. The added element is invalid because it is a DBRef with a missing $id.
-assert.writeOK(testDB.coll.insert({_id: 0, a: []}));
+assert.commandWorked(testDB.coll.insert({_id: 0, a: []}));
 assert.writeErrorWithCode(
     testDB.coll.update({_id: 0}, {$set: {"a.1": 0, "a.0": {$ref: "coll", $db: "test"}}}),
     ErrorCodes.InvalidDBRef);
@@ -18,7 +18,7 @@ assert.docEq(testDB.coll.findOne({_id: 0}), {_id: 0, a: []});
 
 // Test validation of modified array elements that are accessed using a string that is
 // numerically equivalent to their fieldname. The modified element is invalid because it is a
 // DBRef with a missing $id.
-assert.writeOK(testDB.coll.insert({_id: 1, a: [0]}));
+assert.commandWorked(testDB.coll.insert({_id: 1, a: [0]}));
 assert.writeErrorWithCode(
     testDB.coll.update({_id: 1}, {$set: {"a.00": {$ref: "coll", $db: "test"}}}),
     ErrorCodes.InvalidDBRef);
diff --git a/jstests/noPassthrough/update_server-5552.js b/jstests/noPassthrough/update_server-5552.js
index dd18a14d72c..876f9688c6f 100644
--- a/jstests/noPassthrough/update_server-5552.js
+++ b/jstests/noPassthrough/update_server-5552.js
@@ -14,7 +14,7 @@ var bulk = t.initializeUnorderedBulkOp();
 for (let i = 0; i < N; i++) {
     bulk.insert({_id: i, x: 1});
 }
-assert.writeOK(bulk.execute());
+assert.commandWorked(bulk.execute());
 
 const join = startParallelShell(
     "while( db.foo.findOne( { _id : 0 } ).x == 1 ); db.foo.ensureIndex( { x : 1 } );");
diff --git a/jstests/noPassthrough/use_disk.js b/jstests/noPassthrough/use_disk.js
index 0e91fdcdca2..f4323bf3d35 100644
--- a/jstests/noPassthrough/use_disk.js
+++ b/jstests/noPassthrough/use_disk.js
@@ -16,14 +16,14 @@ testDB.setProfilingLevel(2);
 function resetCollection() {
     coll.drop();
     for (var i = 0; i < 10; ++i) {
-        assert.writeOK(coll.insert({a: i}));
+        assert.commandWorked(coll.insert({a: i}));
     }
 }
 
 function resetForeignCollection() {
     testDB.foreign.drop();
     const forColl = testDB.getCollection("foreign");
     for (var i = 4; i < 18; i += 2)
-        assert.writeOK(forColl.insert({b: i}));
+        assert.commandWorked(forColl.insert({b: i}));
 }
 
 //
 // Confirm hasSortStage with in-memory sort.
diff --git a/jstests/noPassthrough/utf8_paths.js b/jstests/noPassthrough/utf8_paths.js
index b7b17355457..e7c0f56fa5d 100644
--- a/jstests/noPassthrough/utf8_paths.js
+++ b/jstests/noPassthrough/utf8_paths.js
@@ -25,7 +25,7 @@ let testMongoD = function() {
     assert.neq(null, conn, 'mongod was unable to start up');
 
     let coll = conn.getCollection(db_name + ".foo");
-    assert.writeOK(coll.insert({_id: 1}));
+    assert.commandWorked(coll.insert({_id: 1}));
 
     MongoRunner.stopMongod(conn);
 };
diff --git a/jstests/noPassthrough/views_legacy.js b/jstests/noPassthrough/views_legacy.js
index 8ded34730ae..efedb669a25 100644
--- a/jstests/noPassthrough/views_legacy.js
+++ b/jstests/noPassthrough/views_legacy.js
@@ -13,7 +13,7 @@ assert.commandWorked(viewsDB.createView("view", "collection", []));
 let coll = viewsDB.getCollection("collection");
 
 for (let i = 0; i < 10; ++i) {
-    assert.writeOK(coll.insert({a: i}));
+    assert.commandWorked(coll.insert({a: i}));
 }
 
 conn.forceReadMode("legacy");
diff --git a/jstests/noPassthrough/wt_cache_full.js b/jstests/noPassthrough/wt_cache_full.js
index 29be77da891..71a39764f68 100644
--- a/jstests/noPassthrough/wt_cache_full.js
+++ b/jstests/noPassthrough/wt_cache_full.js
@@ -33,7 +33,7 @@ const numDocs = 2;
 const minDocSizeMB = 10;
 
 for (let i = 0; i < numDocs; ++i) {
-    assert.writeOK(
+    assert.commandWorked(
         coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
                   {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
 }
@@ -52,7 +52,7 @@ assert.commandWorked(
     secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
 for (let i = 0; i < numDocs; ++i) {
     for (let j = 0; j < numUpdates; ++j) {
-        assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
+        assert.commandWorked(coll.update({_id: i}, {$inc: {i: 1}}));
     }
 }
diff --git a/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js b/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js
index 71383f91454..9fa3cd508de 100644
--- a/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js
+++ b/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js
@@ -35,7 +35,7 @@ const numDocs = 2;
 const minDocSizeMB = 10;
 
 for (let i = 0; i < numDocs; ++i) {
-    assert.writeOK(
+    assert.commandWorked(
         coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
                   {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
 }
@@ -56,7 +56,7 @@ const sessionColl = sessionDB.getCollection(coll.getName());
 session.startTransaction();
 for (let i = 0; i < numDocs; ++i) {
     for (let j = 0; j < numUpdates; ++j) {
-        assert.writeOK(sessionColl.update({_id: i}, {$inc: {i: 1}}));
+        assert.commandWorked(sessionColl.update({_id: i}, {$inc: {i: 1}}));
     }
 }
 assert.commandWorked(session.commitTransaction_forTesting());
diff --git a/jstests/noPassthrough/wt_cache_full_restart.js b/jstests/noPassthrough/wt_cache_full_restart.js
index 29aed83c67f..94140324ddf 100644
--- a/jstests/noPassthrough/wt_cache_full_restart.js
+++ b/jstests/noPassthrough/wt_cache_full_restart.js
@@ -33,7 +33,7 @@ const numDocs = 2;
 const minDocSizeMB = 10;
 
 for (let i = 0; i < numDocs; ++i) {
-    assert.writeOK(
+    assert.commandWorked(
         coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
                   {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
 }
@@ -54,7 +54,7 @@ jsTestLog('Stopped secondary. Writing ' + numUpdates + ' updates to ' + numDocs
 const startTime = Date.now();
 for (let i = 0; i < numDocs; ++i) {
     for (let j = 0; j < numUpdates; ++j) {
-        assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
+        assert.commandWorked(coll.update({_id: i}, {$inc: {i: 1}}));
     }
 }
 const totalTime = Date.now() - startTime;
diff --git a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
index 3470a04e24b..34d84bf7471 100644
--- a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
+++ b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
@@ -68,7 +68,7 @@ if (storageEngine !== "wiredTiger") {
         for (var j = 0; j < 100; j++) {
             batch.insert({a: bigstr});
         }
-        assert.writeOK(batch.execute());
+        assert.commandWorked(batch.execute());
     }
     rst.stopSet();
 }
diff --git a/jstests/noPassthrough/wt_nojournal_skip_recovery.js b/jstests/noPassthrough/wt_nojournal_skip_recovery.js
index df813e080d3..b33e354860f 100644
--- a/jstests/noPassthrough/wt_nojournal_skip_recovery.js
+++ b/jstests/noPassthrough/wt_nojournal_skip_recovery.js
@@ -38,8 +38,8 @@ var awaitShell = startParallelShell(function() {
         for (var i = 0; i < 100; ++i) {
             bulk.insert({unjournaled: i});
         }
-        assert.writeOK(bulk.execute({j: false}));
-        assert.writeOK(db.nojournal.insert({journaled: loopNum}, {writeConcern: {j: true}}));
+        assert.commandWorked(bulk.execute({j: false}));
+        assert.commandWorked(db.nojournal.insert({journaled: loopNum}, {writeConcern: {j: true}}));
 
         // Create a checkpoint slightly before the mongod is terminated.
        if (loopNum === 90) {
diff --git a/jstests/noPassthrough/wt_nojournal_toggle.js b/jstests/noPassthrough/wt_nojournal_toggle.js
index 50d5483aa26..41e286b39fc 100644
--- a/jstests/noPassthrough/wt_nojournal_toggle.js
+++ b/jstests/noPassthrough/wt_nojournal_toggle.js
@@ -21,8 +21,8 @@ function insertFunctionFactory(checkpoint) {
         for (var i = 0; i < 100; ++i) {
             bulk.insert({unjournaled: i});
         }
-        assert.writeOK(bulk.execute({j: false}));
-        assert.writeOK(db.nojournal.insert({journaled: iter}, {writeConcern: {j: true}}));
+        assert.commandWorked(bulk.execute({j: false}));
+        assert.commandWorked(db.nojournal.insert({journaled: iter}, {writeConcern: {j: true}}));
         if (__checkpoint_template_placeholder__ && iter === 50) {
             assert.commandWorked(db.adminCommand({fsync: 1}));
         }
@@ -59,7 +59,7 @@ function runTest(options) {
         // We saw 100 journaled inserts, but visibility does not guarantee durability, so
        // do an extra journaled write to make all visible commits durable, before killing
         // the mongod.
-        assert.writeOK(testDB.nojournal.insert({final: true}, {writeConcern: {j: true}}));
+        assert.commandWorked(testDB.nojournal.insert({final: true}, {writeConcern: {j: true}}));
         MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
         return true;
     }
@@ -84,7 +84,7 @@ function runTest(options) {
               'journaled write operations since the last checkpoint were not replayed');
 
     var initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
-    assert.writeOK(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
+    assert.commandWorked(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
     assert.eq(initialNumLogWrites,
               testDB.serverStatus().wiredTiger.log['log write operations'],
               'journaling is still enabled even though --nojournal was specified');
@@ -103,7 +103,7 @@ function runTest(options) {
     testDB = conn.getDB('test');
 
     initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
-    assert.writeOK(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
+    assert.commandWorked(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
     assert.lt(initialNumLogWrites,
               testDB.serverStatus().wiredTiger.log['log write operations'],
               'journaling is still disabled even though --journal was specified');
diff --git a/jstests/noPassthrough/wt_operation_stats.js b/jstests/noPassthrough/wt_operation_stats.js
index e273dd34170..f3bc4f5aa9d 100644
--- a/jstests/noPassthrough/wt_operation_stats.js
+++ b/jstests/noPassthrough/wt_operation_stats.js
@@ -51,7 +51,7 @@ if (jsTest.options().storageEngine && (jsTest.options().storageEngine !== "wired
 
     jsTestLog("insert data");
     for (let i = 0; i < 200; i++) {
-        assert.writeOK(testDB.foo.insert({x: value}));
+        assert.commandWorked(testDB.foo.insert({x: value}));
     }
 
     let connport = conn.port;
diff --git a/jstests/noPassthrough/yield_during_writes.js b/jstests/noPassthrough/yield_during_writes.js
index d1e6845b58e..ab283382c5a 100644
--- a/jstests/noPassthrough/yield_during_writes.js
+++ b/jstests/noPassthrough/yield_during_writes.js
@@ -27,16 +27,16 @@ const coll = mongod.getDB('test').yield_during_writes;
 coll.drop();
 
 for (let i = 0; i < nDocsToInsert; i++) {
-    assert.writeOK(coll.insert({_id: i}));
+    assert.commandWorked(coll.insert({_id: i}));
 }
 
 // A multi-update doing a collection scan should yield about nDocsToInsert / worksPerYield
 // times.
-assert.writeOK(coll.update({}, {$inc: {counter: 1}}, {multi: true}));
+assert.commandWorked(coll.update({}, {$inc: {counter: 1}}, {multi: true}));
 assert.gt(countOpYields(coll, 'update'), (nDocsToInsert / worksPerYield) - 2);
 
 // Likewise, a multi-remove should also yield approximately every worksPerYield documents.
-assert.writeOK(coll.remove({}, {multi: true}));
+assert.commandWorked(coll.remove({}, {multi: true}));
 assert.gt(countOpYields(coll, 'remove'), (nDocsToInsert / worksPerYield) - 2);
 
 MongoRunner.stopMongod(mongod);
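Every hunk above makes the same mechanical substitution: an assert.writeOK(...) wrapped around a write result becomes assert.commandWorked(...). A minimal mongo-shell sketch of why the swap is safe, with made-up collection names purely for illustration:

    // assert.writeOK validated WriteResult/BulkWriteResult objects only.
    // assert.commandWorked validates those same write results and, as these
    // diffs rely on, raw command replies too, so one helper covers both.
    const res = db.example.insert({_id: 1});         // 'example' is illustrative
    assert.commandWorked(res);                       // replaces assert.writeOK(res)
    assert.commandWorked(db.runCommand({ping: 1}));  // command replies also work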