summaryrefslogtreecommitdiff
path: root/jstests
diff options
context:
space:
mode:
authorYoonsoo Kim <yoonsoo.kim@mongodb.com>2021-06-28 07:00:07 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-06-30 23:41:39 +0000
commit0d37de1e3f89c6257f2cd66f9bf530cc80859717 (patch)
tree591d2c96007d9e7eb04a1ad06086ad804fadc477 /jstests
parentd635f733ad873fd469cf3e35e27452c45f1597c9 (diff)
downloadmongo-0d37de1e3f89c6257f2cd66f9bf530cc80859717.tar.gz
SERVER-58103 Remove test cases which test legacy op behaviors
Diffstat (limited to 'jstests')
-rw-r--r--jstests/auth/getMore.js14
-rw-r--r--jstests/change_streams/change_stream.js10
-rw-r--r--jstests/change_streams/metadata_notifications.js2
-rw-r--r--jstests/core/aggregation_getmore_batchsize.js1
-rw-r--r--jstests/core/batch_write_command_delete.js2
-rw-r--r--jstests/core/batch_write_command_insert.js2
-rw-r--r--jstests/core/batch_write_command_update.js2
-rw-r--r--jstests/core/batch_write_command_w0.js5
-rw-r--r--jstests/core/bulk_api_ordered.js304
-rw-r--r--jstests/core/bulk_api_unordered.js335
-rw-r--r--jstests/core/collation.js1024
-rw-r--r--jstests/core/comment_field.js24
-rw-r--r--jstests/core/currentop_cursors.js40
-rw-r--r--jstests/core/explain_find.js9
-rw-r--r--jstests/core/expr_index_use.js12
-rw-r--r--jstests/core/fts_mix.js26
-rw-r--r--jstests/core/fts_partition1.js13
-rw-r--r--jstests/core/getlog2.js4
-rw-r--r--jstests/core/getmore_invalidated_cursors.js10
-rw-r--r--jstests/core/indexes_multiple_commands.js21
-rw-r--r--jstests/core/insert2.js1
-rw-r--r--jstests/core/invalidated_legacy_cursors.js62
-rw-r--r--jstests/core/json_schema/misc_validation.js71
-rw-r--r--jstests/core/plan_cache_sbe.js6
-rw-r--r--jstests/core/profile_find.js20
-rw-r--r--jstests/core/shell_writeconcern.js38
-rw-r--r--jstests/core/skip1.js23
-rw-r--r--jstests/core/sort1.js42
-rw-r--r--jstests/core/tailable_cursor_legacy_read_mode.js28
-rw-r--r--jstests/core/tailable_skip_limit.js12
-rw-r--r--jstests/core/type4.js13
-rw-r--r--jstests/core/wildcard_index_cached_plans.js6
-rw-r--r--jstests/core/write_result.js2
-rw-r--r--jstests/libs/override_methods/check_indexes_consistent_across_cluster.js1
-rw-r--r--jstests/libs/sbe_util.js15
-rw-r--r--jstests/multiVersion/minor_version_tags_new_old_new.js2
-rw-r--r--jstests/multiVersion/minor_version_tags_old_new_old.js2
-rw-r--r--jstests/noPassthrough/currentop_active_cursor.js41
-rw-r--r--jstests/noPassthrough/currentop_query.js136
-rw-r--r--jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js52
-rw-r--r--jstests/noPassthrough/latency_includes_lock_acquisition_time.js232
-rw-r--r--jstests/noPassthrough/log_find_getmore.js87
-rw-r--r--jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js149
-rw-r--r--jstests/noPassthrough/mongos_exhausts_stale_config_retries.js4
-rw-r--r--jstests/noPassthrough/profile_operation_metrics.js156
-rw-r--r--jstests/noPassthrough/replica_set_connection_getmore.js12
-rw-r--r--jstests/noPassthrough/server_read_concern_metrics.js12
-rw-r--r--jstests/noPassthrough/socket_disconnect_kills.js21
-rw-r--r--jstests/noPassthrough/traffic_reading_legacy.js70
-rw-r--r--jstests/noPassthrough/views_legacy.js83
-rw-r--r--jstests/noPassthroughWithMongod/bulk_api_limits.js29
-rw-r--r--jstests/noPassthroughWithMongod/create_indexes_shell_helper.js3
-rw-r--r--jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js1
-rw-r--r--jstests/replsets/agg_write_concern_zero_batch_size.js11
-rw-r--r--jstests/replsets/bulk_api_wc.js195
-rw-r--r--jstests/replsets/disconnect_on_legacy_write_to_secondary.js106
-rw-r--r--jstests/replsets/libs/tags.js3
-rw-r--r--jstests/replsets/no_disconnect_on_stepdown.js6
-rw-r--r--jstests/replsets/secondaryok_read_pref.js30
-rw-r--r--jstests/sharding/explainFind_stale_mongos.js5
-rw-r--r--jstests/sharding/query/collation_targeting.js20
-rw-r--r--jstests/sharding/query/collation_targeting_inherited.js10
-rw-r--r--jstests/sharding/query/comment_field_sharded.js19
-rw-r--r--jstests/sharding/query/geo_near_sort.js26
-rw-r--r--jstests/sharding/query/mongos_query_comment.js49
-rw-r--r--jstests/sharding/read_pref.js26
-rw-r--r--jstests/sharding/read_write_concern_defaults_application.js2
-rw-r--r--jstests/sharding/sharding_rs2.js1
68 files changed, 1179 insertions, 2622 deletions
diff --git a/jstests/auth/getMore.js b/jstests/auth/getMore.js
index 00c150d59a0..83e427accd6 100644
--- a/jstests/auth/getMore.js
+++ b/jstests/auth/getMore.js
@@ -47,20 +47,6 @@ function runTest(conn) {
"read from another user's find cursor");
testDB.logout();
- // Test that "Mallory" cannot use a legacy find cursor created by "Alice".
- testDB.getMongo().forceReadMode("legacy");
- assert.eq(1, testDB.auth("Alice", "pwd"));
- let cursor = testDB.foo.find().batchSize(2);
- cursor.next();
- cursor.next();
- testDB.logout();
- assert.eq(1, testDB.auth("Mallory", "pwd"));
- assert.throws(function() {
- cursor.next();
- }, [], "read from another user's legacy find cursor");
- testDB.logout();
- testDB.getMongo().forceReadMode("commands");
-
// Test that "Mallory" cannot use an aggregation cursor created by "Alice".
assert.eq(1, testDB.auth("Alice", "pwd"));
res = assert.commandWorked(
diff --git a/jstests/change_streams/change_stream.js b/jstests/change_streams/change_stream.js
index ecf2268e0e7..8debd1feb60 100644
--- a/jstests/change_streams/change_stream.js
+++ b/jstests/change_streams/change_stream.js
@@ -209,16 +209,6 @@ assert.commandWorked(db.t2.insert({_id: 101, renameCollection: "test.dne1", to:
cst.assertNoChange(dne1cursor);
cst.assertNoChange(dne2cursor);
-if (!isMongos) {
- jsTestLog("Ensuring attempt to read with legacy operations fails.");
- db.getMongo().forceReadMode('legacy');
- const legacyCursor = db.tailable2.aggregate([{$changeStream: {}}], {cursor: {batchSize: 0}});
- assert.throws(function() {
- legacyCursor.next();
- }, [], "Legacy getMore expected to fail on changeStream cursor.");
- db.getMongo().forceReadMode('commands');
-}
-
jsTestLog("Testing resumability");
assertDropAndRecreateCollection(db, "resume1");
diff --git a/jstests/change_streams/metadata_notifications.js b/jstests/change_streams/metadata_notifications.js
index f51dacc5b51..991c776553c 100644
--- a/jstests/change_streams/metadata_notifications.js
+++ b/jstests/change_streams/metadata_notifications.js
@@ -13,8 +13,6 @@ load("jstests/libs/fixture_helpers.js"); // For isSharded.
db = db.getSiblingDB(jsTestName());
let cst = new ChangeStreamTest(db);
-db.getMongo().forceReadMode('commands');
-
// Test that it is possible to open a new change stream cursor on a collection that does not
// exist.
const collName = "test";
diff --git a/jstests/core/aggregation_getmore_batchsize.js b/jstests/core/aggregation_getmore_batchsize.js
index 7cef863979d..e33116cb4d5 100644
--- a/jstests/core/aggregation_getmore_batchsize.js
+++ b/jstests/core/aggregation_getmore_batchsize.js
@@ -6,7 +6,6 @@
(function() {
'use strict';
-db.getMongo().forceReadMode("commands");
var coll = db["aggregation_getmore_batchsize"];
// Insert some data to query for
diff --git a/jstests/core/batch_write_command_delete.js b/jstests/core/batch_write_command_delete.js
index 4eb9499013b..ecd3ea39f05 100644
--- a/jstests/core/batch_write_command_delete.js
+++ b/jstests/core/batch_write_command_delete.js
@@ -16,8 +16,6 @@
var coll = db.getCollection("batch_write_delete");
coll.drop();
-assert(coll.getDB().getMongo().useWriteCommands(), "test is not running with write commands");
-
var request;
var result;
var batch;
diff --git a/jstests/core/batch_write_command_insert.js b/jstests/core/batch_write_command_insert.js
index c2f7a22ca0c..282614f6f2e 100644
--- a/jstests/core/batch_write_command_insert.js
+++ b/jstests/core/batch_write_command_insert.js
@@ -20,8 +20,6 @@ load("jstests/libs/get_index_helpers.js");
var coll = db.getCollection("batch_write_insert");
coll.drop();
-assert(coll.getDB().getMongo().useWriteCommands(), "test is not running with write commands");
-
var request;
var result;
var batch;
diff --git a/jstests/core/batch_write_command_update.js b/jstests/core/batch_write_command_update.js
index 394601e4f1f..d9e0fea6668 100644
--- a/jstests/core/batch_write_command_update.js
+++ b/jstests/core/batch_write_command_update.js
@@ -16,8 +16,6 @@
var coll = db.getCollection("batch_write_update");
coll.drop();
-assert(coll.getDB().getMongo().useWriteCommands(), "test is not running with write commands");
-
var request;
var result;
var batch;
diff --git a/jstests/core/batch_write_command_w0.js b/jstests/core/batch_write_command_w0.js
index ca2522bbe01..4869e228919 100644
--- a/jstests/core/batch_write_command_w0.js
+++ b/jstests/core/batch_write_command_w0.js
@@ -25,11 +25,6 @@ function countEventually(collection, n) {
var coll = db.getCollection("batch_write_w0");
coll.drop();
-//
-// Ensures that mongod respects the batch write protocols for delete
-//
-assert(coll.getDB().getMongo().useWriteCommands(), "test is not running with write commands");
-
// EACH TEST BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
//
diff --git a/jstests/core/bulk_api_ordered.js b/jstests/core/bulk_api_ordered.js
index b0b6fb9fc08..0b91ab0c989 100644
--- a/jstests/core/bulk_api_ordered.js
+++ b/jstests/core/bulk_api_ordered.js
@@ -3,6 +3,7 @@
// key.
// @tags: [assumes_unsharded_collection, requires_multi_updates, requires_non_retryable_writes]
+(function() {
var collectionName = "bulk_api_ordered";
var coll = db.getCollection(collectionName);
coll.drop();
@@ -12,173 +13,138 @@ var result;
jsTest.log("Starting bulk api ordered tests...");
-/********************************************************
- *
- * Unordered tests should return same results for write command as
- * well as for the legacy operations
- *
- *******************************************************/
-var executeTests = function() {
- /**
- * find() requires selector.
- */
- var bulkOp = coll.initializeOrderedBulkOp();
-
- assert.throws(function() {
- bulkOp.find();
- });
-
- /**
- * Single successful ordered bulk operation
- */
- var bulkOp = coll.initializeOrderedBulkOp();
- bulkOp.insert({a: 1});
- bulkOp.find({a: 1}).updateOne({$set: {b: 1}});
- // no-op, should increment nMatched but not nModified
- bulkOp.find({a: 1}).updateOne({$set: {b: 1}});
- bulkOp.find({a: 2}).upsert().updateOne({$set: {b: 2}});
- bulkOp.insert({a: 3});
- bulkOp.find({a: 3}).update({$set: {b: 1}});
- bulkOp.find({a: 3}).upsert().update({$set: {b: 2}});
- bulkOp.find({a: 10}).upsert().update({$set: {b: 2}});
- bulkOp.find({a: 2}).replaceOne({a: 11});
- bulkOp.find({a: 11}).removeOne();
- bulkOp.find({a: 3}).remove({a: 3});
- var result = bulkOp.execute();
- assert.eq(2, result.nInserted);
- assert.eq(2, result.nUpserted);
- assert.eq(5, result.nMatched);
- // only check nModified if write commands are enabled
- if (coll.getMongo().writeMode() == "commands") {
- assert.eq(4, result.nModified);
- }
- assert.eq(2, result.nRemoved);
- var upserts = result.getUpsertedIds();
- assert.eq(2, upserts.length);
- assert.eq(3, upserts[0].index);
- assert(upserts[0]._id != null);
- var upsert = result.getUpsertedIdAt(0);
- assert.eq(3, upsert.index);
- assert(upsert._id != null);
- assert.eq(2, coll.find({}).itcount(), "find should return two documents");
-
- // illegal to try to convert a multi-op batch into a SingleWriteResult
- assert.throws(function() {
- result.toSingleResult();
- });
-
- // attempt to re-run bulk operation
- assert.throws(function() {
- bulkOp.execute();
- });
-
- // Test SingleWriteResult
- var singleBatch = coll.initializeOrderedBulkOp();
- singleBatch.find({a: 4}).upsert().updateOne({$set: {b: 1}});
- var singleResult = singleBatch.execute().toSingleResult();
- assert(singleResult.getUpsertedId() != null);
-
- // Create unique index
- coll.remove({});
- coll.createIndex({a: 1}, {unique: true});
-
- /**
- * Single error ordered bulk operation
- */
- var bulkOp = coll.initializeOrderedBulkOp();
- bulkOp.insert({b: 1, a: 1});
- bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
- bulkOp.insert({b: 3, a: 2});
- var result = assert.throws(function() {
- bulkOp.execute();
- });
- assert(result instanceof BulkWriteError);
- assert(result instanceof Error);
- // Basic properties check
- assert.eq(1, result.nInserted);
- assert.eq(true, result.hasWriteErrors());
- assert.eq(1, result.getWriteErrorCount());
-
- // Get the first error
- var error = result.getWriteErrorAt(0);
- assert.eq(11000, error.code);
- assert(error.errmsg != null);
-
- // Get the operation that caused the error
- var op = error.getOperation();
- assert.eq(2, op.q.b);
- assert.eq(1, op.u['$set'].a);
- assert.eq(false, op.multi);
- assert.eq(true, op.upsert);
-
- // Get the first error
- var error = result.getWriteErrorAt(1);
- assert.eq(null, error);
-
- // Create unique index
- coll.dropIndexes();
- coll.remove({});
- coll.createIndex({a: 1}, {unique: true});
-
- /**
- * Multiple error ordered bulk operation
- */
- var bulkOp = coll.initializeOrderedBulkOp();
- bulkOp.insert({b: 1, a: 1});
- bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
- bulkOp.find({b: 3}).upsert().updateOne({$set: {a: 2}});
- bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
- bulkOp.insert({b: 4, a: 3});
- bulkOp.insert({b: 5, a: 1});
- var result = assert.throws(function() {
- bulkOp.execute();
- });
-
- // Basic properties check
- assert.eq(1, result.nInserted);
- assert.eq(true, result.hasWriteErrors());
- assert.eq(1, result.getWriteErrorCount());
-
- // Individual error checking
- var error = result.getWriteErrorAt(0);
- assert.eq(1, error.index);
- assert.eq(11000, error.code);
- assert(error.errmsg != null);
- assert.eq(2, error.getOperation().q.b);
- assert.eq(1, error.getOperation().u['$set'].a);
- assert.eq(false, error.getOperation().multi);
- assert.eq(true, error.getOperation().upsert);
-
- // Create unique index
- coll.dropIndexes();
- coll.remove({});
- coll.createIndex({a: 1}, {unique: true});
-};
-
-var buildVersion = parseInt(db.runCommand({buildInfo: 1}).versionArray.slice(0, 3).join(""), 10);
-// Save the existing useWriteCommands function
-var _useWriteCommands = coll.getMongo().useWriteCommands;
-
-//
-// Only execute write command tests if we have > 2.5.5 otherwise
-// execute the down converted version
-if (buildVersion >= 255) {
- // Force the use of useWriteCommands
- coll._mongo.useWriteCommands = function() {
- return true;
- };
-
- // Execute tests using legacy operations
- executeTests();
+/**
+ * find() requires selector.
+ */
+var bulkOp = coll.initializeOrderedBulkOp();
+
+assert.throws(function() {
+ bulkOp.find();
+});
+
+/**
+ * Single successful ordered bulk operation
+ */
+var bulkOp = coll.initializeOrderedBulkOp();
+bulkOp.insert({a: 1});
+bulkOp.find({a: 1}).updateOne({$set: {b: 1}});
+// no-op, should increment nMatched but not nModified
+bulkOp.find({a: 1}).updateOne({$set: {b: 1}});
+bulkOp.find({a: 2}).upsert().updateOne({$set: {b: 2}});
+bulkOp.insert({a: 3});
+bulkOp.find({a: 3}).update({$set: {b: 1}});
+bulkOp.find({a: 3}).upsert().update({$set: {b: 2}});
+bulkOp.find({a: 10}).upsert().update({$set: {b: 2}});
+bulkOp.find({a: 2}).replaceOne({a: 11});
+bulkOp.find({a: 11}).removeOne();
+bulkOp.find({a: 3}).remove({a: 3});
+var result = bulkOp.execute();
+assert.eq(2, result.nInserted);
+assert.eq(2, result.nUpserted);
+assert.eq(5, result.nMatched);
+// only check nModified if write commands are enabled
+if (coll.getMongo().writeMode() == "commands") {
+ assert.eq(4, result.nModified);
}
-
-// Force the use of legacy commands
-coll._mongo.useWriteCommands = function() {
- return false;
-};
-
-// Execute tests using legacy operations
-executeTests();
-
-// Reset the function
-coll.getMongo().useWriteCommands = _useWriteCommands;
+assert.eq(2, result.nRemoved);
+var upserts = result.getUpsertedIds();
+assert.eq(2, upserts.length);
+assert.eq(3, upserts[0].index);
+assert(upserts[0]._id != null);
+var upsert = result.getUpsertedIdAt(0);
+assert.eq(3, upsert.index);
+assert(upsert._id != null);
+assert.eq(2, coll.find({}).itcount(), "find should return two documents");
+
+// illegal to try to convert a multi-op batch into a SingleWriteResult
+assert.throws(function() {
+ result.toSingleResult();
+});
+
+// attempt to re-run bulk operation
+assert.throws(function() {
+ bulkOp.execute();
+});
+
+// Test SingleWriteResult
+var singleBatch = coll.initializeOrderedBulkOp();
+singleBatch.find({a: 4}).upsert().updateOne({$set: {b: 1}});
+var singleResult = singleBatch.execute().toSingleResult();
+assert(singleResult.getUpsertedId() != null);
+
+// Create unique index
+coll.remove({});
+coll.createIndex({a: 1}, {unique: true});
+
+/**
+ * Single error ordered bulk operation
+ */
+var bulkOp = coll.initializeOrderedBulkOp();
+bulkOp.insert({b: 1, a: 1});
+bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
+bulkOp.insert({b: 3, a: 2});
+var result = assert.throws(function() {
+ bulkOp.execute();
+});
+assert(result instanceof BulkWriteError);
+assert(result instanceof Error);
+// Basic properties check
+assert.eq(1, result.nInserted);
+assert.eq(true, result.hasWriteErrors());
+assert.eq(1, result.getWriteErrorCount());
+
+// Get the first error
+var error = result.getWriteErrorAt(0);
+assert.eq(11000, error.code);
+assert(error.errmsg != null);
+
+// Get the operation that caused the error
+var op = error.getOperation();
+assert.eq(2, op.q.b);
+assert.eq(1, op.u['$set'].a);
+assert.eq(false, op.multi);
+assert.eq(true, op.upsert);
+
+// Get the first error
+var error = result.getWriteErrorAt(1);
+assert.eq(null, error);
+
+// Create unique index
+coll.dropIndexes();
+coll.remove({});
+coll.createIndex({a: 1}, {unique: true});
+
+/**
+ * Multiple error ordered bulk operation
+ */
+var bulkOp = coll.initializeOrderedBulkOp();
+bulkOp.insert({b: 1, a: 1});
+bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
+bulkOp.find({b: 3}).upsert().updateOne({$set: {a: 2}});
+bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
+bulkOp.insert({b: 4, a: 3});
+bulkOp.insert({b: 5, a: 1});
+var result = assert.throws(function() {
+ bulkOp.execute();
+});
+
+// Basic properties check
+assert.eq(1, result.nInserted);
+assert.eq(true, result.hasWriteErrors());
+assert.eq(1, result.getWriteErrorCount());
+
+// Individual error checking
+var error = result.getWriteErrorAt(0);
+assert.eq(1, error.index);
+assert.eq(11000, error.code);
+assert(error.errmsg != null);
+assert.eq(2, error.getOperation().q.b);
+assert.eq(1, error.getOperation().u['$set'].a);
+assert.eq(false, error.getOperation().multi);
+assert.eq(true, error.getOperation().upsert);
+
+// Create unique index
+coll.dropIndexes();
+coll.remove({});
+coll.createIndex({a: 1}, {unique: true});
+}());
diff --git a/jstests/core/bulk_api_unordered.js b/jstests/core/bulk_api_unordered.js
index ed625c04342..9915ec7591f 100644
--- a/jstests/core/bulk_api_unordered.js
+++ b/jstests/core/bulk_api_unordered.js
@@ -3,6 +3,7 @@
// key.
// @tags: [assumes_unsharded_collection, requires_multi_updates, requires_non_retryable_writes]
+(function() {
var collectionName = "bulk_api_unordered";
var coll = db.getCollection(collectionName);
coll.drop();
@@ -12,189 +13,153 @@ var result;
jsTest.log("Starting bulk api unordered tests...");
-/********************************************************
- *
- * Unordered tests should return same results for write command as
- * well as for the legacy operations
- *
- *******************************************************/
-var executeTests = function() {
- // Remove collection
- coll.remove({});
-
- /**
- * find() requires selector.
- */
- var bulkOp = coll.initializeUnorderedBulkOp();
-
- assert.throws(function() {
- bulkOp.find();
- });
-
- /**
- * Single successful unordered bulk operation
- */
- var bulkOp = coll.initializeUnorderedBulkOp();
- bulkOp.insert({a: 1});
- bulkOp.find({a: 1}).updateOne({$set: {b: 1}});
- // no-op, should increment nMatched but not nModified
- bulkOp.find({a: 1}).updateOne({$set: {b: 1}});
- bulkOp.find({a: 2}).upsert().updateOne({$set: {b: 2}});
- bulkOp.insert({a: 3});
- bulkOp.find({a: 3}).update({$set: {b: 1}});
- bulkOp.find({a: 3}).upsert().update({$set: {b: 2}});
- bulkOp.find({a: 10}).upsert().update({$set: {b: 2}});
- bulkOp.find({a: 2}).replaceOne({a: 11});
- bulkOp.find({a: 11}).removeOne();
- bulkOp.find({a: 3}).remove({a: 3});
- var result = bulkOp.execute();
- assert.eq(2, result.nInserted);
- assert.eq(2, result.nUpserted);
- assert.eq(5, result.nMatched);
- // only check nModified if write commands are enabled
- if (coll.getMongo().writeMode() == "commands") {
- assert.eq(4, result.nModified);
- }
- assert.eq(2, result.nRemoved);
- assert.eq(false, result.hasWriteErrors());
- assert.eq(0, result.getWriteErrorCount());
- var upserts = result.getUpsertedIds();
- assert.eq(2, upserts.length);
- assert.eq(3, upserts[0].index);
- assert(upserts[0]._id != null);
- var upsert = result.getUpsertedIdAt(0);
- assert.eq(3, upsert.index);
- assert(upsert._id != null);
- assert.eq(2, coll.find({}).itcount(), "find should return two documents");
-
- // illegal to try to convert a multi-op batch into a SingleWriteResult
- assert.throws(function() {
- result.toSingleResult();
- });
-
- // attempt to re-run bulk
- assert.throws(function() {
- bulkOp.execute();
- });
-
- // Test SingleWriteResult
- var singleBatch = coll.initializeUnorderedBulkOp();
- singleBatch.find({a: 4}).upsert().updateOne({$set: {b: 1}});
- var singleResult = singleBatch.execute().toSingleResult();
- assert(singleResult.getUpsertedId() != null);
-
- // Create unique index
- coll.remove({});
- coll.createIndex({a: 1}, {unique: true});
-
- /**
- * Single error unordered bulk operation
- */
- var bulkOp = coll.initializeUnorderedBulkOp();
- bulkOp.insert({b: 1, a: 1});
- bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
- bulkOp.insert({b: 3, a: 2});
- var result = assert.throws(function() {
- bulkOp.execute();
- });
-
- // Basic properties check
- assert.eq(2, result.nInserted);
- assert.eq(true, result.hasWriteErrors());
- assert.eq(1, result.getWriteErrorCount());
-
- // Get the first error
- var error = result.getWriteErrorAt(0);
- assert.eq(11000, error.code);
- assert(error.errmsg != null);
-
- // Get the operation that caused the error
- var op = error.getOperation();
- assert.eq(2, op.q.b);
- assert.eq(1, op.u['$set'].a);
- assert.eq(false, op.multi);
- assert.eq(true, op.upsert);
-
- // Create unique index
- coll.dropIndexes();
- coll.remove({});
- coll.createIndex({a: 1}, {unique: true});
-
- /**
- * Multiple error unordered bulk operation
- */
- var bulkOp = coll.initializeUnorderedBulkOp();
- bulkOp.insert({b: 1, a: 1});
- bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
- bulkOp.find({b: 3}).upsert().updateOne({$set: {a: 2}});
- bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
- bulkOp.insert({b: 4, a: 3});
- bulkOp.insert({b: 5, a: 1});
- var result = assert.throws(function() {
- bulkOp.execute();
- });
-
- // Basic properties check
- assert.eq(2, result.nInserted);
- assert.eq(1, result.nUpserted);
- assert.eq(true, result.hasWriteErrors());
- assert.eq(3, result.getWriteErrorCount());
-
- // Individual error checking
- var error = result.getWriteErrorAt(0);
- assert.eq(11000, error.code);
- assert(error.errmsg != null);
- assert.eq(2, error.getOperation().q.b);
- assert.eq(1, error.getOperation().u['$set'].a);
- assert.eq(false, error.getOperation().multi);
- assert.eq(true, error.getOperation().upsert);
-
- var error = result.getWriteErrorAt(1);
- assert.eq(3, error.index);
- assert.eq(11000, error.code);
- assert(error.errmsg != null);
- assert.eq(2, error.getOperation().q.b);
- assert.eq(1, error.getOperation().u['$set'].a);
- assert.eq(false, error.getOperation().multi);
- assert.eq(true, error.getOperation().upsert);
-
- var error = result.getWriteErrorAt(2);
- assert.eq(5, error.index);
- assert.eq(11000, error.code);
- assert(error.errmsg != null);
- assert.eq(5, error.getOperation().b);
- assert.eq(1, error.getOperation().a);
-
- // Create unique index
- coll.dropIndexes();
- coll.remove({});
- coll.createIndex({a: 1}, {unique: true});
-};
-
-var buildVersion = parseInt(db.runCommand({buildInfo: 1}).versionArray.slice(0, 3).join(""), 10);
-// Save the existing useWriteCommands function
-var _useWriteCommands = coll.getMongo().useWriteCommands;
-
-//
-// Only execute write command tests if we have > 2.5.5 otherwise
-// execute the down converted version
-if (buildVersion >= 255) {
- // Force the use of useWriteCommands
- coll._mongo.useWriteCommands = function() {
- return true;
- };
-
- // Execute tests using legacy operations
- executeTests();
+coll.remove({});
+
+/**
+ * find() requires selector.
+ */
+var bulkOp = coll.initializeUnorderedBulkOp();
+
+assert.throws(function() {
+ bulkOp.find();
+});
+
+/**
+ * Single successful unordered bulk operation
+ */
+var bulkOp = coll.initializeUnorderedBulkOp();
+bulkOp.insert({a: 1});
+bulkOp.find({a: 1}).updateOne({$set: {b: 1}});
+// no-op, should increment nMatched but not nModified
+bulkOp.find({a: 1}).updateOne({$set: {b: 1}});
+bulkOp.find({a: 2}).upsert().updateOne({$set: {b: 2}});
+bulkOp.insert({a: 3});
+bulkOp.find({a: 3}).update({$set: {b: 1}});
+bulkOp.find({a: 3}).upsert().update({$set: {b: 2}});
+bulkOp.find({a: 10}).upsert().update({$set: {b: 2}});
+bulkOp.find({a: 2}).replaceOne({a: 11});
+bulkOp.find({a: 11}).removeOne();
+bulkOp.find({a: 3}).remove({a: 3});
+var result = bulkOp.execute();
+assert.eq(2, result.nInserted);
+assert.eq(2, result.nUpserted);
+assert.eq(5, result.nMatched);
+// only check nModified if write commands are enabled
+if (coll.getMongo().writeMode() == "commands") {
+ assert.eq(4, result.nModified);
}
-
-// Force the use of legacy commands
-coll._mongo.useWriteCommands = function() {
- return false;
-};
-
-// Execute tests using legacy operations
-executeTests();
-
-// Reset the function
-coll.getMongo().useWriteCommands = _useWriteCommands;
+assert.eq(2, result.nRemoved);
+assert.eq(false, result.hasWriteErrors());
+assert.eq(0, result.getWriteErrorCount());
+var upserts = result.getUpsertedIds();
+assert.eq(2, upserts.length);
+assert.eq(3, upserts[0].index);
+assert(upserts[0]._id != null);
+var upsert = result.getUpsertedIdAt(0);
+assert.eq(3, upsert.index);
+assert(upsert._id != null);
+assert.eq(2, coll.find({}).itcount(), "find should return two documents");
+
+// illegal to try to convert a multi-op batch into a SingleWriteResult
+assert.throws(function() {
+ result.toSingleResult();
+});
+
+// attempt to re-run bulk
+assert.throws(function() {
+ bulkOp.execute();
+});
+
+// Test SingleWriteResult
+var singleBatch = coll.initializeUnorderedBulkOp();
+singleBatch.find({a: 4}).upsert().updateOne({$set: {b: 1}});
+var singleResult = singleBatch.execute().toSingleResult();
+assert(singleResult.getUpsertedId() != null);
+
+// Create unique index
+coll.remove({});
+coll.createIndex({a: 1}, {unique: true});
+
+/**
+ * Single error unordered bulk operation
+ */
+var bulkOp = coll.initializeUnorderedBulkOp();
+bulkOp.insert({b: 1, a: 1});
+bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
+bulkOp.insert({b: 3, a: 2});
+var result = assert.throws(function() {
+ bulkOp.execute();
+});
+
+// Basic properties check
+assert.eq(2, result.nInserted);
+assert.eq(true, result.hasWriteErrors());
+assert.eq(1, result.getWriteErrorCount());
+
+// Get the first error
+var error = result.getWriteErrorAt(0);
+assert.eq(11000, error.code);
+assert(error.errmsg != null);
+
+// Get the operation that caused the error
+var op = error.getOperation();
+assert.eq(2, op.q.b);
+assert.eq(1, op.u['$set'].a);
+assert.eq(false, op.multi);
+assert.eq(true, op.upsert);
+
+// Create unique index
+coll.dropIndexes();
+coll.remove({});
+coll.createIndex({a: 1}, {unique: true});
+
+/**
+ * Multiple error unordered bulk operation
+ */
+var bulkOp = coll.initializeUnorderedBulkOp();
+bulkOp.insert({b: 1, a: 1});
+bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
+bulkOp.find({b: 3}).upsert().updateOne({$set: {a: 2}});
+bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
+bulkOp.insert({b: 4, a: 3});
+bulkOp.insert({b: 5, a: 1});
+var result = assert.throws(function() {
+ bulkOp.execute();
+});
+
+// Basic properties check
+assert.eq(2, result.nInserted);
+assert.eq(1, result.nUpserted);
+assert.eq(true, result.hasWriteErrors());
+assert.eq(3, result.getWriteErrorCount());
+
+// Individual error checking
+var error = result.getWriteErrorAt(0);
+assert.eq(11000, error.code);
+assert(error.errmsg != null);
+assert.eq(2, error.getOperation().q.b);
+assert.eq(1, error.getOperation().u['$set'].a);
+assert.eq(false, error.getOperation().multi);
+assert.eq(true, error.getOperation().upsert);
+
+var error = result.getWriteErrorAt(1);
+assert.eq(3, error.index);
+assert.eq(11000, error.code);
+assert(error.errmsg != null);
+assert.eq(2, error.getOperation().q.b);
+assert.eq(1, error.getOperation().u['$set'].a);
+assert.eq(false, error.getOperation().multi);
+assert.eq(true, error.getOperation().upsert);
+
+var error = result.getWriteErrorAt(2);
+assert.eq(5, error.index);
+assert.eq(11000, error.code);
+assert(error.errmsg != null);
+assert.eq(5, error.getOperation().b);
+assert.eq(1, error.getOperation().a);
+
+// Create unique index
+coll.dropIndexes();
+coll.remove({});
+coll.createIndex({a: 1}, {unique: true});
+}());
diff --git a/jstests/core/collation.js b/jstests/core/collation.js
index c9db94ea24e..c659324b528 100644
--- a/jstests/core/collation.js
+++ b/jstests/core/collation.js
@@ -255,38 +255,34 @@ assertIndexHasCollation({e: 1}, {locale: "simple"});
// Test that an index with a non-simple collation contains collator-generated comparison keys
// rather than the verbatim indexed strings.
-if (db.getMongo().useReadCommands()) {
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}}));
- assert.commandWorked(coll.createIndex({b: 1}));
- assert.commandWorked(coll.insert({a: "foo", b: "foo"}));
- assert.eq(1, coll.find().collation({locale: "fr_CA"}).hint({a: 1}).returnKey().itcount());
- assert.neq("foo", coll.find().collation({locale: "fr_CA"}).hint({a: 1}).returnKey().next().a);
- assert.eq(1, coll.find().collation({locale: "fr_CA"}).hint({b: 1}).returnKey().itcount());
- assert.eq("foo", coll.find().collation({locale: "fr_CA"}).hint({b: 1}).returnKey().next().b);
-}
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}}));
+assert.commandWorked(coll.createIndex({b: 1}));
+assert.commandWorked(coll.insert({a: "foo", b: "foo"}));
+assert.eq(1, coll.find().collation({locale: "fr_CA"}).hint({a: 1}).returnKey().itcount());
+assert.neq("foo", coll.find().collation({locale: "fr_CA"}).hint({a: 1}).returnKey().next().a);
+assert.eq(1, coll.find().collation({locale: "fr_CA"}).hint({b: 1}).returnKey().itcount());
+assert.eq("foo", coll.find().collation({locale: "fr_CA"}).hint({b: 1}).returnKey().next().b);
// Test that a query with a string comparison can use an index with a non-simple collation if it
// has a matching collation.
-if (db.getMongo().useReadCommands()) {
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}}));
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}}));
- // Query has simple collation, but index has fr_CA collation.
- explainRes = coll.find({a: "foo"}).explain();
- assert.commandWorked(explainRes);
- assert(planHasStage(db, getWinningPlan(explainRes.queryPlanner), "COLLSCAN"));
+// Query has simple collation, but index has fr_CA collation.
+explainRes = coll.find({a: "foo"}).explain();
+assert.commandWorked(explainRes);
+assert(planHasStage(db, getWinningPlan(explainRes.queryPlanner), "COLLSCAN"));
- // Query has en_US collation, but index has fr_CA collation.
- explainRes = coll.find({a: "foo"}).collation({locale: "en_US"}).explain();
- assert.commandWorked(explainRes);
- assert(planHasStage(db, getWinningPlan(explainRes.queryPlanner), "COLLSCAN"));
+// Query has en_US collation, but index has fr_CA collation.
+explainRes = coll.find({a: "foo"}).collation({locale: "en_US"}).explain();
+assert.commandWorked(explainRes);
+assert(planHasStage(db, getWinningPlan(explainRes.queryPlanner), "COLLSCAN"));
- // Matching collations.
- explainRes = coll.find({a: "foo"}).collation({locale: "fr_CA"}).explain();
- assert.commandWorked(explainRes);
- assert(planHasStage(db, getWinningPlan(explainRes.queryPlanner), "IXSCAN"));
-}
+// Matching collations.
+explainRes = coll.find({a: "foo"}).collation({locale: "fr_CA"}).explain();
+assert.commandWorked(explainRes);
+assert(planHasStage(db, getWinningPlan(explainRes.queryPlanner), "IXSCAN"));
// Should not be possible to create a text index with an explicit non-simple collation.
coll.drop();
@@ -473,7 +469,7 @@ assert.eq(0, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}
coll.drop();
assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.commandWorked(coll.insert({_id: 2, str: "FOO"}));
-var res = coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}});
+let res = coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}});
assert.eq(1, res.length);
assert.eq("foo", res[0].toLowerCase());
assert.eq(2, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 3}}).length);
@@ -590,80 +586,71 @@ assert.eq(planStage.collation, {
// Collation tests for find.
//
-if (db.getMongo().useReadCommands()) {
- // Find should return correct results when collation specified and collection does not
- // exist.
- coll.drop();
- assert.eq(0, coll.find({_id: "FOO"}).collation({locale: "en_US"}).itcount());
+// Find should return correct results when collation specified and collection does not
+// exist.
+coll.drop();
+assert.eq(0, coll.find({_id: "FOO"}).collation({locale: "en_US"}).itcount());
- // Find should return correct results when collation specified and filter is a match on _id.
- coll.drop();
- assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
- assert.commandWorked(coll.insert({_id: 2, str: "bar"}));
- assert.commandWorked(coll.insert({_id: "foo"}));
- assert.eq(0, coll.find({_id: "FOO"}).itcount());
- assert.eq(0, coll.find({_id: "FOO"}).collation({locale: "en_US"}).itcount());
- assert.eq(1, coll.find({_id: "FOO"}).collation({locale: "en_US", strength: 2}).itcount());
- assert.commandWorked(coll.remove({_id: "foo"}));
-
- // Find should return correct results when collation specified and no indexes exist.
- assert.eq(0, coll.find({str: "FOO"}).itcount());
- assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).itcount());
- assert.eq(1, coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).itcount());
- assert.eq(1,
- coll.find({str: {$ne: "FOO"}}).collation({locale: "en_US", strength: 2}).itcount());
-
- // Find should return correct results when collation specified and compatible index exists.
- assert.commandWorked(coll.createIndex({str: 1}, {collation: {locale: "en_US", strength: 2}}));
- assert.eq(0, coll.find({str: "FOO"}).hint({str: 1}).itcount());
- assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).hint({str: 1}).itcount());
- assert.eq(
- 1,
- coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).hint({str: 1}).itcount());
- assert.eq(1,
- coll.find({str: {$ne: "FOO"}})
- .collation({locale: "en_US", strength: 2})
- .hint({str: 1})
- .itcount());
- assert.commandWorked(coll.dropIndexes());
-
- // Find should return correct results when collation specified and compatible partial index
- // exists.
- assert.commandWorked(coll.createIndex({str: 1}, {
- partialFilterExpression: {str: {$lte: "FOO"}},
- collation: {locale: "en_US", strength: 2}
- }));
- assert.eq(
- 1,
- coll.find({str: "foo"}).collation({locale: "en_US", strength: 2}).hint({str: 1}).itcount());
- assert.commandWorked(coll.insert({_id: 3, str: "goo"}));
- assert.eq(
- 0,
- coll.find({str: "goo"}).collation({locale: "en_US", strength: 2}).hint({str: 1}).itcount());
- assert.commandWorked(coll.remove({_id: 3}));
- assert.commandWorked(coll.dropIndexes());
-
- // Queries that use a index with a non-matching collation should add a sort
- // stage if needed.
- coll.drop();
- assert.commandWorked(coll.insert([{a: "A"}, {a: "B"}, {a: "b"}, {a: "a"}]));
-
- // Ensure results from an index that doesn't match the query collation are sorted to match
- // the requested collation.
- assert.commandWorked(coll.createIndex({a: 1}));
- var res =
- coll.find({a: {'$exists': true}}, {_id: 0}).collation({locale: "en_US", strength: 3}).sort({
- a: 1
- });
- assert.eq(res.toArray(), [{a: "a"}, {a: "A"}, {a: "b"}, {a: "B"}]);
-
- // Find should return correct results when collation specified and query contains $expr.
- coll.drop();
- assert.commandWorked(coll.insert([{a: "A"}, {a: "B"}]));
- assert.eq(
- 1,
- coll.find({$expr: {$eq: ["$a", "a"]}}).collation({locale: "en_US", strength: 2}).itcount());
-}
+// Find should return correct results when collation specified and filter is a match on _id.
+coll.drop();
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "bar"}));
+assert.commandWorked(coll.insert({_id: "foo"}));
+assert.eq(0, coll.find({_id: "FOO"}).itcount());
+assert.eq(0, coll.find({_id: "FOO"}).collation({locale: "en_US"}).itcount());
+assert.eq(1, coll.find({_id: "FOO"}).collation({locale: "en_US", strength: 2}).itcount());
+assert.commandWorked(coll.remove({_id: "foo"}));
+
+// Find should return correct results when collation specified and no indexes exist.
+assert.eq(0, coll.find({str: "FOO"}).itcount());
+assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).itcount());
+assert.eq(1, coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).itcount());
+assert.eq(1, coll.find({str: {$ne: "FOO"}}).collation({locale: "en_US", strength: 2}).itcount());
+
+// Find should return correct results when collation specified and compatible index exists.
+assert.commandWorked(coll.createIndex({str: 1}, {collation: {locale: "en_US", strength: 2}}));
+assert.eq(0, coll.find({str: "FOO"}).hint({str: 1}).itcount());
+assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).hint({str: 1}).itcount());
+assert.eq(
+ 1, coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).hint({str: 1}).itcount());
+assert.eq(1,
+ coll.find({str: {$ne: "FOO"}})
+ .collation({locale: "en_US", strength: 2})
+ .hint({str: 1})
+ .itcount());
+assert.commandWorked(coll.dropIndexes());
+
+// Find should return correct results when collation specified and compatible partial index
+// exists.
+assert.commandWorked(coll.createIndex(
+ {str: 1},
+ {partialFilterExpression: {str: {$lte: "FOO"}}, collation: {locale: "en_US", strength: 2}}));
+assert.eq(
+ 1, coll.find({str: "foo"}).collation({locale: "en_US", strength: 2}).hint({str: 1}).itcount());
+assert.commandWorked(coll.insert({_id: 3, str: "goo"}));
+assert.eq(
+ 0, coll.find({str: "goo"}).collation({locale: "en_US", strength: 2}).hint({str: 1}).itcount());
+assert.commandWorked(coll.remove({_id: 3}));
+assert.commandWorked(coll.dropIndexes());
+
+// Queries that use a index with a non-matching collation should add a sort
+// stage if needed.
+coll.drop();
+assert.commandWorked(coll.insert([{a: "A"}, {a: "B"}, {a: "b"}, {a: "a"}]));
+
+// Ensure results from an index that doesn't match the query collation are sorted to match
+// the requested collation.
+assert.commandWorked(coll.createIndex({a: 1}));
+res = coll.find({a: {'$exists': true}}, {_id: 0}).collation({locale: "en_US", strength: 3}).sort({
+ a: 1
+});
+assert.eq(res.toArray(), [{a: "a"}, {a: "A"}, {a: "b"}, {a: "B"}]);
+
+// Find should return correct results when collation specified and query contains $expr.
+coll.drop();
+assert.commandWorked(coll.insert([{a: "A"}, {a: "B"}]));
+assert.eq(
+ 1, coll.find({$expr: {$eq: ["$a", "a"]}}).collation({locale: "en_US", strength: 2}).itcount());
// Find should return correct results when no collation specified and collection has a default
// collation.
@@ -704,58 +691,55 @@ assert.commandWorked(
assert.commandWorked(coll.insert([{a: "A"}, {a: "B"}]));
assert.eq(1, coll.find({$expr: {$eq: ["$a", "a"]}}).itcount());
-if (db.getMongo().useReadCommands()) {
- // Find should return correct results when "simple" collation specified and collection has a
- // default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.commandWorked(coll.insert({str: "foo"}));
- assert.commandWorked(coll.insert({str: "FOO"}));
- assert.commandWorked(coll.insert({str: "bar"}));
- assert.eq(2, coll.find({str: {$in: ["foo", "bar"]}}).collation({locale: "simple"}).itcount());
- assert.eq(1, coll.find({str: "foo"}).collation({locale: "simple"}).itcount());
- assert.eq(
- [{str: "FOO"}, {str: "bar"}, {str: "foo"}],
- coll.find({}, {_id: 0, str: 1}).sort({str: 1}).collation({locale: "simple"}).toArray());
-
- // Find on _id should return correct results when query collation differs from collection
- // default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 3}}));
- assert.commandWorked(coll.insert({_id: "foo"}));
- assert.commandWorked(coll.insert({_id: "FOO"}));
- assert.eq(2, coll.find({_id: "foo"}).collation({locale: "en_US", strength: 2}).itcount());
+// Find should return correct results when "simple" collation specified and collection has a
+// default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.commandWorked(coll.insert({str: "foo"}));
+assert.commandWorked(coll.insert({str: "FOO"}));
+assert.commandWorked(coll.insert({str: "bar"}));
+assert.eq(2, coll.find({str: {$in: ["foo", "bar"]}}).collation({locale: "simple"}).itcount());
+assert.eq(1, coll.find({str: "foo"}).collation({locale: "simple"}).itcount());
+assert.eq([{str: "FOO"}, {str: "bar"}, {str: "foo"}],
+ coll.find({}, {_id: 0, str: 1}).sort({str: 1}).collation({locale: "simple"}).toArray());
- // Find on _id should use idhack stage when explicitly given query collation matches
- // collection default.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- explainRes =
- coll.explain("executionStats").find({_id: "foo"}).collation({locale: "en_US"}).finish();
- assert.commandWorked(explainRes);
- if (isSBEEnabled) {
- planStage = getPlanStage(getWinningPlan(explainRes.queryPlanner), "IXSCAN");
- } else {
- planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
- }
- assert.neq(null, planStage, explainRes);
+// Find on _id should return correct results when query collation differs from collection
+// default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 3}}));
+assert.commandWorked(coll.insert({_id: "foo"}));
+assert.commandWorked(coll.insert({_id: "FOO"}));
+assert.eq(2, coll.find({_id: "foo"}).collation({locale: "en_US", strength: 2}).itcount());
- // Find on _id should not use idhack stage when query collation does not match collection
- // default.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- explainRes =
- coll.explain("executionStats").find({_id: "foo"}).collation({locale: "fr_CA"}).finish();
- assert.commandWorked(explainRes);
- if (isSBEEnabled) {
- planStage = getPlanStage(getWinningPlan(explainRes.queryPlanner), "IXSCAN");
- } else {
- planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
- }
- assert.eq(null, planStage);
+// Find on _id should use idhack stage when explicitly given query collation matches
+// collection default.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
+explainRes =
+ coll.explain("executionStats").find({_id: "foo"}).collation({locale: "en_US"}).finish();
+assert.commandWorked(explainRes);
+if (isSBEEnabled) {
+ planStage = getPlanStage(getWinningPlan(explainRes.queryPlanner), "IXSCAN");
+} else {
+ planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
+}
+assert.neq(null, planStage, explainRes);
+
+// Find on _id should not use idhack stage when query collation does not match collection
+// default.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
+explainRes =
+ coll.explain("executionStats").find({_id: "foo"}).collation({locale: "fr_CA"}).finish();
+assert.commandWorked(explainRes);
+if (isSBEEnabled) {
+ planStage = getPlanStage(getWinningPlan(explainRes.queryPlanner), "IXSCAN");
+} else {
+ planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
}
+assert.eq(null, planStage);
// Find should select compatible index when no collation specified and collection has a default
// collation.
@@ -882,17 +866,6 @@ assert.eq(planStage.collation, {
version: "57.1",
});
-if (!db.getMongo().useReadCommands()) {
- // find() shell helper should error if a collation is specified and the shell is not using
- // read commands.
- coll.drop();
- assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
- assert.commandWorked(coll.insert({_id: 2, str: "bar"}));
- assert.throws(function() {
- coll.find().collation({locale: "fr"}).itcount();
- });
-}
-
//
// Collation tests for findAndModify.
//
@@ -1039,33 +1012,28 @@ assert.eq(mapReduceOut.results.length, 0);
// Collation tests for remove.
//
-if (db.getMongo().writeMode() === "commands") {
- // Remove should succeed when collation specified and collection does not exist.
- coll.drop();
- assert.commandWorked(coll.remove({str: "foo"}, {justOne: true, collation: {locale: "fr"}}));
+// Remove should succeed when collation specified and collection does not exist.
+coll.drop();
+assert.commandWorked(coll.remove({str: "foo"}, {justOne: true, collation: {locale: "fr"}}));
- // Remove should return correct results when collation specified.
- coll.drop();
- assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
- assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
- writeRes =
- coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "en_US", strength: 2}});
- assert.commandWorked(writeRes);
- assert.eq(1, writeRes.nRemoved);
+// Remove should return correct results when collation specified.
+coll.drop();
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
+writeRes = coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "en_US", strength: 2}});
+assert.commandWorked(writeRes);
+assert.eq(1, writeRes.nRemoved);
- // Explain of remove should return correct results when collation specified.
- coll.drop();
- assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
- assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
- explainRes = coll.explain("executionStats").remove({str: "FOO"}, {
- justOne: true,
- collation: {locale: "en_US", strength: 2}
- });
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "DELETE");
- assert.neq(null, planStage);
- assert.eq(1, planStage.nWouldDelete);
-}
+// Explain of remove should return correct results when collation specified.
+coll.drop();
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
+explainRes = coll.explain("executionStats")
+ .remove({str: "FOO"}, {justOne: true, collation: {locale: "en_US", strength: 2}});
+assert.commandWorked(explainRes);
+planStage = getPlanStage(explainRes.executionStats.executionStages, "DELETE");
+assert.neq(null, planStage);
+assert.eq(1, planStage.nWouldDelete);
// Remove should return correct results when no collation specified and collection has a default
// collation.
@@ -1378,84 +1346,73 @@ assert.eq(0, coll.aggregate([geoNearStage], {collation: {locale: "simple"}}).itc
// Collation tests for find with $nearSphere.
//
-if (db.getMongo().useReadCommands()) {
- // Find with $nearSphere should return correct results when collation specified and
- // collection does not exist.
- coll.drop();
- assert.eq(
- 0,
- coll.find(
- {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
- .collation({locale: "en_US", strength: 2})
- .itcount());
-
- // Find with $nearSphere should return correct results when collation specified and string
- // predicate not indexed.
- coll.drop();
- assert.commandWorked(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
- assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
- assert.eq(
- 0,
- coll.find(
- {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
- .itcount());
- assert.eq(
- 1,
- coll.find(
- {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
- .collation({locale: "en_US", strength: 2})
- .itcount());
-
- // Find with $nearSphere should return correct results when no collation specified and
- // string predicate indexed.
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.createIndex({geo: "2dsphere", str: 1}));
- assert.eq(
- 0,
- coll.find(
- {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
- .itcount());
- assert.eq(
- 1,
- coll.find(
- {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
- .collation({locale: "en_US", strength: 2})
- .itcount());
-
- // Find with $nearSphere should return correct results when collation specified and
- // collation on index is incompatible with string predicate.
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(
- coll.createIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 3}}));
- assert.eq(
- 0,
- coll.find(
- {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
- .itcount());
- assert.eq(
- 1,
- coll.find(
- {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
- .collation({locale: "en_US", strength: 2})
- .itcount());
-
- // Find with $nearSphere should return correct results when collation specified and
- // collation on index is compatible with string predicate.
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(
- coll.createIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 2}}));
- assert.eq(
- 0,
- coll.find(
- {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
- .itcount());
- assert.eq(
- 1,
- coll.find(
- {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
- .collation({locale: "en_US", strength: 2})
- .itcount());
-}
+// Find with $nearSphere should return correct results when collation specified and
+// collection does not exist.
+coll.drop();
+assert.eq(
+ 0,
+ coll.find({str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .collation({locale: "en_US", strength: 2})
+ .itcount());
+
+// Find with $nearSphere should return correct results when collation specified and string
+// predicate not indexed.
+coll.drop();
+assert.commandWorked(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
+assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
+assert.eq(
+ 0,
+ coll.find({str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .itcount());
+assert.eq(
+ 1,
+ coll.find({str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .collation({locale: "en_US", strength: 2})
+ .itcount());
+
+// Find with $nearSphere should return correct results when no collation specified and
+// string predicate indexed.
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(coll.createIndex({geo: "2dsphere", str: 1}));
+assert.eq(
+ 0,
+ coll.find({str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .itcount());
+assert.eq(
+ 1,
+ coll.find({str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .collation({locale: "en_US", strength: 2})
+ .itcount());
+
+// Find with $nearSphere should return correct results when collation specified and
+// collation on index is incompatible with string predicate.
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(
+ coll.createIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 3}}));
+assert.eq(
+ 0,
+ coll.find({str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .itcount());
+assert.eq(
+ 1,
+ coll.find({str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .collation({locale: "en_US", strength: 2})
+ .itcount());
+
+// Find with $nearSphere should return correct results when collation specified and
+// collation on index is compatible with string predicate.
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(
+ coll.createIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 2}}));
+assert.eq(
+ 0,
+ coll.find({str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .itcount());
+assert.eq(
+ 1,
+ coll.find({str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .collation({locale: "en_US", strength: 2})
+ .itcount());
//
// Tests for the bulk API.
@@ -1463,94 +1420,73 @@ if (db.getMongo().useReadCommands()) {
var bulk;
-if (db.getMongo().writeMode() !== "commands") {
- coll.drop();
- assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
- assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
-
- // Can't use the bulk API to set a collation when using legacy write ops.
- bulk = coll.initializeUnorderedBulkOp();
- assert.throws(function() {
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2});
- });
-
- bulk = coll.initializeOrderedBulkOp();
- assert.throws(function() {
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2});
- });
-} else {
- // update().
- coll.drop();
- assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
- assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({$set: {other: 99}});
- writeRes = bulk.execute();
- assert.commandWorked(writeRes);
- assert.eq(2, writeRes.nModified);
+// update().
+coll.drop();
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({$set: {other: 99}});
+writeRes = bulk.execute();
+assert.commandWorked(writeRes);
+assert.eq(2, writeRes.nModified);
- // updateOne().
- coll.drop();
- assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
- assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({
- $set: {other: 99}
- });
- writeRes = bulk.execute();
- assert.commandWorked(writeRes);
- assert.eq(1, writeRes.nModified);
+// updateOne().
+coll.drop();
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({$set: {other: 99}});
+writeRes = bulk.execute();
+assert.commandWorked(writeRes);
+assert.eq(1, writeRes.nModified);
- // replaceOne().
- coll.drop();
- assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
- assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({str: "oof"});
- writeRes = bulk.execute();
- assert.commandWorked(writeRes);
- assert.eq(1, writeRes.nModified);
+// replaceOne().
+coll.drop();
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({str: "oof"});
+writeRes = bulk.execute();
+assert.commandWorked(writeRes);
+assert.eq(1, writeRes.nModified);
- // replaceOne() with upsert().
- coll.drop();
- assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
- assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({str: "FOO"}).collation({locale: "en_US"}).upsert().replaceOne({str: "foo"});
- writeRes = bulk.execute();
- assert.commandWorked(writeRes);
- assert.eq(1, writeRes.nUpserted);
- assert.eq(0, writeRes.nModified);
+// replaceOne() with upsert().
+coll.drop();
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({str: "FOO"}).collation({locale: "en_US"}).upsert().replaceOne({str: "foo"});
+writeRes = bulk.execute();
+assert.commandWorked(writeRes);
+assert.eq(1, writeRes.nUpserted);
+assert.eq(0, writeRes.nModified);
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).upsert().replaceOne({
- str: "foo"
- });
- writeRes = bulk.execute();
- assert.commandWorked(writeRes);
- assert.eq(0, writeRes.nUpserted);
- assert.eq(1, writeRes.nModified);
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).upsert().replaceOne({str: "foo"});
+writeRes = bulk.execute();
+assert.commandWorked(writeRes);
+assert.eq(0, writeRes.nUpserted);
+assert.eq(1, writeRes.nModified);
- // removeOne().
- coll.drop();
- assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
- assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne();
- writeRes = bulk.execute();
- assert.commandWorked(writeRes);
- assert.eq(1, writeRes.nRemoved);
+// removeOne().
+coll.drop();
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne();
+writeRes = bulk.execute();
+assert.commandWorked(writeRes);
+assert.eq(1, writeRes.nRemoved);
- // remove().
- coll.drop();
- assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
- assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove();
- writeRes = bulk.execute();
- assert.commandWorked(writeRes);
- assert.eq(2, writeRes.nRemoved);
-}
+// remove().
+coll.drop();
+assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
+assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove();
+writeRes = bulk.execute();
+assert.commandWorked(writeRes);
+assert.eq(2, writeRes.nRemoved);
//
// Tests for the CRUD API.
@@ -1560,27 +1496,15 @@ if (db.getMongo().writeMode() !== "commands") {
coll.drop();
assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
-if (db.getMongo().writeMode() === "commands") {
- var res = coll.deleteOne({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
- assert.eq(1, res.deletedCount);
-} else {
- assert.throws(function() {
- coll.deleteOne({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
- });
-}
+res = coll.deleteOne({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
+assert.eq(1, res.deletedCount);
// deleteMany().
coll.drop();
assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
-if (db.getMongo().writeMode() === "commands") {
- var res = coll.deleteMany({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
- assert.eq(2, res.deletedCount);
-} else {
- assert.throws(function() {
- coll.deleteMany({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
- });
-}
+res = coll.deleteMany({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
+assert.eq(2, res.deletedCount);
// findOneAndDelete().
coll.drop();
@@ -1609,184 +1533,99 @@ assert.neq(null, coll.findOne({other: 99}));
coll.drop();
assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
-if (db.getMongo().writeMode() === "commands") {
- var res =
- coll.replaceOne({str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}});
- assert.eq(1, res.modifiedCount);
-} else {
- assert.throws(function() {
- coll.replaceOne({str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}});
- });
-}
+res = coll.replaceOne({str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}});
+assert.eq(1, res.modifiedCount);
// updateOne().
coll.drop();
assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
-if (db.getMongo().writeMode() === "commands") {
- var res = coll.updateOne(
- {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
- assert.eq(1, res.modifiedCount);
-} else {
- assert.throws(function() {
- coll.updateOne(
- {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
- });
-}
+res =
+ coll.updateOne({str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
+assert.eq(1, res.modifiedCount);
// updateMany().
coll.drop();
assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
-if (db.getMongo().writeMode() === "commands") {
- var res = coll.updateMany(
- {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
- assert.eq(2, res.modifiedCount);
-} else {
- assert.throws(function() {
- coll.updateMany(
- {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
- });
-}
+res =
+ coll.updateMany({str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
+assert.eq(2, res.modifiedCount);
// updateOne with bulkWrite().
coll.drop();
assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
-if (db.getMongo().writeMode() === "commands") {
- var res = coll.bulkWrite([{
- updateOne: {
- filter: {str: "FOO"},
- update: {$set: {other: 99}},
- collation: {locale: "en_US", strength: 2}
- }
- }]);
- assert.eq(1, res.matchedCount);
-} else {
+res = coll.bulkWrite([{
+ updateOne: {
+ filter: {str: "FOO"},
+ update: {$set: {other: 99}},
+ collation: {locale: "en_US", strength: 2}
+ }
+}]);
+assert.eq(1, res.matchedCount);
+
+// updateOne with undefined/null collation.backwards parameter (SERVER-54482).
+for (let backwards of [undefined, null]) {
assert.throws(function() {
coll.bulkWrite([{
updateOne: {
- filter: {str: "FOO"},
- update: {$set: {other: 99}},
- collation: {locale: "en_US", strength: 2}
+ filter: {str: 'foo'},
+ update: {$set: {str: 'bar'}},
+ collation: {locale: 'en_US', backwards: backwards}
}
}]);
});
}
-// updateOne with undefined/null collation.backwards parameter (SERVER-54482).
-if (db.getMongo().writeMode() === "commands") {
- for (let backwards of [undefined, null]) {
- assert.throws(function() {
- coll.bulkWrite([{
- updateOne: {
- filter: {str: 'foo'},
- update: {$set: {str: 'bar'}},
- collation: {locale: 'en_US', backwards: backwards}
- }
- }]);
- });
- }
-}
-
// updateMany with bulkWrite().
coll.drop();
assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
-if (db.getMongo().writeMode() === "commands") {
- var res = coll.bulkWrite([{
- updateMany: {
- filter: {str: "FOO"},
- update: {$set: {other: 99}},
- collation: {locale: "en_US", strength: 2}
- }
- }]);
- assert.eq(2, res.matchedCount);
-} else {
- assert.throws(function() {
- coll.bulkWrite([{
- updateMany: {
- filter: {str: "FOO"},
- update: {$set: {other: 99}},
- collation: {locale: "en_US", strength: 2}
- }
- }]);
- });
-}
+res = coll.bulkWrite([{
+ updateMany: {
+ filter: {str: "FOO"},
+ update: {$set: {other: 99}},
+ collation: {locale: "en_US", strength: 2}
+ }
+}]);
+assert.eq(2, res.matchedCount);
// replaceOne with bulkWrite().
coll.drop();
assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
-if (db.getMongo().writeMode() === "commands") {
- var res = coll.bulkWrite([{
- replaceOne: {
- filter: {str: "FOO"},
- replacement: {str: "bar"},
- collation: {locale: "en_US", strength: 2}
- }
- }]);
- assert.eq(1, res.matchedCount);
-} else {
- assert.throws(function() {
- coll.bulkWrite([{
- replaceOne: {
- filter: {str: "FOO"},
- replacement: {str: "bar"},
- collation: {locale: "en_US", strength: 2}
- }
- }]);
- });
-}
+res = coll.bulkWrite([{
+ replaceOne:
+ {filter: {str: "FOO"}, replacement: {str: "bar"}, collation: {locale: "en_US", strength: 2}}
+}]);
+assert.eq(1, res.matchedCount);
// deleteOne with bulkWrite().
coll.drop();
assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
-if (db.getMongo().writeMode() === "commands") {
- var res = coll.bulkWrite(
- [{deleteOne: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]);
- assert.eq(1, res.deletedCount);
-} else {
- assert.throws(function() {
- coll.bulkWrite(
- [{deleteOne: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]);
- });
-}
+res = coll.bulkWrite(
+ [{deleteOne: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]);
+assert.eq(1, res.deletedCount);
// deleteMany with bulkWrite().
coll.drop();
assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.commandWorked(coll.insert({_id: 2, str: "foo"}));
-if (db.getMongo().writeMode() === "commands") {
- var res = coll.bulkWrite(
- [{deleteMany: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]);
- assert.eq(2, res.deletedCount);
-} else {
- assert.throws(function() {
- coll.bulkWrite(
- [{deleteMany: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]);
- });
-}
+res = coll.bulkWrite(
+ [{deleteMany: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]);
+assert.eq(2, res.deletedCount);
// Two deleteOne ops with bulkWrite using different collations.
coll.drop();
assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
assert.commandWorked(coll.insert({_id: 2, str: "bar"}));
-if (db.getMongo().writeMode() === "commands") {
- var res = coll.bulkWrite([
- {deleteOne: {filter: {str: "FOO"}, collation: {locale: "fr", strength: 2}}},
- {deleteOne: {filter: {str: "BAR"}, collation: {locale: "en_US", strength: 2}}}
- ]);
- assert.eq(2, res.deletedCount);
-} else {
- assert.throws(function() {
- coll.bulkWrite([
- {deleteOne: {filter: {str: "FOO"}, collation: {locale: "fr", strength: 2}}},
- {deleteOne: {filter: {str: "BAR"}, collation: {locale: "en_US", strength: 2}}}
- ]);
- });
-}
+res = coll.bulkWrite([
+ {deleteOne: {filter: {str: "FOO"}, collation: {locale: "fr", strength: 2}}},
+ {deleteOne: {filter: {str: "BAR"}, collation: {locale: "en_US", strength: 2}}}
+]);
+assert.eq(2, res.deletedCount);
// applyOps.
if (!isMongos) {
@@ -1857,108 +1696,109 @@ if (!isMongos) {
}
// Test that the find command's min/max options respect the collation.
-if (db.getMongo().useReadCommands()) {
- coll.drop();
- assert.commandWorked(coll.insert({str: "a"}));
- assert.commandWorked(coll.insert({str: "A"}));
- assert.commandWorked(coll.insert({str: "b"}));
- assert.commandWorked(coll.insert({str: "B"}));
- assert.commandWorked(coll.insert({str: "c"}));
- assert.commandWorked(coll.insert({str: "C"}));
- assert.commandWorked(coll.insert({str: "d"}));
- assert.commandWorked(coll.insert({str: "D"}));
-
- // This query should fail, since there is no index to support the min/max.
- let err = assert.throws(() => coll.find()
- .min({str: "b"})
- .max({str: "D"})
- .collation({locale: "en_US", strength: 2})
- .itcount());
- assert.commandFailedWithCode(err, 51173);
-
- // Even after building an index with the right key pattern, the query should fail since the
- // collations don't match.
- assert.commandWorked(coll.createIndex({str: 1}, {name: "noCollation"}));
- err = assert.throws(() => coll.find()
+coll.drop();
+assert.commandWorked(coll.insert({str: "a"}));
+assert.commandWorked(coll.insert({str: "A"}));
+assert.commandWorked(coll.insert({str: "b"}));
+assert.commandWorked(coll.insert({str: "B"}));
+assert.commandWorked(coll.insert({str: "c"}));
+assert.commandWorked(coll.insert({str: "C"}));
+assert.commandWorked(coll.insert({str: "d"}));
+assert.commandWorked(coll.insert({str: "D"}));
+
+// This query should fail, since there is no index to support the min/max.
+let err = assert.throws(() => coll.find()
.min({str: "b"})
.max({str: "D"})
.collation({locale: "en_US", strength: 2})
- .hint({str: 1})
.itcount());
- assert.commandFailedWithCode(err, 51174);
-
- // This query should fail, because the hinted index does not match the requested
- // collation, and the 'max' value is a string, which means we cannot ignore the
- // collation.
- const caseInsensitive = {locale: "en", strength: 2};
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.createIndex({str: 1}));
- err = assert.throws(() => coll.find({}, {_id: 0})
- .min({str: MinKey})
- .max({str: "Hello1"})
- .hint({str: 1})
- .collation(caseInsensitive)
- .toArray());
- assert.commandFailedWithCode(err, 51174);
-
- // After building an index with the case-insensitive US English collation, the query should
- // work. Furthermore, the bounds defined by the min and max should respect the
- // case-insensitive collation.
- assert.commandWorked(coll.createIndex(
- {str: 1}, {name: "withCollation", collation: {locale: "en_US", strength: 2}}));
- assert.eq(4,
- coll.find()
- .min({str: "b"})
- .max({str: "D"})
- .collation({locale: "en_US", strength: 2})
- .hint("withCollation")
- .itcount());
-
- // Ensure results from index with min/max query are sorted to match requested collation.
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
- assert.commandWorked(
- coll.insert([{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}]));
- var expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}];
- res = coll.find({}, {_id: 0})
- .hint({a: 1, b: 1})
- .min({a: 1, b: 1})
- .max({a: 2, b: 3})
- .collation({locale: "en_US", strength: 3})
- .sort({a: 1, b: 1});
- assert.eq(res.toArray(), expected);
- res = coll.find({}, {_id: 0})
- .hint({a: 1, b: 1})
- .min({a: 1, b: 1})
- .collation({locale: "en_US", strength: 3})
- .sort({a: 1, b: 1});
- assert.eq(res.toArray(), expected);
- res = coll.find({}, {_id: 0})
- .hint({a: 1, b: 1})
- .max({a: 2, b: 3})
- .collation({locale: "en_US", strength: 3})
- .sort({a: 1, b: 1});
- assert.eq(res.toArray(), expected);
-
- // A min/max query that can use an index whose collation doesn't match should require a sort
- // stage if there are any in-bounds strings. Verify this using explain.
- explainRes = coll.find({}, {_id: 0})
- .hint({a: 1, b: 1})
- .max({a: 2, b: 3})
- .collation({locale: "en_US", strength: 3})
- .sort({a: 1, b: 1})
- .explain();
- assert.commandWorked(explainRes);
- assert(planHasStage(db, getWinningPlan(explainRes.queryPlanner), "SORT"));
-
- // This query should fail since min has a string as one of it's boundaries, and the
- // collation doesn't match that of the index.
- assert.throws(() => coll.find({}, {_id: 0})
- .hint({a: 1, b: 1})
- .min({a: 1, b: "A"})
- .max({a: 2, b: 1})
- .collation({locale: "en_US", strength: 3})
- .sort({a: 1, b: 1})
- .itcount());
-}
+assert.commandFailedWithCode(err, 51173);
+
+// Even after building an index with the right key pattern, the query should fail since the
+// collations don't match.
+assert.commandWorked(coll.createIndex({str: 1}, {name: "noCollation"}));
+err = assert.throws(() => coll.find()
+ .min({str: "b"})
+ .max({str: "D"})
+ .collation({locale: "en_US", strength: 2})
+ .hint({str: 1})
+ .itcount());
+assert.commandFailedWithCode(err, 51174);
+
+// This query should fail, because the hinted index does not match the requested
+// collation, and the 'max' value is a string, which means we cannot ignore the
+// collation.
+const caseInsensitive = {
+ locale: "en",
+ strength: 2
+};
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(coll.createIndex({str: 1}));
+err = assert.throws(() => coll.find({}, {_id: 0})
+ .min({str: MinKey})
+ .max({str: "Hello1"})
+ .hint({str: 1})
+ .collation(caseInsensitive)
+ .toArray());
+assert.commandFailedWithCode(err, 51174);
+
+// After building an index with the case-insensitive US English collation, the query should
+// work. Furthermore, the bounds defined by the min and max should respect the
+// case-insensitive collation.
+assert.commandWorked(
+ coll.createIndex({str: 1}, {name: "withCollation", collation: {locale: "en_US", strength: 2}}));
+assert.eq(4,
+ coll.find()
+ .min({str: "b"})
+ .max({str: "D"})
+ .collation({locale: "en_US", strength: 2})
+ .hint("withCollation")
+ .itcount());
+
+// Ensure results from index with min/max query are sorted to match requested collation.
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+assert.commandWorked(
+ coll.insert([{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}]));
+var expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}];
+res = coll.find({}, {_id: 0})
+ .hint({a: 1, b: 1})
+ .min({a: 1, b: 1})
+ .max({a: 2, b: 3})
+ .collation({locale: "en_US", strength: 3})
+ .sort({a: 1, b: 1});
+assert.eq(res.toArray(), expected);
+res = coll.find({}, {_id: 0})
+ .hint({a: 1, b: 1})
+ .min({a: 1, b: 1})
+ .collation({locale: "en_US", strength: 3})
+ .sort({a: 1, b: 1});
+assert.eq(res.toArray(), expected);
+res = coll.find({}, {_id: 0})
+ .hint({a: 1, b: 1})
+ .max({a: 2, b: 3})
+ .collation({locale: "en_US", strength: 3})
+ .sort({a: 1, b: 1});
+assert.eq(res.toArray(), expected);
+
+// A min/max query that can use an index whose collation doesn't match should require a sort
+// stage if there are any in-bounds strings. Verify this using explain.
+explainRes = coll.find({}, {_id: 0})
+ .hint({a: 1, b: 1})
+ .max({a: 2, b: 3})
+ .collation({locale: "en_US", strength: 3})
+ .sort({a: 1, b: 1})
+ .explain();
+assert.commandWorked(explainRes);
+assert(planHasStage(db, getWinningPlan(explainRes.queryPlanner), "SORT"));
+
+// This query should fail since min has a string as one of its boundaries, and the
+// collation doesn't match that of the index.
+assert.throws(() => coll.find({}, {_id: 0})
+ .hint({a: 1, b: 1})
+ .min({a: 1, b: "A"})
+ .max({a: 2, b: 1})
+ .collation({locale: "en_US", strength: 3})
+ .sort({a: 1, b: 1})
+ .itcount());
})();
diff --git a/jstests/core/comment_field.js b/jstests/core/comment_field.js
index 60345f10ad5..5bc3d2ba04a 100644
--- a/jstests/core/comment_field.js
+++ b/jstests/core/comment_field.js
@@ -140,28 +140,4 @@ runCommentParamTest({
coll: coll,
command: {explain: {aggregate: coll.getName(), pipeline: [], cursor: {}, comment: innerComment}}
});
-
-//
-// Tests for Legacy query.
-//
-
-testDB.getMongo().forceReadMode("legacy");
-restartProfiler();
-
-// Verify that $comment meta-operator inside $query is not treated as a 'comment' field.
-assert.eq(testDB.coll.find({$query: {_id: 1, $comment: {a: 1}}}).itcount(), 1);
-profilerHasSingleMatchingEntryOrThrow({
- profileDB: testDB,
- filter: {"command.filter": {_id: 1, $comment: {a: 1}}, "command.comment": {$exists: false}}
-});
-
-// Verify that $comment at top level is treated as a 'comment' field.
-const expectedComment = {
- commentName: "legacy_query"
-};
-assert.eq(testDB.coll.find({$query: {_id: 1}, $comment: expectedComment}).itcount(), 1);
-profilerHasSingleMatchingEntryOrThrow(
- {profileDB: testDB, filter: {"command.comment": expectedComment}});
-
-testDB.getMongo().forceReadMode("commands");
})();
diff --git a/jstests/core/currentop_cursors.js b/jstests/core/currentop_cursors.js
index ce59787cb6f..15fe92ce4d8 100644
--- a/jstests/core/currentop_cursors.js
+++ b/jstests/core/currentop_cursors.js
@@ -12,8 +12,6 @@
(function() {
"use strict";
const coll = db.jstests_currentop_cursors;
-// Will skip lsid tests if not in commands read mode.
-const commandReadMode = db.getMongo().readMode() == "commands";
load("jstests/libs/fixture_helpers.js"); // for FixtureHelpers
@@ -76,11 +74,8 @@ runTest({
} else {
assert(!result[0].hasOwnProperty("planSummary"), result);
}
- // Lsid will not exist if not in command read mode.
- if (commandReadMode) {
- assert(result[0].lsid.hasOwnProperty('id'), result);
- assert(result[0].lsid.hasOwnProperty('uid'), result);
- }
+ assert(result[0].lsid.hasOwnProperty('id'), result);
+ assert(result[0].lsid.hasOwnProperty('uid'), result);
const uri = new MongoURI(db.getMongo().host);
assert(uri.servers.some((server) => {
return result[0].host == getHostName() + ":" + server.port;
@@ -230,21 +225,18 @@ if (!FixtureHelpers.isMongos(db)) {
}
});
}
-// Test lsid.id value is correct if in commandReadMode.
-if (commandReadMode) {
- const session = db.getMongo().startSession();
- runTest({
- findFunc: function() {
- const sessionDB = session.getDatabase("test");
- return assert
- .commandWorked(
- sessionDB.runCommand({find: "jstests_currentop_cursors", batchSize: 2}))
- .cursor.id;
- },
- assertFunc: function(cursorId, result) {
- assert.eq(result.length, 1, result);
- assert.eq(session.getSessionId().id, result[0].lsid.id);
- }
- });
-}
+// Test lsid.id value is correct.
+const session = db.getMongo().startSession();
+runTest({
+ findFunc: function() {
+ const sessionDB = session.getDatabase("test");
+ return assert
+ .commandWorked(sessionDB.runCommand({find: "jstests_currentop_cursors", batchSize: 2}))
+ .cursor.id;
+ },
+ assertFunc: function(cursorId, result) {
+ assert.eq(result.length, 1, result);
+ assert.eq(session.getSessionId().id, result[0].lsid.id);
+ }
+});
})();
diff --git a/jstests/core/explain_find.js b/jstests/core/explain_find.js
index ecb609d6268..88fecde810f 100644
--- a/jstests/core/explain_find.js
+++ b/jstests/core/explain_find.js
@@ -30,15 +30,6 @@ explain = db.runCommand({
assert.commandWorked(explain);
assert.eq(2, explain.executionStats.nReturned);
-// Compatibility test for the $explain OP_QUERY flag. This can only run if find command is disabled.
-if (!db.getMongo().useReadCommands()) {
- var explain = t.find({$query: {a: 4}, $explain: true}).limit(-1).next();
- assert("queryPlanner" in explain);
- assert("executionStats" in explain);
- assert.eq(1, explain.executionStats.nReturned);
- assert("allPlansExecution" in explain.executionStats);
-}
-
// Invalid verbosity string.
let error = assert.throws(function() {
t.explain("foobar").find().finish();
diff --git a/jstests/core/expr_index_use.js b/jstests/core/expr_index_use.js
index 5d340253c8b..0bd1025514d 100644
--- a/jstests/core/expr_index_use.js
+++ b/jstests/core/expr_index_use.js
@@ -261,13 +261,11 @@ const caseInsensitiveCollation = {
locale: "en_US",
strength: 2
};
-if (db.getMongo().useReadCommands()) {
- confirmExpectedExprExecution({$eq: ["$w", "FoO"]}, {nReturned: 2}, caseInsensitiveCollation);
- confirmExpectedExprExecution({$gt: ["$w", "FoO"]}, {nReturned: 2}, caseInsensitiveCollation);
- confirmExpectedExprExecution({$gte: ["$w", "FoO"]}, {nReturned: 4}, caseInsensitiveCollation);
- confirmExpectedExprExecution({$lt: ["$w", "FoO"]}, {nReturned: 19}, caseInsensitiveCollation);
- confirmExpectedExprExecution({$lte: ["$w", "FoO"]}, {nReturned: 21}, caseInsensitiveCollation);
-}
+confirmExpectedExprExecution({$eq: ["$w", "FoO"]}, {nReturned: 2}, caseInsensitiveCollation);
+confirmExpectedExprExecution({$gt: ["$w", "FoO"]}, {nReturned: 2}, caseInsensitiveCollation);
+confirmExpectedExprExecution({$gte: ["$w", "FoO"]}, {nReturned: 4}, caseInsensitiveCollation);
+confirmExpectedExprExecution({$lt: ["$w", "FoO"]}, {nReturned: 19}, caseInsensitiveCollation);
+confirmExpectedExprExecution({$lte: ["$w", "FoO"]}, {nReturned: 21}, caseInsensitiveCollation);
// Test equality queries against a hashed index.
assert.commandWorked(coll.dropIndex({w: 1}));
diff --git a/jstests/core/fts_mix.js b/jstests/core/fts_mix.js
index 5255c4ffe7c..5942a85ec2c 100644
--- a/jstests/core/fts_mix.js
+++ b/jstests/core/fts_mix.js
@@ -124,29 +124,15 @@ assert(resultsEq(getIDS(res), getIDS(res2)));
assert.throws(function() {
const cursor = tc.find({"$text": {"$search": "member", $language: "spanglish"}});
- if (db.getMongo().readMode() === "legacy") {
- // In legacy read mode, calling next() will check if the response to the OP_QUERY message
- // has an error.
- cursor.next();
- } else {
- // In commands read mode, calling hasNext() will check if the find command returned an
- // error. We intentionally do not call next() to avoid masking errors caused by the cursor
- // exhausting all of its documents.
- cursor.hasNext();
- }
+ // Calling hasNext() will check if the find command returned an error. We intentionally do not
+ // call next() to avoid masking errors caused by the cursor exhausting all of its documents.
+ cursor.hasNext();
});
assert.doesNotThrow(function() {
const cursor = tc.find({"$text": {"$search": "member", $language: "english"}});
- if (db.getMongo().readMode() === "legacy") {
- // In legacy read mode, calling next() will check if the response to the OP_QUERY message
- // has an error.
- cursor.next();
- } else {
- // In commands read mode, calling hasNext() will check if the find command returned an
- // error. We intentionally do not call next() to avoid masking errors caused by the cursor
- // exhausting all of its documents.
- cursor.hasNext();
- }
+ // Calling hasNext() will check if the find command returned an error. We intentionally do not
+ // call next() to avoid masking errors caused by the cursor exhausting all of its documents.
+ cursor.hasNext();
});
// -------------------------------------------- LIMIT RESULTS --------------------------------------
diff --git a/jstests/core/fts_partition1.js b/jstests/core/fts_partition1.js
index ec569a4a355..4a26a3ad629 100644
--- a/jstests/core/fts_partition1.js
+++ b/jstests/core/fts_partition1.js
@@ -12,16 +12,9 @@ t.createIndex({x: 1, y: "text"});
assert.throws(function() {
const cursor = t.find({"$text": {"$search": "foo"}});
- if (db.getMongo().readMode() === "legacy") {
- // In legacy read mode, calling next() will check if the response to the OP_QUERY message
- // has an error.
- cursor.next();
- } else {
- // In commands read mode, calling hasNext() will check if the find command returned an
- // error. We intentionally do not call next() to avoid masking errors caused by the cursor
- // exhausting all of its documents.
- cursor.hasNext();
- }
+ // Calling hasNext() will check if the find command returned an error. We intentionally do not
+ // call next() to avoid masking errors caused by the cursor exhausting all of its documents.
+ cursor.hasNext();
});
assert.eq([1], queryIDS(t, "foo", {x: 1}));
diff --git a/jstests/core/getlog2.js b/jstests/core/getlog2.js
index 0b33ee93a0f..94e18131ce0 100644
--- a/jstests/core/getlog2.js
+++ b/jstests/core/getlog2.js
@@ -62,9 +62,7 @@ assert.gt(query.log.length, 0, "no log lines");
// Ensure that slow query is logged in detail.
assert(contains(query.log, function(v) {
print(v);
- const opString = db.getMongo().useReadCommands() ? " find " : " query ";
- const filterString = db.getMongo().useReadCommands() ? "filter:" : "command:";
- return stringContains(v, opString) && stringContains(v, filterString) &&
+ return stringContains(v, " find ") && stringContains(v, "filter:") &&
stringContains(v, "keysExamined:") && stringContains(v, "docsExamined:") &&
v.indexOf("SENTINEL") != -1;
}));
diff --git a/jstests/core/getmore_invalidated_cursors.js b/jstests/core/getmore_invalidated_cursors.js
index aa9c5ab93e3..4208918c57d 100644
--- a/jstests/core/getmore_invalidated_cursors.js
+++ b/jstests/core/getmore_invalidated_cursors.js
@@ -39,7 +39,6 @@ setupCollection();
const batchSize = (nDocs / FixtureHelpers.numberOfShardsForCollection(coll)) - 1;
const isShardedCollection = coll.stats().sharded;
-const shellReadMode = testDB.getMongo().readMode();
let cursor = coll.find().batchSize(batchSize);
cursor.next(); // Send the query to the server.
@@ -48,13 +47,8 @@ assert.commandWorked(testDB.dropDatabase());
let error = assert.throws(() => cursor.itcount());
-if (testDB.runCommand({isdbgrid: 1}).isdbgrid && shellReadMode == 'legacy') {
- // The cursor will be invalidated on mongos, and we won't be able to find it.
- assert.neq(-1, error.message.indexOf('didn\'t exist on server'), error.message);
-} else {
- assert(kKilledByDropErrorCodes.includes(error.code), tojson(error));
- assert.neq(-1, error.message.indexOf('collection dropped'), error.message);
-}
+assert(kKilledByDropErrorCodes.includes(error.code), tojson(error));
+assert.neq(-1, error.message.indexOf('collection dropped'), error.message);
// Test that dropping the collection between a find and a getMore will return an appropriate
// error code and message.
diff --git a/jstests/core/indexes_multiple_commands.js b/jstests/core/indexes_multiple_commands.js
index a7fbe7f200b..d28734e28d9 100644
--- a/jstests/core/indexes_multiple_commands.js
+++ b/jstests/core/indexes_multiple_commands.js
@@ -142,23 +142,18 @@ assert.commandWorked(coll.insert([{a: "a"}, {a: "A"}, {a: 20}]));
// An ambiguous hint pattern fails.
assert.throws(() => coll.find({a: 1}).hint({a: 1}).itcount());
-if (db.getMongo().useReadCommands()) {
- assert.throws(
- () => coll.find({a: 1}).collation({locale: "en_US", strength: 2}).hint({a: 1}).itcount());
-}
+assert.throws(
+ () => coll.find({a: 1}).collation({locale: "en_US", strength: 2}).hint({a: 1}).itcount());
// Index hint by name succeeds.
assert.eq(coll.find({a: "a"}).hint("sbc").itcount(), 1);
// A hint on an incompatible index does a whole index scan, and then filters using the query
// collation.
assert.eq(coll.find({a: "a"}).hint("caseInsensitive").itcount(), 1);
-if (db.getMongo().useReadCommands()) {
- assert.eq(coll.find({a: "a"}).collation({locale: "en_US", strength: 2}).hint("sbc").itcount(),
- 2);
-
- // A non-ambiguous index hint by key pattern is allowed, even if the collation doesn't
- // match.
- assertIndexesCreated(() => coll.createIndex({b: 1}, {collation: {locale: "fr"}}));
- assert.eq(coll.find({a: "a"}).collation({locale: "en_US"}).hint({b: 1}).itcount(), 1);
-}
+assert.eq(coll.find({a: "a"}).collation({locale: "en_US", strength: 2}).hint("sbc").itcount(), 2);
+
+// A non-ambiguous index hint by key pattern is allowed, even if the collation doesn't
+// match.
+assertIndexesCreated(() => coll.createIndex({b: 1}, {collation: {locale: "fr"}}));
+assert.eq(coll.find({a: "a"}).collation({locale: "en_US"}).hint({b: 1}).itcount(), 1);
})();
diff --git a/jstests/core/insert2.js b/jstests/core/insert2.js
index ca5b1b1013e..b8b291ba250 100644
--- a/jstests/core/insert2.js
+++ b/jstests/core/insert2.js
@@ -13,7 +13,6 @@ if (!isDotsAndDollarsEnabled) {
// Create a new connection object so it won't affect the global connection when we modify
// it's settings.
var conn = new Mongo(db.getMongo().host);
- conn.forceWriteMode(db.getMongo().writeMode());
t = conn.getDB(db.getName()).insert2;
t.drop();
diff --git a/jstests/core/invalidated_legacy_cursors.js b/jstests/core/invalidated_legacy_cursors.js
deleted file mode 100644
index 22198716717..00000000000
--- a/jstests/core/invalidated_legacy_cursors.js
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Test that all DBClientCursor cursor types throw an exception when the server returns
- * CursorNotFound.
- * @tags: [
- * assumes_balancer_off,
- * requires_getmore,
- * requires_non_retryable_commands,
- * ]
- */
-(function() {
-'use strict';
-
-const testDB = db.getSiblingDB("invalidated_legacy_cursors");
-const coll = testDB.test;
-const nDocs = 10;
-const batchSize = 2; // The minimum DBClientCursor batch size is 2.
-
-function setupCollection(isCapped) {
- coll.drop();
- if (isCapped) {
- assert.commandWorked(testDB.createCollection(coll.getName(), {capped: true, size: 4096}));
- }
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs; ++i) {
- bulk.insert({_id: i, x: i});
- }
- assert.commandWorked(bulk.execute());
- assert.commandWorked(coll.createIndex({x: 1}));
-}
-
-function testLegacyCursorThrowsCursorNotFound(isTailable) {
- coll.getMongo().forceReadMode("legacy");
- setupCollection(isTailable);
-
- // Create a cursor and consume the docs in the first batch.
- let cursor = coll.find().batchSize(batchSize);
- if (isTailable) {
- cursor = cursor.tailable();
- }
- for (let i = 0; i < batchSize; i++) {
- cursor.next();
- }
-
- // Kill the cursor and assert that the cursor throws CursorNotFound on the first next() call.
- // Use killCursors instead of cursor.close() since we still want to send getMore requests
- // through the existing cursor.
- assert.commandWorked(
- testDB.runCommand({killCursors: coll.getName(), cursors: [cursor.getId()]}));
- const error = assert.throws(() => cursor.next());
- assert.eq(error.code, ErrorCodes.CursorNotFound);
-
- // Check the state of the cursor.
- assert(!cursor.hasNext());
- assert.eq(0, cursor.getId());
- assert.throws(() => cursor.next());
-}
-
-testLegacyCursorThrowsCursorNotFound(false);
-if (!jsTest.options().mixedBinVersions) {
- testLegacyCursorThrowsCursorNotFound(true);
-}
-}());
diff --git a/jstests/core/json_schema/misc_validation.js b/jstests/core/json_schema/misc_validation.js
index 68f78c6e7c1..bdb9d55aa9b 100644
--- a/jstests/core/json_schema/misc_validation.js
+++ b/jstests/core/json_schema/misc_validation.js
@@ -138,36 +138,34 @@ coll.drop();
assert.commandWorked(coll.insert({a: "str"}));
assert.commandWorked(coll.insert({a: ["STR", "sTr"]}));
-if (testDB.getMongo().useReadCommands()) {
- assert.eq(0, coll.find({$jsonSchema: schema}).collation(caseInsensitiveCollation).itcount());
- assert.eq(2,
- coll.find({$jsonSchema: {properties: {a: {uniqueItems: true}}}})
- .collation(caseInsensitiveCollation)
- .itcount());
- assert.eq(2, coll.find({a: "STR"}).collation(caseInsensitiveCollation).itcount());
-
- // Test that $jsonSchema can be used in a $match stage within a view.
- coll.drop();
- let bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({name: "Peter", age: 65});
- bulk.insert({name: "Paul", age: 105});
- bulk.insert({name: "Mary", age: 10});
- bulk.insert({name: "John", age: "unknown"});
- bulk.insert({name: "Mark"});
- bulk.insert({});
- assert.commandWorked(bulk.execute());
-
- assert.commandWorked(testDB.createView(
- "seniorCitizens", coll.getName(), [{
- $match: {
- $jsonSchema: {
- required: ["name", "age"],
- properties: {name: {type: "string"}, age: {type: "number", minimum: 65}}
- }
+assert.eq(0, coll.find({$jsonSchema: schema}).collation(caseInsensitiveCollation).itcount());
+assert.eq(2,
+ coll.find({$jsonSchema: {properties: {a: {uniqueItems: true}}}})
+ .collation(caseInsensitiveCollation)
+ .itcount());
+assert.eq(2, coll.find({a: "STR"}).collation(caseInsensitiveCollation).itcount());
+
+// Test that $jsonSchema can be used in a $match stage within a view.
+coll.drop();
+let bulk = coll.initializeUnorderedBulkOp();
+bulk.insert({name: "Peter", age: 65});
+bulk.insert({name: "Paul", age: 105});
+bulk.insert({name: "Mary", age: 10});
+bulk.insert({name: "John", age: "unknown"});
+bulk.insert({name: "Mark"});
+bulk.insert({});
+assert.commandWorked(bulk.execute());
+
+assert.commandWorked(testDB.createView(
+ "seniorCitizens", coll.getName(), [{
+ $match: {
+ $jsonSchema: {
+ required: ["name", "age"],
+ properties: {name: {type: "string"}, age: {type: "number", minimum: 65}}
}
- }]));
- assert.eq(2, testDB.seniorCitizens.find().itcount());
-}
+ }
+ }]));
+assert.eq(2, testDB.seniorCitizens.find().itcount());
// Test that $jsonSchema can be used in the listCollections filter.
res = testDB.runCommand(
@@ -220,18 +218,9 @@ assert.eq(2, res.deletedCount);
assert.eq(0, coll.find({$jsonSchema: schema}).itcount());
// Test that $jsonSchema does not respect the collation specified in a delete command.
-if (db.getMongo().writeMode() === "commands") {
- res = coll.deleteMany({$jsonSchema: {properties: {a: {enum: ["STR"]}}}},
- {collation: caseInsensitiveCollation});
- assert.eq(0, res.deletedCount);
-} else {
- res = testDB.runCommand({
- delete: coll.getName(),
- deletes: [{q: {$jsonSchema: {properties: {a: {enum: ["STR"]}}}}}],
- collation: caseInsensitiveCollation,
- });
- assert.eq(res.deletedCount);
-}
+res = coll.deleteMany({$jsonSchema: {properties: {a: {enum: ["STR"]}}}},
+ {collation: caseInsensitiveCollation});
+assert.eq(0, res.deletedCount);
// Test that $jsonSchema is legal in an update command.
coll.drop();
diff --git a/jstests/core/plan_cache_sbe.js b/jstests/core/plan_cache_sbe.js
index ec6ac2c3fc9..04284591a1c 100644
--- a/jstests/core/plan_cache_sbe.js
+++ b/jstests/core/plan_cache_sbe.js
@@ -25,7 +25,7 @@ load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const coll = db.plan_cache_sbe;
coll.drop();
-const isSBECompat = checkSBECompatible(db);
+const isSBEEnabled = checkSBEEnabled(db);
assert.commandWorked(coll.insert({a: 1, b: 1}));
// We need two indexes so that the multi-planner is executed.
@@ -40,6 +40,6 @@ const allStats = coll.aggregate([{$planCacheStats: {}}]).toArray();
assert.eq(allStats.length, 1, allStats);
const stats = allStats[0];
assert(stats.hasOwnProperty("cachedPlan"), stats);
-assert.eq(stats.cachedPlan.hasOwnProperty("queryPlan"), isSBECompat, stats);
-assert.eq(stats.cachedPlan.hasOwnProperty("slotBasedPlan"), isSBECompat, stats);
+assert.eq(stats.cachedPlan.hasOwnProperty("queryPlan"), isSBEEnabled, stats);
+assert.eq(stats.cachedPlan.hasOwnProperty("slotBasedPlan"), isSBEEnabled, stats);
})();
diff --git a/jstests/core/profile_find.js b/jstests/core/profile_find.js
index 5ede8af5f93..a4d3676646c 100644
--- a/jstests/core/profile_find.js
+++ b/jstests/core/profile_find.js
@@ -14,7 +14,6 @@ load("jstests/libs/profiler.js");
var testDB = db.getSiblingDB("profile_find");
assert.commandWorked(testDB.dropDatabase());
var coll = testDB.getCollection("test");
-var isLegacyReadMode = (testDB.getMongo().readMode() === "legacy");
testDB.setProfilingLevel(2);
const profileEntryFilter = {
@@ -30,11 +29,7 @@ for (i = 0; i < 3; ++i) {
}
assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr"}}));
-if (!isLegacyReadMode) {
- assert.eq(coll.find({a: 1}).collation({locale: "fr"}).limit(1).itcount(), 1);
-} else {
- assert.neq(coll.findOne({a: 1}), null);
-}
+assert.eq(coll.find({a: 1}).collation({locale: "fr"}).limit(1).itcount(), 1);
var profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
@@ -45,17 +40,10 @@ assert.eq(profileObj.nreturned, 1, profileObj);
assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", profileObj);
assert(profileObj.execStats.hasOwnProperty("stage"), profileObj);
assert.eq(profileObj.command.filter, {a: 1}, profileObj);
-if (isLegacyReadMode) {
- assert.eq(profileObj.command.ntoreturn, -1, profileObj);
-} else {
- assert.eq(profileObj.command.limit, 1, profileObj);
- assert.eq(
- profileObj.protocol, getProfilerProtocolStringForCommand(testDB.getMongo()), profileObj);
-}
+assert.eq(profileObj.command.limit, 1, profileObj);
+assert.eq(profileObj.protocol, getProfilerProtocolStringForCommand(testDB.getMongo()), profileObj);
-if (!isLegacyReadMode) {
- assert.eq(profileObj.command.collation, {locale: "fr"});
-}
+assert.eq(profileObj.command.collation, {locale: "fr"});
assert.eq(profileObj.cursorExhausted, true, profileObj);
assert(!profileObj.hasOwnProperty("cursorid"), profileObj);
assert(profileObj.hasOwnProperty("responseLength"), profileObj);
diff --git a/jstests/core/shell_writeconcern.js b/jstests/core/shell_writeconcern.js
index b54c15bb5fc..fb30601fbcd 100644
--- a/jstests/core/shell_writeconcern.js
+++ b/jstests/core/shell_writeconcern.js
@@ -34,46 +34,22 @@ assert.eq(undefined, db.getWriteConcern());
// test methods, by generating an error
var res = assert.commandWorked(collA.save({_id: 1}, {writeConcern: {w: 1}}));
-if (!db.getMongo().useWriteCommands()) {
- assert.eq(1, res.n, tojson(res));
- assert.eq(1, res.upserted, tojson(res));
-} else {
- assert.eq(1, res.nUpserted, tojson(res));
-}
+assert.eq(1, res.nUpserted, tojson(res));
var res = assert.commandWorked(collA.update({_id: 1}, {_id: 1}, {writeConcern: {w: 1}}));
-if (!db.getMongo().useWriteCommands()) {
- assert.eq(1, res.n, tojson(res));
-} else {
- assert.eq(1, res.nMatched, tojson(res));
-}
+assert.eq(1, res.nMatched, tojson(res));
+
var res = assert.commandWorked(collA.update({_id: 1}, {_id: 1}, {writeConcern: {w: 1}}));
-if (!db.getMongo().useWriteCommands()) {
- assert.eq(1, res.n, tojson(res));
-} else {
- assert.eq(1, res.nMatched, tojson(res));
-}
+assert.eq(1, res.nMatched, tojson(res));
var res = assert.commandWorked(collA.insert({_id: 2}, {writeConcern: {w: 1}}));
-if (!db.getMongo().useWriteCommands()) {
- assert.eq(0, res.n, tojson(res));
-} else {
- assert.eq(1, res.nInserted, tojson(res));
-}
+assert.eq(1, res.nInserted, tojson(res));
var res = assert.commandWorked(collA.remove({_id: 3}, {writeConcern: {w: 1}}));
-if (!db.getMongo().useWriteCommands()) {
- assert.eq(0, res.n, tojson(res));
-} else {
- assert.eq(0, res.nRemoved, tojson(res));
-}
+assert.eq(0, res.nRemoved, tojson(res));
var res = assert.commandWorked(collA.remove({_id: 1}, {writeConcern: {w: 1}}));
-if (!db.getMongo().useWriteCommands()) {
- assert.eq(1, res.n, tojson(res));
-} else {
- assert.eq(1, res.nRemoved, tojson(res));
-}
+assert.eq(1, res.nRemoved, tojson(res));
// Test ordered write concern, and that the write concern isn't run/error.
assert.commandWorked(collA.insert({_id: 1}));
diff --git a/jstests/core/skip1.js b/jstests/core/skip1.js
index 3b99e6b4cdd..3a853b11aa0 100644
--- a/jstests/core/skip1.js
+++ b/jstests/core/skip1.js
@@ -3,18 +3,6 @@
var t = db.jstests_skip1;
-if (0) { // SERVER-2845
- t.drop();
-
- t.createIndex({a: 1});
- t.save({a: 5});
- t.save({a: 5});
- t.save({a: 5});
-
- assert.eq(3, t.find({a: 5}).skip(2).explain().nscanned);
- assert.eq(1, t.find({a: 5}).skip(2).explain().nscannedObjects);
-}
-
// SERVER-13537: Ensure that combinations of skip and limit don't crash
// the server due to overflow.
t.drop();
@@ -23,14 +11,3 @@ for (var i = 0; i < 10; i++) {
}
assert.eq(9, t.find().sort({a: 1}).limit(2147483647).skip(1).itcount());
assert.eq(0, t.find().sort({a: 1}).skip(2147483647).limit(1).itcount());
-
-if (!db.getMongo().useReadCommands()) {
- // If we're using OP_QUERY/OP_GET_MORE reads rather than find/getMore command, then the skip and
- // limit fields must fit inside a 32-bit signed integer.
- assert.throws(function() {
- assert.eq(0, t.find().sort({a: 1}).skip(2147483648).itcount());
- });
- assert.throws(function() {
- assert.eq(0, t.find().sort({a: 1}).limit(2147483648).itcount());
- });
-}
diff --git a/jstests/core/sort1.js b/jstests/core/sort1.js
index 924ac9986b0..ec713e189e5 100644
--- a/jstests/core/sort1.js
+++ b/jstests/core/sort1.js
@@ -40,30 +40,26 @@ assert(coll.validate().valid);
// Ensure that sorts with a collation and no index return the correct ordering. Here we use the
// 'numericOrdering' option which orders number-like strings by their numerical values.
-if (db.getMongo().useReadCommands()) {
- coll.drop();
- assert.commandWorked(coll.insert({_id: 0, str: '1000'}));
- assert.commandWorked(coll.insert({_id: 1, str: '5'}));
- assert.commandWorked(coll.insert({_id: 2, str: '200'}));
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0, str: '1000'}));
+assert.commandWorked(coll.insert({_id: 1, str: '5'}));
+assert.commandWorked(coll.insert({_id: 2, str: '200'}));
- var cursor = coll.find().sort({str: -1}).collation({locale: 'en_US', numericOrdering: true});
- assert.eq(cursor.next(), {_id: 0, str: '1000'});
- assert.eq(cursor.next(), {_id: 2, str: '200'});
- assert.eq(cursor.next(), {_id: 1, str: '5'});
- assert(!cursor.hasNext());
-}
+var cursor = coll.find().sort({str: -1}).collation({locale: 'en_US', numericOrdering: true});
+assert.eq(cursor.next(), {_id: 0, str: '1000'});
+assert.eq(cursor.next(), {_id: 2, str: '200'});
+assert.eq(cursor.next(), {_id: 1, str: '5'});
+assert(!cursor.hasNext());
// Ensure that sorting of arrays correctly respects a collation with numeric ordering.
-if (db.getMongo().useReadCommands()) {
- coll.drop();
- assert.commandWorked(coll.insert({_id: 0, strs: ['1000', '500']}));
- assert.commandWorked(coll.insert({_id: 1, strs: ['2000', '60']}));
- cursor = coll.find({strs: {$lt: '1000'}}).sort({strs: 1}).collation({
- locale: 'en_US',
- numericOrdering: true
- });
- assert.eq(cursor.next(), {_id: 1, strs: ['2000', '60']});
- assert.eq(cursor.next(), {_id: 0, strs: ['1000', '500']});
- assert(!cursor.hasNext());
-}
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0, strs: ['1000', '500']}));
+assert.commandWorked(coll.insert({_id: 1, strs: ['2000', '60']}));
+cursor = coll.find({strs: {$lt: '1000'}}).sort({strs: 1}).collation({
+ locale: 'en_US',
+ numericOrdering: true
+});
+assert.eq(cursor.next(), {_id: 1, strs: ['2000', '60']});
+assert.eq(cursor.next(), {_id: 0, strs: ['1000', '500']});
+assert(!cursor.hasNext());
})();
diff --git a/jstests/core/tailable_cursor_legacy_read_mode.js b/jstests/core/tailable_cursor_legacy_read_mode.js
deleted file mode 100644
index 9f8d0e66d8a..00000000000
--- a/jstests/core/tailable_cursor_legacy_read_mode.js
+++ /dev/null
@@ -1,28 +0,0 @@
-// SERVER-54410: Test that tailable cursors do not throw with legacy read mode.
-// @tags: [
-// requires_capped,
-// # This disables all sharding and replica set tests that require usage of sessions. Legacy
-// # queries cannot be used in sessions.
-// assumes_standalone_mongod,
-// ]
-(function() {
-"use strict";
-
-const collName = "tailable_cursor_legacy_read_mode";
-const coll = db[collName];
-coll.drop();
-
-assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024}));
-
-const mongo = db.getMongo();
-const oldReadMode = mongo.readMode();
-try {
- mongo.forceReadMode("legacy");
-
- assert.commandWorked(coll.insert({a: 1}));
- const results = coll.find({}, {_id: 0}).tailable().toArray();
- assert.eq(results, [{a: 1}]);
-} finally {
- mongo.forceReadMode(oldReadMode);
-}
-})();
diff --git a/jstests/core/tailable_skip_limit.js b/jstests/core/tailable_skip_limit.js
index 9f5ea9bf227..04e3a85fd11 100644
--- a/jstests/core/tailable_skip_limit.js
+++ b/jstests/core/tailable_skip_limit.js
@@ -67,16 +67,8 @@ assert.throws(function() {
t.find().addOption(2).limit(-1).itcount();
});
-// When using read commands, a limit of 1 with the tailable option is allowed. In legacy
-// readMode, an ntoreturn of 1 means the same thing as ntoreturn -1 and is disallowed with
-// tailable.
-if (db.getMongo().useReadCommands()) {
- assert.eq(1, t.find().addOption(2).limit(1).itcount());
-} else {
- assert.throws(function() {
- t.find().addOption(2).limit(1).itcount();
- });
-}
+// A limit of 1 with the tailable option is allowed.
+assert.eq(1, t.find().addOption(2).limit(1).itcount());
// Tests that a tailable cursor over an empty capped collection produces a dead cursor, intended
// to be run on both mongod and mongos. For SERVER-20720.
diff --git a/jstests/core/type4.js b/jstests/core/type4.js
index 7f3adf6645c..c7e11110618 100644
--- a/jstests/core/type4.js
+++ b/jstests/core/type4.js
@@ -11,8 +11,6 @@ t.insert({});
t.insert({});
t.insert({});
-var oldReadMode = db.getMongo().readMode();
-
assert.throws(function() {
(new _rand())();
}, [], "invoke constructor on natively injected function");
@@ -23,20 +21,9 @@ assert.throws(function() {
}, [], "invoke constructor on BSON");
assert.throws(function() {
- db.getMongo().forceReadMode("commands");
var cursor = t.find();
cursor.next();
new cursor._cursor._cursorHandle();
}, [], "invoke constructor on CursorHandle");
-
-assert.throws(function() {
- db.getMongo().forceReadMode("legacy");
- var cursor = t.find();
- cursor.next();
-
- new cursor._cursor();
-}, [], "invoke constructor on Cursor");
-
-db.getMongo().forceReadMode(oldReadMode);
})();
diff --git a/jstests/core/wildcard_index_cached_plans.js b/jstests/core/wildcard_index_cached_plans.js
index fd21158a77f..50c59865377 100644
--- a/jstests/core/wildcard_index_cached_plans.js
+++ b/jstests/core/wildcard_index_cached_plans.js
@@ -87,12 +87,12 @@ assert.eq(cacheEntry.isActive, true);
// Should be at least two plans: one using the {a: 1} index and the other using the b.$** index.
assert.gte(cacheEntry.creationExecStats.length, 2, tojson(cacheEntry.plans));
-const isSBECompat = checkSBECompatible(db);
+const isSBEEnabled = checkSBEEnabled(db);
// In SBE index scan stage does not serialize key pattern in execution stats, so we use IXSCAN from
// the query plan instead.
-const plan =
- isSBECompat ? cacheEntry.cachedPlan.queryPlan : cacheEntry.creationExecStats[0].executionStages;
+const plan = isSBEEnabled ? cacheEntry.cachedPlan.queryPlan
+ : cacheEntry.creationExecStats[0].executionStages;
const ixScanStage = getPlanStage(plan, "IXSCAN");
assert.neq(ixScanStage, null, () => tojson(plan));
assert.eq(ixScanStage.keyPattern, {"$_path": 1, "b": 1}, () => tojson(plan));
diff --git a/jstests/core/write_result.js b/jstests/core/write_result.js
index 37bbc2b58bc..78f635cc3e7 100644
--- a/jstests/core/write_result.js
+++ b/jstests/core/write_result.js
@@ -17,8 +17,6 @@
var coll = db.write_result;
coll.drop();
-assert(coll.getDB().getMongo().useWriteCommands(), "test is not running with write commands");
-
var result = null;
//
diff --git a/jstests/libs/override_methods/check_indexes_consistent_across_cluster.js b/jstests/libs/override_methods/check_indexes_consistent_across_cluster.js
index e93e683195e..e908981e5a6 100644
--- a/jstests/libs/override_methods/check_indexes_consistent_across_cluster.js
+++ b/jstests/libs/override_methods/check_indexes_consistent_across_cluster.js
@@ -18,7 +18,6 @@ ShardingTest.prototype.checkIndexesConsistentAcrossCluster = function() {
const mongos = new Mongo(this.s.host);
mongos.fullOptions = this.s.fullOptions || {};
- mongos.forceReadMode("commands");
mongos.setReadPref("primary");
const keyFile = this.keyFile;
diff --git a/jstests/libs/sbe_util.js b/jstests/libs/sbe_util.js
index 8728fabf191..89cf6af7aef 100644
--- a/jstests/libs/sbe_util.js
+++ b/jstests/libs/sbe_util.js
@@ -48,18 +48,3 @@ function checkSBEEnabled(theDB) {
return checkResult;
}
-
-/**
- * Returns whether queries will run with SBE or not. This is distinct from determining whether SBE
- * is enabled because queries run using the legacy read mode will not use SBE even if it is
- * enabled.
- */
-function checkSBECompatible(theDB) {
- if (!checkSBEEnabled(theDB)) {
- return false;
- }
-
- // We can use SBE if we're not using legacy reads, or if we're connected to mongos (which will
- // always use read commands against the shards).
- return theDB.getMongo().readMode() != "legacy" || FixtureHelpers.isMongos(theDB);
-}
diff --git a/jstests/multiVersion/minor_version_tags_new_old_new.js b/jstests/multiVersion/minor_version_tags_new_old_new.js
index 283380637ca..e3c0a051cb7 100644
--- a/jstests/multiVersion/minor_version_tags_new_old_new.js
+++ b/jstests/multiVersion/minor_version_tags_new_old_new.js
@@ -12,5 +12,5 @@ let nodes = [
{binVersion: oldVersion},
{binVersion: newVersion}
];
-new TagsTest({nodes: nodes, forceWriteMode: 'commands'}).run();
+new TagsTest({nodes: nodes}).run();
}());
diff --git a/jstests/multiVersion/minor_version_tags_old_new_old.js b/jstests/multiVersion/minor_version_tags_old_new_old.js
index 02649e9ed87..9aeeaf216de 100644
--- a/jstests/multiVersion/minor_version_tags_old_new_old.js
+++ b/jstests/multiVersion/minor_version_tags_old_new_old.js
@@ -12,5 +12,5 @@ let nodes = [
{binVersion: newVersion},
{binVersion: oldVersion}
];
-new TagsTest({nodes: nodes, forceWriteMode: 'commands'}).run();
+new TagsTest({nodes: nodes}).run();
}());
diff --git a/jstests/noPassthrough/currentop_active_cursor.js b/jstests/noPassthrough/currentop_active_cursor.js
index bbb72faaca8..1b3849c3d1b 100644
--- a/jstests/noPassthrough/currentop_active_cursor.js
+++ b/jstests/noPassthrough/currentop_active_cursor.js
@@ -48,28 +48,6 @@ withPinnedCursor({
failPointName: failPointName,
assertEndCounts: true
});
-
-// Test OP_GET_MORE (legacy read mode) against a mongod.
-failPointName = "waitWithPinnedCursorDuringGetMoreBatch";
-const db = conn.getDB("test");
-db.getMongo().forceReadMode("legacy");
-withPinnedCursor({
- conn: conn,
- sessionId: null,
- db: db,
- assertFunction: runTest,
- runGetMoreFunc: function() {
- db.getMongo().forceReadMode("legacy");
- let cmdRes = {
- "cursor": {"firstBatch": [], "id": cursorId, "ns": db.jstest_with_pinned_cursor},
- "ok": 1
- };
- let cursor = new DBCommandCursor(db, cmdRes, 2);
- cursor.itcount();
- },
- failPointName: failPointName,
- assertEndCounts: true
-});
MongoRunner.stopMongod(conn);
// Sharded test
@@ -87,24 +65,5 @@ withPinnedCursor({
failPointName: failPointName,
assertEndCounts: true
});
-
-// Test OP_GET_MORE (legacy reead mode) against a mongos.
-withPinnedCursor({
- conn: st.s,
- sessionId: null,
- db: st.s.getDB("test"),
- assertFunction: runTest,
- runGetMoreFunc: function() {
- db.getMongo().forceReadMode("legacy");
- let cmdRes = {
- "cursor": {"firstBatch": [], "id": cursorId, "ns": db.jstest_with_pinned_cursor},
- "ok": 1
- };
- let cursor = new DBCommandCursor(db, cmdRes, 2);
- cursor.itcount();
- },
- failPointName: failPointName,
- assertEndCounts: true
-});
st.stop();
})();
diff --git a/jstests/noPassthrough/currentop_query.js b/jstests/noPassthrough/currentop_query.js
index d4d7aab81c4..21003bed329 100644
--- a/jstests/noPassthrough/currentop_query.js
+++ b/jstests/noPassthrough/currentop_query.js
@@ -55,9 +55,6 @@ function dropAndRecreateTestCollection() {
/**
* @param {connection} conn - The connection through which to run the test suite.
- * @param {string} readMode - The read mode to use for the parallel shell. This allows
- * testing currentOp() output for both OP_QUERY and OP_GET_MORE queries, as well as "find" and
- * "getMore" commands.
* @params {function} currentOp - Function which takes a database object and a filter, and
* returns an array of matching current operations. This allows us to test output for both the
* currentOp command and the $currentOp aggregation stage.
@@ -68,7 +65,7 @@ function dropAndRecreateTestCollection() {
* @params {boolean} localOps - if true, we expect currentOp to return operations running on a
* mongoS itself rather than on the shards.
*/
-function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
+function runTests({conn, currentOp, truncatedOps, localOps}) {
const testDB = conn.getDB("currentop_query");
const coll = testDB.currentop_query;
dropAndRecreateTestCollection();
@@ -123,15 +120,13 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
});
// Set the test configuration in TestData for the parallel shell test.
- TestData.shellReadMode = readMode;
TestData.currentOpTest = testObj.test;
TestData.currentOpCollName = "currentop_query";
- // Wrapper function which sets the readMode and DB before running the test function
+    // Wrapper function which sets the DB before running the test function
// found at TestData.currentOpTest.
function doTest() {
const testDB = db.getSiblingDB(TestData.currentOpCollName);
- testDB.getMongo().forceReadMode(TestData.shellReadMode);
TestData.currentOpTest(testDB);
}
@@ -188,7 +183,6 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
awaitShell();
delete TestData.currentOpCollName;
delete TestData.currentOpTest;
- delete TestData.shellReadMode;
}
/**
@@ -339,21 +333,19 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
//
// Confirm currentOp contains collation for find command.
//
- if (readMode === "commands") {
- confirmCurrentOpContents({
- test: function(db) {
- assert.eq(db.currentop_query.find({a: 1})
- .comment("currentop_query")
- .collation({locale: "fr"})
- .itcount(),
- 1);
- },
- command: "find",
- planSummary: "COLLSCAN",
- currentOpFilter:
- {"command.comment": "currentop_query", "command.collation.locale": "fr"}
- });
- }
+ confirmCurrentOpContents({
+ test: function(db) {
+ assert.eq(db.currentop_query.find({a: 1})
+ .comment("currentop_query")
+ .collation({locale: "fr"})
+ .itcount(),
+ 1);
+ },
+ command: "find",
+ planSummary: "COLLSCAN",
+ currentOpFilter:
+ {"command.comment": "currentop_query", "command.collation.locale": "fr"}
+ });
//
// Confirm currentOp content for the $geoNear aggregation stage.
@@ -439,76 +431,6 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
delete TestData.commandResult;
}
-
- //
- // Confirm that currentOp displays upconverted getMore and originatingCommand in the
- // case of a legacy query.
- //
- if (readMode === "legacy") {
- let filter = {
- "command.getMore": {$gt: 0},
- "command.collection": "currentop_query",
- "command.batchSize": 2,
- "cursor.originatingCommand.find": "currentop_query",
- "cursor.originatingCommand.ntoreturn": 2,
- "cursor.originatingCommand.comment": "currentop_query"
- };
-
- confirmCurrentOpContents({
- test: function(db) {
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
- // Temporarily disable hanging yields so that we can iterate the first
- // batch.
- FixtureHelpers.runCommandOnEachPrimary({
- db: db.getSiblingDB("admin"),
- cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
- });
-
- let cursor =
- db.currentop_query.find({}).comment("currentop_query").batchSize(2);
-
- // Exhaust the current batch so that the next request will force a getMore.
- while (cursor.objsLeftInBatch() > 0) {
- cursor.next();
- }
-
- // Set yields to hang so that we can check currentOp output.
- FixtureHelpers.runCommandOnEachPrimary({
- db: db.getSiblingDB("admin"),
- cmdObj: {
- configureFailPoint: "setYieldAllLocksHang",
- mode: "alwaysOn",
- data: {namespace: db.currentop_query.getFullName()}
- }
- });
-
- assert.eq(cursor.itcount(), 8);
- },
- operation: "getmore",
- planSummary: "COLLSCAN",
- currentOpFilter: filter
- });
- }
-
- //
- // Confirm that a legacy query whose filter contains a field named 'query' appears as
- // expected in currentOp. This test ensures that upconverting a legacy query correctly
- // identifies this as a user field rather than a wrapped filter spec.
- //
- if (readMode === "legacy") {
- confirmCurrentOpContents({
- test: function(db) {
- assert.eq(db.currentop_query.find({query: "foo", $comment: "currentop_query"})
- .itcount(),
- 0);
- },
- command: "find",
- planSummary: "COLLSCAN",
- currentOpFilter:
- {"command.filter.$comment": "currentop_query", "command.filter.query": "foo"}
- });
- }
}
/**
@@ -657,27 +579,23 @@ function currentOpAgg(inputDB, filter, truncatedOps, localOps) {
}
for (let connType of [rsConn, mongosConn]) {
- for (let readMode of ["commands", "legacy"]) {
- for (let truncatedOps of [false, true]) {
- for (let localOps of [false, true]) {
- // Run all tests using the $currentOp aggregation stage.
- runTests({
- conn: connType,
- readMode: readMode,
- currentOp: currentOpAgg,
- localOps: localOps,
- truncatedOps: truncatedOps
- });
- }
- // Run tests using the currentOp command. The 'localOps' parameter is not supported.
+ for (let truncatedOps of [false, true]) {
+ for (let localOps of [false, true]) {
+ // Run all tests using the $currentOp aggregation stage.
runTests({
conn: connType,
- readMode: readMode,
- currentOp: currentOpCommand,
- localOps: false,
+ currentOp: currentOpAgg,
+ localOps: localOps,
truncatedOps: truncatedOps
});
}
+ // Run tests using the currentOp command. The 'localOps' parameter is not supported.
+ runTests({
+ conn: connType,
+ currentOp: currentOpCommand,
+ localOps: false,
+ truncatedOps: truncatedOps
+ });
}
}
diff --git a/jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js b/jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js
index 047cb2f2983..5eb00416f86 100644
--- a/jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js
+++ b/jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js
@@ -1,5 +1,5 @@
/**
- * Test that 'failGetMoreAfterCursorCheckout' works in both legacy and command read modes.
+ * Test that 'failGetMoreAfterCursorCheckout' works.
* @tags: [requires_replication, requires_journaling]
*/
(function() {
@@ -17,34 +17,28 @@ for (let i = 0; i < 10; ++i) {
assert.commandWorked(coll.insert({_id: i}));
}
-// Run the test for both 'commands' and 'legacy' read modes.
-for (let readMode of ["commands", "legacy"]) {
- // Set the appropriate read mode for this test case.
- testDB.getMongo().forceReadMode(readMode);
-
- // Perform the test for both 'find' and 'aggregate' cursors.
- for (let testCursor of [coll.find({}).sort({_id: 1}).batchSize(2),
- coll.aggregate([{$sort: {_id: 1}}], {cursor: {batchSize: 2}})]) {
- // Activate the failpoint and set the exception that it will throw.
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failGetMoreAfterCursorCheckout",
- mode: "alwaysOn",
- data: {"errorCode": ErrorCodes.ShutdownInProgress}
- }));
-
- // Consume the documents from the first batch, leaving the cursor open.
- assert.docEq(testCursor.next(), {_id: 0});
- assert.docEq(testCursor.next(), {_id: 1});
- assert.eq(testCursor.objsLeftInBatch(), 0);
-
- // Issue a getMore and confirm that the failpoint throws the expected exception.
- const getMoreRes = assert.throws(() => testCursor.hasNext() && testCursor.next());
- assert.commandFailedWithCode(getMoreRes, ErrorCodes.ShutdownInProgress);
-
- // Disable the failpoint.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "failGetMoreAfterCursorCheckout", mode: "off"}));
- }
+// Perform the test for both 'find' and 'aggregate' cursors.
+for (let testCursor of [coll.find({}).sort({_id: 1}).batchSize(2),
+ coll.aggregate([{$sort: {_id: 1}}], {cursor: {batchSize: 2}})]) {
+ // Activate the failpoint and set the exception that it will throw.
+ assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failGetMoreAfterCursorCheckout",
+ mode: "alwaysOn",
+ data: {"errorCode": ErrorCodes.ShutdownInProgress}
+ }));
+
+ // Consume the documents from the first batch, leaving the cursor open.
+ assert.docEq(testCursor.next(), {_id: 0});
+ assert.docEq(testCursor.next(), {_id: 1});
+ assert.eq(testCursor.objsLeftInBatch(), 0);
+
+ // Issue a getMore and confirm that the failpoint throws the expected exception.
+ const getMoreRes = assert.throws(() => testCursor.hasNext() && testCursor.next());
+ assert.commandFailedWithCode(getMoreRes, ErrorCodes.ShutdownInProgress);
+
+ // Disable the failpoint.
+ assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "failGetMoreAfterCursorCheckout", mode: "off"}));
}
rst.stopSet();
diff --git a/jstests/noPassthrough/latency_includes_lock_acquisition_time.js b/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
index fa59f6e39d5..4fc0743b468 100644
--- a/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
+++ b/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
@@ -36,140 +36,120 @@ assert.neq(null, conn, "mongod was unable to start up");
let testDB = conn.getDB("test");
let testColl = testDB.lock_acquisition_time;
-function runTests() {
- // Profile all operations.
- assert.commandWorked(testDB.setProfilingLevel(0));
- testDB.system.profile.drop();
- assert.commandWorked(testDB.setProfilingLevel(2));
+// Profile all operations.
+assert.commandWorked(testDB.setProfilingLevel(0));
+testDB.system.profile.drop();
+assert.commandWorked(testDB.setProfilingLevel(2));
- // Test that insert profiler/logs include lock acquisition time. Rather than parsing the log
- // lines, we are just verifying that the log line appears, which implies that the recorded
- // latency exceeds slowms.
- runWithWait(hangMillis, function() {
- assert.commandWorked(testColl.insert({a: 1}));
- });
- let profileEntry;
- if (conn.writeMode() === "commands") {
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.insert": testColl.getName(),
- });
- } else {
- profileEntry = getLatestProfilerEntry(testDB, {
- op: "insert",
- ns: testColl.getFullName(),
- });
- }
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(
- conn,
- !isJsonLog(conn)
- ? (conn.writeMode() === "commands" ? "insert { insert: \"lock_acquisition_time\""
- : "insert test.lock_acquisition_time")
- : (conn.writeMode() === "commands"
- ? /"ns":"test.lock_acquisition_time".*"command":{"insert"/
- : /"type":"insert","ns":"test.lock_acquisition_time/));
+// Test that insert profiler/logs include lock acquisition time. Rather than parsing the log
+// lines, we are just verifying that the log line appears, which implies that the recorded
+// latency exceeds slowms.
+runWithWait(hangMillis, function() {
+ assert.commandWorked(testColl.insert({a: 1}));
+});
+let profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.insert": testColl.getName(),
+});
+assert.gte(profileEntry.millis, hangMillis - padding);
+checkLog.contains(conn,
+ !isJsonLog(conn) ? "insert { insert: \"lock_acquisition_time\""
+ : /"ns":"test.lock_acquisition_time".*"command":{"insert"/);
- // Test that update profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.commandWorked(testColl.update({}, {$set: {b: 1}}));
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.u": {$eq: {$set: {b: 1}}},
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn,
- !isJsonLog(conn)
- ? "update { update: \"lock_acquisition_time\""
- : /"ns":"test.\$cmd".*"command":{"update":"lock_acquisition_time"/);
+// Test that update profiler/logs include lock acquisition time.
+runWithWait(hangMillis, function() {
+ assert.commandWorked(testColl.update({}, {$set: {b: 1}}));
+});
+profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.u": {$eq: {$set: {b: 1}}},
+});
+assert.gte(profileEntry.millis, hangMillis - padding);
+checkLog.contains(conn,
+ !isJsonLog(conn)
+ ? "update { update: \"lock_acquisition_time\""
+ : /"ns":"test.\$cmd".*"command":{"update":"lock_acquisition_time"/);
- // Test that find profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.eq(1, testColl.find({b: 1}).itcount());
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.find": testColl.getName(),
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn,
- !isJsonLog(conn) ? "find { find: \"lock_acquisition_time\""
- : '"command":{"find":"lock_acquisition_time"');
+// Test that find profiler/logs include lock acquisition time.
+runWithWait(hangMillis, function() {
+ assert.eq(1, testColl.find({b: 1}).itcount());
+});
+profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.find": testColl.getName(),
+});
+assert.gte(profileEntry.millis, hangMillis - padding);
+checkLog.contains(conn,
+ !isJsonLog(conn) ? "find { find: \"lock_acquisition_time\""
+ : '"command":{"find":"lock_acquisition_time"');
- // Test that getMore profiler/logs include lock acquisition time.
- assert.commandWorked(testColl.insert([{a: 2}, {a: 3}]));
- runWithWait(hangMillis, function() {
- // Include a batchSize in order to ensure that a getMore is issued.
- assert.eq(3, testColl.find().batchSize(2).itcount());
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.getMore": {$exists: true},
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn,
- !isJsonLog(conn) ? "originatingCommand: { find: \"lock_acquisition_time\""
- : '"originatingCommand":{"find":"lock_acquisition_time"');
- assert.commandWorked(testColl.remove({a: {$gt: 1}}));
+// Test that getMore profiler/logs include lock acquisition time.
+assert.commandWorked(testColl.insert([{a: 2}, {a: 3}]));
+runWithWait(hangMillis, function() {
+ // Include a batchSize in order to ensure that a getMore is issued.
+ assert.eq(3, testColl.find().batchSize(2).itcount());
+});
+profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.getMore": {$exists: true},
+});
+assert.gte(profileEntry.millis, hangMillis - padding);
+checkLog.contains(conn,
+ !isJsonLog(conn) ? "originatingCommand: { find: \"lock_acquisition_time\""
+ : '"originatingCommand":{"find":"lock_acquisition_time"');
+assert.commandWorked(testColl.remove({a: {$gt: 1}}));
- // Test that aggregate profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.eq(1, testColl.aggregate([{$match: {b: 1}}]).itcount());
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.aggregate": testColl.getName(),
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn,
- !isJsonLog(conn) ? "aggregate { aggregate: \"lock_acquisition_time\""
- : '"command":{"aggregate":"lock_acquisition_time"');
+// Test that aggregate profiler/logs include lock acquisition time.
+runWithWait(hangMillis, function() {
+ assert.eq(1, testColl.aggregate([{$match: {b: 1}}]).itcount());
+});
+profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.aggregate": testColl.getName(),
+});
+assert.gte(profileEntry.millis, hangMillis - padding);
+checkLog.contains(conn,
+ !isJsonLog(conn) ? "aggregate { aggregate: \"lock_acquisition_time\""
+ : '"command":{"aggregate":"lock_acquisition_time"');
- // Test that count profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.eq(1, testColl.count());
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.count": testColl.getName(),
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn,
- !isJsonLog(conn) ? "count { count: \"lock_acquisition_time\""
- : '"command":{"count":"lock_acquisition_time"');
+// Test that count profiler/logs include lock acquisition time.
+runWithWait(hangMillis, function() {
+ assert.eq(1, testColl.count());
+});
+profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.count": testColl.getName(),
+});
+assert.gte(profileEntry.millis, hangMillis - padding);
+checkLog.contains(conn,
+ !isJsonLog(conn) ? "count { count: \"lock_acquisition_time\""
+ : '"command":{"count":"lock_acquisition_time"');
- // Test that distinct profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.eq([1], testColl.distinct("a"));
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.distinct": testColl.getName(),
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn,
- !isJsonLog(conn) ? "distinct { distinct: \"lock_acquisition_time\""
- : '"command":{"distinct":"lock_acquisition_time"');
+// Test that distinct profiler/logs include lock acquisition time.
+runWithWait(hangMillis, function() {
+ assert.eq([1], testColl.distinct("a"));
+});
+profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.distinct": testColl.getName(),
+});
+assert.gte(profileEntry.millis, hangMillis - padding);
+checkLog.contains(conn,
+ !isJsonLog(conn) ? "distinct { distinct: \"lock_acquisition_time\""
+ : '"command":{"distinct":"lock_acquisition_time"');
- // Test that delete profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.commandWorked(testColl.remove({b: 1}));
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.q": {b: 1},
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn,
- !isJsonLog(conn) ? "delete { delete: \"lock_acquisition_time\""
- : '"command":{"delete":"lock_acquisition_time"');
-}
+// Test that delete profiler/logs include lock acquisition time.
+runWithWait(hangMillis, function() {
+ assert.commandWorked(testColl.remove({b: 1}));
+});
+profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.q": {b: 1},
+});
+assert.gte(profileEntry.millis, hangMillis - padding);
+checkLog.contains(conn,
+ !isJsonLog(conn) ? "delete { delete: \"lock_acquisition_time\""
+ : '"command":{"delete":"lock_acquisition_time"');
-// Run the tests once with read and write commands and once with legacy ops.
-runTests();
-conn.forceWriteMode("compatibility");
-conn.forceReadMode("legacy");
-runTests();
MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/log_find_getmore.js b/jstests/noPassthrough/log_find_getmore.js
index a5d6b983f43..ed9d2caac81 100644
--- a/jstests/noPassthrough/log_find_getmore.js
+++ b/jstests/noPassthrough/log_find_getmore.js
@@ -1,6 +1,5 @@
/**
- * Confirms that the log output for command and legacy find and getMore are in the expected format.
- * Legacy operations should be upconverted to match the format of their command counterparts.
+ * Confirms that the log output for find and getMore are in the expected format.
* @tags: [requires_profiling]
*/
load("jstests/libs/logv2_helpers.js");
@@ -49,11 +48,6 @@ assert.commandWorked(coll.createIndex({a: 1}));
// we can easily retrieve cursor IDs in all cases.
assert.commandWorked(testDB.setProfilingLevel(2, -1));
-//
-// Command tests.
-//
-testDB.getMongo().forceReadMode("commands");
-
// TEST: Verify the log format of the find command.
let cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).skip(1).limit(10).hint({a: 1}).batchSize(5);
cursor.next(); // Perform initial query and retrieve first document in batch.
@@ -129,84 +123,5 @@ if (isJsonLog(conn)) {
assertLogLineContains(conn, logLine);
-//
-// Legacy tests.
-//
-testDB.getMongo().forceReadMode("legacy");
-
-// TEST: Verify the log format of a legacy find. This should be upconverted to resemble a find
-// command.
-cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).skip(1).limit(10).hint({a: 1}).batchSize(5);
-cursor.next();
-
-cursorid = getLatestProfilerEntry(testDB).cursorid;
-
-logLine = 'query log_getmore.test appName: "MongoDB Shell" command: { find: "test", filter: { a: ' +
- '{ $gt: 0.0 } }, skip: 1, ntoreturn: 5, sort: { a: 1.0 }, hint: { a: 1.0 }';
-
-if (isJsonLog(conn)) {
- logLine =
- '"msg":"Slow query","attr":{"type":"query","ns":"log_getmore.test","appName":"MongoDB Shell","command":{"find":"test","filter":{"a":{"$gt":0}},"skip":1,"ntoreturn":5,"sort":{"a":1},"hint":{"a":1}}';
-}
-
-assertLogLineContains(conn, logLine);
-
-// TEST: Verify that a query whose filter contains a field named 'query' appears as expected in
-// the logs. This test ensures that upconverting a legacy query correctly identifies this as a
-// user field rather than a wrapped filter spec.
-coll.find({query: "foo"}).itcount();
-
-logLine =
- 'query log_getmore.test appName: "MongoDB Shell" command: { find: "test", filter: { query:' +
- ' "foo" } }';
-
-if (isJsonLog(conn)) {
- logLine =
- '"msg":"Slow query","attr":{"type":"query","ns":"log_getmore.test","appName":"MongoDB Shell","command":{"find":"test","filter":{"query":"foo"}}';
-}
-
-assertLogLineContains(conn, logLine);
-
-// TEST: Verify that a legacy getMore following a find is logged in the expected format. This
-// should be upconverted to resemble a getMore command, with the preceding upconverted legacy
-// find in the originatingCommand field.
-
-assert.eq(cursor.itcount(), 8); // Iterate the cursor established above to trigger getMore.
-
-logLine = 'getmore log_getmore.test appName: "MongoDB Shell" command: { getMore: ' +
- cursorIdToString(cursorid) +
- ', collection: "test", batchSize: 5 } originatingCommand: { find: "test", filter: { a: {' +
- ' $gt: 0.0 } }, skip: 1, ntoreturn: 5, sort: { a: 1.0 }, hint: { a: 1.0 }';
-
-if (isJsonLog(conn)) {
- logLine = `"msg":"Slow query","attr":{"type":"getmore","ns":"log_getmore.test","appName":"MongoDB Shell","command":{"getMore":${cursorIdToString(cursorid)},"collection":"test","batchSize":5},"originatingCommand":{"find":"test","filter":{"a":{"$gt":0}},"skip":1,"ntoreturn":5,"sort":{"a":1},"hint":{"a":1}}`;
-}
-
-assertLogLineContains(conn, logLine);
-
-// TEST: Verify that a legacy getMore following an aggregation is logged in the expected format.
-// This should be upconverted to resemble a getMore command, with the preceding aggregation in
-// the originatingCommand field.
-cursor = coll.aggregate([{$match: {a: {$gt: 0}}}], {cursor: {batchSize: 0}, hint: {a: 1}});
-cursorid = getLatestProfilerEntry(testDB).cursorid;
-
-assert.eq(cursor.itcount(), 10);
-
-logLine = [
- 'getmore log_getmore.test appName: "MongoDB Shell" command: { getMore: ' +
- cursorIdToString(cursorid) + ', collection: "test", batchSize: 0',
- 'originatingCommand: { aggregate: "test", pipeline:' +
- ' [ { $match: { a: { $gt: 0.0 } } } ], cursor: { batchSize: 0.0 }, hint: { a: 1.0 }'
-];
-
-if (isJsonLog(conn)) {
- logLine = [
- `"msg":"Slow query","attr":{"type":"getmore","ns":"log_getmore.test","appName":"MongoDB Shell","command":{"getMore":${
- cursorIdToString(cursorid)},"collection":"test","batchSize":0}`,
- '"originatingCommand":{"aggregate":"test","pipeline":[{"$match":{"a":{"$gt":0}}}],"cursor":{"batchSize":0},"hint":{"a":1}'
- ];
-}
-
-assertLogLineContains(conn, logLine);
MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js b/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
index 233fbcb5f86..9b0356639ce 100644
--- a/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
+++ b/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
@@ -1,7 +1,6 @@
/**
* Confirms that log output for each operation adheres to the expected, consistent format, including
- * query/write metrics where applicable, on both mongoD and mongoS and under both legacy and command
- * protocols.
+ * query/write metrics where applicable, on both mongoD and mongoS.
* @tags: [
* requires_replication,
* requires_sharding,
@@ -59,37 +58,25 @@ function dropAndRecreateTestCollection() {
// corresponding output. Returns a pair of arrays [testsRun, logLines]; the former is the set of
// test cases that were run, while the latter contains the logline for each test, or null if no
// such logline was found.
-function runLoggingTests({db, readWriteMode, slowMs, logLevel, sampleRate}) {
+function runLoggingTests({db, slowMs, logLevel, sampleRate}) {
dropAndRecreateTestCollection();
const coll = db.test;
- // Transparently handles assert.writeOK for legacy writes.
- function assertWriteOK(writeResponse) {
- if (!writeResponse) {
- assert(db.getMongo().writeMode !== "commands");
- assert(db.runCommand({getLastError: 1}).err == null);
- } else {
- assert.commandWorked(writeResponse);
- }
- }
-
for (let i = 1; i <= 5; ++i) {
- assertWriteOK(coll.insert({_id: i, a: i, loc: {type: "Point", coordinates: [i, i]}}));
- assertWriteOK(coll.insert({_id: -i, a: -i, loc: {type: "Point", coordinates: [-i, -i]}}));
+ assert.commandWorked(
+ coll.insert({_id: i, a: i, loc: {type: "Point", coordinates: [i, i]}}));
+ assert.commandWorked(
+ coll.insert({_id: -i, a: -i, loc: {type: "Point", coordinates: [-i, -i]}}));
}
- assertWriteOK(coll.createIndex({loc: "2dsphere"}));
+ assert.commandWorked(coll.createIndex({loc: "2dsphere"}));
const isMongos = FixtureHelpers.isMongos(db);
- // Set the shell read/write mode.
- db.getMongo().forceWriteMode(readWriteMode);
- db.getMongo().forceReadMode(readWriteMode);
-
// Build a string that identifies the parameters of this test run. Individual ops will
// use this string as their comment where applicable, and we also print it to the logs.
- const logFormatTestComment = (isMongos ? 'mongos' : 'mongod') + "_" + readWriteMode +
- "_slowms:" + slowMs + "_logLevel:" + logLevel + "_sampleRate:" + sampleRate;
+ const logFormatTestComment = (isMongos ? 'mongos' : 'mongod') + "_slowms:" + slowMs +
+ "_logLevel:" + logLevel + "_sampleRate:" + sampleRate;
jsTestLog(logFormatTestComment);
// Set all logging parameters. If slowMs is null, we set a high threshold here so that
@@ -110,11 +97,6 @@ function runLoggingTests({db, readWriteMode, slowMs, logLevel, sampleRate}) {
"usedDisk", "hasSortStage"]
: ["nShards"]);
- // Legacy operations do not produce a 'command: <name>' field in the log.
- if (readWriteMode === "legacy") {
- ignoreFields.push("command");
- }
-
function confirmLogContents(db, {test, logFields}, testIndex) {
// Clear the log before running the test, to guarantee that we do not match against any
// similar tests which may have run previously.
@@ -201,8 +183,7 @@ function runLoggingTests({db, readWriteMode, slowMs, logLevel, sampleRate}) {
command: "find",
find: coll.getName(),
comment: logFormatTestComment,
- planSummary: isSBEEnabled && readWriteMode == "commands" ? "IXSCAN { _id: 1 }"
- : "IDHACK",
+ planSummary: isSBEEnabled ? "IXSCAN { _id: 1 }" : "IDHACK",
cursorExhausted: 1,
keysExamined: 1,
docsExamined: 1,
@@ -249,7 +230,7 @@ function runLoggingTests({db, readWriteMode, slowMs, logLevel, sampleRate}) {
},
{
test: function(db) {
- assertWriteOK(db.test.update(
+ assert.commandWorked(db.test.update(
{a: 1, $comment: logFormatTestComment}, {$inc: {b: 1}}, {multi: true}));
},
logFields: (isMongos ? {
@@ -273,9 +254,9 @@ function runLoggingTests({db, readWriteMode, slowMs, logLevel, sampleRate}) {
},
{
test: function(db) {
- assertWriteOK(db.test.update({_id: 100, $comment: logFormatTestComment},
- {$inc: {b: 1}},
- {multi: true, upsert: true}));
+ assert.commandWorked(db.test.update({_id: 100, $comment: logFormatTestComment},
+ {$inc: {b: 1}},
+ {multi: true, upsert: true}));
},
logFields: (isMongos ? {
command: "update",
@@ -300,7 +281,7 @@ function runLoggingTests({db, readWriteMode, slowMs, logLevel, sampleRate}) {
},
{
test: function(db) {
- assertWriteOK(db.test.insert({z: 1, comment: logFormatTestComment}));
+ assert.commandWorked(db.test.insert({z: 1, comment: logFormatTestComment}));
},
logFields: {
command: "insert",
@@ -312,7 +293,7 @@ function runLoggingTests({db, readWriteMode, slowMs, logLevel, sampleRate}) {
},
{
test: function(db) {
- assertWriteOK(db.test.remove({z: 1, $comment: logFormatTestComment}));
+ assert.commandWorked(db.test.remove({z: 1, $comment: logFormatTestComment}));
},
logFields: (isMongos ? {
command: "delete",
@@ -372,12 +353,8 @@ function runLoggingTests({db, readWriteMode, slowMs, logLevel, sampleRate}) {
nShards: 1
}
: {command: "update", ns: `${db.getName()}.$cmd`})
- }
- ];
-
- // Confirm log contains collation for find command.
- if (readWriteMode === "commands") {
- testList.push({
+ },
+ {
test: function(db) {
assert.eq(db.test.find({_id: {$in: [1, 5]}})
.comment(logFormatTestComment)
@@ -396,8 +373,8 @@ function runLoggingTests({db, readWriteMode, slowMs, logLevel, sampleRate}) {
nreturned: 2,
nShards: 1
}
- });
- }
+ }
+ ];
// Confirm log content for getMore on both find and aggregate cursors.
const originatingCommands = {
@@ -448,57 +425,45 @@ function getUnloggedTests(testsRun, logLines) {
//
for (let testDB of [shardDB, mongosDB]) {
- for (let readWriteMode of ["commands", "legacy"]) {
- // Test that all operations are logged when slowMs is < 0 and sampleRate is 1 at the
- // default logLevel.
- let [testsRun, logLines] = runLoggingTests(
- {db: testDB, readWriteMode: readWriteMode, slowMs: -1, logLevel: 0, sampleRate: 1.0});
- let unlogged = getUnloggedTests(testsRun, logLines);
- assert.eq(unlogged.length, 0, () => tojson(unlogged));
-
- // Test that only some operations are logged when sampleRate is < 1 at the default
- // logLevel, even when slowMs is < 0. The actual sample rate is probabilistic, and may
- // therefore vary quite significantly from 0.5. However, we have already established
- // that with sampleRate 1 *all* ops are logged, so here it is sufficient to confirm that
- // some ops are not. We repeat the test 5 times to minimize the odds of failure.
- let sampleRateTestsRun = 0, sampleRateTestsLogged = 0;
- for (let i = 0; i < 5; i++) {
- [testsRun, logLines] = runLoggingTests({
- db: testDB,
- readWriteMode: readWriteMode,
- slowMs: -1,
- logLevel: 0,
- sampleRate: 0.5
- });
- unlogged = getUnloggedTests(testsRun, logLines);
- sampleRateTestsLogged += (testsRun.length - unlogged.length);
- sampleRateTestsRun += testsRun.length;
- }
- assert.betweenEx(0, sampleRateTestsLogged, sampleRateTestsRun);
-
- // Test that only operations which exceed slowMs are logged when slowMs > 0 and
- // sampleRate is 1, at the default logLevel. The given value of slowMs will be applied
- // to every second op in the test, so only half of the ops should be logged.
- [testsRun, logLines] = runLoggingTests({
- db: testDB,
- readWriteMode: readWriteMode,
- slowMs: 1000000,
- logLevel: 0,
- sampleRate: 1.0
- });
- unlogged = getUnloggedTests(testsRun, logLines);
- assert.eq(unlogged.length, Math.floor(testsRun.length / 2), () => tojson(unlogged));
-
- // Test that all operations are logged when logLevel is 1, regardless of sampleRate and
- // slowMs. We pass 'null' for slowMs to signify that a high threshold should be set
- // (such that, at logLevel 0, no operations would be logged) and that this value should
- // be applied for all operations, rather than for every second op as in the case of the
- // slowMs test.
- [testsRun, logLines] = runLoggingTests(
- {db: testDB, readWriteMode: readWriteMode, slowMs: null, logLevel: 1, sampleRate: 0.5});
+ // Test that all operations are logged when slowMs is < 0 and sampleRate is 1 at the
+ // default logLevel.
+ let [testsRun, logLines] =
+ runLoggingTests({db: testDB, slowMs: -1, logLevel: 0, sampleRate: 1.0});
+ let unlogged = getUnloggedTests(testsRun, logLines);
+ assert.eq(unlogged.length, 0, () => tojson(unlogged));
+
+ // Test that only some operations are logged when sampleRate is < 1 at the default
+ // logLevel, even when slowMs is < 0. The actual sample rate is probabilistic, and may
+ // therefore vary quite significantly from 0.5. However, we have already established
+ // that with sampleRate 1 *all* ops are logged, so here it is sufficient to confirm that
+ // some ops are not. We repeat the test 5 times to minimize the odds of failure.
+ let sampleRateTestsRun = 0, sampleRateTestsLogged = 0;
+ for (let i = 0; i < 5; i++) {
+ [testsRun, logLines] =
+ runLoggingTests({db: testDB, slowMs: -1, logLevel: 0, sampleRate: 0.5});
unlogged = getUnloggedTests(testsRun, logLines);
- assert.eq(unlogged.length, 0, () => tojson(unlogged));
+ sampleRateTestsLogged += (testsRun.length - unlogged.length);
+ sampleRateTestsRun += testsRun.length;
}
+ assert.betweenEx(0, sampleRateTestsLogged, sampleRateTestsRun);
+
+ // Test that only operations which exceed slowMs are logged when slowMs > 0 and
+ // sampleRate is 1, at the default logLevel. The given value of slowMs will be applied
+ // to every second op in the test, so only half of the ops should be logged.
+ [testsRun, logLines] =
+ runLoggingTests({db: testDB, slowMs: 1000000, logLevel: 0, sampleRate: 1.0});
+ unlogged = getUnloggedTests(testsRun, logLines);
+ assert.eq(unlogged.length, Math.floor(testsRun.length / 2), () => tojson(unlogged));
+
+ // Test that all operations are logged when logLevel is 1, regardless of sampleRate and
+ // slowMs. We pass 'null' for slowMs to signify that a high threshold should be set
+ // (such that, at logLevel 0, no operations would be logged) and that this value should
+ // be applied for all operations, rather than for every second op as in the case of the
+ // slowMs test.
+ [testsRun, logLines] =
+ runLoggingTests({db: testDB, slowMs: null, logLevel: 1, sampleRate: 0.5});
+ unlogged = getUnloggedTests(testsRun, logLines);
+ assert.eq(unlogged.length, 0, () => tojson(unlogged));
}
st.stop();
})();
diff --git a/jstests/noPassthrough/mongos_exhausts_stale_config_retries.js b/jstests/noPassthrough/mongos_exhausts_stale_config_retries.js
index d8290a66b89..0ac83c6cb56 100644
--- a/jstests/noPassthrough/mongos_exhausts_stale_config_retries.js
+++ b/jstests/noPassthrough/mongos_exhausts_stale_config_retries.js
@@ -14,10 +14,6 @@ const ns = dbName + '.' + collName;
const st = new ShardingTest({shards: 2, config: 1});
const testDB = st.s.getDB(dbName);
-// Only testing the command read and write modes.
-assert(testDB.getMongo().readMode() === "commands");
-assert(testDB.getMongo().writeMode() === "commands");
-
// Shard a collection with the only chunk on shard0.
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
diff --git a/jstests/noPassthrough/profile_operation_metrics.js b/jstests/noPassthrough/profile_operation_metrics.js
index 39427500a6d..3a5f4630b5f 100644
--- a/jstests/noPassthrough/profile_operation_metrics.js
+++ b/jstests/noPassthrough/profile_operation_metrics.js
@@ -52,19 +52,6 @@ const assertMetricsExist = (profilerEntry) => {
assert.gte(metrics.totalUnitsWritten, 0);
};
-const runInLegacyQueryMode = (db, func) => {
- const readMode = db.getMongo().readMode();
- const writeMode = db.getMongo().writeMode();
- try {
- db.getMongo().forceReadMode("legacy");
- db.getMongo().forceWriteMode("legacy");
- func();
- } finally {
- db.getMongo().forceReadMode(readMode);
- db.getMongo().forceWriteMode(writeMode);
- }
-};
-
const resetProfileColl = {
name: 'resetProfileColl',
command: (db) => {
@@ -864,149 +851,6 @@ const operations = [
resetProfileColl,
resetTestColl,
{
- name: 'insertLegacy',
- command: (db) => {
- runInLegacyQueryMode(db, () => {
- db[collName].insert({_id: 1, a: 0});
- });
- },
- profileFilter: {op: 'insert'},
- profileAssert: (db, profileDoc) => {
- // Insert should not perform any reads.
- assert.eq(profileDoc.docBytesRead, 0);
- assert.eq(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntryBytesRead, 0);
- assert.eq(profileDoc.idxEntryUnitsRead, 0);
- assert.eq(profileDoc.cursorSeeks, 0);
- assert.eq(profileDoc.docBytesWritten, 29);
- assert.eq(profileDoc.docUnitsWritten, 1);
- assert.eq(profileDoc.idxEntryBytesWritten, 3);
- assert.eq(profileDoc.idxEntryUnitsWritten, 1);
- assert.eq(profileDoc.totalUnitsWritten, 1);
- assert.eq(profileDoc.keysSorted, 0);
- assert.eq(profileDoc.sorterSpills, 0);
- assert.eq(profileDoc.docUnitsReturned, 0);
- }
- },
- {
- name: 'findLegacy',
- command: (db) => {
- runInLegacyQueryMode(db, () => {
- assert.eq(db[collName].find({_id: 1}).itcount(), 1);
- });
- },
- profileFilter: {op: 'query', 'command.find': collName},
- profileAssert: (db, profileDoc) => {
- assert.eq(profileDoc.docBytesRead, 29);
- assert.eq(profileDoc.docUnitsRead, 1);
- assert.eq(profileDoc.idxEntryBytesRead, 3);
- assert.eq(profileDoc.idxEntryUnitsRead, 1);
- assert.eq(profileDoc.cursorSeeks, 2);
- assert.eq(profileDoc.docBytesWritten, 0);
- assert.eq(profileDoc.docUnitsWritten, 0);
- assert.eq(profileDoc.idxEntryBytesWritten, 0);
- assert.eq(profileDoc.idxEntryUnitsWritten, 0);
- assert.eq(profileDoc.totalUnitsWritten, 0);
- assert.eq(profileDoc.keysSorted, 0);
- assert.eq(profileDoc.sorterSpills, 0);
- assert.eq(profileDoc.docUnitsReturned, 1);
- }
- },
- resetProfileColl,
- {
- name: 'getMoreLegacy',
- command: (db) => {
- runInLegacyQueryMode(db, () => {
- db[collName].insert({_id: 2});
- db[collName].insert({_id: 3});
- // The value '1' is not a valid batch size for legacy queries, and will actually
- // return more than 1 document per batch.
- let cursor = db[collName].find().batchSize(2);
- cursor.next();
- cursor.next();
- assert.eq(cursor.objsLeftInBatch(), 0);
- // Trigger a getMore.
- cursor.next();
- });
- },
- profileFilter: {op: 'getmore'},
- profileAssert: (db, profileDoc) => {
- assert.eq(profileDoc.docBytesRead, 18);
- assert.eq(profileDoc.docUnitsRead, 1);
- assert.eq(profileDoc.idxEntryBytesRead, 0);
- assert.eq(profileDoc.idxEntryUnitsRead, 0);
- assert.eq(profileDoc.cursorSeeks, 0);
- assert.eq(profileDoc.docBytesWritten, 0);
- assert.eq(profileDoc.docUnitsWritten, 0);
- assert.eq(profileDoc.idxEntryBytesWritten, 0);
- assert.eq(profileDoc.idxEntryUnitsWritten, 0);
- assert.eq(profileDoc.totalUnitsWritten, 0);
- assert.eq(profileDoc.keysSorted, 0);
- assert.eq(profileDoc.sorterSpills, 0);
- assert.eq(profileDoc.docUnitsReturned, 1);
- }
- },
- {
- name: 'updateLegacy',
- command: (db) => {
- runInLegacyQueryMode(db, () => {
- db[collName].update({_id: 1}, {$set: {a: 1}});
- });
- },
- profileFilter: {op: 'update'},
- profileAssert: (db, profileDoc) => {
- if (!isDebugBuild(db)) {
- assert.eq(profileDoc.docBytesRead, 29);
- assert.eq(profileDoc.docUnitsRead, 1);
- assert.eq(profileDoc.cursorSeeks, 2);
- } else {
- assert.gte(profileDoc.docBytesRead, 29);
- assert.gte(profileDoc.docUnitsRead, 1);
- assert.gte(profileDoc.cursorSeeks, 2);
- }
- assert.eq(profileDoc.idxEntryBytesRead, 3);
- assert.eq(profileDoc.idxEntryUnitsRead, 1);
- assert.eq(profileDoc.docBytesWritten, 16);
- assert.eq(profileDoc.docUnitsWritten, 1);
- assert.eq(profileDoc.idxEntryBytesWritten, 0);
- assert.eq(profileDoc.idxEntryUnitsWritten, 0);
- assert.eq(profileDoc.totalUnitsWritten, 1);
- assert.eq(profileDoc.docUnitsReturned, 0);
- }
- },
- {
- name: 'deleteLegacy',
- command: (db) => {
- runInLegacyQueryMode(db, () => {
- db[collName].remove({_id: 1});
- });
- },
- profileFilter: {op: 'remove'},
- profileAssert: (db, profileDoc) => {
- if (!isDebugBuild(db)) {
- assert.eq(profileDoc.docBytesRead, 29);
- assert.eq(profileDoc.docUnitsRead, 1);
- assert.eq(profileDoc.cursorSeeks, 3);
- } else {
- assert.gte(profileDoc.docBytesRead, 29);
- assert.gte(profileDoc.docUnitsRead, 1);
- assert.gte(profileDoc.cursorSeeks, 3);
- }
- assert.eq(profileDoc.idxEntryBytesRead, 3);
- assert.eq(profileDoc.idxEntryUnitsRead, 1);
- assert.eq(profileDoc.docBytesWritten, 29);
- assert.eq(profileDoc.docUnitsWritten, 1);
- assert.eq(profileDoc.idxEntryBytesWritten, 3);
- assert.eq(profileDoc.idxEntryUnitsWritten, 1);
- assert.eq(profileDoc.totalUnitsWritten, 1);
- assert.eq(profileDoc.keysSorted, 0);
- assert.eq(profileDoc.sorterSpills, 0);
- assert.eq(profileDoc.docUnitsReturned, 0);
- }
- },
- resetProfileColl,
- resetTestColl,
- {
name: 'insertBulk',
command: (db) => {
let bulk = db[collName].initializeUnorderedBulkOp();
diff --git a/jstests/noPassthrough/replica_set_connection_getmore.js b/jstests/noPassthrough/replica_set_connection_getmore.js
index c1abc0e074d..5a0537fb367 100644
--- a/jstests/noPassthrough/replica_set_connection_getmore.js
+++ b/jstests/noPassthrough/replica_set_connection_getmore.js
@@ -18,13 +18,6 @@ const collName = "getmore";
// connections to each individual node.
var conn = new Mongo(rst.getURL());
-// We force a read mode of "compatibility" so that we can test Mongo.prototype.readMode()
-// resolves to "commands" independently of the --readMode passed to the mongo shell running this
-// test.
-conn.forceReadMode("compatibility");
-assert.eq("commands",
- conn.readMode(),
- "replica set connections created by the mongo shell should use 'commands' read mode");
var coll = conn.getDB(dbName)[collName];
coll.drop();
@@ -40,10 +33,5 @@ rst.awaitReplication();
var cursor = coll.find().readPref("secondary").batchSize(2);
assert.eq(5, cursor.itcount(), "failed to read the documents from the secondary");
-// Verify that queries work when the read mode is forced to "legacy" reads.
-conn.forceReadMode("legacy");
-var cursor = coll.find().readPref("secondary").batchSize(2);
-assert.eq(5, cursor.itcount(), "failed to read the documents from the secondary");
-
rst.stopSet();
})();
diff --git a/jstests/noPassthrough/server_read_concern_metrics.js b/jstests/noPassthrough/server_read_concern_metrics.js
index 617a8603b2b..cc97ceb27c2 100644
--- a/jstests/noPassthrough/server_read_concern_metrics.js
+++ b/jstests/noPassthrough/server_read_concern_metrics.js
@@ -179,19 +179,9 @@ function getServerStatus(conn) {
let serverStatus = getServerStatus(testDB);
verifyServerStatusFields(serverStatus);
-let newStatus;
-
-// Run a legacy query.
-primary.forceReadMode("legacy");
-assert.eq(testColl.find().itcount(), 1);
-newStatus = getServerStatus(testDB);
-verifyServerStatusChange(serverStatus, newStatus, "none", 1);
-primary.forceReadMode("commands");
-serverStatus = newStatus;
-
// Run a command without a readConcern.
assert.eq(testColl.find().itcount(), 1);
-newStatus = getServerStatus(testDB);
+let newStatus = getServerStatus(testDB);
verifyServerStatusChange(serverStatus, newStatus, "none", 1);
serverStatus = newStatus;
diff --git a/jstests/noPassthrough/socket_disconnect_kills.js b/jstests/noPassthrough/socket_disconnect_kills.js
index 5a2aa2dd692..afec864763e 100644
--- a/jstests/noPassthrough/socket_disconnect_kills.js
+++ b/jstests/noPassthrough/socket_disconnect_kills.js
@@ -160,13 +160,6 @@ function runTests(client) {
}
})
],
- [
- checkClosedEarly,
- function(client) {
- client.forceReadMode("legacy");
- assert(client.getDB(testName).test.findOne({}));
- }
- ],
].forEach(runWithCuropFailPointEnabled(client, "waitInFindBeforeMakingBatch"));
// After SERVER-39475, re-enable these tests and add negative testing for $out cursors.
@@ -180,18 +173,8 @@ function runTests(client) {
assert.commandWorked(client.getDB(testName).runCommand(
{getMore: result.cursor.id, collection: "test"}));
}
- ],
- [
- checkClosedEarly,
- function(client) {
- client.forceReadMode("legacy");
- var cursor = client.getDB(testName).test.find({}).batchSize(2);
- assert(cursor.next());
- assert(cursor.next());
- assert(cursor.next());
- }
- ],
- ].forEach(runWithCuropFailPointEnabled(client, "waitAfterPinningCursorBeforeGetMoreBatch"));
+ ]].forEach(runWithCuropFailPointEnabled(client,
+ "waitAfterPinningCursorBeforeGetMoreBatch"));
}
[[checkClosedEarly, runCommand({aggregate: "test", pipeline: [], cursor: {}})],
diff --git a/jstests/noPassthrough/traffic_reading_legacy.js b/jstests/noPassthrough/traffic_reading_legacy.js
deleted file mode 100644
index 69cda3be58f..00000000000
--- a/jstests/noPassthrough/traffic_reading_legacy.js
+++ /dev/null
@@ -1,70 +0,0 @@
-// tests for the traffic_recording commands.
-(function() {
-var baseName = "jstests_traffic_recording";
-
-// Variables for this test
-const recordingDir = MongoRunner.toRealDir("$dataDir/traffic_recording/");
-const recordingFile = "recording.txt";
-const recordingFilePath = MongoRunner.toRealDir(recordingDir + "/" + recordingFile);
-
-// Create the recording directory if it does not already exist
-mkdir(recordingDir);
-
-// Create the options and run mongod
-var opts = {auth: "", setParameter: "trafficRecordingDirectory=" + recordingDir};
-m = MongoRunner.runMongod(opts);
-
-// Get the port of the host
-var serverPort = m.port;
-
-// Set the readMode and writeMode to legacy
-m.forceReadMode("legacy");
-m.forceWriteMode("legacy");
-
-// Create necessary users
-adminDB = m.getDB("admin");
-const testDB = m.getDB("test");
-const coll = testDB.getCollection("foo");
-adminDB.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
-adminDB.auth("admin", "pass");
-
-// Start recording traffic
-assert.commandWorked(adminDB.runCommand({'startRecordingTraffic': 1, 'filename': 'recording.txt'}));
-
-// Run a few commands
-testDB.runCommand({"serverStatus": 1});
-coll.insert({"name": "foo biz bar"});
-coll.findOne();
-coll.insert({"name": "foo bar"});
-coll.findOne({"name": "foo bar"});
-coll.deleteOne({});
-
-// Stop recording traffic
-assert.commandWorked(testDB.runCommand({'stopRecordingTraffic': 1}));
-
-// Shutdown Mongod
-MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'password'});
-
-// Counters
-var opCodes = {};
-
-// Pass filepath to traffic_reader helper method to get recorded info in BSON
-var res = convertTrafficRecordingToBSON(recordingFilePath);
-
-// Iterate through the results and assert the above commands are properly recorded
-res.forEach((obj) => {
- opCodes[obj["rawop"]["header"]["opcode"]] =
- (opCodes[obj["rawop"]["header"]["opcode"]] || 0) + 1;
- assert.eq(obj["seenconnectionnum"], 1);
- var responseTo = obj["rawop"]["header"]["responseto"];
- if (responseTo == 0) {
- assert.eq(obj["destendpoint"], serverPort.toString());
- } else {
- assert.eq(obj["srcendpoint"], serverPort.toString());
- }
-});
-
-// ensure legacy operations worked properly
-assert.eq(opCodes[2002], 2);
-assert.eq(opCodes[2006], 1);
-})();
diff --git a/jstests/noPassthrough/views_legacy.js b/jstests/noPassthrough/views_legacy.js
deleted file mode 100644
index efedb669a25..00000000000
--- a/jstests/noPassthrough/views_legacy.js
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Tests that views properly reject queries in legacy read mode, and reject writes performed in
- * legacy write mode. Also confirms that legacy killCursors execution is successful.
- */
-(function() {
-"use strict";
-
-let conn = MongoRunner.runMongod({});
-
-let viewsDB = conn.getDB("views_legacy");
-assert.commandWorked(viewsDB.dropDatabase());
-assert.commandWorked(viewsDB.createView("view", "collection", []));
-let coll = viewsDB.getCollection("collection");
-
-for (let i = 0; i < 10; ++i) {
- assert.commandWorked(coll.insert({a: i}));
-}
-
-conn.forceReadMode("legacy");
-conn.forceWriteMode("legacy");
-
-//
-// Legacy getMore is explicitly prohibited on views; you must use the getMore command.
-//
-let cmdRes = viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
-assert.commandWorked(cmdRes);
-let cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
-
-let err = assert.throws(function() {
- cursor.itcount();
-}, [], "Legacy getMore expected to fail on a view cursor");
-assert.eq(ErrorCodes.CommandNotSupportedOnView, err.code, tojson(err));
-
-//
-// Legacy killcursors is expected to work on views.
-//
-cmdRes = viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
-assert.commandWorked(cmdRes);
-cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
-
-// When DBCommandCursor is constructed under legacy readMode, cursor.close() will execute a
-// legacy killcursors operation.
-cursor.close();
-assert.gleSuccess(viewsDB, "legacy killcursors expected to work on view cursor");
-
-//
-// A view should reject all write CRUD operations performed in legacy write mode.
-//
-viewsDB.view.insert({x: 1});
-assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
-
-viewsDB.view.remove({x: 1});
-assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
-
-viewsDB.view.update({x: 1}, {x: 2});
-assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
-
-//
-// Legacy find is explicitly prohibited on views; you must use the find command.
-//
-let res = assert.throws(function() {
- viewsDB.view.find({x: 1}).toArray();
-});
-assert.eq(res.code, ErrorCodes.CommandNotSupportedOnView, tojson(res));
-
-// Ensure that legacy getMore succeeds even when a cursor is established on a namespace whose
-// database does not exist. Legacy getMore must check that the cursor is not over a view, and
-// this must handle the case where the namespace is not a view by virtue of the database not
-// existing.
-assert.commandWorked(viewsDB.dropDatabase());
-
-cmdRes = viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
-assert.commandWorked(cmdRes);
-cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
-assert.eq(0, cursor.itcount());
-
-cmdRes = viewsDB.runCommand({aggregate: "view", pipeline: [], cursor: {batchSize: 0}});
-assert.commandWorked(cmdRes);
-cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
-assert.eq(0, cursor.itcount());
-
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthroughWithMongod/bulk_api_limits.js b/jstests/noPassthroughWithMongod/bulk_api_limits.js
index 91b56f80951..b6e08a50b6a 100644
--- a/jstests/noPassthroughWithMongod/bulk_api_limits.js
+++ b/jstests/noPassthroughWithMongod/bulk_api_limits.js
@@ -1,3 +1,4 @@
+(function() {
var collectionName = "bulk_api_limits";
var coll = db.getCollection(collectionName);
coll.drop();
@@ -129,32 +130,6 @@ var executeTestsOrdered = function() {
coll.remove({});
};
-var buildVersion = parseInt(db.runCommand({buildInfo: 1}).versionArray.slice(0, 3).join(""), 10);
-// Save the existing useWriteCommands function
-var _useWriteCommands = coll.getMongo().useWriteCommands;
-
-//
-// Only execute write command tests if we have > 2.5.5 otherwise
-// execute the down converted version
-if (buildVersion >= 255) {
- // Force the use of useWriteCommands
- coll._mongo.useWriteCommands = function() {
- return true;
- };
-
- // Execute tests using legacy operations
- executeTestsUnordered();
- executeTestsOrdered();
-}
-
-// Force the use of legacy commands
-coll._mongo.useWriteCommands = function() {
- return false;
-};
-
-// Execute tests using legacy operations
executeTestsUnordered();
executeTestsOrdered();
-
-// Reset the function
-coll.getMongo().useWriteCommands = _useWriteCommands; \ No newline at end of file
+}());
diff --git a/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js b/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
index 1d7b2c6f82a..c0f0bcdab5a 100644
--- a/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
+++ b/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
@@ -22,9 +22,6 @@ try {
getWriteConcern: function() {
return null;
},
- useWriteCommands: function() {
- return true;
- },
hasWriteCommands: function() {
return true;
},
diff --git a/jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js b/jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js
index 9903c7a5835..eddca9766ed 100644
--- a/jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js
+++ b/jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js
@@ -5,7 +5,6 @@ var testDB = db.getSiblingDB('dbcommand_cursor_throws_on_closed_conn');
testDB.dropDatabase();
var coll = testDB.collection;
var conn = testDB.getMongo();
-conn.forceReadMode("commands");
assert.commandWorked(coll.save({}));
var res = assert.commandWorked(testDB.runCommand({
find: coll.getName(),
diff --git a/jstests/replsets/agg_write_concern_zero_batch_size.js b/jstests/replsets/agg_write_concern_zero_batch_size.js
index 0942ec8cfff..5d5961aea8c 100644
--- a/jstests/replsets/agg_write_concern_zero_batch_size.js
+++ b/jstests/replsets/agg_write_concern_zero_batch_size.js
@@ -65,17 +65,6 @@ try {
assert(error instanceof Error);
assert(tojson(error).indexOf("writeConcernError") != -1, tojson(error));
- // Now switch to legacy OP_GET_MORE read mode. We should get a different error indicating
- // that using writeConcern in this way is unsupported.
- source.getDB().getMongo().forceReadMode("legacy");
- error = assert.throws(
- () => source
- .aggregate([stageSpec],
- {cursor: {batchSize: 0}, writeConcern: {w: 2, wtimeout: 100}})
- .itcount());
- assert.eq(error.code, 31124);
- source.getDB().getMongo().forceReadMode("commands");
-
restartServerReplication(rst.getSecondary());
});
} finally {
diff --git a/jstests/replsets/bulk_api_wc.js b/jstests/replsets/bulk_api_wc.js
index 59635744a1a..8bc52ef04ff 100644
--- a/jstests/replsets/bulk_api_wc.js
+++ b/jstests/replsets/bulk_api_wc.js
@@ -11,7 +11,7 @@ if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wired
return;
}
-// Start a 2-node replica set with no journal
+// Start a 2-node replica set with no journal.
// Allows testing immediate write concern failures and wc application failures
var rst = new ReplSetTest({nodes: 2});
rst.startSet({nojournal: ""});
@@ -19,120 +19,101 @@ rst.initiate();
var mongod = rst.getPrimary();
var coll = mongod.getCollection("test.bulk_api_wc");
-var executeTests = function() {
- // Create a unique index, legacy writes validate too early to use invalid documents for
- // write
- // error testing
- coll.createIndex({a: 1}, {unique: true});
+// Create a unique index; legacy writes validate too early to use invalid documents for write
+// error testing.
+coll.createIndex({a: 1}, {unique: true});
- //
- // Ordered
- //
+//
+// Ordered
+//
- //
- // Fail due to nojournal
- coll.remove({});
- var bulk = coll.initializeOrderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- assert.throws(function() {
- bulk.execute({j: true});
- });
+//
+// Fail due to nojournal
+coll.remove({});
+var bulk = coll.initializeOrderedBulkOp();
+bulk.insert({a: 1});
+bulk.insert({a: 2});
+assert.throws(function() {
+ bulk.execute({j: true});
+});
- //
- // Fail due to unrecognized write concern field.
- coll.remove({});
- var bulk = coll.initializeOrderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- assert.throws(function() {
- bulk.execute({x: 1});
- });
+//
+// Fail due to unrecognized write concern field.
+coll.remove({});
+var bulk = coll.initializeOrderedBulkOp();
+bulk.insert({a: 1});
+bulk.insert({a: 2});
+assert.throws(function() {
+ bulk.execute({x: 1});
+});
- //
- // Fail with write error, no write concern error even though it would fail on apply for
- // ordered
- coll.remove({});
- var bulk = coll.initializeOrderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- bulk.insert({a: 2});
- result = assert.throws(function() {
- bulk.execute({w: 'invalid'});
- });
- assert.eq(result.nInserted, 2);
- assert.eq(result.getWriteErrors()[0].index, 2);
- assert(!result.getWriteConcernError());
- assert.eq(coll.find().itcount(), 2);
+//
+// Fail with write error, no write concern error even though it would fail on apply for ordered
+coll.remove({});
+var bulk = coll.initializeOrderedBulkOp();
+bulk.insert({a: 1});
+bulk.insert({a: 2});
+bulk.insert({a: 2});
+result = assert.throws(function() {
+ bulk.execute({w: 'invalid'});
+});
+assert.eq(result.nInserted, 2);
+assert.eq(result.getWriteErrors()[0].index, 2);
+assert(!result.getWriteConcernError());
+assert.eq(coll.find().itcount(), 2);
- //
- // Unordered
- //
+//
+// Unordered
+//
- //
- // Fail with write error, write concern error reported when unordered
- coll.remove({});
- var bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- bulk.insert({a: 2});
- var result = assert.throws(function() {
- bulk.execute({w: 'invalid'});
- });
- assert.eq(result.nInserted, 2);
- assert.eq(result.getWriteErrors()[0].index, 2);
- assert(result.getWriteConcernError());
- assert.eq(coll.find().itcount(), 2);
+//
+// Fail with write error, write concern error reported when unordered
+coll.remove({});
+var bulk = coll.initializeUnorderedBulkOp();
+bulk.insert({a: 1});
+bulk.insert({a: 2});
+bulk.insert({a: 2});
+var result = assert.throws(function() {
+ bulk.execute({w: 'invalid'});
+});
+assert.eq(result.nInserted, 2);
+assert.eq(result.getWriteErrors()[0].index, 2);
+assert(result.getWriteConcernError());
+assert.eq(coll.find().itcount(), 2);
- //
- // Fail with write error, write concern timeout reported when unordered
- // Note that wtimeout:true can only be reported when the batch is all the same, so there's
- // not
- // multiple wc errors
- coll.remove({});
- var bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- bulk.insert({a: 2});
- var result = assert.throws(function() {
- bulk.execute({w: 3, wtimeout: 1});
- });
- assert.eq(result.nInserted, 2);
- assert.eq(result.getWriteErrors()[0].index, 2);
- assert.eq(100, result.getWriteConcernError().code);
- assert.eq(coll.find().itcount(), 2);
+//
+// Fail with write error, write concern timeout reported when unordered. Note that wtimeout:true
+// can only be reported when the batch is all the same, so there aren't multiple wc errors.
+coll.remove({});
+var bulk = coll.initializeUnorderedBulkOp();
+bulk.insert({a: 1});
+bulk.insert({a: 2});
+bulk.insert({a: 2});
+var result = assert.throws(function() {
+ bulk.execute({w: 3, wtimeout: 1});
+});
+assert.eq(result.nInserted, 2);
+assert.eq(result.getWriteErrors()[0].index, 2);
+assert.eq(100, result.getWriteConcernError().code);
+assert.eq(coll.find().itcount(), 2);
- //
- // Fail with write error and upserted, write concern error reported when unordered
- coll.remove({});
- var bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- bulk.find({a: 3}).upsert().updateOne({a: 3});
- bulk.insert({a: 3});
- var result = assert.throws(function() {
- bulk.execute({w: 'invalid'});
- });
- assert.eq(result.nInserted, 2);
- assert.eq(result.nUpserted, 1);
- assert.eq(result.getUpsertedIds()[0].index, 2);
- assert.eq(result.getWriteErrors()[0].index, 3);
- assert(result.getWriteConcernError());
- assert.eq(coll.find().itcount(), 3);
-};
-
-// Use write commands
-coll.getMongo().useWriteCommands = function() {
- return true;
-};
-executeTests();
-
-// FAILING currently due to incorrect batch api reading of GLE
-// Use legacy opcodes
-coll.getMongo().useWriteCommands = function() {
- return false;
-};
-executeTests();
+//
+// Fail with write error and upserted, write concern error reported when unordered
+coll.remove({});
+var bulk = coll.initializeUnorderedBulkOp();
+bulk.insert({a: 1});
+bulk.insert({a: 2});
+bulk.find({a: 3}).upsert().updateOne({a: 3});
+bulk.insert({a: 3});
+var result = assert.throws(function() {
+ bulk.execute({w: 'invalid'});
+});
+assert.eq(result.nInserted, 2);
+assert.eq(result.nUpserted, 1);
+assert.eq(result.getUpsertedIds()[0].index, 2);
+assert.eq(result.getWriteErrors()[0].index, 3);
+assert(result.getWriteConcernError());
+assert.eq(coll.find().itcount(), 3);
jsTest.log("DONE bulk api wc tests");
rst.stopSet();
diff --git a/jstests/replsets/disconnect_on_legacy_write_to_secondary.js b/jstests/replsets/disconnect_on_legacy_write_to_secondary.js
deleted file mode 100644
index 9678fc1a98e..00000000000
--- a/jstests/replsets/disconnect_on_legacy_write_to_secondary.js
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Tests that legacy writes to secondaries result in no answer and a disconnection.
- */
-(function() {
-"use strict";
-
-load("jstests/libs/fail_point_util.js");
-
-const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
-rst.startSet();
-rst.initiate();
-
-const primary = rst.getPrimary();
-const secondary = rst.getSecondary();
-const collname = "disconnect_on_legacy_write_to_secondary";
-const coll = primary.getDB("test")[collname];
-const secondaryDb = secondary.getDB("test");
-const secondaryColl = secondaryDb[collname];
-
-// Never retry on network error, because this test needs to detect the network error.
-TestData.skipRetryOnNetworkError = true;
-secondary.forceWriteMode('legacy');
-assert.commandWorked(coll.insert([{_id: 'deleteme'}, {_id: 'updateme'}]));
-rst.awaitReplication();
-
-jsTestLog("Trying legacy insert on secondary");
-secondaryColl.insert({_id: 'no_insert_on_secondary'});
-let res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
-assert(isNetworkError(res));
-// We should automatically reconnect after the failed command.
-assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
-
-jsTestLog("Trying legacy update on secondary");
-secondaryColl.update({_id: 'updateme'}, {'$set': {x: 1}});
-res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
-assert(isNetworkError(res));
-// We should automatically reconnect after the failed command.
-assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
-
-jsTestLog("Trying legacy remove on secondary");
-secondaryColl.remove({_id: 'deleteme'}, {'$set': {x: 1}});
-res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
-assert(isNetworkError(res));
-// We should automatically reconnect after the failed command.
-assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
-
-// Do the stepdown tests on a separate connection to avoid interfering with the
-// ReplSetTest mechanism.
-const primaryAdmin = primary.getDB("admin");
-const primaryDataConn = new Mongo(primary.host);
-const primaryDb = primaryDataConn.getDB("test");
-const primaryColl = primaryDb[collname];
-primaryDataConn.forceWriteMode('legacy');
-
-function getNotPrimaryLegacyUnackWritesCounter() {
- return assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1}))
- .metrics.repl.network.notPrimaryLegacyUnacknowledgedWrites;
-}
-
-function runStepDownTest({description, failpoint, operation}) {
- jsTestLog("Enabling failpoint to block " + description + "s");
- let failPoint = configureFailPoint(primaryAdmin, failpoint);
-
- let failedLegacyUnackWritesBefore = getNotPrimaryLegacyUnackWritesCounter();
-
- jsTestLog("Trying legacy " + description + " on stepping-down primary");
- operation();
- failPoint.wait();
- jsTestLog("Within " + description + ": stepping down and disabling failpoint");
- assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- failPoint.off();
- res = assert.throws(() => primaryDb.adminCommand({ping: 1}));
- assert(isNetworkError(res));
- // We should automatically reconnect after the failed command.
- assert.commandWorked(primaryDb.adminCommand({ping: 1}));
-
- // Validate the number of legacy unacknowledged writes failed due to step down resulted
- // in network disconnection.
- let failedLegacyUnackWritesAfter = getNotPrimaryLegacyUnackWritesCounter();
- assert.eq(failedLegacyUnackWritesAfter, failedLegacyUnackWritesBefore + 1);
-
- // Allow the primary to be re-elected, and wait for it.
- assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
- rst.getPrimary();
-}
-runStepDownTest({
- description: "insert",
- failpoint: "hangDuringBatchInsert",
- operation: () => primaryColl.insert({_id: 'no_insert_on_stepdown'})
-});
-
-runStepDownTest({
- description: "update",
- failpoint: "hangDuringBatchUpdate",
- operation: () => primaryColl.update({_id: 'updateme'}, {'$set': {x: 1}})
-});
-
-runStepDownTest({
- description: "remove",
- failpoint: "hangDuringBatchRemove",
- operation: () => primaryColl.remove({_id: 'deleteme'}, {'$set': {x: 1}})
-});
-
-rst.stopSet();
-})();
diff --git a/jstests/replsets/libs/tags.js b/jstests/replsets/libs/tags.js
index bd6b7a946a7..816c4b6e2cb 100644
--- a/jstests/replsets/libs/tags.js
+++ b/jstests/replsets/libs/tags.js
@@ -168,9 +168,6 @@ var TagsTest = function(options) {
' agree that ' + nodeId + ' (' + replTest.nodes[nodeId].host +
') should be primary.');
- if (options.forceWriteMode) {
- primary.forceWriteMode(options.forceWriteMode);
- }
var writeConcern = {
writeConcern: {w: expectedWritableNodesCount, wtimeout: replTest.kDefaultTimeoutMS}
};
diff --git a/jstests/replsets/no_disconnect_on_stepdown.js b/jstests/replsets/no_disconnect_on_stepdown.js
index 6d7e6036956..dc84e254ae7 100644
--- a/jstests/replsets/no_disconnect_on_stepdown.js
+++ b/jstests/replsets/no_disconnect_on_stepdown.js
@@ -33,9 +33,6 @@ const coll = primaryDb[collname];
// Never retry on network error, because this test needs to detect the network error.
TestData.skipRetryOnNetworkError = true;
-// Legacy writes will still disconnect, so don't use them.
-primaryDataConn.forceWriteMode('commands');
-
assert.commandWorked(coll.insert([
{_id: 'update0', updateme: true},
{_id: 'update1', updateme: true},
@@ -63,8 +60,7 @@ function runStepDownTest({description, failpoint, operation, errorCode}) {
}));
errorCode = errorCode || ErrorCodes.InterruptedDueToReplStateChange;
- const writeCommand = `db.getMongo().forceWriteMode("commands");
- assert.commandFailedWithCode(${operation}, ${errorCode});
+ const writeCommand = `assert.commandFailedWithCode(${operation}, ${errorCode});
assert.commandWorked(db.adminCommand({ping:1}));`;
const waitForShell = startParallelShell(writeCommand, primary.port);
diff --git a/jstests/replsets/secondaryok_read_pref.js b/jstests/replsets/secondaryok_read_pref.js
index a4e4ea3701c..b6788af37da 100644
--- a/jstests/replsets/secondaryok_read_pref.js
+++ b/jstests/replsets/secondaryok_read_pref.js
@@ -26,31 +26,27 @@ assert.commandWorked(priDB.test.insert({a: 1}, {writeConcern: {w: "majority"}}))
const secDB = rst.getSecondary().getDB(jsTestName());
-for (let readMode of ["commands", "legacy"]) {
- for (let readPref of readPrefs) {
- for (let secondaryOk of [true, false]) {
- const testType = {readMode: readMode, readPref: readPref, secondaryOk: secondaryOk};
+for (let readPref of readPrefs) {
+ for (let secondaryOk of [true, false]) {
+ const testType = {readPref: readPref, secondaryOk: secondaryOk};
- secDB.getMongo().forceReadMode(readMode);
- secDB.getMongo().setSecondaryOk(secondaryOk);
+ secDB.getMongo().setSecondaryOk(secondaryOk);
- const cursor = (readPref ? secDB.test.find().readPref(readPref) : secDB.test.find());
+ const cursor = (readPref ? secDB.test.find().readPref(readPref) : secDB.test.find());
- if (readPref === "primary" || (!readPref && !secondaryOk)) {
- // Attempting to run the query throws an error of type NotPrimaryNoSecondaryOk.
- const secondaryOkErr = assert.throws(() => cursor.itcount(), [], tojson(testType));
- assert.commandFailedWithCode(secondaryOkErr, ErrorCodes.NotPrimaryNoSecondaryOk);
- } else {
- // Succeeds for all non-primary readPrefs, and for no readPref iff secondaryOk.
- const docCount = assert.doesNotThrow(() => cursor.itcount(), [], tojson(testType));
- assert.eq(docCount, 1);
- }
+ if (readPref === "primary" || (!readPref && !secondaryOk)) {
+ // Attempting to run the query throws an error of type NotPrimaryNoSecondaryOk.
+ const secondaryOkErr = assert.throws(() => cursor.itcount(), [], tojson(testType));
+ assert.commandFailedWithCode(secondaryOkErr, ErrorCodes.NotPrimaryNoSecondaryOk);
+ } else {
+ // Succeeds for all non-primary readPrefs, and for no readPref iff secondaryOk.
+ const docCount = assert.doesNotThrow(() => cursor.itcount(), [], tojson(testType));
+ assert.eq(docCount, 1);
}
}
}
function assertNotPrimaryNoSecondaryOk(func) {
- secDB.getMongo().forceReadMode("commands");
secDB.getMongo().setSecondaryOk(false);
secDB.getMongo().setReadPref("primary");
const res = assert.throws(func);
diff --git a/jstests/sharding/explainFind_stale_mongos.js b/jstests/sharding/explainFind_stale_mongos.js
index 8e74287e520..ec3029217cb 100644
--- a/jstests/sharding/explainFind_stale_mongos.js
+++ b/jstests/sharding/explainFind_stale_mongos.js
@@ -1,6 +1,6 @@
/**
* Tests that a stale mongos does not return a stale shardVersion error to the client for explain
- * find sent using the legacy query mode (it retries on the stale shardVersion error internally).
+ * of a find command.
*/
(function() {
"use strict";
@@ -38,8 +38,7 @@ if (clusterVersionInfo.isMixedVersion) {
}
jsTest.log("Run explain find on " + ns + " from the stale mongos");
-staleMongos.getDB(dbName).getMongo().forceReadMode("legacy");
-staleMongos.getDB(dbName).getCollection(collName).find({$query: {}, $explain: true}).next();
+staleMongos.getDB(dbName).getCollection(collName).explain().find().next();
st.stop();
})();
diff --git a/jstests/sharding/query/collation_targeting.js b/jstests/sharding/query/collation_targeting.js
index 0883d18c430..8e446f745aa 100644
--- a/jstests/sharding/query/collation_targeting.js
+++ b/jstests/sharding/query/collation_targeting.js
@@ -162,12 +162,10 @@ assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
// Find.
// Test a find command on strings with a non-simple collation. This should be scatter-gather.
-if (testDB.getMongo().useReadCommands()) {
- assert.eq(2, coll.find({a: "foo"}).collation(caseInsensitive).itcount());
- explain = coll.find({a: "foo"}).collation(caseInsensitive).explain();
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-}
+assert.eq(2, coll.find({a: "foo"}).collation(caseInsensitive).itcount());
+explain = coll.find({a: "foo"}).collation(caseInsensitive).explain();
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
// Test a find command with a simple collation. This should be single-shard.
assert.eq(1, coll.find({a: "foo"}).itcount());
@@ -176,12 +174,10 @@ assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
// Test a find command on numbers with a non-simple collation. This should be single-shard.
-if (testDB.getMongo().useReadCommands()) {
- assert.eq(1, coll.find({a: 100}).collation(caseInsensitive).itcount());
- explain = coll.find({a: 100}).collation(caseInsensitive).explain();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-}
+assert.eq(1, coll.find({a: 100}).collation(caseInsensitive).itcount());
+explain = coll.find({a: 100}).collation(caseInsensitive).explain();
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
// FindAndModify.
diff --git a/jstests/sharding/query/collation_targeting_inherited.js b/jstests/sharding/query/collation_targeting_inherited.js
index 1ae0c4f32f4..74ac599dc62 100644
--- a/jstests/sharding/query/collation_targeting_inherited.js
+++ b/jstests/sharding/query/collation_targeting_inherited.js
@@ -187,12 +187,10 @@ assert.commandWorked(explain);
assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
// Test a find command with a simple collation. This should be single-shard.
-if (testDB.getMongo().useReadCommands()) {
- assert.eq(1, collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).itcount());
- explain = collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).explain();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-}
+assert.eq(1, collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).itcount());
+explain = collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).explain();
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
// Test a find command on numbers with a non-simple collation inherited from the collection
// default. This should be single-shard.
diff --git a/jstests/sharding/query/comment_field_sharded.js b/jstests/sharding/query/comment_field_sharded.js
index 9482f6f4870..c87b8bf08a3 100644
--- a/jstests/sharding/query/comment_field_sharded.js
+++ b/jstests/sharding/query/comment_field_sharded.js
@@ -620,24 +620,5 @@ runCommentParamTest({
expectedRunningOps: 1
});
-//
-// Tests for Legacy query.
-//
-
-// Verify that $comment at top level is treated as a 'comment' field.
-const legacyComment = {
- testName: jsTestName(),
- commentField: "Legacy_find_comment"
-};
-runCommentParamTest({
- coll: shardedColl,
- expectedRunningOps: 2,
- cmdName: "find",
- commentObj: legacyComment,
- parallelFunction: `const sourceDB = db.getSiblingDB(jsTestName());
- sourceDB.getMongo().forceReadMode("legacy");
- sourceDB.coll.find({$query: {a: 1}, $comment: ${tojson(legacyComment)}});`
-});
-
st.stop();
})();
diff --git a/jstests/sharding/query/geo_near_sort.js b/jstests/sharding/query/geo_near_sort.js
index 8c3a19465c7..a283c475e18 100644
--- a/jstests/sharding/query/geo_near_sort.js
+++ b/jstests/sharding/query/geo_near_sort.js
@@ -69,21 +69,19 @@ function testSortOrders(query, indexSpec) {
assert.eq(res[0], docMinus2, tojson(res));
assert.eq(res[1], doc1, tojson(res));
- if (db.getMongo().useReadCommands()) {
- // Test a $near/$nearSphere query sorted by {a: 1} with the case-insensitive collation.
- res = coll.find(query).collation(caseInsensitive).sort({a: 1}).toArray();
- assert.eq(res.length, 4, tojson(res));
- assert.eq(res[0], doc2, tojson(res));
- assert.eq(res[1], docMinus1, tojson(res));
- assert.eq(res[2], doc1, tojson(res));
- assert.eq(res[3], docMinus2, tojson(res));
+ // Test a $near/$nearSphere query sorted by {a: 1} with the case-insensitive collation.
+ res = coll.find(query).collation(caseInsensitive).sort({a: 1}).toArray();
+ assert.eq(res.length, 4, tojson(res));
+ assert.eq(res[0], doc2, tojson(res));
+ assert.eq(res[1], docMinus1, tojson(res));
+ assert.eq(res[2], doc1, tojson(res));
+ assert.eq(res[3], docMinus2, tojson(res));
- // Test with a limit.
- res = coll.find(query).collation(caseInsensitive).sort({a: 1}).limit(2).toArray();
- assert.eq(res.length, 2, tojson(res));
- assert.eq(res[0], doc2, tojson(res));
- assert.eq(res[1], docMinus1, tojson(res));
- }
+ // Test with a limit.
+ res = coll.find(query).collation(caseInsensitive).sort({a: 1}).limit(2).toArray();
+ assert.eq(res.length, 2, tojson(res));
+ assert.eq(res[0], doc2, tojson(res));
+ assert.eq(res[1], docMinus1, tojson(res));
// Test a $near/$nearSphere query sorted by {b: 1}.
res = coll.find(query).sort({b: 1}).toArray();
diff --git a/jstests/sharding/query/mongos_query_comment.js b/jstests/sharding/query/mongos_query_comment.js
index 28fb8cb9c05..873f8017960 100644
--- a/jstests/sharding/query/mongos_query_comment.js
+++ b/jstests/sharding/query/mongos_query_comment.js
@@ -35,55 +35,6 @@ for (let i = 0; i < 5; ++i) {
assert.commandWorked(shardDB.setProfilingLevel(2));
const profiler = shardDB.system.profile;
-//
-// Set legacy read mode for the mongos and shard connections.
-//
-mongosDB.getMongo().forceReadMode("legacy");
-shardDB.getMongo().forceReadMode("legacy");
-
-// TEST CASE: A legacy string $comment meta-operator inside $query is propagated to the shards via
-// mongos but not treated as a 'comment' field.
-assert.eq(mongosColl.find({a: 1, $comment: "TEST"}).itcount(), 1);
-profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardDB,
- filter: {
- op: "query",
- ns: collNS,
- "command.filter": {a: 1, $comment: "TEST"},
- "command.comment": {$exists: false}
- }
-});
-
-// TEST CASE: A legacy string $comment meta-operator is propagated to the shards via mongos.
-assert.eq(mongosColl.find({$query: {a: 1}, $comment: "TEST"}).itcount(), 1);
-profilerHasSingleMatchingEntryOrThrow(
- {profileDB: shardDB, filter: {op: "query", ns: collNS, "command.comment": "TEST"}});
-
-// TEST CASE: A legacy BSONObj $comment propagated via mongos.
-assert.eq(mongosColl.find({$query: {a: 1}, $comment: {c: 2, d: {e: "TEST"}}}).itcount(), 1);
-profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardDB,
- filter: {
- op: "query",
- ns: collNS,
- "command.comment": {c: 2, d: {e: "TEST"}},
- "command.filter": {a: 1}
- }
-});
-
-// TEST CASE: Legacy BSONObj $comment when issued on the mongod.
-assert.eq(shardColl.find({$query: {a: 1}, $comment: {c: 3, d: {e: "TEST"}}}).itcount(), 1);
-profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardDB,
- filter: {op: "query", ns: collNS, "command.comment": {c: 3, d: {e: "TEST"}}}
-});
-
-//
-// Revert to "commands" read mode for the find command test cases below.
-//
-mongosDB.getMongo().forceReadMode("commands");
-shardDB.getMongo().forceReadMode("commands");
-
// TEST CASE: Verify that find.comment and non-string find.filter.$comment propagate.
assert.eq(mongosColl.find({a: 1, $comment: {b: "TEST"}}).comment("TEST").itcount(), 1);
profilerHasSingleMatchingEntryOrThrow({
diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js
index 97ca77d7556..3a0c928f92f 100644
--- a/jstests/sharding/read_pref.js
+++ b/jstests/sharding/read_pref.js
@@ -17,18 +17,11 @@ var PRI_TAG = {dc: 'ny'};
var SEC_TAGS = [{dc: 'sf', s: "1"}, {dc: 'ma', s: "2"}, {dc: 'eu', s: "3"}, {dc: 'jp', s: "4"}];
var NODES = SEC_TAGS.length + 1;
-var doTest = function(useDollarQuerySyntax) {
+var doTest = function() {
var st = new ShardingTest({shards: {rs0: {nodes: NODES, oplogSize: 10, useHostName: true}}});
var replTest = st.rs0;
var primaryNode = replTest.getPrimary();
- // The $-prefixed query syntax is only legal for compatibility mode reads, not for the
- // find/getMore commands.
- if (useDollarQuerySyntax && st.s.getDB("test").getMongo().useReadCommands()) {
- st.stop();
- return;
- }
-
var setupConf = function() {
var replConf = primaryNode.getDB('local').system.replset.findOne();
replConf.version = (replConf.version || 0) + 1;
@@ -107,19 +100,7 @@ var doTest = function(useDollarQuerySyntax) {
});
var getExplain = function(readPrefMode, readPrefTags) {
- if (useDollarQuerySyntax) {
- var readPrefObj = {mode: readPrefMode};
-
- if (readPrefTags) {
- readPrefObj.tags = readPrefTags;
- }
-
- return coll.find({$query: {}, $readPreference: readPrefObj, $explain: true})
- .limit(-1)
- .next();
- } else {
- return coll.find().readPref(readPrefMode, readPrefTags).explain("executionStats");
- }
+ return coll.find().readPref(readPrefMode, readPrefTags).explain("executionStats");
};
var getExplainServer = function(explain) {
@@ -205,6 +186,5 @@ var doTest = function(useDollarQuerySyntax) {
st.stop();
};
-doTest(false);
-doTest(true);
+doTest();
})();
diff --git a/jstests/sharding/read_write_concern_defaults_application.js b/jstests/sharding/read_write_concern_defaults_application.js
index 477ec0bee83..709ee968f32 100644
--- a/jstests/sharding/read_write_concern_defaults_application.js
+++ b/jstests/sharding/read_write_concern_defaults_application.js
@@ -855,7 +855,7 @@ function runScenario(
// Do any test-specific setup.
if (typeof (test.setUp) === "function") {
- conn._runWithForcedReadMode("commands", test.setUp);
+ test.setUp(conn);
}
// Get the command from the test case.
diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
index ea5b8a7760c..ccb38cbfe3d 100644
--- a/jstests/sharding/sharding_rs2.js
+++ b/jstests/sharding/sharding_rs2.js
@@ -204,7 +204,6 @@ assert.lte(before.query + 10, after.query, "D3");
// by shard key
m = new Mongo(s.s.name);
-m.forceWriteMode("commands");
s.printShardingStatus();