Diffstat (limited to 'jstests/noPassthroughWithMongod')
-rw-r--r--  jstests/noPassthroughWithMongod/apply_ops_errors.js | 82
-rw-r--r--  jstests/noPassthroughWithMongod/bench_test_crud_commands.js | 147
-rw-r--r--  jstests/noPassthroughWithMongod/capped_truncate.js | 92
-rw-r--r--  jstests/noPassthroughWithMongod/captrunc_cursor_invalidation.js | 50
-rw-r--r--  jstests/noPassthroughWithMongod/coll_mod_takes_database_x_lock.js | 42
-rw-r--r--  jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js | 112
-rw-r--r--  jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js | 316
-rw-r--r--  jstests/noPassthroughWithMongod/connections_opened.js | 10
-rw-r--r--  jstests/noPassthroughWithMongod/create_indexes_shell_helper.js | 134
-rw-r--r--  jstests/noPassthroughWithMongod/create_indexes_waits_for_already_in_progress.js | 223
-rw-r--r--  jstests/noPassthroughWithMongod/currentop_includes_connid.js | 14
-rw-r--r--  jstests/noPassthroughWithMongod/currentop_plan_summary_no_dup.js | 98
-rw-r--r--  jstests/noPassthroughWithMongod/cursor_server_status_metrics.js | 136
-rw-r--r--  jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js | 26
-rw-r--r--  jstests/noPassthroughWithMongod/default_read_pref.js | 95
-rw-r--r--  jstests/noPassthroughWithMongod/dup_bgindex.js | 32
-rw-r--r--  jstests/noPassthroughWithMongod/exchangeProducer.js | 487
-rw-r--r--  jstests/noPassthroughWithMongod/external_sort_text_agg.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/ftdc_params.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/geo_polygon.js | 15
-rw-r--r--  jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js | 70
-rw-r--r--  jstests/noPassthroughWithMongod/host_connection_string_validation.js | 207
-rw-r--r--  jstests/noPassthroughWithMongod/index_boundary_values_validate.js | 40
-rw-r--r--  jstests/noPassthroughWithMongod/index_limits_not_bypassed.js | 49
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_restart_secondary.js | 126
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js | 166
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_updates.js | 96
-rw-r--r--  jstests/noPassthroughWithMongod/insertMulti.js | 82
-rw-r--r--  jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js | 306
-rw-r--r--  jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js | 87
-rw-r--r--  jstests/noPassthroughWithMongod/log_component_helpers.js | 81
-rw-r--r--  jstests/noPassthroughWithMongod/logpath.js | 3
-rw-r--r--  jstests/noPassthroughWithMongod/moveprimary-replset.js | 104
-rw-r--r--  jstests/noPassthroughWithMongod/mr_writeconflict.js | 103
-rw-r--r--  jstests/noPassthroughWithMongod/ne_array_indexability.js | 68
-rw-r--r--  jstests/noPassthroughWithMongod/now_variable.js | 229
-rw-r--r--  jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js | 96
-rw-r--r--  jstests/noPassthroughWithMongod/plan_cache_replanning.js | 227
-rw-r--r--  jstests/noPassthroughWithMongod/query_oplogreplay.js | 448
-rw-r--r--  jstests/noPassthroughWithMongod/renameWithWCE.js | 96
-rw-r--r--  jstests/noPassthroughWithMongod/replset_host_connection_validation.js | 126
-rw-r--r--  jstests/noPassthroughWithMongod/rpc_protocols.js | 105
-rw-r--r--  jstests/noPassthroughWithMongod/shell_advance_cluster_time.js | 30
-rw-r--r--  jstests/noPassthroughWithMongod/shelllimit.js | 26
-rw-r--r--  jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js | 67
-rw-r--r--  jstests/noPassthroughWithMongod/temp_namespace.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/top_drop.js | 118
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_repl.js | 3
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_repl_maintenance.js | 3
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js | 3
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_sharded.js | 3
-rw-r--r--  jstests/noPassthroughWithMongod/validate_command.js | 52
-rw-r--r--  jstests/noPassthroughWithMongod/validate_interrupt.js | 76
-rw-r--r--  jstests/noPassthroughWithMongod/views_invalid.js | 126
-rw-r--r--  jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js | 60
55 files changed, 2791 insertions(+), 2820 deletions(-)
diff --git a/jstests/noPassthroughWithMongod/apply_ops_errors.js b/jstests/noPassthroughWithMongod/apply_ops_errors.js
index 9441d006e43..0cf4a789718 100644
--- a/jstests/noPassthroughWithMongod/apply_ops_errors.js
+++ b/jstests/noPassthroughWithMongod/apply_ops_errors.js
@@ -13,45 +13,45 @@
*/
(function() {
- "use strict";
- var coll = db.apply_ops_errors;
- coll.drop();
-
- // Scenario 1: only one operation
- assert.eq(0, coll.find().count(), "test collection not empty");
- coll.ensureIndex({x: 1}, {unique: true});
- coll.insert({_id: 1, x: "init"});
-
- var res = db.runCommand({
- applyOps: [
- {op: "i", ns: coll.getFullName(), o: {_id: 2, x: "init"}},
- ]
- });
-
- assert.eq(1, res.applied);
- assert(res.code);
- assert(res.errmsg);
- assert.eq([false], res.results);
- assert.eq(0, res.ok);
-
- coll.drop();
-
- // Scenario 2: Three operations, first two should run, second should fail.
- assert.eq(0, coll.find().count(), "test collection not empty");
- coll.ensureIndex({x: 1}, {unique: true});
- coll.insert({_id: 1, x: "init"});
-
- var res = db.runCommand({
- applyOps: [
- {op: "i", ns: coll.getFullName(), o: {_id: 3, x: "not init"}},
- {op: "i", ns: coll.getFullName(), o: {_id: 4, x: "init"}},
- {op: "i", ns: coll.getFullName(), o: {_id: 5, x: "not init again"}},
- ]
- });
-
- assert.eq(2, res.applied);
- assert(res.code);
- assert(res.errmsg);
- assert.eq([false, false], res.results);
- assert.eq(0, res.ok);
+"use strict";
+var coll = db.apply_ops_errors;
+coll.drop();
+
+// Scenario 1: only one operation
+assert.eq(0, coll.find().count(), "test collection not empty");
+coll.ensureIndex({x: 1}, {unique: true});
+coll.insert({_id: 1, x: "init"});
+
+var res = db.runCommand({
+ applyOps: [
+ {op: "i", ns: coll.getFullName(), o: {_id: 2, x: "init"}},
+ ]
+});
+
+assert.eq(1, res.applied);
+assert(res.code);
+assert(res.errmsg);
+assert.eq([false], res.results);
+assert.eq(0, res.ok);
+
+coll.drop();
+
+// Scenario 2: Three operations, first two should run, second should fail.
+assert.eq(0, coll.find().count(), "test collection not empty");
+coll.ensureIndex({x: 1}, {unique: true});
+coll.insert({_id: 1, x: "init"});
+
+var res = db.runCommand({
+ applyOps: [
+ {op: "i", ns: coll.getFullName(), o: {_id: 3, x: "not init"}},
+ {op: "i", ns: coll.getFullName(), o: {_id: 4, x: "init"}},
+ {op: "i", ns: coll.getFullName(), o: {_id: 5, x: "not init again"}},
+ ]
+});
+
+assert.eq(2, res.applied);
+assert(res.code);
+assert(res.errmsg);
+assert.eq([false, false], res.results);
+assert.eq(0, res.ok);
})();
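
A note on what the hunk above pins down: when any op inside applyOps fails (here, a duplicate key on a unique index), the command returns ok: 0, a top-level code and errmsg, a per-op results array, and applied set to the number of ops attempted. A minimal sketch of the single-op case, assuming a mongo shell connected to a standalone mongod (the demo_apply_ops collection name is illustrative):

    var demo = db.demo_apply_ops;
    demo.drop();
    demo.createIndex({x: 1}, {unique: true});
    assert.writeOK(demo.insert({_id: 1, x: "init"}));
    // This insert collides with {_id: 1} on the unique index over x.
    var res = db.runCommand(
        {applyOps: [{op: "i", ns: demo.getFullName(), o: {_id: 2, x: "init"}}]});
    assert.eq(0, res.ok);             // the command as a whole fails
    assert.eq(1, res.applied);        // one op was attempted
    assert.eq([false], res.results);  // and it did not apply
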
diff --git a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
index c043a6dd056..e6db1e1bcb9 100644
--- a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
+++ b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
@@ -1,102 +1,93 @@
// Tests the "writeCmd" and "readCmd" options to benchRun().
(function() {
- "use strict";
+"use strict";
- var coll = db.bench_test_crud_commands;
- coll.drop();
- assert.commandWorked(coll.getDB().createCollection(coll.getName()));
+var coll = db.bench_test_crud_commands;
+coll.drop();
+assert.commandWorked(coll.getDB().createCollection(coll.getName()));
- function makeDocument(docSize) {
- var doc = {"fieldName": ""};
- var longString = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
- while (Object.bsonsize(doc) < docSize) {
- if (Object.bsonsize(doc) < docSize - longString.length) {
- doc.fieldName += longString;
- } else {
- doc.fieldName += "x";
- }
+function makeDocument(docSize) {
+ var doc = {"fieldName": ""};
+ var longString = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
+ while (Object.bsonsize(doc) < docSize) {
+ if (Object.bsonsize(doc) < docSize - longString.length) {
+ doc.fieldName += longString;
+ } else {
+ doc.fieldName += "x";
}
- return doc;
}
+ return doc;
+}
- function executeBenchRun(benchOps) {
- var benchArgs = {ops: benchOps, parallel: 2, seconds: 5, host: db.getMongo().host};
- if (jsTest.options().auth) {
- benchArgs['db'] = 'admin';
- benchArgs['username'] = jsTest.options().authUser;
- benchArgs['password'] = jsTest.options().authPassword;
- }
- return benchRun(benchArgs);
+function executeBenchRun(benchOps) {
+ var benchArgs = {ops: benchOps, parallel: 2, seconds: 5, host: db.getMongo().host};
+ if (jsTest.options().auth) {
+ benchArgs['db'] = 'admin';
+ benchArgs['username'] = jsTest.options().authUser;
+ benchArgs['password'] = jsTest.options().authPassword;
}
+ return benchRun(benchArgs);
+}
+
+function testInsert(docs, writeCmd, wc) {
+ coll.drop();
- function testInsert(docs, writeCmd, wc) {
- coll.drop();
+ var res = executeBenchRun(
+ [{ns: coll.getFullName(), op: "insert", doc: docs, writeCmd: writeCmd, writeConcern: wc}]);
- var res = executeBenchRun([{
- ns: coll.getFullName(),
- op: "insert",
- doc: docs,
- writeCmd: writeCmd,
- writeConcern: wc
- }]);
+ assert.gt(coll.count(), 0);
+ assert.eq(coll.findOne({}, {_id: 0}), docs[0]);
+}
- assert.gt(coll.count(), 0);
- assert.eq(coll.findOne({}, {_id: 0}), docs[0]);
+function testFind(readCmd) {
+ coll.drop();
+ for (var i = 0; i < 100; i++) {
+ assert.writeOK(coll.insert({}));
}
- function testFind(readCmd) {
- coll.drop();
- for (var i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({}));
- }
+ var res = executeBenchRun([
+ {ns: coll.getFullName(), op: "find", query: {}, batchSize: NumberInt(10), readCmd: readCmd}
+ ]);
+ assert.gt(res.query, 0, tojson(res));
+}
- var res = executeBenchRun([{
- ns: coll.getFullName(),
- op: "find",
- query: {},
- batchSize: NumberInt(10),
- readCmd: readCmd
- }]);
- assert.gt(res.query, 0, tojson(res));
+function testFindOne(readCmd) {
+ coll.drop();
+ for (var i = 0; i < 100; i++) {
+ assert.writeOK(coll.insert({}));
}
- function testFindOne(readCmd) {
- coll.drop();
- for (var i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({}));
- }
+ var res =
+ executeBenchRun([{ns: coll.getFullName(), op: "findOne", query: {}, readCmd: readCmd}]);
+ assert.gt(res.findOne, 0, tojson(res));
+}
- var res =
- executeBenchRun([{ns: coll.getFullName(), op: "findOne", query: {}, readCmd: readCmd}]);
- assert.gt(res.findOne, 0, tojson(res));
+function testWriteConcern(writeCmd) {
+ var bigDoc = makeDocument(260 * 1024);
+ var docs = [];
+ for (var i = 0; i < 100; i++) {
+ docs.push({x: 1});
}
- function testWriteConcern(writeCmd) {
- var bigDoc = makeDocument(260 * 1024);
- var docs = [];
- for (var i = 0; i < 100; i++) {
- docs.push({x: 1});
- }
+ testInsert([bigDoc], writeCmd, {});
+ testInsert(docs, writeCmd, {});
+ testInsert(docs, writeCmd, {"w": "majority"});
+ testInsert(docs, writeCmd, {"w": 1, "j": false});
- testInsert([bigDoc], writeCmd, {});
- testInsert(docs, writeCmd, {});
- testInsert(docs, writeCmd, {"w": "majority"});
- testInsert(docs, writeCmd, {"w": 1, "j": false});
-
- var storageEnginesWithoutJournaling = new Set(["ephemeralForTest", "inMemory"]);
- var runningWithoutJournaling = TestData.noJournal ||
- storageEnginesWithoutJournaling.has(db.serverStatus().storageEngine.name);
- if (!runningWithoutJournaling) {
- // Only test journaled writes if the server actually supports them.
- testInsert(docs, writeCmd, {"j": true});
- }
+ var storageEnginesWithoutJournaling = new Set(["ephemeralForTest", "inMemory"]);
+ var runningWithoutJournaling = TestData.noJournal ||
+ storageEnginesWithoutJournaling.has(db.serverStatus().storageEngine.name);
+ if (!runningWithoutJournaling) {
+ // Only test journaled writes if the server actually supports them.
+ testInsert(docs, writeCmd, {"j": true});
}
+}
- testWriteConcern(false);
- testWriteConcern(true);
+testWriteConcern(false);
+testWriteConcern(true);
- testFind(false);
- testFind(true);
- testFindOne(false);
- testFindOne(true);
+testFind(false);
+testFind(true);
+testFindOne(false);
+testFindOne(true);
})();
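
For reference, the shape of a benchRun() call as exercised above: each entry in ops names the namespace and operation, and the writeCmd/readCmd flags select the write- and read-command code paths under test. A minimal sketch, assuming a shell connected to a mongod and an illustrative bench_demo collection:

    var benchColl = db.bench_demo;
    benchColl.drop();
    var res = benchRun({
        ops: [{ns: benchColl.getFullName(), op: "insert", doc: [{x: 1}], writeCmd: true}],
        parallel: 2,  // two client threads
        seconds: 1,   // run briefly
        host: db.getMongo().host
    });
    printjson(res);                   // per-op throughput counters
    assert.gt(benchColl.count(), 0);  // the workload actually wrote documents
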
diff --git a/jstests/noPassthroughWithMongod/capped_truncate.js b/jstests/noPassthroughWithMongod/capped_truncate.js
index 1f4cf236c57..25e023c890d 100644
--- a/jstests/noPassthroughWithMongod/capped_truncate.js
+++ b/jstests/noPassthroughWithMongod/capped_truncate.js
@@ -8,50 +8,50 @@
* @tags: [SERVER-21658, requires_capped]
*/
(function() {
- 'use strict';
-
- db.capped_truncate.drop();
- assert.commandWorked(
- db.runCommand({create: "capped_truncate", capped: true, size: 1000, autoIndexId: true}));
- var t = db.capped_truncate;
-
- // It is an error to remove a non-positive number of documents.
- assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: -1}),
- "captrunc didn't return an error when attempting to remove a negative " +
- "number of documents");
- assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: 0}),
- "captrunc didn't return an error when attempting to remove 0 documents");
-
- for (var j = 1; j <= 10; j++) {
- assert.writeOK(t.insert({x: j}));
- }
-
-    // It is an error to try to remove more documents than exist in the capped collection.
- assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: 20}),
- "captrunc didn't return an error when attempting to remove more" +
- " documents than what the collection contains");
-
- assert.commandWorked(db.runCommand({captrunc: "capped_truncate", n: 5, inc: false}));
- assert.eq(5, t.count(), "wrong number of documents in capped collection after truncate");
- assert.eq(5, t.distinct("_id").length, "wrong number of entries in _id index after truncate");
-
- var last = t.find({}, {_id: 1}).sort({_id: -1}).next();
- assert.neq(null,
- t.findOne({_id: last._id}),
- tojson(last) + " is in _id index, but not in capped collection after truncate");
-
- // It is an error to run the captrunc command on a nonexistent collection.
- assert.commandFailed(db.runCommand({captrunc: "nonexistent", n: 1}),
- "captrunc didn't return an error for a nonexistent collection");
-
- // It is an error to run the captrunc command on a non-capped collection.
- var collName = "noncapped";
- db[collName].drop();
-
- assert.commandWorked(db.runCommand({create: collName, capped: false}));
- for (var j = 1; j <= 10; j++) {
- assert.writeOK(db[collName].insert({x: j}));
- }
- assert.commandFailed(db.runCommand({captrunc: collName, n: 5}),
- "captrunc didn't return an error for a non-capped collection");
+'use strict';
+
+db.capped_truncate.drop();
+assert.commandWorked(
+ db.runCommand({create: "capped_truncate", capped: true, size: 1000, autoIndexId: true}));
+var t = db.capped_truncate;
+
+// It is an error to remove a non-positive number of documents.
+assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: -1}),
+ "captrunc didn't return an error when attempting to remove a negative " +
+ "number of documents");
+assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: 0}),
+ "captrunc didn't return an error when attempting to remove 0 documents");
+
+for (var j = 1; j <= 10; j++) {
+ assert.writeOK(t.insert({x: j}));
+}
+
+// It is an error to try to remove more documents than exist in the capped collection.
+assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: 20}),
+ "captrunc didn't return an error when attempting to remove more" +
+ " documents than what the collection contains");
+
+assert.commandWorked(db.runCommand({captrunc: "capped_truncate", n: 5, inc: false}));
+assert.eq(5, t.count(), "wrong number of documents in capped collection after truncate");
+assert.eq(5, t.distinct("_id").length, "wrong number of entries in _id index after truncate");
+
+var last = t.find({}, {_id: 1}).sort({_id: -1}).next();
+assert.neq(null,
+ t.findOne({_id: last._id}),
+ tojson(last) + " is in _id index, but not in capped collection after truncate");
+
+// It is an error to run the captrunc command on a nonexistent collection.
+assert.commandFailed(db.runCommand({captrunc: "nonexistent", n: 1}),
+ "captrunc didn't return an error for a nonexistent collection");
+
+// It is an error to run the captrunc command on a non-capped collection.
+var collName = "noncapped";
+db[collName].drop();
+
+assert.commandWorked(db.runCommand({create: collName, capped: false}));
+for (var j = 1; j <= 10; j++) {
+ assert.writeOK(db[collName].insert({x: j}));
+}
+assert.commandFailed(db.runCommand({captrunc: collName, n: 5}),
+ "captrunc didn't return an error for a non-capped collection");
})();
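
captrunc, exercised above, is an internal test-only command: n is the number of most recently inserted documents to remove, and the inc flag adjusts where the truncation boundary falls (the test above relies only on inc: false removing exactly n). A condensed sketch against an illustrative cap_demo collection:

    db.cap_demo.drop();
    assert.commandWorked(db.createCollection("cap_demo", {capped: true, size: 1024}));
    for (var i = 1; i <= 10; i++) {
        assert.writeOK(db.cap_demo.insert({x: i}));
    }
    // Drop the 5 most recently inserted documents.
    assert.commandWorked(db.runCommand({captrunc: "cap_demo", n: 5, inc: false}));
    assert.eq(5, db.cap_demo.count());
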
diff --git a/jstests/noPassthroughWithMongod/captrunc_cursor_invalidation.js b/jstests/noPassthroughWithMongod/captrunc_cursor_invalidation.js
index 3b1f7337133..2526fed9636 100644
--- a/jstests/noPassthroughWithMongod/captrunc_cursor_invalidation.js
+++ b/jstests/noPassthroughWithMongod/captrunc_cursor_invalidation.js
@@ -3,35 +3,35 @@
//
// @tags: [requires_capped]
(function() {
- "use strict";
+"use strict";
- const coll = db.captrunc_cursor_invalidation;
- coll.drop();
+const coll = db.captrunc_cursor_invalidation;
+coll.drop();
- // Create a capped collection with four documents.
- assert.commandWorked(db.createCollection(coll.getName(), {capped: true, size: 1024}));
- const numDocs = 4;
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; ++i) {
- bulk.insert({_id: i});
- }
- assert.commandWorked(bulk.execute());
+// Create a capped collection with four documents.
+assert.commandWorked(db.createCollection(coll.getName(), {capped: true, size: 1024}));
+const numDocs = 4;
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute());
- // Open a tailable cursor against the capped collection.
- const findRes = assert.commandWorked(db.runCommand({find: coll.getName(), tailable: true}));
- assert.neq(findRes.cursor.id, 0);
- assert.eq(findRes.cursor.ns, coll.getFullName());
- assert.eq(findRes.cursor.firstBatch.length, 4);
- const cursorId = findRes.cursor.id;
+// Open a tailable cursor against the capped collection.
+const findRes = assert.commandWorked(db.runCommand({find: coll.getName(), tailable: true}));
+assert.neq(findRes.cursor.id, 0);
+assert.eq(findRes.cursor.ns, coll.getFullName());
+assert.eq(findRes.cursor.firstBatch.length, 4);
+const cursorId = findRes.cursor.id;
- // Truncate the capped collection so that the cursor's position no longer exists.
- assert.commandWorked(db.runCommand({captrunc: coll.getName(), n: 2}));
+// Truncate the capped collection so that the cursor's position no longer exists.
+assert.commandWorked(db.runCommand({captrunc: coll.getName(), n: 2}));
- // A subsequent getMore should fail with 'CappedPositionLost'.
- assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: coll.getName()}),
- ErrorCodes.CappedPositionLost);
+// A subsequent getMore should fail with 'CappedPositionLost'.
+assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: coll.getName()}),
+ ErrorCodes.CappedPositionLost);
- // The cursor has now been destroyed, so another getMore should fail with 'CursorNotFound'.
- assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: coll.getName()}),
- ErrorCodes.CursorNotFound);
+// The cursor has now been destroyed, so another getMore should fail with 'CursorNotFound'.
+assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: coll.getName()}),
+ ErrorCodes.CursorNotFound);
}());
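
The error contract being locked in above, in short: the first getMore after the cursor's position is truncated away fails with CappedPositionLost, and because that failure destroys the cursor, any retry fails with CursorNotFound rather than repeating the same code. A client retrying a tailable cursor therefore has to treat both codes as "reestablish from scratch"; a sketch of such handling (illustrative, not part of the test):

    function resumeTailable(collName, cursorId) {
        const res = db.runCommand({getMore: cursorId, collection: collName});
        if (res.ok) {
            return res.cursor.id;  // keep tailing with the same cursor
        }
        // Either code means this cursor is unusable; open a fresh one.
        assert.contains(res.code,
                        [ErrorCodes.CappedPositionLost, ErrorCodes.CursorNotFound]);
        return assert.commandWorked(db.runCommand({find: collName, tailable: true}))
            .cursor.id;
    }
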
diff --git a/jstests/noPassthroughWithMongod/coll_mod_takes_database_x_lock.js b/jstests/noPassthroughWithMongod/coll_mod_takes_database_x_lock.js
index bf78d13f887..b7fba31017e 100644
--- a/jstests/noPassthroughWithMongod/coll_mod_takes_database_x_lock.js
+++ b/jstests/noPassthroughWithMongod/coll_mod_takes_database_x_lock.js
@@ -2,32 +2,32 @@
* Ensures that the 'collMod' command takes a database MODE_X lock during a no-op.
*/
(function() {
- 'use strict';
+'use strict';
- const failpoint = 'hangAfterDatabaseLock';
- assert.commandWorked(db.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
+const failpoint = 'hangAfterDatabaseLock';
+assert.commandWorked(db.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
- const conn = db.getMongo();
- db.createCollection('foo');
+const conn = db.getMongo();
+db.createCollection('foo');
- // Run a no-op collMod command.
- const awaitParallelShell = startParallelShell(() => {
- assert.commandWorked(db.runCommand({collMod: 'foo'}));
- }, conn.port);
+// Run a no-op collMod command.
+const awaitParallelShell = startParallelShell(() => {
+ assert.commandWorked(db.runCommand({collMod: 'foo'}));
+}, conn.port);
- // Check that the database MODE_X lock is being held by checking in lockInfo.
- assert.soon(() => {
- let lockInfo = assert.commandWorked(db.adminCommand({lockInfo: 1})).lockInfo;
- for (let i = 0; i < lockInfo.length; i++) {
- let resourceId = lockInfo[i].resourceId;
- if (resourceId.includes("Database") && resourceId.includes("test")) {
- return true;
- }
+// Check that the database MODE_X lock is being held by checking in lockInfo.
+assert.soon(() => {
+ let lockInfo = assert.commandWorked(db.adminCommand({lockInfo: 1})).lockInfo;
+ for (let i = 0; i < lockInfo.length; i++) {
+ let resourceId = lockInfo[i].resourceId;
+ if (resourceId.includes("Database") && resourceId.includes("test")) {
+ return true;
}
+ }
- return false;
- });
+ return false;
+});
- assert.commandWorked(db.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- awaitParallelShell();
+assert.commandWorked(db.adminCommand({configureFailPoint: failpoint, mode: "off"}));
+awaitParallelShell();
})();
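
The structure above is the standard failpoint recipe: enable a named failpoint, launch the operation in a parallel shell so the main shell stays free to observe the server mid-operation, then disable the failpoint and join the shell. A reusable skeleton, where 'someFailPoint' is a placeholder name and collMod on 'foo' stands in for the operation under test:

    assert.commandWorked(
        db.adminCommand({configureFailPoint: "someFailPoint", mode: "alwaysOn"}));
    const join = startParallelShell(() => {
        assert.commandWorked(db.runCommand({collMod: "foo"}));  // operation under test
    }, db.getMongo().port);
    // ... inspect server state here, e.g. assert.soon over db.adminCommand({lockInfo: 1}) ...
    assert.commandWorked(
        db.adminCommand({configureFailPoint: "someFailPoint", mode: "off"}));
    join();  // wait for the parallel shell to finish
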
diff --git a/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js b/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js
index 293ab4e45bf..1474626e757 100644
--- a/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js
+++ b/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js
@@ -2,70 +2,70 @@
* Ensures that the 'collStats' command lists indexes that are ready and in-progress.
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/index_build.js');
+load('jstests/noPassthrough/libs/index_build.js');
- const collName = "collstats_show_ready_and_in_progress_indexes";
- const testDB = db.getSiblingDB("test");
- const testColl = db.getCollection(collName);
- testColl.drop();
+const collName = "collstats_show_ready_and_in_progress_indexes";
+const testDB = db.getSiblingDB("test");
+const testColl = db.getCollection(collName);
+testColl.drop();
- const bulk = testColl.initializeUnorderedBulkOp();
- for (let i = 0; i < 5; ++i) {
- bulk.insert({a: i, b: i * i});
- }
- assert.commandWorked(bulk.execute());
+const bulk = testColl.initializeUnorderedBulkOp();
+for (let i = 0; i < 5; ++i) {
+ bulk.insert({a: i, b: i * i});
+}
+assert.commandWorked(bulk.execute());
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "hangAfterStartingIndexBuildUnlocked", mode: "alwaysOn"}));
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: "hangAfterStartingIndexBuildUnlocked", mode: "alwaysOn"}));
- let awaitParallelShell;
- try {
- jsTest.log("Starting a parallel shell to run two background index builds");
- awaitParallelShell = startParallelShell(() => {
- db.getSiblingDB("test").runCommand({
- createIndexes: "collstats_show_ready_and_in_progress_indexes",
- indexes: [
- {key: {a: 1}, name: 'a_1', background: true},
- {key: {b: 1}, name: 'b_1', background: true}
- ]
- });
- }, db.getMongo().port);
+let awaitParallelShell;
+try {
+ jsTest.log("Starting a parallel shell to run two background index builds");
+ awaitParallelShell = startParallelShell(() => {
+ db.getSiblingDB("test").runCommand({
+ createIndexes: "collstats_show_ready_and_in_progress_indexes",
+ indexes: [
+ {key: {a: 1}, name: 'a_1', background: true},
+ {key: {b: 1}, name: 'b_1', background: true}
+ ]
+ });
+ }, db.getMongo().port);
- jsTest.log("Waiting until the index build begins.");
- // Note that we cannot use checkLog here to wait for the failpoint logging because this test
- // shares a mongod with other tests that might have already provoked identical failpoint
- // logging.
- IndexBuildTest.waitForIndexBuildToScanCollection(testDB, testColl.getName(), 'b_1');
+ jsTest.log("Waiting until the index build begins.");
+ // Note that we cannot use checkLog here to wait for the failpoint logging because this test
+ // shares a mongod with other tests that might have already provoked identical failpoint
+ // logging.
+ IndexBuildTest.waitForIndexBuildToScanCollection(testDB, testColl.getName(), 'b_1');
- jsTest.log("Running collStats on collection '" + collName +
- "' to check for expected 'indexSizes', 'nindexes' and 'indexBuilds' results");
- const collStatsRes = assert.commandWorked(db.runCommand({collStats: collName}));
+ jsTest.log("Running collStats on collection '" + collName +
+ "' to check for expected 'indexSizes', 'nindexes' and 'indexBuilds' results");
+ const collStatsRes = assert.commandWorked(db.runCommand({collStats: collName}));
- assert(typeof(collStatsRes.indexSizes._id_) != 'undefined',
- "expected 'indexSizes._id_' to exist: " + tojson(collStatsRes));
- assert(typeof(collStatsRes.indexSizes.a_1) != 'undefined',
- "expected 'indexSizes.a_1' to exist: " + tojson(collStatsRes));
- assert(typeof(collStatsRes.indexSizes.b_1) != 'undefined',
- "expected 'indexSizes.b_1' to exist: " + tojson(collStatsRes));
+ assert(typeof (collStatsRes.indexSizes._id_) != 'undefined',
+ "expected 'indexSizes._id_' to exist: " + tojson(collStatsRes));
+ assert(typeof (collStatsRes.indexSizes.a_1) != 'undefined',
+ "expected 'indexSizes.a_1' to exist: " + tojson(collStatsRes));
+ assert(typeof (collStatsRes.indexSizes.b_1) != 'undefined',
+ "expected 'indexSizes.b_1' to exist: " + tojson(collStatsRes));
- assert.eq(3, collStatsRes.nindexes, "expected 'nindexes' to be 3: " + tojson(collStatsRes));
+ assert.eq(3, collStatsRes.nindexes, "expected 'nindexes' to be 3: " + tojson(collStatsRes));
- assert.eq(2,
- collStatsRes.indexBuilds.length,
- "expected to find 2 entries in 'indexBuilds': " + tojson(collStatsRes));
- assert.eq('a_1',
- collStatsRes.indexBuilds[0],
- "expected to find an 'a_1' index build:" + tojson(collStatsRes));
- assert.eq('b_1',
- collStatsRes.indexBuilds[1],
- "expected to find an 'b_1' index build:" + tojson(collStatsRes));
- } finally {
- // Ensure the failpoint is unset, even if there are assertion failures, so that we do not
- // hang the test/mongod.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "hangAfterStartingIndexBuildUnlocked", mode: "off"}));
- awaitParallelShell();
- }
+ assert.eq(2,
+ collStatsRes.indexBuilds.length,
+ "expected to find 2 entries in 'indexBuilds': " + tojson(collStatsRes));
+ assert.eq('a_1',
+ collStatsRes.indexBuilds[0],
+ "expected to find an 'a_1' index build:" + tojson(collStatsRes));
+ assert.eq('b_1',
+ collStatsRes.indexBuilds[1],
+ "expected to find an 'b_1' index build:" + tojson(collStatsRes));
+} finally {
+ // Ensure the failpoint is unset, even if there are assertion failures, so that we do not
+ // hang the test/mongod.
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "hangAfterStartingIndexBuildUnlocked", mode: "off"}));
+ awaitParallelShell();
+}
})();
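
The collStats fields asserted on above, for quick reference: nindexes counts both ready and in-progress indexes, indexSizes carries one entry per index (including unfinished builds), and indexBuilds lists the names of builds still in progress. A one-off probe, with 'someColl' as a placeholder:

    const stats = assert.commandWorked(db.runCommand({collStats: "someColl"}));
    jsTest.log("nindexes (ready + in-progress): " + stats.nindexes);
    jsTest.log("per-index sizes: " + tojson(stats.indexSizes));
    jsTest.log("builds in progress: " + tojson(stats.indexBuilds));
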
diff --git a/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js b/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
index 0e12eb05a97..25083f1484c 100644
--- a/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
+++ b/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
@@ -6,168 +6,168 @@
*/
(function() {
- "use strict";
- var collName = 'leaves';
- var coll = db[collName];
-
- var commands = [];
-
- commands.push({
- req: {insert: collName, documents: [{type: 'maple'}]},
- setupFunc: function() {},
- confirmFunc: function() {
- assert.eq(coll.count({type: 'maple'}), 1);
- }
- });
-
- commands.push({
- req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]},
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.getIndexes().length, 1);
- },
- confirmFunc: function() {
- assert.eq(coll.getIndexes().length, 2);
- }
- });
-
- commands.push({
- req: {
- update: collName,
- updates: [{
- q: {type: 'oak'},
- u: [{$set: {type: 'ginkgo'}}],
- }],
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- }
- });
-
- commands.push({
- req: {
- findAndModify: collName,
- query: {type: 'oak'},
- update: {$set: {type: 'ginkgo'}},
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- }
- });
-
- commands.push({
- req: {
- findAndModify: collName,
- query: {type: 'oak'},
- update: [{$set: {type: 'ginkgo'}}],
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- }
- });
-
- commands.push({
- req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1, type: "willow"}}]},
- setupFunc: function() {
- coll.insert({_id: 1, type: 'oak'});
- assert.eq(coll.count({type: 'willow'}), 0);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'willow'}), 1);
- }
- });
+"use strict";
+var collName = 'leaves';
+var coll = db[collName];
- commands.push({
- req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}], cursor: {}},
- setupFunc: function() {
- coll.insert({_id: 1, type: 'oak'});
- coll.insert({_id: 2, type: 'maple'});
- },
- confirmFunc: function() {
- assert.eq(db.foo.count({type: 'oak'}), 1);
- assert.eq(db.foo.count({type: 'maple'}), 1);
- db.foo.drop();
- }
- });
+var commands = [];
- commands.push({
- req: {
- mapReduce: collName,
- map: function() {
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- return {count: values.length};
- },
- out: "foo"
+commands.push({
+ req: {insert: collName, documents: [{type: 'maple'}]},
+ setupFunc: function() {},
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'maple'}), 1);
+ }
+});
+
+commands.push({
+ req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]},
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.getIndexes().length, 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.getIndexes().length, 2);
+ }
+});
+
+commands.push({
+ req: {
+ update: collName,
+ updates: [{
+ q: {type: 'oak'},
+ u: [{$set: {type: 'ginkgo'}}],
+ }],
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ }
+});
+
+commands.push({
+ req: {
+ findAndModify: collName,
+ query: {type: 'oak'},
+ update: {$set: {type: 'ginkgo'}},
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ }
+});
+
+commands.push({
+ req: {
+ findAndModify: collName,
+ query: {type: 'oak'},
+ update: [{$set: {type: 'ginkgo'}}],
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ }
+});
+
+commands.push({
+ req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1, type: "willow"}}]},
+ setupFunc: function() {
+ coll.insert({_id: 1, type: 'oak'});
+ assert.eq(coll.count({type: 'willow'}), 0);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'willow'}), 1);
+ }
+});
+
+commands.push({
+ req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}], cursor: {}},
+ setupFunc: function() {
+ coll.insert({_id: 1, type: 'oak'});
+ coll.insert({_id: 2, type: 'maple'});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.count({type: 'oak'}), 1);
+ assert.eq(db.foo.count({type: 'maple'}), 1);
+ db.foo.drop();
+ }
+});
+
+commands.push({
+ req: {
+ mapReduce: collName,
+ map: function() {
+ this.tags.forEach(function(z) {
+ emit(z, 1);
+ });
},
- setupFunc: function() {
- coll.insert({x: 1, tags: ["a", "b"]});
- coll.insert({x: 2, tags: ["b", "c"]});
- coll.insert({x: 3, tags: ["c", "a"]});
- coll.insert({x: 4, tags: ["b", "c"]});
+ reduce: function(key, values) {
+ return {count: values.length};
},
- confirmFunc: function() {
- assert.eq(db.foo.findOne({_id: 'a'}).value.count, 2);
- assert.eq(db.foo.findOne({_id: 'b'}).value.count, 3);
- assert.eq(db.foo.findOne({_id: 'c'}).value.count, 3);
- db.foo.drop();
- }
- });
-
- function testValidWriteConcern(cmd) {
- cmd.req.writeConcern = {w: 1, j: true};
- jsTest.log("Testing " + tojson(cmd.req));
-
- coll.drop();
- cmd.setupFunc();
- var res = db.runCommand(cmd.req);
- assert.commandWorked(res);
- assert(!res.writeConcernError, 'command had writeConcernError: ' + tojson(res));
- cmd.confirmFunc();
+ out: "foo"
+ },
+ setupFunc: function() {
+ coll.insert({x: 1, tags: ["a", "b"]});
+ coll.insert({x: 2, tags: ["b", "c"]});
+ coll.insert({x: 3, tags: ["c", "a"]});
+ coll.insert({x: 4, tags: ["b", "c"]});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.findOne({_id: 'a'}).value.count, 2);
+ assert.eq(db.foo.findOne({_id: 'b'}).value.count, 3);
+ assert.eq(db.foo.findOne({_id: 'c'}).value.count, 3);
+ db.foo.drop();
}
-
- function testInvalidWriteConcern(wc, cmd) {
- cmd.req.writeConcern = wc;
- jsTest.log("Testing " + tojson(cmd.req));
-
- coll.drop();
- cmd.setupFunc();
- var res = coll.runCommand(cmd.req);
- // These commands should fail because standalone writeConcerns are found to be invalid at
- // the validation stage when the writeConcern is parsed, before the command is run.
- assert.commandFailed(res);
- }
-
- var invalidWriteConcerns = [{w: 'invalid'}, {w: 2}, {j: 'invalid'}];
-
- commands.forEach(function(cmd) {
- testValidWriteConcern(cmd);
- invalidWriteConcerns.forEach(function(wc) {
- testInvalidWriteConcern(wc, cmd);
- });
+});
+
+function testValidWriteConcern(cmd) {
+ cmd.req.writeConcern = {w: 1, j: true};
+ jsTest.log("Testing " + tojson(cmd.req));
+
+ coll.drop();
+ cmd.setupFunc();
+ var res = db.runCommand(cmd.req);
+ assert.commandWorked(res);
+ assert(!res.writeConcernError, 'command had writeConcernError: ' + tojson(res));
+ cmd.confirmFunc();
+}
+
+function testInvalidWriteConcern(wc, cmd) {
+ cmd.req.writeConcern = wc;
+ jsTest.log("Testing " + tojson(cmd.req));
+
+ coll.drop();
+ cmd.setupFunc();
+ var res = coll.runCommand(cmd.req);
+ // These commands should fail because standalone writeConcerns are found to be invalid at
+ // the validation stage when the writeConcern is parsed, before the command is run.
+ assert.commandFailed(res);
+}
+
+var invalidWriteConcerns = [{w: 'invalid'}, {w: 2}, {j: 'invalid'}];
+
+commands.forEach(function(cmd) {
+ testValidWriteConcern(cmd);
+ invalidWriteConcerns.forEach(function(wc) {
+ testInvalidWriteConcern(wc, cmd);
});
+});
})();
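
The harness above drives each command through the same hooks twice: once with the valid {w: 1, j: true}, and once per invalid writeConcern, which a standalone rejects when the writeConcern is parsed, before the command body runs. The two outcomes in isolation (the 'elm' document is illustrative):

    // Accepted: a standalone can satisfy w: 1 with journaling.
    assert.commandWorked(db.runCommand(
        {insert: "leaves", documents: [{type: "elm"}], writeConcern: {w: 1, j: true}}));
    // Rejected at validation: a standalone has no second member to satisfy w: 2.
    assert.commandFailed(db.runCommand(
        {insert: "leaves", documents: [{type: "elm"}], writeConcern: {w: 2}}));
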
diff --git a/jstests/noPassthroughWithMongod/connections_opened.js b/jstests/noPassthroughWithMongod/connections_opened.js
index ecd2076f2af..20c53b0db1d 100644
--- a/jstests/noPassthroughWithMongod/connections_opened.js
+++ b/jstests/noPassthroughWithMongod/connections_opened.js
@@ -30,12 +30,14 @@ function createPersistentConnection() {
function createTemporaryConnection() {
// Retry connecting until you are successful
- var pollString = "var conn = null;" + "assert.soon(function() {" + "try { conn = new Mongo(\"" +
- db.getMongo().host + "\"); return conn" + "} catch (x) {return false;}}, " +
+ var pollString = "var conn = null;" +
+ "assert.soon(function() {" +
+ "try { conn = new Mongo(\"" + db.getMongo().host + "\"); return conn" +
+ "} catch (x) {return false;}}, " +
"\"Timed out waiting for temporary connection to connect\", 30000, 5000);";
// Poll the signal collection until it is told to terminate.
- pollString += "assert.soon(function() {" + "return conn.getDB('" + testDB +
- "').getCollection('" + signalCollection + "')" +
+ pollString += "assert.soon(function() {" +
+ "return conn.getDB('" + testDB + "').getCollection('" + signalCollection + "')" +
".findOne().stop;}, \"Parallel shell never told to terminate\", 10 * 60000);";
return startParallelShell(pollString, null, true);
}
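
The reflowed concatenation above builds the temporary shell's source as a single string so the host name is baked in before the shell starts. Under the same assumptions, a template literal produces the same string more readably (a sketch, not the test's code):

    const pollString = `
        var conn = null;
        assert.soon(function() {
            try { conn = new Mongo("${db.getMongo().host}"); return conn; }
            catch (x) { return false; }
        }, "Timed out waiting for temporary connection to connect", 30000, 5000);
    `;
    const join = startParallelShell(pollString, null, true);  // third arg: don't connect first
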
diff --git a/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js b/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
index 8a36f7ee11e..61f38ce4b94 100644
--- a/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
+++ b/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
@@ -1,77 +1,77 @@
(function() {
- "use strict";
- var t = db.create_indexes_shell_helper;
- t.drop();
+"use strict";
+var t = db.create_indexes_shell_helper;
+t.drop();
- var mongo = db.getMongo();
+var mongo = db.getMongo();
- try {
- var commandsRan = [];
- var insertsRan = [];
- var mockMongo = {
- writeMode: function() {
- return "commands";
- },
- getSlaveOk: function() {
- return true;
- },
- runCommand: function(db, cmd, opts) {
- commandsRan.push({db: db, cmd: cmd, opts: opts});
- return {ok: 1.0};
- },
- getWriteConcern: function() {
- return null;
- },
- useWriteCommands: function() {
- return true;
- },
- hasWriteCommands: function() {
- return true;
- },
- getMinWireVersion: function() {
- return mongo.getMinWireVersion();
- },
- getMaxWireVersion: function() {
- return mongo.getMaxWireVersion();
- },
- isReplicaSetMember: function() {
- return mongo.isReplicaSetMember();
- },
- isMongos: function() {
- return mongo.isMongos();
- },
- isCausalConsistency: function() {
- return false;
- },
- getClusterTime: function() {
- return null;
- },
- };
+try {
+ var commandsRan = [];
+ var insertsRan = [];
+ var mockMongo = {
+ writeMode: function() {
+ return "commands";
+ },
+ getSlaveOk: function() {
+ return true;
+ },
+ runCommand: function(db, cmd, opts) {
+ commandsRan.push({db: db, cmd: cmd, opts: opts});
+ return {ok: 1.0};
+ },
+ getWriteConcern: function() {
+ return null;
+ },
+ useWriteCommands: function() {
+ return true;
+ },
+ hasWriteCommands: function() {
+ return true;
+ },
+ getMinWireVersion: function() {
+ return mongo.getMinWireVersion();
+ },
+ getMaxWireVersion: function() {
+ return mongo.getMaxWireVersion();
+ },
+ isReplicaSetMember: function() {
+ return mongo.isReplicaSetMember();
+ },
+ isMongos: function() {
+ return mongo.isMongos();
+ },
+ isCausalConsistency: function() {
+ return false;
+ },
+ getClusterTime: function() {
+ return null;
+ },
+ };
- db._mongo = mockMongo;
- db._session = new _DummyDriverSession(mockMongo);
+ db._mongo = mockMongo;
+ db._session = new _DummyDriverSession(mockMongo);
- t.createIndexes([{x: 1}]);
- assert.eq(commandsRan.length, 1);
- assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
- assert.eq(commandsRan[0].cmd["indexes"][0], {key: {x: 1}, name: "x_1"});
+ t.createIndexes([{x: 1}]);
+ assert.eq(commandsRan.length, 1);
+ assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
+ assert.eq(commandsRan[0].cmd["indexes"][0], {key: {x: 1}, name: "x_1"});
- commandsRan = [];
+ commandsRan = [];
- t.createIndexes([{y: 1}, {z: -1}]);
- assert.eq(commandsRan.length, 1);
- assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
- assert.eq(commandsRan[0].cmd["indexes"][0], {key: {y: 1}, name: "y_1"});
- assert.eq(commandsRan[0].cmd["indexes"][1], {key: {z: -1}, name: "z_-1"});
+ t.createIndexes([{y: 1}, {z: -1}]);
+ assert.eq(commandsRan.length, 1);
+ assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
+ assert.eq(commandsRan[0].cmd["indexes"][0], {key: {y: 1}, name: "y_1"});
+ assert.eq(commandsRan[0].cmd["indexes"][1], {key: {z: -1}, name: "z_-1"});
- commandsRan = [];
+ commandsRan = [];
- t.createIndex({a: 1});
- assert.eq(commandsRan.length, 1);
- assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
- assert.eq(commandsRan[0].cmd["indexes"][0], {key: {a: 1}, name: "a_1"});
- } finally {
- db._mongo = mongo;
- db._session = new _DummyDriverSession(mongo);
- }
+ t.createIndex({a: 1});
+ assert.eq(commandsRan.length, 1);
+ assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
+ assert.eq(commandsRan[0].cmd["indexes"][0], {key: {a: 1}, name: "a_1"});
+} finally {
+ db._mongo = mongo;
+ db._session = new _DummyDriverSession(mongo);
+}
}());
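
The pattern above swaps db._mongo (and the session) for a mock that records each command instead of sending it, so the test can assert on the exact createIndexes request the shell helper builds. The load-bearing part, condensed; the mock must also stub the other Mongo methods the shell calls, as the full test does:

    const realMongo = db.getMongo();
    const commandsRan = [];
    const mockMongo = {
        writeMode: function() { return "commands"; },
        runCommand: function(dbName, cmd, opts) {
            commandsRan.push({db: dbName, cmd: cmd, opts: opts});
            return {ok: 1.0};  // pretend every command succeeds
        },
        // ... remaining Mongo methods stubbed as in the test above ...
    };
    try {
        db._mongo = mockMongo;
        db._session = new _DummyDriverSession(mockMongo);
        db.create_indexes_shell_helper.createIndex({a: 1});
        assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
    } finally {
        db._mongo = realMongo;  // always restore the real connection
        db._session = new _DummyDriverSession(realMongo);
    }
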
diff --git a/jstests/noPassthroughWithMongod/create_indexes_waits_for_already_in_progress.js b/jstests/noPassthroughWithMongod/create_indexes_waits_for_already_in_progress.js
index a3ba81bb4ed..08c3b26f5c4 100644
--- a/jstests/noPassthroughWithMongod/create_indexes_waits_for_already_in_progress.js
+++ b/jstests/noPassthroughWithMongod/create_indexes_waits_for_already_in_progress.js
@@ -23,116 +23,117 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/libs/parallel_shell_helpers.js");
- load('jstests/libs/test_background_ops.js');
-
- const dbName = "test";
- const collName = "create_indexes_waits_for_already_in_progress";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
- const indexSpecB = {key: {b: 1}, name: "the_b_1_index"};
- const indexSpecC = {key: {c: 1}, name: "the_c_1_index"};
-
- testColl.drop();
- assert.commandWorked(testDB.adminCommand({clearLog: 'global'}));
-
- // TODO (SERVER-40952): currently createIndexes will hold an X lock for the duration of the
- // build if the collection is not created beforehand. This test needs that not to happen, so we
- // can pause a build and a subsequently issued request can get an IX lock.
- assert.commandWorked(testDB.runCommand({create: collName}));
-
- function runSuccessfulIndexBuild(dbName, collName, indexSpec, requestNumber) {
- jsTest.log("Index build request " + requestNumber + " starting...");
- const res =
- db.getSiblingDB(dbName).runCommand({createIndexes: collName, indexes: [indexSpec]});
- jsTest.log("Index build request " + requestNumber + ", expected to succeed, result: " +
- tojson(res));
- assert.commandWorked(res);
- }
-
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'alwaysOn'}));
- let joinFirstIndexBuild;
- let joinSecondIndexBuild;
- try {
- jsTest.log("Starting a parallel shell to run first index build request...");
- joinFirstIndexBuild = startParallelShell(
- funWithArgs(runSuccessfulIndexBuild, dbName, collName, indexSpecB, 1),
- db.getMongo().port);
-
- jsTest.log("Waiting for first index build to get started...");
- checkLog.contains(db.getMongo(),
- "Hanging index build due to failpoint 'hangAfterSettingUpIndexBuild'");
-
- jsTest.log("Starting a parallel shell to run second index build request...");
- joinSecondIndexBuild = startParallelShell(
- funWithArgs(runSuccessfulIndexBuild, dbName, collName, indexSpecB, 2),
- db.getMongo().port);
-
- jsTest.log("Waiting for second index build request to wait behind the first...");
- checkLog.contains(db.getMongo(),
- "but found that at least one of the indexes is already being built");
- } finally {
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'off'}));
- }
-
- // The second request stalled behind the first, so now all we need to do is check that they both
- // complete successfully.
- joinFirstIndexBuild();
- joinSecondIndexBuild();
-
-    // Make sure the parallel shells successfully built the index. We should have the _id index and
- // the 'the_b_1_index' index just built in the parallel shells.
- assert.eq(testColl.getIndexes().length, 2);
-
- // Lastly, if the first request fails transiently, then the second should restart the index
- // build.
- assert.commandWorked(testDB.adminCommand({clearLog: 'global'}));
-
- function runFailedIndexBuild(dbName, collName, indexSpec, requestNumber) {
- const res =
- db.getSiblingDB(dbName).runCommand({createIndexes: collName, indexes: [indexSpec]});
- jsTest.log("Index build request " + requestNumber + ", expected to fail, result: " +
- tojson(res));
- assert.commandFailedWithCode(res, ErrorCodes.InternalError);
- }
-
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/libs/parallel_shell_helpers.js");
+load('jstests/libs/test_background_ops.js');
+
+const dbName = "test";
+const collName = "create_indexes_waits_for_already_in_progress";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
+const indexSpecB = {
+ key: {b: 1},
+ name: "the_b_1_index"
+};
+const indexSpecC = {
+ key: {c: 1},
+ name: "the_c_1_index"
+};
+
+testColl.drop();
+assert.commandWorked(testDB.adminCommand({clearLog: 'global'}));
+
+// TODO (SERVER-40952): currently createIndexes will hold an X lock for the duration of the
+// build if the collection is not created beforehand. This test needs that not to happen, so we
+// can pause a build and a subsequently issued request can get an IX lock.
+assert.commandWorked(testDB.runCommand({create: collName}));
+
+function runSuccessfulIndexBuild(dbName, collName, indexSpec, requestNumber) {
+ jsTest.log("Index build request " + requestNumber + " starting...");
+ const res = db.getSiblingDB(dbName).runCommand({createIndexes: collName, indexes: [indexSpec]});
+ jsTest.log("Index build request " + requestNumber +
+ ", expected to succeed, result: " + tojson(res));
+ assert.commandWorked(res);
+}
+
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'alwaysOn'}));
+let joinFirstIndexBuild;
+let joinSecondIndexBuild;
+try {
+ jsTest.log("Starting a parallel shell to run first index build request...");
+ joinFirstIndexBuild = startParallelShell(
+ funWithArgs(runSuccessfulIndexBuild, dbName, collName, indexSpecB, 1), db.getMongo().port);
+
+ jsTest.log("Waiting for first index build to get started...");
+ checkLog.contains(db.getMongo(),
+ "Hanging index build due to failpoint 'hangAfterSettingUpIndexBuild'");
+
+ jsTest.log("Starting a parallel shell to run second index build request...");
+ joinSecondIndexBuild = startParallelShell(
+ funWithArgs(runSuccessfulIndexBuild, dbName, collName, indexSpecB, 2), db.getMongo().port);
+
+ jsTest.log("Waiting for second index build request to wait behind the first...");
+ checkLog.contains(db.getMongo(),
+ "but found that at least one of the indexes is already being built");
+} finally {
+ assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'off'}));
+}
+
+// The second request stalled behind the first, so now all we need to do is check that they both
+// complete successfully.
+joinFirstIndexBuild();
+joinSecondIndexBuild();
+
+// Make sure the parallel shells successfully built the index. We should have the _id index and
+// the 'the_b_1_index' index just built in the parallel shells.
+assert.eq(testColl.getIndexes().length, 2);
+
+// Lastly, if the first request fails transiently, then the second should restart the index
+// build.
+assert.commandWorked(testDB.adminCommand({clearLog: 'global'}));
+
+function runFailedIndexBuild(dbName, collName, indexSpec, requestNumber) {
+ const res = db.getSiblingDB(dbName).runCommand({createIndexes: collName, indexes: [indexSpec]});
+ jsTest.log("Index build request " + requestNumber +
+ ", expected to fail, result: " + tojson(res));
+ assert.commandFailedWithCode(res, ErrorCodes.InternalError);
+}
+
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAndThenFailIndexBuild', mode: 'alwaysOn'}));
+let joinFailedIndexBuild;
+let joinSuccessfulIndexBuild;
+try {
+ jsTest.log("Starting a parallel shell to run third index build request...");
+ joinFailedIndexBuild = startParallelShell(
+ funWithArgs(runFailedIndexBuild, dbName, collName, indexSpecC, 3), db.getMongo().port);
+
+ jsTest.log("Waiting for third index build to get started...");
+ checkLog.contains(db.getMongo(),
+ "Hanging index build due to failpoint 'hangAndThenFailIndexBuild'");
+
+ jsTest.log("Starting a parallel shell to run fourth index build request...");
+ joinSuccessfulIndexBuild = startParallelShell(
+ funWithArgs(runSuccessfulIndexBuild, dbName, collName, indexSpecC, 4), db.getMongo().port);
+
+ jsTest.log("Waiting for fourth index build request to wait behind the third...");
+ checkLog.contains(db.getMongo(),
+ "but found that at least one of the indexes is already being built");
+} finally {
assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangAndThenFailIndexBuild', mode: 'alwaysOn'}));
- let joinFailedIndexBuild;
- let joinSuccessfulIndexBuild;
- try {
- jsTest.log("Starting a parallel shell to run third index build request...");
- joinFailedIndexBuild = startParallelShell(
- funWithArgs(runFailedIndexBuild, dbName, collName, indexSpecC, 3), db.getMongo().port);
-
- jsTest.log("Waiting for third index build to get started...");
- checkLog.contains(db.getMongo(),
- "Hanging index build due to failpoint 'hangAndThenFailIndexBuild'");
-
- jsTest.log("Starting a parallel shell to run fourth index build request...");
- joinSuccessfulIndexBuild = startParallelShell(
- funWithArgs(runSuccessfulIndexBuild, dbName, collName, indexSpecC, 4),
- db.getMongo().port);
-
- jsTest.log("Waiting for fourth index build request to wait behind the third...");
- checkLog.contains(db.getMongo(),
- "but found that at least one of the indexes is already being built");
- } finally {
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangAndThenFailIndexBuild', mode: 'off'}));
- }
-
- // The second request stalled behind the first, so now all we need to do is check that they both
- // complete as expected: the first should fail; the second should succeed.
- joinFailedIndexBuild();
- joinSuccessfulIndexBuild();
-
-    // Make sure the parallel shells successfully built the index. We should now have the _id index,
- // the 'the_b_1_index' index and the 'the_c_1_index' just built in the parallel shells.
- assert.eq(testColl.getIndexes().length, 3);
+ testDB.adminCommand({configureFailPoint: 'hangAndThenFailIndexBuild', mode: 'off'}));
+}
+
+// The second request stalled behind the first, so now all we need to do is check that they both
+// complete as expected: the first should fail; the second should succeed.
+joinFailedIndexBuild();
+joinSuccessfulIndexBuild();
+
+// Make sure the parallel shells successfully built the index. We should now have the _id index,
+// the 'the_b_1_index' index and the 'the_c_1_index' just built in the parallel shells.
+assert.eq(testColl.getIndexes().length, 3);
})();
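
The synchronization idiom this test leans on, reduced to its skeleton: a hang failpoint parks the first build, checkLog.contains() polls the server log until the park (and, later, the queueing) is confirmed, and only then is the failpoint released. Assuming check_log.js is available and 'someColl' is a placeholder:

    load("jstests/libs/check_log.js");
    assert.commandWorked(db.adminCommand(
        {configureFailPoint: "hangAfterSettingUpIndexBuild", mode: "alwaysOn"}));
    const join = startParallelShell(() => {
        assert.commandWorked(db.getSiblingDB("test").runCommand(
            {createIndexes: "someColl", indexes: [{key: {b: 1}, name: "b_1"}]}));
    }, db.getMongo().port);
    // Block until the build has actually started and parked on the failpoint.
    checkLog.contains(db.getMongo(),
                      "Hanging index build due to failpoint 'hangAfterSettingUpIndexBuild'");
    assert.commandWorked(db.adminCommand(
        {configureFailPoint: "hangAfterSettingUpIndexBuild", mode: "off"}));
    join();
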
diff --git a/jstests/noPassthroughWithMongod/currentop_includes_connid.js b/jstests/noPassthroughWithMongod/currentop_includes_connid.js
index b9b9ceeeb90..a58bba44479 100644
--- a/jstests/noPassthroughWithMongod/currentop_includes_connid.js
+++ b/jstests/noPassthroughWithMongod/currentop_includes_connid.js
@@ -1,12 +1,12 @@
(function() {
- "use strict";
+"use strict";
- let res = assert.commandWorked(db.runCommand({whatsmyuri: 1}));
- const myUri = res.you;
+let res = assert.commandWorked(db.runCommand({whatsmyuri: 1}));
+const myUri = res.you;
- res = assert.commandWorked(db.adminCommand({currentOp: 1, client: myUri}));
- const threadName = res.inprog[0].desc;
- const connectionId = res.inprog[0].connectionId;
+res = assert.commandWorked(db.adminCommand({currentOp: 1, client: myUri}));
+const threadName = res.inprog[0].desc;
+const connectionId = res.inprog[0].connectionId;
- assert.eq("conn" + connectionId, threadName, tojson(res));
+assert.eq("conn" + connectionId, threadName, tojson(res));
})();
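
The invariant checked above, restated: whatsmyuri reports the client's own address, and the currentOp entry for that client must have desc equal to "conn" plus its connectionId. The whole check fits in three lines:

    const myUri = assert.commandWorked(db.runCommand({whatsmyuri: 1})).you;
    const op = assert.commandWorked(db.adminCommand({currentOp: 1, client: myUri})).inprog[0];
    assert.eq("conn" + op.connectionId, op.desc);
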
diff --git a/jstests/noPassthroughWithMongod/currentop_plan_summary_no_dup.js b/jstests/noPassthroughWithMongod/currentop_plan_summary_no_dup.js
index c82c75ff35d..1e4cf5764b8 100644
--- a/jstests/noPassthroughWithMongod/currentop_plan_summary_no_dup.js
+++ b/jstests/noPassthroughWithMongod/currentop_plan_summary_no_dup.js
@@ -1,57 +1,57 @@
// Tests that planSummary is not duplicated in an active getmore currentOp entry.
(function() {
- "use strict";
-
- // This test runs a getMore in a parallel shell, which will not inherit the implicit session of
- // the cursor establishing command.
- TestData.disableImplicitSessions = true;
-
- const collName = "currentop_plan_summary_no_dup";
- const coll = db.getCollection(collName);
- coll.drop();
- for (let i = 0; i < 200; i++) {
- assert.commandWorked(coll.insert({x: 1}));
- }
-
- // Create a long-running getMore operation by sleeping for every document.
- const cmdRes = assert.commandWorked(db.runCommand({
- find: collName,
- filter: {
- $where: function() {
- sleep(100);
- return true;
- }
- },
- batchSize: 0
- }));
- const cmdStr = 'db.runCommand({getMore: ' + cmdRes.cursor.id.toString() + ', collection: "' +
- collName + '"})';
- const awaitShell = startParallelShell(cmdStr);
-
- assert.soon(function() {
- const currOp = db.currentOp({"op": "getmore"});
-
- assert("inprog" in currOp);
- if (currOp.inprog.length === 0) {
- return false;
+"use strict";
+
+// This test runs a getMore in a parallel shell, which will not inherit the implicit session of
+// the cursor establishing command.
+TestData.disableImplicitSessions = true;
+
+const collName = "currentop_plan_summary_no_dup";
+const coll = db.getCollection(collName);
+coll.drop();
+for (let i = 0; i < 200; i++) {
+ assert.commandWorked(coll.insert({x: 1}));
+}
+
+// Create a long-running getMore operation by sleeping for every document.
+const cmdRes = assert.commandWorked(db.runCommand({
+ find: collName,
+ filter: {
+ $where: function() {
+ sleep(100);
+ return true;
}
+ },
+ batchSize: 0
+}));
+const cmdStr =
+ 'db.runCommand({getMore: ' + cmdRes.cursor.id.toString() + ', collection: "' + collName + '"})';
+const awaitShell = startParallelShell(cmdStr);
+
+assert.soon(function() {
+ const currOp = db.currentOp({"op": "getmore"});
+
+ assert("inprog" in currOp);
+ if (currOp.inprog.length === 0) {
+ return false;
+ }
- const getmoreOp = currOp.inprog[0];
- if (!("planSummary" in getmoreOp)) {
- print("getMore op does not yet contain planSummary:");
- printjson(getmoreOp);
- return false;
- }
+ const getmoreOp = currOp.inprog[0];
+ if (!("planSummary" in getmoreOp)) {
+ print("getMore op does not yet contain planSummary:");
+ printjson(getmoreOp);
+ return false;
+ }
- // getmoreOp should only contain a top-level plan summary.
- // Check that it doesn't contain a sub-level duplicate.
- assert(!getmoreOp.cursor.hasOwnProperty("planSummary"),
- "getmore contains duplicated planSummary: " + tojson(getmoreOp));
+ // getmoreOp should only contain a top-level plan summary.
+ // Check that it doesn't contain a sub-level duplicate.
+ assert(!getmoreOp.cursor.hasOwnProperty("planSummary"),
+ "getmore contains duplicated planSummary: " + tojson(getmoreOp));
- // Kill the op so that the test won't run for a long time.
- db.killOp(getmoreOp.opid);
+ // Kill the op so that the test won't run for a long time.
+ db.killOp(getmoreOp.opid);
- return true;
- });
- awaitShell();
+ return true;
+});
+awaitShell();
}());
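
The trick that makes the getMore observable: a $where predicate that sleeps per document keeps the operation running, and batchSize: 0 on the find defers all document fetching into the getMore. The setup in isolation ('someColl' is a placeholder):

    const cmdRes = assert.commandWorked(db.runCommand({
        find: "someColl",
        filter: {$where: function() { sleep(100); return true; }},
        batchSize: 0  // return no documents now; push the work into getMore
    }));
    // A subsequent getMore on cmdRes.cursor.id now runs long enough to show up
    // in db.currentOp({op: "getmore"}).
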
diff --git a/jstests/noPassthroughWithMongod/cursor_server_status_metrics.js b/jstests/noPassthroughWithMongod/cursor_server_status_metrics.js
index b8f8a240662..865809f63b2 100644
--- a/jstests/noPassthroughWithMongod/cursor_server_status_metrics.js
+++ b/jstests/noPassthroughWithMongod/cursor_server_status_metrics.js
@@ -2,85 +2,85 @@
* Tests for serverStatus metrics.cursor stats.
*/
(function() {
- var coll = db[jsTest.name()];
- coll.drop();
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.insert({_id: 2}));
- assert.writeOK(coll.insert({_id: 3}));
+var coll = db[jsTest.name()];
+coll.drop();
+assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(coll.insert({_id: 2}));
+assert.writeOK(coll.insert({_id: 3}));
- assert.eq(3, coll.find().count());
+assert.eq(3, coll.find().count());
- function getCurrentCursorsOpen() {
- return db.serverStatus().metrics.cursor.open.total;
- }
+function getCurrentCursorsOpen() {
+ return db.serverStatus().metrics.cursor.open.total;
+}
- function getCurrentCursorsPinned() {
- return db.serverStatus().metrics.cursor.open.pinned;
- }
+function getCurrentCursorsPinned() {
+ return db.serverStatus().metrics.cursor.open.pinned;
+}
- var initialTotalOpen = getCurrentCursorsOpen();
+var initialTotalOpen = getCurrentCursorsOpen();
-    // We expect no pinned cursors.
- assert.eq(0, getCurrentCursorsPinned());
+// We expect no pinned cursors.
+assert.eq(0, getCurrentCursorsPinned());
- // Total open cursors should not have changed after exhausting a cursor.
- assert.eq(3, coll.find().itcount());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- assert.eq(3, coll.find().batchSize(2).itcount());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- assert.eq(3, coll.find().batchSize(1).itcount());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+// Total open cursors should not have changed after exhausting a cursor.
+assert.eq(3, coll.find().itcount());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+assert.eq(3, coll.find().batchSize(2).itcount());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+assert.eq(3, coll.find().batchSize(1).itcount());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- assert.eq(3, coll.aggregate([]).itcount());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- assert.eq(3, coll.aggregate([], {cursor: {batchSize: 2}}).itcount());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- assert.eq(3, coll.aggregate([], {cursor: {batchSize: 1}}).itcount());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+assert.eq(3, coll.aggregate([]).itcount());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+assert.eq(3, coll.aggregate([], {cursor: {batchSize: 2}}).itcount());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+assert.eq(3, coll.aggregate([], {cursor: {batchSize: 1}}).itcount());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
-    // Total pinned cursors should remain zero after exhausting a cursor.
- assert.eq(3, coll.find().itcount());
- assert.eq(0, getCurrentCursorsPinned());
- assert.eq(3, coll.find().batchSize(2).itcount());
- assert.eq(0, getCurrentCursorsPinned());
- assert.eq(3, coll.find().batchSize(1).itcount());
- assert.eq(0, getCurrentCursorsPinned());
+// Total pinned cursors should remain zero after exhausting a cursor.
+assert.eq(3, coll.find().itcount());
+assert.eq(0, getCurrentCursorsPinned());
+assert.eq(3, coll.find().batchSize(2).itcount());
+assert.eq(0, getCurrentCursorsPinned());
+assert.eq(3, coll.find().batchSize(1).itcount());
+assert.eq(0, getCurrentCursorsPinned());
- assert.eq(3, coll.aggregate([]).itcount());
- assert.eq(0, getCurrentCursorsPinned());
- assert.eq(3, coll.aggregate([], {cursor: {batchSize: 2}}).itcount());
- assert.eq(0, getCurrentCursorsPinned());
- assert.eq(3, coll.aggregate([], {cursor: {batchSize: 1}}).itcount());
- assert.eq(0, getCurrentCursorsPinned());
+assert.eq(3, coll.aggregate([]).itcount());
+assert.eq(0, getCurrentCursorsPinned());
+assert.eq(3, coll.aggregate([], {cursor: {batchSize: 2}}).itcount());
+assert.eq(0, getCurrentCursorsPinned());
+assert.eq(3, coll.aggregate([], {cursor: {batchSize: 1}}).itcount());
+assert.eq(0, getCurrentCursorsPinned());
- // This cursor should remain open on the server, but not pinned.
- var cursor = coll.find().batchSize(2);
- cursor.next();
- assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
- assert.eq(0, getCurrentCursorsPinned());
+// This cursor should remain open on the server, but not pinned.
+var cursor = coll.find().batchSize(2);
+cursor.next();
+assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
+assert.eq(0, getCurrentCursorsPinned());
- // Same should be true after pulling the second document out of the cursor, since we haven't
- // issued a getMore yet.
- cursor.next();
- assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
- assert.eq(0, getCurrentCursorsPinned());
+// Same should be true after pulling the second document out of the cursor, since we haven't
+// issued a getMore yet.
+cursor.next();
+assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
+assert.eq(0, getCurrentCursorsPinned());
- // Cursor no longer reported as open after being exhausted.
- cursor.next();
- assert(!cursor.hasNext());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- assert.eq(0, getCurrentCursorsPinned());
+// Cursor no longer reported as open after being exhausted.
+cursor.next();
+assert(!cursor.hasNext());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+assert.eq(0, getCurrentCursorsPinned());
- // Same behavior expected for an aggregation cursor.
- var cursor = coll.aggregate([], {cursor: {batchSize: 2}});
- cursor.next();
- assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
- assert.eq(0, getCurrentCursorsPinned());
- cursor.next();
- assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
- assert.eq(0, getCurrentCursorsPinned());
- cursor.next();
- assert(!cursor.hasNext());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- assert.eq(0, getCurrentCursorsPinned());
+// Same behavior expected for an aggregation cursor.
+var cursor = coll.aggregate([], {cursor: {batchSize: 2}});
+cursor.next();
+assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
+assert.eq(0, getCurrentCursorsPinned());
+cursor.next();
+assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
+assert.eq(0, getCurrentCursorsPinned());
+cursor.next();
+assert(!cursor.hasNext());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+assert.eq(0, getCurrentCursorsPinned());
}());
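
For reference, both counters sampled by this test live under serverStatus().metrics.cursor; a quick interactive check, as a sketch against the same mongod:

    const open = db.serverStatus().metrics.cursor.open;
    // open.total counts cursors currently open on the server; open.pinned
    // counts those pinned by an in-flight operation such as an active getMore.
    jsTest.log("open cursors: " + open.total + ", pinned: " + open.pinned);
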
diff --git a/jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js b/jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js
index f2041253e3b..9903c7a5835 100644
--- a/jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js
+++ b/jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js
@@ -1,17 +1,17 @@
(function() {
- "use strict";
+"use strict";
- var testDB = db.getSiblingDB('dbcommand_cursor_throws_on_closed_conn');
- testDB.dropDatabase();
- var coll = testDB.collection;
- var conn = testDB.getMongo();
- conn.forceReadMode("commands");
- assert.commandWorked(coll.save({}));
- var res = assert.commandWorked(testDB.runCommand({
- find: coll.getName(),
- batchSize: 0,
- }));
+var testDB = db.getSiblingDB('dbcommand_cursor_throws_on_closed_conn');
+testDB.dropDatabase();
+var coll = testDB.collection;
+var conn = testDB.getMongo();
+conn.forceReadMode("commands");
+assert.commandWorked(coll.save({}));
+var res = assert.commandWorked(testDB.runCommand({
+ find: coll.getName(),
+ batchSize: 0,
+}));
- conn.close();
- assert.throws(() => new DBCommandCursor(testDB, res));
+conn.close();
+assert.throws(() => new DBCommandCursor(testDB, res));
}());
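
The batchSize: 0 find is what makes the test deterministic: the response carries an empty first batch, so any iteration must go back to the server over the cursor's connection. For contrast, the same two-step pattern works while the connection is still open; a sketch reusing this test's variables (one document was saved above):

    const res2 = assert.commandWorked(testDB.runCommand({find: coll.getName(), batchSize: 0}));
    const cursor = new DBCommandCursor(testDB, res2);
    assert.eq(1, cursor.itcount());  // drains the cursor via getMore on the open connection
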
diff --git a/jstests/noPassthroughWithMongod/default_read_pref.js b/jstests/noPassthroughWithMongod/default_read_pref.js
index e5daba20d8a..12e8962a0a7 100644
--- a/jstests/noPassthroughWithMongod/default_read_pref.js
+++ b/jstests/noPassthroughWithMongod/default_read_pref.js
@@ -2,55 +2,54 @@
// on read commands run with an 'unset' read preference.
(function() {
- "use strict";
+"use strict";
- var mongo = db.getMongo();
- try {
- var commandsRan = [];
- db._mongo = {
- getSlaveOk: function() {
- return false;
- },
- getReadPrefMode: function() {
- return mongo.getReadPrefMode();
- },
- getReadPref: function() {
- return mongo.getReadPref();
- },
- runCommand: function(db, cmd, opts) {
- commandsRan.push({db: db, cmd: cmd, opts: opts});
- return {ok: 1};
- },
- getMinWireVersion: function() {
- return mongo.getMinWireVersion();
- },
- getMaxWireVersion: function() {
- return mongo.getMaxWireVersion();
- },
- isReplicaSetMember: function() {
- return mongo.isReplicaSetMember();
- },
- isMongos: function() {
- return mongo.isMongos();
- },
- isCausalConsistency: function() {
- return false;
- },
- getClusterTime: function() {
- return null;
- },
- };
- db._session = new _DummyDriverSession(db._mongo);
+var mongo = db.getMongo();
+try {
+ var commandsRan = [];
+ db._mongo = {
+ getSlaveOk: function() {
+ return false;
+ },
+ getReadPrefMode: function() {
+ return mongo.getReadPrefMode();
+ },
+ getReadPref: function() {
+ return mongo.getReadPref();
+ },
+ runCommand: function(db, cmd, opts) {
+ commandsRan.push({db: db, cmd: cmd, opts: opts});
+ return {ok: 1};
+ },
+ getMinWireVersion: function() {
+ return mongo.getMinWireVersion();
+ },
+ getMaxWireVersion: function() {
+ return mongo.getMaxWireVersion();
+ },
+ isReplicaSetMember: function() {
+ return mongo.isReplicaSetMember();
+ },
+ isMongos: function() {
+ return mongo.isMongos();
+ },
+ isCausalConsistency: function() {
+ return false;
+ },
+ getClusterTime: function() {
+ return null;
+ },
+ };
+ db._session = new _DummyDriverSession(db._mongo);
- db.runReadCommand({ping: 1});
- assert.eq(commandsRan.length, 1);
- assert.docEq(commandsRan[0].cmd, {ping: 1}, "The command should not have been wrapped.");
- assert.eq(
- commandsRan[0].opts & DBQuery.Option.slaveOk, 0, "The slaveOk bit should not be set.");
-
- } finally {
- db._mongo = mongo;
- db._session = new _DummyDriverSession(mongo);
- }
+ db.runReadCommand({ping: 1});
+ assert.eq(commandsRan.length, 1);
+ assert.docEq(commandsRan[0].cmd, {ping: 1}, "The command should not have been wrapped.");
+ assert.eq(
+ commandsRan[0].opts & DBQuery.Option.slaveOk, 0, "The slaveOk bit should not be set.");
+} finally {
+ db._mongo = mongo;
+ db._session = new _DummyDriverSession(mongo);
+}
})();
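
The stub verifies that an unset read preference leaves the command untouched. When a mode is set on the connection, a normal shell wraps reads instead; a hedged sketch (the exact wire format depends on the shell version and protocol in use):

    db.getMongo().setReadPref("secondaryPreferred");
    // Reads may now be sent wrapped, roughly as:
    //   {$query: {ping: 1}, $readPreference: {mode: "secondaryPreferred"}}
    db.runReadCommand({ping: 1});
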
diff --git a/jstests/noPassthroughWithMongod/dup_bgindex.js b/jstests/noPassthroughWithMongod/dup_bgindex.js
index 298f585d543..02ac4bf7870 100644
--- a/jstests/noPassthroughWithMongod/dup_bgindex.js
+++ b/jstests/noPassthroughWithMongod/dup_bgindex.js
@@ -1,19 +1,19 @@
// Try to create two identical indexes via background builds; the server shouldn't allow duplicates.
(function() {
- var t = db.duplIndexTest;
- t.drop();
- docs = [];
- for (var i = 0; i < 10000; i++) {
- docs.push({name: "foo", z: {a: 17, b: 4}, i: i});
- }
- assert.commandWorked(t.insert(docs));
- var cmd = "assert.commandWorked(db.duplIndexTest.ensureIndex( { i : 1 }, {background:true} ));";
- var join1 = startParallelShell(cmd);
- var join2 = startParallelShell(cmd);
- assert.commandWorked(t.ensureIndex({i: 1}, {background: true}));
- assert.eq(1, t.find({i: 1}).count(), "Should find only one doc");
- assert.commandWorked(t.dropIndex({i: 1}));
- assert.eq(1, t.find({i: 1}).count(), "Should find only one doc");
- join1();
- join2();
+var t = db.duplIndexTest;
+t.drop();
+var docs = [];
+for (var i = 0; i < 10000; i++) {
+ docs.push({name: "foo", z: {a: 17, b: 4}, i: i});
+}
+assert.commandWorked(t.insert(docs));
+var cmd = "assert.commandWorked(db.duplIndexTest.ensureIndex( { i : 1 }, {background:true} ));";
+var join1 = startParallelShell(cmd);
+var join2 = startParallelShell(cmd);
+assert.commandWorked(t.ensureIndex({i: 1}, {background: true}));
+assert.eq(1, t.find({i: 1}).count(), "Should find only one doc");
+assert.commandWorked(t.dropIndex({i: 1}));
+assert.eq(1, t.find({i: 1}).count(), "Should find only one doc");
+join1();
+join2();
})();
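
Whichever shell wins the race, at most one {i: 1} spec should ever exist, since identical specs are deduplicated by the server. A sketch of a post-condition check that could run before the dropIndex call above:

    // Only one index named "i_1" should appear alongside the implicit _id_ index.
    const specs = t.getIndexes().filter(function(ix) {
        return ix.name === "i_1";
    });
    assert.eq(1, specs.length);
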
diff --git a/jstests/noPassthroughWithMongod/exchangeProducer.js b/jstests/noPassthroughWithMongod/exchangeProducer.js
index f3f23ee4e0d..5d609e04634 100644
--- a/jstests/noPassthroughWithMongod/exchangeProducer.js
+++ b/jstests/noPassthroughWithMongod/exchangeProducer.js
@@ -7,138 +7,258 @@
TestData.disableImplicitSessions = true;
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- const coll = db.testCollection;
- coll.drop();
+const coll = db.testCollection;
+coll.drop();
- const numDocs = 10000;
+const numDocs = 10000;
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; ++i) {
- bulk.insert({a: i, b: 'abcdefghijklmnopqrstuvxyz', c: {d: i}, e: [0, {f: i}]});
- }
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({a: i, b: 'abcdefghijklmnopqrstuvxyz', c: {d: i}, e: [0, {f: i}]});
+}
+
+assert.commandWorked(bulk.execute());
- assert.commandWorked(bulk.execute());
-
- /**
- * A consumer runs in a parallel shell reading the cursor until exhausted and then asserts that
- * it got the correct number of documents.
- *
- * @param {Object} cursor - the cursor that a consumer will read
- * @param {int} count - number of expected documents
- */
- function countingConsumer(cursor, count) {
- let shell = startParallelShell(`{
+/**
+ * A consumer runs in a parallel shell reading the cursor until exhausted and then asserts that
+ * it got the correct number of documents.
+ *
+ * @param {Object} cursor - the cursor that a consumer will read
+ * @param {int} count - number of expected documents
+ */
+function countingConsumer(cursor, count) {
+ let shell = startParallelShell(`{
const dbCursor = new DBCommandCursor(db, ${tojsononeline(cursor)});
assert.eq(${count}, dbCursor.itcount())
}`);
- return shell;
- }
+ return shell;
+}
- /**
- * A consumer runs in a parallel shell reading the cursor expecting an error.
- *
- * @param {Object} cursor - the cursor that a consumer will read
- * @param {int} code - the expected error code
- */
- function failingConsumer(cursor, code) {
- let shell = startParallelShell(`{
+/**
+ * A consumer runs in a parallel shell reading the cursor expecting an error.
+ *
+ * @param {Object} cursor - the cursor that a consumer will read
+ * @param {int} code - the expected error code
+ */
+function failingConsumer(cursor, code) {
+ let shell = startParallelShell(`{
const dbCursor = new DBCommandCursor(db, ${tojsononeline(cursor)});
const cmdRes = db.runCommand({getMore: dbCursor._cursorid, collection: dbCursor._collName});
assert.commandFailedWithCode(cmdRes, ${code});
}`);
- return shell;
- }
-
- const numConsumers = 4;
- // For simplicity we assume that we can evenly distribute documents among consumers.
- assert.eq(0, numDocs % numConsumers);
-
- (function testParameterValidation() {
- const tooManyConsumers = 101;
- assertErrorCode(coll, [], 50950, "Expected too many consumers", {
- exchange: {
- policy: "roundrobin",
- consumers: NumberInt(tooManyConsumers),
- bufferSize: NumberInt(1024)
- },
- cursor: {batchSize: 0}
- });
-
- const bufferTooLarge = 200 * 1024 * 1024; // 200 MB
- assertErrorCode(coll, [], 50951, "Expected buffer too large", {
- exchange: {
- policy: "roundrobin",
- consumers: NumberInt(numConsumers),
- bufferSize: NumberInt(bufferTooLarge)
- },
- cursor: {batchSize: 0}
- });
+ return shell;
+}
+
+const numConsumers = 4;
+// For simplicity we assume that we can evenly distribute documents among consumers.
+assert.eq(0, numDocs % numConsumers);
+
+(function testParameterValidation() {
+ const tooManyConsumers = 101;
+ assertErrorCode(coll, [], 50950, "Expected too many consumers", {
+ exchange: {
+ policy: "roundrobin",
+ consumers: NumberInt(tooManyConsumers),
+ bufferSize: NumberInt(1024)
+ },
+ cursor: {batchSize: 0}
+ });
+
+ const bufferTooLarge = 200 * 1024 * 1024; // 200 MB
+ assertErrorCode(coll, [], 50951, "Expected buffer too large", {
+ exchange: {
+ policy: "roundrobin",
+ consumers: NumberInt(numConsumers),
+ bufferSize: NumberInt(bufferTooLarge)
+ },
+ cursor: {batchSize: 0}
+ });
+})();
- })();
+/**
+ * RoundRobin - evenly distribute documents to consumers.
+ */
+(function testRoundRobin() {
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [],
+ exchange:
+ {policy: "roundrobin", consumers: NumberInt(numConsumers), bufferSize: NumberInt(1024)},
+ cursor: {batchSize: 0}
+ }));
+ assert.eq(numConsumers, res.cursors.length);
+
+ let parallelShells = [];
+
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells.push(countingConsumer(res.cursors[i], numDocs / numConsumers));
+ }
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells[i]();
+ }
+})();
- /**
- * RoundRobin - evenly distribute documents to consumers.
- */
- (function testRoundRobin() {
- let res = assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [],
- exchange: {
- policy: "roundrobin",
- consumers: NumberInt(numConsumers),
- bufferSize: NumberInt(1024)
- },
- cursor: {batchSize: 0}
- }));
- assert.eq(numConsumers, res.cursors.length);
+/**
+ * Broadcast - send a document to all consumers.
+ */
+(function testBroadcast() {
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [],
+ exchange:
+ {policy: "broadcast", consumers: NumberInt(numConsumers), bufferSize: NumberInt(1024)},
+ cursor: {batchSize: 0}
+ }));
+ assert.eq(numConsumers, res.cursors.length);
+
+ let parallelShells = [];
+
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells.push(countingConsumer(res.cursors[i], numDocs));
+ }
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells[i]();
+ }
+})();
- let parallelShells = [];
+/**
+ * Range - send documents to consumer based on the range of values of the 'a' field.
+ */
+(function testRange() {
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [],
+ exchange: {
+ policy: "keyRange",
+ consumers: NumberInt(numConsumers),
+ bufferSize: NumberInt(1024),
+ key: {a: 1},
+ boundaries: [{a: MinKey}, {a: 2500}, {a: 5000}, {a: 7500}, {a: MaxKey}],
+ consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
+ },
+ cursor: {batchSize: 0}
+ }));
+ assert.eq(numConsumers, res.cursors.length);
+
+ let parallelShells = [];
+
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells.push(countingConsumer(res.cursors[i], numDocs / numConsumers));
+ }
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells[i]();
+ }
+})();
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells.push(countingConsumer(res.cursors[i], numDocs / numConsumers));
- }
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells[i]();
- }
- })();
+/**
+ * Range with more complex pipeline.
+ */
+(function testRangeComplex() {
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$match: {a: {$gte: 5000}}}, {$sort: {a: -1}}, {$project: {_id: 0, b: 0}}],
+ exchange: {
+ policy: "keyRange",
+ consumers: NumberInt(numConsumers),
+ bufferSize: NumberInt(1024),
+ key: {a: 1},
+ boundaries: [{a: MinKey}, {a: 2500}, {a: 5000}, {a: 7500}, {a: MaxKey}],
+ consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
+ },
+ cursor: {batchSize: 0}
+ }));
+ assert.eq(numConsumers, res.cursors.length);
+
+ let parallelShells = [];
+
+ parallelShells.push(countingConsumer(res.cursors[0], 0));
+ parallelShells.push(countingConsumer(res.cursors[1], 0));
+ parallelShells.push(countingConsumer(res.cursors[2], 2500));
+ parallelShells.push(countingConsumer(res.cursors[3], 2500));
+
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells[i]();
+ }
+})();
- /**
- * Broadcast - send a document to all consumers.
- */
- (function testBroadcast() {
- let res = assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [],
- exchange: {
- policy: "broadcast",
- consumers: NumberInt(numConsumers),
- bufferSize: NumberInt(1024)
- },
- cursor: {batchSize: 0}
- }));
- assert.eq(numConsumers, res.cursors.length);
+/**
+ * Range with a dotted path.
+ */
+(function testRangeDottedPath() {
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [],
+ exchange: {
+ policy: "keyRange",
+ consumers: NumberInt(numConsumers),
+ bufferSize: NumberInt(1024),
+ key: {"c.d": 1},
+ boundaries:
+ [{"c.d": MinKey}, {"c.d": 2500}, {"c.d": 5000}, {"c.d": 7500}, {"c.d": MaxKey}],
+ consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
+ },
+ cursor: {batchSize: 0}
+ }));
+ assert.eq(numConsumers, res.cursors.length);
+
+ let parallelShells = [];
+
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells.push(countingConsumer(res.cursors[i], numDocs / numConsumers));
+ }
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells[i]();
+ }
+})();
- let parallelShells = [];
+/**
+ * Range with a dotted path and array.
+ */
+(function testRangeDottedPathAndArray() {
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [],
+ exchange: {
+ policy: "keyRange",
+ consumers: NumberInt(numConsumers),
+ bufferSize: NumberInt(1024),
+ key: {"e.f": 1},
+ boundaries:
+ [{"e.f": MinKey}, {"e.f": 2500}, {"e.f": 5000}, {"e.f": 7500}, {"e.f": MaxKey}],
+ consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
+ },
+ cursor: {batchSize: 0}
+ }));
+ assert.eq(numConsumers, res.cursors.length);
+
+ let parallelShells = [];
+
+    // The e.f field contains an array and hence the exchange cannot compute the range. Instead,
+    // it sends all such documents to consumer 0 by fiat.
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells.push(countingConsumer(res.cursors[i], i == 0 ? numDocs : 0));
+ }
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells[i]();
+ }
+})();
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells.push(countingConsumer(res.cursors[i], numDocs));
- }
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells[i]();
- }
- })();
+/**
+ * Range - simulate an exception in loading the batch.
+ */
+(function testRangeFailLoad() {
+ const kFailPointName = "exchangeFailLoadNextBatch";
+ try {
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: kFailPointName, mode: "alwaysOn"}));
- /**
- * Range - send documents to consumer based on the range of values of the 'a' field.
- */
- (function testRange() {
let res = assert.commandWorked(db.runCommand({
aggregate: coll.getName(),
pipeline: [],
@@ -155,148 +275,19 @@ TestData.disableImplicitSessions = true;
assert.eq(numConsumers, res.cursors.length);
let parallelShells = [];
+ failingConsumer(res.cursors[0], ErrorCodes.FailPointEnabled)();
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells.push(countingConsumer(res.cursors[i], numDocs / numConsumers));
+ // After the first consumer sees an error, each subsequent consumer should see an
+ // 'ExchangePassthrough' error.
+ for (let i = 0; i < numConsumers - 1; ++i) {
+ parallelShells.push(
+ failingConsumer(res.cursors[i + 1], ErrorCodes.ExchangePassthrough));
}
- for (let i = 0; i < numConsumers; ++i) {
+ for (let i = 0; i < numConsumers - 1; ++i) {
parallelShells[i]();
}
- })();
-
- /**
- * Range with more complex pipeline.
- */
- (function testRangeComplex() {
- let res = assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$match: {a: {$gte: 5000}}}, {$sort: {a: -1}}, {$project: {_id: 0, b: 0}}],
- exchange: {
- policy: "keyRange",
- consumers: NumberInt(numConsumers),
- bufferSize: NumberInt(1024),
- key: {a: 1},
- boundaries: [{a: MinKey}, {a: 2500}, {a: 5000}, {a: 7500}, {a: MaxKey}],
- consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
- },
- cursor: {batchSize: 0}
- }));
- assert.eq(numConsumers, res.cursors.length);
-
- let parallelShells = [];
-
- parallelShells.push(countingConsumer(res.cursors[0], 0));
- parallelShells.push(countingConsumer(res.cursors[1], 0));
- parallelShells.push(countingConsumer(res.cursors[2], 2500));
- parallelShells.push(countingConsumer(res.cursors[3], 2500));
-
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells[i]();
- }
- })();
-
- /**
- * Range with a dotted path.
- */
- (function testRangeDottedPath() {
- let res = assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [],
- exchange: {
- policy: "keyRange",
- consumers: NumberInt(numConsumers),
- bufferSize: NumberInt(1024),
- key: {"c.d": 1},
- boundaries:
- [{"c.d": MinKey}, {"c.d": 2500}, {"c.d": 5000}, {"c.d": 7500}, {"c.d": MaxKey}],
- consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
- },
- cursor: {batchSize: 0}
- }));
- assert.eq(numConsumers, res.cursors.length);
-
- let parallelShells = [];
-
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells.push(countingConsumer(res.cursors[i], numDocs / numConsumers));
- }
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells[i]();
- }
- })();
-
- /**
- * Range with a dotted path and array.
- */
- (function testRangeDottedPath() {
- let res = assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [],
- exchange: {
- policy: "keyRange",
- consumers: NumberInt(numConsumers),
- bufferSize: NumberInt(1024),
- key: {"e.f": 1},
- boundaries:
- [{"e.f": MinKey}, {"e.f": 2500}, {"e.f": 5000}, {"e.f": 7500}, {"e.f": MaxKey}],
- consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
- },
- cursor: {batchSize: 0}
- }));
- assert.eq(numConsumers, res.cursors.length);
-
- let parallelShells = [];
-
-        // The e.f field contains an array and hence the exchange cannot compute the range. Instead,
-        // it sends all such documents to consumer 0 by fiat.
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells.push(countingConsumer(res.cursors[i], i == 0 ? numDocs : 0));
- }
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells[i]();
- }
- })();
-
- /**
- * Range - simulate an exception in loading the batch.
- */
- (function testRangeFailLoad() {
- const kFailPointName = "exchangeFailLoadNextBatch";
- try {
- assert.commandWorked(
- db.adminCommand({configureFailPoint: kFailPointName, mode: "alwaysOn"}));
-
- let res = assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [],
- exchange: {
- policy: "keyRange",
- consumers: NumberInt(numConsumers),
- bufferSize: NumberInt(1024),
- key: {a: 1},
- boundaries: [{a: MinKey}, {a: 2500}, {a: 5000}, {a: 7500}, {a: MaxKey}],
- consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
- },
- cursor: {batchSize: 0}
- }));
- assert.eq(numConsumers, res.cursors.length);
-
- let parallelShells = [];
- failingConsumer(res.cursors[0], ErrorCodes.FailPointEnabled)();
-
- // After the first consumer sees an error, each subsequent consumer should see an
- // 'ExchangePassthrough' error.
- for (let i = 0; i < numConsumers - 1; ++i) {
- parallelShells.push(
- failingConsumer(res.cursors[i + 1], ErrorCodes.ExchangePassthrough));
- }
- for (let i = 0; i < numConsumers - 1; ++i) {
- parallelShells[i]();
- }
- } finally {
- assert.commandWorked(
- db.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
- }
- })();
-
+ } finally {
+ assert.commandWorked(db.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
+ }
+})();
})();
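
Each element of res.cursors in these tests is a complete command-style response, which is why the parallel-shell consumers can hand it straight to DBCommandCursor. A minimal single-consumer sketch of the same flow:

    const res = assert.commandWorked(db.runCommand({
        aggregate: "testCollection",
        pipeline: [],
        exchange:
            {policy: "broadcast", consumers: NumberInt(2), bufferSize: NumberInt(1024)},
        cursor: {batchSize: 0}
    }));
    // Each cursor must be drained concurrently (hence the parallel shells above);
    // draining one alone can block once another consumer's buffer fills up.
    const consumer0 = new DBCommandCursor(db, res.cursors[0]);
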
diff --git a/jstests/noPassthroughWithMongod/external_sort_text_agg.js b/jstests/noPassthroughWithMongod/external_sort_text_agg.js
index b08a7c79a44..089432b88d0 100644
--- a/jstests/noPassthroughWithMongod/external_sort_text_agg.js
+++ b/jstests/noPassthroughWithMongod/external_sort_text_agg.js
@@ -10,9 +10,9 @@ for (i = 0; i < 100; i++) {
var score = t.find({$text: {$search: "asdf"}}, {score: {$meta: 'textScore'}}).next().score;
var res = t.aggregate(
[
- {$match: {$text: {$search: "asdf"}}},
- {$sort: {"_id": 1}},
- {$project: {string: "$text", score: {$meta: "textScore"}}}
+ {$match: {$text: {$search: "asdf"}}},
+ {$sort: {"_id": 1}},
+ {$project: {string: "$text", score: {$meta: "textScore"}}}
],
{allowDiskUse: true});
// we must use .next() rather than a $limit because a $limit will optimize away the external sort
diff --git a/jstests/noPassthroughWithMongod/ftdc_params.js b/jstests/noPassthroughWithMongod/ftdc_params.js
index 08714040fcb..12c450ed932 100644
--- a/jstests/noPassthroughWithMongod/ftdc_params.js
+++ b/jstests/noPassthroughWithMongod/ftdc_params.js
@@ -3,8 +3,8 @@
load('jstests/libs/ftdc.js');
(function() {
- 'use strict';
- var admin = db.getSiblingDB("admin");
+'use strict';
+var admin = db.getSiblingDB("admin");
- verifyCommonFTDCParameters(admin, true);
+verifyCommonFTDCParameters(admin, true);
})();
diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js
index 7c23442d4de..4d10d62a7dd 100644
--- a/jstests/noPassthroughWithMongod/geo_polygon.js
+++ b/jstests/noPassthroughWithMongod/geo_polygon.js
@@ -51,14 +51,13 @@ for (var n = 0; n < numTests; n++) {
341,
"Square Missing Chunk Test",
true);
- assert.between(
- 21 - 2,
- t.find({
- loc: {"$within": {"$polygon": [[0, 0], [0, 2], [2, 2], [2, 0], [1, 1]]}}
- }).count(),
- 21,
- "Square Missing Chunk Test 2",
- true);
+ assert.between(21 - 2,
+ t.find({
+ loc: {"$within": {"$polygon": [[0, 0], [0, 2], [2, 2], [2, 0], [1, 1]]}}
+ }).count(),
+ 21,
+ "Square Missing Chunk Test 2",
+ true);
}
assert.eq(1,
diff --git a/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js b/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js
index cef102b8e6d..a56d105fa4a 100644
--- a/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js
+++ b/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js
@@ -3,46 +3,46 @@
* @tags: [requires_capped]
*/
(function() {
- "use strict";
+"use strict";
- const coll = db.getmore_awaitdata_opcounters;
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {capped: true, size: 1024}));
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.insert({_id: 2}));
- assert.writeOK(coll.insert({_id: 3}));
+const coll = db.getmore_awaitdata_opcounters;
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {capped: true, size: 1024}));
+assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(coll.insert({_id: 2}));
+assert.writeOK(coll.insert({_id: 3}));
- function getGlobalLatencyStats() {
- return db.serverStatus().opLatencies.reads;
- }
+function getGlobalLatencyStats() {
+ return db.serverStatus().opLatencies.reads;
+}
- function getCollectionLatencyStats() {
- return coll.latencyStats().next().latencyStats.reads;
- }
+function getCollectionLatencyStats() {
+ return coll.latencyStats().next().latencyStats.reads;
+}
- function getTop() {
- return db.adminCommand({top: 1}).totals[coll.getFullName()];
- }
+function getTop() {
+ return db.adminCommand({top: 1}).totals[coll.getFullName()];
+}
- // Global latency histogram from serverStatus should record two read ops, one for find and one
- // for getMore.
- let oldGlobalLatency = getGlobalLatencyStats();
- assert.eq(3, coll.find().tailable(true).itcount());
- let newGlobalLatency = getGlobalLatencyStats();
- assert.eq(2, newGlobalLatency.ops - oldGlobalLatency.ops);
+// Global latency histogram from serverStatus should record two read ops, one for find and one
+// for getMore.
+let oldGlobalLatency = getGlobalLatencyStats();
+assert.eq(3, coll.find().tailable(true).itcount());
+let newGlobalLatency = getGlobalLatencyStats();
+assert.eq(2, newGlobalLatency.ops - oldGlobalLatency.ops);
- // Per-collection latency histogram should record three read ops, one for find, one for getMore,
- // and one for the aggregation command used to retrieve the stats themselves.
- let oldCollLatency = getCollectionLatencyStats();
- assert.eq(3, coll.find().tailable(true).itcount());
- let newCollLatency = getCollectionLatencyStats();
- assert.eq(3, newCollLatency.ops - oldCollLatency.ops);
+// Per-collection latency histogram should record three read ops, one for find, one for getMore,
+// and one for the aggregation command used to retrieve the stats themselves.
+let oldCollLatency = getCollectionLatencyStats();
+assert.eq(3, coll.find().tailable(true).itcount());
+let newCollLatency = getCollectionLatencyStats();
+assert.eq(3, newCollLatency.ops - oldCollLatency.ops);
- // Top separates counters for getMore and find. We should see a delta of one getMore op and one
- // find op.
- let oldTop = getTop();
- assert.eq(3, coll.find().tailable(true).itcount());
- let newTop = getTop();
- assert.eq(1, newTop.getmore.count - oldTop.getmore.count);
- assert.eq(1, newTop.queries.count - oldTop.queries.count);
+// Top separates counters for getMore and find. We should see a delta of one getMore op and one
+// find op.
+let oldTop = getTop();
+assert.eq(3, coll.find().tailable(true).itcount());
+let newTop = getTop();
+assert.eq(1, newTop.getmore.count - oldTop.getmore.count);
+assert.eq(1, newTop.queries.count - oldTop.queries.count);
}());
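
The same delta pattern works for any namespace; a sketch using top (the totals key is the full namespace string, hypothetical here):

    const ns = "test.someCappedColl";  // hypothetical namespace
    const before = db.adminCommand({top: 1}).totals[ns];
    // ... run the workload under measurement ...
    const after = db.adminCommand({top: 1}).totals[ns];
    // top keeps separate counters for find ("queries") and getMore ("getmore").
    printjson({finds: after.queries.count - before.queries.count,
               getmores: after.getmore.count - before.getmore.count});
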
diff --git a/jstests/noPassthroughWithMongod/host_connection_string_validation.js b/jstests/noPassthroughWithMongod/host_connection_string_validation.js
index c37c834d903..07ba793151f 100644
--- a/jstests/noPassthroughWithMongod/host_connection_string_validation.js
+++ b/jstests/noPassthroughWithMongod/host_connection_string_validation.js
@@ -1,120 +1,119 @@
// Test --host.
(function() {
- // This "inner_mode" method of spawning a mongod and re-running was copied from
- // ipv6_connection_string_validation.js
- if ("undefined" == typeof inner_mode) {
- // Start a mongod with --ipv6
- jsTest.log("Outer mode test starting mongod with --ipv6");
- // NOTE: bind_ip arg is present to test if it can parse ipv6 addresses (::1 in this case).
-        // Unfortunately, having bind_ip = ::1 won't work in the test framework (but does work when
- // tested manually), so 127.0.0.1 is also present so the test mongo shell can connect
- // with that address.
- var mongod = MongoRunner.runMongod({ipv6: "", bind_ip: "::1,127.0.0.1"});
- if (mongod == null) {
- jsTest.log("Unable to run test because ipv6 is not on machine, see BF-10990");
- return;
- }
- var args = [
- "mongo",
- "--nodb",
- "--ipv6",
- "--host",
- "::1",
- "--port",
- mongod.port,
- "--eval",
- "inner_mode=true;port=" + mongod.port + ";",
- "jstests/noPassthroughWithMongod/host_connection_string_validation.js"
- ];
- var exitCode = _runMongoProgram.apply(null, args);
- jsTest.log("Inner mode test finished, exit code was " + exitCode);
-
- MongoRunner.stopMongod(mongod);
- // Pass the inner test's exit code back as the outer test's exit code
- if (exitCode != 0) {
- doassert("inner test failed with exit code " + exitCode);
- }
+// This "inner_mode" method of spawning a mongod and re-running was copied from
+// ipv6_connection_string_validation.js
+if ("undefined" == typeof inner_mode) {
+ // Start a mongod with --ipv6
+ jsTest.log("Outer mode test starting mongod with --ipv6");
+ // NOTE: bind_ip arg is present to test if it can parse ipv6 addresses (::1 in this case).
+    // Unfortunately, having bind_ip = ::1 won't work in the test framework (but does work when
+ // tested manually), so 127.0.0.1 is also present so the test mongo shell can connect
+ // with that address.
+ var mongod = MongoRunner.runMongod({ipv6: "", bind_ip: "::1,127.0.0.1"});
+ if (mongod == null) {
+ jsTest.log("Unable to run test because ipv6 is not on machine, see BF-10990");
return;
}
+ var args = [
+ "mongo",
+ "--nodb",
+ "--ipv6",
+ "--host",
+ "::1",
+ "--port",
+ mongod.port,
+ "--eval",
+ "inner_mode=true;port=" + mongod.port + ";",
+ "jstests/noPassthroughWithMongod/host_connection_string_validation.js"
+ ];
+ var exitCode = _runMongoProgram.apply(null, args);
+ jsTest.log("Inner mode test finished, exit code was " + exitCode);
+
+ MongoRunner.stopMongod(mongod);
+ // Pass the inner test's exit code back as the outer test's exit code
+ if (exitCode != 0) {
+ doassert("inner test failed with exit code " + exitCode);
+ }
+ return;
+}
- var testHost = function(host, shouldSucceed) {
- var exitCode = runMongoProgram('mongo', '--ipv6', '--eval', ';', '--host', host);
- if (shouldSucceed) {
- if (exitCode !== 0) {
- doassert("failed to connect with `--host " + host +
- "`, but expected success. Exit code: " + exitCode);
- }
- } else {
- if (exitCode === 0) {
- doassert("successfully connected with `--host " + host +
- "`, but expected to fail.");
- }
+var testHost = function(host, shouldSucceed) {
+ var exitCode = runMongoProgram('mongo', '--ipv6', '--eval', ';', '--host', host);
+ if (shouldSucceed) {
+ if (exitCode !== 0) {
+ doassert("failed to connect with `--host " + host +
+ "`, but expected success. Exit code: " + exitCode);
}
- };
+ } else {
+ if (exitCode === 0) {
+ doassert("successfully connected with `--host " + host + "`, but expected to fail.");
+ }
+ }
+};
- var goodStrings = [
- "[::1]:27999",
- "localhost:27999",
- "127.0.0.1:27999",
- "[0:0:0:0:0:0:0:1]:27999",
- "[0000:0000:0000:0000:0000:0000:0000:0001]:27999",
- ];
+var goodStrings = [
+ "[::1]:27999",
+ "localhost:27999",
+ "127.0.0.1:27999",
+ "[0:0:0:0:0:0:0:1]:27999",
+ "[0000:0000:0000:0000:0000:0000:0000:0001]:27999",
+];
- var goodSocketStrings = [
- "/tmp/mongodb-27999.sock",
- ];
+var goodSocketStrings = [
+ "/tmp/mongodb-27999.sock",
+];
- var badStrings = [
- "::1:27999",
- "::1:65536",
- "::1]:27999",
- ":",
- ":27999",
- "[::1:]27999",
- "[::1:27999",
- "[::1]:",
- "[::1]:123456",
- "[::1]:1cat",
- "[::1]:65536",
- "[::1]:cat",
- "0:0::0:0:1:27999",
- "0000:0000:0000:0000:0000:0000:0000:0001:27999",
- "127.0.0.1:",
- "127.0.0.1:123456",
- "127.0.0.1:1cat",
- "127.0.0.1:65536",
- "127.0.0.1:cat",
- "a[::1:]27999",
- "a[127.0.0.1]:27999",
- "localhost:",
- ];
+var badStrings = [
+ "::1:27999",
+ "::1:65536",
+ "::1]:27999",
+ ":",
+ ":27999",
+ "[::1:]27999",
+ "[::1:27999",
+ "[::1]:",
+ "[::1]:123456",
+ "[::1]:1cat",
+ "[::1]:65536",
+ "[::1]:cat",
+ "0:0::0:0:1:27999",
+ "0000:0000:0000:0000:0000:0000:0000:0001:27999",
+ "127.0.0.1:",
+ "127.0.0.1:123456",
+ "127.0.0.1:1cat",
+ "127.0.0.1:65536",
+ "127.0.0.1:cat",
+ "a[::1:]27999",
+ "a[127.0.0.1]:27999",
+ "localhost:",
+];
- function runUriTestFor(i, connectionString, isGood) {
- connectionString = connectionString.replace("27999", "" + port);
- print("Testing " + (isGood ? "good" : "bad") + " connection string " + i + "...");
- print(" * testing " + connectionString);
- testHost(connectionString, isGood);
- print(" * testing mongodb://" + encodeURIComponent(connectionString));
- testHost("mongodb://" + encodeURIComponent(connectionString), isGood);
- }
+function runUriTestFor(i, connectionString, isGood) {
+ connectionString = connectionString.replace("27999", "" + port);
+ print("Testing " + (isGood ? "good" : "bad") + " connection string " + i + "...");
+ print(" * testing " + connectionString);
+ testHost(connectionString, isGood);
+ print(" * testing mongodb://" + encodeURIComponent(connectionString));
+ testHost("mongodb://" + encodeURIComponent(connectionString), isGood);
+}
- var i;
- jsTest.log("TESTING " + goodStrings.length + " good uri strings");
- for (i = 0; i < goodStrings.length; ++i) {
- runUriTestFor(i, goodStrings[i], true);
- }
+var i;
+jsTest.log("TESTING " + goodStrings.length + " good uri strings");
+for (i = 0; i < goodStrings.length; ++i) {
+ runUriTestFor(i, goodStrings[i], true);
+}
- if (!_isWindows()) {
- jsTest.log("TESTING " + goodSocketStrings.length + " good uri socket strings");
- for (i = 0; i < goodSocketStrings.length; ++i) {
- runUriTestFor(i, goodSocketStrings[i], true);
- }
+if (!_isWindows()) {
+ jsTest.log("TESTING " + goodSocketStrings.length + " good uri socket strings");
+ for (i = 0; i < goodSocketStrings.length; ++i) {
+ runUriTestFor(i, goodSocketStrings[i], true);
}
+}
- jsTest.log("TESTING " + badStrings.length + " bad uri strings");
- for (i = 0; i < badStrings.length; ++i) {
- runUriTestFor(i, badStrings[i], false);
- }
+jsTest.log("TESTING " + badStrings.length + " bad uri strings");
+for (i = 0; i < badStrings.length; ++i) {
+ runUriTestFor(i, badStrings[i], false);
+}
- jsTest.log("SUCCESSFUL test completion");
+jsTest.log("SUCCESSFUL test completion");
})();
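
The URI leg of runUriTestFor depends on percent-encoding the host before prefixing the scheme; for an IPv6 literal that encodes the brackets and colons too. A sketch with the placeholder port used above:

    // "[::1]:27999" percent-encodes to "%5B%3A%3A1%5D%3A27999".
    const uri = "mongodb://" + encodeURIComponent("[::1]:27999");
    assert.eq("mongodb://%5B%3A%3A1%5D%3A27999", uri);
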
diff --git a/jstests/noPassthroughWithMongod/index_boundary_values_validate.js b/jstests/noPassthroughWithMongod/index_boundary_values_validate.js
index fd9ce6f5d21..5ff5a44ef93 100644
--- a/jstests/noPassthroughWithMongod/index_boundary_values_validate.js
+++ b/jstests/noPassthroughWithMongod/index_boundary_values_validate.js
@@ -3,30 +3,28 @@
'use strict';
(function() {
- var t = db.index_boundary_values_validate;
- t.drop();
+var t = db.index_boundary_values_validate;
+t.drop();
- assert.writeOK(t.insert({a: MaxKey, b: MaxKey}));
- assert.writeOK(t.insert({a: MaxKey, b: MinKey}));
- assert.writeOK(t.insert({a: MinKey, b: MaxKey}));
- assert.writeOK(t.insert({a: MinKey, b: MinKey}));
+assert.writeOK(t.insert({a: MaxKey, b: MaxKey}));
+assert.writeOK(t.insert({a: MaxKey, b: MinKey}));
+assert.writeOK(t.insert({a: MinKey, b: MaxKey}));
+assert.writeOK(t.insert({a: MinKey, b: MinKey}));
- assert.writeOK(t.insert({a: {}}));
- assert.writeOK(t.insert({b: {}}));
- assert.writeOK(t.insert({unindexed_field: {}}));
- assert.writeOK(t.insert({a: {}, b: {}}));
+assert.writeOK(t.insert({a: {}}));
+assert.writeOK(t.insert({b: {}}));
+assert.writeOK(t.insert({unindexed_field: {}}));
+assert.writeOK(t.insert({a: {}, b: {}}));
- assert.commandWorked(t.createIndex({a: 1, b: 1}));
- assert.commandWorked(t.createIndex({a: 1, b: -1}));
- assert.commandWorked(t.createIndex({a: -1, b: 1}));
- assert.commandWorked(t.createIndex({a: -1, b: -1}));
+assert.commandWorked(t.createIndex({a: 1, b: 1}));
+assert.commandWorked(t.createIndex({a: 1, b: -1}));
+assert.commandWorked(t.createIndex({a: -1, b: 1}));
+assert.commandWorked(t.createIndex({a: -1, b: -1}));
- var res = t.validate(true);
- assert.commandWorked(res);
+var res = t.validate(true);
+assert.commandWorked(res);
- assert.eq(
- res.nrecords, 8, 'the collection had an unexpected number of records:\n' + tojson(res));
- assert.eq(
- res.nIndexes, 5, 'the collection had an unexpected number of indexes:\n' + tojson(res));
- assert.eq(res.valid, true, 'the collection failed validation:\n' + tojson(res));
+assert.eq(res.nrecords, 8, 'the collection had an unexpected number of records:\n' + tojson(res));
+assert.eq(res.nIndexes, 5, 'the collection had an unexpected number of indexes:\n' + tojson(res));
+assert.eq(res.valid, true, 'the collection failed validation:\n' + tojson(res));
})();
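
The expected numbers follow directly from the setup: eight inserts, and the implicit _id index plus the four compound indexes. The same invariant can be spot-checked without a full validation pass; a sketch:

    assert.eq(8, t.count());              // the eight documents inserted above
    assert.eq(5, t.getIndexes().length);  // _id_ plus the four {a, b} variants
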
diff --git a/jstests/noPassthroughWithMongod/index_limits_not_bypassed.js b/jstests/noPassthroughWithMongod/index_limits_not_bypassed.js
index bc55bda6550..33e2e5cec8e 100644
--- a/jstests/noPassthroughWithMongod/index_limits_not_bypassed.js
+++ b/jstests/noPassthroughWithMongod/index_limits_not_bypassed.js
@@ -3,36 +3,35 @@
* the 'createIndexes()' command to create multiple indexes in one request.
*/
(function() {
- "use strict";
+"use strict";
- const collName = "index_limits_not_bypassed";
- const coll = db.getCollection(collName);
- coll.drop();
+const collName = "index_limits_not_bypassed";
+const coll = db.getCollection(collName);
+coll.drop();
- // A single collection can have no more than 64 indexes. We'll create 62 indexes here to
- // have a total of 63 indexes (the _id index and the 62 about to be created).
- for (let index = 0; index < 62; index++) {
- let spec = {};
- spec[index] = 1;
- assert.commandWorked(coll.createIndex(spec));
- }
+// A single collection can have no more than 64 indexes. We'll create 62 indexes here to
+// have a total of 63 indexes (the _id index and the 62 about to be created).
+for (let index = 0; index < 62; index++) {
+ let spec = {};
+ spec[index] = 1;
+ assert.commandWorked(coll.createIndex(spec));
+}
- let indexes = db.runCommand({listIndexes: collName});
- assert.eq(63, indexes.cursor.firstBatch.length);
+let indexes = db.runCommand({listIndexes: collName});
+assert.eq(63, indexes.cursor.firstBatch.length);
- // Creating multiple indexes via 'createIndexes()' shouldn't bypass index limits.
- assert.commandFailedWithCode(coll.createIndexes([{x: 1}, {y: 1}]),
- ErrorCodes.CannotCreateIndex);
+// Creating multiple indexes via 'createIndexes()' shouldn't bypass index limits.
+assert.commandFailedWithCode(coll.createIndexes([{x: 1}, {y: 1}]), ErrorCodes.CannotCreateIndex);
- assert.commandFailedWithCode(coll.dropIndex("x"), ErrorCodes.IndexNotFound);
- assert.commandFailedWithCode(coll.dropIndex("y"), ErrorCodes.IndexNotFound);
+assert.commandFailedWithCode(coll.dropIndex("x"), ErrorCodes.IndexNotFound);
+assert.commandFailedWithCode(coll.dropIndex("y"), ErrorCodes.IndexNotFound);
- // Try to create two text indexes at the same time using 'createIndexes()'. The limit for text
- // indexes is one per collection.
- assert.commandFailedWithCode(
- coll.createIndexes([{x: "text", weights: {x: 5}}, {y: "text", weights: {y: 10}}]),
- ErrorCodes.CannotCreateIndex);
+// Try to create two text indexes at the same time using 'createIndexes()'. The limit for text
+// indexes is one per collection.
+assert.commandFailedWithCode(
+ coll.createIndexes([{x: "text", weights: {x: 5}}, {y: "text", weights: {y: 10}}]),
+ ErrorCodes.CannotCreateIndex);
- assert.commandFailedWithCode(coll.dropIndex("x"), ErrorCodes.IndexNotFound);
- assert.commandFailedWithCode(coll.dropIndex("y"), ErrorCodes.IndexNotFound);
+assert.commandFailedWithCode(coll.dropIndex("x"), ErrorCodes.IndexNotFound);
+assert.commandFailedWithCode(coll.dropIndex("y"), ErrorCodes.IndexNotFound);
}());
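
With 63 indexes in place the collection sits one below the 64-index cap, so a single additional createIndex should still succeed while the next one fails. A sketch of that boundary (field names are hypothetical):

    assert.commandWorked(coll.createIndex({z: 1}));      // the 64th index: exactly at the cap
    assert.commandFailedWithCode(coll.createIndex({w: 1}),  // the 65th: rejected
                                 ErrorCodes.CannotCreateIndex);
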
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
index da3f2c9f1d0..446502905cb 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
@@ -6,81 +6,81 @@
// @tags: [requires_persistence, requires_journaling, requires_replication]
(function() {
- 'use strict';
+'use strict';
- // Set up replica set
- var replTest = new ReplSetTest({name: 'bgIndex', nodes: 3});
- var nodes = replTest.nodeList();
+// Set up replica set
+var replTest = new ReplSetTest({name: 'bgIndex', nodes: 3});
+var nodes = replTest.nodeList();
- // We need an arbiter to ensure that the primary doesn't step down
- // when we restart the secondary.
- replTest.startSet();
- replTest.initiate({
- "_id": "bgIndex",
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], "arbiterOnly": true}
- ]
- });
+// We need an arbiter to ensure that the primary doesn't step down
+// when we restart the secondary.
+replTest.startSet();
+replTest.initiate({
+ "_id": "bgIndex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
- var master = replTest.getPrimary();
- var second = replTest.getSecondary();
+var master = replTest.getPrimary();
+var second = replTest.getSecondary();
- var masterDB = master.getDB('bgIndexSec');
- var secondDB = second.getDB('bgIndexSec');
+var masterDB = master.getDB('bgIndexSec');
+var secondDB = second.getDB('bgIndexSec');
- var collectionName = 'jstests_bgsec';
+var collectionName = 'jstests_bgsec';
- var coll = masterDB.getCollection(collectionName);
+var coll = masterDB.getCollection(collectionName);
- var size = 100;
+var size = 100;
- var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i});
- }
- assert.writeOK(bulk.execute({j: true}));
- assert.eq(size, coll.count(), 'unexpected number of documents after bulk insert.');
+var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
+for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
+}
+assert.writeOK(bulk.execute({j: true}));
+assert.eq(size, coll.count(), 'unexpected number of documents after bulk insert.');
- // Make sure the documents make it to the secondary.
- replTest.awaitReplication();
+// Make sure the documents make it to the secondary.
+replTest.awaitReplication();
- assert.commandWorked(secondDB.adminCommand(
- {configureFailPoint: 'leaveIndexBuildUnfinishedForShutdown', mode: 'alwaysOn'}));
- try {
- coll.createIndex({i: 1}, {background: true});
- masterDB.getLastError(2);
- assert.eq(2, coll.getIndexes().length);
+assert.commandWorked(secondDB.adminCommand(
+ {configureFailPoint: 'leaveIndexBuildUnfinishedForShutdown', mode: 'alwaysOn'}));
+try {
+ coll.createIndex({i: 1}, {background: true});
+ masterDB.getLastError(2);
+ assert.eq(2, coll.getIndexes().length);
- // Make sure all writes are durable on the secondary so that we can restart it knowing that
- // the index build will be found on startup.
- // Waiting for durable is important for both (A) the record that we started the index build
-        // so it is rebuilt on restart, and (B) the update to minvalid to show that we've already
- // applied the oplog entry so it isn't replayed. If (A) is present without (B), then there
- // are two ways that the index can be rebuilt on startup and this test is only for the one
- // triggered by (A).
- secondDB.adminCommand({fsync: 1});
- } finally {
- assert.commandWorked(secondDB.adminCommand(
- {configureFailPoint: 'leaveIndexBuildUnfinishedForShutdown', mode: 'off'}));
- }
+ // Make sure all writes are durable on the secondary so that we can restart it knowing that
+ // the index build will be found on startup.
+ // Waiting for durable is important for both (A) the record that we started the index build
+    // so it is rebuilt on restart, and (B) the update to minvalid to show that we've already
+ // applied the oplog entry so it isn't replayed. If (A) is present without (B), then there
+ // are two ways that the index can be rebuilt on startup and this test is only for the one
+ // triggered by (A).
+ secondDB.adminCommand({fsync: 1});
+} finally {
+ assert.commandWorked(secondDB.adminCommand(
+ {configureFailPoint: 'leaveIndexBuildUnfinishedForShutdown', mode: 'off'}));
+}
- MongoRunner.stopMongod(second);
- replTest.start(second, {}, /*restart=*/true, /*wait=*/true);
+MongoRunner.stopMongod(second);
+replTest.start(second, {}, /*restart=*/true, /*wait=*/true);
- // Make sure secondary comes back.
- assert.soon(function() {
- try {
- secondDB.isMaster(); // trigger a reconnect if needed
- return true;
- } catch (e) {
- return false;
- }
- }, "secondary didn't restart", 30000, 1000);
+// Make sure secondary comes back.
+assert.soon(function() {
+ try {
+ secondDB.isMaster(); // trigger a reconnect if needed
+ return true;
+ } catch (e) {
+ return false;
+ }
+}, "secondary didn't restart", 30000, 1000);
- assert.soon(function() {
- return 2 == secondDB.getCollection(collectionName).getIndexes().length;
- }, "Index build not resumed after restart", 30000, 50);
- replTest.stopSet();
+assert.soon(function() {
+ return 2 == secondDB.getCollection(collectionName).getIndexes().length;
+}, "Index build not resumed after restart", 30000, 50);
+replTest.stopSet();
}());
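
The fsync call is the load-bearing step: it forces both the unfinished-index-build record and the minvalid update to disk in a single barrier, which is what makes the restart deterministic. On its own, the barrier is just:

    // Flush journal and data files on the secondary so everything written so
    // far, including the in-progress index build state, survives the restart.
    assert.commandWorked(secondDB.adminCommand({fsync: 1}));
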
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js
index 97bcaff5412..16165ce3f96 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js
@@ -6,90 +6,90 @@
// @tags: [requires_persistence, requires_journaling, requires_replication]
(function() {
- 'use strict';
-
-    // Assert that running `mongod` with `--noIndexBuildRetry` and `--replSet` does not start up.
- {
- // If code breaks the incompatibility between `--noIndexBuildRetry` and `--replSet`, using
- // `notAStorageEngine` will cause a failure later in execution that returns a different
- // exit code (100).
- var process = MongoRunner.runMongod({
- noIndexBuildRetry: "",
- replSet: "rs0",
- storageEngine: "notAStorageEngine",
- waitForConnect: false
- });
- var exitCode = waitProgram(process.pid);
- assert.eq(1, exitCode);
- }
-
-    // Skip the db hash check because the secondary will have a different number of indexes due
-    // to the --noIndexBuildRetry command line option.
- TestData.skipCheckDBHashes = true;
-
- // Set up replica set.
- var replTest = new ReplSetTest({name: 'bgIndexNoRetry', nodes: 3});
- var nodenames = replTest.nodeList();
-
- var nodes = replTest.startSet();
- replTest.initiate({
- "_id": "bgIndexNoRetry",
- "members": [
- {"_id": 0, "host": nodenames[0]},
- {"_id": 1, "host": nodenames[1]},
- {"_id": 2, "host": nodenames[2], arbiterOnly: true}
- ]
+'use strict';
+
+// Assert that running `mongod` with `--noIndexBuildRetry` and `--replSet` does not start up.
+{
+ // If code breaks the incompatibility between `--noIndexBuildRetry` and `--replSet`, using
+ // `notAStorageEngine` will cause a failure later in execution that returns a different
+ // exit code (100).
+ var process = MongoRunner.runMongod({
+ noIndexBuildRetry: "",
+ replSet: "rs0",
+ storageEngine: "notAStorageEngine",
+ waitForConnect: false
});
+ var exitCode = waitProgram(process.pid);
+ assert.eq(1, exitCode);
+}
+
+// Skip the db hash check because the secondary will have a different number of indexes due to
+// the --noIndexBuildRetry command line option.
+TestData.skipCheckDBHashes = true;
+
+// Set up replica set.
+var replTest = new ReplSetTest({name: 'bgIndexNoRetry', nodes: 3});
+var nodenames = replTest.nodeList();
+
+var nodes = replTest.startSet();
+replTest.initiate({
+ "_id": "bgIndexNoRetry",
+ "members": [
+ {"_id": 0, "host": nodenames[0]},
+ {"_id": 1, "host": nodenames[1]},
+ {"_id": 2, "host": nodenames[2], arbiterOnly: true}
+ ]
+});
+
+var master = replTest.getPrimary();
+var second = replTest.getSecondary();
+
+var masterDB = master.getDB('bgIndexNoRetrySec');
+var secondDB = second.getDB('bgIndexNoRetrySec');
+
+var collectionName = 'jstests_bgsec';
+
+var size = 100;
+
+var masterColl = masterDB.getCollection(collectionName);
+var bulk = masterColl.initializeUnorderedBulkOp();
+for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
+}
+assert.writeOK(bulk.execute({j: true}));
+assert.eq(size, masterColl.count(), 'unexpected number of documents after bulk insert.');
+
+// Make sure the documents get replicated to the secondary.
+replTest.awaitReplication();
+
+assert.commandWorked(secondDB.adminCommand(
+ {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: 'alwaysOn'}));
+masterColl.createIndex({i: 1}, {background: true});
+masterDB.getLastError(2);
+assert.eq(2, masterColl.getIndexes().length);
+
+// Kill -9 and restart the secondary, after making sure all writes are durable.
+// Waiting for durable is important for both (A) the record that we started the index build so
+// it is rebuilt on restart, and (B) the update to minvalid to show that we've already applied
+// the oplog entry so it isn't replayed. If (A) is present without (B), then there are two ways
+// that the index can be rebuilt on startup and this test is only for the one triggered by (A).
+secondDB.adminCommand({fsync: 1});
+replTest.stop(second, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+replTest.start(
+ second, {"noReplSet": true, "noIndexBuildRetry": ""}, /*restart*/ true, /*wait=*/false);
+
+// Make sure secondary comes back.
+assert.soon(function() {
+ try {
+ secondDB.isMaster(); // trigger a reconnect if needed
+ return true;
+ } catch (e) {
+ return false;
+ }
+}, "secondary didn't restart", 60000, 1000);
- var master = replTest.getPrimary();
- var second = replTest.getSecondary();
-
- var masterDB = master.getDB('bgIndexNoRetrySec');
- var secondDB = second.getDB('bgIndexNoRetrySec');
-
- var collectionName = 'jstests_bgsec';
-
- var size = 100;
+var secondaryColl = secondDB.getCollection(collectionName);
- var masterColl = masterDB.getCollection(collectionName);
- var bulk = masterColl.initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i});
- }
- assert.writeOK(bulk.execute({j: true}));
- assert.eq(size, masterColl.count(), 'unexpected number of documents after bulk insert.');
-
- // Make sure the documents get replicated to the secondary.
- replTest.awaitReplication();
-
- assert.commandWorked(secondDB.adminCommand(
- {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: 'alwaysOn'}));
- masterColl.createIndex({i: 1}, {background: true});
- masterDB.getLastError(2);
- assert.eq(2, masterColl.getIndexes().length);
-
- // Kill -9 and restart the secondary, after making sure all writes are durable.
- // Waiting for durable is important for both (A) the record that we started the index build so
-    // it is rebuilt on restart, and (B) the update to minvalid to show that we've already applied
- // the oplog entry so it isn't replayed. If (A) is present without (B), then there are two ways
- // that the index can be rebuilt on startup and this test is only for the one triggered by (A).
- secondDB.adminCommand({fsync: 1});
- replTest.stop(second, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- replTest.start(
- second, {"noReplSet": true, "noIndexBuildRetry": ""}, /*restart*/ true, /*wait=*/false);
-
- // Make sure secondary comes back.
- assert.soon(function() {
- try {
- secondDB.isMaster(); // trigger a reconnect if needed
- return true;
- } catch (e) {
- return false;
- }
- }, "secondary didn't restart", 60000, 1000);
-
- var secondaryColl = secondDB.getCollection(collectionName);
-
- assert.neq(2, secondaryColl.getIndexes().length);
- replTest.stopSet();
+assert.neq(2, secondaryColl.getIndexes().length);
+replTest.stopSet();
}());
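The failpoint used above follows the standard pattern: a test enables a named server failpoint with configureFailPoint, exercises the code path it freezes or perturbs, then switches it off. A minimal sketch of the round trip, where `conn` stands in for any open connection and the failpoint name is the one this test relies on:

    // Enable: 'alwaysOn' fires the failpoint on every pass through its code path.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: 'alwaysOn'}));

    // ... drive the behavior under test while the failpoint is active ...

    // Disable so the server can resume normal progress.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: 'off'}));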
diff --git a/jstests/noPassthroughWithMongod/indexbg_updates.js b/jstests/noPassthroughWithMongod/indexbg_updates.js
index c3465f78047..5511c83074d 100644
--- a/jstests/noPassthroughWithMongod/indexbg_updates.js
+++ b/jstests/noPassthroughWithMongod/indexbg_updates.js
@@ -4,57 +4,57 @@
// Variation of index_multi.js
(function() {
- "use strict";
- Random.setRandomSeed();
-
- var coll = db.getSiblingDB("indexbg_updates").coll;
- coll.drop();
-
- var numDocs = 10000;
-
- var bulk = coll.initializeUnorderedBulkOp();
- print("Populate the collection with random data");
- for (var i = 0; i < numDocs; i++) {
- var doc = {"_id": i, "field0": Random.rand()};
-
- bulk.insert(doc);
- }
- assert.writeOK(bulk.execute());
-
- // Perform a bulk update on a single document, targeting the updates on the
- // field being actively indexed in the background
- bulk = coll.initializeUnorderedBulkOp();
- for (i = 0; i < numDocs; i++) {
- var criteria = {"_id": 1000};
- var mod = {};
-
- if (Random.rand() < .8) {
- mod["$set"] = {};
- mod["$set"]["field0"] = Random.rand();
- } else {
- mod["$unset"] = {};
- mod["$unset"]["field0"] = true;
- }
-
- bulk.find(criteria).update(mod);
+"use strict";
+Random.setRandomSeed();
+
+var coll = db.getSiblingDB("indexbg_updates").coll;
+coll.drop();
+
+var numDocs = 10000;
+
+var bulk = coll.initializeUnorderedBulkOp();
+print("Populate the collection with random data");
+for (var i = 0; i < numDocs; i++) {
+ var doc = {"_id": i, "field0": Random.rand()};
+
+ bulk.insert(doc);
+}
+assert.writeOK(bulk.execute());
+
+// Perform a bulk update on a single document, targeting the updates on the
+// field being actively indexed in the background
+bulk = coll.initializeUnorderedBulkOp();
+for (i = 0; i < numDocs; i++) {
+ var criteria = {"_id": 1000};
+ var mod = {};
+
+ if (Random.rand() < .8) {
+ mod["$set"] = {};
+ mod["$set"]["field0"] = Random.rand();
+ } else {
+ mod["$unset"] = {};
+ mod["$unset"]["field0"] = true;
}
- // Build an index in the background on field0
- var backgroundIndexBuildShell = startParallelShell(
- function() {
- var coll = db.getSiblingDB("indexbg_updates").coll;
- assert.commandWorked(coll.createIndex({"field0": 1}, {"background": true}));
- },
- null, // port -- use default
- false // noconnect
- );
+ bulk.find(criteria).update(mod);
+}
+
+// Build an index in the background on field0
+var backgroundIndexBuildShell = startParallelShell(
+ function() {
+ var coll = db.getSiblingDB("indexbg_updates").coll;
+ assert.commandWorked(coll.createIndex({"field0": 1}, {"background": true}));
+ },
+ null, // port -- use default
+ false // noconnect
+);
- print("Do some sets and unsets");
- assert.writeOK(bulk.execute());
+print("Do some sets and unsets");
+assert.writeOK(bulk.execute());
- print("Start background index build");
- backgroundIndexBuildShell();
+print("Start background index build");
+backgroundIndexBuildShell();
- var explain = coll.find().hint({"field0": 1}).explain();
- assert("queryPlanner" in explain, tojson(explain));
+var explain = coll.find().hint({"field0": 1}).explain();
+assert("queryPlanner" in explain, tojson(explain));
}());
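Worth noting for readers of the hunk above: startParallelShell launches the shell immediately and returns a join function; calling that function blocks until the parallel shell exits and asserts a zero exit code. A minimal sketch of the launch/race/join shape, using a hypothetical `db.example` collection:

    var awaitIndexBuild = startParallelShell(function() {
        assert.commandWorked(db.example.createIndex({field0: 1}, {background: true}));
    });

    // Writes issued here race against the index build running in the other shell.
    assert.writeOK(db.example.update({_id: 0}, {$set: {field0: 42}}, {upsert: true}));

    awaitIndexBuild();  // Join; throws if the parallel shell exited non-zero.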
diff --git a/jstests/noPassthroughWithMongod/insertMulti.js b/jstests/noPassthroughWithMongod/insertMulti.js
index 2000669d698..2e09e799a2f 100644
--- a/jstests/noPassthroughWithMongod/insertMulti.js
+++ b/jstests/noPassthroughWithMongod/insertMulti.js
@@ -1,53 +1,53 @@
// Check that the insertMulti path works, including the error handling.
(function() {
- "use strict";
+"use strict";
- function makeDocument(docSize) {
- var doc = {"fieldName": ""};
- var longString = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
- while (Object.bsonsize(doc) < docSize) {
- if (Object.bsonsize(doc) < docSize - longString.length) {
- doc.fieldName += longString;
- } else {
- doc.fieldName += "x";
- }
+function makeDocument(docSize) {
+ var doc = {"fieldName": ""};
+ var longString = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
+ while (Object.bsonsize(doc) < docSize) {
+ if (Object.bsonsize(doc) < docSize - longString.length) {
+ doc.fieldName += longString;
+ } else {
+ doc.fieldName += "x";
}
- return doc;
}
+ return doc;
+}
- db.getMongo().forceWriteMode('legacy');
- var t = db.foo;
+db.getMongo().forceWriteMode('legacy');
+var t = db.foo;
- t.drop();
- t.insert([{_id: 1}, {_id: 2}]);
- assert.eq(t.count(), 2);
- t.insert([{_id: 3}, {_id: 2}, {_id: 4}], 0); // no ContinueOnError
- assert.eq(t.count(), 3);
- assert.eq(t.count({"_id": 1}), 1);
- assert.eq(t.count({"_id": 2}), 1);
- assert.eq(t.count({"_id": 3}), 1);
- assert.eq(t.count({"_id": 4}), 0);
+t.drop();
+t.insert([{_id: 1}, {_id: 2}]);
+assert.eq(t.count(), 2);
+t.insert([{_id: 3}, {_id: 2}, {_id: 4}], 0); // no ContinueOnError
+assert.eq(t.count(), 3);
+assert.eq(t.count({"_id": 1}), 1);
+assert.eq(t.count({"_id": 2}), 1);
+assert.eq(t.count({"_id": 3}), 1);
+assert.eq(t.count({"_id": 4}), 0);
- t.drop();
- t.insert([{_id: 1}, {_id: 2}]);
- assert.eq(t.count(), 2);
- t.insert([{_id: 3}, {_id: 2}, {_id: 4}], 1); // ContinueOnError
- assert.eq(t.count(), 4);
- assert.eq(t.count({"_id": 1}), 1);
- assert.eq(t.count({"_id": 2}), 1);
- assert.eq(t.count({"_id": 3}), 1);
- assert.eq(t.count({"_id": 4}), 1);
+t.drop();
+t.insert([{_id: 1}, {_id: 2}]);
+assert.eq(t.count(), 2);
+t.insert([{_id: 3}, {_id: 2}, {_id: 4}], 1); // ContinueOnError
+assert.eq(t.count(), 4);
+assert.eq(t.count({"_id": 1}), 1);
+assert.eq(t.count({"_id": 2}), 1);
+assert.eq(t.count({"_id": 3}), 1);
+assert.eq(t.count({"_id": 4}), 1);
- // Push a large vector in bigger than the subset size we'll break it up into
- t.drop();
- var doc = makeDocument(16 * 1024);
- var docs = [];
- for (var i = 0; i < 1000; i++)
- docs.push(Object.extend({}, doc));
- t.insert(docs);
- assert.eq(null, t.getDB().getLastError());
- assert.eq(t.count(), docs.length);
+// Push in a large vector bigger than the subset size we'll break it up into.
+t.drop();
+var doc = makeDocument(16 * 1024);
+var docs = [];
+for (var i = 0; i < 1000; i++)
+ docs.push(Object.extend({}, doc));
+t.insert(docs);
+assert.eq(null, t.getDB().getLastError());
+assert.eq(t.count(), docs.length);
- t.drop();
+t.drop();
})();
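The flag being exercised above is the legacy wire protocol's ContinueOnError bit: passed as the second argument to insert(), 0 stops the batch at the first error while 1 keeps applying the remaining documents. A minimal sketch against a hypothetical collection:

    db.getMongo().forceWriteMode('legacy');
    var c = db.example;

    c.drop();
    c.insert([{_id: 1}, {_id: 1}, {_id: 2}], 0);  // halt at the duplicate _id
    assert.eq(1, c.count());                      // {_id: 2} never inserted

    c.drop();
    c.insert([{_id: 1}, {_id: 1}, {_id: 2}], 1);  // ContinueOnError
    assert.eq(2, c.count());                      // {_id: 2} still lands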
diff --git a/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js b/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js
index fbffeffad42..618e76ddaaf 100644
--- a/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js
+++ b/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js
@@ -5,169 +5,169 @@
// The outer mode test starts a mongod with --ipv6 and then starts a mongo shell with --ipv6
// and a command line to run the test in inner_mode. The inner mode test is the actual test.
(function() {
- if ("undefined" == typeof inner_mode) {
- // Start a mongod with --ipv6
- jsTest.log("Outer mode test starting mongod with --ipv6");
- // NOTE: bind_ip arg is present to test if it can parse ipv6 addresses (::1 in this case).
- // Unfortunately, having bind_ip = ::1 won't work in the test framework (But does work when
- // tested manually), so 127.0.0.1 is also present so the test mongo shell can connect
- // with that address.
- var mongod = MongoRunner.runMongod({ipv6: "", bind_ip: "::1,127.0.0.1"});
- if (mongod == null) {
- jsTest.log("Unable to run test because ipv6 is not on machine, see BF-10990");
- return;
- }
- var args = [
- "mongo",
- "--nodb",
- "--ipv6",
- "--host",
- "::1",
- "--port",
- mongod.port,
- "--eval",
- "inner_mode=true;port=" + mongod.port + ";",
- "jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js"
- ];
- var exitCode = _runMongoProgram.apply(null, args);
- jsTest.log("Inner mode test finished, exit code was " + exitCode);
-
- // Pass the inner test's exit code back as the outer test's exit code
- if (exitCode != 0) {
- doassert("inner test failed with exit code " + exitCode);
- }
- MongoRunner.stopMongod(mongod);
+if ("undefined" == typeof inner_mode) {
+ // Start a mongod with --ipv6
+ jsTest.log("Outer mode test starting mongod with --ipv6");
+ // NOTE: bind_ip arg is present to test if it can parse ipv6 addresses (::1 in this case).
+ // Unfortunately, having bind_ip = ::1 won't work in the test framework (but it does work
+ // when tested manually), so 127.0.0.1 is also present so that the test mongo shell can
+ // connect with that address.
+ var mongod = MongoRunner.runMongod({ipv6: "", bind_ip: "::1,127.0.0.1"});
+ if (mongod == null) {
+ jsTest.log("Unable to run test because ipv6 is not on machine, see BF-10990");
return;
}
-
- var goodStrings = [
- "localhost:27999/test",
- "[::1]:27999/test",
- "[0:0:0:0:0:0:0:1]:27999/test",
- "[0000:0000:0000:0000:0000:0000:0000:0001]:27999/test",
- "localhost:27999",
- "[::1]:27999",
- "[0:0:0:0:0:0:0:1]:27999",
- "[0000:0000:0000:0000:0000:0000:0000:0001]:27999",
+ var args = [
+ "mongo",
+ "--nodb",
+ "--ipv6",
+ "--host",
+ "::1",
+ "--port",
+ mongod.port,
+ "--eval",
+ "inner_mode=true;port=" + mongod.port + ";",
+ "jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js"
];
+ var exitCode = _runMongoProgram.apply(null, args);
+ jsTest.log("Inner mode test finished, exit code was " + exitCode);
- var missingConnString = /^Missing connection string$/;
- var incorrectType = /^Incorrect type/;
- var emptyConnString = /^Empty connection string$/;
- var badHost = /^Failed to parse mongodb/;
- var emptyHost = /^Empty host component/;
- var noPort = /^No digits/;
- var invalidPort = /^Port number \d+ out of range/;
- var moreThanOneColon = /^More than one ':' detected/;
- var charBeforeSquareBracket = /^'\[' present, but not first character/;
- var noCloseBracket = /^ipv6 address is missing closing '\]'/;
- var noOpenBracket = /^'\]' present without '\['/;
- var noColonPrePort = /^missing colon after '\]' before the port/;
- var badStrings = [
- {s: undefined, r: missingConnString},
- {s: 7, r: incorrectType},
- {s: null, r: incorrectType},
- {s: "", r: emptyConnString},
- {s: " ", r: emptyConnString},
- {s: ":", r: emptyHost},
- {s: "/", r: badHost},
- {s: ":/", r: emptyHost},
- {s: ":/test", r: emptyHost},
- {s: ":27999/", r: emptyHost},
- {s: ":27999/test", r: emptyHost},
- {s: "/test", r: badHost},
- {s: "localhost:/test", r: noPort},
- {s: "[::1]:/test", r: noPort},
- {s: "[::1]:cat/test", c: ErrorCodes.FailedToParse},
- {s: "[::1]:1cat/test", c: ErrorCodes.FailedToParse},
- {s: "[::1]:123456/test", r: invalidPort},
- {s: "[::1]:65536/test", r: invalidPort},
- {s: "127.0.0.1:65536/test", r: invalidPort},
- {s: "::1:27999/test", r: moreThanOneColon},
- {s: "0:0::0:0:1:27999/test", r: moreThanOneColon},
- {s: "0000:0000:0000:0000:0000:0000:0000:0001:27999/test", r: moreThanOneColon},
- {s: "a[127.0.0.1]:27999/", r: charBeforeSquareBracket},
- {s: "a[::1:]27999/", r: charBeforeSquareBracket},
- {s: "[::1:27999/", r: noCloseBracket},
- {s: "[::1:]27999/", r: noColonPrePort},
- {s: "::1]:27999/", r: noOpenBracket},
- ];
+ // Pass the inner test's exit code back as the outer test's exit code
+ if (exitCode != 0) {
+ doassert("inner test failed with exit code " + exitCode);
+ }
+ MongoRunner.stopMongod(mongod);
+ return;
+}
- var substitutePort = function(connectionString) {
- // This will be called with non-strings as well as strings, so we need to catch exceptions
- try {
- return connectionString.replace("27999", "" + port);
- } catch (e) {
- return connectionString;
- }
- };
+var goodStrings = [
+ "localhost:27999/test",
+ "[::1]:27999/test",
+ "[0:0:0:0:0:0:0:1]:27999/test",
+ "[0000:0000:0000:0000:0000:0000:0000:0001]:27999/test",
+ "localhost:27999",
+ "[::1]:27999",
+ "[0:0:0:0:0:0:0:1]:27999",
+ "[0000:0000:0000:0000:0000:0000:0000:0001]:27999",
+];
- var testGood = function(i, connectionString) {
- print("\n---\nTesting good connection string " + i + " (\"" + connectionString + "\") ...");
- var gotException = false;
- var exception;
- try {
- var connectDB = connect(connectionString);
- connectDB = null;
- } catch (e) {
- gotException = true;
- exception = e;
- }
- if (!gotException) {
- print("Good connection string " + i + " (\"" + connectionString +
- "\") correctly validated");
- return;
- }
- var message = "FAILED to correctly validate goodString " + i + " (\"" + connectionString +
- "\"): exception was \"" + tojson(exception) + "\"";
- doassert(message);
- };
+var missingConnString = /^Missing connection string$/;
+var incorrectType = /^Incorrect type/;
+var emptyConnString = /^Empty connection string$/;
+var badHost = /^Failed to parse mongodb/;
+var emptyHost = /^Empty host component/;
+var noPort = /^No digits/;
+var invalidPort = /^Port number \d+ out of range/;
+var moreThanOneColon = /^More than one ':' detected/;
+var charBeforeSquareBracket = /^'\[' present, but not first character/;
+var noCloseBracket = /^ipv6 address is missing closing '\]'/;
+var noOpenBracket = /^'\]' present without '\['/;
+var noColonPrePort = /^missing colon after '\]' before the port/;
+var badStrings = [
+ {s: undefined, r: missingConnString},
+ {s: 7, r: incorrectType},
+ {s: null, r: incorrectType},
+ {s: "", r: emptyConnString},
+ {s: " ", r: emptyConnString},
+ {s: ":", r: emptyHost},
+ {s: "/", r: badHost},
+ {s: ":/", r: emptyHost},
+ {s: ":/test", r: emptyHost},
+ {s: ":27999/", r: emptyHost},
+ {s: ":27999/test", r: emptyHost},
+ {s: "/test", r: badHost},
+ {s: "localhost:/test", r: noPort},
+ {s: "[::1]:/test", r: noPort},
+ {s: "[::1]:cat/test", c: ErrorCodes.FailedToParse},
+ {s: "[::1]:1cat/test", c: ErrorCodes.FailedToParse},
+ {s: "[::1]:123456/test", r: invalidPort},
+ {s: "[::1]:65536/test", r: invalidPort},
+ {s: "127.0.0.1:65536/test", r: invalidPort},
+ {s: "::1:27999/test", r: moreThanOneColon},
+ {s: "0:0::0:0:1:27999/test", r: moreThanOneColon},
+ {s: "0000:0000:0000:0000:0000:0000:0000:0001:27999/test", r: moreThanOneColon},
+ {s: "a[127.0.0.1]:27999/", r: charBeforeSquareBracket},
+ {s: "a[::1:]27999/", r: charBeforeSquareBracket},
+ {s: "[::1:27999/", r: noCloseBracket},
+ {s: "[::1:]27999/", r: noColonPrePort},
+ {s: "::1]:27999/", r: noOpenBracket},
+];
- var testBad = function(i, connectionString, errorRegex, errorCode) {
- print("\n---\nTesting bad connection string " + i + " (\"" + connectionString + "\") ...");
- var gotException = false;
- var gotCorrectErrorText = false;
- var gotCorrectErrorCode = false;
- var exception;
- try {
- var connectDB = connect(connectionString);
- connectDB = null;
- } catch (e) {
- gotException = true;
- exception = e;
- if (errorRegex && errorRegex.test(e.message)) {
- gotCorrectErrorText = true;
- }
- if (errorCode == e.code) {
- gotCorrectErrorCode = true;
- }
- }
- if (gotCorrectErrorText || gotCorrectErrorCode) {
- print("Bad connection string " + i + " (\"" + connectionString +
- "\") correctly rejected:\n" + tojson(exception));
- return;
- }
- var message = "FAILED to generate correct exception for badString " + i + " (\"" +
- connectionString + "\"): ";
- if (gotException) {
- message += "exception was \"" + tojson(exception) + "\", it should have matched \"" +
- errorRegex.toString() + "\"";
- } else {
- message += "no exception was thrown";
- }
- doassert(message);
- };
+var substitutePort = function(connectionString) {
+ // This will be called with non-strings as well as strings, so we need to catch exceptions
+ try {
+ return connectionString.replace("27999", "" + port);
+ } catch (e) {
+ return connectionString;
+ }
+};
- var i;
- jsTest.log("TESTING " + goodStrings.length + " good connection strings");
- for (i = 0; i < goodStrings.length; ++i) {
- testGood(i, substitutePort(goodStrings[i]));
+var testGood = function(i, connectionString) {
+ print("\n---\nTesting good connection string " + i + " (\"" + connectionString + "\") ...");
+ var gotException = false;
+ var exception;
+ try {
+ var connectDB = connect(connectionString);
+ connectDB = null;
+ } catch (e) {
+ gotException = true;
+ exception = e;
+ }
+ if (!gotException) {
+ print("Good connection string " + i + " (\"" + connectionString +
+ "\") correctly validated");
+ return;
}
+ var message = "FAILED to correctly validate goodString " + i + " (\"" + connectionString +
+ "\"): exception was \"" + tojson(exception) + "\"";
+ doassert(message);
+};
- jsTest.log("TESTING " + badStrings.length + " bad connection strings");
- for (i = 0; i < badStrings.length; ++i) {
- testBad(i, substitutePort(badStrings[i].s), badStrings[i].r, badStrings[i].c);
+var testBad = function(i, connectionString, errorRegex, errorCode) {
+ print("\n---\nTesting bad connection string " + i + " (\"" + connectionString + "\") ...");
+ var gotException = false;
+ var gotCorrectErrorText = false;
+ var gotCorrectErrorCode = false;
+ var exception;
+ try {
+ var connectDB = connect(connectionString);
+ connectDB = null;
+ } catch (e) {
+ gotException = true;
+ exception = e;
+ if (errorRegex && errorRegex.test(e.message)) {
+ gotCorrectErrorText = true;
+ }
+ if (errorCode == e.code) {
+ gotCorrectErrorCode = true;
+ }
}
+ if (gotCorrectErrorText || gotCorrectErrorCode) {
+ print("Bad connection string " + i + " (\"" + connectionString +
+ "\") correctly rejected:\n" + tojson(exception));
+ return;
+ }
+ var message = "FAILED to generate correct exception for badString " + i + " (\"" +
+ connectionString + "\"): ";
+ if (gotException) {
+ message += "exception was \"" + tojson(exception) + "\", it should have matched \"" +
+ errorRegex.toString() + "\"";
+ } else {
+ message += "no exception was thrown";
+ }
+ doassert(message);
+};
+
+var i;
+jsTest.log("TESTING " + goodStrings.length + " good connection strings");
+for (i = 0; i < goodStrings.length; ++i) {
+ testGood(i, substitutePort(goodStrings[i]));
+}
+
+jsTest.log("TESTING " + badStrings.length + " bad connection strings");
+for (i = 0; i < badStrings.length; ++i) {
+ testBad(i, substitutePort(badStrings[i].s), badStrings[i].r, badStrings[i].c);
+}
- jsTest.log("SUCCESSFUL test completion");
+jsTest.log("SUCCESSFUL test completion");
})();
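The outer/inner split above is a reusable trick for tests that need a shell launched with particular command-line flags: the outer run spawns mongod and a second shell whose --eval defines a guard variable before re-running the same file, and the inner run takes the other branch. A skeleton of the pattern, with a placeholder filename:

    if ("undefined" == typeof inner_mode) {
        var mongod = MongoRunner.runMongod({});
        var exitCode = runMongoProgram("mongo",
                                       "--port",
                                       mongod.port,
                                       "--eval",
                                       "inner_mode=true;",
                                       "jstests/path/to/this_test.js");  // placeholder path
        assert.eq(0, exitCode, "inner test failed");
        MongoRunner.stopMongod(mongod);
    } else {
        // Inner-mode assertions run here against the already-started mongod.
    }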
diff --git a/jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js b/jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js
index ab22bfb5f6c..10a019e95cf 100644
--- a/jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js
+++ b/jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js
@@ -4,54 +4,53 @@
// upgrading, or downgrading.
//
(function() {
- "use strict";
- load('./jstests/libs/feature_compatibility_version.js');
+"use strict";
+load('./jstests/libs/feature_compatibility_version.js');
- const adminDB = db.getSiblingDB("admin");
- const isMasterCommand = {
- isMaster: 1,
- internalClient: {minWireVersion: NumberInt(0), maxWireVersion: NumberInt(7)}
- };
+const adminDB = db.getSiblingDB("admin");
+const isMasterCommand = {
+ isMaster: 1,
+ internalClient: {minWireVersion: NumberInt(0), maxWireVersion: NumberInt(7)}
+};
- // When the featureCompatibilityVersion is equal to the upgrade version, running isMaster with
- // internalClient returns minWireVersion == maxWireVersion.
- checkFCV(adminDB, latestFCV);
- let res = adminDB.runCommand(isMasterCommand);
- assert.commandWorked(res);
- assert.eq(res.minWireVersion, res.maxWireVersion, tojson(res));
+// When the featureCompatibilityVersion is equal to the upgrade version, running isMaster with
+// internalClient returns minWireVersion == maxWireVersion.
+checkFCV(adminDB, latestFCV);
+let res = adminDB.runCommand(isMasterCommand);
+assert.commandWorked(res);
+assert.eq(res.minWireVersion, res.maxWireVersion, tojson(res));
- // When the featureCompatibilityVersion is upgrading, running isMaster with internalClient
- // returns minWireVersion == maxWireVersion.
- assert.writeOK(
- adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {version: lastStableFCV, targetVersion: latestFCV}}));
- res = adminDB.runCommand(isMasterCommand);
- assert.commandWorked(res);
- assert.eq(res.minWireVersion, res.maxWireVersion, tojson(res));
+// When the featureCompatibilityVersion is upgrading, running isMaster with internalClient
+// returns minWireVersion == maxWireVersion.
+assert.writeOK(
+ adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {version: lastStableFCV, targetVersion: latestFCV}}));
+res = adminDB.runCommand(isMasterCommand);
+assert.commandWorked(res);
+assert.eq(res.minWireVersion, res.maxWireVersion, tojson(res));
- // When the featureCompatibilityVersion is downgrading, running isMaster with internalClient
- // returns minWireVersion == maxWireVersion.
- assert.writeOK(adminDB.system.version.update(
- {_id: "featureCompatibilityVersion"},
- {$set: {version: lastStableFCV, targetVersion: lastStableFCV}}));
- res = adminDB.runCommand(isMasterCommand);
- assert.commandWorked(res);
- assert.eq(res.minWireVersion, res.maxWireVersion, tojson(res));
+// When the featureCompatibilityVersion is downgrading, running isMaster with internalClient
+// returns minWireVersion == maxWireVersion.
+assert.writeOK(
+ adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {version: lastStableFCV, targetVersion: lastStableFCV}}));
+res = adminDB.runCommand(isMasterCommand);
+assert.commandWorked(res);
+assert.eq(res.minWireVersion, res.maxWireVersion, tojson(res));
- // When the featureCompatibilityVersion is equal to the downgrade version, running isMaster with
- // internalClient returns minWireVersion + 1 == maxWireVersion.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- res = adminDB.runCommand(isMasterCommand);
- assert.commandWorked(res);
- assert.eq(res.minWireVersion + 1, res.maxWireVersion, tojson(res));
-
- // When the internalClient field is missing from the isMaster command, the response returns the
- // full wire version range from minWireVersion == 0 to maxWireVersion == latest version, even if
- // the featureCompatibilityVersion is equal to the upgrade version.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- res = adminDB.runCommand({isMaster: 1});
- assert.commandWorked(res);
- assert.eq(res.minWireVersion, 0, tojson(res));
- assert.lt(res.minWireVersion, res.maxWireVersion, tojson(res));
+// When the featureCompatibilityVersion is equal to the downgrade version, running isMaster with
+// internalClient returns minWireVersion + 1 == maxWireVersion.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+res = adminDB.runCommand(isMasterCommand);
+assert.commandWorked(res);
+assert.eq(res.minWireVersion + 1, res.maxWireVersion, tojson(res));
+// When the internalClient field is missing from the isMaster command, the response returns the
+// full wire version range from minWireVersion == 0 to maxWireVersion == latest version, even if
+// the featureCompatibilityVersion is equal to the upgrade version.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
+res = adminDB.runCommand({isMaster: 1});
+assert.commandWorked(res);
+assert.eq(res.minWireVersion, 0, tojson(res));
+assert.lt(res.minWireVersion, res.maxWireVersion, tojson(res));
})();
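The mechanism under test: internalClient marks the isMaster caller as another cluster member, and the reply's wire-version range is then gated by the featureCompatibilityVersion rather than the full range external drivers are offered. A minimal sketch contrasting the two replies on a standalone:

    var internalRes = assert.commandWorked(db.adminCommand({
        isMaster: 1,
        internalClient: {minWireVersion: NumberInt(0), maxWireVersion: NumberInt(7)}
    }));
    var externalRes = assert.commandWorked(db.adminCommand({isMaster: 1}));

    // External clients always see the full range starting at 0; internal
    // clients may see a narrower, FCV-dependent window.
    assert.eq(0, externalRes.minWireVersion, tojson(externalRes));
    assert.gte(internalRes.minWireVersion, externalRes.minWireVersion, tojson(internalRes));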
diff --git a/jstests/noPassthroughWithMongod/log_component_helpers.js b/jstests/noPassthroughWithMongod/log_component_helpers.js
index 405b2bdaca3..d767c9f7e00 100644
--- a/jstests/noPassthroughWithMongod/log_component_helpers.js
+++ b/jstests/noPassthroughWithMongod/log_component_helpers.js
@@ -1,45 +1,44 @@
// Basic sanity check of log component helpers
(function(db) {
- "use strict";
- var mongo = db.getMongo();
-
- // Get current log component setttings. We will reset to these later.
- var originalSettings =
- assert.commandWorked(db.adminCommand({getParameter: 1, logComponentVerbosity: 1}))
- .logComponentVerbosity;
-
- // getLogComponents
- var components1 = mongo.getLogComponents();
- assert.docEq(components1, originalSettings);
-
- // getLogComponents via db
- var components2 = db.getLogComponents();
- assert.docEq(components2, originalSettings);
-
- // setLogLevel - default component
- mongo.setLogLevel(2);
- assert.eq(mongo.getLogComponents().verbosity, 2);
-
- db.setLogLevel(0);
- assert.eq(mongo.getLogComponents().verbosity, 0);
-
- // setLogLevel - valid log component
- mongo.setLogLevel(2, "storage.journal");
- assert.eq(mongo.getLogComponents().storage.journal.verbosity, 2);
-
- db.setLogLevel(1, "storage.journal");
- assert.eq(mongo.getLogComponents().storage.journal.verbosity, 1);
-
- // setLogLevel - invalid argument
- assert.throws(function() {
- mongo.setLogLevel(2, 24);
- });
- assert.throws(function() {
- db.setLogLevel(2, ["array", "not.allowed"]);
- });
-
- // Restore originalSettings
- assert.commandWorked(
- db.adminCommand({setParameter: 1, logComponentVerbosity: originalSettings}));
+"use strict";
+var mongo = db.getMongo();
+
+// Get the current log component settings. We will reset to these later.
+var originalSettings =
+ assert.commandWorked(db.adminCommand({getParameter: 1, logComponentVerbosity: 1}))
+ .logComponentVerbosity;
+
+// getLogComponents
+var components1 = mongo.getLogComponents();
+assert.docEq(components1, originalSettings);
+
+// getLogComponents via db
+var components2 = db.getLogComponents();
+assert.docEq(components2, originalSettings);
+
+// setLogLevel - default component
+mongo.setLogLevel(2);
+assert.eq(mongo.getLogComponents().verbosity, 2);
+
+db.setLogLevel(0);
+assert.eq(mongo.getLogComponents().verbosity, 0);
+
+// setLogLevel - valid log component
+mongo.setLogLevel(2, "storage.journal");
+assert.eq(mongo.getLogComponents().storage.journal.verbosity, 2);
+
+db.setLogLevel(1, "storage.journal");
+assert.eq(mongo.getLogComponents().storage.journal.verbosity, 1);
+
+// setLogLevel - invalid argument
+assert.throws(function() {
+ mongo.setLogLevel(2, 24);
+});
+assert.throws(function() {
+ db.setLogLevel(2, ["array", "not.allowed"]);
+});
+
+// Restore originalSettings
+assert.commandWorked(db.adminCommand({setParameter: 1, logComponentVerbosity: originalSettings}));
}(db));
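The helpers checked above are shorthand for the logComponentVerbosity server parameter, with dotted component paths mapping onto the nested settings document. A minimal sketch of the equivalence:

    // These two calls set the same thing: verbosity 3 for the 'query' component.
    db.setLogLevel(3, "query");
    assert.commandWorked(
        db.adminCommand({setParameter: 1, logComponentVerbosity: {query: {verbosity: 3}}}));
    assert.eq(3, db.getLogComponents().query.verbosity);

    db.setLogLevel(-1, "query");  // -1 clears the override; the component inherits again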
diff --git a/jstests/noPassthroughWithMongod/logpath.js b/jstests/noPassthroughWithMongod/logpath.js
index eea71efdc4f..bb39282871f 100644
--- a/jstests/noPassthroughWithMongod/logpath.js
+++ b/jstests/noPassthroughWithMongod/logpath.js
@@ -4,7 +4,8 @@ var name = "logpath";
var token = "logpath_token";
var dbdir = MongoRunner.dataPath + name + "/"; // this will work under windows as well as linux
-var basedir = MongoRunner.dataPath + name + "files" + "/";
+var basedir = MongoRunner.dataPath + name + "files" +
+ "/";
var logdir = basedir + "logdir/";
var testdir = basedir + "testdir/";
var sfile = _isWindows() ? "NUL" : "/dev/null";
diff --git a/jstests/noPassthroughWithMongod/moveprimary-replset.js b/jstests/noPassthroughWithMongod/moveprimary-replset.js
index 016b2e215c0..9b1e9f7d3bc 100644
--- a/jstests/noPassthroughWithMongod/moveprimary-replset.js
+++ b/jstests/noPassthroughWithMongod/moveprimary-replset.js
@@ -4,56 +4,56 @@
// @tags: [requires_replication, requires_sharding]
(function() {
- "use strict";
-
- var numDocs = 10000;
- var baseName = "moveprimary-replset";
- var testDBName = baseName;
- var testCollName = 'coll';
-
- var shardingTestConfig = {
- name: baseName,
- mongos: 1,
- shards: 2,
- config: 3,
- rs: {nodes: 3},
- other: {manualAddShard: true}
- };
-
- var shardingTest = new ShardingTest(shardingTestConfig);
-
- var replSet1 = shardingTest.rs0;
- var replSet2 = shardingTest.rs1;
-
- var repset1DB = replSet1.getPrimary().getDB(testDBName);
- for (var i = 1; i <= numDocs; i++) {
- repset1DB[testCollName].insert({x: i});
- }
- replSet1.awaitReplication();
-
- var mongosConn = shardingTest.s;
- var testDB = mongosConn.getDB(testDBName);
-
- mongosConn.adminCommand({addshard: replSet1.getURL()});
-
- testDB[testCollName].update({}, {$set: {y: 'hello'}}, false /*upsert*/, true /*multi*/);
- assert.eq(testDB[testCollName].count({y: 'hello'}),
- numDocs,
- 'updating and counting docs via mongos failed');
-
- mongosConn.adminCommand({addshard: replSet2.getURL()});
-
- assert.commandWorked(
- mongosConn.getDB('admin').runCommand({moveprimary: testDBName, to: replSet2.getURL()}));
- mongosConn.getDB('admin').printShardingStatus();
- assert.eq(testDB.getSiblingDB("config").databases.findOne({"_id": testDBName}).primary,
- replSet2.name,
- "Failed to change primary shard for unsharded database.");
-
- testDB[testCollName].update({}, {$set: {z: 'world'}}, false /*upsert*/, true /*multi*/);
- assert.eq(testDB[testCollName].count({z: 'world'}),
- numDocs,
- 'updating and counting docs via mongos failed');
-
- shardingTest.stop();
+"use strict";
+
+var numDocs = 10000;
+var baseName = "moveprimary-replset";
+var testDBName = baseName;
+var testCollName = 'coll';
+
+var shardingTestConfig = {
+ name: baseName,
+ mongos: 1,
+ shards: 2,
+ config: 3,
+ rs: {nodes: 3},
+ other: {manualAddShard: true}
+};
+
+var shardingTest = new ShardingTest(shardingTestConfig);
+
+var replSet1 = shardingTest.rs0;
+var replSet2 = shardingTest.rs1;
+
+var repset1DB = replSet1.getPrimary().getDB(testDBName);
+for (var i = 1; i <= numDocs; i++) {
+ repset1DB[testCollName].insert({x: i});
+}
+replSet1.awaitReplication();
+
+var mongosConn = shardingTest.s;
+var testDB = mongosConn.getDB(testDBName);
+
+mongosConn.adminCommand({addshard: replSet1.getURL()});
+
+testDB[testCollName].update({}, {$set: {y: 'hello'}}, false /*upsert*/, true /*multi*/);
+assert.eq(testDB[testCollName].count({y: 'hello'}),
+ numDocs,
+ 'updating and counting docs via mongos failed');
+
+mongosConn.adminCommand({addshard: replSet2.getURL()});
+
+assert.commandWorked(
+ mongosConn.getDB('admin').runCommand({moveprimary: testDBName, to: replSet2.getURL()}));
+mongosConn.getDB('admin').printShardingStatus();
+assert.eq(testDB.getSiblingDB("config").databases.findOne({"_id": testDBName}).primary,
+ replSet2.name,
+ "Failed to change primary shard for unsharded database.");
+
+testDB[testCollName].update({}, {$set: {z: 'world'}}, false /*upsert*/, true /*multi*/);
+assert.eq(testDB[testCollName].count({z: 'world'}),
+ numDocs,
+ 'updating and counting docs via mongos failed');
+
+shardingTest.stop();
})();
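movePrimary relocates a database's unsharded collections to another shard and rewrites the config metadata; the test asserts on the latter. A minimal sketch of the command/verify pair, with hypothetical database and shard names, assuming a mongos connection `mongosConn`:

    assert.commandWorked(
        mongosConn.adminCommand({movePrimary: "someDB", to: "someOtherShard"}));
    assert.eq("someOtherShard",
              mongosConn.getDB("config").databases.findOne({_id: "someDB"}).primary);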
diff --git a/jstests/noPassthroughWithMongod/mr_writeconflict.js b/jstests/noPassthroughWithMongod/mr_writeconflict.js
index af54a150df4..204c9040c22 100644
--- a/jstests/noPassthroughWithMongod/mr_writeconflict.js
+++ b/jstests/noPassthroughWithMongod/mr_writeconflict.js
@@ -1,70 +1,69 @@
// SERVER-16262: Write-conflict during map-reduce operations
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/parallelTester.js');
+load('jstests/libs/parallelTester.js');
- var makeDoc = function(keyLimit, valueLimit) {
- return {_id: ObjectId(), key: Random.randInt(keyLimit), value: Random.randInt(valueLimit)};
- };
+var makeDoc = function(keyLimit, valueLimit) {
+ return {_id: ObjectId(), key: Random.randInt(keyLimit), value: Random.randInt(valueLimit)};
+};
- var main = function() {
-
- function mapper() {
- var obj = {};
- obj[this.value] = 1;
- emit(this.key, obj);
- }
+var main = function() {
+ function mapper() {
+ var obj = {};
+ obj[this.value] = 1;
+ emit(this.key, obj);
+ }
- function reducer(key, values) {
- var res = {};
+ function reducer(key, values) {
+ var res = {};
- values.forEach(function(obj) {
- Object.keys(obj).forEach(function(value) {
- if (!res.hasOwnProperty(value)) {
- res[value] = 0;
- }
- res[value] += obj[value];
- });
+ values.forEach(function(obj) {
+ Object.keys(obj).forEach(function(value) {
+ if (!res.hasOwnProperty(value)) {
+ res[value] = 0;
+ }
+ res[value] += obj[value];
});
+ });
- return res;
- }
+ return res;
+ }
- for (var i = 0; i < 10; i++) {
- // Have all threads combine their results into the same collection
- var res = db.source.mapReduce(mapper, reducer, {out: {reduce: 'dest'}});
- assert.commandWorked(res);
- }
- };
+ for (var i = 0; i < 10; i++) {
+ // Have all threads combine their results into the same collection
+ var res = db.source.mapReduce(mapper, reducer, {out: {reduce: 'dest'}});
+ assert.commandWorked(res);
+ }
+};
- Random.setRandomSeed();
+Random.setRandomSeed();
- var numDocs = 200;
- var bulk = db.source.initializeUnorderedBulkOp();
- var i;
- for (i = 0; i < numDocs; ++i) {
- var doc = makeDoc(numDocs / 100, numDocs / 10);
- bulk.insert(doc);
- }
+var numDocs = 200;
+var bulk = db.source.initializeUnorderedBulkOp();
+var i;
+for (i = 0; i < numDocs; ++i) {
+ var doc = makeDoc(numDocs / 100, numDocs / 10);
+ bulk.insert(doc);
+}
- var res = bulk.execute();
- assert.writeOK(res);
- assert.eq(numDocs, res.nInserted);
+var res = bulk.execute();
+assert.writeOK(res);
+assert.eq(numDocs, res.nInserted);
- db.dest.drop();
- assert.commandWorked(db.createCollection('dest'));
+db.dest.drop();
+assert.commandWorked(db.createCollection('dest'));
- var numThreads = 6;
- var t = [];
- for (i = 0; i < numThreads - 1; ++i) {
- t[i] = new ScopedThread(main);
- t[i].start();
- }
+var numThreads = 6;
+var t = [];
+for (i = 0; i < numThreads - 1; ++i) {
+ t[i] = new ScopedThread(main);
+ t[i].start();
+}
- main();
- for (i = 0; i < numThreads - 1; ++i) {
- t[i].join();
- }
+main();
+for (i = 0; i < numThreads - 1; ++i) {
+ t[i].join();
+}
}());
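ScopedThread, loaded from parallelTester.js, runs a function on its own connection in a separate thread; fanning several of them at one reduce-output collection is what manufactures the write conflicts of SERVER-16262. A minimal sketch of the fan-out/join shape:

    load('jstests/libs/parallelTester.js');

    var work = function() {
        var res = db.source.mapReduce(
            function() { emit(this.key, 1); },
            function(key, values) { return Array.sum(values); },
            {out: {reduce: 'dest'}});
        assert.commandWorked(res);
    };

    var threads = [];
    for (var i = 0; i < 4; ++i) {
        var t = new ScopedThread(work);
        t.start();
        threads.push(t);
    }
    threads.forEach(function(t) { t.join(); });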
diff --git a/jstests/noPassthroughWithMongod/ne_array_indexability.js b/jstests/noPassthroughWithMongod/ne_array_indexability.js
index 284389c1303..606956c5753 100644
--- a/jstests/noPassthroughWithMongod/ne_array_indexability.js
+++ b/jstests/noPassthroughWithMongod/ne_array_indexability.js
@@ -2,44 +2,44 @@
* Test that $ne: [] queries are cached correctly. See SERVER-39764.
*/
(function() {
- const coll = db.ne_array_indexability;
- coll.drop();
-
- coll.createIndex({"obj": 1});
- coll.createIndex({"obj": 1, "abc": 1});
-
- assert.commandWorked(coll.insert({obj: "hi there"}));
-
- function runTest(queryToCache, queryToRunAfterCaching) {
- assert.eq(coll.find(queryToCache).itcount(), 1);
- assert.eq(coll.find(queryToCache).itcount(), 1);
-
- const cacheEntries =
- coll.aggregate([
- {$planCacheStats: {}},
- {
- $match: {
- isActive: true,
- createdFromQuery: {query: queryToCache, sort: {}, projection: {}}
- }
+const coll = db.ne_array_indexability;
+coll.drop();
+
+coll.createIndex({"obj": 1});
+coll.createIndex({"obj": 1, "abc": 1});
+
+assert.commandWorked(coll.insert({obj: "hi there"}));
+
+function runTest(queryToCache, queryToRunAfterCaching) {
+ assert.eq(coll.find(queryToCache).itcount(), 1);
+ assert.eq(coll.find(queryToCache).itcount(), 1);
+
+ const cacheEntries =
+ coll.aggregate([
+ {$planCacheStats: {}},
+ {
+ $match: {
+ isActive: true,
+ createdFromQuery: {query: queryToCache, sort: {}, projection: {}}
}
- ])
- .toArray();
- assert.eq(cacheEntries.length, 1);
+ }
+ ])
+ .toArray();
+ assert.eq(cacheEntries.length, 1);
- assert.eq(coll.find(queryToRunAfterCaching).itcount(), 1);
+ assert.eq(coll.find(queryToRunAfterCaching).itcount(), 1);
- const explain = assert.commandWorked(coll.find(queryToRunAfterCaching).explain());
- // The query with the $ne: array should have the same queryHash, but a different
- // planCacheKey.
- assert.eq(explain.queryPlanner.queryHash, cacheEntries[0].queryHash);
- assert.neq(explain.queryPlanner.planCacheKey, cacheEntries[0].planCacheKey);
- }
+ const explain = assert.commandWorked(coll.find(queryToRunAfterCaching).explain());
+ // The query with the $ne: array should have the same queryHash, but a different
+ // planCacheKey.
+ assert.eq(explain.queryPlanner.queryHash, cacheEntries[0].queryHash);
+ assert.neq(explain.queryPlanner.planCacheKey, cacheEntries[0].planCacheKey);
+}
- runTest({'obj': {$ne: 'def'}}, {'obj': {$ne: [[1]]}});
+runTest({'obj': {$ne: 'def'}}, {'obj': {$ne: [[1]]}});
- // Clear the cache.
- assert.commandWorked(coll.runCommand('planCacheClear'));
+// Clear the cache.
+assert.commandWorked(coll.runCommand('planCacheClear'));
- runTest({'obj': {$nin: ['abc', 'def']}}, {'obj': {$nin: [[1], 'abc']}});
+runTest({'obj': {$nin: ['abc', 'def']}}, {'obj': {$nin: [[1], 'abc']}});
})();
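The property being verified: queryHash is derived from the query shape alone, while planCacheKey also encodes which indexes are compatible, so a $ne over an array can share a shape with a scalar $ne yet map to a different cache entry. A minimal sketch of reading both identifiers from explain, for any collection `c`:

    var explain = assert.commandWorked(c.find({obj: {$ne: 'def'}}).explain());
    print("queryHash:    " + explain.queryPlanner.queryHash);
    print("planCacheKey: " + explain.queryPlanner.planCacheKey);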
diff --git a/jstests/noPassthroughWithMongod/now_variable.js b/jstests/noPassthroughWithMongod/now_variable.js
index b64d029b436..a3b1f793941 100644
--- a/jstests/noPassthroughWithMongod/now_variable.js
+++ b/jstests/noPassthroughWithMongod/now_variable.js
@@ -2,122 +2,121 @@
 * Tests for the $$NOW and $$CLUSTER_TIME system variables.
*/
(function() {
- "use strict";
-
- const coll = db[jsTest.name()];
- const otherColl = db[coll.getName() + "_other"];
- otherColl.drop();
- coll.drop();
- db["viewWithNow"].drop();
- db["viewWithClusterTime"].drop();
-
- // Insert simple documents into the main test collection. Aggregation and view pipelines will
- // augment these docs with time-based fields.
- const numdocs = 1000;
- let bulk = coll.initializeUnorderedBulkOp();
+"use strict";
+
+const coll = db[jsTest.name()];
+const otherColl = db[coll.getName() + "_other"];
+otherColl.drop();
+coll.drop();
+db["viewWithNow"].drop();
+db["viewWithClusterTime"].drop();
+
+// Insert simple documents into the main test collection. Aggregation and view pipelines will
+// augment these docs with time-based fields.
+const numdocs = 1000;
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < numdocs; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute());
+
+// Insert into another collection with pre-made fields for testing the find() command.
+bulk = otherColl.initializeUnorderedBulkOp();
+const timeFieldValue = new Date();
+for (let i = 0; i < numdocs; ++i) {
+ bulk.insert({_id: i, timeField: timeFieldValue, clusterTimeField: new Timestamp(0, 1)});
+}
+assert.commandWorked(bulk.execute());
+
+assert.commandWorked(
+ db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$NOW"}}]));
+const viewWithNow = db["viewWithNow"];
+
+assert.commandWorked(db.createView(
+ "viewWithClusterTime", coll.getName(), [{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
+const viewWithClusterTime = db["viewWithClusterTime"];
+
+function runTests(query) {
+ const results = query().toArray();
+ assert.eq(results.length, numdocs);
+
+ // Make sure the values are the same for all documents
for (let i = 0; i < numdocs; ++i) {
- bulk.insert({_id: i});
+ assert.eq(results[0].timeField, results[i].timeField);
}
- assert.commandWorked(bulk.execute());
- // Insert into another collection with pre-made fields for testing the find() command.
- bulk = otherColl.initializeUnorderedBulkOp();
- const timeFieldValue = new Date();
- for (let i = 0; i < numdocs; ++i) {
- bulk.insert({_id: i, timeField: timeFieldValue, clusterTimeField: new Timestamp(0, 1)});
- }
- assert.commandWorked(bulk.execute());
-
- assert.commandWorked(
- db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$NOW"}}]));
- const viewWithNow = db["viewWithNow"];
-
- assert.commandWorked(db.createView(
- "viewWithClusterTime", coll.getName(), [{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
- const viewWithClusterTime = db["viewWithClusterTime"];
-
- function runTests(query) {
- const results = query().toArray();
- assert.eq(results.length, numdocs);
-
- // Make sure the values are the same for all documents
- for (let i = 0; i < numdocs; ++i) {
- assert.eq(results[0].timeField, results[i].timeField);
- }
-
- // Sleep for a while and then rerun.
- sleep(3000);
-
- const resultsLater = query().toArray();
- assert.eq(resultsLater.length, numdocs);
-
- // Later results should be later in time.
- assert.lte(results[0].timeField, resultsLater[0].timeField);
- }
-
- function runTestsExpectFailure(query) {
- const results = query();
- // Expect to see "Builtin variable '$$CLUSTER_TIME' is not available" error.
- assert.commandFailedWithCode(results, 51144);
- }
-
- function baseCollectionNowFind() {
- return otherColl.find({$expr: {$lte: ["$timeField", "$$NOW"]}});
- }
-
- function baseCollectionClusterTimeFind() {
- return db.runCommand({
- find: otherColl.getName(),
- filter: {$expr: {$lt: ["$clusterTimeField", "$$CLUSTER_TIME"]}}
- });
- }
-
- function baseCollectionNowAgg() {
- return coll.aggregate([{$addFields: {timeField: "$$NOW"}}]);
- }
-
- function baseCollectionClusterTimeAgg() {
- return db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$addFields: {timeField: "$$CLUSTER_TIME"}}],
- cursor: {}
- });
- }
-
- function fromViewWithNow() {
- return viewWithNow.find();
- }
-
- function fromViewWithClusterTime() {
- return db.runCommand({find: viewWithClusterTime.getName()});
- }
-
- function withExprNow() {
- return viewWithNow.find({$expr: {$eq: ["$timeField", "$$NOW"]}});
- }
-
- function withExprClusterTime() {
- return db.runCommand({
- find: viewWithClusterTime.getName(),
- filter: {$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}}
- });
- }
-
- // Test that $$NOW is usable in all contexts.
- runTests(baseCollectionNowFind);
- runTests(baseCollectionNowAgg);
- runTests(fromViewWithNow);
- runTests(withExprNow);
-
- // Test that $$NOW can be used in explain for both find and aggregate.
- assert.commandWorked(coll.explain().find({$expr: {$lte: ["$timeField", "$$NOW"]}}).finish());
- assert.commandWorked(
- viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$NOW"]}}).finish());
- assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$NOW"}}]));
-
- // $$CLUSTER_TIME is not available on a standalone mongod.
- runTestsExpectFailure(baseCollectionClusterTimeFind);
- runTestsExpectFailure(baseCollectionClusterTimeAgg);
- runTestsExpectFailure(fromViewWithClusterTime);
- runTestsExpectFailure(withExprClusterTime);
+ // Sleep for a while and then rerun.
+ sleep(3000);
+
+ const resultsLater = query().toArray();
+ assert.eq(resultsLater.length, numdocs);
+
+ // Later results should be later in time.
+ assert.lte(results[0].timeField, resultsLater[0].timeField);
+}
+
+function runTestsExpectFailure(query) {
+ const results = query();
+ // Expect to see "Builtin variable '$$CLUSTER_TIME' is not available" error.
+ assert.commandFailedWithCode(results, 51144);
+}
+
+function baseCollectionNowFind() {
+ return otherColl.find({$expr: {$lte: ["$timeField", "$$NOW"]}});
+}
+
+function baseCollectionClusterTimeFind() {
+ return db.runCommand({
+ find: otherColl.getName(),
+ filter: {$expr: {$lt: ["$clusterTimeField", "$$CLUSTER_TIME"]}}
+ });
+}
+
+function baseCollectionNowAgg() {
+ return coll.aggregate([{$addFields: {timeField: "$$NOW"}}]);
+}
+
+function baseCollectionClusterTimeAgg() {
+ return db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$addFields: {timeField: "$$CLUSTER_TIME"}}],
+ cursor: {}
+ });
+}
+
+function fromViewWithNow() {
+ return viewWithNow.find();
+}
+
+function fromViewWithClusterTime() {
+ return db.runCommand({find: viewWithClusterTime.getName()});
+}
+
+function withExprNow() {
+ return viewWithNow.find({$expr: {$eq: ["$timeField", "$$NOW"]}});
+}
+
+function withExprClusterTime() {
+ return db.runCommand({
+ find: viewWithClusterTime.getName(),
+ filter: {$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}}
+ });
+}
+
+// Test that $$NOW is usable in all contexts.
+runTests(baseCollectionNowFind);
+runTests(baseCollectionNowAgg);
+runTests(fromViewWithNow);
+runTests(withExprNow);
+
+// Test that $$NOW can be used in explain for both find and aggregate.
+assert.commandWorked(coll.explain().find({$expr: {$lte: ["$timeField", "$$NOW"]}}).finish());
+assert.commandWorked(viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$NOW"]}}).finish());
+assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$NOW"}}]));
+
+// $$CLUSTER_TIME is not available on a standalone mongod.
+runTestsExpectFailure(baseCollectionClusterTimeFind);
+runTestsExpectFailure(baseCollectionClusterTimeAgg);
+runTestsExpectFailure(fromViewWithClusterTime);
+runTestsExpectFailure(withExprClusterTime);
}());
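$$NOW is bound once per operation, which is why every document returned by a single command above carries the identical timeField. A minimal sketch of the typical application, computing staleness against a hypothetical `lastUpdated` field on a hypothetical `db.example` collection:

    // Every document in this one aggregate sees the same $$NOW value;
    // $subtract of two dates yields milliseconds.
    db.example.aggregate([
        {$addFields: {stalenessMillis: {$subtract: ["$$NOW", "$lastUpdated"]}}}
    ]);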
diff --git a/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js b/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js
index 52829d74717..c19b4a14fcb 100644
--- a/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js
+++ b/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js
@@ -3,62 +3,62 @@
* hijack the cached plan for an earlier $not-$in query.
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/analyze_plan.js'); // For isCollScan.
+load('jstests/libs/analyze_plan.js'); // For isCollScan.
- const coll = db.plan_cache_not_in_regex;
- coll.drop();
+const coll = db.plan_cache_not_in_regex;
+coll.drop();
- // Helper function which obtains the cached plan, if any, for a given query shape.
- function getPlanForCacheEntry(query, proj, sort) {
- const key = {query: query, sort: sort, projection: proj};
- const cursor = coll.aggregate([
- {$planCacheStats: {}},
- {
- $match: {
- "createdFromQuery.query": query,
- "createdFromQuery.projection": proj,
- "createdFromQuery.sort": sort
- }
+// Helper function which obtains the cached plan, if any, for a given query shape.
+function getPlanForCacheEntry(query, proj, sort) {
+ const key = {query: query, sort: sort, projection: proj};
+ const cursor = coll.aggregate([
+ {$planCacheStats: {}},
+ {
+ $match: {
+ "createdFromQuery.query": query,
+ "createdFromQuery.projection": proj,
+ "createdFromQuery.sort": sort
}
- ]);
- const entryStats = cursor.toArray();
- assert.eq(entryStats.length, 1, `Expected one cached plan, found: ${tojson(entryStats)}`);
- return entryStats.shift();
- }
+ }
+ ]);
+ const entryStats = cursor.toArray();
+ assert.eq(entryStats.length, 1, `Expected one cached plan, found: ${tojson(entryStats)}`);
+ return entryStats.shift();
+}
- // Insert a document containing a field 'a', and create two indexes that can support queries on
- // this field. This is to ensure that the plan we choose will be cached, since if only a single
- // index is available, the solution will not be cached.
- assert.commandWorked(coll.insert({a: "foo"}));
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+// Insert a document containing a field 'a', and create two indexes that can support queries on
+// this field. This is to ensure that the plan we choose will be cached, since if only a single
+// index is available, the solution will not be cached.
+assert.commandWorked(coll.insert({a: "foo"}));
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
- // Repeat the test for query, query with projection, and query with projection and sort.
- for (let [proj, sort] of[[{}, {}], [{_id: 0, a: 1}, {}], [{_id: 0, a: 1}, {a: 1}]]) {
- // Perform a plain $not-$in query on 'a' and confirm that the plan is cached.
- const queryShape = {a: {$not: {$in: [32, 33]}}};
+// Repeat the test for query, query with projection, and query with projection and sort.
+for (let [proj, sort] of [[{}, {}], [{_id: 0, a: 1}, {}], [{_id: 0, a: 1}, {a: 1}]]) {
+ // Perform a plain $not-$in query on 'a' and confirm that the plan is cached.
+ const queryShape = {a: {$not: {$in: [32, 33]}}};
+ assert.eq(1, coll.find(queryShape, proj).sort(sort).itcount());
+ let cacheEntry = getPlanForCacheEntry(queryShape, proj, sort);
+ assert(cacheEntry);
+
+ // If the cached plan is inactive, perform the same query to activate it.
+ if (cacheEntry.isActive === false) {
assert.eq(1, coll.find(queryShape, proj).sort(sort).itcount());
- let cacheEntry = getPlanForCacheEntry(queryShape, proj, sort);
+ cacheEntry = getPlanForCacheEntry(queryShape, proj, sort);
assert(cacheEntry);
+ assert(cacheEntry.isActive);
+ }
- // If the cached plan is inactive, perform the same query to activate it.
- if (cacheEntry.isActive === false) {
- assert.eq(1, coll.find(queryShape, proj).sort(sort).itcount());
- cacheEntry = getPlanForCacheEntry(queryShape, proj, sort);
- assert(cacheEntry);
- assert(cacheEntry.isActive);
- }
-
- // Now perform a $not-$in-$regex query, confirm that it obtains the correct results, and
- // that it used a COLLSCAN rather than planning from the cache.
- const explainOutput = assert.commandWorked(
- coll.find({a: {$not: {$in: [34, /bar/]}}}).explain("executionStats"));
- assert(isCollscan(coll.getDB(), explainOutput.queryPlanner.winningPlan));
- assert.eq(1, explainOutput.executionStats.nReturned);
+ // Now perform a $not-$in-$regex query, confirm that it obtains the correct results, and
+ // that it used a COLLSCAN rather than planning from the cache.
+ const explainOutput =
+ assert.commandWorked(coll.find({a: {$not: {$in: [34, /bar/]}}}).explain("executionStats"));
+ assert(isCollscan(coll.getDB(), explainOutput.queryPlanner.winningPlan));
+ assert.eq(1, explainOutput.executionStats.nReturned);
- // Flush the plan cache before the next iteration.
- coll.getPlanCache().clear();
- }
+ // Flush the plan cache before the next iteration.
+ coll.getPlanCache().clear();
+}
})();
\ No newline at end of file
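Two maintenance tools the test leans on: $planCacheStats, filterable down to one shape, and planCacheClear, which can be scoped to a single query or clear everything. A minimal sketch for a collection `c`:

    // Inspect the cache entry for one shape.
    var entries = c.aggregate([
                       {$planCacheStats: {}},
                       {$match: {"createdFromQuery.query": {a: {$not: {$in: [32, 33]}}}}}
                   ]).toArray();

    // Clear just that shape; with no arguments, the whole cache is flushed.
    assert.commandWorked(c.runCommand('planCacheClear', {query: {a: {$not: {$in: [32, 33]}}}}));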
diff --git a/jstests/noPassthroughWithMongod/plan_cache_replanning.js b/jstests/noPassthroughWithMongod/plan_cache_replanning.js
index fa901b5e8d1..3882a2c4106 100644
--- a/jstests/noPassthroughWithMongod/plan_cache_replanning.js
+++ b/jstests/noPassthroughWithMongod/plan_cache_replanning.js
@@ -2,121 +2,130 @@
* This test will attempt to create a scenario where the plan cache entry for a given query shape
* oscillates. It achieves this by creating two indexes, A and B, on a collection, and interleaving
* queries which are "ideal" for index A with queries that are "ideal" for index B.
-*/
+ */
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/analyze_plan.js'); // For getPlanStage().
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load('jstests/libs/analyze_plan.js'); // For getPlanStage().
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- const coll = assertDropAndRecreateCollection(db, "plan_cache_replanning");
+const coll = assertDropAndRecreateCollection(db, "plan_cache_replanning");
- function getPlansForCacheEntry(query) {
- let key = {query: query, sort: {}, projection: {}};
- let res = coll.runCommand("planCacheListPlans", key);
- assert.commandWorked(res, `planCacheListPlans(${tojson(key)}) failed`);
- assert(res.hasOwnProperty("plans"),
- `plans missing from planCacheListPlans(${tojson(key)}) failed`);
+function getPlansForCacheEntry(query) {
+ let key = {query: query, sort: {}, projection: {}};
+ let res = coll.runCommand("planCacheListPlans", key);
+ assert.commandWorked(res, `planCacheListPlans(${tojson(key)}) failed`);
+ assert(res.hasOwnProperty("plans"),
+ `plans missing from planCacheListPlans(${tojson(key)}) failed`);
- return res;
- }
-
- function planHasIxScanStageForKey(planStats, keyPattern) {
- const stage = getPlanStage(planStats, "IXSCAN");
- if (stage === null) {
- return false;
- }
-
- return bsonWoCompare(keyPattern, stage.keyPattern) == 0;
- }
-
- const queryShape = {a: 1, b: 1};
+ return res;
+}
- // Carefully construct a collection so that some queries will do well with an {a: 1} index
- // and others with a {b: 1} index.
- for (let i = 1000; i < 1100; i++) {
- assert.commandWorked(coll.insert({a: 1, b: i}));
+function planHasIxScanStageForKey(planStats, keyPattern) {
+ const stage = getPlanStage(planStats, "IXSCAN");
+ if (stage === null) {
+ return false;
}
- for (let i = 1000; i < 1100; i++) {
- assert.commandWorked(coll.insert({a: i, b: 2}));
- }
-
- // This query will be quick with {a: 1} index, and far slower {b: 1} index. With the {a: 1}
- // index, the server should only need to examine one document. Using {b: 1}, it will have to
- // scan through each document which has 2 as the value of the 'b' field.
- const aIndexQuery = {a: 1099, b: 2};
- // Opposite of 'aIndexQuery'. Should be quick if the {b: 1} index is used, and slower if the
- // {a: 1} index is used.
- const bIndexQuery = {a: 1, b: 1099};
-
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
-
- // Run a query where the {b: 1} index will easily win.
- assert.eq(1, coll.find(bIndexQuery).itcount());
-
- // The plan cache should now hold an inactive entry.
- let entry = getPlansForCacheEntry(queryShape);
- let entryWorks = entry.works;
- assert.eq(entry.isActive, false);
- assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
-
- // Re-run the query. The inactive cache entry should be promoted to an active entry.
- assert.eq(1, coll.find(bIndexQuery).itcount());
- entry = getPlansForCacheEntry(queryShape);
- assert.eq(entry.isActive, true);
- assert.eq(entry.works, entryWorks);
- assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
-
- // Now we will attempt to oscillate the cache entry by interleaving queries which should use
- // the {a:1} and {b:1} index. When the plan using the {b: 1} index is in the cache, running a
- // query which should use the {a: 1} index will perform very poorly, and trigger
- // replanning (and vice versa).
-
- // The {b: 1} plan is currently in the cache. Run the query which should use the {a: 1}
- // index. The current cache entry will be deactivated, and then the cache entry for the {a: 1}
- // will overwrite it (as active).
- assert.eq(1, coll.find(aIndexQuery).itcount());
- entry = getPlansForCacheEntry(queryShape);
- assert.eq(entry.isActive, true);
- assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {a: 1}), true);
-
- // Run the query which should use the {b: 1} index.
- assert.eq(1, coll.find(bIndexQuery).itcount());
- entry = getPlansForCacheEntry(queryShape);
- assert.eq(entry.isActive, true);
- assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
-
- // The {b: 1} plan is again in the cache. Run the query which should use the {a: 1}
- // index.
- assert.eq(1, coll.find(aIndexQuery).itcount());
- entry = getPlansForCacheEntry(queryShape);
- assert.eq(entry.isActive, true);
- assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {a: 1}), true);
-
- // The {a: 1} plan is back in the cache. Run the query which would perform better on the plan
- // using the {b: 1} index, and ensure that plan gets written to the cache.
- assert.eq(1, coll.find(bIndexQuery).itcount());
- entry = getPlansForCacheEntry(queryShape);
- entryWorks = entry.works;
- assert.eq(entry.isActive, true);
- assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
-
- // Now run a plan that will perform poorly with both indices (it will be required to scan 500
- // documents). This will result in replanning (and the cache entry being deactivated). However,
- // the new plan will have a very high works value, and will not replace the existing cache
- // entry. It will only bump the existing cache entry's works value.
- for (let i = 0; i < 500; i++) {
- assert.commandWorked(coll.insert({a: 3, b: 3}));
- }
- assert.eq(500, coll.find({a: 3, b: 3}).itcount());
-
- // The cache entry should have been deactivated.
- entry = getPlansForCacheEntry(queryShape);
- assert.eq(entry.isActive, false);
- assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
-
- // The works value should have doubled.
- assert.eq(entry.works, entryWorks * 2);
+ return bsonWoCompare(keyPattern, stage.keyPattern) == 0;
+}
+
+const queryShape = {
+ a: 1,
+ b: 1
+};
+
+// Carefully construct a collection so that some queries will do well with an {a: 1} index
+// and others with a {b: 1} index.
+for (let i = 1000; i < 1100; i++) {
+ assert.commandWorked(coll.insert({a: 1, b: i}));
+}
+
+for (let i = 1000; i < 1100; i++) {
+ assert.commandWorked(coll.insert({a: i, b: 2}));
+}
+
+// This query will be quick with the {a: 1} index, and far slower with the {b: 1} index. With
+// the {a: 1} index, the server should only need to examine one document. Using {b: 1}, it will
+// have to scan through each document which has 2 as the value of the 'b' field.
+const aIndexQuery = {
+ a: 1099,
+ b: 2
+};
+// Opposite of 'aIndexQuery'. Should be quick if the {b: 1} index is used, and slower if the
+// {a: 1} index is used.
+const bIndexQuery = {
+ a: 1,
+ b: 1099
+};
+
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+
+// Run a query where the {b: 1} index will easily win.
+assert.eq(1, coll.find(bIndexQuery).itcount());
+
+// The plan cache should now hold an inactive entry.
+let entry = getPlansForCacheEntry(queryShape);
+let entryWorks = entry.works;
+assert.eq(entry.isActive, false);
+assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+
+// Re-run the query. The inactive cache entry should be promoted to an active entry.
+assert.eq(1, coll.find(bIndexQuery).itcount());
+entry = getPlansForCacheEntry(queryShape);
+assert.eq(entry.isActive, true);
+assert.eq(entry.works, entryWorks);
+assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+
+// Now we will attempt to oscillate the cache entry by interleaving queries which should use
+// the {a: 1} and {b: 1} indexes. When the plan using the {b: 1} index is in the cache, running
+// a query which should use the {a: 1} index will perform very poorly, and trigger replanning
+// (and vice versa).
+
+// The {b: 1} plan is currently in the cache. Run the query which should use the {a: 1}
+// index. The current cache entry will be deactivated, and then the cache entry for the
+// {a: 1} plan will overwrite it (as active).
+assert.eq(1, coll.find(aIndexQuery).itcount());
+entry = getPlansForCacheEntry(queryShape);
+assert.eq(entry.isActive, true);
+assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {a: 1}), true);
+
+// Run the query which should use the {b: 1} index.
+assert.eq(1, coll.find(bIndexQuery).itcount());
+entry = getPlansForCacheEntry(queryShape);
+assert.eq(entry.isActive, true);
+assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+
+// The {b: 1} plan is again in the cache. Run the query which should use the {a: 1}
+// index.
+assert.eq(1, coll.find(aIndexQuery).itcount());
+entry = getPlansForCacheEntry(queryShape);
+assert.eq(entry.isActive, true);
+assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {a: 1}), true);
+
+// The {a: 1} plan is back in the cache. Run the query which would perform better on the plan
+// using the {b: 1} index, and ensure that plan gets written to the cache.
+assert.eq(1, coll.find(bIndexQuery).itcount());
+entry = getPlansForCacheEntry(queryShape);
+entryWorks = entry.works;
+assert.eq(entry.isActive, true);
+assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+
+// Now run a query that will perform poorly with both indices (it will be required to scan 500
+// documents). This will result in replanning (and the cache entry being deactivated). However,
+// the new plan will have a very high works value, and will not replace the existing cache
+// entry. It will only bump the existing cache entry's works value.
+for (let i = 0; i < 500; i++) {
+ assert.commandWorked(coll.insert({a: 3, b: 3}));
+}
+assert.eq(500, coll.find({a: 3, b: 3}).itcount());
+
+// The cache entry should have been deactivated.
+entry = getPlansForCacheEntry(queryShape);
+assert.eq(entry.isActive, false);
+assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+
+// The works value should have doubled.
+assert.eq(entry.works, entryWorks * 2);
})();
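
For reference, a helper like the test's getPlansForCacheEntry() can be sketched with the planCacheListPlans command from this server era; this is a minimal sketch, assuming the 4.2-style output fields 'plans', 'works', and 'isActive':

    // Minimal sketch, assuming the pre-$planCacheStats planCacheListPlans
    // command; 'works' and 'isActive' are assumed 4.2-style output fields.
    function listCacheEntry(coll, queryShape) {
        const res = coll.getDB().runCommand(
            {planCacheListPlans: coll.getName(), query: queryShape});
        assert.commandWorked(res);
        // res.plans holds the cached candidate plans; res.works and
        // res.isActive are the bookkeeping fields asserted on above.
        return res;
    }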
diff --git a/jstests/noPassthroughWithMongod/query_oplogreplay.js b/jstests/noPassthroughWithMongod/query_oplogreplay.js
index cf7c255ce4b..4acbd4676dc 100644
--- a/jstests/noPassthroughWithMongod/query_oplogreplay.js
+++ b/jstests/noPassthroughWithMongod/query_oplogreplay.js
@@ -2,265 +2,255 @@
// @tags: [requires_replication, requires_capped]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js");
- load("jstests/libs/storage_engine_utils.js");
+load("jstests/libs/analyze_plan.js");
+load("jstests/libs/storage_engine_utils.js");
- const t = db.getSiblingDB("local").oplog.jstests_query_oplogreplay;
+const t = db.getSiblingDB("local").oplog.jstests_query_oplogreplay;
- function dropOplogAndCreateNew(oplog, newCollectionSpec) {
- if (storageEngineIsWiredTigerOrInMemory()) {
- // We forbid dropping the oplog when using the WiredTiger or in-memory storage engines
- // and so we can't drop the oplog here. Because Evergreen reuses nodes for testing,
- // the oplog may already exist on the test node; in this case, trying to create the
- // oplog once again would fail.
- // To ensure we are working with a clean oplog (an oplog without entries), we resort
- // to truncating the oplog instead.
- if (!oplog.getDB().getCollectionNames().includes(oplog.getName())) {
- oplog.getDB().createCollection(oplog.getName(), newCollectionSpec);
- }
- oplog.runCommand('emptycapped');
- oplog.getDB().adminCommand({replSetResizeOplog: 1, size: 16 * 1024});
- } else {
- oplog.drop();
- assert.commandWorked(
- oplog.getDB().createCollection(oplog.getName(), newCollectionSpec));
+function dropOplogAndCreateNew(oplog, newCollectionSpec) {
+ if (storageEngineIsWiredTigerOrInMemory()) {
+        // Dropping the oplog is forbidden when using the WiredTiger or in-memory storage
+        // engines, so we can't drop it here. Because Evergreen reuses nodes for testing,
+        // the oplog may already exist on the test node; in that case, trying to create the
+        // oplog again would fail.
+        // To ensure we are working with a clean oplog (an oplog without entries), we resort
+        // to truncating the oplog instead.
+ if (!oplog.getDB().getCollectionNames().includes(oplog.getName())) {
+ oplog.getDB().createCollection(oplog.getName(), newCollectionSpec);
}
+ oplog.runCommand('emptycapped');
+ oplog.getDB().adminCommand({replSetResizeOplog: 1, size: 16 * 1024});
+ } else {
+ oplog.drop();
+ assert.commandWorked(oplog.getDB().createCollection(oplog.getName(), newCollectionSpec));
}
+}
- dropOplogAndCreateNew(t, {capped: true, size: 16 * 1024});
+dropOplogAndCreateNew(t, {capped: true, size: 16 * 1024});
- /**
- * Helper function for making timestamps with the property that if i < j, then makeTS(i) <
- * makeTS(j).
- */
- function makeTS(i) {
- return Timestamp(1000, i);
- }
+/**
+ * Helper function for making timestamps with the property that if i < j, then makeTS(i) <
+ * makeTS(j).
+ */
+function makeTS(i) {
+ return Timestamp(1000, i);
+}
- for (let i = 1; i <= 100; i++) {
- assert.writeOK(t.insert({_id: i, ts: makeTS(i)}));
- }
+for (let i = 1; i <= 100; i++) {
+ assert.writeOK(t.insert({_id: i, ts: makeTS(i)}));
+}
- // A $gt query on just the 'ts' field should return the next document after the timestamp.
- var cursor = t.find({ts: {$gt: makeTS(20)}});
- assert.eq(21, cursor.next()["_id"]);
- assert.eq(22, cursor.next()["_id"]);
+// A $gt query on just the 'ts' field should return the next document after the timestamp.
+var cursor = t.find({ts: {$gt: makeTS(20)}});
+assert.eq(21, cursor.next()["_id"]);
+assert.eq(22, cursor.next()["_id"]);
- // A $gte query on the 'ts' field should include the timestamp.
- cursor = t.find({ts: {$gte: makeTS(20)}});
- assert.eq(20, cursor.next()["_id"]);
- assert.eq(21, cursor.next()["_id"]);
+// A $gte query on the 'ts' field should include the timestamp.
+cursor = t.find({ts: {$gte: makeTS(20)}});
+assert.eq(20, cursor.next()["_id"]);
+assert.eq(21, cursor.next()["_id"]);
- // An $eq query on the 'ts' field should return the single record with the timestamp.
- cursor = t.find({ts: {$eq: makeTS(20)}});
- assert.eq(20, cursor.next()["_id"]);
- assert(!cursor.hasNext());
+// An $eq query on the 'ts' field should return the single record with the timestamp.
+cursor = t.find({ts: {$eq: makeTS(20)}});
+assert.eq(20, cursor.next()["_id"]);
+assert(!cursor.hasNext());
- // An AND with both a $gt and $lt query on the 'ts' field will correctly return results in
- // the proper bounds.
- cursor = t.find({$and: [{ts: {$lt: makeTS(5)}}, {ts: {$gt: makeTS(1)}}]});
- assert.eq(2, cursor.next()["_id"]);
- assert.eq(3, cursor.next()["_id"]);
- assert.eq(4, cursor.next()["_id"]);
- assert(!cursor.hasNext());
+// An AND with both a $gt and $lt query on the 'ts' field will correctly return results in
+// the proper bounds.
+cursor = t.find({$and: [{ts: {$lt: makeTS(5)}}, {ts: {$gt: makeTS(1)}}]});
+assert.eq(2, cursor.next()["_id"]);
+assert.eq(3, cursor.next()["_id"]);
+assert.eq(4, cursor.next()["_id"]);
+assert(!cursor.hasNext());
- // An AND with multiple predicates on the 'ts' field correctly returns results on the
- // tightest range.
- cursor = t.find({
- $and: [
- {ts: {$gte: makeTS(2)}},
- {ts: {$gt: makeTS(3)}},
- {ts: {$lte: makeTS(7)}},
- {ts: {$lt: makeTS(7)}}
- ]
- });
- assert.eq(4, cursor.next()["_id"]);
- assert.eq(5, cursor.next()["_id"]);
- assert.eq(6, cursor.next()["_id"]);
- assert(!cursor.hasNext());
+// An AND with multiple predicates on the 'ts' field correctly returns results on the
+// tightest range.
+cursor = t.find({
+ $and: [
+ {ts: {$gte: makeTS(2)}},
+ {ts: {$gt: makeTS(3)}},
+ {ts: {$lte: makeTS(7)}},
+ {ts: {$lt: makeTS(7)}}
+ ]
+});
+assert.eq(4, cursor.next()["_id"]);
+assert.eq(5, cursor.next()["_id"]);
+assert.eq(6, cursor.next()["_id"]);
+assert(!cursor.hasNext());
- // An AND with an $eq predicate in conjunction with other bounds correctly returns one
- // result.
- cursor = t.find({
- $and: [
- {ts: {$gte: makeTS(1)}},
- {ts: {$gt: makeTS(2)}},
- {ts: {$eq: makeTS(5)}},
- {ts: {$lte: makeTS(8)}},
- {ts: {$lt: makeTS(8)}}
- ]
- });
- assert.eq(5, cursor.next()["_id"]);
- assert(!cursor.hasNext());
+// An AND with an $eq predicate in conjunction with other bounds correctly returns one
+// result.
+cursor = t.find({
+ $and: [
+ {ts: {$gte: makeTS(1)}},
+ {ts: {$gt: makeTS(2)}},
+ {ts: {$eq: makeTS(5)}},
+ {ts: {$lte: makeTS(8)}},
+ {ts: {$lt: makeTS(8)}}
+ ]
+});
+assert.eq(5, cursor.next()["_id"]);
+assert(!cursor.hasNext());
- // An $eq query stops scanning after passing the max timestamp.
- let res = t.find({ts: {$eq: makeTS(10)}}).explain("executionStats");
- assert.commandWorked(res);
- // We expect to be able to seek directly to the entry with a 'ts' of 10.
- assert.lte(res.executionStats.totalDocsExamined, 2, tojson(res));
- let collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
+// An $eq query stops scanning after passing the max timestamp.
+let res = t.find({ts: {$eq: makeTS(10)}}).explain("executionStats");
+assert.commandWorked(res);
+// We expect to be able to seek directly to the entry with a 'ts' of 10.
+assert.lte(res.executionStats.totalDocsExamined, 2, tojson(res));
+let collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
- // An AND with an $lt predicate stops scanning after passing the max timestamp.
- res = t.find({
- $and: [{ts: {$gte: makeTS(1)}}, {ts: {$lt: makeTS(10)}}]
- }).explain("executionStats");
- assert.commandWorked(res);
- assert.lte(res.executionStats.totalDocsExamined, 11, tojson(res));
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
+// An AND with an $lt predicate stops scanning after passing the max timestamp.
+res = t.find({$and: [{ts: {$gte: makeTS(1)}}, {ts: {$lt: makeTS(10)}}]}).explain("executionStats");
+assert.commandWorked(res);
+assert.lte(res.executionStats.totalDocsExamined, 11, tojson(res));
+collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
- // An AND with an $lte predicate stops scanning after passing the max timestamp.
- res = t.find({
- $and: [{ts: {$gte: makeTS(1)}}, {ts: {$lte: makeTS(10)}}]
- }).explain("executionStats");
- assert.commandWorked(res);
- assert.lte(res.executionStats.totalDocsExamined, 12, tojson(res));
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
+// An AND with an $lte predicate stops scanning after passing the max timestamp.
+res = t.find({$and: [{ts: {$gte: makeTS(1)}}, {ts: {$lte: makeTS(10)}}]}).explain("executionStats");
+assert.commandWorked(res);
+assert.lte(res.executionStats.totalDocsExamined, 12, tojson(res));
+collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
- // The max timestamp is respected even when the min timestamp is smaller than the lowest
- // timestamp in the collection.
- res = t.find({
- $and: [{ts: {$gte: makeTS(0)}}, {ts: {$lte: makeTS(10)}}]
- }).explain("executionStats");
- assert.commandWorked(res);
- assert.lte(res.executionStats.totalDocsExamined, 12, tojson(res));
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
+// The max timestamp is respected even when the min timestamp is smaller than the lowest
+// timestamp in the collection.
+res = t.find({$and: [{ts: {$gte: makeTS(0)}}, {ts: {$lte: makeTS(10)}}]}).explain("executionStats");
+assert.commandWorked(res);
+assert.lte(res.executionStats.totalDocsExamined, 12, tojson(res));
+collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
- // An AND with redundant $eq/$lt/$lte predicates stops scanning after passing the max
- // timestamp.
- res = t.find({
- $and: [
- {ts: {$gte: makeTS(0)}},
- {ts: {$lte: makeTS(10)}},
- {ts: {$eq: makeTS(5)}},
- {ts: {$lt: makeTS(20)}}
- ]
- }).explain("executionStats");
- assert.commandWorked(res);
- // We expect to be able to seek directly to the entry with a 'ts' of 5.
- assert.lte(res.executionStats.totalDocsExamined, 2, tojson(res));
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(5), collScanStage.maxTs, tojson(res));
- assert.eq(makeTS(5), collScanStage.minTs, tojson(res));
+// An AND with redundant $eq/$lt/$lte predicates stops scanning after passing the max
+// timestamp.
+res = t.find({
+ $and: [
+ {ts: {$gte: makeTS(0)}},
+ {ts: {$lte: makeTS(10)}},
+ {ts: {$eq: makeTS(5)}},
+ {ts: {$lt: makeTS(20)}}
+ ]
+ }).explain("executionStats");
+assert.commandWorked(res);
+// We expect to be able to seek directly to the entry with a 'ts' of 5.
+assert.lte(res.executionStats.totalDocsExamined, 2, tojson(res));
+collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+assert.eq(makeTS(5), collScanStage.maxTs, tojson(res));
+assert.eq(makeTS(5), collScanStage.minTs, tojson(res));
- // An $eq query for a non-existent timestamp scans a single oplog document.
- res = t.find({ts: {$eq: makeTS(200)}}).explain("executionStats");
- assert.commandWorked(res);
- // We expect to be able to seek directly to the end of the oplog.
- assert.lte(res.executionStats.totalDocsExamined, 1, tojson(res));
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(200), collScanStage.maxTs, tojson(res));
+// An $eq query for a non-existent timestamp scans a single oplog document.
+res = t.find({ts: {$eq: makeTS(200)}}).explain("executionStats");
+assert.commandWorked(res);
+// We expect to be able to seek directly to the end of the oplog.
+assert.lte(res.executionStats.totalDocsExamined, 1, tojson(res));
+collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+assert.eq(makeTS(200), collScanStage.maxTs, tojson(res));
- // When the filter matches the last document within the timestamp range, the collection scan
- // examines at most one more document.
- res = t.find({
- $and: [{ts: {$gte: makeTS(4)}}, {ts: {$lte: makeTS(8)}}, {_id: 8}]
- }).explain("executionStats");
- assert.commandWorked(res);
- // We expect to be able to seek directly to the start of the 'ts' range.
- assert.lte(res.executionStats.totalDocsExamined, 6, tojson(res));
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(8), collScanStage.maxTs, tojson(res));
+// When the filter matches the last document within the timestamp range, the collection scan
+// examines at most one more document.
+res = t.find({
+ $and: [{ts: {$gte: makeTS(4)}}, {ts: {$lte: makeTS(8)}}, {_id: 8}]
+ }).explain("executionStats");
+assert.commandWorked(res);
+// We expect to be able to seek directly to the start of the 'ts' range.
+assert.lte(res.executionStats.totalDocsExamined, 6, tojson(res));
+collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+assert.eq(makeTS(8), collScanStage.maxTs, tojson(res));
- // A filter with only an upper bound predicate on 'ts' stops scanning after
- // passing the max timestamp.
- res = t.find({ts: {$lt: makeTS(4)}}).explain("executionStats");
- assert.commandWorked(res);
- assert.lte(res.executionStats.totalDocsExamined, 4, tojson(res));
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(4), collScanStage.maxTs, tojson(res));
+// A filter with only an upper bound predicate on 'ts' stops scanning after
+// passing the max timestamp.
+res = t.find({ts: {$lt: makeTS(4)}}).explain("executionStats");
+assert.commandWorked(res);
+assert.lte(res.executionStats.totalDocsExamined, 4, tojson(res));
+collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+assert.eq(makeTS(4), collScanStage.maxTs, tojson(res));
- // Oplog replay optimization should work with projection.
- res = t.find({ts: {$lte: makeTS(4)}}).projection({'_id': 0});
- while (res.hasNext()) {
- const next = res.next();
- assert(!next.hasOwnProperty('_id'));
- assert(next.hasOwnProperty('ts'));
- }
- res = res.explain("executionStats");
- assert.commandWorked(res);
- assert.lte(res.executionStats.totalDocsExamined, 5);
+// Oplog replay optimization should work with projection.
+res = t.find({ts: {$lte: makeTS(4)}}).projection({'_id': 0});
+while (res.hasNext()) {
+ const next = res.next();
+ assert(!next.hasOwnProperty('_id'));
+ assert(next.hasOwnProperty('ts'));
+}
+res = res.explain("executionStats");
+assert.commandWorked(res);
+assert.lte(res.executionStats.totalDocsExamined, 5);
- res = t.find({ts: {$gte: makeTS(90)}}).projection({'_id': 0});
- while (res.hasNext()) {
- const next = res.next();
- assert(!next.hasOwnProperty('_id'));
- assert(next.hasOwnProperty('ts'));
- }
- res = res.explain("executionStats");
- assert.commandWorked(res);
- assert.lte(res.executionStats.totalDocsExamined, 11);
+res = t.find({ts: {$gte: makeTS(90)}}).projection({'_id': 0});
+while (res.hasNext()) {
+ const next = res.next();
+ assert(!next.hasOwnProperty('_id'));
+ assert(next.hasOwnProperty('ts'));
+}
+res = res.explain("executionStats");
+assert.commandWorked(res);
+assert.lte(res.executionStats.totalDocsExamined, 11);
- // Oplog replay optimization should work with limit.
- res = t.find({$and: [{ts: {$gte: makeTS(4)}}, {ts: {$lte: makeTS(8)}}]})
- .limit(2)
- .explain("executionStats");
- assert.commandWorked(res);
- assert.eq(2, res.executionStats.totalDocsExamined);
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.eq(2, collScanStage.nReturned);
+// Oplog replay optimization should work with limit.
+res = t.find({$and: [{ts: {$gte: makeTS(4)}}, {ts: {$lte: makeTS(8)}}]})
+ .limit(2)
+ .explain("executionStats");
+assert.commandWorked(res);
+assert.eq(2, res.executionStats.totalDocsExamined);
+collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+assert.eq(2, collScanStage.nReturned);
- // A query over both 'ts' and '_id' should only pay attention to the 'ts' field for finding
- // the oplog start (SERVER-13566).
- cursor = t.find({ts: {$gte: makeTS(20)}, _id: 25});
- assert.eq(25, cursor.next()["_id"]);
- assert(!cursor.hasNext());
+// A query over both 'ts' and '_id' should only pay attention to the 'ts' field for finding
+// the oplog start (SERVER-13566).
+cursor = t.find({ts: {$gte: makeTS(20)}, _id: 25});
+assert.eq(25, cursor.next()["_id"]);
+assert(!cursor.hasNext());
- // 'oplogreplay' flag is allowed but ignored on the oplog collection.
- assert.commandWorked(t.runCommand({find: t.getName(), oplogReplay: true}));
+// The 'oplogReplay' flag is allowed but ignored on the oplog collection.
+assert.commandWorked(t.runCommand({find: t.getName(), oplogReplay: true}));
- // 'oplogreplay' flag is allowed but ignored on capped collections.
- const cappedColl = db.cappedColl_jstests_query_oplogreplay;
- cappedColl.drop();
- assert.commandWorked(
- db.createCollection(cappedColl.getName(), {capped: true, size: 16 * 1024}));
- for (let i = 1; i <= 100; i++) {
- assert.commandWorked(cappedColl.insert({_id: i, ts: makeTS(i)}));
- }
- res = cappedColl.runCommand({
- explain:
- {find: cappedColl.getName(), filter: {ts: {$eq: makeTS(200)}}, oplogReplay: true}
- });
- assert.commandWorked(res);
- assert.eq(res.executionStats.totalDocsExamined, 100);
+// The 'oplogReplay' flag is allowed but ignored on capped collections.
+const cappedColl = db.cappedColl_jstests_query_oplogreplay;
+cappedColl.drop();
+assert.commandWorked(db.createCollection(cappedColl.getName(), {capped: true, size: 16 * 1024}));
+for (let i = 1; i <= 100; i++) {
+ assert.commandWorked(cappedColl.insert({_id: i, ts: makeTS(i)}));
+}
+res = cappedColl.runCommand(
+ {explain: {find: cappedColl.getName(), filter: {ts: {$eq: makeTS(200)}}, oplogReplay: true}});
+assert.commandWorked(res);
+assert.eq(res.executionStats.totalDocsExamined, 100);
- // Ensure oplog replay hack does not work for backward scans.
- res = t.find({ts: {$lt: makeTS(4)}}).sort({$natural: -1}).explain("executionStats");
- assert.commandWorked(res);
- assert.eq(res.executionStats.totalDocsExamined, 100, tojson(res));
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+// Ensure oplog replay hack does not work for backward scans.
+res = t.find({ts: {$lt: makeTS(4)}}).sort({$natural: -1}).explain("executionStats");
+assert.commandWorked(res);
+assert.eq(res.executionStats.totalDocsExamined, 100, tojson(res));
+collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- // We expect correct results when no collation specified and collection has a default collation.
- const t_collation = db.getSiblingDB("local").oplog.jstests_query_oplogreplay_collation;
- dropOplogAndCreateNew(
- t_collation, {collation: {locale: "en_US", strength: 2}, capped: true, size: 16 * 1024});
+// Expect correct results when no collation is specified and the collection has a default collation.
+const t_collation = db.getSiblingDB("local").oplog.jstests_query_oplogreplay_collation;
+dropOplogAndCreateNew(t_collation,
+ {collation: {locale: "en_US", strength: 2}, capped: true, size: 16 * 1024});
- assert.writeOK(t_collation.insert({str: "FOO", ts: Timestamp(1000, 0)}));
- assert.writeOK(t_collation.insert({str: "FOO", ts: Timestamp(1000, 1)}));
- assert.writeOK(t_collation.insert({str: "FOO", ts: Timestamp(1000, 2)}));
- assert.eq(2, t_collation.find({str: "foo", ts: {$gte: Timestamp(1000, 1)}}).itcount());
+assert.writeOK(t_collation.insert({str: "FOO", ts: Timestamp(1000, 0)}));
+assert.writeOK(t_collation.insert({str: "FOO", ts: Timestamp(1000, 1)}));
+assert.writeOK(t_collation.insert({str: "FOO", ts: Timestamp(1000, 2)}));
+assert.eq(2, t_collation.find({str: "foo", ts: {$gte: Timestamp(1000, 1)}}).itcount());
- // We expect correct results when "simple" collation specified and collection has a default
- // collation.
- assert.writeOK(t_collation.insert({str: "FOO", ts: Timestamp(1000, 0)}));
- assert.writeOK(t_collation.insert({str: "FOO", ts: Timestamp(1000, 1)}));
- assert.writeOK(t_collation.insert({str: "FOO", ts: Timestamp(1000, 2)}));
- assert.eq(0,
- t_collation.find({str: "foo", ts: {$gte: Timestamp(1000, 1)}})
- .collation({locale: "simple"})
- .itcount());
+// We expect correct results when "simple" collation specified and collection has a default
+// collation.
+assert.writeOK(t_collation.insert({str: "FOO", ts: Timestamp(1000, 0)}));
+assert.writeOK(t_collation.insert({str: "FOO", ts: Timestamp(1000, 1)}));
+assert.writeOK(t_collation.insert({str: "FOO", ts: Timestamp(1000, 2)}));
+assert.eq(0,
+ t_collation.find({str: "foo", ts: {$gte: Timestamp(1000, 1)}})
+ .collation({locale: "simple"})
+ .itcount());
}());
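
The bounded-'ts' optimization exercised above is what makes resume-style scans of the real oplog cheap; a minimal sketch, where the namespace and resume timestamp are placeholders:

    // Assumes a replica set node, whose oplog lives at local.oplog.rs; the
    // resume point below is a hypothetical value.
    const oplogColl = db.getSiblingDB("local").oplog.rs;
    const resumeTs = Timestamp(1000, 20);  // placeholder resume point
    oplogColl.find({ts: {$gt: resumeTs}}).forEach(entry => printjson(entry.ts));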
diff --git a/jstests/noPassthroughWithMongod/renameWithWCE.js b/jstests/noPassthroughWithMongod/renameWithWCE.js
index f09c45ebeff..0c232ec0b37 100644
--- a/jstests/noPassthroughWithMongod/renameWithWCE.js
+++ b/jstests/noPassthroughWithMongod/renameWithWCE.js
@@ -4,52 +4,52 @@
*/
// @tags: [requires_profiling]
(function() {
- // Set up namespaces a and b.
- var admin = db.getMongo().getDB("admin");
- var db_a = db.getMongo().getDB("db_a");
- var db_b = db.getMongo().getDB("db_b");
-
- var a = db_a.rename7;
- var b = db_b.rename7;
-
- // Ensure that the databases are created
- db_a.coll.insert({});
- db_b.coll.insert({});
-
- a.drop();
- b.drop();
-
- // Put some documents and indexes in a.
- a.save({a: 1});
- a.save({a: 2});
- a.save({a: 3});
- a.ensureIndex({a: 1});
- a.ensureIndex({b: 1});
-
- assert.commandWorked(admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7"}));
-
- assert.eq(0, a.find().count());
- assert(db_a.getCollectionNames().indexOf("rename7") < 0);
-
- assert.eq(3, b.find().count());
- assert(db_b.getCollectionNames().indexOf("rename7") >= 0);
-
- a.drop();
- b.drop();
-
- // Test that the dropTarget option works when renaming across databases.
- a.save({});
- b.save({});
- assert.commandFailed(admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7"}));
-
- // Ensure that a WCE during renaming doesn't cause a failure.
- assert.commandWorked(db_a.setProfilingLevel(2)); // So we can check WCE happens.
- assert.commandWorked(db_a.adminCommand(
- {"configureFailPoint": 'writeConflictInRenameCollCopyToTmp', "mode": {times: 1}}));
- assert.commandWorked(
- admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7", dropTarget: true}));
- assert.gte(db_a.system.profile.findOne().writeConflicts, 1); // Make sure that our WCE happened
- assert.commandWorked(db_a.setProfilingLevel(0));
- a.drop();
- b.drop();
+// Set up namespaces a and b.
+var admin = db.getMongo().getDB("admin");
+var db_a = db.getMongo().getDB("db_a");
+var db_b = db.getMongo().getDB("db_b");
+
+var a = db_a.rename7;
+var b = db_b.rename7;
+
+// Ensure that the databases are created
+db_a.coll.insert({});
+db_b.coll.insert({});
+
+a.drop();
+b.drop();
+
+// Put some documents and indexes in a.
+a.save({a: 1});
+a.save({a: 2});
+a.save({a: 3});
+a.ensureIndex({a: 1});
+a.ensureIndex({b: 1});
+
+assert.commandWorked(admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7"}));
+
+assert.eq(0, a.find().count());
+assert(db_a.getCollectionNames().indexOf("rename7") < 0);
+
+assert.eq(3, b.find().count());
+assert(db_b.getCollectionNames().indexOf("rename7") >= 0);
+
+a.drop();
+b.drop();
+
+// Test that the dropTarget option works when renaming across databases.
+a.save({});
+b.save({});
+assert.commandFailed(admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7"}));
+
+// Ensure that a WCE during renaming doesn't cause a failure.
+assert.commandWorked(db_a.setProfilingLevel(2)); // So we can check WCE happens.
+assert.commandWorked(db_a.adminCommand(
+ {"configureFailPoint": 'writeConflictInRenameCollCopyToTmp', "mode": {times: 1}}));
+assert.commandWorked(
+ admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7", dropTarget: true}));
+assert.gte(db_a.system.profile.findOne().writeConflicts, 1); // Make sure that our WCE happened
+assert.commandWorked(db_a.setProfilingLevel(0));
+a.drop();
+b.drop();
})();
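
For context, the command under test takes the following shape; the namespaces here are placeholders, and dropTarget is what lets the final rename above succeed over an existing destination:

    // Cross-database rename is an admin command. Without dropTarget it fails
    // when the destination exists, as the commandFailed assertion verifies.
    const adminDB = db.getSiblingDB("admin");
    assert.commandWorked(adminDB.runCommand(
        {renameCollection: "db_a.src", to: "db_b.dst", dropTarget: true}));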
diff --git a/jstests/noPassthroughWithMongod/replset_host_connection_validation.js b/jstests/noPassthroughWithMongod/replset_host_connection_validation.js
index 159efefd3b1..29ca7436b92 100644
--- a/jstests/noPassthroughWithMongod/replset_host_connection_validation.js
+++ b/jstests/noPassthroughWithMongod/replset_host_connection_validation.js
@@ -2,84 +2,84 @@
// @tags: [requires_replication]
(function() {
- 'use strict';
+'use strict';
- const replSetName = 'hostTestReplSetName';
+const replSetName = 'hostTestReplSetName';
- // This "inner_mode" method of spawning a replset and re-running was copied from
- // host_connection_string_validation.js
- if ("undefined" == typeof inner_mode) {
- jsTest.log("Outer mode test starting a replica set");
+// This "inner_mode" method of spawning a replset and re-running was copied from
+// host_connection_string_validation.js
+if ("undefined" == typeof inner_mode) {
+ jsTest.log("Outer mode test starting a replica set");
- const replTest = new ReplSetTest({name: replSetName, nodes: 2});
- replTest.startSet();
- replTest.initiate();
+ const replTest = new ReplSetTest({name: replSetName, nodes: 2});
+ replTest.startSet();
+ replTest.initiate();
- const primary = replTest.getPrimary();
+ const primary = replTest.getPrimary();
- const args = [
- "mongo",
- "--nodb",
- "--eval",
- "inner_mode=true;port=" + primary.port + ";",
- "jstests/noPassthroughWithMongod/replset_host_connection_validation.js"
- ];
- const exitCode = _runMongoProgram(...args);
- jsTest.log("Inner mode test finished, exit code was " + exitCode);
+ const args = [
+ "mongo",
+ "--nodb",
+ "--eval",
+ "inner_mode=true;port=" + primary.port + ";",
+ "jstests/noPassthroughWithMongod/replset_host_connection_validation.js"
+ ];
+ const exitCode = _runMongoProgram(...args);
+ jsTest.log("Inner mode test finished, exit code was " + exitCode);
- replTest.stopSet();
- // Pass the inner test's exit code back as the outer test's exit code
- if (exitCode != 0) {
- doassert("inner test failed with exit code " + exitCode);
- }
- return;
+ replTest.stopSet();
+ // Pass the inner test's exit code back as the outer test's exit code
+ if (exitCode != 0) {
+ doassert("inner test failed with exit code " + exitCode);
}
+ return;
+}
- function testHost(host, uri, ok) {
- const exitCode = runMongoProgram('mongo', '--eval', ';', '--host', host, uri);
- if (ok) {
- assert.eq(exitCode, 0, "failed to connect with `--host " + host + "`");
- } else {
- assert.neq(exitCode, 0, "unexpectedly succeeded to connect with `--host " + host + "`");
- }
+function testHost(host, uri, ok) {
+ const exitCode = runMongoProgram('mongo', '--eval', ';', '--host', host, uri);
+ if (ok) {
+ assert.eq(exitCode, 0, "failed to connect with `--host " + host + "`");
+ } else {
+ assert.neq(exitCode, 0, "unexpectedly succeeded to connect with `--host " + host + "`");
}
+}
- function runConnectionStringTestFor(connectionString, uri, ok) {
- print("* Testing: --host " + connectionString + " " + uri);
- if (!ok) {
- print(" This should fail");
- }
- testHost(connectionString, uri, ok);
+function runConnectionStringTestFor(connectionString, uri, ok) {
+ print("* Testing: --host " + connectionString + " " + uri);
+ if (!ok) {
+ print(" This should fail");
}
+ testHost(connectionString, uri, ok);
+}
- function expSuccess(str) {
- runConnectionStringTestFor(str, '', true);
- if (!str.startsWith('mongodb://')) {
- runConnectionStringTestFor(str, 'dbname', true);
- }
+function expSuccess(str) {
+ runConnectionStringTestFor(str, '', true);
+ if (!str.startsWith('mongodb://')) {
+ runConnectionStringTestFor(str, 'dbname', true);
}
+}
- function expFailure(str) {
- runConnectionStringTestFor(str, '', false);
- }
+function expFailure(str) {
+ runConnectionStringTestFor(str, '', false);
+}
- expSuccess(`localhost:${port}`);
- expSuccess(`${replSetName}/localhost:${port}`);
- expSuccess(`${replSetName}/localhost:${port},[::1]:${port}`);
- expSuccess(`${replSetName}/localhost:${port},`);
- expSuccess(`${replSetName}/localhost:${port},,`);
- expSuccess(`mongodb://localhost:${port}/admin?replicaSet=${replSetName}`);
- expSuccess(`mongodb://localhost:${port}`);
+expSuccess(`localhost:${port}`);
+expSuccess(`${replSetName}/localhost:${port}`);
+expSuccess(`${replSetName}/localhost:${port},[::1]:${port}`);
+expSuccess(`${replSetName}/localhost:${port},`);
+expSuccess(`${replSetName}/localhost:${port},,`);
+expSuccess(`mongodb://localhost:${port}/admin?replicaSet=${replSetName}`);
+expSuccess(`mongodb://localhost:${port}`);
- expFailure(',');
- expFailure(',,');
- expFailure(`${replSetName}/`);
- expFailure(`${replSetName}/,`);
- expFailure(`${replSetName}/,,`);
- expFailure(`${replSetName}//not/a/socket`);
- expFailure(`mongodb://localhost:${port}/admin?replicaSet=`);
- expFailure('mongodb://localhost:');
- expFailure(`mongodb://:${port}`);
+expFailure(',');
+expFailure(',,');
+expFailure(`${replSetName}/`);
+expFailure(`${replSetName}/,`);
+expFailure(`${replSetName}/,,`);
+expFailure(`${replSetName}//not/a/socket`);
+expFailure(`mongodb://localhost:${port}/admin?replicaSet=`);
+expFailure('mongodb://localhost:');
+expFailure(`mongodb://:${port}`);
- jsTest.log("SUCCESSFUL test completion");
+jsTest.log("SUCCESSFUL test completion");
})();
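
The two accepted spellings being validated are the seed-list form and the URI form; a sketch with placeholder ports:

    // Seed-list form, passed via --host: <replSetName>/<host:port>[,<host:port>...]
    const seedList = "hostTestReplSetName/localhost:20000,localhost:20001";
    // URI form, which may instead carry the set name as a query parameter.
    const uri = "mongodb://localhost:20000/admin?replicaSet=hostTestReplSetName";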
diff --git a/jstests/noPassthroughWithMongod/rpc_protocols.js b/jstests/noPassthroughWithMongod/rpc_protocols.js
index ef847e2a359..95e028526c0 100644
--- a/jstests/noPassthroughWithMongod/rpc_protocols.js
+++ b/jstests/noPassthroughWithMongod/rpc_protocols.js
@@ -8,65 +8,66 @@
var RPC_PROTOCOLS = {OP_QUERY: "opQueryOnly", OP_MSG: "opMsgOnly"};
(function() {
- "use strict";
+"use strict";
- db.rpcProtocols.drop();
+db.rpcProtocols.drop();
- var oldProfilingLevel = db.getProfilingLevel();
+var oldProfilingLevel = db.getProfilingLevel();
- assert.commandWorked(db.setProfilingLevel(2));
+assert.commandWorked(db.setProfilingLevel(2));
- function runInShell(rpcProtocol, func) {
- assert(0 == _runMongoProgram("mongo",
- "--rpcProtocols=" + rpcProtocol,
- "--readMode=commands", // ensure we use the find command.
- "--eval",
- "(" + func.toString() + ")();",
- db.getMongo().host));
- }
+function runInShell(rpcProtocol, func) {
+ assert(0 ==
+ _runMongoProgram("mongo",
+ "--rpcProtocols=" + rpcProtocol,
+ "--readMode=commands", // ensure we use the find command.
+ "--eval",
+ "(" + func.toString() + ")();",
+ db.getMongo().host));
+}
- // Test that --rpcProtocols=opQueryOnly forces OP_QUERY commands.
- runInShell(RPC_PROTOCOLS.OP_QUERY, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opQueryCommandLine").itcount();
- });
- var profileDoc = db.system.profile.findOne({"command.comment": "opQueryCommandLine"});
- assert(profileDoc !== null);
- assert.eq(profileDoc.protocol, "op_query");
+// Test that --rpcProtocols=opQueryOnly forces OP_QUERY commands.
+runInShell(RPC_PROTOCOLS.OP_QUERY, function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opQueryCommandLine").itcount();
+});
+var profileDoc = db.system.profile.findOne({"command.comment": "opQueryCommandLine"});
+assert(profileDoc !== null);
+assert.eq(profileDoc.protocol, "op_query");
- // Test that --rpcProtocols=opMsgOnly forces OP_MSG commands.
- runInShell(RPC_PROTOCOLS.OP_MSG, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opMsgOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opMsgCommandLine").itcount();
- });
- profileDoc = db.system.profile.findOne({"command.comment": "opMsgCommandLine"});
- assert(profileDoc !== null);
- assert.eq(profileDoc.protocol, "op_msg");
+// Test that --rpcProtocols=opMsgOnly forces OP_MSG commands.
+runInShell(RPC_PROTOCOLS.OP_MSG, function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opMsgOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opMsgCommandLine").itcount();
+});
+profileDoc = db.system.profile.findOne({"command.comment": "opMsgCommandLine"});
+assert(profileDoc !== null);
+assert.eq(profileDoc.protocol, "op_msg");
- // Test that .setClientRPCProtocols("opQueryOnly") forces OP_QUERY commands. We start the shell
- // in OP_MSG only mode, then switch it to OP_QUERY mode at runtime.
- runInShell(RPC_PROTOCOLS.OP_MSG, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opMsgOnly");
- db.getMongo().setClientRPCProtocols("opQueryOnly");
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opQueryRuntime").itcount();
- });
- profileDoc = db.system.profile.findOne({"command.comment": "opQueryRuntime"});
- assert(profileDoc !== null);
- assert.eq(profileDoc.protocol, "op_query");
+// Test that .setClientRPCProtocols("opQueryOnly") forces OP_QUERY commands. We start the shell
+// in OP_MSG only mode, then switch it to OP_QUERY mode at runtime.
+runInShell(RPC_PROTOCOLS.OP_MSG, function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opMsgOnly");
+ db.getMongo().setClientRPCProtocols("opQueryOnly");
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opQueryRuntime").itcount();
+});
+profileDoc = db.system.profile.findOne({"command.comment": "opQueryRuntime"});
+assert(profileDoc !== null);
+assert.eq(profileDoc.protocol, "op_query");
- // Test that .setClientRPCProtocols("opMsgOnly") forces OP_MSG commands. We start the
- // shell in OP_QUERY only mode, then switch it to OP_MSG mode at runtime.
- runInShell(RPC_PROTOCOLS.OP_QUERY, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getMongo().setClientRPCProtocols("opMsgOnly");
- assert(db.getMongo().getClientRPCProtocols() === "opMsgOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opMsgRuntime").itcount();
- });
- profileDoc = db.system.profile.findOne({"command.comment": "opMsgRuntime"});
- assert(profileDoc !== null);
- assert.eq(profileDoc.protocol, "op_msg");
+// Test that .setClientRPCProtocols("opMsgOnly") forces OP_MSG commands. We start the
+// shell in OP_QUERY only mode, then switch it to OP_MSG mode at runtime.
+runInShell(RPC_PROTOCOLS.OP_QUERY, function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getMongo().setClientRPCProtocols("opMsgOnly");
+ assert(db.getMongo().getClientRPCProtocols() === "opMsgOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opMsgRuntime").itcount();
+});
+profileDoc = db.system.profile.findOne({"command.comment": "opMsgRuntime"});
+assert(profileDoc !== null);
+assert.eq(profileDoc.protocol, "op_msg");
- // Reset profiling level.
- assert.commandWorked(db.setProfilingLevel(oldProfilingLevel));
+// Reset profiling level.
+assert.commandWorked(db.setProfilingLevel(oldProfilingLevel));
})();
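
The profiler field the test keys on can be inspected directly; a minimal sketch, where the comment string is a placeholder:

    // Tag a query with a comment, then read back which wire protocol ran it.
    assert.commandWorked(db.setProfilingLevel(2));
    db.rpcProtocols.find().comment("whichProtocol").itcount();
    const prof = db.system.profile.findOne({"command.comment": "whichProtocol"});
    print(prof.protocol);  // "op_msg" or "op_query", depending on shell mode
    assert.commandWorked(db.setProfilingLevel(0));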
diff --git a/jstests/noPassthroughWithMongod/shell_advance_cluster_time.js b/jstests/noPassthroughWithMongod/shell_advance_cluster_time.js
index db3c4904896..c0d4580aae5 100644
--- a/jstests/noPassthroughWithMongod/shell_advance_cluster_time.js
+++ b/jstests/noPassthroughWithMongod/shell_advance_cluster_time.js
@@ -3,27 +3,27 @@
*/
(function() {
- assert.throws(function() {
- db.getMongo().advanceClusterTime();
- });
+assert.throws(function() {
+ db.getMongo().advanceClusterTime();
+});
- assert.throws(function() {
- db.getMongo().advanceClusterTime(123);
- });
+assert.throws(function() {
+ db.getMongo().advanceClusterTime(123);
+});
- assert.throws(function() {
- db.getMongo().advanceClusterTime('abc');
- });
+assert.throws(function() {
+ db.getMongo().advanceClusterTime('abc');
+});
- db.getMongo().advanceClusterTime({'clusterTime': 123});
+db.getMongo().advanceClusterTime({'clusterTime': 123});
- assert.eq({'clusterTime': 123}, db.getMongo().getClusterTime());
+assert.eq({'clusterTime': 123}, db.getMongo().getClusterTime());
- db.getMongo().advanceClusterTime({'clusterTime': 100});
+db.getMongo().advanceClusterTime({'clusterTime': 100});
- assert.eq({'clusterTime': 123}, db.getMongo().getClusterTime());
+assert.eq({'clusterTime': 123}, db.getMongo().getClusterTime());
- db.getMongo().advanceClusterTime({'clusterTime': 456});
+db.getMongo().advanceClusterTime({'clusterTime': 456});
- assert.eq({'clusterTime': 456}, db.getMongo().getClusterTime());
+assert.eq({'clusterTime': 456}, db.getMongo().getClusterTime());
})();
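
In short, advanceClusterTime() is monotonic: a smaller value is silently ignored. A sketch, assuming a fresh connection whose tracked time starts unset:

    const conn = db.getMongo();
    conn.advanceClusterTime({'clusterTime': 500});
    conn.advanceClusterTime({'clusterTime': 400});  // stale; silently ignored
    assert.eq({'clusterTime': 500}, conn.getClusterTime());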
diff --git a/jstests/noPassthroughWithMongod/shelllimit.js b/jstests/noPassthroughWithMongod/shelllimit.js
index 3b270bddc12..cd021a6df61 100644
--- a/jstests/noPassthroughWithMongod/shelllimit.js
+++ b/jstests/noPassthroughWithMongod/shelllimit.js
@@ -1,21 +1,21 @@
// This checks to make sure that cursors with a limit get killed by the shell
// after all their results have been returned. See SERVER-17792.
(function() {
- "use strict";
+"use strict";
- var t = db.cursor_limit_test;
- t.drop();
- var pre = db.serverStatus().metrics.cursor.open.total;
+var t = db.cursor_limit_test;
+t.drop();
+var pre = db.serverStatus().metrics.cursor.open.total;
- for (var i = 1; i <= 5; i++) {
- t.save({a: i});
- }
+for (var i = 1; i <= 5; i++) {
+ t.save({a: i});
+}
- var c = t.find().limit(3);
- while (c.hasNext()) {
- var v = c.next();
- }
+var c = t.find().limit(3);
+while (c.hasNext()) {
+ var v = c.next();
+}
- assert.eq(pre, db.serverStatus().metrics.cursor.open.total);
- t.drop();
+assert.eq(pre, db.serverStatus().metrics.cursor.open.total);
+t.drop();
}());
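
The leak check reduces to comparing the server-wide open-cursor counter across the cursor's lifetime; a compact sketch of the same idea:

    // If the shell kills the limited cursor once it is exhausted, the counter
    // is unchanged; a leaked cursor would leave it elevated.
    const before = db.serverStatus().metrics.cursor.open.total;
    db.cursor_limit_test.find().limit(3).toArray();  // exhaust the cursor
    assert.eq(before, db.serverStatus().metrics.cursor.open.total);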
diff --git a/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js b/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js
index 591c5fffd95..600d8be4733 100644
--- a/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js
+++ b/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js
@@ -3,34 +3,34 @@
// This test was designed to reproduce SERVER-33942 against a mongod.
// @tags: [requires_capped]
(function() {
- "use strict";
+"use strict";
- // This test runs a getMore in a parallel shell, which will not inherit the implicit session of
- // the cursor establishing command.
- TestData.disableImplicitSessions = true;
+// This test runs a getMore in a parallel shell, which will not inherit the implicit session
+// of the cursor-establishing command.
+TestData.disableImplicitSessions = true;
- const coll = db.tailable_getmore_no_timeout;
- coll.drop();
+const coll = db.tailable_getmore_no_timeout;
+coll.drop();
- assert.commandWorked(db.runCommand({create: coll.getName(), capped: true, size: 1024}));
+assert.commandWorked(db.runCommand({create: coll.getName(), capped: true, size: 1024}));
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({_id: i}));
- }
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({_id: i}));
+}
- const findResponse = assert.commandWorked(
- db.runCommand({find: coll.getName(), filter: {}, tailable: true, awaitData: true}));
- const cursorId = findResponse.cursor.id;
- assert.neq(0, cursorId);
+const findResponse = assert.commandWorked(
+ db.runCommand({find: coll.getName(), filter: {}, tailable: true, awaitData: true}));
+const cursorId = findResponse.cursor.id;
+assert.neq(0, cursorId);
- // Start an operation in a parallel shell that holds the lock for a while.
- const awaitSleepShell = startParallelShell(
- () => assert.commandFailedWithCode(db.adminCommand({sleep: 1, lock: "w", secs: 600}),
- ErrorCodes.Interrupted));
+// Start an operation in a parallel shell that holds the lock for a while.
+const awaitSleepShell = startParallelShell(
+ () => assert.commandFailedWithCode(db.adminCommand({sleep: 1, lock: "w", secs: 600}),
+ ErrorCodes.Interrupted));
- // Start a getMore and verify that it is waiting for the lock.
- const getMoreMaxTimeMS = 10;
- const awaitGetMoreShell = startParallelShell(`
+// Start a getMore and verify that it is waiting for the lock.
+const getMoreMaxTimeMS = 10;
+const awaitGetMoreShell = startParallelShell(`
// Wait for the sleep command to take the lock.
assert.soon(() => db.getSiblingDB("admin")
.currentOp({"command.sleep": 1, active: true})
@@ -43,20 +43,19 @@
}));
`);
- // Wait to see the getMore waiting on the lock.
- assert.soon(
- () =>
- db.currentOp({"command.getMore": cursorId, waitingForLock: true}).inprog.length === 1);
+// Wait to see the getMore waiting on the lock.
+assert.soon(
+ () => db.currentOp({"command.getMore": cursorId, waitingForLock: true}).inprog.length === 1);
- // Sleep for twice the maxTimeMS to prove that the getMore won't time out waiting for the lock.
- sleep(2 * getMoreMaxTimeMS);
+// Sleep for twice the maxTimeMS to prove that the getMore won't time out waiting for the lock.
+sleep(2 * getMoreMaxTimeMS);
- // Then kill the command with the lock, allowing the getMore to continue successfully.
- const sleepOps = db.getSiblingDB("admin").currentOp({"command.sleep": 1, active: true}).inprog;
- assert.eq(sleepOps.length, 1);
- const sleepOpId = sleepOps[0].opid;
- assert.commandWorked(db.adminCommand({killOp: 1, op: sleepOpId}));
+// Then kill the command with the lock, allowing the getMore to continue successfully.
+const sleepOps = db.getSiblingDB("admin").currentOp({"command.sleep": 1, active: true}).inprog;
+assert.eq(sleepOps.length, 1);
+const sleepOpId = sleepOps[0].opid;
+assert.commandWorked(db.adminCommand({killOp: 1, op: sleepOpId}));
- awaitSleepShell();
- awaitGetMoreShell();
+awaitSleepShell();
+awaitGetMoreShell();
}());
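
The getMore issued by the inner parallel shell has the shape below; maxTimeMS on a tailable, awaitData cursor bounds only the await phase, which is why time spent waiting for locks must not count against it. A sketch, reusing the test's capped collection:

    const findRes = assert.commandWorked(db.runCommand(
        {find: "tailable_getmore_no_timeout", tailable: true, awaitData: true}));
    assert.commandWorked(db.runCommand({
        getMore: findRes.cursor.id,
        collection: "tailable_getmore_no_timeout",
        maxTimeMS: 10  // mirrors getMoreMaxTimeMS in the test
    }));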
diff --git a/jstests/noPassthroughWithMongod/temp_namespace.js b/jstests/noPassthroughWithMongod/temp_namespace.js
index f33d2abfd76..257874d5966 100644
--- a/jstests/noPassthroughWithMongod/temp_namespace.js
+++ b/jstests/noPassthroughWithMongod/temp_namespace.js
@@ -8,16 +8,14 @@ testname = 'temp_namespace_sw';
var conn = MongoRunner.runMongod();
d = conn.getDB('test');
assert.commandWorked(d.runCommand({
- applyOps:
- [{op: "c", ns: d.getName() + ".$cmd", o: {create: testname + 'temp1', temp: true}}]
+ applyOps: [{op: "c", ns: d.getName() + ".$cmd", o: {create: testname + 'temp1', temp: true}}]
}));
d[testname + 'temp1'].ensureIndex({x: 1});
assert.commandWorked(d.runCommand(
{applyOps: [{op: "c", ns: d.getName() + ".$cmd", o: {create: testname + 'temp2', temp: 1}}]}));
d[testname + 'temp2'].ensureIndex({x: 1});
assert.commandWorked(d.runCommand({
- applyOps:
- [{op: "c", ns: d.getName() + ".$cmd", o: {create: testname + 'keep1', temp: false}}]
+ applyOps: [{op: "c", ns: d.getName() + ".$cmd", o: {create: testname + 'keep1', temp: false}}]
}));
assert.commandWorked(d.runCommand(
{applyOps: [{op: "c", ns: d.getName() + ".$cmd", o: {create: testname + 'keep2', temp: 0}}]}));
diff --git a/jstests/noPassthroughWithMongod/top_drop.js b/jstests/noPassthroughWithMongod/top_drop.js
index 17949e59ff6..b280aa20efd 100644
--- a/jstests/noPassthroughWithMongod/top_drop.js
+++ b/jstests/noPassthroughWithMongod/top_drop.js
@@ -3,77 +3,77 @@
* TODO(SERVER-21167): Move this test from noPassthrough to core.
*/
(function() {
- "use strict";
+"use strict";
- let topDB = db.getSiblingDB("topdrop");
- assert.commandWorked(topDB.dropDatabase());
+let topDB = db.getSiblingDB("topdrop");
+assert.commandWorked(topDB.dropDatabase());
- // Asserts that the output of top contains exactly these collection entries for topDB.
- function checkTopEntries(expectedEntries) {
- let res = topDB.adminCommand("top");
- assert.commandWorked(res, "Failed to run the top command");
+// Asserts that the output of top contains exactly these collection entries for topDB.
+function checkTopEntries(expectedEntries) {
+ let res = topDB.adminCommand("top");
+ assert.commandWorked(res, "Failed to run the top command");
- let entriesInTop = Object.keys(res.totals).filter(function(ns) {
- // This filter only includes non-system collections in our test database.
- const dbPrefix = topDB.getName() + ".";
- const systemCollectionPrefix = "system.";
- return ns.startsWith(dbPrefix) && !ns.startsWith(dbPrefix + systemCollectionPrefix);
- });
- let expectedEntryNames = expectedEntries.map(function(coll) {
- return coll.getFullName();
- });
+ let entriesInTop = Object.keys(res.totals).filter(function(ns) {
+ // This filter only includes non-system collections in our test database.
+ const dbPrefix = topDB.getName() + ".";
+ const systemCollectionPrefix = "system.";
+ return ns.startsWith(dbPrefix) && !ns.startsWith(dbPrefix + systemCollectionPrefix);
+ });
+ let expectedEntryNames = expectedEntries.map(function(coll) {
+ return coll.getFullName();
+ });
- const entriesAreEqual = friendlyEqual(entriesInTop.sort(), expectedEntryNames.sort());
- if (!entriesAreEqual) {
- // TODO(SERVER-26750): This block can be removed once SERVER-26750 is resolved.
- jsTest.log("Warning: expected to see " + tojson(expectedEntryNames) +
- " in top, but got " + tojson(entriesInTop));
+ const entriesAreEqual = friendlyEqual(entriesInTop.sort(), expectedEntryNames.sort());
+ if (!entriesAreEqual) {
+ // TODO(SERVER-26750): This block can be removed once SERVER-26750 is resolved.
+ jsTest.log("Warning: expected to see " + tojson(expectedEntryNames) + " in top, but got " +
+ tojson(entriesInTop));
- assert.lt(expectedEntryNames.length,
- entriesInTop.length,
- "Fewer entries in top than expected; got " + tojson(entriesInTop) +
- " but expected " + tojson(expectedEntryNames) + "\nFull top output:\n" +
- tojson(res.totals));
+ assert.lt(expectedEntryNames.length,
+ entriesInTop.length,
+ "Fewer entries in top than expected; got " + tojson(entriesInTop) +
+ " but expected " + tojson(expectedEntryNames) + "\nFull top output:\n" +
+ tojson(res.totals));
- // We allow an unexpected entry in top if the insert counter has been cleared. This is
- // probably due to a background job releasing an AutoGetCollectionForReadCommand for
- // that collection.
- entriesInTop.forEach(function(coll) {
- if (expectedEntryNames.includes(coll)) {
- return;
- }
+ // We allow an unexpected entry in top if the insert counter has been cleared. This is
+ // probably due to a background job releasing an AutoGetCollectionForReadCommand for
+ // that collection.
+ entriesInTop.forEach(function(coll) {
+ if (expectedEntryNames.includes(coll)) {
+ return;
+ }
- let topStats = res.totals[coll];
- assert.eq(0,
- res.totals[coll].insert.count,
- coll + " has unexpected insert entries in top. Full top output:\n" +
- tojson(res.totals));
- });
- }
+ let topStats = res.totals[coll];
+ assert.eq(0,
+ res.totals[coll].insert.count,
+ coll + " has unexpected insert entries in top. Full top output:\n" +
+ tojson(res.totals));
+ });
}
+}
- // Create a few entries in top.
- assert.writeOK(topDB.coll1.insert({}));
- assert.writeOK(topDB.coll2.insert({}));
- assert.writeOK(topDB.coll3.insert({}));
- checkTopEntries([topDB.coll1, topDB.coll2, topDB.coll3]);
+// Create a few entries in top.
+assert.writeOK(topDB.coll1.insert({}));
+assert.writeOK(topDB.coll2.insert({}));
+assert.writeOK(topDB.coll3.insert({}));
+checkTopEntries([topDB.coll1, topDB.coll2, topDB.coll3]);
- // Check that dropping a collection removes that collection but leaves the others.
- assert.commandWorked(topDB.runCommand({drop: "coll3"}));
- checkTopEntries([topDB.coll1, topDB.coll2]);
+// Check that dropping a collection removes that collection but leaves the others.
+assert.commandWorked(topDB.runCommand({drop: "coll3"}));
+checkTopEntries([topDB.coll1, topDB.coll2]);
- // Check that dropping the database removes the remaining collections.
- assert.commandWorked(topDB.dropDatabase());
- checkTopEntries([]);
+// Check that dropping the database removes the remaining collections.
+assert.commandWorked(topDB.dropDatabase());
+checkTopEntries([]);
- // Check that top doesn't keep state about non-existent collections.
- assert.commandWorked(topDB.dropDatabase());
- topDB.foo.find().itcount();
- topDB.baz.update({}, {$set: {a: 1}});
- topDB.bar.remove({});
+// Check that top doesn't keep state about non-existent collections.
+assert.commandWorked(topDB.dropDatabase());
+topDB.foo.find().itcount();
+topDB.baz.update({}, {$set: {a: 1}});
+topDB.bar.remove({});
- let res = topDB.adminCommand("top");
- checkTopEntries([]);
+let res = topDB.adminCommand("top");
+checkTopEntries([]);
- assert.commandWorked(topDB.dropDatabase());
+assert.commandWorked(topDB.dropDatabase());
}());
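
The totals document that checkTopEntries() filters is keyed by full namespace; a sketch of reading one entry, with a placeholder namespace:

    // Each namespace entry carries per-operation {time, count} counters.
    const totals = db.adminCommand("top").totals;
    const nsStats = totals["topdrop.coll1"];  // undefined once the coll drops
    if (nsStats) {
        printjson({queries: nsStats.queries.count, inserts: nsStats.insert.count});
    }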
diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js
index 97257da5ce6..b0c7c342987 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl.js
@@ -1,4 +1,5 @@
-/** Test TTL collections with replication
+/**
+ * Test TTL collections with replication
* Part 1: Initiate replica set. Insert some docs and create a TTL index.
* Check that the correct # of docs age out.
* Part 2: Add a new member to the set. Check that it also gets the correct # of docs.
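
For context, the TTL index at the heart of these tests is an ordinary single-field index carrying expireAfterSeconds; a minimal sketch with placeholder names:

    // The TTL monitor on the primary deletes documents whose 'createdAt' is
    // older than expireAfterSeconds; on replica sets the deletes replicate.
    db.ttl_demo.createIndex({createdAt: 1}, {expireAfterSeconds: 3600});
    assert.writeOK(db.ttl_demo.insert({createdAt: new Date()}));  // reaped ~1h later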
diff --git a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
index 44250c4f1d2..4616a306084 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
@@ -1,4 +1,5 @@
-/** This tests ensures that when a stand-alone server is started with something in
+/**
+ * This test ensures that when a stand-alone server is started with something in
* local.system.replset, it doesn't start the TTL monitor (SERVER-6609). The test creates a
* dummy replset config & TTL collection, then restarts the member and ensures that it doesn't
* time out the docs in the TTL collection. Then it removes the "config" and
diff --git a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
index 135bc41a608..a9fb3c46108 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
@@ -1,4 +1,5 @@
-/** Test TTL docs are not deleted from secondaries directly
+/**
+ * Test that TTL docs are not deleted from secondaries directly
* @tags: [requires_replication]
*/
diff --git a/jstests/noPassthroughWithMongod/ttl_sharded.js b/jstests/noPassthroughWithMongod/ttl_sharded.js
index 197832f6933..6788dfcdcf9 100644
--- a/jstests/noPassthroughWithMongod/ttl_sharded.js
+++ b/jstests/noPassthroughWithMongod/ttl_sharded.js
@@ -1,4 +1,5 @@
-/** Simple test of sharding TTL collections.
+/**
+ * Simple test of sharding TTL collections.
* - Creates a new collection with a TTL index
* - Shards it, and moves one chunk containing half the docs to another shard.
* - Checks that both shards have TTL index, and docs get deleted on both shards.
diff --git a/jstests/noPassthroughWithMongod/validate_command.js b/jstests/noPassthroughWithMongod/validate_command.js
index 9c52c9acad7..7f5a8533705 100644
--- a/jstests/noPassthroughWithMongod/validate_command.js
+++ b/jstests/noPassthroughWithMongod/validate_command.js
@@ -1,40 +1,40 @@
// Tests that the basic values returned from the validate command are correct
(function() {
- // Set the number of documents to insert
- var count = 10;
+// Set the number of documents to insert
+var count = 10;
- function testValidate(output) {
- assert.eq(output.nrecords, count, "validate returned an invalid count");
- assert.eq(output.nIndexes, 3, "validate returned an invalid number of indexes");
+function testValidate(output) {
+ assert.eq(output.nrecords, count, "validate returned an invalid count");
+ assert.eq(output.nIndexes, 3, "validate returned an invalid number of indexes");
- var indexNames = output.keysPerIndex;
+ var indexNames = output.keysPerIndex;
- for (var i in indexNames) {
- if (!indexNames.hasOwnProperty(i))
- continue;
- assert.eq(indexNames[i], count, "validate returned an invalid number of indexes");
- }
+ for (var i in indexNames) {
+ if (!indexNames.hasOwnProperty(i))
+ continue;
+        assert.eq(indexNames[i], count, "validate returned an invalid number of index keys");
}
+}
- // Test to confirm that validate is working as expected.
+// Test to confirm that validate is working as expected.
- // SETUP DATA
- t = db.jstests_validate;
- t.drop();
+// SETUP DATA
+t = db.jstests_validate;
+t.drop();
- for (var i = 0; i < count; i++) {
- t.insert({x: i});
- }
+for (var i = 0; i < count; i++) {
+ t.insert({x: i});
+}
- t.ensureIndex({x: 1}, {name: "forward"});
- t.ensureIndex({x: -1}, {name: "reverse"});
+t.ensureIndex({x: 1}, {name: "forward"});
+t.ensureIndex({x: -1}, {name: "reverse"});
- // TEST NORMAL VALIDATE
- var output = t.validate();
- testValidate(output);
+// TEST NORMAL VALIDATE
+var output = t.validate();
+testValidate(output);
- // TEST FULL
- var output = t.validate({full: true});
- testValidate(output);
+// TEST FULL
+var output = t.validate({full: true});
+testValidate(output);
}()); \ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/validate_interrupt.js b/jstests/noPassthroughWithMongod/validate_interrupt.js
index 7b76551f4e4..c19e682eae3 100644
--- a/jstests/noPassthroughWithMongod/validate_interrupt.js
+++ b/jstests/noPassthroughWithMongod/validate_interrupt.js
@@ -4,43 +4,43 @@
'use strict';
(function() {
- var t = db.validate_interrupt;
- t.drop();
-
- var bulk = t.initializeUnorderedBulkOp();
-
- var i;
- for (i = 0; i < 1000; i++) {
- bulk.insert({a: i});
- }
- assert.writeOK(bulk.execute());
-
- function setTimeoutFailPoint(mode) {
- var res = db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: mode});
- assert.commandWorked(res);
- }
-
- setTimeoutFailPoint('alwaysOn');
- var res = t.runCommand({validate: t.getName(), full: true, maxTimeMS: 1});
- setTimeoutFailPoint('off');
-
- // Sanity check to make sure the failpoint is turned off.
- assert.commandWorked(t.runCommand({validate: t.getName(), full: true}));
-
- if (res.ok === 0) {
- assert.eq(res.code,
- ErrorCodes.MaxTimeMSExpired,
- 'validate command did not time out:\n' + tojson(res));
- } else {
- // validate() should only succeed if it EBUSY'd. See SERVER-23131.
- var numWarnings = res.warnings.length;
- // validate() could EBUSY when verifying the index and/or the RecordStore, so EBUSY could
- // appear once or twice.
- assert((numWarnings === 1) || (numWarnings === 2),
- 'Expected 1 or 2 validation warnings:\n' + tojson(res));
- assert(res.warnings[0].includes('EBUSY'), 'Expected an EBUSY warning:\n' + tojson(res));
- if (numWarnings === 2) {
- assert(res.warnings[1].includes('EBUSY'), 'Expected an EBUSY warning:\n' + tojson(res));
- }
+var t = db.validate_interrupt;
+t.drop();
+
+var bulk = t.initializeUnorderedBulkOp();
+
+var i;
+for (i = 0; i < 1000; i++) {
+ bulk.insert({a: i});
+}
+assert.writeOK(bulk.execute());
+
+function setTimeoutFailPoint(mode) {
+ var res = db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: mode});
+ assert.commandWorked(res);
+}
+
+setTimeoutFailPoint('alwaysOn');
+var res = t.runCommand({validate: t.getName(), full: true, maxTimeMS: 1});
+setTimeoutFailPoint('off');
+
+// Sanity check to make sure the failpoint is turned off.
+assert.commandWorked(t.runCommand({validate: t.getName(), full: true}));
+
+if (res.ok === 0) {
+ assert.eq(res.code,
+ ErrorCodes.MaxTimeMSExpired,
+ 'validate command did not time out:\n' + tojson(res));
+} else {
+ // validate() should only succeed if it EBUSY'd. See SERVER-23131.
+ var numWarnings = res.warnings.length;
+ // validate() could EBUSY when verifying the index and/or the RecordStore, so EBUSY could
+ // appear once or twice.
+ assert((numWarnings === 1) || (numWarnings === 2),
+ 'Expected 1 or 2 validation warnings:\n' + tojson(res));
+ assert(res.warnings[0].includes('EBUSY'), 'Expected an EBUSY warning:\n' + tojson(res));
+ if (numWarnings === 2) {
+ assert(res.warnings[1].includes('EBUSY'), 'Expected an EBUSY warning:\n' + tojson(res));
}
+}
})();
diff --git a/jstests/noPassthroughWithMongod/views_invalid.js b/jstests/noPassthroughWithMongod/views_invalid.js
index 1749a9900d7..a525b68d32f 100644
--- a/jstests/noPassthroughWithMongod/views_invalid.js
+++ b/jstests/noPassthroughWithMongod/views_invalid.js
@@ -1,67 +1,63 @@
(function() {
- "use strict";
-
- const dbname = 'views_invalid';
- let invalidDB = db.getSiblingDB(dbname);
-
- // Wait for the invalid view definition to be replicated to any secondaries and then drop the
- // database.
- assert.writeOK(invalidDB.system.views.insert({z: '\0\uFFFFf'}),
- {writeConcern: {w: "majority"}});
- invalidDB.dropDatabase();
-
- // Create a database with one valid and one invalid view through direct system.views writes.
- assert.writeOK(invalidDB.coll.insert({x: 1}));
- assert.writeOK(
- invalidDB.system.views.insert({_id: dbname + '.view', viewOn: 'coll', pipeline: []}));
- assert.eq(invalidDB.view.findOne({}, {_id: 0}),
- {x: 1},
- 'find on view created with direct write to views catalog should work');
- assert.writeOK(invalidDB.system.views.insert({_id: 'invalid', pipeline: 3.0}));
-
- // Check that view-related commands fail with an invalid view catalog, but other commands on
- // existing collections still succeed.
- assert.commandFailedWithCode(
- invalidDB.runCommand({find: 'view'}),
- ErrorCodes.InvalidViewDefinition,
- 'find on existing view in DB with invalid system.views should fail');
-
- assert.eq(invalidDB.coll.findOne({}, {_id: 0}),
- {x: 1},
- 'find on existing collection in DB with invalid views catalog should work');
-
- assert.writeOK(invalidDB.coll.insert({x: 2}),
- 'insert in existing collection in DB with invalid views catalog should work');
-
- assert.writeError(invalidDB.x.insert({x: 2}),
- 'insert into new collection in DB with invalid views catalog should fail');
-
- assert.commandWorked(
- invalidDB.runCommand({drop: 'coll'}),
- 'dropping an existing collection in DB with invalid views catalog should work');
-
- assert.commandFailedWithCode(
- invalidDB.runCommand({drop: 'view'}),
- ErrorCodes.InvalidViewDefinition,
- 'dropping an existing view in DB with invalid views catalog should fail');
-
- assert.commandFailedWithCode(
- invalidDB.createCollection('x'),
- ErrorCodes.InvalidViewDefinition,
- 'creating a collection in DB with invalid views catalog should fail');
-
- assert.commandFailedWithCode(
- invalidDB.runCommand({find: 'x'}),
- ErrorCodes.InvalidViewDefinition,
- 'find on non-existent collection in DB with invalid system.views should fail');
-
- // Now fix the database by removing the invalid system.views entry, and check all is OK.
- assert.writeOK(invalidDB.system.views.remove({_id: 'invalid'}),
- 'should be able to remove invalid view with direct write to view catalog');
- assert.writeOK(
- invalidDB.coll.insert({x: 1}),
- 'after remove invalid view from catalog, should be able to create new collection');
- assert.eq(invalidDB.view.findOne({}, {_id: 0}),
- {x: 1},
- 'find on view should work again after removing invalid view from catalog');
+"use strict";
+
+const dbname = 'views_invalid';
+let invalidDB = db.getSiblingDB(dbname);
+
+// Wait for the invalid view definition to be replicated to any secondaries and then drop the
+// database.
+assert.writeOK(invalidDB.system.views.insert({z: '\0\uFFFFf'}, {writeConcern: {w: "majority"}}));
+invalidDB.dropDatabase();
+
+// Create a database with one valid and one invalid view through direct system.views writes.
+assert.writeOK(invalidDB.coll.insert({x: 1}));
+assert.writeOK(
+ invalidDB.system.views.insert({_id: dbname + '.view', viewOn: 'coll', pipeline: []}));
+assert.eq(invalidDB.view.findOne({}, {_id: 0}),
+ {x: 1},
+ 'find on view created with direct write to views catalog should work');
+assert.writeOK(invalidDB.system.views.insert({_id: 'invalid', pipeline: 3.0}));
+
+// Check that view-related commands fail with an invalid view catalog, but other commands on
+// existing collections still succeed.
+assert.commandFailedWithCode(invalidDB.runCommand({find: 'view'}),
+ ErrorCodes.InvalidViewDefinition,
+ 'find on existing view in DB with invalid system.views should fail');
+
+assert.eq(invalidDB.coll.findOne({}, {_id: 0}),
+ {x: 1},
+ 'find on existing collection in DB with invalid views catalog should work');
+
+assert.writeOK(invalidDB.coll.insert({x: 2}),
+ 'insert in existing collection in DB with invalid views catalog should work');
+
+assert.writeError(invalidDB.x.insert({x: 2}),
+ 'insert into new collection in DB with invalid views catalog should fail');
+
+assert.commandWorked(
+ invalidDB.runCommand({drop: 'coll'}),
+ 'dropping an existing collection in DB with invalid views catalog should work');
+
+assert.commandFailedWithCode(
+ invalidDB.runCommand({drop: 'view'}),
+ ErrorCodes.InvalidViewDefinition,
+ 'dropping an existing view in DB with invalid views catalog should fail');
+
+assert.commandFailedWithCode(invalidDB.createCollection('x'),
+ ErrorCodes.InvalidViewDefinition,
+ 'creating a collection in DB with invalid views catalog should fail');
+
+assert.commandFailedWithCode(
+ invalidDB.runCommand({find: 'x'}),
+ ErrorCodes.InvalidViewDefinition,
+ 'find on non-existent collection in DB with invalid system.views should fail');
+
+// Now fix the database by removing the invalid system.views entry, and check all is OK.
+assert.writeOK(invalidDB.system.views.remove({_id: 'invalid'}),
+ 'should be able to remove invalid view with direct write to view catalog');
+assert.writeOK(invalidDB.coll.insert({x: 1}),
+ 'after remove invalid view from catalog, should be able to create new collection');
+assert.eq(invalidDB.view.findOne({}, {_id: 0}),
+ {x: 1},
+ 'find on view should work again after removing invalid view from catalog');
})();
diff --git a/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js b/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js
index 65ebef5ccf3..9664cfe4538 100644
--- a/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js
+++ b/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js
@@ -3,42 +3,40 @@
* collection or index with the same WiredTiger options.
*/
(function() {
- 'use strict';
+'use strict';
- // Skip this test if not running with the "wiredTiger" storage engine.
- if (db.serverStatus().storageEngine.name !== 'wiredTiger') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
- return;
- }
+// Skip this test if not running with the "wiredTiger" storage engine.
+if (db.serverStatus().storageEngine.name !== 'wiredTiger') {
+ jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
+ return;
+}
- var collNamePrefix = 'wt_roundtrip_creation_string';
+var collNamePrefix = 'wt_roundtrip_creation_string';
- // Drop the collections used by the test to ensure that the create commands don't fail because
- // the collections already exist.
- db[collNamePrefix].source.drop();
- db[collNamePrefix].dest.drop();
+// Drop the collections used by the test to ensure that the create commands don't fail because
+// the collections already exist.
+db[collNamePrefix].source.drop();
+db[collNamePrefix].dest.drop();
- assert.commandWorked(db.createCollection(collNamePrefix + '.source'));
- assert.commandWorked(db[collNamePrefix].source.createIndex({a: 1}, {name: 'a_1'}));
+assert.commandWorked(db.createCollection(collNamePrefix + '.source'));
+assert.commandWorked(db[collNamePrefix].source.createIndex({a: 1}, {name: 'a_1'}));
- var collStats = db.runCommand({collStats: collNamePrefix + '.source'});
- assert.commandWorked(collStats);
+var collStats = db.runCommand({collStats: collNamePrefix + '.source'});
+assert.commandWorked(collStats);
- assert.commandWorked(
- db.runCommand({
- create: collNamePrefix + '.dest',
- storageEngine: {wiredTiger: {configString: collStats.wiredTiger.creationString}}
- }),
- 'unable to create collection using the creation string of another collection');
+assert.commandWorked(db.runCommand({
+ create: collNamePrefix + '.dest',
+ storageEngine: {wiredTiger: {configString: collStats.wiredTiger.creationString}}
+}),
+ 'unable to create collection using the creation string of another collection');
- assert.commandWorked(db.runCommand({
- createIndexes: collNamePrefix + '.dest',
- indexes: [{
- key: {b: 1},
- name: 'b_1',
- storageEngine:
- {wiredTiger: {configString: collStats.indexDetails.a_1.creationString}}
- }]
- }),
- 'unable to create index using the creation string of another index');
+assert.commandWorked(db.runCommand({
+ createIndexes: collNamePrefix + '.dest',
+ indexes: [{
+ key: {b: 1},
+ name: 'b_1',
+ storageEngine: {wiredTiger: {configString: collStats.indexDetails.a_1.creationString}}
+ }]
+}),
+ 'unable to create index using the creation string of another index');
})();