Diffstat (limited to 'jstests')
-rw-r--r--  jstests/core/compact_keeps_indexes.js | 2
-rw-r--r--  jstests/core/count10.js | 7
-rw-r--r--  jstests/core/count_plan_summary.js | 10
-rw-r--r--  jstests/core/currentop.js | 2
-rw-r--r--  jstests/core/cursora.js | 1
-rw-r--r--  jstests/core/distinct3.js | 2
-rw-r--r--  jstests/core/explain3.js | 6
-rw-r--r--  jstests/core/find_and_modify_concurrent_update.js | 2
-rw-r--r--  jstests/core/geo_s2cursorlimitskip.js | 7
-rw-r--r--  jstests/core/getlog2.js | 7
-rw-r--r--  jstests/core/index_filter_commands.js | 20
-rw-r--r--  jstests/core/index_stats.js | 7
-rw-r--r--  jstests/core/killop.js | 4
-rw-r--r--  jstests/core/killop_drop_collection.js | 2
-rw-r--r--  jstests/core/loadserverscripts.js | 3
-rw-r--r--  jstests/core/max_time_ms.js | 7
-rw-r--r--  jstests/core/mr_killop.js | 2
-rw-r--r--  jstests/core/mr_optim.js | 15
-rw-r--r--  jstests/core/notablescan.js | 7
-rw-r--r--  jstests/core/plan_cache_clear.js | 7
-rw-r--r--  jstests/core/plan_cache_list_plans.js | 7
-rw-r--r--  jstests/core/plan_cache_list_shapes.js | 7
-rw-r--r--  jstests/core/plan_cache_shell_helpers.js | 7
-rw-r--r--  jstests/core/queryoptimizer3.js | 2
-rw-r--r--  jstests/core/remove9.js | 2
-rw-r--r--  jstests/core/removeb.js | 2
-rw-r--r--  jstests/core/removec.js | 2
-rw-r--r--  jstests/core/shellstartparallel.js | 1
-rw-r--r--  jstests/core/startup_log.js | 209
-rw-r--r--  jstests/core/top.js | 6
-rw-r--r--  jstests/core/updatef.js | 2
-rw-r--r--  jstests/libs/override_methods/set_read_and_write_concerns.js | 30
-rw-r--r--  jstests/libs/override_methods/set_read_preference_secondary.js | 88
33 files changed, 364 insertions, 121 deletions
diff --git a/jstests/core/compact_keeps_indexes.js b/jstests/core/compact_keeps_indexes.js
index f2da7597cdf..d112dac3d61 100644
--- a/jstests/core/compact_keeps_indexes.js
+++ b/jstests/core/compact_keeps_indexes.js
@@ -1,6 +1,8 @@
// SERVER-16676 Make sure compact doesn't leave the collection with bad indexes
// SERVER-16967 Make sure compact doesn't crash while collections are being dropped
// in a different database.
+//
+// @tags: [requires_parallel_shell]
(function() {
'use strict';
diff --git a/jstests/core/count10.js b/jstests/core/count10.js
index 2a1853c399a..453775c97f5 100644
--- a/jstests/core/count10.js
+++ b/jstests/core/count10.js
@@ -1,4 +1,11 @@
// Test that interrupting a count returns an error code.
+//
+// @tags: [
+// # This test attempts to perform a count command and find it using the currentOp command. The
+// # former operation may be routed to a secondary in the replica set, whereas the latter must be
+// # routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
t = db.count10;
t.drop();
diff --git a/jstests/core/count_plan_summary.js b/jstests/core/count_plan_summary.js
index 48891d21e8e..365f289c457 100644
--- a/jstests/core/count_plan_summary.js
+++ b/jstests/core/count_plan_summary.js
@@ -1,5 +1,11 @@
-// Test that the plan summary string appears in db.currentOp() for
-// count operations. SERVER-14064.
+// Test that the plan summary string appears in db.currentOp() for count operations. SERVER-14064.
+//
+// @tags: [
+// # This test attempts to perform a find command and find it using the currentOp command. The
+// # former operation may be routed to a secondary in the replica set, whereas the latter must be
+// # routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_count_plan_summary;
t.drop();
diff --git a/jstests/core/currentop.js b/jstests/core/currentop.js
index 296ad69355c..082554c9ec8 100644
--- a/jstests/core/currentop.js
+++ b/jstests/core/currentop.js
@@ -1,5 +1,7 @@
/**
* Tests that long-running operations show up in currentOp and report the locks they are holding.
+ *
+ * @tags: [requires_parallel_shell]
*/
(function() {
"use strict";
diff --git a/jstests/core/cursora.js b/jstests/core/cursora.js
index dfd9e28f281..ae281c2002f 100644
--- a/jstests/core/cursora.js
+++ b/jstests/core/cursora.js
@@ -1,3 +1,4 @@
+// @tags: [requires_parallel_shell]
t = db.cursora;
function run(n, atomic) {
diff --git a/jstests/core/distinct3.js b/jstests/core/distinct3.js
index 6ab21599f97..c36afcc37ec 100644
--- a/jstests/core/distinct3.js
+++ b/jstests/core/distinct3.js
@@ -1,4 +1,6 @@
// Yield and delete test case for query optimizer cursor. SERVER-4401
+//
+// @tags: [requires_parallel_shell]
t = db.jstests_distinct3;
t.drop();
diff --git a/jstests/core/explain3.js b/jstests/core/explain3.js
index 64db7686699..d2145bc1fb9 100644
--- a/jstests/core/explain3.js
+++ b/jstests/core/explain3.js
@@ -1,4 +1,8 @@
-/** SERVER-2451 Kill cursor while explain is yielding */
+/**
+ * SERVER-2451 Kill cursor while explain is yielding
+ *
+ * @tags: [requires_parallel_shell]
+ */
t = db.jstests_explain3;
t.drop();
diff --git a/jstests/core/find_and_modify_concurrent_update.js b/jstests/core/find_and_modify_concurrent_update.js
index 3986ac62ea9..fe52016623d 100644
--- a/jstests/core/find_and_modify_concurrent_update.js
+++ b/jstests/core/find_and_modify_concurrent_update.js
@@ -1,5 +1,7 @@
// Ensures that find and modify will not apply an update to a document which, due to a concurrent
// modification, no longer matches the query predicate.
+//
+// @tags: [requires_parallel_shell]
(function() {
"use strict";
diff --git a/jstests/core/geo_s2cursorlimitskip.js b/jstests/core/geo_s2cursorlimitskip.js
index 868b57de39f..9eb580edd25 100644
--- a/jstests/core/geo_s2cursorlimitskip.js
+++ b/jstests/core/geo_s2cursorlimitskip.js
@@ -1,4 +1,11 @@
// Test various cursor behaviors
+//
+// @tags: [
+// # This test attempts to enable profiling on a server and then get profiling data by reading
+// # from the "system.profile" collection. The former operation must be routed to the primary in
+// # a replica set, whereas the latter may be routed to a secondary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.geo_s2getmmm;
t.drop();
t.ensureIndex({geo: "2dsphere"});
diff --git a/jstests/core/getlog2.js b/jstests/core/getlog2.js
index b6cf223b967..29b6d299123 100644
--- a/jstests/core/getlog2.js
+++ b/jstests/core/getlog2.js
@@ -1,4 +1,11 @@
// tests getlog as well as slow querying logging
+//
+// @tags: [
+// # This test attempts to perform a find command and see that it ran using the getLog command.
+// # The former operation may be routed to a secondary in the replica set, whereas the latter must
+// # be routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
glcol = db.getLogTest2;
glcol.drop();
diff --git a/jstests/core/index_filter_commands.js b/jstests/core/index_filter_commands.js
index 027731e97cf..641edf426a7 100644
--- a/jstests/core/index_filter_commands.js
+++ b/jstests/core/index_filter_commands.js
@@ -6,20 +6,24 @@
* Displays index filters for all query shapes in a collection.
*
* - planCacheClearFilters
- * Clears index filter for a single query shape or,
- * if the query shape is omitted, all filters for the collection.
+ * Clears index filter for a single query shape or, if the query shape is omitted, all filters for
+ * the collection.
*
* - planCacheSetFilter
* Sets index filter for a query shape. Overrides existing filter.
*
- * Not a lot of data access in this test suite. Hint commands
- * manage a non-persistent mapping in the server of
- * query shape to list of index specs.
+ * Not a lot of data access in this test suite. Hint commands manage a non-persistent mapping in the
+ * server of query shape to list of index specs.
*
- * Only time we might need to execute a query is to check the plan
- * cache state. We would do this with the planCacheListPlans command
- * on the same query shape with the index filters.
+ * Only time we might need to execute a query is to check the plan cache state. We would do this
+ * with the planCacheListPlans command on the same query shape with the index filters.
*
+ * @tags: [
+ * # This test attempts to perform queries with plan cache filters set up. The former operation
+ * # may be routed to a secondary in the replica set, whereas the latter must be routed to the
+ * # primary.
+ * assumes_read_preference_unchanged,
+ * ]
*/
var t = db.jstests_index_filter_commands;
diff --git a/jstests/core/index_stats.js b/jstests/core/index_stats.js
index 7db4559210c..508c8d25183 100644
--- a/jstests/core/index_stats.js
+++ b/jstests/core/index_stats.js
@@ -1,3 +1,10 @@
+// @tags: [
+// # This test attempts to perform write operations and get index usage statistics using the
+// # $indexStats stage. The former operation must be routed to the primary in a replica set,
+// # whereas the latter may be routed to a secondary.
+// assumes_read_preference_unchanged,
+// ]
+
(function() {
"use strict";
diff --git a/jstests/core/killop.js b/jstests/core/killop.js
index 66476ec10f4..4cbc5d54dde 100644
--- a/jstests/core/killop.js
+++ b/jstests/core/killop.js
@@ -10,6 +10,8 @@
* terminate until the server determines that they've spent too much time in JS execution, typically
* after 30 seconds of wall clock time have passed. For these operations to take a long time, the
* counted collection must not be empty; hence an initial write to the collection is required.
+ *
+ * @tags: [requires_parallel_shell]
*/
t = db.jstests_killop;
@@ -73,4 +75,4 @@ jsTestLog("Waiting for ops to terminate");
// don't want to pass if timeout killed the js function.
var end = new Date();
var diff = end - start;
-assert.lt(diff, 30000, "Start: " + start + "; end: " + end + "; diff: " + diff);
\ No newline at end of file
+assert.lt(diff, 30000, "Start: " + start + "; end: " + end + "; diff: " + diff);
diff --git a/jstests/core/killop_drop_collection.js b/jstests/core/killop_drop_collection.js
index 7138ee8eda6..c262d1b4ffd 100644
--- a/jstests/core/killop_drop_collection.js
+++ b/jstests/core/killop_drop_collection.js
@@ -3,6 +3,8 @@
* to complete. Interrupting a collection drop could leave the database in an inconsistent state.
* This test confirms that killOp won't interrupt a collection drop, and that the drop occurs
* successfully.
+ *
+ * @tags: [requires_parallel_shell]
*/
(function() {
"use strict";
diff --git a/jstests/core/loadserverscripts.js b/jstests/core/loadserverscripts.js
index daf87b2475b..1c382d27ad4 100644
--- a/jstests/core/loadserverscripts.js
+++ b/jstests/core/loadserverscripts.js
@@ -1,5 +1,6 @@
-
// Test db.loadServerScripts()
+//
+// @tags: [requires_parallel_shell]
var testdb = db.getSisterDB("loadserverscripts");
diff --git a/jstests/core/max_time_ms.js b/jstests/core/max_time_ms.js
index 39fce0fc9ca..5147201cacc 100644
--- a/jstests/core/max_time_ms.js
+++ b/jstests/core/max_time_ms.js
@@ -1,4 +1,11 @@
// Tests query/command option $maxTimeMS.
+//
+// @tags: [
+// # This test attempts to perform read operations after having enabled the maxTimeAlwaysTimeOut
+// # failpoint. The former operations may be routed to a secondary in the replica set, whereas the
+// # latter must be routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.max_time_ms;
var exceededTimeLimit = 50; // ErrorCodes::ExceededTimeLimit
diff --git a/jstests/core/mr_killop.js b/jstests/core/mr_killop.js
index 186424b1db9..437a02511de 100644
--- a/jstests/core/mr_killop.js
+++ b/jstests/core/mr_killop.js
@@ -1,4 +1,6 @@
// Test killop applied to m/r operations and child ops of m/r operations.
+//
+// @tags: [requires_parallel_shell]
t = db.jstests_mr_killop;
t.drop();
diff --git a/jstests/core/mr_optim.js b/jstests/core/mr_optim.js
index 7437753ca67..1c525ae3de3 100644
--- a/jstests/core/mr_optim.js
+++ b/jstests/core/mr_optim.js
@@ -3,8 +3,17 @@
t = db.mr_optim;
t.drop();
+// We drop the output collection to ensure the test can be run multiple times successfully. We
+// explicitly avoid using the DBCollection#drop() shell helper to avoid implicitly sharding the
+// collection during the sharded_collections_jscore_passthrough.yml test suite when reading the
+// results from the output collection in the reformat() function.
+var res = db.runCommand({drop: "mr_optim_out"});
+if (res.ok !== 1) {
+ assert.commandFailedWithCode(res, ErrorCodes.NamespaceNotFound);
+}
+
for (var i = 0; i < 1000; ++i) {
- t.save({a: Math.random(1000), b: Math.random(10000)});
+ assert.writeOK(t.save({a: Math.random(1000), b: Math.random(10000)}));
}
function m() {
@@ -21,7 +30,7 @@ function reformat(r) {
if (r.results)
cursor = r.results;
else
- cursor = r.find();
+ cursor = r.find().sort({_id: 1});
cursor.forEach(function(z) {
x[z._id] = z.value;
});
@@ -43,4 +52,4 @@ res.drop();
assert.eq(x, x2, "object from inline and collection are not equal");
-t.drop();
\ No newline at end of file
+t.drop();
diff --git a/jstests/core/notablescan.js b/jstests/core/notablescan.js
index 80306c08cf2..bb4c170a603 100644
--- a/jstests/core/notablescan.js
+++ b/jstests/core/notablescan.js
@@ -1,4 +1,11 @@
// check notablescan mode
+//
+// @tags: [
+// # This test attempts to perform read operations after having enabled the notablescan server
+// # parameter. The former operations may be routed to a secondary in the replica set, whereas the
+// # latter must be routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
t = db.test_notablescan;
t.drop();
diff --git a/jstests/core/plan_cache_clear.js b/jstests/core/plan_cache_clear.js
index 8f9cf0ea302..778239616b5 100644
--- a/jstests/core/plan_cache_clear.js
+++ b/jstests/core/plan_cache_clear.js
@@ -1,5 +1,12 @@
// Test clearing of the plan cache, either manually through the planCacheClear command,
// or due to system events such as an index build.
+//
+// @tags: [
+// # This test attempts to perform queries and introspect/manipulate the server's plan cache
+// # entries. The former operation may be routed to a secondary in the replica set, whereas the
+// # latter must be routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_plan_cache_clear;
t.drop();
diff --git a/jstests/core/plan_cache_list_plans.js b/jstests/core/plan_cache_list_plans.js
index b4be4ad46c4..14cf9c97c28 100644
--- a/jstests/core/plan_cache_list_plans.js
+++ b/jstests/core/plan_cache_list_plans.js
@@ -1,4 +1,11 @@
// Test the planCacheListPlans command.
+//
+// @tags: [
+// # This test attempts to perform queries and introspect the server's plan cache entries. The
+// # former operation may be routed to a secondary in the replica set, whereas the latter must be
+// # routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_plan_cache_list_plans;
t.drop();
diff --git a/jstests/core/plan_cache_list_shapes.js b/jstests/core/plan_cache_list_shapes.js
index 4711940870d..f0061459968 100644
--- a/jstests/core/plan_cache_list_shapes.js
+++ b/jstests/core/plan_cache_list_shapes.js
@@ -1,5 +1,12 @@
// Test the planCacheListQueryShapes command, which returns a list of query shapes
// for the queries currently cached in the collection.
+//
+// @tags: [
+// # This test attempts to perform queries with plan cache filters set up. The former operation
+// # may be routed to a secondary in the replica set, whereas the latter must be routed to the
+// # primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_plan_cache_list_shapes;
t.drop();
diff --git a/jstests/core/plan_cache_shell_helpers.js b/jstests/core/plan_cache_shell_helpers.js
index a61421afc7b..f2ac0e7051f 100644
--- a/jstests/core/plan_cache_shell_helpers.js
+++ b/jstests/core/plan_cache_shell_helpers.js
@@ -1,4 +1,11 @@
// Test the shell helpers which wrap the plan cache commands.
+//
+// @tags: [
+// # This test attempts to perform queries and introspect the server's plan cache entries. The
+// # former operation may be routed to a secondary in the replica set, whereas the latter must be
+// # routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_plan_cache_shell_helpers;
t.drop();
diff --git a/jstests/core/queryoptimizer3.js b/jstests/core/queryoptimizer3.js
index 4bc3754ff7a..b83a02cbf80 100644
--- a/jstests/core/queryoptimizer3.js
+++ b/jstests/core/queryoptimizer3.js
@@ -1,4 +1,6 @@
// Check cases where index scans are aborted due to the collection being dropped. SERVER-4400
+//
+// @tags: [requires_parallel_shell]
t = db.jstests_queryoptimizer3;
t.drop();
diff --git a/jstests/core/remove9.js b/jstests/core/remove9.js
index 9b7b2f31190..ed0c8f45130 100644
--- a/jstests/core/remove9.js
+++ b/jstests/core/remove9.js
@@ -1,4 +1,6 @@
// SERVER-2009 Count odd numbered entries while updating and deleting even numbered entries.
+//
+// @tags: [requires_parallel_shell]
t = db.jstests_remove9;
t.drop();
diff --git a/jstests/core/removeb.js b/jstests/core/removeb.js
index 2141e138254..aba5976ccf7 100644
--- a/jstests/core/removeb.js
+++ b/jstests/core/removeb.js
@@ -1,4 +1,6 @@
// Test removal of Records that have been reused since the remove operation began. SERVER-5198
+//
+// @tags: [requires_parallel_shell]
t = db.jstests_removeb;
t.drop();
diff --git a/jstests/core/removec.js b/jstests/core/removec.js
index b4fe09ef970..15f62bc206d 100644
--- a/jstests/core/removec.js
+++ b/jstests/core/removec.js
@@ -1,4 +1,6 @@
// Sanity test for removing documents with adjacent index keys. SERVER-2008
+//
+// @tags: [requires_parallel_shell]
t = db.jstests_removec;
t.drop();
diff --git a/jstests/core/shellstartparallel.js b/jstests/core/shellstartparallel.js
index 7e288e0d589..f92c1d507dd 100644
--- a/jstests/core/shellstartparallel.js
+++ b/jstests/core/shellstartparallel.js
@@ -1,3 +1,4 @@
+// @tags: [requires_parallel_shell]
function f() {
throw Error("intentional_throw_to_test_assert_throws");
}
diff --git a/jstests/core/startup_log.js b/jstests/core/startup_log.js
index 3b0cbe3464d..c73013d1744 100644
--- a/jstests/core/startup_log.js
+++ b/jstests/core/startup_log.js
@@ -1,101 +1,108 @@
-load('jstests/aggregation/extras/utils.js');
-
-(function() {
- 'use strict';
-
- // Check that smallArray is entirely contained by largeArray
- // returns false if a member of smallArray is not in largeArray
- function arrayIsSubset(smallArray, largeArray) {
- for (var i = 0; i < smallArray.length; i++) {
- if (!Array.contains(largeArray, smallArray[i])) {
- print("Could not find " + smallArray[i] + " in largeArray");
- return false;
- }
- }
-
- return true;
- }
-
- // Test startup_log
- var stats = db.getSisterDB("local").startup_log.stats();
- assert(stats.capped);
-
- var latestStartUpLog =
- db.getSisterDB("local").startup_log.find().sort({$natural: -1}).limit(1).next();
- var serverStatus = db._adminCommand("serverStatus");
- var cmdLine = db._adminCommand("getCmdLineOpts").parsed;
-
- // Test that the startup log has the expected keys
- var verbose = false;
- var expectedKeys =
- ["_id", "hostname", "startTime", "startTimeLocal", "cmdLine", "pid", "buildinfo"];
- var keys = Object.keySet(latestStartUpLog);
- assert(arrayEq(expectedKeys, keys, verbose), 'startup_log keys failed');
-
- // Tests _id implicitly - should be comprised of host-timestamp
- // Setup expected startTime and startTimeLocal from the supplied timestamp
- var _id = latestStartUpLog._id.split('-'); // _id should consist of host-timestamp
- var _idUptime = _id.pop();
- var _idHost = _id.join('-');
- var uptimeSinceEpochRounded = Math.floor(_idUptime / 1000) * 1000;
- var startTime = new Date(uptimeSinceEpochRounded); // Expected startTime
-
- assert.eq(_idHost, latestStartUpLog.hostname, "Hostname doesn't match one from _id");
- assert.eq(serverStatus.host.split(':')[0],
- latestStartUpLog.hostname,
- "Hostname doesn't match one in server status");
- assert.closeWithinMS(startTime,
- latestStartUpLog.startTime,
- "StartTime doesn't match one from _id",
- 2000); // Expect less than 2 sec delta
- assert.eq(cmdLine, latestStartUpLog.cmdLine, "cmdLine doesn't match that from getCmdLineOpts");
- assert.eq(serverStatus.pid, latestStartUpLog.pid, "pid doesn't match that from serverStatus");
-
- // Test buildinfo
- var buildinfo = db.runCommand("buildinfo");
- delete buildinfo.ok; // Delete extra meta info not in startup_log
- var isMaster = db._adminCommand("ismaster");
-
- // Test buildinfo has the expected keys
- var expectedKeys = [
- "version",
- "gitVersion",
- "allocator",
- "versionArray",
- "javascriptEngine",
- "openssl",
- "buildEnvironment",
- "debug",
- "maxBsonObjectSize",
- "bits",
- "modules"
- ];
-
- var keys = Object.keySet(latestStartUpLog.buildinfo);
- // Disabled to check
- assert(arrayIsSubset(expectedKeys, keys),
- "buildinfo keys failed! \n expected:\t" + expectedKeys + "\n actual:\t" + keys);
- assert.eq(buildinfo,
- latestStartUpLog.buildinfo,
- "buildinfo doesn't match that from buildinfo command");
-
- // Test version and version Array
- var version = latestStartUpLog.buildinfo.version.split('-')[0];
- var versionArray = latestStartUpLog.buildinfo.versionArray;
- var versionArrayCleaned = versionArray.slice(0, 3);
- if (versionArray[3] == -100) {
- versionArrayCleaned[2] -= 1;
- }
-
- assert.eq(serverStatus.version,
- latestStartUpLog.buildinfo.version,
- "Mongo version doesn't match that from ServerStatus");
- assert.eq(
- version, versionArrayCleaned.join('.'), "version doesn't match that from the versionArray");
- var jsEngine = latestStartUpLog.buildinfo.javascriptEngine;
- assert((jsEngine == "none") || jsEngine.startsWith("mozjs"));
- assert.eq(isMaster.maxBsonObjectSize,
- latestStartUpLog.buildinfo.maxBsonObjectSize,
- "maxBsonObjectSize doesn't match one from ismaster");
-
-})();
+/**
+ * This test attempts to read from the "local.startup_log" collection and assert that it has an
+ * entry matching the server's response from the "getCmdLineOpts" command. The former operation may
+ * be routed to a secondary in the replica set, whereas the latter must be routed to the primary.
+ *
+ * @tags: [assumes_read_preference_unchanged]
+ */
+load('jstests/aggregation/extras/utils.js');
+
+(function() {
+ 'use strict';
+
+ // Check that smallArray is entirely contained by largeArray
+ // returns false if a member of smallArray is not in largeArray
+ function arrayIsSubset(smallArray, largeArray) {
+ for (var i = 0; i < smallArray.length; i++) {
+ if (!Array.contains(largeArray, smallArray[i])) {
+ print("Could not find " + smallArray[i] + " in largeArray");
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ // Test startup_log
+ var stats = db.getSisterDB("local").startup_log.stats();
+ assert(stats.capped);
+
+ var latestStartUpLog =
+ db.getSisterDB("local").startup_log.find().sort({$natural: -1}).limit(1).next();
+ var serverStatus = db._adminCommand("serverStatus");
+ var cmdLine = db._adminCommand("getCmdLineOpts").parsed;
+
+ // Test that the startup log has the expected keys
+ var verbose = false;
+ var expectedKeys =
+ ["_id", "hostname", "startTime", "startTimeLocal", "cmdLine", "pid", "buildinfo"];
+ var keys = Object.keySet(latestStartUpLog);
+ assert(arrayEq(expectedKeys, keys, verbose), 'startup_log keys failed');
+
+ // Tests _id implicitly - should be comprised of host-timestamp
+ // Setup expected startTime and startTimeLocal from the supplied timestamp
+ var _id = latestStartUpLog._id.split('-'); // _id should consist of host-timestamp
+ var _idUptime = _id.pop();
+ var _idHost = _id.join('-');
+ var uptimeSinceEpochRounded = Math.floor(_idUptime / 1000) * 1000;
+ var startTime = new Date(uptimeSinceEpochRounded); // Expected startTime
+
+ assert.eq(_idHost, latestStartUpLog.hostname, "Hostname doesn't match one from _id");
+ assert.eq(serverStatus.host.split(':')[0],
+ latestStartUpLog.hostname,
+ "Hostname doesn't match one in server status");
+ assert.closeWithinMS(startTime,
+ latestStartUpLog.startTime,
+ "StartTime doesn't match one from _id",
+ 2000); // Expect less than 2 sec delta
+ assert.eq(cmdLine, latestStartUpLog.cmdLine, "cmdLine doesn't match that from getCmdLineOpts");
+ assert.eq(serverStatus.pid, latestStartUpLog.pid, "pid doesn't match that from serverStatus");
+
+ // Test buildinfo
+ var buildinfo = db.runCommand("buildinfo");
+ delete buildinfo.ok; // Delete extra meta info not in startup_log
+ var isMaster = db._adminCommand("ismaster");
+
+ // Test buildinfo has the expected keys
+ var expectedKeys = [
+ "version",
+ "gitVersion",
+ "allocator",
+ "versionArray",
+ "javascriptEngine",
+ "openssl",
+ "buildEnvironment",
+ "debug",
+ "maxBsonObjectSize",
+ "bits",
+ "modules"
+ ];
+
+ var keys = Object.keySet(latestStartUpLog.buildinfo);
+ // Disabled to check
+ assert(arrayIsSubset(expectedKeys, keys),
+ "buildinfo keys failed! \n expected:\t" + expectedKeys + "\n actual:\t" + keys);
+ assert.eq(buildinfo,
+ latestStartUpLog.buildinfo,
+ "buildinfo doesn't match that from buildinfo command");
+
+ // Test version and version Array
+ var version = latestStartUpLog.buildinfo.version.split('-')[0];
+ var versionArray = latestStartUpLog.buildinfo.versionArray;
+ var versionArrayCleaned = versionArray.slice(0, 3);
+ if (versionArray[3] == -100) {
+ versionArrayCleaned[2] -= 1;
+ }
+
+ assert.eq(serverStatus.version,
+ latestStartUpLog.buildinfo.version,
+ "Mongo version doesn't match that from ServerStatus");
+ assert.eq(
+ version, versionArrayCleaned.join('.'), "version doesn't match that from the versionArray");
+ var jsEngine = latestStartUpLog.buildinfo.javascriptEngine;
+ assert((jsEngine == "none") || jsEngine.startsWith("mozjs"));
+ assert.eq(isMaster.maxBsonObjectSize,
+ latestStartUpLog.buildinfo.maxBsonObjectSize,
+ "maxBsonObjectSize doesn't match one from ismaster");
+
+})();
diff --git a/jstests/core/top.js b/jstests/core/top.js
index 1aff2a4136b..a8f72091896 100644
--- a/jstests/core/top.js
+++ b/jstests/core/top.js
@@ -1,5 +1,11 @@
/**
* 1. check top numbers are correct
+ *
+ * This test attempts to perform read operations and get statistics using the top command. The
+ * former operation may be routed to a secondary in the replica set, whereas the latter must be
+ * routed to the primary.
+ *
+ * @tags: [assumes_read_preference_unchanged]
*/
var name = "toptest";
diff --git a/jstests/core/updatef.js b/jstests/core/updatef.js
index 6bc8df4e0c1..925e718bbd9 100644
--- a/jstests/core/updatef.js
+++ b/jstests/core/updatef.js
@@ -1,4 +1,6 @@
// Test unsafe management of nsdt on update command yield SERVER-3208
+//
+// @tags: [requires_parallel_shell]
prefixNS = db.jstests_updatef;
prefixNS.save({});
diff --git a/jstests/libs/override_methods/set_read_and_write_concerns.js b/jstests/libs/override_methods/set_read_and_write_concerns.js
index 30c853fb54b..267b5ab9bfc 100644
--- a/jstests/libs/override_methods/set_read_and_write_concerns.js
+++ b/jstests/libs/override_methods/set_read_and_write_concerns.js
@@ -57,6 +57,31 @@
"updateUser",
]);
+ const kCommandsToEmulateWriteConcern = new Set([
+ "aggregate",
+ "appendOplogNote",
+ "captrunc",
+ "cleanupOrphaned",
+ "clone",
+ "cloneCollection",
+ "cloneCollectionAsCapped",
+ "convertToCapped",
+ "copydb",
+ "create",
+ "createIndexes",
+ "drop",
+ "dropDatabase",
+ "dropIndexes",
+ "emptycapped",
+ "godinsert",
+ "mapReduce",
+ "mapreduce",
+ "mapreduce.shardedfinish",
+ "moveChunk",
+ "renameCollection",
+ "revokePrivilegesFromRole",
+ ]);
+
function runCommandWithReadAndWriteConcerns(
conn, dbName, commandName, commandObj, func, makeFuncArgs) {
if (typeof commandObj !== "object" || commandObj === null) {
@@ -79,10 +104,7 @@
var shouldForceReadConcern = kCommandsSupportingReadConcern.has(commandName);
var shouldForceWriteConcern = kCommandsSupportingWriteConcern.has(commandName);
- var shouldEmulateWriteConcern =
- (commandName === "aggregate" || commandName === "createIndexes" ||
- commandName === "mapReduce" || commandName === "mapreduce" ||
- commandName === "mapreduce.shardedfinish");
+ var shouldEmulateWriteConcern = kCommandsToEmulateWriteConcern.has(commandName);
if (commandName === "aggregate") {
if (OverrideHelpers.isAggregationWithOutStage(commandName, commandObjUnwrapped)) {
diff --git a/jstests/libs/override_methods/set_read_preference_secondary.js b/jstests/libs/override_methods/set_read_preference_secondary.js
new file mode 100644
index 00000000000..270bbf40272
--- /dev/null
+++ b/jstests/libs/override_methods/set_read_preference_secondary.js
@@ -0,0 +1,88 @@
+/**
+ * Use prototype overrides to set read preference to "secondary" when running tests.
+ */
+(function() {
+ "use strict";
+
+ load("jstests/libs/override_methods/override_helpers.js");
+
+ const kReadPreferenceSecondary = {
+ mode: "secondary"
+ };
+ const kCommandsSupportingReadPreference = new Set([
+ "aggregate",
+ "collStats",
+ "count",
+ "dbStats",
+ "distinct",
+ "find",
+ "geoNear",
+ "geoSearch",
+ "group",
+ "mapReduce",
+ "mapreduce",
+ "parallelCollectionScan",
+ ]);
+
+ function runCommandWithReadPreferenceSecondary(
+ conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
+
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ var commandObjUnwrapped = commandObj;
+ if (commandName === "query" || commandName === "$query") {
+ commandObjUnwrapped = commandObj[commandName];
+ commandName = Object.keys(commandObjUnwrapped)[0];
+ }
+
+ if (commandObj[commandName] === "system.profile") {
+ throw new Error("Cowardly refusing to run test with overridden read preference" +
+ " when it reads from a non-replicated collection: " +
+ tojson(commandObj));
+ }
+
+ var shouldForceReadPreference = kCommandsSupportingReadPreference.has(commandName);
+ if (OverrideHelpers.isAggregationWithOutStage(commandName, commandObjUnwrapped)) {
+ // An aggregation with a $out stage must be sent to the primary.
+ shouldForceReadPreference = false;
+ } else if ((commandName === "mapReduce" || commandName === "mapreduce") &&
+ !OverrideHelpers.isMapReduceWithInlineOutput(commandName, commandObjUnwrapped)) {
+ // A map-reduce operation with non-inline output must be sent to the primary.
+ shouldForceReadPreference = false;
+ }
+
+ if (shouldForceReadPreference) {
+ if (commandObj === commandObjUnwrapped) {
+ // We wrap the command object using a "query" field rather than a "$query" field to
+ // match the implementation of DB.prototype._attachReadPreferenceToCommand().
+ commandObj = {
+ query: commandObj
+ };
+ } else {
+ // We create a copy of 'commandObj' to avoid mutating the parameter the caller
+ // specified.
+ commandObj = Object.assign({}, commandObj);
+ }
+
+ if (commandObj.hasOwnProperty("$readPreference") &&
+ !bsonBinaryEqual({_: commandObj.$readPreference}, {_: kReadPreferenceSecondary})) {
+ throw new Error("Cowardly refusing to override read preference of command: " +
+ tojson(commandObj));
+ }
+
+ commandObj.$readPreference = kReadPreferenceSecondary;
+ }
+
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
+
+ startParallelShell = () => {throw new Error(
+ "Cowardly refusing to a run a test that starts a parallel shell because prior to" +
+ " MongoDB 3.4 replica set connections couldn't be used in it.");
+ };
+
+ OverrideHelpers.overrideRunCommand(runCommandWithReadPreferenceSecondary);
+})();
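
Aside (not part of the diff): a minimal sketch of what the new override does once loaded into the mongo shell. The load path is taken from this diff; the find command and collection name are illustrative assumptions.

    // Hypothetical usage: load the override, then issue a read command as a test normally would.
    load("jstests/libs/override_methods/set_read_preference_secondary.js");
    // Because "find" is in kCommandsSupportingReadPreference, the overridden runCommand wraps the
    // command object and attaches the secondary read preference, so the server effectively receives:
    //   {query: {find: "coll", filter: {}}, $readPreference: {mode: "secondary"}}
    db.runCommand({find: "coll", filter: {}});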