summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMax Hirschhorn <max.hirschhorn@mongodb.com>2018-02-24 02:07:16 -0500
committerMax Hirschhorn <max.hirschhorn@mongodb.com>2018-02-24 02:07:16 -0500
commite54239b3b99687ab79048f4ae0f20b2095910e18 (patch)
tree0a6c2cb74d58888998c78c17d40b71a538870b7d
parent896885c9e43d95b1796a45815937c3edd6b22065 (diff)
downloadmongo-e54239b3b99687ab79048f4ae0f20b2095910e18.tar.gz
SERVER-32691 Add write_concern_majority_passthrough.yml test suite.
Reverts to emulating the write concern to work around how prior to MongoDB 3.4, operations that did writes didn't necessarily accept a writeConcern object. Also limits the usage of replica set connection strings to only the write_concern_majority_passthrough.yml test suite to work around the lack of complete support of MongoURI parsing in versions of the mongo shell prior to MongoDB 3.4. (cherry picked from commit 264d971842cffdf8b4f80def1d90241f132345b7)
-rw-r--r--buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml93
-rw-r--r--buildscripts/resmokelib/core/programs.py16
-rw-r--r--buildscripts/resmokelib/testing/fixtures/interface.py13
-rw-r--r--buildscripts/resmokelib/testing/fixtures/masterslave.py9
-rw-r--r--buildscripts/resmokelib/testing/fixtures/replicaset.py33
-rw-r--r--buildscripts/resmokelib/testing/fixtures/shardedcluster.py33
-rw-r--r--buildscripts/resmokelib/testing/fixtures/standalone.py7
-rw-r--r--buildscripts/resmokelib/testing/testcases.py18
-rw-r--r--jstests/core/compact_keeps_indexes.js2
-rw-r--r--jstests/core/count10.js7
-rw-r--r--jstests/core/count_plan_summary.js10
-rw-r--r--jstests/core/currentop.js2
-rw-r--r--jstests/core/cursora.js1
-rw-r--r--jstests/core/distinct3.js2
-rw-r--r--jstests/core/explain3.js6
-rw-r--r--jstests/core/find_and_modify_concurrent_update.js2
-rw-r--r--jstests/core/geo_s2cursorlimitskip.js7
-rw-r--r--jstests/core/getlog2.js7
-rw-r--r--jstests/core/index_filter_commands.js20
-rw-r--r--jstests/core/index_stats.js7
-rw-r--r--jstests/core/killop.js4
-rw-r--r--jstests/core/killop_drop_collection.js2
-rw-r--r--jstests/core/loadserverscripts.js3
-rw-r--r--jstests/core/max_time_ms.js7
-rw-r--r--jstests/core/mr_killop.js2
-rw-r--r--jstests/core/mr_optim.js15
-rw-r--r--jstests/core/notablescan.js7
-rw-r--r--jstests/core/plan_cache_clear.js7
-rw-r--r--jstests/core/plan_cache_list_plans.js7
-rw-r--r--jstests/core/plan_cache_list_shapes.js7
-rw-r--r--jstests/core/plan_cache_shell_helpers.js7
-rw-r--r--jstests/core/queryoptimizer3.js2
-rw-r--r--jstests/core/remove9.js2
-rw-r--r--jstests/core/removeb.js2
-rw-r--r--jstests/core/removec.js2
-rw-r--r--jstests/core/shellstartparallel.js1
-rw-r--r--jstests/core/startup_log.js209
-rw-r--r--jstests/core/top.js6
-rw-r--r--jstests/core/updatef.js2
-rw-r--r--jstests/libs/override_methods/set_read_and_write_concerns.js30
-rw-r--r--jstests/libs/override_methods/set_read_preference_secondary.js88
41 files changed, 558 insertions, 149 deletions
diff --git a/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml
new file mode 100644
index 00000000000..b20f6790f9f
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml
@@ -0,0 +1,93 @@
+test_kind: js_test
+
+selector:
+ js_test:
+ roots:
+ - jstests/core/**/*.js
+ exclude_files:
+ # These tests are not expected to pass with replica-sets:
+ - jstests/core/dbadmin.js
+ - jstests/core/opcounters_write_cmd.js
+ - jstests/core/read_after_optime.js
+ - jstests/core/capped_update.js
+ # These tests do not expect the mongo shell to be using a replica set connection string.
+ - jstests/core/bench_test*.js
+ - jstests/core/connection_string_validation.js
+ # The bypass_doc_validation.js test runs an applyOps command that causes the primary to generate
+ # an oplog entry without having applied the write. We skip this test to avoid causing a dbhash
+ # mismatch.
+ - jstests/core/bypass_doc_validation.js
+ # These tests use DBCommandCursor which doesn't correctly route getMore and killCursors
+ # commands to the original server the cursor was established on (prior to SERVER-23219).
+ - jstests/core/find_getmore_bsonsize.js
+ - jstests/core/find_getmore_cmd.js
+ - jstests/core/getmore_cmd_maxtimems.js
+ - jstests/core/kill_cursors.js
+ - jstests/core/list_collections1.js
+ - jstests/core/list_indexes.js
+ # These tests attempt to read from the "system.profile" collection, which may be missing entries
+ # if a write was performed on the primary of the replica set instead.
+ - jstests/core/*profile*.js
+ # The shellkillop.js test spawns a parallel shell without using startParallelShell() and
+ # therefore doesn't inherit the w="majority" write concern when performing its writes.
+ - jstests/core/shellkillop.js
+ exclude_with_any_tags:
+ ##
+ # The next three tags correspond to the special errors thrown by the
+ # set_read_and_write_concerns.js override when it refuses to replace the readConcern or
+ # writeConcern of a particular command. Above each tag are the message(s) that cause the tag to
+ # be warranted.
+ ##
+ # "Cowardly refusing to override read concern of command: ..."
+ - assumes_read_concern_unchanged
+ # "Cowardly refusing to override write concern of command: ..."
+ - assumes_write_concern_unchanged
+ # "Cowardly refusing to run test with overridden write concern when it uses a command that can
+ # only perform w=1 writes: ..."
+ - requires_eval_command
+ ##
+  # The next two tags correspond to the special error thrown by the
+ # set_read_preference_secondary.js override when it refuses to replace the readPreference of a
+ # particular command. Above each tag are the message(s) that cause the tag to be warranted.
+ ##
+ # "Cowardly refusing to override read preference of command: ..."
+ # "Cowardly refusing to run test with overridden read preference when it reads from a
+ # non-replicated collection: ..."
+ - assumes_read_preference_unchanged
+ # "Cowardly refusing to a run a test that starts a parallel shell because prior to MongoDB 3.4
+ # replica set connections couldn't be used in it."
+ - requires_parallel_shell
+
+executor:
+ js_test:
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ defaultReadConcernLevel: local
+ eval: >-
+ testingReplication = true;
+ load('jstests/libs/override_methods/set_read_and_write_concerns.js');
+ load('jstests/libs/override_methods/set_read_preference_secondary.js');
+ # We use --readMode=legacy because until MongoDB 3.4, DBCommandCursor wouldn't route the
+ # getMore and killCursors operations to the original server the cursor was established on.
+ readMode: legacy
+ use_connection_string: true
+ hooks:
+ # The CheckReplDBHash hook waits until all operations have replicated to and have been applied
+ # on the secondaries, so we run the ValidateCollections hook after it to ensure we're
+ # validating the entire contents of the collection.
+ - class: CheckReplDBHash
+ - class: ValidateCollections
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: ReplicaSetFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ # This suite requires w="majority" writes to be applied on all secondaries. By using a 2-node
+ # replica set and having secondaries vote, the majority of the replica set is all nodes.
+ num_nodes: 2
+ voting_secondaries: true
+ use_replica_set_connection_string: true
diff --git a/buildscripts/resmokelib/core/programs.py b/buildscripts/resmokelib/core/programs.py
index 5c5f27d06c2..e1a603e4e70 100644
--- a/buildscripts/resmokelib/core/programs.py
+++ b/buildscripts/resmokelib/core/programs.py
@@ -115,7 +115,8 @@ def mongos_program(logger, executable=None, process_kwargs=None, **kwargs):
return _process.Process(logger, args, **process_kwargs)
-def mongo_shell_program(logger, executable=None, filename=None, process_kwargs=None, **kwargs):
+def mongo_shell_program(logger, executable=None, connection_string=None, filename=None,
+ process_kwargs=None, **kwargs):
"""
Returns a Process instance that starts a mongo shell with arguments
constructed from 'kwargs'.
@@ -181,9 +182,22 @@ def mongo_shell_program(logger, executable=None, filename=None, process_kwargs=N
if config.SHELL_WRITE_MODE is not None:
kwargs["writeMode"] = config.SHELL_WRITE_MODE
+ if connection_string is not None:
+ # The --host and --port options are ignored by the mongo shell when an explicit connection
+ # string is specified. We remove these options to avoid any ambiguity with what server the
+ # logged mongo shell invocation will connect to.
+ if "port" in kwargs:
+ kwargs.pop("port")
+
+ if "host" in kwargs:
+ kwargs.pop("host")
+
# Apply the rest of the command line arguments.
_apply_kwargs(args, kwargs)
+ if connection_string is not None:
+ args.append(connection_string)
+
# Have the mongos shell run the specified file.
args.append(filename)
diff --git a/buildscripts/resmokelib/testing/fixtures/interface.py b/buildscripts/resmokelib/testing/fixtures/interface.py
index 8921aa1159c..98b6fd70f17 100644
--- a/buildscripts/resmokelib/testing/fixtures/interface.py
+++ b/buildscripts/resmokelib/testing/fixtures/interface.py
@@ -60,13 +60,22 @@ class Fixture(object):
"""
return True
- def get_connection_string(self):
+ def get_internal_connection_string(self):
"""
Returns the connection string for this fixture. This is NOT a
driver connection string, but a connection string of the format
expected by the mongo::ConnectionString class.
"""
- raise NotImplementedError("get_connection_string must be implemented by Fixture subclasses")
+ raise NotImplementedError(
+ "get_internal_connection_string must be implemented by Fixture subclasses")
+
+ def get_driver_connection_url(self):
+ """
+ Return the mongodb connection string as defined here:
+ https://docs.mongodb.com/manual/reference/connection-string/
+ """
+ raise NotImplementedError(
+ "get_driver_connection_url must be implemented by Fixture subclasses")
def __str__(self):
return "%s (Job #%d)" % (self.__class__.__name__, self.job_num)
diff --git a/buildscripts/resmokelib/testing/fixtures/masterslave.py b/buildscripts/resmokelib/testing/fixtures/masterslave.py
index 2bcd3c5d6d2..41463c0d421 100644
--- a/buildscripts/resmokelib/testing/fixtures/masterslave.py
+++ b/buildscripts/resmokelib/testing/fixtures/masterslave.py
@@ -5,7 +5,6 @@ Master/slave fixture for executing JSTests against.
from __future__ import absolute_import
import os.path
-import socket
import pymongo
@@ -160,6 +159,12 @@ class MasterSlaveFixture(interface.ReplFixture):
mongod_options = self.mongod_options.copy()
mongod_options.update(self.slave_options)
mongod_options["slave"] = ""
- mongod_options["source"] = "%s:%d" % (socket.gethostname(), self.port)
+ mongod_options["source"] = self.master.get_internal_connection_string()
mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "slave")
return self._new_mongod(mongod_logger, mongod_options)
+
+ def get_internal_connection_string(self):
+ return self.master.get_internal_connection_string()
+
+ def get_driver_connection_url(self):
+ return self.master.get_driver_connection_url()
diff --git a/buildscripts/resmokelib/testing/fixtures/replicaset.py b/buildscripts/resmokelib/testing/fixtures/replicaset.py
index b47898e8d9e..a32198b8357 100644
--- a/buildscripts/resmokelib/testing/fixtures/replicaset.py
+++ b/buildscripts/resmokelib/testing/fixtures/replicaset.py
@@ -30,7 +30,9 @@ class ReplicaSetFixture(interface.ReplFixture):
preserve_dbpath=False,
num_nodes=2,
auth_options=None,
- replset_config_options=None):
+ replset_config_options=None,
+ voting_secondaries=False,
+ use_replica_set_connection_string=False):
interface.ReplFixture.__init__(self, logger, job_num)
@@ -40,6 +42,8 @@ class ReplicaSetFixture(interface.ReplFixture):
self.num_nodes = num_nodes
self.auth_options = auth_options
self.replset_config_options = utils.default_if_none(replset_config_options, {})
+ self.voting_secondaries = voting_secondaries
+ self.use_replica_set_connection_string = use_replica_set_connection_string
# The dbpath in mongod_options is used as the dbpath prefix for replica set members and
# takes precedence over other settings. The ShardedClusterFixture uses this parameter to
@@ -78,10 +82,13 @@ class ReplicaSetFixture(interface.ReplFixture):
# Initiate the replica set.
members = []
for (i, node) in enumerate(self.nodes):
- member_info = {"_id": i, "host": node.get_connection_string()}
+ member_info = {"_id": i, "host": node.get_internal_connection_string()}
if i > 0:
member_info["priority"] = 0
- member_info["votes"] = 0
+ if i >= 7 or not self.voting_secondaries:
+ # Only 7 nodes in a replica set can vote, so the other members must still be
+ # non-voting when this fixture is configured to have voting secondaries.
+ member_info["votes"] = 0
members.append(member_info)
initiate_cmd_obj = {"replSetInitiate": {"_id": self.replset_name, "members": members}}
@@ -182,9 +189,23 @@ class ReplicaSetFixture(interface.ReplFixture):
return logging.loggers.new_logger(logger_name, parent=self.logger)
- def get_connection_string(self):
+ def get_internal_connection_string(self):
if self.replset_name is None:
- raise ValueError("Must call setup() before calling get_connection_string()")
+ raise ValueError("Must call setup() before calling get_internal_connection_string()")
- conn_strs = [node.get_connection_string() for node in self.nodes]
+ conn_strs = [node.get_internal_connection_string() for node in self.nodes]
return self.replset_name + "/" + ",".join(conn_strs)
+
+ def get_driver_connection_url(self):
+ if self.replset_name is None:
+ raise ValueError("Must call setup() before calling get_driver_connection_url()")
+
+ if self.use_replica_set_connection_string:
+ # The mongo shell requires the database name when specifying a replica set connection
+ # string, so we hardcode "test" because that's the default database anyway.
+ conn_strs = [node.get_internal_connection_string() for node in self.nodes]
+ return "mongodb://" + ",".join(conn_strs) + "/test?replicaSet=" + self.replset_name
+ else:
+            # We return a direct connection to the expected primary when only the first node is
+ # electable because we want the client to error out if a stepdown occurs.
+ return self.nodes[0].get_driver_connection_url()
diff --git a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
index 282319a235c..d45ed2cfa01 100644
--- a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
+++ b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
@@ -170,6 +170,15 @@ class ShardedClusterFixture(interface.Fixture):
all(shard.is_running() for shard in self.shards) and
self.mongos is not None and self.mongos.is_running())
+ def get_internal_connection_string(self):
+ if self.mongos is None:
+ raise ValueError("Must call setup() before calling get_internal_connection_string()")
+
+ return self.mongos.get_internal_connection_string()
+
+ def get_driver_connection_url(self):
+ return "mongodb://" + self.get_internal_connection_string()
+
def _new_configsvr(self):
"""
Returns a replicaset.ReplicaSetFixture configured to be used as
@@ -222,16 +231,11 @@ class ShardedClusterFixture(interface.Fixture):
mongos_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
mongos_options = copy.deepcopy(self.mongos_options)
- configdb_hostname = socket.gethostname()
if self.separate_configsvr:
- configdb_replset = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
- configdb_port = self.configsvr.port
- mongos_options["configdb"] = "%s/%s:%d" % (configdb_replset,
- configdb_hostname,
- configdb_port)
+ mongos_options["configdb"] = self.configsvr.get_internal_connection_string()
else:
- mongos_options["configdb"] = "%s:%d" % (configdb_hostname, self.shards[0].port)
+ mongos_options["configdb"] = "%s:%d" % (socket.gethostname(), self.shards[0].port)
return _MongoSFixture(mongos_logger,
self.job_num,
@@ -247,9 +251,9 @@ class ShardedClusterFixture(interface.Fixture):
for more details.
"""
- hostname = socket.gethostname()
- self.logger.info("Adding %s:%d as a shard..." % (hostname, shard.port))
- client.admin.command({"addShard": "%s:%d" % (hostname, shard.port)})
+ connection_string = shard.get_internal_connection_string()
+ self.logger.info("Adding %s as a shard...", connection_string)
+ client.admin.command({"addShard": connection_string})
class _MongoSFixture(interface.Fixture):
@@ -351,3 +355,12 @@ class _MongoSFixture(interface.Fixture):
def is_running(self):
return self.mongos is not None and self.mongos.poll() is None
+
+ def get_internal_connection_string(self):
+ if self.mongos is None:
+ raise ValueError("Must call setup() before calling get_internal_connection_string()")
+
+ return "%s:%d" % (socket.gethostname(), self.port)
+
+ def get_driver_connection_url(self):
+ return "mongodb://" + self.get_internal_connection_string()
diff --git a/buildscripts/resmokelib/testing/fixtures/standalone.py b/buildscripts/resmokelib/testing/fixtures/standalone.py
index edb38177fa2..7e44aa2db72 100644
--- a/buildscripts/resmokelib/testing/fixtures/standalone.py
+++ b/buildscripts/resmokelib/testing/fixtures/standalone.py
@@ -178,8 +178,11 @@ class MongoDFixture(interface.Fixture):
def is_running(self):
return self.mongod is not None and self.mongod.poll() is None
- def get_connection_string(self):
+ def get_internal_connection_string(self):
if self.mongod is None:
- raise ValueError("Must call setup() before calling get_connection_string()")
+ raise ValueError("Must call setup() before calling get_internal_connection_string()")
return "%s:%d" % (socket.gethostname(), self.port)
+
+ def get_driver_connection_url(self):
+ return "mongodb://" + self.get_internal_connection_string()
diff --git a/buildscripts/resmokelib/testing/testcases.py b/buildscripts/resmokelib/testing/testcases.py
index de63c171088..0f6dfb8b447 100644
--- a/buildscripts/resmokelib/testing/testcases.py
+++ b/buildscripts/resmokelib/testing/testcases.py
@@ -173,7 +173,7 @@ class CPPIntegrationTestCase(TestCase):
def configure(self, fixture):
TestCase.configure(self, fixture)
- self.program_options["connectionString"] = self.fixture.get_connection_string()
+ self.program_options["connectionString"] = self.fixture.get_internal_connection_string()
def run_test(self):
try:
@@ -279,6 +279,7 @@ class JSTestCase(TestCase):
js_filename,
shell_executable=None,
shell_options=None,
+ use_connection_string=False,
test_kind="JSTest"):
"Initializes the JSTestCase with the JS file to run."
@@ -289,6 +290,7 @@ class JSTestCase(TestCase):
self.js_filename = js_filename
self.shell_options = utils.default_if_none(shell_options, {}).copy()
+ self.use_connection_string = use_connection_string
def configure(self, fixture):
TestCase.configure(self, fixture)
@@ -349,10 +351,16 @@ class JSTestCase(TestCase):
raise
def _make_process(self):
- return core.programs.mongo_shell_program(self.logger,
- executable=self.shell_executable,
- filename=self.js_filename,
- **self.shell_options)
+ connection_string = None
+ if self.use_connection_string:
+ connection_string = self.fixture.get_driver_connection_url()
+
+ return core.programs.mongo_shell_program(
+ self.logger,
+ executable=self.shell_executable,
+ filename=self.js_filename,
+ connection_string=connection_string,
+ **self.shell_options)
class MongosTestCase(TestCase):
diff --git a/jstests/core/compact_keeps_indexes.js b/jstests/core/compact_keeps_indexes.js
index f2da7597cdf..d112dac3d61 100644
--- a/jstests/core/compact_keeps_indexes.js
+++ b/jstests/core/compact_keeps_indexes.js
@@ -1,6 +1,8 @@
// SERVER-16676 Make sure compact doesn't leave the collection with bad indexes
// SERVER-16967 Make sure compact doesn't crash while collections are being dropped
// in a different database.
+//
+// @tags: [requires_parallel_shell]
(function() {
'use strict';
diff --git a/jstests/core/count10.js b/jstests/core/count10.js
index 2a1853c399a..453775c97f5 100644
--- a/jstests/core/count10.js
+++ b/jstests/core/count10.js
@@ -1,4 +1,11 @@
// Test that interrupting a count returns an error code.
+//
+// @tags: [
+// # This test attempts to perform a count command and find it using the currentOp command. The
+// # former operation may be routed to a secondary in the replica set, whereas the latter must be
+// # routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
t = db.count10;
t.drop();
diff --git a/jstests/core/count_plan_summary.js b/jstests/core/count_plan_summary.js
index 48891d21e8e..365f289c457 100644
--- a/jstests/core/count_plan_summary.js
+++ b/jstests/core/count_plan_summary.js
@@ -1,5 +1,11 @@
-// Test that the plan summary string appears in db.currentOp() for
-// count operations. SERVER-14064.
+// Test that the plan summary string appears in db.currentOp() for count operations. SERVER-14064.
+//
+// @tags: [
+// # This test attempts to perform a find command and find it using the currentOp command. The
+// # former operation may be routed to a secondary in the replica set, whereas the latter must be
+// # routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_count_plan_summary;
t.drop();
diff --git a/jstests/core/currentop.js b/jstests/core/currentop.js
index 296ad69355c..082554c9ec8 100644
--- a/jstests/core/currentop.js
+++ b/jstests/core/currentop.js
@@ -1,5 +1,7 @@
/**
* Tests that long-running operations show up in currentOp and report the locks they are holding.
+ *
+ * @tags: [requires_parallel_shell]
*/
(function() {
"use strict";
diff --git a/jstests/core/cursora.js b/jstests/core/cursora.js
index dfd9e28f281..ae281c2002f 100644
--- a/jstests/core/cursora.js
+++ b/jstests/core/cursora.js
@@ -1,3 +1,4 @@
+// @tags: [requires_parallel_shell]
t = db.cursora;
function run(n, atomic) {
diff --git a/jstests/core/distinct3.js b/jstests/core/distinct3.js
index 6ab21599f97..c36afcc37ec 100644
--- a/jstests/core/distinct3.js
+++ b/jstests/core/distinct3.js
@@ -1,4 +1,6 @@
// Yield and delete test case for query optimizer cursor. SERVER-4401
+//
+// @tags: [requires_parallel_shell]
t = db.jstests_distinct3;
t.drop();
diff --git a/jstests/core/explain3.js b/jstests/core/explain3.js
index 64db7686699..d2145bc1fb9 100644
--- a/jstests/core/explain3.js
+++ b/jstests/core/explain3.js
@@ -1,4 +1,8 @@
-/** SERVER-2451 Kill cursor while explain is yielding */
+/**
+ * SERVER-2451 Kill cursor while explain is yielding
+ *
+ * @tags: [requires_parallel_shell]
+ */
t = db.jstests_explain3;
t.drop();
diff --git a/jstests/core/find_and_modify_concurrent_update.js b/jstests/core/find_and_modify_concurrent_update.js
index 3986ac62ea9..fe52016623d 100644
--- a/jstests/core/find_and_modify_concurrent_update.js
+++ b/jstests/core/find_and_modify_concurrent_update.js
@@ -1,5 +1,7 @@
// Ensures that find and modify will not apply an update to a document which, due to a concurrent
// modification, no longer matches the query predicate.
+//
+// @tags: [requires_parallel_shell]
(function() {
"use strict";
diff --git a/jstests/core/geo_s2cursorlimitskip.js b/jstests/core/geo_s2cursorlimitskip.js
index 868b57de39f..9eb580edd25 100644
--- a/jstests/core/geo_s2cursorlimitskip.js
+++ b/jstests/core/geo_s2cursorlimitskip.js
@@ -1,4 +1,11 @@
// Test various cursor behaviors
+//
+// @tags: [
+// # This test attempts to enable profiling on a server and then get profiling data by reading
+// # from the "system.profile" collection. The former operation must be routed to the primary in
+// # a replica set, whereas the latter may be routed to a secondary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.geo_s2getmmm;
t.drop();
t.ensureIndex({geo: "2dsphere"});
diff --git a/jstests/core/getlog2.js b/jstests/core/getlog2.js
index b6cf223b967..29b6d299123 100644
--- a/jstests/core/getlog2.js
+++ b/jstests/core/getlog2.js
@@ -1,4 +1,11 @@
// tests getlog as well as slow querying logging
+//
+// @tags: [
+// # This test attempts to perform a find command and see that it ran using the getLog command.
+// # The former operation may be routed to a secondary in the replica set, whereas the latter must
+// # be routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
glcol = db.getLogTest2;
glcol.drop();
diff --git a/jstests/core/index_filter_commands.js b/jstests/core/index_filter_commands.js
index 027731e97cf..641edf426a7 100644
--- a/jstests/core/index_filter_commands.js
+++ b/jstests/core/index_filter_commands.js
@@ -6,20 +6,24 @@
* Displays index filters for all query shapes in a collection.
*
* - planCacheClearFilters
- * Clears index filter for a single query shape or,
- * if the query shape is omitted, all filters for the collection.
+ * Clears index filter for a single query shape or, if the query shape is omitted, all filters for
+ * the collection.
*
* - planCacheSetFilter
* Sets index filter for a query shape. Overrides existing filter.
*
- * Not a lot of data access in this test suite. Hint commands
- * manage a non-persistent mapping in the server of
- * query shape to list of index specs.
+ * Not a lot of data access in this test suite. Hint commands manage a non-persistent mapping in the
+ * server of query shape to list of index specs.
*
- * Only time we might need to execute a query is to check the plan
- * cache state. We would do this with the planCacheListPlans command
- * on the same query shape with the index filters.
+ * Only time we might need to execute a query is to check the plan cache state. We would do this
+ * with the planCacheListPlans command on the same query shape with the index filters.
*
+ * @tags: [
+ * # This test attempts to perform queries with plan cache filters set up. The former operation
+ * # may be routed to a secondary in the replica set, whereas the latter must be routed to the
+ * # primary.
+ * assumes_read_preference_unchanged,
+ * ]
*/
var t = db.jstests_index_filter_commands;
diff --git a/jstests/core/index_stats.js b/jstests/core/index_stats.js
index 7db4559210c..508c8d25183 100644
--- a/jstests/core/index_stats.js
+++ b/jstests/core/index_stats.js
@@ -1,3 +1,10 @@
+// @tags: [
+// # This test attempts to perform write operations and get index usage statistics using the
+// # $indexStats stage. The former operation must be routed to the primary in a replica set,
+// # whereas the latter may be routed to a secondary.
+// assumes_read_preference_unchanged,
+// ]
+
(function() {
"use strict";
diff --git a/jstests/core/killop.js b/jstests/core/killop.js
index 66476ec10f4..4cbc5d54dde 100644
--- a/jstests/core/killop.js
+++ b/jstests/core/killop.js
@@ -10,6 +10,8 @@
* terminate until the server determines that they've spent too much time in JS execution, typically
* after 30 seconds of wall clock time have passed. For these operations to take a long time, the
* counted collection must not be empty; hence an initial write to the collection is required.
+ *
+ * @tags: [requires_parallel_shell]
*/
t = db.jstests_killop;
@@ -73,4 +75,4 @@ jsTestLog("Waiting for ops to terminate");
// don't want to pass if timeout killed the js function.
var end = new Date();
var diff = end - start;
-assert.lt(diff, 30000, "Start: " + start + "; end: " + end + "; diff: " + diff); \ No newline at end of file
+assert.lt(diff, 30000, "Start: " + start + "; end: " + end + "; diff: " + diff);
diff --git a/jstests/core/killop_drop_collection.js b/jstests/core/killop_drop_collection.js
index 7138ee8eda6..c262d1b4ffd 100644
--- a/jstests/core/killop_drop_collection.js
+++ b/jstests/core/killop_drop_collection.js
@@ -3,6 +3,8 @@
* to complete. Interrupting a collection drop could leave the database in an inconsistent state.
* This test confirms that killOp won't interrupt a collection drop, and that the drop occurs
* successfully.
+ *
+ * @tags: [requires_parallel_shell]
*/
(function() {
"use strict";
diff --git a/jstests/core/loadserverscripts.js b/jstests/core/loadserverscripts.js
index daf87b2475b..1c382d27ad4 100644
--- a/jstests/core/loadserverscripts.js
+++ b/jstests/core/loadserverscripts.js
@@ -1,5 +1,6 @@
-
// Test db.loadServerScripts()
+//
+// @tags: [requires_parallel_shell]
var testdb = db.getSisterDB("loadserverscripts");
diff --git a/jstests/core/max_time_ms.js b/jstests/core/max_time_ms.js
index 39fce0fc9ca..5147201cacc 100644
--- a/jstests/core/max_time_ms.js
+++ b/jstests/core/max_time_ms.js
@@ -1,4 +1,11 @@
// Tests query/command option $maxTimeMS.
+//
+// @tags: [
+// # This test attempts to perform read operations after having enabled the maxTimeAlwaysTimeOut
+// # failpoint. The former operations may be routed to a secondary in the replica set, whereas the
+// # latter must be routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.max_time_ms;
var exceededTimeLimit = 50; // ErrorCodes::ExceededTimeLimit
diff --git a/jstests/core/mr_killop.js b/jstests/core/mr_killop.js
index 186424b1db9..437a02511de 100644
--- a/jstests/core/mr_killop.js
+++ b/jstests/core/mr_killop.js
@@ -1,4 +1,6 @@
// Test killop applied to m/r operations and child ops of m/r operations.
+//
+// @tags: [requires_parallel_shell]
t = db.jstests_mr_killop;
t.drop();
diff --git a/jstests/core/mr_optim.js b/jstests/core/mr_optim.js
index 7437753ca67..1c525ae3de3 100644
--- a/jstests/core/mr_optim.js
+++ b/jstests/core/mr_optim.js
@@ -3,8 +3,17 @@
t = db.mr_optim;
t.drop();
+// We drop the output collection to ensure the test can be run multiple times successfully. We
+// explicitly avoid using the DBCollection#drop() shell helper to avoid implicitly sharding the
+// collection during the sharded_collections_jscore_passthrough.yml test suite when reading the
+// results from the output collection in the reformat() function.
+var res = db.runCommand({drop: "mr_optim_out"});
+if (res.ok !== 1) {
+ assert.commandFailedWithCode(res, ErrorCodes.NamespaceNotFound);
+}
+
for (var i = 0; i < 1000; ++i) {
- t.save({a: Math.random(1000), b: Math.random(10000)});
+ assert.writeOK(t.save({a: Math.random(1000), b: Math.random(10000)}));
}
function m() {
@@ -21,7 +30,7 @@ function reformat(r) {
if (r.results)
cursor = r.results;
else
- cursor = r.find();
+ cursor = r.find().sort({_id: 1});
cursor.forEach(function(z) {
x[z._id] = z.value;
});
@@ -43,4 +52,4 @@ res.drop();
assert.eq(x, x2, "object from inline and collection are not equal");
-t.drop(); \ No newline at end of file
+t.drop();
diff --git a/jstests/core/notablescan.js b/jstests/core/notablescan.js
index 80306c08cf2..bb4c170a603 100644
--- a/jstests/core/notablescan.js
+++ b/jstests/core/notablescan.js
@@ -1,4 +1,11 @@
// check notablescan mode
+//
+// @tags: [
+// # This test attempts to perform read operations after having enabled the notablescan server
+// # parameter. The former operations may be routed to a secondary in the replica set, whereas the
+// # latter must be routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
t = db.test_notablescan;
t.drop();
diff --git a/jstests/core/plan_cache_clear.js b/jstests/core/plan_cache_clear.js
index 8f9cf0ea302..778239616b5 100644
--- a/jstests/core/plan_cache_clear.js
+++ b/jstests/core/plan_cache_clear.js
@@ -1,5 +1,12 @@
// Test clearing of the plan cache, either manually through the planCacheClear command,
// or due to system events such as an index build.
+//
+// @tags: [
+// # This test attempts to perform queries and introspect/manipulate the server's plan cache
+// # entries. The former operation may be routed to a secondary in the replica set, whereas the
+// # latter must be routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_plan_cache_clear;
t.drop();
diff --git a/jstests/core/plan_cache_list_plans.js b/jstests/core/plan_cache_list_plans.js
index b4be4ad46c4..14cf9c97c28 100644
--- a/jstests/core/plan_cache_list_plans.js
+++ b/jstests/core/plan_cache_list_plans.js
@@ -1,4 +1,11 @@
// Test the planCacheListPlans command.
+//
+// @tags: [
+// # This test attempts to perform queries and introspect the server's plan cache entries. The
+// # former operation may be routed to a secondary in the replica set, whereas the latter must be
+// # routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_plan_cache_list_plans;
t.drop();
diff --git a/jstests/core/plan_cache_list_shapes.js b/jstests/core/plan_cache_list_shapes.js
index 4711940870d..f0061459968 100644
--- a/jstests/core/plan_cache_list_shapes.js
+++ b/jstests/core/plan_cache_list_shapes.js
@@ -1,5 +1,12 @@
// Test the planCacheListQueryShapes command, which returns a list of query shapes
// for the queries currently cached in the collection.
+//
+// @tags: [
+//   # This test attempts to perform queries and introspect the server's plan cache entries. The
+//   # former operation may be routed to a secondary in the replica set, whereas the latter must
+//   # be routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_plan_cache_list_shapes;
t.drop();
diff --git a/jstests/core/plan_cache_shell_helpers.js b/jstests/core/plan_cache_shell_helpers.js
index a61421afc7b..f2ac0e7051f 100644
--- a/jstests/core/plan_cache_shell_helpers.js
+++ b/jstests/core/plan_cache_shell_helpers.js
@@ -1,4 +1,11 @@
// Test the shell helpers which wrap the plan cache commands.
+//
+// @tags: [
+// # This test attempts to perform queries and introspect the server's plan cache entries. The
+// # former operation may be routed to a secondary in the replica set, whereas the latter must be
+// # routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_plan_cache_shell_helpers;
t.drop();
diff --git a/jstests/core/queryoptimizer3.js b/jstests/core/queryoptimizer3.js
index 4bc3754ff7a..b83a02cbf80 100644
--- a/jstests/core/queryoptimizer3.js
+++ b/jstests/core/queryoptimizer3.js
@@ -1,4 +1,6 @@
// Check cases where index scans are aborted due to the collection being dropped. SERVER-4400
+//
+// @tags: [requires_parallel_shell]
t = db.jstests_queryoptimizer3;
t.drop();
diff --git a/jstests/core/remove9.js b/jstests/core/remove9.js
index 9b7b2f31190..ed0c8f45130 100644
--- a/jstests/core/remove9.js
+++ b/jstests/core/remove9.js
@@ -1,4 +1,6 @@
// SERVER-2009 Count odd numbered entries while updating and deleting even numbered entries.
+//
+// @tags: [requires_parallel_shell]
t = db.jstests_remove9;
t.drop();
diff --git a/jstests/core/removeb.js b/jstests/core/removeb.js
index 2141e138254..aba5976ccf7 100644
--- a/jstests/core/removeb.js
+++ b/jstests/core/removeb.js
@@ -1,4 +1,6 @@
// Test removal of Records that have been reused since the remove operation began. SERVER-5198
+//
+// @tags: [requires_parallel_shell]
t = db.jstests_removeb;
t.drop();
diff --git a/jstests/core/removec.js b/jstests/core/removec.js
index b4fe09ef970..15f62bc206d 100644
--- a/jstests/core/removec.js
+++ b/jstests/core/removec.js
@@ -1,4 +1,6 @@
// Sanity test for removing documents with adjacent index keys. SERVER-2008
+//
+// @tags: [requires_parallel_shell]
t = db.jstests_removec;
t.drop();
diff --git a/jstests/core/shellstartparallel.js b/jstests/core/shellstartparallel.js
index 7e288e0d589..f92c1d507dd 100644
--- a/jstests/core/shellstartparallel.js
+++ b/jstests/core/shellstartparallel.js
@@ -1,3 +1,4 @@
+// @tags: [requires_parallel_shell]
function f() {
throw Error("intentional_throw_to_test_assert_throws");
}
diff --git a/jstests/core/startup_log.js b/jstests/core/startup_log.js
index 3b0cbe3464d..c73013d1744 100644
--- a/jstests/core/startup_log.js
+++ b/jstests/core/startup_log.js
@@ -1,101 +1,108 @@
-load('jstests/aggregation/extras/utils.js');
-
-(function() {
- 'use strict';
-
- // Check that smallArray is entirely contained by largeArray
- // returns false if a member of smallArray is not in largeArray
- function arrayIsSubset(smallArray, largeArray) {
- for (var i = 0; i < smallArray.length; i++) {
- if (!Array.contains(largeArray, smallArray[i])) {
- print("Could not find " + smallArray[i] + " in largeArray");
- return false;
- }
- }
-
- return true;
- }
-
- // Test startup_log
- var stats = db.getSisterDB("local").startup_log.stats();
- assert(stats.capped);
-
- var latestStartUpLog =
- db.getSisterDB("local").startup_log.find().sort({$natural: -1}).limit(1).next();
- var serverStatus = db._adminCommand("serverStatus");
- var cmdLine = db._adminCommand("getCmdLineOpts").parsed;
-
- // Test that the startup log has the expected keys
- var verbose = false;
- var expectedKeys =
- ["_id", "hostname", "startTime", "startTimeLocal", "cmdLine", "pid", "buildinfo"];
- var keys = Object.keySet(latestStartUpLog);
- assert(arrayEq(expectedKeys, keys, verbose), 'startup_log keys failed');
-
- // Tests _id implicitly - should be comprised of host-timestamp
- // Setup expected startTime and startTimeLocal from the supplied timestamp
- var _id = latestStartUpLog._id.split('-'); // _id should consist of host-timestamp
- var _idUptime = _id.pop();
- var _idHost = _id.join('-');
- var uptimeSinceEpochRounded = Math.floor(_idUptime / 1000) * 1000;
- var startTime = new Date(uptimeSinceEpochRounded); // Expected startTime
-
- assert.eq(_idHost, latestStartUpLog.hostname, "Hostname doesn't match one from _id");
- assert.eq(serverStatus.host.split(':')[0],
- latestStartUpLog.hostname,
- "Hostname doesn't match one in server status");
- assert.closeWithinMS(startTime,
- latestStartUpLog.startTime,
- "StartTime doesn't match one from _id",
- 2000); // Expect less than 2 sec delta
- assert.eq(cmdLine, latestStartUpLog.cmdLine, "cmdLine doesn't match that from getCmdLineOpts");
- assert.eq(serverStatus.pid, latestStartUpLog.pid, "pid doesn't match that from serverStatus");
-
- // Test buildinfo
- var buildinfo = db.runCommand("buildinfo");
- delete buildinfo.ok; // Delete extra meta info not in startup_log
- var isMaster = db._adminCommand("ismaster");
-
- // Test buildinfo has the expected keys
- var expectedKeys = [
- "version",
- "gitVersion",
- "allocator",
- "versionArray",
- "javascriptEngine",
- "openssl",
- "buildEnvironment",
- "debug",
- "maxBsonObjectSize",
- "bits",
- "modules"
- ];
-
- var keys = Object.keySet(latestStartUpLog.buildinfo);
- // Disabled to check
- assert(arrayIsSubset(expectedKeys, keys),
- "buildinfo keys failed! \n expected:\t" + expectedKeys + "\n actual:\t" + keys);
- assert.eq(buildinfo,
- latestStartUpLog.buildinfo,
- "buildinfo doesn't match that from buildinfo command");
-
- // Test version and version Array
- var version = latestStartUpLog.buildinfo.version.split('-')[0];
- var versionArray = latestStartUpLog.buildinfo.versionArray;
- var versionArrayCleaned = versionArray.slice(0, 3);
- if (versionArray[3] == -100) {
- versionArrayCleaned[2] -= 1;
- }
-
- assert.eq(serverStatus.version,
- latestStartUpLog.buildinfo.version,
- "Mongo version doesn't match that from ServerStatus");
- assert.eq(
- version, versionArrayCleaned.join('.'), "version doesn't match that from the versionArray");
- var jsEngine = latestStartUpLog.buildinfo.javascriptEngine;
- assert((jsEngine == "none") || jsEngine.startsWith("mozjs"));
- assert.eq(isMaster.maxBsonObjectSize,
- latestStartUpLog.buildinfo.maxBsonObjectSize,
- "maxBsonObjectSize doesn't match one from ismaster");
-
-})();
+/**
+ * This test attempts to read from the "local.startup_log" collection and assert that it has an
+ * entry matching the server's response from the "getCmdLineOpts" command. The former operation may
+ * be routed to a secondary in the replica set, whereas the latter must be routed to the primary.
+ *
+ * @tags: [assumes_read_preference_unchanged]
+ */
+load('jstests/aggregation/extras/utils.js');
+
+(function() {
+ 'use strict';
+
+ // Check that smallArray is entirely contained by largeArray
+ // returns false if a member of smallArray is not in largeArray
+ function arrayIsSubset(smallArray, largeArray) {
+ for (var i = 0; i < smallArray.length; i++) {
+ if (!Array.contains(largeArray, smallArray[i])) {
+ print("Could not find " + smallArray[i] + " in largeArray");
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ // Test startup_log
+ var stats = db.getSisterDB("local").startup_log.stats();
+ assert(stats.capped);
+
+ var latestStartUpLog =
+ db.getSisterDB("local").startup_log.find().sort({$natural: -1}).limit(1).next();
+ var serverStatus = db._adminCommand("serverStatus");
+ var cmdLine = db._adminCommand("getCmdLineOpts").parsed;
+
+ // Test that the startup log has the expected keys
+ var verbose = false;
+ var expectedKeys =
+ ["_id", "hostname", "startTime", "startTimeLocal", "cmdLine", "pid", "buildinfo"];
+ var keys = Object.keySet(latestStartUpLog);
+ assert(arrayEq(expectedKeys, keys, verbose), 'startup_log keys failed');
+
+ // Tests _id implicitly - should be comprised of host-timestamp
+ // Setup expected startTime and startTimeLocal from the supplied timestamp
+ var _id = latestStartUpLog._id.split('-'); // _id should consist of host-timestamp
+ var _idUptime = _id.pop();
+ var _idHost = _id.join('-');
+ var uptimeSinceEpochRounded = Math.floor(_idUptime / 1000) * 1000;
+ var startTime = new Date(uptimeSinceEpochRounded); // Expected startTime
+
+ assert.eq(_idHost, latestStartUpLog.hostname, "Hostname doesn't match one from _id");
+ assert.eq(serverStatus.host.split(':')[0],
+ latestStartUpLog.hostname,
+ "Hostname doesn't match one in server status");
+ assert.closeWithinMS(startTime,
+ latestStartUpLog.startTime,
+ "StartTime doesn't match one from _id",
+ 2000); // Expect less than 2 sec delta
+ assert.eq(cmdLine, latestStartUpLog.cmdLine, "cmdLine doesn't match that from getCmdLineOpts");
+ assert.eq(serverStatus.pid, latestStartUpLog.pid, "pid doesn't match that from serverStatus");
+
+ // Test buildinfo
+ var buildinfo = db.runCommand("buildinfo");
+ delete buildinfo.ok; // Delete extra meta info not in startup_log
+ var isMaster = db._adminCommand("ismaster");
+
+ // Test buildinfo has the expected keys
+ var expectedKeys = [
+ "version",
+ "gitVersion",
+ "allocator",
+ "versionArray",
+ "javascriptEngine",
+ "openssl",
+ "buildEnvironment",
+ "debug",
+ "maxBsonObjectSize",
+ "bits",
+ "modules"
+ ];
+
+ var keys = Object.keySet(latestStartUpLog.buildinfo);
+ // Disabled to check
+ assert(arrayIsSubset(expectedKeys, keys),
+ "buildinfo keys failed! \n expected:\t" + expectedKeys + "\n actual:\t" + keys);
+ assert.eq(buildinfo,
+ latestStartUpLog.buildinfo,
+ "buildinfo doesn't match that from buildinfo command");
+
+ // Test version and version Array
+ var version = latestStartUpLog.buildinfo.version.split('-')[0];
+ var versionArray = latestStartUpLog.buildinfo.versionArray;
+ var versionArrayCleaned = versionArray.slice(0, 3);
+ if (versionArray[3] == -100) {
+ versionArrayCleaned[2] -= 1;
+ }
+
+ assert.eq(serverStatus.version,
+ latestStartUpLog.buildinfo.version,
+ "Mongo version doesn't match that from ServerStatus");
+ assert.eq(
+ version, versionArrayCleaned.join('.'), "version doesn't match that from the versionArray");
+ var jsEngine = latestStartUpLog.buildinfo.javascriptEngine;
+ assert((jsEngine == "none") || jsEngine.startsWith("mozjs"));
+ assert.eq(isMaster.maxBsonObjectSize,
+ latestStartUpLog.buildinfo.maxBsonObjectSize,
+ "maxBsonObjectSize doesn't match one from ismaster");
+
+})();
diff --git a/jstests/core/top.js b/jstests/core/top.js
index 1aff2a4136b..a8f72091896 100644
--- a/jstests/core/top.js
+++ b/jstests/core/top.js
@@ -1,5 +1,11 @@
/**
* 1. check top numbers are correct
+ *
+ * This test attempts to perform read operations and get statistics using the top command. The
+ * former operation may be routed to a secondary in the replica set, whereas the latter must be
+ * routed to the primary.
+ *
+ * @tags: [assumes_read_preference_unchanged]
*/
var name = "toptest";
diff --git a/jstests/core/updatef.js b/jstests/core/updatef.js
index 6bc8df4e0c1..925e718bbd9 100644
--- a/jstests/core/updatef.js
+++ b/jstests/core/updatef.js
@@ -1,4 +1,6 @@
// Test unsafe management of nsdt on update command yield SERVER-3208
+//
+// @tags: [requires_parallel_shell]
prefixNS = db.jstests_updatef;
prefixNS.save({});
diff --git a/jstests/libs/override_methods/set_read_and_write_concerns.js b/jstests/libs/override_methods/set_read_and_write_concerns.js
index 30c853fb54b..267b5ab9bfc 100644
--- a/jstests/libs/override_methods/set_read_and_write_concerns.js
+++ b/jstests/libs/override_methods/set_read_and_write_concerns.js
@@ -57,6 +57,31 @@
"updateUser",
]);
+ const kCommandsToEmulateWriteConcern = new Set([
+ "aggregate",
+ "appendOplogNote",
+ "captrunc",
+ "cleanupOrphaned",
+ "clone",
+ "cloneCollection",
+ "cloneCollectionAsCapped",
+ "convertToCapped",
+ "copydb",
+ "create",
+ "createIndexes",
+ "drop",
+ "dropDatabase",
+ "dropIndexes",
+ "emptycapped",
+ "godinsert",
+ "mapReduce",
+ "mapreduce",
+ "mapreduce.shardedfinish",
+ "moveChunk",
+ "renameCollection",
+ "revokePrivilegesFromRole",
+ ]);
+
function runCommandWithReadAndWriteConcerns(
conn, dbName, commandName, commandObj, func, makeFuncArgs) {
if (typeof commandObj !== "object" || commandObj === null) {
@@ -79,10 +104,7 @@
var shouldForceReadConcern = kCommandsSupportingReadConcern.has(commandName);
var shouldForceWriteConcern = kCommandsSupportingWriteConcern.has(commandName);
- var shouldEmulateWriteConcern =
- (commandName === "aggregate" || commandName === "createIndexes" ||
- commandName === "mapReduce" || commandName === "mapreduce" ||
- commandName === "mapreduce.shardedfinish");
+ var shouldEmulateWriteConcern = kCommandsToEmulateWriteConcern.has(commandName);
if (commandName === "aggregate") {
if (OverrideHelpers.isAggregationWithOutStage(commandName, commandObjUnwrapped)) {
diff --git a/jstests/libs/override_methods/set_read_preference_secondary.js b/jstests/libs/override_methods/set_read_preference_secondary.js
new file mode 100644
index 00000000000..270bbf40272
--- /dev/null
+++ b/jstests/libs/override_methods/set_read_preference_secondary.js
@@ -0,0 +1,88 @@
+/**
+ * Use prototype overrides to set read preference to "secondary" when running tests.
+ */
+(function() {
+ "use strict";
+
+ load("jstests/libs/override_methods/override_helpers.js");
+
+ const kReadPreferenceSecondary = {
+ mode: "secondary"
+ };
+ const kCommandsSupportingReadPreference = new Set([
+ "aggregate",
+ "collStats",
+ "count",
+ "dbStats",
+ "distinct",
+ "find",
+ "geoNear",
+ "geoSearch",
+ "group",
+ "mapReduce",
+ "mapreduce",
+ "parallelCollectionScan",
+ ]);
+
+ function runCommandWithReadPreferenceSecondary(
+ conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
+
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ var commandObjUnwrapped = commandObj;
+ if (commandName === "query" || commandName === "$query") {
+ commandObjUnwrapped = commandObj[commandName];
+ commandName = Object.keys(commandObjUnwrapped)[0];
+ }
+
+ if (commandObj[commandName] === "system.profile") {
+ throw new Error("Cowardly refusing to run test with overridden read preference" +
+ " when it reads from a non-replicated collection: " +
+ tojson(commandObj));
+ }
+
+ var shouldForceReadPreference = kCommandsSupportingReadPreference.has(commandName);
+ if (OverrideHelpers.isAggregationWithOutStage(commandName, commandObjUnwrapped)) {
+ // An aggregation with a $out stage must be sent to the primary.
+ shouldForceReadPreference = false;
+ } else if ((commandName === "mapReduce" || commandName === "mapreduce") &&
+ !OverrideHelpers.isMapReduceWithInlineOutput(commandName, commandObjUnwrapped)) {
+ // A map-reduce operation with non-inline output must be sent to the primary.
+ shouldForceReadPreference = false;
+ }
+
+ if (shouldForceReadPreference) {
+ if (commandObj === commandObjUnwrapped) {
+ // We wrap the command object using a "query" field rather than a "$query" field to
+ // match the implementation of DB.prototype._attachReadPreferenceToCommand().
+ commandObj = {
+ query: commandObj
+ };
+ } else {
+ // We create a copy of 'commandObj' to avoid mutating the parameter the caller
+ // specified.
+ commandObj = Object.assign({}, commandObj);
+ }
+
+ if (commandObj.hasOwnProperty("$readPreference") &&
+ !bsonBinaryEqual({_: commandObj.$readPreference}, {_: kReadPreferenceSecondary})) {
+ throw new Error("Cowardly refusing to override read preference of command: " +
+ tojson(commandObj));
+ }
+
+ commandObj.$readPreference = kReadPreferenceSecondary;
+ }
+
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
+
+ startParallelShell = () => {throw new Error(
+        "Cowardly refusing to run a test that starts a parallel shell because prior to" +
+ " MongoDB 3.4 replica set connections couldn't be used in it.");
+ };
+
+ OverrideHelpers.overrideRunCommand(runCommandWithReadPreferenceSecondary);
+})();