author     Max Hirschhorn <max.hirschhorn@mongodb.com>    2018-02-14 17:27:55 -0500
committer  Max Hirschhorn <max.hirschhorn@mongodb.com>    2018-02-14 17:27:55 -0500
commit     7585ab8e5a5fd1b1c2f5926a98cf12387d717fa9 (patch)
tree       317e8a9862999dff5502b5f8cf2d50a4b562e489
parent     5217cd27eabdc090baecf25e9b6d33e5c2eee6ad (diff)
download   mongo-7585ab8e5a5fd1b1c2f5926a98cf12387d717fa9.tar.gz
SERVER-32691 Add write_concern_majority_passthrough.yml test suite.
Also adds support for using replica set connection strings in resmoke.py without making all nodes electable. (cherry picked from commit 264d971842cffdf8b4f80def1d90241f132345b7)
-rw-r--r--  buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml  77
-rw-r--r--  buildscripts/resmokelib/core/programs.py  17
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/interface.py  13
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/masterslave.py  9
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/replicaset.py  32
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/shardedcluster.py  33
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/standalone.py  10
-rw-r--r--  buildscripts/resmokelib/testing/testcases.py  14
-rw-r--r--  jstests/core/collation_plan_cache.js  9
-rw-r--r--  jstests/core/count10.js  7
-rw-r--r--  jstests/core/count_plan_summary.js  10
-rw-r--r--  jstests/core/geo_s2cursorlimitskip.js  7
-rw-r--r--  jstests/core/getlog2.js  7
-rw-r--r--  jstests/core/index_filter_commands.js  20
-rw-r--r--  jstests/core/index_stats.js  7
-rw-r--r--  jstests/core/max_time_ms.js  7
-rw-r--r--  jstests/core/mr_optim.js  15
-rw-r--r--  jstests/core/notablescan.js  7
-rw-r--r--  jstests/core/operation_latency_histogram.js  6
-rw-r--r--  jstests/core/plan_cache_clear.js  7
-rw-r--r--  jstests/core/plan_cache_list_plans.js  7
-rw-r--r--  jstests/core/plan_cache_list_shapes.js  7
-rw-r--r--  jstests/core/plan_cache_shell_helpers.js  7
-rw-r--r--  jstests/core/startup_log.js  209
-rw-r--r--  jstests/core/top.js  6
-rw-r--r--  jstests/core/views/views_all_commands.js  9
-rw-r--r--  jstests/core/views/views_stats.js  6
-rw-r--r--  jstests/libs/override_methods/set_read_preference_secondary.js  162
28 files changed, 574 insertions, 153 deletions
diff --git a/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml
new file mode 100644
index 00000000000..614e0834e67
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml
@@ -0,0 +1,77 @@
+test_kind: js_test
+
+selector:
+ js_test:
+ roots:
+ - jstests/core/**/*.js
+ exclude_files:
+ # These tests are not expected to pass with replica-sets:
+ - jstests/core/dbadmin.js
+ - jstests/core/opcounters_write_cmd.js
+ - jstests/core/read_after_optime.js
+ - jstests/core/capped_update.js
+ # The connection_string_validation.js test does not expect the mongo shell to be using a replica
+ # set connection string.
+ - jstests/core/connection_string_validation.js
+ # These tests attempt to read from the "system.profile" collection, which may be missing entries
+ # if a write was performed on the primary of the replica set instead.
+ - jstests/core/*profile*.js
+ # The shellkillop.js test spawns a parallel shell without using startParallelShell() and
+ # therefore doesn't inherit the w="majority" write concern when performing its writes.
+ - jstests/core/shellkillop.js
+ exclude_with_any_tags:
+ ##
+ # The next three tags correspond to the special errors thrown by the
+ # set_read_and_write_concerns.js override when it refuses to replace the readConcern or
+ # writeConcern of a particular command. Above each tag are the message(s) that cause the tag to
+ # be warranted.
+ ##
+ # "Cowardly refusing to override read concern of command: ..."
+ - assumes_read_concern_unchanged
+ # "Cowardly refusing to override write concern of command: ..."
+ - assumes_write_concern_unchanged
+ # "Cowardly refusing to run test with overridden write concern when it uses a command that can
+ # only perform w=1 writes: ..."
+ - requires_eval_command
+ ##
+ # The next tag corresponds to the special error thrown by the set_read_preference_secondary.js
+ # override when it refuses to replace the readPreference of a particular command. Above each tag
+ # are the message(s) that cause the tag to be warranted.
+ ##
+ # "Cowardly refusing to override read preference of command: ..."
+ # "Cowardly refusing to run test with overridden read preference when it reads from a
+ # non-replicated collection: ..."
+ - assumes_read_preference_unchanged
+
+executor:
+ js_test:
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ defaultReadConcernLevel: local
+ eval: >-
+ testingReplication = true;
+ load('jstests/libs/override_methods/set_read_and_write_concerns.js');
+ load('jstests/libs/override_methods/set_read_preference_secondary.js');
+ readMode: commands
+ hooks:
+ # The CheckReplDBHash hook waits until all operations have replicated to and have been applied
+ # on the secondaries, so we run the ValidateCollections hook after it to ensure we're
+ # validating the entire contents of the collection.
+ - class: CheckReplOplogs
+ - class: CheckReplDBHash
+ - class: ValidateCollections
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: ReplicaSetFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ numInitialSyncAttempts: 1
+ # This suite requires w="majority" writes to be applied on all secondaries. By using a 2-node
+ # replica set and having secondaries vote, the majority of the replica set is all nodes.
+ num_nodes: 2
+ voting_secondaries: true
+ use_replica_set_connection_string: true
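For orientation, the fixture block above corresponds to keyword arguments of ReplicaSetFixture (see the replicaset.py hunk further down). A rough sketch, assuming resmoke forwards fixture YAML keys as constructor arguments; the logger is a stand-in and nothing is started until setup() is called:

import logging
# Assumes the mongo repo root is on sys.path; sketch only, not how resmoke
# itself wires fixtures up.
from buildscripts.resmokelib.testing.fixtures.replicaset import ReplicaSetFixture

logger = logging.getLogger("write_concern_majority_passthrough")  # placeholder logger

fixture = ReplicaSetFixture(
    logger,
    0,  # job_num
    mongod_options={"set_parameters": {"enableTestCommands": 1,
                                       "numInitialSyncAttempts": 1}},
    num_nodes=2,                           # majority of 2 voting nodes is both nodes
    voting_secondaries=True,               # secondaries count toward the majority
    use_replica_set_connection_string=True,
)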
diff --git a/buildscripts/resmokelib/core/programs.py b/buildscripts/resmokelib/core/programs.py
index e4cadc8e006..d94cd438ee0 100644
--- a/buildscripts/resmokelib/core/programs.py
+++ b/buildscripts/resmokelib/core/programs.py
@@ -115,8 +115,8 @@ def mongos_program(logger, executable=None, process_kwargs=None, **kwargs):
return _process.Process(logger, args, **process_kwargs)
-def mongo_shell_program(logger, executable=None, filename=None, process_kwargs=None,
- isMainTest=True, **kwargs):
+def mongo_shell_program(logger, executable=None, connection_string=None, filename=None,
+ process_kwargs=None, isMainTest=True, **kwargs):
"""
Returns a Process instance that starts a mongo shell with arguments
constructed from 'kwargs'.
@@ -184,9 +184,22 @@ def mongo_shell_program(logger, executable=None, filename=None, process_kwargs=N
if config.SHELL_WRITE_MODE is not None:
kwargs["writeMode"] = config.SHELL_WRITE_MODE
+ if connection_string is not None:
+ # The --host and --port options are ignored by the mongo shell when an explicit connection
+ # string is specified. We remove these options to avoid any ambiguity with what server the
+ # logged mongo shell invocation will connect to.
+ if "port" in kwargs:
+ kwargs.pop("port")
+
+ if "host" in kwargs:
+ kwargs.pop("host")
+
# Apply the rest of the command line arguments.
_apply_kwargs(args, kwargs)
+ if connection_string is not None:
+ args.append(connection_string)
+
# Have the mongo shell run the specified file.
args.append(filename)
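To make the effect of the hunk above concrete, here is a small standalone sketch (not the real mongo_shell_program() implementation) of how the argument list ends up ordered when a connection string is supplied; the executable name, URL, ports, and test path are made-up values:

def build_shell_args(executable, connection_string=None, filename=None, **kwargs):
    # Mirrors the ordering above: options first, then the connection string,
    # then the test file. --host/--port are dropped because the shell ignores
    # them once an explicit connection string is given.
    args = [executable]
    if connection_string is not None:
        kwargs.pop("port", None)
        kwargs.pop("host", None)
    args.extend("--%s=%s" % (k, v) for (k, v) in sorted(kwargs.items()))
    if connection_string is not None:
        args.append(connection_string)
    args.append(filename)
    return args

print(build_shell_args("mongo",
                       connection_string="mongodb://localhost:20000,localhost:20001/?replicaSet=rs0",
                       filename="jstests/core/count10.js",
                       readMode="commands", port=20000))
# ['mongo', '--readMode=commands',
#  'mongodb://localhost:20000,localhost:20001/?replicaSet=rs0',
#  'jstests/core/count10.js']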
diff --git a/buildscripts/resmokelib/testing/fixtures/interface.py b/buildscripts/resmokelib/testing/fixtures/interface.py
index b4b0066a5aa..83418502ead 100644
--- a/buildscripts/resmokelib/testing/fixtures/interface.py
+++ b/buildscripts/resmokelib/testing/fixtures/interface.py
@@ -79,13 +79,22 @@ class Fixture(object):
"""
return True
- def get_connection_string(self):
+ def get_internal_connection_string(self):
"""
Returns the connection string for this fixture. This is NOT a
driver connection string, but a connection string of the format
expected by the mongo::ConnectionString class.
"""
- raise NotImplementedError("get_connection_string must be implemented by Fixture subclasses")
+ raise NotImplementedError(
+ "get_internal_connection_string must be implemented by Fixture subclasses")
+
+ def get_driver_connection_url(self):
+ """
+ Return the mongodb connection string as defined here:
+ https://docs.mongodb.com/manual/reference/connection-string/
+ """
+ raise NotImplementedError(
+ "get_driver_connection_url must be implemented by Fixture subclasses")
def __str__(self):
return "%s (Job #%d)" % (self.__class__.__name__, self.job_num)
diff --git a/buildscripts/resmokelib/testing/fixtures/masterslave.py b/buildscripts/resmokelib/testing/fixtures/masterslave.py
index 469c7ac0816..fb444cfe097 100644
--- a/buildscripts/resmokelib/testing/fixtures/masterslave.py
+++ b/buildscripts/resmokelib/testing/fixtures/masterslave.py
@@ -5,7 +5,6 @@ Master/slave fixture for executing JSTests against.
from __future__ import absolute_import
import os.path
-import socket
import pymongo
@@ -160,6 +159,12 @@ class MasterSlaveFixture(interface.ReplFixture):
mongod_options = self.mongod_options.copy()
mongod_options.update(self.slave_options)
mongod_options["slave"] = ""
- mongod_options["source"] = "%s:%d" % (socket.gethostname(), self.port)
+ mongod_options["source"] = self.master.get_internal_connection_string()
mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "slave")
return self._new_mongod(mongod_logger, mongod_options)
+
+ def get_internal_connection_string(self):
+ return self.master.get_internal_connection_string()
+
+ def get_driver_connection_url(self):
+ return self.master.get_driver_connection_url()
diff --git a/buildscripts/resmokelib/testing/fixtures/replicaset.py b/buildscripts/resmokelib/testing/fixtures/replicaset.py
index fb578bafe6f..0ec3de30280 100644
--- a/buildscripts/resmokelib/testing/fixtures/replicaset.py
+++ b/buildscripts/resmokelib/testing/fixtures/replicaset.py
@@ -37,7 +37,8 @@ class ReplicaSetFixture(interface.ReplFixture):
write_concern_majority_journal_default=None,
auth_options=None,
replset_config_options=None,
- voting_secondaries=False):
+ voting_secondaries=False,
+ use_replica_set_connection_string=False):
interface.ReplFixture.__init__(self, logger, job_num)
@@ -50,6 +51,7 @@ class ReplicaSetFixture(interface.ReplFixture):
self.auth_options = auth_options
self.replset_config_options = utils.default_if_none(replset_config_options, {})
self.voting_secondaries = voting_secondaries
+ self.use_replica_set_connection_string = use_replica_set_connection_string
# The dbpath in mongod_options is used as the dbpath prefix for replica set members and
# takes precedence over other settings. The ShardedClusterFixture uses this parameter to
@@ -97,7 +99,7 @@ class ReplicaSetFixture(interface.ReplFixture):
# Initiate the replica set.
members = []
for (i, node) in enumerate(self.nodes):
- member_info = {"_id": i, "host": node.get_connection_string()}
+ member_info = {"_id": i, "host": node.get_internal_connection_string()}
if i > 0:
member_info["priority"] = 0
if i >= 7 or not self.voting_secondaries:
@@ -107,7 +109,7 @@ class ReplicaSetFixture(interface.ReplFixture):
members.append(member_info)
if self.initial_sync_node:
members.append({"_id": self.initial_sync_node_idx,
- "host": self.initial_sync_node.get_connection_string(),
+ "host": self.initial_sync_node.get_internal_connection_string(),
"priority": 0,
"hidden": 1,
"votes": 0})
@@ -291,11 +293,27 @@ class ReplicaSetFixture(interface.ReplFixture):
return logging.loggers.new_logger(logger_name, parent=self.logger)
- def get_connection_string(self):
+ def get_internal_connection_string(self):
if self.replset_name is None:
- raise ValueError("Must call setup() before calling get_connection_string()")
+ raise ValueError("Must call setup() before calling get_internal_connection_string()")
- conn_strs = [node.get_connection_string() for node in self.nodes]
+ conn_strs = [node.get_internal_connection_string() for node in self.nodes]
if self.initial_sync_node:
- conn_strs.append(self.initial_sync_node.get_connection_string())
+ conn_strs.append(self.initial_sync_node.get_internal_connection_string())
return self.replset_name + "/" + ",".join(conn_strs)
+
+ def get_driver_connection_url(self):
+ if self.replset_name is None:
+ raise ValueError("Must call setup() before calling get_driver_connection_url()")
+
+ if self.use_replica_set_connection_string:
+ # We use a replica set connection string when all nodes are electable because we
+ # anticipate the client will want to gracefully handle any failovers.
+ conn_strs = [node.get_internal_connection_string() for node in self.nodes]
+ if self.initial_sync_node:
+ conn_strs.append(self.initial_sync_node.get_internal_connection_string())
+ return "mongodb://" + ",".join(conn_strs) + "/?replicaSet=" + self.replset_name
+ else:
+ # We return a direct connection to the expected primary when only the first node is
+ # electable because we want the client to error out if a stepdown occurs.
+ return self.nodes[0].get_driver_connection_url()
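The practical difference between the two accessors is easiest to see on concrete values. A hedged example using made-up ports and a made-up replica set name (neither is asserted to be resmoke's default):

nodes = ["localhost:20000", "localhost:20001"]
replset_name = "rs0"

# mongo::ConnectionString format, as returned by get_internal_connection_string().
internal = replset_name + "/" + ",".join(nodes)
# -> "rs0/localhost:20000,localhost:20001"

# Driver URL when use_replica_set_connection_string=True: the client discovers
# all members and can gracefully follow a failover.
driver_replset = "mongodb://" + ",".join(nodes) + "/?replicaSet=" + replset_name
# -> "mongodb://localhost:20000,localhost:20001/?replicaSet=rs0"

# Driver URL otherwise: a direct connection to the expected primary, so a
# stepdown surfaces as an error in the client.
driver_direct = "mongodb://" + nodes[0]
# -> "mongodb://localhost:20000"

print(internal, driver_replset, driver_direct, sep="\n")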
diff --git a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
index ac7e597f24b..2e2db535d6d 100644
--- a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
+++ b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
@@ -170,11 +170,14 @@ class ShardedClusterFixture(interface.Fixture):
all(shard.is_running() for shard in self.shards) and
self.mongos is not None and self.mongos.is_running())
- def get_connection_string(self):
+ def get_internal_connection_string(self):
if self.mongos is None:
- raise ValueError("Must call setup() before calling get_connection_string()")
+ raise ValueError("Must call setup() before calling get_internal_connection_string()")
- return "%s:%d" % (socket.gethostname(), self.mongos.port)
+ return self.mongos.get_internal_connection_string()
+
+ def get_driver_connection_url(self):
+ return "mongodb://" + self.get_internal_connection_string()
def _new_configsvr(self):
"""
@@ -229,16 +232,11 @@ class ShardedClusterFixture(interface.Fixture):
mongos_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
mongos_options = copy.deepcopy(self.mongos_options)
- configdb_hostname = socket.gethostname()
if self.separate_configsvr:
- configdb_replset = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
- configdb_port = self.configsvr.port
- mongos_options["configdb"] = "%s/%s:%d" % (configdb_replset,
- configdb_hostname,
- configdb_port)
+ mongos_options["configdb"] = self.configsvr.get_internal_connection_string()
else:
- mongos_options["configdb"] = "%s:%d" % (configdb_hostname, self.shards[0].port)
+ mongos_options["configdb"] = "localhost:%d" % (self.shards[0].port)
return _MongoSFixture(mongos_logger,
self.job_num,
@@ -254,9 +252,9 @@ class ShardedClusterFixture(interface.Fixture):
for more details.
"""
- hostname = socket.gethostname()
- self.logger.info("Adding %s:%d as a shard..." % (hostname, shard.port))
- client.admin.command({"addShard": "%s:%d" % (hostname, shard.port)})
+ connection_string = shard.get_internal_connection_string()
+ self.logger.info("Adding %s as a shard...", connection_string)
+ client.admin.command({"addShard": connection_string})
class _MongoSFixture(interface.Fixture):
@@ -356,3 +354,12 @@ class _MongoSFixture(interface.Fixture):
def is_running(self):
return self.mongos is not None and self.mongos.poll() is None
+
+ def get_internal_connection_string(self):
+ if self.mongos is None:
+ raise ValueError("Must call setup() before calling get_internal_connection_string()")
+
+ return "localhost:%d" % self.port
+
+ def get_driver_connection_url(self):
+ return "mongodb://" + self.get_internal_connection_string()
diff --git a/buildscripts/resmokelib/testing/fixtures/standalone.py b/buildscripts/resmokelib/testing/fixtures/standalone.py
index ba62b3d2b8c..bc69775c285 100644
--- a/buildscripts/resmokelib/testing/fixtures/standalone.py
+++ b/buildscripts/resmokelib/testing/fixtures/standalone.py
@@ -7,7 +7,6 @@ from __future__ import absolute_import
import os
import os.path
import shutil
-import socket
import time
import pymongo
@@ -146,8 +145,11 @@ class MongoDFixture(interface.Fixture):
def is_running(self):
return self.mongod is not None and self.mongod.poll() is None
- def get_connection_string(self):
+ def get_internal_connection_string(self):
if self.mongod is None:
- raise ValueError("Must call setup() before calling get_connection_string()")
+ raise ValueError("Must call setup() before calling get_internal_connection_string()")
- return "%s:%d" % (socket.gethostname(), self.port)
+ return "localhost:%d" % self.port
+
+ def get_driver_connection_url(self):
+ return "mongodb://" + self.get_internal_connection_string()
diff --git a/buildscripts/resmokelib/testing/testcases.py b/buildscripts/resmokelib/testing/testcases.py
index 1428cb5e763..21d35215a29 100644
--- a/buildscripts/resmokelib/testing/testcases.py
+++ b/buildscripts/resmokelib/testing/testcases.py
@@ -187,7 +187,7 @@ class CPPIntegrationTestCase(TestCase):
def configure(self, fixture, *args, **kwargs):
TestCase.configure(self, fixture, *args, **kwargs)
- self.program_options["connectionString"] = self.fixture.get_connection_string()
+ self.program_options["connectionString"] = self.fixture.get_internal_connection_string()
def run_test(self):
try:
@@ -421,11 +421,13 @@ class JSTestCase(TestCase):
is_main_test = True
if thread_id > 0:
is_main_test = False
- return core.programs.mongo_shell_program(logger,
- executable=self.shell_executable,
- filename=self.js_filename,
- isMainTest=is_main_test,
- **self.shell_options)
+ return core.programs.mongo_shell_program(
+ logger,
+ executable=self.shell_executable,
+ filename=self.js_filename,
+ connection_string=self.fixture.get_driver_connection_url(),
+ isMainTest=is_main_test,
+ **self.shell_options)
def _run_test_in_thread(self, thread_id):
# Make a logger for each thread.
diff --git a/jstests/core/collation_plan_cache.js b/jstests/core/collation_plan_cache.js
index 0eec77388e4..790bbbadaa6 100644
--- a/jstests/core/collation_plan_cache.js
+++ b/jstests/core/collation_plan_cache.js
@@ -1,4 +1,11 @@
// Integration testing for the plan cache and index filter commands with collation.
+//
+// @tags: [
+// # This test attempts to perform queries and introspect the server's plan cache entries. The
+// # former operation may be routed to a secondary in the replica set, whereas the latter must be
+// # routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
(function() {
'use strict';
@@ -237,4 +244,4 @@
assert.eq(0,
coll.runCommand('planCacheListFilters').filters.length,
'unexpected number of plan cache filters');
-})(); \ No newline at end of file
+})();
diff --git a/jstests/core/count10.js b/jstests/core/count10.js
index 2a1853c399a..453775c97f5 100644
--- a/jstests/core/count10.js
+++ b/jstests/core/count10.js
@@ -1,4 +1,11 @@
// Test that interrupting a count returns an error code.
+//
+// @tags: [
+// # This test attempts to perform a count command and find it using the currentOp command. The
+// # former operation may be routed to a secondary in the replica set, whereas the latter must be
+// # routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
t = db.count10;
t.drop();
diff --git a/jstests/core/count_plan_summary.js b/jstests/core/count_plan_summary.js
index 48891d21e8e..365f289c457 100644
--- a/jstests/core/count_plan_summary.js
+++ b/jstests/core/count_plan_summary.js
@@ -1,5 +1,11 @@
-// Test that the plan summary string appears in db.currentOp() for
-// count operations. SERVER-14064.
+// Test that the plan summary string appears in db.currentOp() for count operations. SERVER-14064.
+//
+// @tags: [
+// # This test attempts to perform a find command and find it using the currentOp command. The
+// # former operation may be routed to a secondary in the replica set, whereas the latter must be
+// # routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_count_plan_summary;
t.drop();
diff --git a/jstests/core/geo_s2cursorlimitskip.js b/jstests/core/geo_s2cursorlimitskip.js
index 427fbf8fe29..dc645dc68af 100644
--- a/jstests/core/geo_s2cursorlimitskip.js
+++ b/jstests/core/geo_s2cursorlimitskip.js
@@ -1,4 +1,11 @@
// Test various cursor behaviors
+//
+// @tags: [
+// # This test attempts to enable profiling on a server and then get profiling data by reading
+// # from the "system.profile" collection. The former operation must be routed to the primary in
+// # a replica set, whereas the latter may be routed to a secondary.
+// assumes_read_preference_unchanged,
+// ]
var testDB = db.getSiblingDB("geo_s2cursorlimitskip");
var t = testDB.geo_s2getmmm;
diff --git a/jstests/core/getlog2.js b/jstests/core/getlog2.js
index 597a85e20ee..e5287ea8c1b 100644
--- a/jstests/core/getlog2.js
+++ b/jstests/core/getlog2.js
@@ -1,4 +1,11 @@
// tests getlog as well as slow querying logging
+//
+// @tags: [
+// # This test attempts to perform a find command and see that it ran using the getLog command.
+// # The former operation may be routed to a secondary in the replica set, whereas the latter must
+// # be routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
glcol = db.getLogTest2;
glcol.drop();
diff --git a/jstests/core/index_filter_commands.js b/jstests/core/index_filter_commands.js
index 8684be3b2b9..58f78d0514e 100644
--- a/jstests/core/index_filter_commands.js
+++ b/jstests/core/index_filter_commands.js
@@ -6,20 +6,24 @@
* Displays index filters for all query shapes in a collection.
*
* - planCacheClearFilters
- * Clears index filter for a single query shape or,
- * if the query shape is omitted, all filters for the collection.
+ * Clears index filter for a single query shape or, if the query shape is omitted, all filters for
+ * the collection.
*
* - planCacheSetFilter
* Sets index filter for a query shape. Overrides existing filter.
*
- * Not a lot of data access in this test suite. Hint commands
- * manage a non-persistent mapping in the server of
- * query shape to list of index specs.
+ * Not a lot of data access in this test suite. Hint commands manage a non-persistent mapping in the
+ * server of query shape to list of index specs.
*
- * Only time we might need to execute a query is to check the plan
- * cache state. We would do this with the planCacheListPlans command
- * on the same query shape with the index filters.
+ * Only time we might need to execute a query is to check the plan cache state. We would do this
+ * with the planCacheListPlans command on the same query shape with the index filters.
*
+ * @tags: [
+ * # This test attempts to perform queries and to set up index filters in the plan cache. The
+ * # former operation may be routed to a secondary in the replica set, whereas the latter must
+ * # be routed to the primary.
+ * assumes_read_preference_unchanged,
+ * ]
*/
load("jstests/libs/analyze_plan.js");
diff --git a/jstests/core/index_stats.js b/jstests/core/index_stats.js
index 60b37fd571e..ee4d13d4d0a 100644
--- a/jstests/core/index_stats.js
+++ b/jstests/core/index_stats.js
@@ -1,3 +1,10 @@
+// @tags: [
+// # This test attempts to perform write operations and get index usage statistics using the
+// # $indexStats stage. The former operation must be routed to the primary in a replica set,
+// # whereas the latter may be routed to a secondary.
+// assumes_read_preference_unchanged,
+// ]
+
(function() {
"use strict";
diff --git a/jstests/core/max_time_ms.js b/jstests/core/max_time_ms.js
index 0442ffcba68..0cc7c684605 100644
--- a/jstests/core/max_time_ms.js
+++ b/jstests/core/max_time_ms.js
@@ -1,4 +1,11 @@
// Tests query/command option $maxTimeMS.
+//
+// @tags: [
+// # This test attempts to perform read operations after having enabled the maxTimeAlwaysTimeOut
+// # failpoint. The former operations may be routed to a secondary in the replica set, whereas the
+// # latter must be routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.max_time_ms;
var exceededTimeLimit = 50; // ErrorCodes::ExceededTimeLimit
diff --git a/jstests/core/mr_optim.js b/jstests/core/mr_optim.js
index 7437753ca67..1c525ae3de3 100644
--- a/jstests/core/mr_optim.js
+++ b/jstests/core/mr_optim.js
@@ -3,8 +3,17 @@
t = db.mr_optim;
t.drop();
+// We drop the output collection to ensure the test can be run multiple times successfully. We
+// explicitly avoid using the DBCollection#drop() shell helper to avoid implicitly sharding the
+// collection during the sharded_collections_jscore_passthrough.yml test suite when reading the
+// results from the output collection in the reformat() function.
+var res = db.runCommand({drop: "mr_optim_out"});
+if (res.ok !== 1) {
+ assert.commandFailedWithCode(res, ErrorCodes.NamespaceNotFound);
+}
+
for (var i = 0; i < 1000; ++i) {
- t.save({a: Math.random(1000), b: Math.random(10000)});
+ assert.writeOK(t.save({a: Math.random(1000), b: Math.random(10000)}));
}
function m() {
@@ -21,7 +30,7 @@ function reformat(r) {
if (r.results)
cursor = r.results;
else
- cursor = r.find();
+ cursor = r.find().sort({_id: 1});
cursor.forEach(function(z) {
x[z._id] = z.value;
});
@@ -43,4 +52,4 @@ res.drop();
assert.eq(x, x2, "object from inline and collection are not equal");
-t.drop(); \ No newline at end of file
+t.drop();
diff --git a/jstests/core/notablescan.js b/jstests/core/notablescan.js
index 80306c08cf2..bb4c170a603 100644
--- a/jstests/core/notablescan.js
+++ b/jstests/core/notablescan.js
@@ -1,4 +1,11 @@
// check notablescan mode
+//
+// @tags: [
+// # This test attempts to perform read operations after having enabled the notablescan server
+// # parameter. The former operations may be routed to a secondary in the replica set, whereas the
+// # latter must be routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
t = db.test_notablescan;
t.drop();
diff --git a/jstests/core/operation_latency_histogram.js b/jstests/core/operation_latency_histogram.js
index 1e3f1a59b95..947a8be6520 100644
--- a/jstests/core/operation_latency_histogram.js
+++ b/jstests/core/operation_latency_histogram.js
@@ -1,4 +1,10 @@
// Checks that histogram counters for collections are updated as we expect.
+//
+// This test attempts to perform write operations and get latency statistics using the $collStats
+// stage. The former operation must be routed to the primary in a replica set, whereas the latter
+// may be routed to a secondary.
+//
+// @tags: [assumes_read_preference_unchanged]
(function() {
"use strict";
diff --git a/jstests/core/plan_cache_clear.js b/jstests/core/plan_cache_clear.js
index 8f9cf0ea302..778239616b5 100644
--- a/jstests/core/plan_cache_clear.js
+++ b/jstests/core/plan_cache_clear.js
@@ -1,5 +1,12 @@
// Test clearing of the plan cache, either manually through the planCacheClear command,
// or due to system events such as an index build.
+//
+// @tags: [
+// # This test attempts to perform queries and introspect/manipulate the server's plan cache
+// # entries. The former operation may be routed to a secondary in the replica set, whereas the
+// # latter must be routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_plan_cache_clear;
t.drop();
diff --git a/jstests/core/plan_cache_list_plans.js b/jstests/core/plan_cache_list_plans.js
index 7ca599483ff..3359980ab07 100644
--- a/jstests/core/plan_cache_list_plans.js
+++ b/jstests/core/plan_cache_list_plans.js
@@ -1,4 +1,11 @@
// Test the planCacheListPlans command.
+//
+// @tags: [
+// # This test attempts to perform queries and introspect the server's plan cache entries. The
+// # former operation may be routed to a secondary in the replica set, whereas the latter must be
+// # routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_plan_cache_list_plans;
t.drop();
diff --git a/jstests/core/plan_cache_list_shapes.js b/jstests/core/plan_cache_list_shapes.js
index 1c9ecdf9e1b..61c3111cd8a 100644
--- a/jstests/core/plan_cache_list_shapes.js
+++ b/jstests/core/plan_cache_list_shapes.js
@@ -1,5 +1,12 @@
// Test the planCacheListQueryShapes command, which returns a list of query shapes
// for the queries currently cached in the collection.
+//
+// @tags: [
+// # This test attempts to perform queries and to set up plan cache filters. The former operation
+// # may be routed to a secondary in the replica set, whereas the latter must be routed to the
+// # primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_plan_cache_list_shapes;
t.drop();
diff --git a/jstests/core/plan_cache_shell_helpers.js b/jstests/core/plan_cache_shell_helpers.js
index dc990b19dcc..6c4ff185014 100644
--- a/jstests/core/plan_cache_shell_helpers.js
+++ b/jstests/core/plan_cache_shell_helpers.js
@@ -1,4 +1,11 @@
// Test the shell helpers which wrap the plan cache commands.
+//
+// @tags: [
+// # This test attempts to perform queries and introspect the server's plan cache entries. The
+// # former operation may be routed to a secondary in the replica set, whereas the latter must be
+// # routed to the primary.
+// assumes_read_preference_unchanged,
+// ]
var t = db.jstests_plan_cache_shell_helpers;
t.drop();
diff --git a/jstests/core/startup_log.js b/jstests/core/startup_log.js
index 3b0cbe3464d..c73013d1744 100644
--- a/jstests/core/startup_log.js
+++ b/jstests/core/startup_log.js
@@ -1,101 +1,108 @@
-load('jstests/aggregation/extras/utils.js');
-
-(function() {
- 'use strict';
-
- // Check that smallArray is entirely contained by largeArray
- // returns false if a member of smallArray is not in largeArray
- function arrayIsSubset(smallArray, largeArray) {
- for (var i = 0; i < smallArray.length; i++) {
- if (!Array.contains(largeArray, smallArray[i])) {
- print("Could not find " + smallArray[i] + " in largeArray");
- return false;
- }
- }
-
- return true;
- }
-
- // Test startup_log
- var stats = db.getSisterDB("local").startup_log.stats();
- assert(stats.capped);
-
- var latestStartUpLog =
- db.getSisterDB("local").startup_log.find().sort({$natural: -1}).limit(1).next();
- var serverStatus = db._adminCommand("serverStatus");
- var cmdLine = db._adminCommand("getCmdLineOpts").parsed;
-
- // Test that the startup log has the expected keys
- var verbose = false;
- var expectedKeys =
- ["_id", "hostname", "startTime", "startTimeLocal", "cmdLine", "pid", "buildinfo"];
- var keys = Object.keySet(latestStartUpLog);
- assert(arrayEq(expectedKeys, keys, verbose), 'startup_log keys failed');
-
- // Tests _id implicitly - should be comprised of host-timestamp
- // Setup expected startTime and startTimeLocal from the supplied timestamp
- var _id = latestStartUpLog._id.split('-'); // _id should consist of host-timestamp
- var _idUptime = _id.pop();
- var _idHost = _id.join('-');
- var uptimeSinceEpochRounded = Math.floor(_idUptime / 1000) * 1000;
- var startTime = new Date(uptimeSinceEpochRounded); // Expected startTime
-
- assert.eq(_idHost, latestStartUpLog.hostname, "Hostname doesn't match one from _id");
- assert.eq(serverStatus.host.split(':')[0],
- latestStartUpLog.hostname,
- "Hostname doesn't match one in server status");
- assert.closeWithinMS(startTime,
- latestStartUpLog.startTime,
- "StartTime doesn't match one from _id",
- 2000); // Expect less than 2 sec delta
- assert.eq(cmdLine, latestStartUpLog.cmdLine, "cmdLine doesn't match that from getCmdLineOpts");
- assert.eq(serverStatus.pid, latestStartUpLog.pid, "pid doesn't match that from serverStatus");
-
- // Test buildinfo
- var buildinfo = db.runCommand("buildinfo");
- delete buildinfo.ok; // Delete extra meta info not in startup_log
- var isMaster = db._adminCommand("ismaster");
-
- // Test buildinfo has the expected keys
- var expectedKeys = [
- "version",
- "gitVersion",
- "allocator",
- "versionArray",
- "javascriptEngine",
- "openssl",
- "buildEnvironment",
- "debug",
- "maxBsonObjectSize",
- "bits",
- "modules"
- ];
-
- var keys = Object.keySet(latestStartUpLog.buildinfo);
- // Disabled to check
- assert(arrayIsSubset(expectedKeys, keys),
- "buildinfo keys failed! \n expected:\t" + expectedKeys + "\n actual:\t" + keys);
- assert.eq(buildinfo,
- latestStartUpLog.buildinfo,
- "buildinfo doesn't match that from buildinfo command");
-
- // Test version and version Array
- var version = latestStartUpLog.buildinfo.version.split('-')[0];
- var versionArray = latestStartUpLog.buildinfo.versionArray;
- var versionArrayCleaned = versionArray.slice(0, 3);
- if (versionArray[3] == -100) {
- versionArrayCleaned[2] -= 1;
- }
-
- assert.eq(serverStatus.version,
- latestStartUpLog.buildinfo.version,
- "Mongo version doesn't match that from ServerStatus");
- assert.eq(
- version, versionArrayCleaned.join('.'), "version doesn't match that from the versionArray");
- var jsEngine = latestStartUpLog.buildinfo.javascriptEngine;
- assert((jsEngine == "none") || jsEngine.startsWith("mozjs"));
- assert.eq(isMaster.maxBsonObjectSize,
- latestStartUpLog.buildinfo.maxBsonObjectSize,
- "maxBsonObjectSize doesn't match one from ismaster");
-
-})();
+/**
+ * This test attempts to read from the "local.startup_log" collection and assert that it has an
+ * entry matching the server's response from the "getCmdLineOpts" command. The former operation may
+ * be routed to a secondary in the replica set, whereas the latter must be routed to the primary.
+ *
+ * @tags: [assumes_read_preference_unchanged]
+ */
+load('jstests/aggregation/extras/utils.js');
+
+(function() {
+ 'use strict';
+
+ // Check that smallArray is entirely contained by largeArray
+ // returns false if a member of smallArray is not in largeArray
+ function arrayIsSubset(smallArray, largeArray) {
+ for (var i = 0; i < smallArray.length; i++) {
+ if (!Array.contains(largeArray, smallArray[i])) {
+ print("Could not find " + smallArray[i] + " in largeArray");
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ // Test startup_log
+ var stats = db.getSisterDB("local").startup_log.stats();
+ assert(stats.capped);
+
+ var latestStartUpLog =
+ db.getSisterDB("local").startup_log.find().sort({$natural: -1}).limit(1).next();
+ var serverStatus = db._adminCommand("serverStatus");
+ var cmdLine = db._adminCommand("getCmdLineOpts").parsed;
+
+ // Test that the startup log has the expected keys
+ var verbose = false;
+ var expectedKeys =
+ ["_id", "hostname", "startTime", "startTimeLocal", "cmdLine", "pid", "buildinfo"];
+ var keys = Object.keySet(latestStartUpLog);
+ assert(arrayEq(expectedKeys, keys, verbose), 'startup_log keys failed');
+
+ // Tests _id implicitly - should be comprised of host-timestamp
+ // Setup expected startTime and startTimeLocal from the supplied timestamp
+ var _id = latestStartUpLog._id.split('-'); // _id should consist of host-timestamp
+ var _idUptime = _id.pop();
+ var _idHost = _id.join('-');
+ var uptimeSinceEpochRounded = Math.floor(_idUptime / 1000) * 1000;
+ var startTime = new Date(uptimeSinceEpochRounded); // Expected startTime
+
+ assert.eq(_idHost, latestStartUpLog.hostname, "Hostname doesn't match one from _id");
+ assert.eq(serverStatus.host.split(':')[0],
+ latestStartUpLog.hostname,
+ "Hostname doesn't match one in server status");
+ assert.closeWithinMS(startTime,
+ latestStartUpLog.startTime,
+ "StartTime doesn't match one from _id",
+ 2000); // Expect less than 2 sec delta
+ assert.eq(cmdLine, latestStartUpLog.cmdLine, "cmdLine doesn't match that from getCmdLineOpts");
+ assert.eq(serverStatus.pid, latestStartUpLog.pid, "pid doesn't match that from serverStatus");
+
+ // Test buildinfo
+ var buildinfo = db.runCommand("buildinfo");
+ delete buildinfo.ok; // Delete extra meta info not in startup_log
+ var isMaster = db._adminCommand("ismaster");
+
+ // Test buildinfo has the expected keys
+ var expectedKeys = [
+ "version",
+ "gitVersion",
+ "allocator",
+ "versionArray",
+ "javascriptEngine",
+ "openssl",
+ "buildEnvironment",
+ "debug",
+ "maxBsonObjectSize",
+ "bits",
+ "modules"
+ ];
+
+ var keys = Object.keySet(latestStartUpLog.buildinfo);
+ // Disabled to check
+ assert(arrayIsSubset(expectedKeys, keys),
+ "buildinfo keys failed! \n expected:\t" + expectedKeys + "\n actual:\t" + keys);
+ assert.eq(buildinfo,
+ latestStartUpLog.buildinfo,
+ "buildinfo doesn't match that from buildinfo command");
+
+ // Test version and version Array
+ var version = latestStartUpLog.buildinfo.version.split('-')[0];
+ var versionArray = latestStartUpLog.buildinfo.versionArray;
+ var versionArrayCleaned = versionArray.slice(0, 3);
+ if (versionArray[3] == -100) {
+ versionArrayCleaned[2] -= 1;
+ }
+
+ assert.eq(serverStatus.version,
+ latestStartUpLog.buildinfo.version,
+ "Mongo version doesn't match that from ServerStatus");
+ assert.eq(
+ version, versionArrayCleaned.join('.'), "version doesn't match that from the versionArray");
+ var jsEngine = latestStartUpLog.buildinfo.javascriptEngine;
+ assert((jsEngine == "none") || jsEngine.startsWith("mozjs"));
+ assert.eq(isMaster.maxBsonObjectSize,
+ latestStartUpLog.buildinfo.maxBsonObjectSize,
+ "maxBsonObjectSize doesn't match one from ismaster");
+
+})();
diff --git a/jstests/core/top.js b/jstests/core/top.js
index 819b41b0981..3d98f5a7b2d 100644
--- a/jstests/core/top.js
+++ b/jstests/core/top.js
@@ -1,5 +1,11 @@
/**
* 1. check top numbers are correct
+ *
+ * This test attempts to perform read operations and get statistics using the top command. The
+ * former operation may be routed to a secondary in the replica set, whereas the latter must be
+ * routed to the primary.
+ *
+ * @tags: [assumes_read_preference_unchanged]
*/
(function() {
load("jstests/libs/stats.js");
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 0115672dcda..3d90bd24d32 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -294,14 +294,19 @@
},
command: function(conn) {
// First get and check a partial result for an aggregate command.
- let aggCmd = {aggregate: "view", pipeline: [], cursor: {batchSize: 2}};
+ let aggCmd = {
+ aggregate: "view",
+ pipeline: [{$sort: {_id: 1}}],
+ cursor: {batchSize: 2}
+ };
let res = conn.runCommand(aggCmd);
assert.commandWorked(res, aggCmd);
let cursor = res.cursor;
assert.eq(
cursor.ns, "test.view", "expected view namespace in cursor: " + tojson(cursor));
let expectedFirstBatch = [{_id: 1}, {_id: 2}];
- assert.eq(cursor.firstBatch, expectedFirstBatch, "find returned wrong firstBatch");
+ assert.eq(
+ cursor.firstBatch, expectedFirstBatch, "aggregate returned wrong firstBatch");
// Then check correct execution of the killCursors command.
let killCursorsCmd = {killCursors: "view", cursors: [cursor.id]};
diff --git a/jstests/core/views/views_stats.js b/jstests/core/views/views_stats.js
index 75feb857c9a..22261e9fa81 100644
--- a/jstests/core/views/views_stats.js
+++ b/jstests/core/views/views_stats.js
@@ -1,4 +1,10 @@
// Test that top and latency histogram statistics are recorded for views.
+//
+// This test attempts to perform write operations and get latency statistics using the $collStats
+// stage. The former operation must be routed to the primary in a replica set, whereas the latter
+// may be routed to a secondary.
+//
+// @tags: [assumes_read_preference_unchanged]
(function() {
"use strict";
diff --git a/jstests/libs/override_methods/set_read_preference_secondary.js b/jstests/libs/override_methods/set_read_preference_secondary.js
new file mode 100644
index 00000000000..d1d26433c5c
--- /dev/null
+++ b/jstests/libs/override_methods/set_read_preference_secondary.js
@@ -0,0 +1,162 @@
+/**
+ * Use prototype overrides to set read preference to "secondary" when running tests.
+ */
+(function() {
+ "use strict";
+
+ load("jstests/libs/override_methods/override_helpers.js");
+
+ const kReadPreferenceSecondary = {mode: "secondary"};
+ const kCommandsSupportingReadPreference = new Set([
+ "aggregate",
+ "collStats",
+ "count",
+ "dbStats",
+ "distinct",
+ "find",
+ "geoNear",
+ "geoSearch",
+ "group",
+ "mapReduce",
+ "mapreduce",
+ "parallelCollectionScan",
+ ]);
+
+ // This list of cursor-generating commands is incomplete. For example, "listCollections",
+ // "listIndexes", "parallelCollectionScan", and "repairCursor" are all missing from this list.
+ // If we ever add tests that attempt to run getMore or killCursors on cursors generated from
+ // those commands, then we should update the contents of this list and also handle any
+ // differences in the server's response format.
+ const kCursorGeneratingCommands = new Set(["aggregate", "find"]);
+
+ const CursorTracker = (function() {
+ const kNoCursor = new NumberLong(0);
+
+ const connectionsByCursorId = {};
+
+ return {
+ getConnectionUsedForCursor: function getConnectionUsedForCursor(cursorId) {
+ return (cursorId instanceof NumberLong) ? connectionsByCursorId[cursorId]
+ : undefined;
+ },
+
+ setConnectionUsedForCursor: function setConnectionUsedForCursor(cursorId, cursorConn) {
+ if (cursorId instanceof NumberLong &&
+ !bsonBinaryEqual({_: cursorId}, {_: kNoCursor})) {
+ connectionsByCursorId[cursorId] = cursorConn;
+ }
+ },
+ };
+ })();
+
+ function runCommandWithReadPreferenceSecondary(
+ conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
+
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ let commandObjUnwrapped = commandObj;
+ if (commandName === "query" || commandName === "$query") {
+ commandObjUnwrapped = commandObj[commandName];
+ commandName = Object.keys(commandObjUnwrapped)[0];
+ }
+
+ if (commandObj[commandName] === "system.profile") {
+ throw new Error("Cowardly refusing to run test with overridden read preference" +
+ " when it reads from a non-replicated collection: " +
+ tojson(commandObj));
+ }
+
+ if (conn.isReplicaSetConnection()) {
+ // When a "getMore" or "killCursors" command is issued on a replica set connection, we
+ // attempt to automatically route the command to the server the cursor(s) were
+ // originally established on. This makes it possible to use the
+ // set_read_preference_secondary.js override without needing to update calls of
+ // DB#runCommand() to explicitly track the connection that was used. If the connection
+ // is actually a direct connection to a mongod or mongos process, or if the cursor id
+ // cannot be found in the CursorTracker, then we'll fall back to using DBClientRS's
+ // server selection and send the operation to the current primary. It is possible that
+ // the test is trying to exercise the behavior around when an unknown cursor id is sent
+ // to the server.
+ if (commandName === "getMore") {
+ const cursorId = commandObjUnwrapped[commandName];
+ const cursorConn = CursorTracker.getConnectionUsedForCursor(cursorId);
+ if (cursorConn !== undefined) {
+ return func.apply(cursorConn, makeFuncArgs(commandObj));
+ }
+ } else if (commandName === "killCursors") {
+ const cursorIds = commandObjUnwrapped.cursors;
+ if (Array.isArray(cursorIds)) {
+ let cursorConn;
+
+ for (let cursorId of cursorIds) {
+ const otherCursorConn = CursorTracker.getConnectionUsedForCursor(cursorId);
+ if (cursorConn === undefined) {
+ cursorConn = otherCursorConn;
+ } else if (otherCursorConn !== undefined) {
+ // We set 'cursorConn' back to undefined and break out of the loop so
+ // that we don't attempt to automatically route the "killCursors"
+ // command when there are cursors from different servers.
+ cursorConn = undefined;
+ break;
+ }
+ }
+
+ if (cursorConn !== undefined) {
+ return func.apply(cursorConn, makeFuncArgs(commandObj));
+ }
+ }
+ }
+ }
+
+ let shouldForceReadPreference = kCommandsSupportingReadPreference.has(commandName);
+ if (OverrideHelpers.isAggregationWithOutStage(commandName, commandObjUnwrapped)) {
+ // An aggregation with a $out stage must be sent to the primary.
+ shouldForceReadPreference = false;
+ } else if ((commandName === "mapReduce" || commandName === "mapreduce") &&
+ !OverrideHelpers.isMapReduceWithInlineOutput(commandName, commandObjUnwrapped)) {
+ // A map-reduce operation with non-inline output must be sent to the primary.
+ shouldForceReadPreference = false;
+ }
+
+ if (shouldForceReadPreference) {
+ if (commandObj === commandObjUnwrapped) {
+ // We wrap the command object using a "query" field rather than a "$query" field to
+ // match the implementation of DB.prototype._attachReadPreferenceToCommand().
+ commandObj = {query: commandObj};
+ } else {
+ // We create a copy of 'commandObj' to avoid mutating the parameter the caller
+ // specified.
+ commandObj = Object.assign({}, commandObj);
+ }
+
+ if (commandObj.hasOwnProperty("$readPreference") &&
+ !bsonBinaryEqual({_: commandObj.$readPreference}, {_: kReadPreferenceSecondary})) {
+ throw new Error("Cowardly refusing to override read preference of command: " +
+ tojson(commandObj));
+ }
+
+ commandObj.$readPreference = kReadPreferenceSecondary;
+ }
+
+ const serverResponse = func.apply(conn, makeFuncArgs(commandObj));
+
+ if (conn.isReplicaSetConnection() && kCursorGeneratingCommands.has(commandName) &&
+ serverResponse.ok === 1 && serverResponse.hasOwnProperty("cursor")) {
+ // We associate the cursor id returned by the server with the connection that was used
+ // to establish it so that we can attempt to automatically route subsequent "getMore"
+ // and "killCursors" commands.
+ CursorTracker.setConnectionUsedForCursor(serverResponse.cursor.id,
+ serverResponse._mongo);
+ }
+
+ return serverResponse;
+ }
+
+ OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/set_read_preference_secondary.js");
+
+ OverrideHelpers.overrideRunCommand(runCommandWithReadPreferenceSecondary);
+})();
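For readers unfamiliar with the wrapped command form used by this override, the rewrite it performs amounts to the following, shown here as Python dicts purely for illustration (the override itself manipulates JavaScript objects in the shell, and the collection and filter are made up):

original = {"find": "coll", "filter": {"x": 1}}

# A command that supports read preference is wrapped in a "query" document and
# tagged with $readPreference, matching DB.prototype._attachReadPreferenceToCommand().
wrapped = {"query": original, "$readPreference": {"mode": "secondary"}}
print(wrapped)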