author     Yves Duhem <yves.duhem@mongodb.com>    2017-09-01 17:40:29 -0400
committer  Yves Duhem <yves.duhem@mongodb.com>    2017-09-01 17:41:35 -0400
commit     2f6a2294feb20a9a3a11dcdbfbfed05f4756c1b2 (patch)
tree       7016c0c574b55768eac13413e21ff5a849c1a9fd
parent     edfcb9cfbef2f0bcd85cd46cb1b9ccbe89d0f299 (diff)
download   mongo-2f6a2294feb20a9a3a11dcdbfbfed05f4756c1b2.tar.gz
SERVER-30683 Support failovers in ReplicaSetFixture
-rw-r--r--  buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml | 2
-rw-r--r--  buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml | 2
-rw-r--r--  buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml | 2
-rw-r--r--  buildscripts/resmokeconfig/suites/core.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/integration_tests_replset.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/integration_tests_sharded.yml | 2
-rw-r--r--  buildscripts/resmokeconfig/suites/integration_tests_standalone.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/json_schema.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/jstestfuzz_replication.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/jstestfuzz_replication_initsync.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/jstestfuzz_replication_resync.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/jstestfuzz_sharded.yml | 2
-rw-r--r--  buildscripts/resmokeconfig/suites/master_slave_jscore_passthrough.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/read_concern_linearizable_passthrough.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/read_concern_majority_passthrough.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/replica_sets_initsync_jscore_passthrough.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/replica_sets_initsync_static_jscore_passthrough.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/replica_sets_resync_static_jscore_passthrough.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml | 2
-rw-r--r--  buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml | 1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_gle_auth_basics_passthrough.yml | 2
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml | 2
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml | 2
-rw-r--r--  buildscripts/resmokelib/core/programs.py | 17
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/__init__.py | 4
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/interface.py | 41
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/masterslave.py | 10
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/replicaset.py | 101
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/shardedcluster.py | 27
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/standalone.py | 18
-rw-r--r--  buildscripts/resmokelib/testing/hooks/initialsync.py | 4
-rw-r--r--  buildscripts/resmokelib/testing/hooks/periodic_kill_secondaries.py | 122
-rw-r--r--  buildscripts/resmokelib/testing/testcases/json_schema_test.py | 14
-rw-r--r--  buildscripts/resmokelib/testing/testcases/jstest.py | 13
-rw-r--r--  buildscripts/resmokelib/utils/__init__.py | 18
-rw-r--r--  jstests/hooks/run_check_repl_dbhash.js | 8
41 files changed, 228 insertions, 206 deletions
diff --git a/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml
index d0c7ca7ac6a..1344f26cec0 100644
--- a/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml
@@ -39,7 +39,6 @@ executor:
fixture:
class: ReplicaSetFixture
mongod_options:
- bind_ip_all: ''
enableMajorityReadConcern: ''
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml
index c326b8e4875..de8b568eee6 100644
--- a/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml
@@ -49,11 +49,9 @@ executor:
fixture:
class: ShardedClusterFixture
mongos_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
mongod_options:
- bind_ip_all: ''
nopreallocj: ''
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
index afe6f6a43fc..55d8db5f075 100644
--- a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
@@ -185,7 +185,6 @@ executor:
fixture:
class: ShardedClusterFixture
mongos_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
logComponentVerbosity:
@@ -196,7 +195,6 @@ executor:
asio: 2
tracking: 0
mongod_options:
- bind_ip_all: ''
nopreallocj: ''
enableMajorityReadConcern: ''
set_parameters:
diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
index bbdf1f8ab57..02c4a03a93d 100644
--- a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
+++ b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
@@ -232,7 +232,6 @@ executor:
fixture:
class: ShardedClusterFixture
mongos_options:
- bind_ip_all: ''
keyFile: *keyFile
set_parameters:
enableTestCommands: 1
@@ -244,7 +243,6 @@ executor:
asio: 2
tracking: 0
mongod_options:
- bind_ip_all: ''
nopreallocj: ''
enableMajorityReadConcern: ''
auth: ''
diff --git a/buildscripts/resmokeconfig/suites/core.yml b/buildscripts/resmokeconfig/suites/core.yml
index 1156c5e88cb..4aa054a49bb 100644
--- a/buildscripts/resmokeconfig/suites/core.yml
+++ b/buildscripts/resmokeconfig/suites/core.yml
@@ -19,6 +19,5 @@ executor:
fixture:
class: MongoDFixture
mongod_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/integration_tests_replset.yml b/buildscripts/resmokeconfig/suites/integration_tests_replset.yml
index 6ea48b5a55d..92900f32692 100644
--- a/buildscripts/resmokeconfig/suites/integration_tests_replset.yml
+++ b/buildscripts/resmokeconfig/suites/integration_tests_replset.yml
@@ -15,7 +15,6 @@ executor:
fixture:
class: ReplicaSetFixture
mongod_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
numInitialSyncAttempts: 1
diff --git a/buildscripts/resmokeconfig/suites/integration_tests_sharded.yml b/buildscripts/resmokeconfig/suites/integration_tests_sharded.yml
index 2942a01474a..8f7d234bbd8 100644
--- a/buildscripts/resmokeconfig/suites/integration_tests_sharded.yml
+++ b/buildscripts/resmokeconfig/suites/integration_tests_sharded.yml
@@ -12,12 +12,10 @@ executor:
fixture:
class: ShardedClusterFixture
mongod_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
numInitialSyncAttempts: 1
mongos_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
enable_sharding:
diff --git a/buildscripts/resmokeconfig/suites/integration_tests_standalone.yml b/buildscripts/resmokeconfig/suites/integration_tests_standalone.yml
index 1ab54b6e61f..f8d826c5de8 100644
--- a/buildscripts/resmokeconfig/suites/integration_tests_standalone.yml
+++ b/buildscripts/resmokeconfig/suites/integration_tests_standalone.yml
@@ -12,6 +12,5 @@ executor:
fixture:
class: MongoDFixture
mongod_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/json_schema.yml b/buildscripts/resmokeconfig/suites/json_schema.yml
index e886c3bc4b2..a0535172c67 100644
--- a/buildscripts/resmokeconfig/suites/json_schema.yml
+++ b/buildscripts/resmokeconfig/suites/json_schema.yml
@@ -39,6 +39,5 @@ executor:
fixture:
class: MongoDFixture
mongod_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/jstestfuzz_replication.yml b/buildscripts/resmokeconfig/suites/jstestfuzz_replication.yml
index c8073c10b45..b14a7432ea4 100644
--- a/buildscripts/resmokeconfig/suites/jstestfuzz_replication.yml
+++ b/buildscripts/resmokeconfig/suites/jstestfuzz_replication.yml
@@ -28,7 +28,6 @@ executor:
fixture:
class: ReplicaSetFixture
mongod_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
numInitialSyncAttempts: 1
diff --git a/buildscripts/resmokeconfig/suites/jstestfuzz_replication_initsync.yml b/buildscripts/resmokeconfig/suites/jstestfuzz_replication_initsync.yml
index d5c17ffc8f2..5ff2d73f41b 100644
--- a/buildscripts/resmokeconfig/suites/jstestfuzz_replication_initsync.yml
+++ b/buildscripts/resmokeconfig/suites/jstestfuzz_replication_initsync.yml
@@ -24,7 +24,6 @@ executor:
class: ReplicaSetFixture
mongod_options:
oplogSize: 511
- bind_ip_all: ''
verbose: ''
set_parameters:
logComponentVerbosity:
diff --git a/buildscripts/resmokeconfig/suites/jstestfuzz_replication_resync.yml b/buildscripts/resmokeconfig/suites/jstestfuzz_replication_resync.yml
index 1a39d00cd77..7fdea6e25cd 100644
--- a/buildscripts/resmokeconfig/suites/jstestfuzz_replication_resync.yml
+++ b/buildscripts/resmokeconfig/suites/jstestfuzz_replication_resync.yml
@@ -23,7 +23,6 @@ executor:
class: ReplicaSetFixture
mongod_options:
oplogSize: 511
- bind_ip_all: ''
verbose: ''
set_parameters:
logComponentVerbosity:
diff --git a/buildscripts/resmokeconfig/suites/jstestfuzz_sharded.yml b/buildscripts/resmokeconfig/suites/jstestfuzz_sharded.yml
index 4f95e982852..d99b98ba75c 100644
--- a/buildscripts/resmokeconfig/suites/jstestfuzz_sharded.yml
+++ b/buildscripts/resmokeconfig/suites/jstestfuzz_sharded.yml
@@ -19,12 +19,10 @@ executor:
mongos_options:
set_parameters:
enableTestCommands: 1
- bind_ip_all: ''
verbose: ''
mongod_options:
set_parameters:
enableTestCommands: 1
numInitialSyncAttempts: 1
verbose: ''
- bind_ip_all: ''
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/master_slave_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/master_slave_jscore_passthrough.yml
index 823cd4ef067..f7aefe690fc 100644
--- a/buildscripts/resmokeconfig/suites/master_slave_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/master_slave_jscore_passthrough.yml
@@ -25,7 +25,6 @@ executor:
fixture:
class: MasterSlaveFixture
mongod_options:
- bind_ip_all: ''
oplogSize: 511
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/read_concern_linearizable_passthrough.yml b/buildscripts/resmokeconfig/suites/read_concern_linearizable_passthrough.yml
index ae5845f7724..ab655bca294 100644
--- a/buildscripts/resmokeconfig/suites/read_concern_linearizable_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/read_concern_linearizable_passthrough.yml
@@ -96,7 +96,6 @@ executor:
fixture:
class: ReplicaSetFixture
mongod_options:
- bind_ip_all: ''
oplogSize: 511
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/read_concern_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/read_concern_majority_passthrough.yml
index 73bfa5a27b9..e5afb848e02 100644
--- a/buildscripts/resmokeconfig/suites/read_concern_majority_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/read_concern_majority_passthrough.yml
@@ -93,7 +93,6 @@ executor:
fixture:
class: ReplicaSetFixture
mongod_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
numInitialSyncAttempts: 1
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_initsync_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_initsync_jscore_passthrough.yml
index 9d2017f2e70..2142079d643 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_initsync_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_initsync_jscore_passthrough.yml
@@ -92,7 +92,6 @@ executor:
fixture:
class: ReplicaSetFixture
mongod_options:
- bind_ip_all: ''
oplogSize: 511
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_initsync_static_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_initsync_static_jscore_passthrough.yml
index 659e8efaff5..b3e157d86ee 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_initsync_static_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_initsync_static_jscore_passthrough.yml
@@ -26,7 +26,6 @@ executor:
fixture:
class: ReplicaSetFixture
mongod_options:
- bind_ip_all: ''
oplogSize: 511
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml
index 6933e40e1a8..853b409d318 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml
@@ -27,7 +27,6 @@ executor:
fixture:
class: ReplicaSetFixture
mongod_options:
- bind_ip_all: ''
oplogSize: 511
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml
index 6b9b3b20347..c64ed9ebb5b 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml
@@ -24,7 +24,6 @@ executor:
fixture:
class: ReplicaSetFixture
mongod_options:
- bind_ip_all: ''
oplogSize: 511
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_resync_static_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_resync_static_jscore_passthrough.yml
index 27726964c60..90efaa192e6 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_resync_static_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_resync_static_jscore_passthrough.yml
@@ -27,7 +27,6 @@ executor:
fixture:
class: ReplicaSetFixture
mongod_options:
- bind_ip_all: ''
oplogSize: 511
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml
index a319397b83b..a98b7222562 100644
--- a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml
@@ -50,7 +50,6 @@ executor:
fixture:
class: ReplicaSetFixture
mongod_options:
- bind_ip_all: ''
oplogSize: 511
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml
index c89afbe78cd..c918b66e91f 100644
--- a/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml
@@ -20,6 +20,5 @@ executor:
fixture:
class: MongoDFixture
mongod_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
index 2ab418e0bf0..89065f2d07b 100644
--- a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
@@ -212,7 +212,6 @@ executor:
fixture:
class: ShardedClusterFixture
mongos_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
logComponentVerbosity:
@@ -221,7 +220,6 @@ executor:
verbosity: 1
asio: 2
mongod_options:
- bind_ip_all: ''
nopreallocj: ''
enableMajorityReadConcern: ''
set_parameters:
diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
index fae8086dfa5..b8500d3ed36 100644
--- a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
@@ -102,7 +102,6 @@ executor:
set_parameters:
enableTestCommands: 1
mongod_options:
- bind_ip_all: ''
nopreallocj: ''
set_parameters:
enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/sharding_gle_auth_basics_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_gle_auth_basics_passthrough.yml
index bfbf8b8e405..5ea71956614 100644
--- a/buildscripts/resmokeconfig/suites/sharding_gle_auth_basics_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_gle_auth_basics_passthrough.yml
@@ -32,13 +32,11 @@ executor:
fixture:
class: ShardedClusterFixture
mongos_options:
- bind_ip_all: ''
keyFile: *keyFile
set_parameters:
enableTestCommands: 1
enableLocalhostAuthBypass: false
mongod_options:
- bind_ip_all: ''
auth: ''
keyFile: *keyFile
set_parameters:
diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml
index 298476e37ef..1248e6d80ad 100644
--- a/buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml
@@ -64,11 +64,9 @@ executor:
fixture:
class: ShardedClusterFixture
mongos_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
mongod_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
numInitialSyncAttempts: 1
diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
index b6b3dde7e5b..c2be25c8e84 100644
--- a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
@@ -63,11 +63,9 @@ executor:
fixture:
class: ShardedClusterFixture
mongos_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
mongod_options:
- bind_ip_all: ''
set_parameters:
enableTestCommands: 1
numInitialSyncAttempts: 1
diff --git a/buildscripts/resmokelib/core/programs.py b/buildscripts/resmokelib/core/programs.py
index aee4e6cf01a..ceef27dbe57 100644
--- a/buildscripts/resmokelib/core/programs.py
+++ b/buildscripts/resmokelib/core/programs.py
@@ -129,11 +129,13 @@ def mongos_program(logger, executable=None, process_kwargs=None, **kwargs):
return _process.Process(logger, args, **process_kwargs)
-def mongo_shell_program(logger, executable=None, filename=None, process_kwargs=None, **kwargs):
+def mongo_shell_program(logger, executable=None, connection_string=None, filename=None,
+ process_kwargs=None, **kwargs):
"""
- Returns a Process instance that starts a mongo shell with arguments
- constructed from 'kwargs'.
+ Returns a Process instance that starts a mongo shell with the given connection string and
+ arguments constructed from 'kwargs'.
"""
+ connection_string = utils.default_if_none(config.SHELL_CONN_STRING, connection_string)
executable = utils.default_if_none(executable, config.DEFAULT_MONGO_EXECUTABLE)
args = [executable]
@@ -191,7 +193,7 @@ def mongo_shell_program(logger, executable=None, filename=None, process_kwargs=N
eval_sb.append(str(kwargs.pop("eval")))
# Load this file to allow a callback to validate collections before shutting down mongod.
- eval_sb.append("load('jstests/libs/override_methods/validate_collections_on_shutdown.js')");
+ eval_sb.append("load('jstests/libs/override_methods/validate_collections_on_shutdown.js');")
eval_str = "; ".join(eval_sb)
args.append("--eval")
@@ -203,7 +205,7 @@ def mongo_shell_program(logger, executable=None, filename=None, process_kwargs=N
if config.SHELL_WRITE_MODE is not None:
kwargs["writeMode"] = config.SHELL_WRITE_MODE
- if config.SHELL_CONN_STRING is not None:
+ if connection_string is not None:
# The --host and --port options are ignored by the mongo shell when an explicit connection
# string is specified. We remove these options to avoid any ambiguity with what server the
# logged mongo shell invocation will connect to.
@@ -216,9 +218,8 @@ def mongo_shell_program(logger, executable=None, filename=None, process_kwargs=N
# Apply the rest of the command line arguments.
_apply_kwargs(args, kwargs)
-
- if config.SHELL_CONN_STRING is not None:
- args.append(config.SHELL_CONN_STRING)
+ if connection_string is not None:
+ args.append(connection_string)
# Have the mongos shell run the specified file.
args.append(filename)
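
Editor's note: a minimal sketch (not part of the commit) of how a caller might use the new connection_string parameter of mongo_shell_program(); the logger, fixture, and test file name below are assumed for illustration.

```python
# Minimal sketch (assumed names): launch a mongo shell against a fixture by
# passing its driver URL through the new connection_string parameter.
from buildscripts.resmokelib.core import programs

shell = programs.mongo_shell_program(
    logger,                                                  # an existing fixture/test logger
    connection_string=fixture.get_driver_connection_url(),   # e.g. "mongodb://localhost:20000"
    filename="jstests/core/example.js",                      # hypothetical test file
)
shell.start()
shell.wait()
```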
diff --git a/buildscripts/resmokelib/testing/fixtures/__init__.py b/buildscripts/resmokelib/testing/fixtures/__init__.py
index b1436ae9148..d8c3d8a2b53 100644
--- a/buildscripts/resmokelib/testing/fixtures/__init__.py
+++ b/buildscripts/resmokelib/testing/fixtures/__init__.py
@@ -4,12 +4,12 @@ Fixtures for executing JSTests against.
from __future__ import absolute_import
-from .interface import Fixture as _Fixture
+from .interface import NoOpFixture as _NoOpFixture
from .interface import make_fixture
from ...utils import autoloader as _autoloader
-NOOP_FIXTURE_CLASS = _Fixture.REGISTERED_NAME
+NOOP_FIXTURE_CLASS = _NoOpFixture.REGISTERED_NAME
# We dynamically load all modules in the fixtures/ package so that any Fixture classes declared
diff --git a/buildscripts/resmokelib/testing/fixtures/interface.py b/buildscripts/resmokelib/testing/fixtures/interface.py
index 2d950461927..6dffa24e430 100644
--- a/buildscripts/resmokelib/testing/fixtures/interface.py
+++ b/buildscripts/resmokelib/testing/fixtures/interface.py
@@ -7,6 +7,7 @@ from __future__ import absolute_import
import time
import pymongo
+import pymongo.errors
from ... import errors
from ... import logging
@@ -53,8 +54,6 @@ class Fixture(object):
self.logger = logger
self.job_num = job_num
- self.port = None # Port that the mongo shell should connect to.
-
def setup(self):
"""
Creates the fixture.
@@ -115,6 +114,24 @@ class Fixture(object):
raise NotImplementedError(
"get_driver_connection_url must be implemented by Fixture subclasses")
+ def mongo_client(self, read_preference=pymongo.ReadPreference.PRIMARY, timeout_millis=30000):
+ """
+ Returns a pymongo.MongoClient connecting to this fixture with a read
+ preference of 'read_preference'.
+
+ The PyMongo driver will wait up to 'timeout_millis' milliseconds
+ before concluding that the server is unavailable.
+ """
+
+ kwargs = {"connectTimeoutMS": timeout_millis}
+ if pymongo.version_tuple[0] >= 3:
+ kwargs["serverSelectionTimeoutMS"] = timeout_millis
+ kwargs["connect"] = True
+
+ return pymongo.MongoClient(host=self.get_driver_connection_url(),
+ read_preference=read_preference,
+ **kwargs)
+
def __str__(self):
return "%s (Job #%d)" % (self.__class__.__name__, self.job_num)
@@ -166,5 +183,21 @@ class ReplFixture(Fixture):
except pymongo.errors.ConnectionFailure:
remaining = deadline - time.time()
if remaining <= 0.0:
- raise errors.ServerFailure("Failed to connect to the primary on port %d" %
- self.port)
+ raise errors.ServerFailure(
+ "Failed to connect to ".format(self.get_driver_connection_url()))
+
+
+class NoOpFixture(Fixture):
+ """A Fixture implementation that does not start any servers.
+
+ Used when the MongoDB deployment is started by the JavaScript test itself with MongoRunner,
+ ReplSetTest, or ShardingTest.
+ """
+
+ REGISTERED_NAME = "NoOpFixture"
+
+ def get_internal_connection_string(self):
+ return None
+
+ def get_driver_connection_url(self):
+ return None
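
Editor's note: a minimal sketch (not part of the commit) of the new Fixture.mongo_client() helper in use, mirroring the call sites updated elsewhere in this patch; 'fixture' is assumed to be an already set-up fixture instance.

```python
# Minimal sketch (assumed 'fixture'): connect through the fixture's driver URL
# instead of the removed utils.new_mongo_client(port).
import pymongo

client = fixture.mongo_client(read_preference=pymongo.ReadPreference.SECONDARY,
                              timeout_millis=500)
client.admin.command("ping")  # raises pymongo.errors.ConnectionFailure if the server is unreachable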
diff --git a/buildscripts/resmokelib/testing/fixtures/masterslave.py b/buildscripts/resmokelib/testing/fixtures/masterslave.py
index bef701ff8ee..96b4ec5e96b 100644
--- a/buildscripts/resmokelib/testing/fixtures/masterslave.py
+++ b/buildscripts/resmokelib/testing/fixtures/masterslave.py
@@ -5,14 +5,13 @@ Master/slave fixture for executing JSTests against.
from __future__ import absolute_import
import os.path
-import socket
import pymongo
+import pymongo.errors
from . import interface
from . import standalone
from ... import config
-from ... import logging
from ... import utils
@@ -47,7 +46,7 @@ class MasterSlaveFixture(interface.ReplFixture):
dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
self._dbpath_prefix = os.path.join(dbpath_prefix,
- "job%d" % (self.job_num),
+ "job{}".format(self.job_num),
config.FIXTURE_SUBDIR)
self.master = None
@@ -57,7 +56,6 @@ class MasterSlaveFixture(interface.ReplFixture):
if self.master is None:
self.master = self._new_mongod_master()
self.master.setup()
- self.port = self.master.port
if self.slave is None:
self.slave = self._new_mongod_slave()
@@ -69,7 +67,7 @@ class MasterSlaveFixture(interface.ReplFixture):
# Do a replicated write to ensure that the slave has finished with its initial sync before
# starting to run any tests.
- client = utils.new_mongo_client(self.port)
+ client = self.master.mongo_client()
# Keep retrying this until it times out waiting for replication.
def insert_fn(remaining_secs):
@@ -158,7 +156,7 @@ class MasterSlaveFixture(interface.ReplFixture):
mongod_options = self.mongod_options.copy()
mongod_options.update(self.slave_options)
mongod_options["slave"] = ""
- mongod_options["source"] = "%s:%d" % (socket.gethostname(), self.port)
+ mongod_options["source"] = self.master.get_internal_connection_string()
mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "slave")
return self._new_mongod(mongod_logger, mongod_options)
diff --git a/buildscripts/resmokelib/testing/fixtures/replicaset.py b/buildscripts/resmokelib/testing/fixtures/replicaset.py
index 54e5c2fe52c..025ce257a7b 100644
--- a/buildscripts/resmokelib/testing/fixtures/replicaset.py
+++ b/buildscripts/resmokelib/testing/fixtures/replicaset.py
@@ -8,11 +8,12 @@ import os.path
import time
import pymongo
+import pymongo.errors
from . import interface
from . import standalone
from ... import config
-from ... import logging
+from ... import errors
from ... import utils
@@ -36,7 +37,8 @@ class ReplicaSetFixture(interface.ReplFixture):
write_concern_majority_journal_default=None,
auth_options=None,
replset_config_options=None,
- voting_secondaries=True):
+ voting_secondaries=True,
+ all_nodes_electable=False):
interface.ReplFixture.__init__(self, logger, job_num)
@@ -49,6 +51,7 @@ class ReplicaSetFixture(interface.ReplFixture):
self.auth_options = auth_options
self.replset_config_options = utils.default_if_none(replset_config_options, {})
self.voting_secondaries = voting_secondaries
+ self.all_nodes_electable = all_nodes_electable
# The dbpath in mongod_options is used as the dbpath prefix for replica set members and
# takes precedence over other settings. The ShardedClusterFixture uses this parameter to
@@ -60,7 +63,7 @@ class ReplicaSetFixture(interface.ReplFixture):
dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
self._dbpath_prefix = os.path.join(dbpath_prefix,
- "job%d" % (self.job_num),
+ "job{}".format(self.job_num),
config.FIXTURE_SUBDIR)
self.nodes = []
@@ -87,18 +90,17 @@ class ReplicaSetFixture(interface.ReplFixture):
self.initial_sync_node.setup()
self.initial_sync_node.await_ready()
- self.port = self.get_primary().port
-
# We need only to wait to connect to the first node of the replica set because we first
# initiate it as a single node replica set.
- self.get_primary().await_ready()
+ self.nodes[0].await_ready()
# Initiate the replica set.
members = []
for (i, node) in enumerate(self.nodes):
member_info = {"_id": i, "host": node.get_internal_connection_string()}
if i > 0:
- member_info["priority"] = 0
+ if not self.all_nodes_electable:
+ member_info["priority"] = 0
if i >= 7 or not self.voting_secondaries:
# Only 7 nodes in a replica set can vote, so the other members must still be
# non-voting when this fixture is configured to have voting secondaries.
@@ -112,7 +114,7 @@ class ReplicaSetFixture(interface.ReplFixture):
"votes": 0})
config = {"_id": self.replset_name}
- client = utils.new_mongo_client(port=self.port)
+ client = self.nodes[0].mongo_client()
if self.auth_options is not None:
auth_db = client[self.auth_options["authenticationDatabase"]]
@@ -146,10 +148,10 @@ class ReplicaSetFixture(interface.ReplFixture):
self._configure_repl_set(client, {"replSetInitiate": config})
self._await_primary()
- if self.get_secondaries():
+ if self.nodes[1:]:
# Wait to connect to each of the secondaries before running the replSetReconfig
# command.
- for node in self.get_secondaries():
+ for node in self.nodes[1:]:
node.await_ready()
config["version"] = 2
config["members"] = members
@@ -184,24 +186,28 @@ class ReplicaSetFixture(interface.ReplFixture):
def _await_primary(self):
# Wait for the primary to be elected.
- client = utils.new_mongo_client(port=self.port)
+ # Since this method is called at startup we expect the first node to be primary even when
+ # self.all_nodes_electable is True.
+ primary = self.nodes[0]
+ client = primary.mongo_client()
while True:
- self.logger.info("Waiting for primary on port %d to be elected.", self.port)
+ self.logger.info("Waiting for primary on port %d to be elected.", primary.port)
is_master = client.admin.command("isMaster")["ismaster"]
if is_master:
break
time.sleep(0.1) # Wait a little bit before trying again.
- self.logger.info("Primary on port %d successfully elected.", self.port)
+ self.logger.info("Primary on port %d successfully elected.", primary.port)
def _await_secondaries(self):
# Wait for the secondaries to become available.
- secondaries = self.get_secondaries()
+ # Since this method is called at startup we expect the nodes 1 to n to be secondaries even
+ # when self.all_nodes_electable is True.
+ secondaries = self.nodes[1:]
if self.initial_sync_node:
secondaries.append(self.initial_sync_node)
for secondary in secondaries:
- client = utils.new_mongo_client(port=secondary.port,
- read_preference=pymongo.ReadPreference.SECONDARY)
+ client = secondary.mongo_client(read_preference=pymongo.ReadPreference.SECONDARY)
while True:
self.logger.info("Waiting for secondary on port %d to become available.",
secondary.port)
@@ -241,13 +247,38 @@ class ReplicaSetFixture(interface.ReplFixture):
return running
- def get_primary(self):
- # The primary is always the first element of the 'nodes' list because all other members of
- # the replica set are configured with priority=0.
- return self.nodes[0]
+ def get_primary(self, timeout_secs=30):
+ if not self.all_nodes_electable:
+ # The primary is always the first element of the 'nodes' list because all other members
+ # of the replica set are configured with priority=0.
+ return self.nodes[0]
+
+ start = time.time()
+ clients = {}
+ while True:
+ for node in self.nodes:
+ self._check_get_primary_timeout(start, timeout_secs)
+ client = clients.get(node.port)
+ if not client:
+ client = node.mongo_client()
+ clients[node.port] = client
+ is_master = client.admin.command("isMaster")["ismaster"]
+ if is_master:
+ self.logger.info("The node on port %d is primary of replica set '%s'",
+ node.port, self.replset_name)
+ return node
+
+ def _check_get_primary_timeout(self, start, timeout_secs):
+ now = time.time()
+ if (now - start) >= timeout_secs:
+ msg = "Timed out while waiting for a primary for replica set '{}'.".format(
+ self.replset_name)
+ self.logger.error(msg)
+ raise errors.ServerFailure(msg)
def get_secondaries(self):
- return self.nodes[1:]
+ primary = self.get_primary()
+ return [node for node in self.nodes if node.port != primary.port]
def get_initial_sync_node(self):
return self.initial_sync_node
@@ -261,7 +292,7 @@ class ReplicaSetFixture(interface.ReplFixture):
mongod_logger = self._get_logger_for_mongod(index)
mongod_options = self.mongod_options.copy()
mongod_options["replSet"] = replset_name
- mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "node%d" % (index))
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "node{}".format(index))
return standalone.MongoDFixture(mongod_logger,
self.job_num,
@@ -275,13 +306,15 @@ class ReplicaSetFixture(interface.ReplFixture):
sync member of a replica-set.
"""
- if index == 0:
- node_name = "primary"
- elif index == self.initial_sync_node_idx:
+ if index == self.initial_sync_node_idx:
node_name = "initsync"
+ elif self.all_nodes_electable:
+ node_name = "node{}".format(index)
+ elif index == 0:
+ node_name = "primary"
else:
suffix = str(index - 1) if self.num_nodes > 2 else ""
- node_name = "secondary%s" % suffix
+ node_name = "secondary{}".format(suffix)
return self.logger.new_fixture_node_logger(node_name)
@@ -298,8 +331,14 @@ class ReplicaSetFixture(interface.ReplFixture):
if self.replset_name is None:
raise ValueError("Must call setup() before calling get_driver_connection_url()")
- conn_strs = [node.get_internal_connection_string() for node in self.nodes]
- if self.initial_sync_node:
- conn_strs.append(self.initial_sync_node.get_internal_connection_string())
-
- return "mongodb://" + ",".join(conn_strs) + "/?replicaSet=" + self.replset_name
+ if self.all_nodes_electable:
+ # We use a replica set connection string when all nodes are electable because we
+ # anticipate the client will want to gracefully handle any failovers.
+ conn_strs = [node.get_internal_connection_string() for node in self.nodes]
+ if self.initial_sync_node:
+ conn_strs.append(self.initial_sync_node.get_internal_connection_string())
+ return "mongodb://" + ",".join(conn_strs) + "/?replicaSet=" + self.replset_name
+ else:
+ # We return a direct connection to the expected primary when only the first node is
+ # electable because we want the client to error out if a stepdown occurs.
+ return self.nodes[0].get_driver_connection_url()
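
Editor's note: a minimal sketch (not part of the commit) of the failover-aware API added here: constructing a ReplicaSetFixture with all_nodes_electable and re-resolving the primary after an election. The logger and keyword values are assumed for illustration.

```python
# Minimal sketch (assumed logger/options): with all_nodes_electable=True the
# fixture no longer pins the primary to nodes[0], so callers poll for it.
from buildscripts.resmokelib.testing.fixtures import replicaset

fixture = replicaset.ReplicaSetFixture(logger, job_num=0, num_nodes=3,
                                       all_nodes_electable=True)
fixture.setup()
fixture.await_ready()

primary = fixture.get_primary(timeout_secs=30)  # polls isMaster on each node until one reports ismaster
secondaries = fixture.get_secondaries()         # every node whose port differs from the primary's
```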
diff --git a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
index 2c65c70e5a8..e9104e114ca 100644
--- a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
+++ b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
@@ -10,6 +10,7 @@ import socket
import time
import pymongo
+import pymongo.errors
from . import interface
from . import standalone
@@ -115,9 +116,8 @@ class ShardedClusterFixture(interface.Fixture):
# Wait for the mongos
self.mongos.await_ready()
- self.port = self.mongos.port
- client = utils.new_mongo_client(port=self.port)
+ client = self.mongo_client()
if self.auth_options is not None:
auth_db = client[self.auth_options["authenticationDatabase"]]
auth_db.authenticate(self.auth_options["username"],
@@ -184,7 +184,7 @@ class ShardedClusterFixture(interface.Fixture):
if self.mongos is None:
raise ValueError("Must call setup() before calling get_internal_connection_string()")
- return "%s:%d" % (socket.gethostname(), self.mongos.port)
+ return self.mongos.get_internal_connection_string()
def get_driver_connection_url(self):
return "mongodb://" + self.get_internal_connection_string()
@@ -261,16 +261,11 @@ class ShardedClusterFixture(interface.Fixture):
mongos_logger = self.logger.new_fixture_node_logger("mongos")
mongos_options = copy.deepcopy(self.mongos_options)
- configdb_hostname = socket.gethostname()
if self.separate_configsvr:
- configdb_replset = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
- configdb_port = self.configsvr.port
- mongos_options["configdb"] = "%s/%s:%d" % (configdb_replset,
- configdb_hostname,
- configdb_port)
+ mongos_options["configdb"] = self.configsvr.get_internal_connection_string()
else:
- mongos_options["configdb"] = "%s:%d" % (configdb_hostname, self.shards[0].port)
+ mongos_options["configdb"] = "localhost:%d" % self.shards[0].port
return _MongoSFixture(mongos_logger,
self.job_num,
@@ -312,6 +307,7 @@ class _MongoSFixture(interface.Fixture):
self.mongos_options = utils.default_if_none(mongos_options, {}).copy()
self.mongos = None
+ self.port = None
def setup(self):
if "port" not in self.mongos_options:
@@ -346,7 +342,7 @@ class _MongoSFixture(interface.Fixture):
try:
# Use a shorter connection timeout to more closely satisfy the requested deadline.
- client = utils.new_mongo_client(self.port, timeout_millis=500)
+ client = self.mongo_client(timeout_millis=500)
client.admin.command("ping")
break
except pymongo.errors.ConnectionFailure:
@@ -391,3 +387,12 @@ class _MongoSFixture(interface.Fixture):
def is_running(self):
return self.mongos is not None and self.mongos.poll() is None
+
+ def get_internal_connection_string(self):
+ if self.mongos is None:
+ raise ValueError("Must call setup() before calling get_internal_connection_string()")
+
+ return "localhost:%d" % self.port
+
+ def get_driver_connection_url(self):
+ return "mongodb://" + self.get_internal_connection_string()
diff --git a/buildscripts/resmokelib/testing/fixtures/standalone.py b/buildscripts/resmokelib/testing/fixtures/standalone.py
index a657959259f..1fd8a80c7e1 100644
--- a/buildscripts/resmokelib/testing/fixtures/standalone.py
+++ b/buildscripts/resmokelib/testing/fixtures/standalone.py
@@ -7,10 +7,10 @@ from __future__ import absolute_import
import os
import os.path
import shutil
-import socket
import time
import pymongo
+import pymongo.errors
from . import interface
from ... import config
@@ -53,11 +53,12 @@ class MongoDFixture(interface.Fixture):
dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
self.mongod_options["dbpath"] = os.path.join(dbpath_prefix,
- "job%d" % (self.job_num),
+ "job{}".format(self.job_num),
config.FIXTURE_SUBDIR)
self._dbpath = self.mongod_options["dbpath"]
self.mongod = None
+ self.port = None
def setup(self):
if not self.preserve_dbpath:
@@ -96,20 +97,21 @@ class MongoDFixture(interface.Fixture):
# Check whether the mongod exited for some reason.
exit_code = self.mongod.poll()
if exit_code is not None:
- raise errors.ServerFailure("Could not connect to mongod on port %d, process ended"
- " unexpectedly with code %d." % (self.port, exit_code))
+ raise errors.ServerFailure("Could not connect to mongod on port {}, process ended"
+ " unexpectedly with code {}.".format(
+ self.port, exit_code))
try:
# Use a shorter connection timeout to more closely satisfy the requested deadline.
- client = utils.new_mongo_client(self.port, timeout_millis=500)
+ client = self.mongo_client(timeout_millis=500)
client.admin.command("ping")
break
except pymongo.errors.ConnectionFailure:
remaining = deadline - time.time()
if remaining <= 0.0:
raise errors.ServerFailure(
- "Failed to connect to mongod on port %d after %d seconds"
- % (self.port, MongoDFixture.AWAIT_READY_TIMEOUT_SECS))
+ "Failed to connect to mongod on port {} after {} seconds".format(
+ self.port, MongoDFixture.AWAIT_READY_TIMEOUT_SECS))
self.logger.info("Waiting to connect to mongod on port %d.", self.port)
time.sleep(0.1) # Wait a little bit before trying again.
@@ -151,7 +153,7 @@ class MongoDFixture(interface.Fixture):
if self.mongod is None:
raise ValueError("Must call setup() before calling get_internal_connection_string()")
- return "%s:%d" % (socket.gethostname(), self.port)
+ return "localhost:%d" % self.port
def get_driver_connection_url(self):
return "mongodb://" + self.get_internal_connection_string()
diff --git a/buildscripts/resmokelib/testing/hooks/initialsync.py b/buildscripts/resmokelib/testing/hooks/initialsync.py
index 1b99d766622..20b422bccb3 100644
--- a/buildscripts/resmokelib/testing/hooks/initialsync.py
+++ b/buildscripts/resmokelib/testing/hooks/initialsync.py
@@ -69,7 +69,7 @@ class BackgroundInitialSync(jsfile.JsCustomBehavior):
def _after_test_impl(self, test, test_report, description):
self.tests_run += 1
sync_node = self.fixture.get_initial_sync_node()
- sync_node_conn = utils.new_mongo_client(port=sync_node.port)
+ sync_node_conn = sync_node.mongo_client()
# If it's been 'n' tests so far, wait for the initial sync node to finish syncing.
if self.tests_run >= self.n:
@@ -163,7 +163,7 @@ class IntermediateInitialSync(jsfile.JsCustomBehavior):
def _after_test_impl(self, test, test_report, description):
sync_node = self.fixture.get_initial_sync_node()
- sync_node_conn = utils.new_mongo_client(port=sync_node.port)
+ sync_node_conn = sync_node.mongo_client()
if self.use_resync:
self.hook_test_case.logger.info("Calling resync on initial sync node...")
diff --git a/buildscripts/resmokelib/testing/hooks/periodic_kill_secondaries.py b/buildscripts/resmokelib/testing/hooks/periodic_kill_secondaries.py
index dc19f05f26e..ecbb3969243 100644
--- a/buildscripts/resmokelib/testing/hooks/periodic_kill_secondaries.py
+++ b/buildscripts/resmokelib/testing/hooks/periodic_kill_secondaries.py
@@ -19,7 +19,6 @@ from ..fixtures import interface as fixture
from ..fixtures import replicaset
from ..testcases import interface as testcase
from ... import errors
-from ... import utils
class PeriodicKillSecondaries(interface.CustomBehavior):
@@ -31,19 +30,18 @@ class PeriodicKillSecondaries(interface.CustomBehavior):
DEFAULT_PERIOD_SECS = 30
- def __init__(self, hook_logger, fixture, period_secs=DEFAULT_PERIOD_SECS):
- if not isinstance(fixture, replicaset.ReplicaSetFixture):
- raise TypeError("%s either does not support replication or does not support writing to"
- " its oplog early"
- % (fixture.__class__.__name__))
+ def __init__(self, hook_logger, rs_fixture, period_secs=DEFAULT_PERIOD_SECS):
+ if not isinstance(rs_fixture, replicaset.ReplicaSetFixture):
+ raise TypeError("{} either does not support replication or does not support writing to"
+ " its oplog early".format(rs_fixture.__class__.__name__))
- if fixture.num_nodes <= 1:
+ if rs_fixture.num_nodes <= 1:
raise ValueError("PeriodicKillSecondaries requires the replica set to contain at least"
" one secondary")
description = ("PeriodicKillSecondaries (kills the secondary after running tests for a"
" configurable period of time)")
- interface.CustomBehavior.__init__(self, hook_logger, fixture, description)
+ interface.CustomBehavior.__init__(self, hook_logger, rs_fixture, description)
self._period_secs = period_secs
self._start_time = None
@@ -59,20 +57,10 @@ class PeriodicKillSecondaries(interface.CustomBehavior):
# The "rsSyncApplyStop" failpoint is already enabled.
return
- # Enable the "rsSyncApplyStop" failpoint on each of the secondaries to prevent them from
- # applying any oplog entries while the test is running.
for secondary in self.fixture.get_secondaries():
- client = utils.new_mongo_client(port=secondary.port)
- try:
- client.admin.command(bson.SON([
- ("configureFailPoint", "rsSyncApplyStop"),
- ("mode", "alwaysOn")]))
- except pymongo.errors.OperationFailure as err:
- self.logger.exception(
- "Unable to disable oplog application on the mongod on port %d", secondary.port)
- raise errors.ServerFailure(
- "Unable to disable oplog application on the mongod on port %d: %s"
- % (secondary.port, err.args[0]))
+ # Enable the "rsSyncApplyStop" failpoint on the secondary to prevent them from
+ # applying any oplog entries while the test is running.
+ self._enable_rssyncapplystop(secondary)
self._start_time = time.time()
@@ -91,7 +79,7 @@ class PeriodicKillSecondaries(interface.CustomBehavior):
self.hook_test_case = testcase.TestCase(
self.logger,
"Hook",
- "%s:%s" % (self._last_test_name, self.logger_name))
+ "{}:{}".format(self._last_test_name, self.logger_name))
interface.CustomBehavior.start_dynamic_test(self.hook_test_case, test_report)
try:
@@ -127,19 +115,7 @@ class PeriodicKillSecondaries(interface.CustomBehavior):
for secondary in self.fixture.get_secondaries():
# Disable the "rsSyncApplyStop" failpoint on the secondary to have it resume applying
# oplog entries.
- for secondary in self.fixture.get_secondaries():
- client = utils.new_mongo_client(port=secondary.port)
- try:
- client.admin.command(bson.SON([
- ("configureFailPoint", "rsSyncApplyStop"),
- ("mode", "off")]))
- except pymongo.errors.OperationFailure as err:
- self.logger.exception(
- "Unable to re-enable oplog application on the mongod on port %d",
- secondary.port)
- raise errors.ServerFailure(
- "Unable to re-enable oplog application on the mongod on port %d: %s"
- % (secondary.port, err.args[0]))
+ self._disable_rssyncapplystop(secondary)
# Wait a little bit for the secondary to start applying oplog entries so that we are more
# likely to kill the mongod process while it is partway into applying a batch.
@@ -149,12 +125,11 @@ class PeriodicKillSecondaries(interface.CustomBehavior):
# we still detect some cases in which the secondary has already crashed.
if not secondary.is_running():
raise errors.ServerFailure(
- "mongod on port %d was expected to be running in"
- " PeriodicKillSecondaries.after_test(), but wasn't."
- % (secondary.port))
+ "mongod on port {} was expected to be running in"
+ " PeriodicKillSecondaries.after_test(), but wasn't.".format(secondary.port))
self.hook_test_case.logger.info(
- "Killing the secondary on port %d..." % (secondary.port))
+ "Killing the secondary on port %d...", secondary.port)
secondary.mongod.stop(kill=True)
# Teardown may or may not be considered a success as a result of killing a secondary, so we
@@ -179,7 +154,8 @@ class PeriodicKillSecondaries(interface.CustomBehavior):
teardown_success = secondary.teardown()
if not teardown_success:
raise errors.ServerFailure(
- "%s did not exit cleanly after reconciling the end of its oplog" % (secondary))
+ "{} did not exit cleanly after reconciling the end of its oplog".format(
+ secondary))
self.hook_test_case.logger.info(
"Starting the fixture back up again with its data files intact...")
@@ -217,8 +193,7 @@ class PeriodicKillSecondaries(interface.CustomBehavior):
teardown_success = self.fixture.teardown()
if not teardown_success:
raise errors.ServerFailure(
- "%s did not exit cleanly after verifying data consistency"
- % (self.fixture))
+ "{} did not exit cleanly after verifying data consistency".format(self.fixture))
self.hook_test_case.logger.info("Starting the fixture back up again...")
self.fixture.setup()
@@ -232,7 +207,7 @@ class PeriodicKillSecondaries(interface.CustomBehavior):
secondary.setup()
secondary.await_ready()
- client = utils.new_mongo_client(port=secondary.port)
+ client = secondary.mongo_client()
minvalid_doc = client.local["replset.minvalid"].find_one()
latest_oplog_doc = client.local["oplog.rs"].find_one(
@@ -261,26 +236,27 @@ class PeriodicKillSecondaries(interface.CustomBehavior):
if not begin_ts <= minvalid_ts:
raise errors.ServerFailure(
- "The condition begin <= minValid (%s <= %s) doesn't hold: minValid"
- " document=%s, latest oplog entry=%s"
- % (begin_ts, minvalid_ts, minvalid_doc, latest_oplog_doc))
+ "The condition begin <= minValid ({} <= {}) doesn't hold: minValid"
+ " document={}, latest oplog entry={}".format(
+ begin_ts, minvalid_ts, minvalid_doc, latest_oplog_doc))
if not minvalid_ts <= oplog_delete_point_ts:
raise errors.ServerFailure(
- "The condition minValid <= oplogDeletePoint (%s <= %s) doesn't hold:"
- " minValid document=%s, latest oplog entry=%s"
- % (minvalid_ts, oplog_delete_point_ts, minvalid_doc, latest_oplog_doc))
+ "The condition minValid <= oplogDeletePoint ({} <= {}) doesn't hold:"
+ " minValid document={}, latest oplog entry={}".format(
+ minvalid_ts, oplog_delete_point_ts, minvalid_doc, latest_oplog_doc))
if not minvalid_ts <= latest_oplog_entry_ts:
raise errors.ServerFailure(
- "The condition minValid <= top of oplog (%s <= %s) doesn't hold: minValid"
- " document=%s, latest oplog entry=%s"
- % (minvalid_ts, latest_oplog_entry_ts, minvalid_doc, latest_oplog_doc))
+ "The condition minValid <= top of oplog ({} <= {}) doesn't hold: minValid"
+ " document={}, latest oplog entry={}".format(
+ minvalid_ts, latest_oplog_entry_ts, minvalid_doc, latest_oplog_doc))
teardown_success = secondary.teardown()
if not teardown_success:
raise errors.ServerFailure(
- "%s did not exit cleanly after being started up as a standalone" % (secondary))
+ "{} did not exit cleanly after being started up as a standalone".format(
+ secondary))
except pymongo.errors.OperationFailure as err:
self.hook_test_case.logger.exception(
"Failed to read the minValid document or the latest oplog entry from the mongod on"
@@ -288,14 +264,13 @@ class PeriodicKillSecondaries(interface.CustomBehavior):
secondary.port)
raise errors.ServerFailure(
"Failed to read the minValid document or the latest oplog entry from the mongod on"
- " port %d: %s"
- % (secondary.port, err.args[0]))
+ " port {}: {}".format(secondary.port, err.args[0]))
finally:
# Set the secondary's options back to their original values.
secondary.mongod_options["replSet"] = replset_name
def _await_secondary_state(self, secondary):
- client = utils.new_mongo_client(port=secondary.port)
+ client = secondary.mongo_client()
try:
client.admin.command(bson.SON([
("replSetTest", 1),
@@ -307,5 +282,36 @@ class PeriodicKillSecondaries(interface.CustomBehavior):
secondary.port,
fixture.ReplFixture.AWAIT_REPL_TIMEOUT_MINS * 60)
raise errors.ServerFailure(
- "mongod on port %d failed to reach state SECONDARY after %d seconds: %s"
- % (secondary.port, fixture.ReplFixture.AWAIT_REPL_TIMEOUT_MINS * 60, err.args[0]))
+ "mongod on port {} failed to reach state SECONDARY after {} seconds: {}".format(
+ secondary.port, fixture.ReplFixture.AWAIT_REPL_TIMEOUT_MINS * 60, err.args[0]))
+
+ def _enable_rssyncapplystop(self, secondary):
+ # Enable the "rsSyncApplyStop" failpoint on the secondary to prevent them from
+ # applying any oplog entries while the test is running.
+ client = secondary.mongo_client()
+ try:
+ client.admin.command(bson.SON([
+ ("configureFailPoint", "rsSyncApplyStop"),
+ ("mode", "alwaysOn")]))
+ except pymongo.errors.OperationFailure as err:
+ self.logger.exception(
+ "Unable to disable oplog application on the mongod on port %d", secondary.port)
+ raise errors.ServerFailure(
+ "Unable to disable oplog application on the mongod on port {}: {}".format(
+ secondary.port, err.args[0]))
+
+ def _disable_rssyncapplystop(self, secondary):
+ # Disable the "rsSyncApplyStop" failpoint on the secondary to have it resume applying
+ # oplog entries.
+ client = secondary.mongo_client()
+ try:
+ client.admin.command(bson.SON([
+ ("configureFailPoint", "rsSyncApplyStop"),
+ ("mode", "off")]))
+ except pymongo.errors.OperationFailure as err:
+ self.logger.exception(
+ "Unable to re-enable oplog application on the mongod on port %d",
+ secondary.port)
+ raise errors.ServerFailure(
+ "Unable to re-enable oplog application on the mongod on port {}: {}".format(
+ secondary.port, err.args[0]))
diff --git a/buildscripts/resmokelib/testing/testcases/json_schema_test.py b/buildscripts/resmokelib/testing/testcases/json_schema_test.py
index 802dbb792a5..51a8daa03de 100644
--- a/buildscripts/resmokelib/testing/testcases/json_schema_test.py
+++ b/buildscripts/resmokelib/testing/testcases/json_schema_test.py
@@ -9,6 +9,7 @@ from ... import config
from ... import core
from ... import utils
+
class JSONSchemaTestCase(interface.TestCase):
"""
A JSON Schema test to execute.
@@ -35,9 +36,6 @@ class JSONSchemaTestCase(interface.TestCase):
def configure(self, fixture, *args, **kwargs):
interface.TestCase.configure(self, fixture, *args, **kwargs)
- if self.fixture.port is not None:
- self.shell_options["port"] = self.fixture.port
-
global_vars = self.shell_options.get("global_vars", {}).copy()
test_data = global_vars.get("TestData", {}).copy()
@@ -58,7 +56,9 @@ class JSONSchemaTestCase(interface.TestCase):
raise
def _make_process(self):
- return core.programs.mongo_shell_program(self.logger,
- executable=self.shell_executable,
- filename=JSONSchemaTestCase.TEST_RUNNER_FILE,
- **self.shell_options)
\ No newline at end of file
+ return core.programs.mongo_shell_program(
+ self.logger,
+ executable=self.shell_executable,
+ connection_string=self.fixture.get_driver_connection_url(),
+ filename=JSONSchemaTestCase.TEST_RUNNER_FILE,
+ **self.shell_options)
diff --git a/buildscripts/resmokelib/testing/testcases/jstest.py b/buildscripts/resmokelib/testing/testcases/jstest.py
index a38c3859228..c1c93925936 100644
--- a/buildscripts/resmokelib/testing/testcases/jstest.py
+++ b/buildscripts/resmokelib/testing/testcases/jstest.py
@@ -62,9 +62,6 @@ class JSTestCase(interface.TestCase):
def configure(self, fixture, num_clients=DEFAULT_CLIENT_NUM, *args, **kwargs):
interface.TestCase.configure(self, fixture, *args, **kwargs)
- if self.fixture.port is not None:
- self.shell_options["port"] = self.fixture.port
-
global_vars = self.shell_options.get("global_vars", {}).copy()
data_dir = self._get_data_dir(global_vars)
@@ -179,10 +176,12 @@ class JSTestCase(interface.TestCase):
# set to self.logger.
logger = utils.default_if_none(logger, self.logger)
- return core.programs.mongo_shell_program(logger,
- executable=self.shell_executable,
- filename=self.js_filename,
- **shell_options)
+ return core.programs.mongo_shell_program(
+ logger,
+ executable=self.shell_executable,
+ filename=self.js_filename,
+ connection_string=self.fixture.get_driver_connection_url(),
+ **shell_options)
def _run_test_in_thread(self, thread_id):
# Make a logger for each thread. When this method gets called self.logger has been
diff --git a/buildscripts/resmokelib/utils/__init__.py b/buildscripts/resmokelib/utils/__init__.py
index df387cc3323..77cc6952fb0 100644
--- a/buildscripts/resmokelib/utils/__init__.py
+++ b/buildscripts/resmokelib/utils/__init__.py
@@ -6,7 +6,6 @@ from __future__ import absolute_import
import os.path
-import pymongo
import yaml
@@ -69,20 +68,3 @@ def load_yaml(value):
return yaml.safe_load(value)
except yaml.YAMLError as err:
raise ValueError("Attempted to parse invalid YAML value '%s': %s" % (value, err))
-
-
-def new_mongo_client(port, read_preference=pymongo.ReadPreference.PRIMARY, timeout_millis=30000):
- """
- Returns a pymongo.MongoClient connected on 'port' with a read
- preference of 'read_preference'.
-
- The PyMongo driver will wait up to 'timeout_millis' milliseconds
- before concluding that the server is unavailable.
- """
-
- kwargs = {"connectTimeoutMS": timeout_millis}
- if pymongo.version_tuple[0] >= 3:
- kwargs["serverSelectionTimeoutMS"] = timeout_millis
- kwargs["connect"] = True
-
- return pymongo.MongoClient(port=port, read_preference=read_preference, **kwargs)
diff --git a/jstests/hooks/run_check_repl_dbhash.js b/jstests/hooks/run_check_repl_dbhash.js
index 479abf5cf25..1afea4a8539 100644
--- a/jstests/hooks/run_check_repl_dbhash.js
+++ b/jstests/hooks/run_check_repl_dbhash.js
@@ -9,14 +9,8 @@
// fixture. Please do not use it with other master/slave clusters.
var MasterSlaveDBHashTest = function(primaryHost) {
var master = new Mongo(primaryHost);
- var resolvedHost = getHostName();
var masterPort = master.host.split(':')[1];
- // The 'host' property is modified manually because 'localhost' is used by default in a new
- // Mongo() connection. We set the value to the real hostname because that is what the server
- // uses.
- master.host = resolvedHost + ':' + masterPort;
-
- var slave = new Mongo(resolvedHost + ':' + String(parseInt(masterPort) + 1));
+ var slave = new Mongo('localhost:' + String(parseInt(masterPort) + 1));
this.nodeList = function() {
return [master.host, slave.host];