author     Mark Benvenuto <mark.benvenuto@mongodb.com>   2016-02-10 16:48:01 -0500
committer  Mark Benvenuto <mark.benvenuto@mongodb.com>   2016-02-26 17:07:20 -0500
commit     a8800cf352f1efd5f442764a151586d1701dc9a2 (patch)
tree       14ebd2bdf2927bd42ecdbda11805b7cc44e9532a
parent     822911ad424b140ffd77be43f2578a1474755539 (diff)
download   mongo-a8800cf352f1efd5f442764a151586d1701dc9a2.tar.gz
TOOLS-836 Use Resmoke to run tests
-rw-r--r--  common.yml | 27
-rw-r--r--  test/qa-tests/buildscripts/__init__.py | 1
-rwxr-xr-x  test/qa-tests/buildscripts/resmoke.py | 216
-rw-r--r--  test/qa-tests/buildscripts/resmokeconfig/__init__.py | 4
-rw-r--r--  test/qa-tests/buildscripts/resmokeconfig/loggers/__init__.py | 36
-rw-r--r--  test/qa-tests/buildscripts/resmokeconfig/loggers/buildlogger.yml | 13
-rw-r--r--  test/qa-tests/buildscripts/resmokeconfig/loggers/console.yml | 13
-rw-r--r--  test/qa-tests/buildscripts/resmokeconfig/loggers/file.yml | 19
-rw-r--r--  test/qa-tests/buildscripts/resmokeconfig/loggers/suppress.yml | 10
-rw-r--r--  test/qa-tests/buildscripts/resmokeconfig/suites/__init__.py | 36
-rw-r--r--  test/qa-tests/buildscripts/resmokeconfig/suites/core.yml | 27
-rw-r--r--  test/qa-tests/buildscripts/resmokeconfig/suites/core_ssl.yml | 38
-rw-r--r--  test/qa-tests/buildscripts/resmokeconfig/suites/restore_archive.yml | 23
-rw-r--r--  test/qa-tests/buildscripts/resmokeconfig/suites/restore_gzip.yml | 21
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/__init__.py | 7
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/config.py | 165
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/core/__init__.py | 5
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/core/network.py | 114
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/core/pipe.py | 87
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/core/process.py | 234
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/core/programs.py | 311
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/errors.py | 52
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/logging/__init__.py | 14
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/logging/buildlogger.py | 284
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/logging/config.py | 161
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/logging/flush.py | 97
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/logging/formatters.py | 50
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/logging/handlers.py | 178
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/logging/loggers.py | 37
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/parser.py | 368
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/selector.py | 291
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/__init__.py | 9
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/executor.py | 307
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/fixtures/__init__.py | 32
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/fixtures/interface.py | 128
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/fixtures/masterslave.py | 209
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/fixtures/replicaset.py | 211
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/fixtures/shardedcluster.py | 347
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/fixtures/standalone.py | 151
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/hooks.py | 704
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/job.py | 195
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/report.py | 330
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/suite.py | 140
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/summary.py | 22
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/testcases.py | 407
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/testing/testgroup.py | 132
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/utils/__init__.py | 88
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/utils/globstar.py | 202
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/utils/jscomment.py | 78
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/utils/queue.py | 52
-rw-r--r--  test/qa-tests/buildscripts/resmokelib/utils/timer.py | 125
51 files changed, 6803 insertions(+), 5 deletions(-)
diff --git a/common.yml b/common.yml
index 96690bd89c7..42850aa0d5b 100644
--- a/common.yml
+++ b/common.yml
@@ -479,6 +479,9 @@ functions:
cat > mci.buildlogger <<END_OF_CREDS
slavename='${slave}'
passwd='${passwd}'
+ builder='MCI_${build_variant}'
+ build_num=${builder_num}
+ build_phase='${task_name}_${execution}'
END_OF_CREDS
"start mongod":
@@ -1049,6 +1052,7 @@ tasks:
params:
working_dir: src
script: |
+ set -o verbose
set -e
mv ./mongodb/mongod${extension} .
mv ./mongodb/mongo${extension} .
@@ -1059,7 +1063,8 @@ tasks:
rm -rf /data/install /data/multiversion
python buildscripts/setup_multiversion_mongodb.py /data/install /data/multiversion ${arch} "2.6" "2.4" --latest ${smoke_use_ssl} --os="${mongo_os}" || true
chmod 400 jstests/libs/key*
- PATH=$PATH:/data/multiversion python buildscripts/smoke.py ${smoke_use_ssl} --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json --continue-on-failure --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} --authMechanism SCRAM-SHA-1 top oplog dump files restore stat bson export import ssl unstable
+
+ PATH=$PATH:/data/multiversion python buildscripts/resmoke.py --suite=core${resmoke_use_ssl} --continueOnFailure --log=buildlogger --reportFile=report.json
- name: qa-tests
@@ -1109,6 +1114,7 @@ tasks:
params:
working_dir: src
script: |
+ set -o verbose
set -e
mv ./mongodb/mongod${extension} .
mv ./mongodb/mongo${extension} .
@@ -1119,7 +1125,8 @@ tasks:
rm -rf /data/install /data/multiversion
python buildscripts/setup_multiversion_mongodb.py /data/install /data/multiversion ${arch} "2.6" "2.4" --latest ${smoke_use_ssl} --os="${mongo_os}" || true
chmod 400 jstests/libs/key*
- PATH=$PATH:/data/multiversion python buildscripts/smoke.py ${smoke_use_ssl} --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json --continue-on-failure --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} --authMechanism SCRAM-SHA-1 top oplog dump files restore stat bson export import ssl
+
+ PATH=$PATH:/data/multiversion python buildscripts/resmoke.py --suite=core${resmoke_use_ssl} --continueOnFailure --log=buildlogger --reportFile=report.json
- name: qa-dump-restore-archiving
depends_on:
@@ -1147,6 +1154,7 @@ tasks:
params:
working_dir: src
script: |
+ set -o verbose
set -e
mv ./mongodb/mongod${extension} .
mv ./mongodb/mongo${extension} .
@@ -1156,7 +1164,8 @@ tasks:
rm -rf /data/install /data/multiversion
python buildscripts/setup_multiversion_mongodb.py /data/install /data/multiversion ${arch} "2.6" "2.4" --latest ${smoke_use_ssl} --os="${mongo_os}" || true
chmod 400 jstests/libs/key*
- SMOKE_EVAL="load('jstests/configs/archive_targets.js')" PATH=$PATH:/data/multiversion python buildscripts/smoke.py ${smoke_use_ssl} --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json --continue-on-failure --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} --authMechanism SCRAM-SHA-1 dump restore
+
+ PATH=$PATH:/data/multiversion python buildscripts/resmoke.py --suite=restore_archive --continueOnFailure --log=buildlogger --reportFile=report.json
- name: qa-dump-restore-gzip
depends_on:
@@ -1184,6 +1193,7 @@ tasks:
params:
working_dir: src
script: |
+ set -o verbose
set -e
mv ./mongodb/mongod${extension} .
mv ./mongodb/mongo${extension} .
@@ -1193,7 +1203,8 @@ tasks:
rm -rf /data/install /data/multiversion
python buildscripts/setup_multiversion_mongodb.py /data/install /data/multiversion ${arch} "2.6" "2.4" --latest ${smoke_use_ssl} --os="${mongo_os}" || true
chmod 400 jstests/libs/key*
- SMOKE_EVAL="load('jstests/configs/gzip_targets.js')" PATH=$PATH:/data/multiversion python buildscripts/smoke.py ${smoke_use_ssl} --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json --continue-on-failure --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} --authMechanism SCRAM-SHA-1 dump restore
+
+ PATH=$PATH:/data/multiversion python buildscripts/resmoke.py --suite=restore_gzip --continueOnFailure --log=buildlogger --reportFile=report.json
- name: qa-tests-wt
depends_on:
@@ -1242,6 +1253,7 @@ tasks:
params:
working_dir: src
script: |
+ set -o verbose
mv ./mongodb/mongod${extension} .
mv ./mongodb/mongo${extension} .
mv ./mongodb/mongos${extension} .
@@ -1251,7 +1263,8 @@ tasks:
rm -rf /data/install /data/multiversion
python buildscripts/setup_multiversion_mongodb.py /data/install /data/multiversion ${arch} "2.6" "2.4" --latest ${smoke_use_ssl} --os="${mongo_os}" || true
chmod 400 jstests/libs/key*
- PATH=$PATH:/data/multiversion python buildscripts/smoke.py ${smoke_use_ssl} --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json --continue-on-failure --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} --authMechanism SCRAM-SHA-1 --storageEngine=wiredTiger top dump oplog files restore stat bson export import
+
+ PATH=$PATH:/data/multiversion python buildscripts/resmoke.py --suite=core --continueOnFailure --log=buildlogger --reportFile=report.json --storageEngine=wiredTiger
- name: text
commands:
@@ -1415,6 +1428,7 @@ buildvariants:
edition: ssl
arch: "linux/x86_64"
smoke_use_ssl: --use-ssl
+ resmoke_use_ssl: _ssl
integration_test_args: "integration,ssl"
tasks: *ubuntu1204_ssl_tasks
@@ -1428,6 +1442,7 @@ buildvariants:
<<: *mongo_default_startup_args
build_tags: -tags "ssl sasl"
smoke_use_ssl: --use-ssl
+ resmoke_use_ssl: _ssl
arch: "linux/x86_64"
edition: enterprise
run_kinit: true
@@ -1488,6 +1503,7 @@ buildvariants:
build_tags: -tags "ssl"
edition: ssl
smoke_use_ssl: --use-ssl
+ resmoke_use_ssl: _ssl
extension: .exe
arch: "win32/x86_64"
library_path: PATH="/cygdrive/c/mingw-w64/x86_64-4.9.1-posix-seh-rt_v3-rev1/mingw64/bin:/cygdrive/c/sasl/:$PATH"
@@ -1505,6 +1521,7 @@ buildvariants:
<<: *mongo_default_startup_args
build_tags: -tags "sasl ssl"
smoke_use_ssl: --use-ssl
+ resmoke_use_ssl: _ssl
edition: enterprise
extension: .exe
arch: "win32/x86_64"
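The new resmoke_use_ssl: _ssl expansion works by plain string concatenation: the task scripts invoke --suite=core${resmoke_use_ssl}, so SSL build variants select the core_ssl suite while every other variant falls back to plain core. A minimal sketch of that composition (illustrative only; the real substitution is done by Evergreen's ${...} expansion, not by Python):

    def resolve_suite(base, resmoke_use_ssl=""):
        # Evergreen leaves undefined expansions empty, so non-SSL variants get "core".
        return base + resmoke_use_ssl

    assert resolve_suite("core") == "core"
    assert resolve_suite("core", "_ssl") == "core_ssl"
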
diff --git a/test/qa-tests/buildscripts/__init__.py b/test/qa-tests/buildscripts/__init__.py
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/test/qa-tests/buildscripts/__init__.py
@@ -0,0 +1 @@
+
diff --git a/test/qa-tests/buildscripts/resmoke.py b/test/qa-tests/buildscripts/resmoke.py
new file mode 100755
index 00000000000..a6cb03cb620
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmoke.py
@@ -0,0 +1,216 @@
+#!/usr/bin/env python
+
+"""
+Command line utility for executing MongoDB tests of all kinds.
+"""
+
+from __future__ import absolute_import
+
+import json
+import os.path
+import random
+import signal
+import sys
+import time
+import traceback
+
+# Get relative imports to work when the package is not installed on the PYTHONPATH.
+if __name__ == "__main__" and __package__ is None:
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+ from buildscripts import resmokelib
+
+
+def _execute_suite(suite, logging_config):
+ """
+ Executes each test group of 'suite', failing fast if requested.
+
+ Returns true if the execution of the suite was interrupted by the
+ user, and false otherwise.
+ """
+
+ logger = resmokelib.logging.loggers.EXECUTOR
+
+ for group in suite.test_groups:
+ if resmokelib.config.SHUFFLE:
+ logger.info("Shuffling order of tests for %ss in suite %s. The seed is %d.",
+ group.test_kind, suite.get_name(), resmokelib.config.RANDOM_SEED)
+ random.seed(resmokelib.config.RANDOM_SEED)
+ random.shuffle(group.tests)
+
+ if resmokelib.config.DRY_RUN == "tests":
+ sb = []
+ sb.append("Tests that would be run for %ss in suite %s:"
+ % (group.test_kind, suite.get_name()))
+ if len(group.tests) > 0:
+ for test in group.tests:
+ sb.append(test)
+ else:
+ sb.append("(no tests)")
+ logger.info("\n".join(sb))
+
+ # Set a successful return code on the test group because we want to output the tests
+ # that would get run by any other suites the user specified.
+ group.return_code = 0
+ continue
+
+ if len(group.tests) == 0:
+ logger.info("Skipping %ss, no tests to run", group.test_kind)
+ continue
+
+ group_config = suite.get_executor_config().get(group.test_kind, {})
+ executor = resmokelib.testing.executor.TestGroupExecutor(logger,
+ group,
+ logging_config,
+ **group_config)
+
+ try:
+ executor.run()
+ if resmokelib.config.FAIL_FAST and group.return_code != 0:
+ suite.return_code = group.return_code
+ return False
+ except resmokelib.errors.UserInterrupt:
+ suite.return_code = 130 # Simulate SIGINT as exit code.
+ return True
+ except:
+ logger.exception("Encountered an error when running %ss of suite %s.",
+ group.test_kind, suite.get_name())
+ suite.return_code = 2
+ return False
+
+
+def _log_summary(logger, suites, time_taken):
+ if len(suites) > 1:
+ sb = []
+ sb.append("Summary of all suites: %d suites ran in %0.2f seconds"
+ % (len(suites), time_taken))
+ for suite in suites:
+ suite_sb = []
+ suite.summarize(suite_sb)
+ sb.append(" %s: %s" % (suite.get_name(), "\n ".join(suite_sb)))
+
+ logger.info("=" * 80)
+ logger.info("\n".join(sb))
+
+
+def _summarize_suite(suite):
+ sb = []
+ suite.summarize(sb)
+ return "\n".join(sb)
+
+
+def _dump_suite_config(suite, logging_config):
+ """
+ Returns a string that represents the YAML configuration of a suite.
+
+ TODO: include the "options" key in the result
+ """
+
+ sb = []
+ sb.append("YAML configuration of suite %s" % (suite.get_name()))
+ sb.append(resmokelib.utils.dump_yaml({"selector": suite.get_selector_config()}))
+ sb.append("")
+ sb.append(resmokelib.utils.dump_yaml({"executor": suite.get_executor_config()}))
+ sb.append("")
+ sb.append(resmokelib.utils.dump_yaml({"logging": logging_config}))
+ return "\n".join(sb)
+
+
+def _write_report_file(suites, pathname):
+ """
+ Writes the report.json file if requested.
+ """
+
+ reports = []
+ for suite in suites:
+ for group in suite.test_groups:
+ reports.extend(group.get_reports())
+
+ combined_report_dict = resmokelib.testing.report.TestReport.combine(*reports).as_dict()
+ with open(pathname, "w") as fp:
+ json.dump(combined_report_dict, fp)
+
+
+def main():
+ start_time = time.time()
+
+ values, args = resmokelib.parser.parse_command_line()
+
+ logging_config = resmokelib.parser.get_logging_config(values)
+ resmokelib.logging.config.apply_config(logging_config)
+ resmokelib.logging.flush.start_thread()
+
+ resmokelib.parser.update_config_vars(values)
+
+ exec_logger = resmokelib.logging.loggers.EXECUTOR
+ resmoke_logger = resmokelib.logging.loggers.new_logger("resmoke", parent=exec_logger)
+
+ if values.list_suites:
+ suite_names = resmokelib.parser.get_named_suites()
+ resmoke_logger.info("Suites available to execute:\n%s", "\n".join(suite_names))
+ sys.exit(0)
+
+ interrupted = False
+ suites = resmokelib.parser.get_suites(values, args)
+ try:
+ for suite in suites:
+ resmoke_logger.info(_dump_suite_config(suite, logging_config))
+
+ suite.record_start()
+ interrupted = _execute_suite(suite, logging_config)
+ suite.record_end()
+
+ resmoke_logger.info("=" * 80)
+ resmoke_logger.info("Summary of %s suite: %s",
+ suite.get_name(), _summarize_suite(suite))
+
+ if interrupted or (resmokelib.config.FAIL_FAST and suite.return_code != 0):
+ time_taken = time.time() - start_time
+ _log_summary(resmoke_logger, suites, time_taken)
+ sys.exit(suite.return_code)
+
+ time_taken = time.time() - start_time
+ _log_summary(resmoke_logger, suites, time_taken)
+
+ # Exit with a nonzero code if any of the suites failed.
+ exit_code = max(suite.return_code for suite in suites)
+ sys.exit(exit_code)
+ finally:
+ if not interrupted:
+ resmokelib.logging.flush.stop_thread()
+
+ if resmokelib.config.REPORT_FILE is not None:
+ _write_report_file(suites, resmokelib.config.REPORT_FILE)
+
+
+if __name__ == "__main__":
+
+ def _dump_stacks(signum, frame):
+ """
+ Signal handler that will dump the stacks of all threads.
+ """
+
+ header_msg = "Dumping stacks due to SIGUSR1 signal"
+
+ sb = []
+ sb.append("=" * len(header_msg))
+ sb.append(header_msg)
+ sb.append("=" * len(header_msg))
+
+ frames = sys._current_frames()
+ sb.append("Total threads: %d" % (len(frames)))
+ sb.append("")
+
+ for thread_id in frames:
+ stack = frames[thread_id]
+ sb.append("Thread %d:" % (thread_id))
+ sb.append("".join(traceback.format_stack(stack)))
+
+ sb.append("=" * len(header_msg))
+ print "\n".join(sb)
+
+ try:
+ signal.signal(signal.SIGUSR1, _dump_stacks)
+ except AttributeError:
+ print "Cannot catch signals on Windows"
+
+ main()
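The __main__ block above installs a SIGUSR1 handler that dumps every thread's stack, which is useful when a run hangs. A small sketch of triggering it from another process (POSIX only; as the fallback above notes, the signal cannot be caught on Windows; the helper name is hypothetical):

    import os
    import signal

    def dump_resmoke_stacks(resmoke_pid):
        """Ask a running resmoke.py process to print the stack of every thread."""
        os.kill(resmoke_pid, signal.SIGUSR1)
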
diff --git a/test/qa-tests/buildscripts/resmokeconfig/__init__.py b/test/qa-tests/buildscripts/resmokeconfig/__init__.py
new file mode 100644
index 00000000000..37f5a889956
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokeconfig/__init__.py
@@ -0,0 +1,4 @@
+from __future__ import absolute_import
+
+from .suites import NAMED_SUITES
+from .loggers import NAMED_LOGGERS
diff --git a/test/qa-tests/buildscripts/resmokeconfig/loggers/__init__.py b/test/qa-tests/buildscripts/resmokeconfig/loggers/__init__.py
new file mode 100644
index 00000000000..6511d496364
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokeconfig/loggers/__init__.py
@@ -0,0 +1,36 @@
+"""
+Defines a mapping of shortened names for logger configuration files to
+their full path.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+
+
+def _get_named_loggers():
+ """
+ Explores this directory for any YAML configuration files.
+
+ Returns a mapping of basenames without the file extension to their
+ full path.
+ """
+
+ dirname = os.path.dirname(__file__)
+ named_loggers = {}
+
+ try:
+ (root, _dirs, files) = os.walk(dirname).next()
+ for filename in files:
+ (short_name, ext) = os.path.splitext(filename)
+ if ext in (".yml", ".yaml"):
+ pathname = os.path.join(root, filename)
+ named_loggers[short_name] = os.path.relpath(pathname)
+ except StopIteration:
+ # 'dirname' does not exist, which should be impossible because it contains __file__.
+ raise IOError("Directory '%s' does not exist" % (dirname))
+
+ return named_loggers
+
+NAMED_LOGGERS = _get_named_loggers()
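_get_named_loggers() maps each YAML basename in this directory to a path, which is how a name like --log=buildlogger is resolved to a config file. For the files added in this commit, NAMED_LOGGERS would look roughly like the dictionary below (the exact paths come from os.path.relpath, so they depend on the directory resmoke.py is invoked from):

    NAMED_LOGGERS = {
        "buildlogger": "test/qa-tests/buildscripts/resmokeconfig/loggers/buildlogger.yml",
        "console": "test/qa-tests/buildscripts/resmokeconfig/loggers/console.yml",
        "file": "test/qa-tests/buildscripts/resmokeconfig/loggers/file.yml",
        "suppress": "test/qa-tests/buildscripts/resmokeconfig/loggers/suppress.yml",
    }
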
diff --git a/test/qa-tests/buildscripts/resmokeconfig/loggers/buildlogger.yml b/test/qa-tests/buildscripts/resmokeconfig/loggers/buildlogger.yml
new file mode 100644
index 00000000000..302d2677491
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokeconfig/loggers/buildlogger.yml
@@ -0,0 +1,13 @@
+logging:
+ executor:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.StreamHandler
+ fixture:
+ format: '[%(name)s] %(message)s'
+ handlers:
+ - class: buildlogger
+ tests:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: buildlogger
diff --git a/test/qa-tests/buildscripts/resmokeconfig/loggers/console.yml b/test/qa-tests/buildscripts/resmokeconfig/loggers/console.yml
new file mode 100644
index 00000000000..b233de409b3
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokeconfig/loggers/console.yml
@@ -0,0 +1,13 @@
+logging:
+ executor:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.StreamHandler
+ fixture:
+ format: '[%(name)s] %(message)s'
+ handlers:
+ - class: logging.StreamHandler
+ tests:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.StreamHandler
diff --git a/test/qa-tests/buildscripts/resmokeconfig/loggers/file.yml b/test/qa-tests/buildscripts/resmokeconfig/loggers/file.yml
new file mode 100644
index 00000000000..3d2d15cd5bc
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokeconfig/loggers/file.yml
@@ -0,0 +1,19 @@
+logging:
+ executor:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.FileHandler
+ filename: executor.log
+ mode: w
+ fixture:
+ format: '[%(name)s] %(message)s'
+ handlers:
+ - class: logging.FileHandler
+ filename: fixture.log
+ mode: w
+ tests:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.FileHandler
+ filename: tests.log
+ mode: w
diff --git a/test/qa-tests/buildscripts/resmokeconfig/loggers/suppress.yml b/test/qa-tests/buildscripts/resmokeconfig/loggers/suppress.yml
new file mode 100644
index 00000000000..c69bb793b0b
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokeconfig/loggers/suppress.yml
@@ -0,0 +1,10 @@
+logging:
+ executor:
+ handlers:
+ - class: logging.NullHandler
+ fixture:
+ handlers:
+ - class: logging.NullHandler
+ tests:
+ handlers:
+ - class: logging.NullHandler
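Each of the logger configs above names a handler class and an optional format string for the executor, fixture, and tests loggers. A minimal sketch of applying one such entry with the standard library follows; this is an assumption about what resmokelib/logging/config.py does (its contents are not shown in this section), and the special class: buildlogger value is handled by resmokelib's own buildlogger handlers rather than the logging package:

    import logging

    def apply_logger_entry(logger_name, handler_class, fmt=None):
        # Resolve e.g. "logging.StreamHandler" to a class and attach it with the format.
        logger = logging.getLogger(logger_name)
        handler = getattr(logging, handler_class.split(".")[-1])()
        if fmt is not None:
            handler.setFormatter(logging.Formatter(fmt))
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
        return logger

    executor = apply_logger_entry("executor", "logging.StreamHandler",
                                  "[%(name)s] %(asctime)s %(message)s")
    executor.info("executor logger configured")
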
diff --git a/test/qa-tests/buildscripts/resmokeconfig/suites/__init__.py b/test/qa-tests/buildscripts/resmokeconfig/suites/__init__.py
new file mode 100644
index 00000000000..e075dd22e0d
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokeconfig/suites/__init__.py
@@ -0,0 +1,36 @@
+"""
+Defines a mapping of shortened names for suite configuration files to
+their full path.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+
+
+def _get_named_suites():
+ """
+ Explores this directory for any YAML configuration files.
+
+ Returns a mapping of basenames without the file extension to their
+ full path.
+ """
+
+ dirname = os.path.dirname(__file__)
+ named_suites = {}
+
+ try:
+ (root, _dirs, files) = os.walk(dirname).next()
+ for filename in files:
+ (short_name, ext) = os.path.splitext(filename)
+ if ext in (".yml", ".yaml"):
+ pathname = os.path.join(root, filename)
+ named_suites[short_name] = os.path.relpath(pathname)
+ except StopIteration:
+ # 'dirname' does not exist, which should be impossible because it contains __file__.
+ raise IOError("Directory '%s' does not exist" % (dirname))
+
+ return named_suites
+
+NAMED_SUITES = _get_named_suites()
diff --git a/test/qa-tests/buildscripts/resmokeconfig/suites/core.yml b/test/qa-tests/buildscripts/resmokeconfig/suites/core.yml
new file mode 100644
index 00000000000..bc094c1f549
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokeconfig/suites/core.yml
@@ -0,0 +1,27 @@
+selector:
+ js_test:
+ roots:
+ - jstests/bson/*.js
+ - jstests/dump/*.js
+ - jstests/export/*.js
+ - jstests/files/*.js
+ - jstests/import/*.js
+ - jstests/oplog/*.js
+ - jstests/restore/*.js
+ - jstests/stat/*.js
+ - jstests/top/*.js
+
+executor:
+ js_test:
+ config:
+ shell_options:
+ eval_prepend: "load('jstests/libs/servers.js'); load('jstests/libs/servers_misc.js');"
+ readMode: commands
+ hooks:
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
diff --git a/test/qa-tests/buildscripts/resmokeconfig/suites/core_ssl.yml b/test/qa-tests/buildscripts/resmokeconfig/suites/core_ssl.yml
new file mode 100644
index 00000000000..2a9330e2856
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokeconfig/suites/core_ssl.yml
@@ -0,0 +1,38 @@
+selector:
+ js_test:
+ roots:
+ - jstests/bson/*.js
+ - jstests/dump/*.js
+ - jstests/export/*.js
+ - jstests/files/*.js
+ - jstests/import/*.js
+ - jstests/oplog/*.js
+ - jstests/restore/*.js
+ - jstests/stat/*.js
+ - jstests/top/*.js
+
+executor:
+ js_test:
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ useSSL: true
+ eval_prepend: "load('jstests/libs/servers.js'); load('jstests/libs/servers_misc.js');"
+ readMode: commands
+ ssl: ''
+ sslAllowInvalidCertificates: ''
+ sslCAFile: jstests/libs/ca.pem
+ sslPEMKeyFile: jstests/libs/client.pem
+ hooks:
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ sslMode: allowSSL
+ sslPEMKeyFile: jstests/libs/server.pem
+ sslCAFile: jstests/libs/ca.pem
+ sslWeakCertificateValidation: ''
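The fixture's mongod_options above become command-line flags via _apply_set_parameters() and _apply_kwargs() in core/programs.py (added later in this commit). For the core_ssl fixture the generated invocation works out to roughly the list below; argument order is not guaranteed, and the fixture adds its own --port and --dbpath at runtime:

    mongod_args = [
        "./mongod",
        "--setParameter", "enableTestCommands=1",
        "--sslMode", "allowSSL",
        "--sslPEMKeyFile", "jstests/libs/server.pem",
        "--sslCAFile", "jstests/libs/ca.pem",
        "--sslWeakCertificateValidation",  # empty-string value, so the flag takes no argument
    ]
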
diff --git a/test/qa-tests/buildscripts/resmokeconfig/suites/restore_archive.yml b/test/qa-tests/buildscripts/resmokeconfig/suites/restore_archive.yml
new file mode 100644
index 00000000000..8c51a3b2f46
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokeconfig/suites/restore_archive.yml
@@ -0,0 +1,23 @@
+selector:
+ js_test:
+ roots:
+ - jstests/dump/*.js
+ - jstests/restore/*.js
+
+executor:
+ js_test:
+ config:
+ shell_options:
+ eval_prepend: "load('jstests/libs/servers.js'); load('jstests/libs/servers_misc.js');"
+ eval: "load('jstests/configs/archive_targets.js');"
+ readMode: commands
+ hooks:
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+
+
diff --git a/test/qa-tests/buildscripts/resmokeconfig/suites/restore_gzip.yml b/test/qa-tests/buildscripts/resmokeconfig/suites/restore_gzip.yml
new file mode 100644
index 00000000000..768b88ca6dd
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokeconfig/suites/restore_gzip.yml
@@ -0,0 +1,21 @@
+selector:
+ js_test:
+ roots:
+ - jstests/dump/*.js
+ - jstests/restore/*.js
+
+executor:
+ js_test:
+ config:
+ shell_options:
+ eval_prepend: "load('jstests/libs/servers.js'); load('jstests/libs/servers_misc.js');"
+ eval: "load('jstests/configs/gzip_targets.js');"
+ hooks:
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+
diff --git a/test/qa-tests/buildscripts/resmokelib/__init__.py b/test/qa-tests/buildscripts/resmokelib/__init__.py
new file mode 100644
index 00000000000..06b0539e25b
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/__init__.py
@@ -0,0 +1,7 @@
+from __future__ import absolute_import
+
+from . import errors
+from . import logging
+from . import parser
+from . import testing
+from . import utils
diff --git a/test/qa-tests/buildscripts/resmokelib/config.py b/test/qa-tests/buildscripts/resmokelib/config.py
new file mode 100644
index 00000000000..ecb7fec7fa3
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/config.py
@@ -0,0 +1,165 @@
+"""
+Configuration options for resmoke.py.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+import time
+
+
+##
+# Default values.
+##
+
+# Default path for where to look for executables.
+DEFAULT_DBTEST_EXECUTABLE = os.path.join(os.curdir, "dbtest")
+DEFAULT_MONGO_EXECUTABLE = os.path.join(os.curdir, "mongo")
+DEFAULT_MONGOD_EXECUTABLE = os.path.join(os.curdir, "mongod")
+DEFAULT_MONGOS_EXECUTABLE = os.path.join(os.curdir, "mongos")
+
+# Default root directory for where resmoke.py puts directories containing data files of mongod's it
+# starts, as well as those started by individual tests.
+DEFAULT_DBPATH_PREFIX = os.path.normpath("/data/db")
+
+# Subdirectory under the dbpath prefix that contains directories with data files of mongod's started
+# by resmoke.py.
+FIXTURE_SUBDIR = "resmoke"
+
+# Subdirectory under the dbpath prefix that contains directories with data files of mongod's started
+# by individual tests.
+MONGO_RUNNER_SUBDIR = "mongorunner"
+
+# Names below correspond to how they are specified via the command line or in the options YAML file.
+DEFAULTS = {
+ "basePort": 20000,
+ "buildloggerUrl": "https://logkeeper.mongodb.org",
+ "continueOnFailure": False,
+ "dbpathPrefix": None,
+ "dbtest": None,
+ "dryRun": None,
+ "excludeWithAllTags": None,
+ "excludeWithAnyTags": None,
+ "includeWithAllTags": None,
+ "includeWithAnyTags": None,
+ "jobs": 1,
+ "mongo": None,
+ "mongod": None,
+ "mongodSetParameters": None,
+ "mongos": None,
+ "mongosSetParameters": None,
+ "nojournal": False,
+ "repeat": 1,
+ "reportFile": None,
+ "seed": long(time.time() * 256), # Taken from random.py code in Python 2.7.
+ "shellReadMode": None,
+ "shellWriteMode": None,
+ "shuffle": False,
+ "storageEngine": None,
+ "wiredTigerCollectionConfigString": None,
+ "wiredTigerEngineConfigString": None,
+ "wiredTigerIndexConfigString": None
+}
+
+
+##
+# Variables that are set by the user at the command line or with --options.
+##
+
+# The starting port number to use for mongod and mongos processes spawned by resmoke.py and the
+# mongo shell.
+BASE_PORT = None
+
+# The root url of the buildlogger server.
+BUILDLOGGER_URL = None
+
+# Root directory for where resmoke.py puts directories containing data files of mongod's it starts,
+# as well as those started by individual tests.
+DBPATH_PREFIX = None
+
+# The path to the dbtest executable used by resmoke.py.
+DBTEST_EXECUTABLE = None
+
+# If set to "tests", then resmoke.py will output the tests that would be run by each suite (without
+# actually running them).
+DRY_RUN = None
+
+# If set, then any jstests that have all of the specified tags will be excluded from the suite(s).
+EXCLUDE_WITH_ALL_TAGS = None
+
+# If set, then any jstests that have any of the specified tags will be excluded from the suite(s).
+EXCLUDE_WITH_ANY_TAGS = None
+
+# If true, then a test failure or error will cause resmoke.py to exit and not run any more tests.
+FAIL_FAST = None
+
+# If set, then only jstests that have all of the specified tags will be run during the jstest
+# portion of the suite(s).
+INCLUDE_WITH_ALL_TAGS = None
+
+# If set, then only jstests that have at least one of the specified tags will be run during the
+# jstest portion of the suite(s).
+INCLUDE_WITH_ANY_TAGS = None
+
+# If set, then resmoke.py starts the specified number of Job instances to run tests.
+JOBS = None
+
+# The path to the mongo executable used by resmoke.py.
+MONGO_EXECUTABLE = None
+
+# The path to the mongod executable used by resmoke.py.
+MONGOD_EXECUTABLE = None
+
+# The --setParameter options passed to mongod.
+MONGOD_SET_PARAMETERS = None
+
+# The path to the mongos executable used by resmoke.py.
+MONGOS_EXECUTABLE = None
+
+# The --setParameter options passed to mongos.
+MONGOS_SET_PARAMETERS = None
+
+# If true, then all mongod's started by resmoke.py and by the mongo shell will not have journaling
+# enabled.
+NO_JOURNAL = None
+
+# If true, then all mongod's started by resmoke.py and by the mongo shell will not preallocate
+# journal files.
+NO_PREALLOC_JOURNAL = None
+
+# If set, then the RNG is seeded with the specified value. Otherwise uses a seed based on the time
+# this module was loaded.
+RANDOM_SEED = None
+
+# If set, then each suite is repeated the specified number of times.
+REPEAT = None
+
+# If set, then resmoke.py will write out a report file with the status of each test that ran.
+REPORT_FILE = None
+
+# If set, then mongo shells started by resmoke.py will use the specified read mode.
+SHELL_READ_MODE = None
+
+# If set, then mongo shells started by resmoke.py will use the specified write mode.
+SHELL_WRITE_MODE = None
+
+# If true, then the order the tests run in is randomized. Otherwise the tests will run in
+# alphabetical (case-insensitive) order.
+SHUFFLE = None
+
+# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
+# storage engine.
+STORAGE_ENGINE = None
+
+# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
+# WiredTiger collection configuration settings.
+WT_COLL_CONFIG = None
+
+# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
+# WiredTiger storage engine configuration settings.
+WT_ENGINE_CONFIG = None
+
+# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
+# WiredTiger index configuration settings.
+WT_INDEX_CONFIG = None
diff --git a/test/qa-tests/buildscripts/resmokelib/core/__init__.py b/test/qa-tests/buildscripts/resmokelib/core/__init__.py
new file mode 100644
index 00000000000..29a19a52500
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/core/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import absolute_import
+
+from . import process
+from . import programs
+from . import network
diff --git a/test/qa-tests/buildscripts/resmokelib/core/network.py b/test/qa-tests/buildscripts/resmokelib/core/network.py
new file mode 100644
index 00000000000..44e54667a67
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/core/network.py
@@ -0,0 +1,114 @@
+"""
+Class used to allocate ports for use by various mongod and mongos
+processes involved in running the tests.
+"""
+
+from __future__ import absolute_import
+
+import collections
+import functools
+import threading
+
+from .. import config
+from .. import errors
+
+
+def _check_port(func):
+ """
+ A decorator that verifies the port returned by the wrapped function
+ is in the valid range.
+
+ Returns the port if it is valid, and raises a PortAllocationError
+ otherwise.
+ """
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ port = func(*args, **kwargs)
+
+ if port < 0:
+ raise errors.PortAllocationError("Attempted to use a negative port")
+
+ if port > PortAllocator.MAX_PORT:
+ raise errors.PortAllocationError("Exhausted all available ports. Consider decreasing"
+ " the number of jobs, or using a lower base port")
+
+ return port
+
+ return wrapper
+
+
+class PortAllocator(object):
+ """
+ This class is responsible for allocating ranges of ports.
+
+ It reserves a range of ports for each job with the first part of
+ that range used for the fixture started by that job, and the second
+ part of the range used for mongod and mongos processes started by
+ tests run by that job.
+ """
+
+ # A PortAllocator will not return any port greater than this number.
+ MAX_PORT = 2 ** 16 - 1
+
+ # Each job gets a contiguous range of _PORTS_PER_JOB ports, with job 0 getting the first block
+ # of ports, job 1 getting the second block, and so on.
+ _PORTS_PER_JOB = 250
+
+ # The first _PORTS_PER_FIXTURE ports of each range are reserved for the fixtures, the remainder
+ # of the port range is used by tests.
+ _PORTS_PER_FIXTURE = 10
+
+ _NUM_USED_PORTS_LOCK = threading.Lock()
+
+ # Used to keep track of how many ports a fixture has allocated.
+ _NUM_USED_PORTS = collections.defaultdict(int)
+
+ @classmethod
+ @_check_port
+ def next_fixture_port(cls, job_num):
+ """
+ Returns the next port for a fixture to use.
+
+ Raises a PortAllocationError if the fixture has requested more
+ ports than are reserved per job, or if the next port is not a
+ valid port number.
+ """
+ with cls._NUM_USED_PORTS_LOCK:
+ start_port = config.BASE_PORT + (job_num * cls._PORTS_PER_JOB)
+ num_used_ports = cls._NUM_USED_PORTS[job_num]
+ next_port = start_port + num_used_ports
+
+ cls._NUM_USED_PORTS[job_num] += 1
+
+ if next_port >= start_port + cls._PORTS_PER_FIXTURE:
+ raise errors.PortAllocationError(
+ "Fixture has requested more than the %d ports reserved per fixture"
+ % cls._PORTS_PER_FIXTURE)
+
+ return next_port
+
+ @classmethod
+ @_check_port
+ def min_test_port(cls, job_num):
+ """
+ For the given job, returns the lowest port that is reserved for
+ use by tests.
+
+ Raises a PortAllocationError if that port is higher than the
+ maximum port.
+ """
+ return config.BASE_PORT + (job_num * cls._PORTS_PER_JOB) + cls._PORTS_PER_FIXTURE
+
+ @classmethod
+ @_check_port
+ def max_test_port(cls, job_num):
+ """
+ For the given job, returns the highest port that is reserved
+ for use by tests.
+
+ Raises a PortAllocationError if that port is higher than the
+ maximum port.
+ """
+ next_range_start = config.BASE_PORT + ((job_num + 1) * cls._PORTS_PER_JOB)
+ return next_range_start - 1
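With the defaults in this commit (basePort 20000, 250 ports per job, and the first 10 ports of each block reserved for the fixture), the allocation arithmetic works out as in this short check:

    BASE_PORT = 20000        # config.DEFAULTS["basePort"]
    PORTS_PER_JOB = 250      # PortAllocator._PORTS_PER_JOB
    PORTS_PER_FIXTURE = 10   # PortAllocator._PORTS_PER_FIXTURE

    def port_ranges(job_num):
        start = BASE_PORT + job_num * PORTS_PER_JOB
        fixture = (start, start + PORTS_PER_FIXTURE - 1)
        tests = (start + PORTS_PER_FIXTURE, start + PORTS_PER_JOB - 1)
        return fixture, tests

    # Job 0's fixture uses 20000-20009 and its tests use 20010-20249;
    # job 1's block starts at 20250, and so on.
    assert port_ranges(0) == ((20000, 20009), (20010, 20249))
    assert port_ranges(1) == ((20250, 20259), (20260, 20499))
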
diff --git a/test/qa-tests/buildscripts/resmokelib/core/pipe.py b/test/qa-tests/buildscripts/resmokelib/core/pipe.py
new file mode 100644
index 00000000000..bb080721b2d
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/core/pipe.py
@@ -0,0 +1,87 @@
+"""
+Helper class to read output of a subprocess. Used to avoid deadlocks
+from the pipe buffer filling up and blocking the subprocess while it's
+being waited on.
+"""
+
+from __future__ import absolute_import
+
+import threading
+
+
+class LoggerPipe(threading.Thread):
+ """
+ Asynchronously reads the output of a subprocess and sends it to a
+ logger.
+ """
+
+ # The start() and join() methods are not intended to be called directly on the LoggerPipe
+ # instance. Since we override them for that effect, the super's versions are preserved here.
+ __start = threading.Thread.start
+ __join = threading.Thread.join
+
+ def __init__(self, logger, level, pipe_out):
+ """
+ Initializes the LoggerPipe with the specified logger, logging
+ level to use, and pipe to read from.
+ """
+
+ threading.Thread.__init__(self)
+ # Main thread should not call join() when exiting
+ self.daemon = True
+
+ self.__logger = logger
+ self.__level = level
+ self.__pipe_out = pipe_out
+
+ self.__lock = threading.Lock()
+ self.__condition = threading.Condition(self.__lock)
+
+ self.__started = False
+ self.__finished = False
+
+ LoggerPipe.__start(self)
+
+ def start(self):
+ raise NotImplementedError("start should not be called directly")
+
+ def run(self):
+ """
+ Reads the output from 'pipe_out' and logs each line to 'logger'.
+ """
+
+ with self.__lock:
+ self.__started = True
+ self.__condition.notify_all()
+
+ # Close the pipe when finished reading all of the output.
+ with self.__pipe_out:
+ # Avoid buffering the output from the pipe.
+ for line in iter(self.__pipe_out.readline, b""):
+ # Convert the output of the process from a bytestring to a UTF-8 string, and replace
+ # any characters that cannot be decoded with the official Unicode replacement
+ # character, U+FFFD. The log messages of MongoDB processes are not always valid
+ # UTF-8 sequences. See SERVER-7506.
+ line = line.decode("utf-8", "replace")
+ self.__logger.log(self.__level, line.rstrip())
+
+ with self.__lock:
+ self.__finished = True
+ self.__condition.notify_all()
+
+ def join(self, timeout=None):
+ raise NotImplementedError("join should not be called directly")
+
+ def wait_until_started(self):
+ with self.__lock:
+ while not self.__started:
+ self.__condition.wait()
+
+ def wait_until_finished(self):
+ with self.__lock:
+ while not self.__finished:
+ self.__condition.wait()
+
+ # No need to pass a timeout to join() because the thread should already be done after
+ # notifying us it has finished reading output from the pipe.
+ LoggerPipe.__join(self) # Tidy up the started thread.
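A minimal sketch of how LoggerPipe is meant to wrap a child process's output; this mirrors what the Process class in core/process.py below does, and it assumes test/qa-tests is on sys.path and that a ./mongo binary is present, neither of which is guaranteed:

    import logging
    import subprocess

    from buildscripts.resmokelib.core import pipe

    logger = logging.getLogger("fixture")
    proc = subprocess.Popen(["./mongo", "--version"],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # Drain both pipes on background threads so the child never blocks on a full buffer.
    out_pipe = pipe.LoggerPipe(logger, logging.INFO, proc.stdout)
    err_pipe = pipe.LoggerPipe(logger, logging.ERROR, proc.stderr)
    out_pipe.wait_until_started()
    err_pipe.wait_until_started()

    proc.wait()
    out_pipe.wait_until_finished()
    err_pipe.wait_until_finished()
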
diff --git a/test/qa-tests/buildscripts/resmokelib/core/process.py b/test/qa-tests/buildscripts/resmokelib/core/process.py
new file mode 100644
index 00000000000..f54b0f0a640
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/core/process.py
@@ -0,0 +1,234 @@
+"""
+A more reliable way to create and destroy processes.
+
+Uses job objects when running on Windows to ensure that all created
+processes are terminated.
+"""
+
+from __future__ import absolute_import
+
+import atexit
+import logging
+import os
+import os.path
+import sys
+import threading
+
+# The subprocess32 module resolves the thread-safety issues of the subprocess module in Python 2.x
+# when the _posixsubprocess C extension module is also available. Additionally, the _posixsubprocess
+# C extension module avoids triggering invalid free() calls on Python's internal data structure for
+# thread-local storage by skipping the PyOS_AfterFork() call when the 'preexec_fn' parameter isn't
+# specified to subprocess.Popen(). See SERVER-22219 for more details.
+#
+# The subprocess32 module is untested on Windows and thus isn't recommended for use, even when it's
+# installed. See https://github.com/google/python-subprocess32/blob/3.2.7/README.md#usage.
+if os.name == "posix" and sys.version_info[0] == 2:
+ try:
+ import subprocess32 as subprocess
+ except ImportError:
+ import warnings
+ warnings.warn(("Falling back to using the subprocess module because subprocess32 isn't"
+ " available. When using the subprocess module, a child process may trigger"
+ " an invalid free(). See SERVER-22219 for more details."),
+ RuntimeWarning)
+ import subprocess
+else:
+ import subprocess
+
+from . import pipe
+from .. import utils
+
+# Attempt to avoid race conditions (e.g. hangs caused by a file descriptor being left open) when
+# starting subprocesses concurrently from multiple threads by guarding calls to subprocess.Popen()
+# with a lock. See https://bugs.python.org/issue2320 and https://bugs.python.org/issue12739 as
+# reports of such hangs.
+#
+# This lock probably isn't necessary when both the subprocess32 module and its _posixsubprocess C
+# extension module are available because either
+# (a) the pipe2() syscall is available on the platform we're using, so pipes are atomically
+# created with the FD_CLOEXEC flag set on them, or
+# (b) the pipe2() syscall isn't available, but the GIL isn't released during the
+# _posixsubprocess.fork_exec() call or the _posixsubprocess.cloexec_pipe() call.
+# See https://bugs.python.org/issue7213 for more details.
+_POPEN_LOCK = threading.Lock()
+
+# Job objects are the only reliable way to ensure that processes are terminated on Windows.
+if sys.platform == "win32":
+ import win32api
+ import win32con
+ import win32job
+ import win32process
+ import winerror
+
+ def _init_job_object():
+ job_object = win32job.CreateJobObject(None, "")
+
+ # Get the limit and job state information of the newly-created job object.
+ job_info = win32job.QueryInformationJobObject(job_object,
+ win32job.JobObjectExtendedLimitInformation)
+
+ # Set up the job object so that closing the last handle to the job object
+ # will terminate all associated processes and destroy the job object itself.
+ job_info["BasicLimitInformation"]["LimitFlags"] |= \
+ win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+
+ # Update the limits of the job object.
+ win32job.SetInformationJobObject(job_object,
+ win32job.JobObjectExtendedLimitInformation,
+ job_info)
+
+ return job_object
+
+ # Don't create a job object if the current process is already inside one.
+ if win32job.IsProcessInJob(win32process.GetCurrentProcess(), None):
+ _JOB_OBJECT = None
+ else:
+ _JOB_OBJECT = _init_job_object()
+ atexit.register(win32api.CloseHandle, _JOB_OBJECT)
+
+
+class Process(object):
+ """
+ Wrapper around subprocess.Popen class.
+ """
+
+ def __init__(self, logger, args, env=None, env_vars=None):
+ """
+ Initializes the process with the specified logger, arguments,
+ and environment.
+ """
+
+ # Ensure that executable files on Windows have a ".exe" extension.
+ if sys.platform == "win32" and os.path.splitext(args[0])[1] != ".exe":
+ args[0] += ".exe"
+
+ self.logger = logger
+ self.args = args
+ self.env = utils.default_if_none(env, os.environ.copy())
+ if env_vars is not None:
+ self.env.update(env_vars)
+
+ self.pid = None
+
+ self._process = None
+ self._stdout_pipe = None
+ self._stderr_pipe = None
+
+ def start(self):
+ """
+ Starts the process and the logger pipes for its stdout and
+ stderr.
+ """
+
+ creation_flags = 0
+ if sys.platform == "win32" and _JOB_OBJECT is not None:
+ creation_flags |= win32process.CREATE_BREAKAWAY_FROM_JOB
+
+ # Use unbuffered I/O pipes to avoid adding delay between when the subprocess writes output
+ # and when the LoggerPipe thread reads it.
+ buffer_size = 0
+
+ # Close file descriptors in the child process before executing the program. This prevents
+ # file descriptors that were inherited due to multiple calls to fork() -- either within one
+ # thread, or concurrently from multiple threads -- from causing another subprocess to wait
+ # for the completion of the newly spawned child process. Closing other file descriptors
+ # isn't supported on Windows when stdout and stderr are redirected.
+ close_fds = (sys.platform != "win32")
+
+ with _POPEN_LOCK:
+ self._process = subprocess.Popen(self.args,
+ bufsize=buffer_size,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ close_fds=close_fds,
+ env=self.env,
+ creationflags=creation_flags)
+ self.pid = self._process.pid
+
+ self._stdout_pipe = pipe.LoggerPipe(self.logger, logging.INFO, self._process.stdout)
+ self._stderr_pipe = pipe.LoggerPipe(self.logger, logging.ERROR, self._process.stderr)
+
+ self._stdout_pipe.wait_until_started()
+ self._stderr_pipe.wait_until_started()
+
+ if sys.platform == "win32" and _JOB_OBJECT is not None:
+ try:
+ win32job.AssignProcessToJobObject(_JOB_OBJECT, self._process._handle)
+ except win32job.error as err:
+ # ERROR_ACCESS_DENIED (winerror=5) is received when the process has already died.
+ if err.winerror != winerror.ERROR_ACCESS_DENIED:
+ raise
+ return_code = win32process.GetExitCodeProcess(self._process._handle)
+ if return_code == win32con.STILL_ACTIVE:
+ raise
+
+ def stop(self):
+ """
+ Terminates the process.
+ """
+
+ if sys.platform == "win32":
+ # Adapted from implementation of Popen.terminate() in subprocess.py of Python 2.7
+ # because earlier versions do not catch exceptions.
+ try:
+ # Have the process exit with code 0 if it is terminated by us to simplify the
+ # success-checking logic later on.
+ win32process.TerminateProcess(self._process._handle, 0)
+ except win32process.error as err:
+ # ERROR_ACCESS_DENIED (winerror=5) is received when the process
+ # has already died.
+ if err.winerror != winerror.ERROR_ACCESS_DENIED:
+ raise
+ return_code = win32process.GetExitCodeProcess(self._process._handle)
+ if return_code == win32con.STILL_ACTIVE:
+ raise
+ else:
+ try:
+ self._process.terminate()
+ except OSError as err:
+ # ESRCH (errno=3) is received when the process has already died.
+ if err.errno != 3:
+ raise
+
+ def poll(self):
+ return self._process.poll()
+
+ def wait(self):
+ """
+ Waits until the process has terminated and all output has been
+ consumed by the logger pipes.
+ """
+
+ return_code = self._process.wait()
+
+ if self._stdout_pipe:
+ self._stdout_pipe.wait_until_finished()
+ if self._stderr_pipe:
+ self._stderr_pipe.wait_until_finished()
+
+ return return_code
+
+ def as_command(self):
+ """
+ Returns an equivalent command line invocation of the process.
+ """
+
+ default_env = os.environ
+ env_diff = self.env.copy()
+
+ # Remove environment variables that appear in both 'os.environ' and 'self.env'.
+ for env_var in default_env:
+ if env_var in env_diff and env_diff[env_var] == default_env[env_var]:
+ del env_diff[env_var]
+
+ sb = [] # String builder.
+ for env_var in env_diff:
+ sb.append("%s=%s" % (env_var, env_diff[env_var]))
+ sb.extend(self.args)
+
+ return " ".join(sb)
+
+ def __str__(self):
+ if self.pid is None:
+ return self.as_command()
+ return "%s (%d)" % (self.as_command(), self.pid)
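A quick sketch of the Process wrapper's lifecycle, assuming a mongod binary in the current directory and an existing dbpath (the fixtures added elsewhere in this commit drive it the same way):

    import logging

    from buildscripts.resmokelib.core import process

    logger = logging.getLogger("fixture")
    proc = process.Process(logger, ["./mongod", "--port", "20000", "--dbpath", "/data/db"])

    print(proc.as_command())  # reproducible command line, including any non-default env vars
    proc.start()              # spawns the child plus the stdout/stderr LoggerPipes
    proc.stop()               # terminate it (exit code 0 on Windows to simplify checks)
    exit_code = proc.wait()   # waits for exit and for all output to be logged
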
diff --git a/test/qa-tests/buildscripts/resmokelib/core/programs.py b/test/qa-tests/buildscripts/resmokelib/core/programs.py
new file mode 100644
index 00000000000..cdffcdf7bca
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/core/programs.py
@@ -0,0 +1,311 @@
+"""
+Utility functions to create MongoDB processes.
+
+Handles all the nitty-gritty parameter conversion.
+"""
+
+from __future__ import absolute_import
+
+import json
+import os
+import os.path
+import stat
+
+from . import process as _process
+from .. import utils
+from .. import config
+
+
+def mongod_program(logger, executable=None, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts a mongod executable with
+ arguments constructed from 'kwargs'.
+ """
+
+ executable = utils.default_if_none(executable, config.DEFAULT_MONGOD_EXECUTABLE)
+ args = [executable]
+
+ # Apply the --setParameter command line argument. Command line options to resmoke.py override
+ # the YAML configuration.
+ suite_set_parameters = kwargs.pop("set_parameters", {})
+
+ if config.MONGOD_SET_PARAMETERS is not None:
+ suite_set_parameters.update(utils.load_yaml(config.MONGOD_SET_PARAMETERS))
+
+ _apply_set_parameters(args, suite_set_parameters)
+
+ shortcut_opts = {
+ "nojournal": config.NO_JOURNAL,
+ "nopreallocj": config.NO_PREALLOC_JOURNAL,
+ "storageEngine": config.STORAGE_ENGINE,
+ "wiredTigerCollectionConfigString": config.WT_COLL_CONFIG,
+ "wiredTigerEngineConfigString": config.WT_ENGINE_CONFIG,
+ "wiredTigerIndexConfigString": config.WT_INDEX_CONFIG,
+ }
+
+ # These options are just flags, so they should not take a value.
+ opts_without_vals = ("nojournal", "nopreallocj")
+
+ # Have the --nojournal command line argument to resmoke.py unset the journal option.
+ if shortcut_opts["nojournal"] and "journal" in kwargs:
+ del kwargs["journal"]
+
+ # Ensure that config servers run with journaling enabled.
+ if "configsvr" in kwargs:
+ shortcut_opts["nojournal"] = False
+ kwargs["journal"] = ""
+
+ # Command line options override the YAML configuration.
+ for opt_name in shortcut_opts:
+ opt_value = shortcut_opts[opt_name]
+ if opt_name in opts_without_vals:
+ # Options that are specified as --flag on the command line are represented by a boolean
+ # value where True indicates that the flag should be included in 'kwargs'.
+ if opt_value:
+ kwargs[opt_name] = ""
+ else:
+ # Options that are specified as --key=value on the command line are represented by a
+ # value where None indicates that the key-value pair shouldn't be included in 'kwargs'.
+ if opt_value is not None:
+ kwargs[opt_name] = opt_value
+
+ # Override the storage engine specified on the command line with "wiredTiger" if running a
+ # config server replica set.
+ if "replSet" in kwargs and "configsvr" in kwargs:
+ kwargs["storageEngine"] = "wiredTiger"
+
+ # Apply the rest of the command line arguments.
+ _apply_kwargs(args, kwargs)
+
+ _set_keyfile_permissions(kwargs)
+
+ process_kwargs = utils.default_if_none(process_kwargs, {})
+ return _process.Process(logger, args, **process_kwargs)
+
+
+def mongos_program(logger, executable=None, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts a mongos executable with
+ arguments constructed from 'kwargs'.
+ """
+
+ executable = utils.default_if_none(executable, config.DEFAULT_MONGOS_EXECUTABLE)
+ args = [executable]
+
+ # Apply the --setParameter command line argument. Command line options to resmoke.py override
+ # the YAML configuration.
+ suite_set_parameters = kwargs.pop("set_parameters", {})
+
+ if config.MONGOS_SET_PARAMETERS is not None:
+ suite_set_parameters.update(utils.load_yaml(config.MONGOS_SET_PARAMETERS))
+
+ _apply_set_parameters(args, suite_set_parameters)
+
+ # Apply the rest of the command line arguments.
+ _apply_kwargs(args, kwargs)
+
+ _set_keyfile_permissions(kwargs)
+
+ process_kwargs = utils.default_if_none(process_kwargs, {})
+ return _process.Process(logger, args, **process_kwargs)
+
+
+def mongo_shell_program(logger, executable=None, filename=None, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts a mongo shell with arguments
+ constructed from 'kwargs'.
+ """
+
+ executable = utils.default_if_none(executable, config.DEFAULT_MONGO_EXECUTABLE)
+ args = [executable]
+
+ eval_sb = [] # String builder.
+ global_vars = kwargs.pop("global_vars", {}).copy()
+
+ shortcut_opts = {
+ "noJournal": (config.NO_JOURNAL, False),
+ "noJournalPrealloc": (config.NO_PREALLOC_JOURNAL, False),
+ "storageEngine": (config.STORAGE_ENGINE, ""),
+ "testName": (os.path.splitext(os.path.basename(filename))[0], ""),
+ "wiredTigerCollectionConfigString": (config.WT_COLL_CONFIG, ""),
+ "wiredTigerEngineConfigString": (config.WT_ENGINE_CONFIG, ""),
+ "wiredTigerIndexConfigString": (config.WT_INDEX_CONFIG, ""),
+ }
+
+ test_data = global_vars.get("TestData", {}).copy()
+ for opt_name in shortcut_opts:
+ (opt_value, opt_default) = shortcut_opts[opt_name]
+ if opt_value is not None:
+ test_data[opt_name] = opt_value
+ elif opt_name not in test_data:
+ # Only use 'opt_default' if the property wasn't set in the YAML configuration.
+ test_data[opt_name] = opt_default
+ global_vars["TestData"] = test_data
+
+ # Pass setParameters for mongos and mongod through TestData. The setParameter parsing in
+ # servers.js is very primitive (just splits on commas), so this may break for non-scalar
+ # setParameter values.
+ if config.MONGOD_SET_PARAMETERS is not None:
+ if "setParameters" in test_data:
+ raise ValueError("setParameters passed via TestData can only be set from either the"
+ " command line or the suite YAML, not both")
+ mongod_set_parameters = utils.load_yaml(config.MONGOD_SET_PARAMETERS)
+ test_data["setParameters"] = _format_test_data_set_parameters(mongod_set_parameters)
+
+ if config.MONGOS_SET_PARAMETERS is not None:
+ if "setParametersMongos" in test_data:
+ raise ValueError("setParametersMongos passed via TestData can only be set from either"
+ " the command line or the suite YAML, not both")
+ mongos_set_parameters = utils.load_yaml(config.MONGOS_SET_PARAMETERS)
+ test_data["setParametersMongos"] = _format_test_data_set_parameters(mongos_set_parameters)
+
+ if "eval_prepend" in kwargs:
+ eval_sb.append(str(kwargs.pop("eval_prepend")))
+
+ for var_name in global_vars:
+ _format_shell_vars(eval_sb, var_name, global_vars[var_name])
+
+ if "eval" in kwargs:
+ eval_sb.append(str(kwargs.pop("eval")))
+
+ eval_str = "; ".join(eval_sb)
+ args.append("--eval")
+ args.append(eval_str)
+
+ if config.SHELL_READ_MODE is not None:
+ kwargs["readMode"] = config.SHELL_READ_MODE
+
+ if config.SHELL_WRITE_MODE is not None:
+ kwargs["writeMode"] = config.SHELL_WRITE_MODE
+
+ # Apply the rest of the command line arguments.
+ _apply_kwargs(args, kwargs)
+
+ # Have the mongo shell run the specified file.
+ args.append(filename)
+
+ _set_keyfile_permissions(test_data)
+
+ process_kwargs = utils.default_if_none(process_kwargs, {})
+ return _process.Process(logger, args, **process_kwargs)
+
+
+def _format_shell_vars(sb, path, value):
+ """
+ Formats 'value' in a way that can be passed to --eval.
+
+ If 'value' is a dictionary, then it is unrolled into the creation of
+ a new JSON object with properties assigned for each key of the
+ dictionary.
+ """
+
+ # Only need to do special handling for JSON objects.
+ if not isinstance(value, dict):
+ sb.append("%s = %s" % (path, json.dumps(value)))
+ return
+
+ # Avoid including curly braces and colons in output so that the command invocation can be
+ # copied and run through bash.
+ sb.append("%s = new Object()" % (path))
+ for subkey in value:
+ _format_shell_vars(sb, ".".join((path, subkey)), value[subkey])
+
+
+def dbtest_program(logger, executable=None, suites=None, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts a dbtest executable with
+ arguments constructed from 'kwargs'.
+ """
+
+ executable = utils.default_if_none(executable, config.DEFAULT_DBTEST_EXECUTABLE)
+ args = [executable]
+
+ if suites is not None:
+ args.extend(suites)
+
+ if config.STORAGE_ENGINE is not None:
+ kwargs["storageEngine"] = config.STORAGE_ENGINE
+
+ return generic_program(logger, args, process_kwargs=process_kwargs, **kwargs)
+
+def generic_program(logger, args, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts an arbitrary executable with
+ arguments constructed from 'kwargs'. The args parameter is an array
+ of strings containing the command to execute.
+ """
+
+ if not utils.is_string_list(args):
+ raise ValueError("The args parameter must be a list of command arguments")
+
+ _apply_kwargs(args, kwargs)
+
+ process_kwargs = utils.default_if_none(process_kwargs, {})
+ return _process.Process(logger, args, **process_kwargs)
+
+
+def _format_test_data_set_parameters(set_parameters):
+ """
+ Converts key-value pairs from 'set_parameters' into the comma
+ delimited list format expected by the parser in servers.js.
+
+ WARNING: the parsing logic in servers.js is very primitive.
+ Non-scalar options such as logComponentVerbosity will not work
+ correctly.
+ """
+ params = []
+ for param_name in set_parameters:
+ param_value = set_parameters[param_name]
+ if isinstance(param_value, bool):
+ # Boolean valued setParameters are specified as lowercase strings.
+ param_value = "true" if param_value else "false"
+ elif isinstance(param_value, dict):
+ raise TypeError("Non-scalar setParameter values are not currently supported.")
+ params.append("%s=%s" % (param_name, param_value))
+ return ",".join(params)
+
+def _apply_set_parameters(args, set_parameter):
+ """
+ Converts key-value pairs from 'kwargs' into --setParameter key=value
+ arguments to an executable and appends them to 'args'.
+ """
+
+ for param_name in set_parameter:
+ param_value = set_parameter[param_name]
+ # --setParameter takes boolean values as lowercase strings.
+ if isinstance(param_value, bool):
+ param_value = "true" if param_value else "false"
+ args.append("--setParameter")
+ args.append("%s=%s" % (param_name, param_value))
+
+
+def _apply_kwargs(args, kwargs):
+ """
+ Converts key-value pairs from 'kwargs' into --key value arguments
+ to an executable and appends them to 'args'.
+
+ A --flag without a value is represented with the empty string.
+ """
+
+ for arg_name in kwargs:
+ arg_value = str(kwargs[arg_name])
+ args.append("--%s" % (arg_name))
+ if arg_value:
+ args.append(arg_value)
+
+
+def _set_keyfile_permissions(opts):
+ """
+ Changes the permissions of the keyfiles in 'opts' to 600, i.e. only
+ the user can read and write the file.
+
+ This is necessary to avoid having mongod/mongos fail to start up
+ because "permissions on the keyfiles are too open".
+
+ We can't permanently set the keyfile permissions because git is not
+ aware of them.
+ """
+ if "keyFile" in opts:
+ os.chmod(opts["keyFile"], stat.S_IRUSR | stat.S_IWUSR)
+ if "encryptionKeyFile" in opts:
+ os.chmod(opts["encryptionKeyFile"], stat.S_IRUSR | stat.S_IWUSR)
diff --git a/test/qa-tests/buildscripts/resmokelib/errors.py b/test/qa-tests/buildscripts/resmokelib/errors.py
new file mode 100644
index 00000000000..6d2a704e390
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/errors.py
@@ -0,0 +1,52 @@
+"""
+Exceptions raised by resmoke.py.
+"""
+
+
+class ResmokeError(Exception):
+ """
+ Base class for all resmoke.py exceptions.
+ """
+ pass
+
+
+class StopExecution(ResmokeError):
+ """
+ Exception that is raised when resmoke.py should stop executing tests
+ if failing fast is enabled.
+ """
+ pass
+
+
+class UserInterrupt(StopExecution):
+ """
+ Exception that is raised when a user signals resmoke.py to
+ unconditionally stop executing tests.
+ """
+ pass
+
+
+class TestFailure(ResmokeError):
+ """
+ Exception that is raised by a hook in the after_test method if it
+ determines that the previous test should be marked as a failure.
+ """
+ pass
+
+
+class ServerFailure(TestFailure):
+ """
+ Exception that is raised by a hook in the after_test method if it
+ detects that the fixture did not exit cleanly and should be marked
+ as a failure.
+ """
+ pass
+
+
+class PortAllocationError(ResmokeError):
+ """
+ Exception that is raised by the PortAllocator if a port is requested
+ outside of the range of valid ports, or if a fixture requests more
+ ports than were reserved for that job.
+ """
+ pass
diff --git a/test/qa-tests/buildscripts/resmokelib/logging/__init__.py b/test/qa-tests/buildscripts/resmokelib/logging/__init__.py
new file mode 100644
index 00000000000..54609ad861f
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/logging/__init__.py
@@ -0,0 +1,14 @@
+"""
+Extension to the logging package to support buildlogger.
+"""
+
+from __future__ import absolute_import
+
+# Alias the built-in logging.Logger class for type checking arguments. Those interested in
+# constructing a new Logger instance should use the loggers.new_logger() function instead.
+from logging import Logger
+
+from . import config
+from . import buildlogger
+from . import flush
+from . import loggers
diff --git a/test/qa-tests/buildscripts/resmokelib/logging/buildlogger.py b/test/qa-tests/buildscripts/resmokelib/logging/buildlogger.py
new file mode 100644
index 00000000000..c5f5d40401b
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/logging/buildlogger.py
@@ -0,0 +1,284 @@
+"""
+Defines handlers for communicating with a buildlogger server.
+"""
+
+from __future__ import absolute_import
+
+import functools
+import urllib2
+
+from . import handlers
+from . import loggers
+from .. import config as _config
+
+
+CREATE_BUILD_ENDPOINT = "/build"
+APPEND_GLOBAL_LOGS_ENDPOINT = "/build/%(build_id)s"
+CREATE_TEST_ENDPOINT = "/build/%(build_id)s/test"
+APPEND_TEST_LOGS_ENDPOINT = "/build/%(build_id)s/test/%(test_id)s"
+
+_BUILDLOGGER_REALM = "buildlogs"
+_BUILDLOGGER_CONFIG = "mci.buildlogger"
+
+_SEND_AFTER_LINES = 2000
+_SEND_AFTER_SECS = 10
+
+
+def _log_on_error(func):
+ """
+ A decorator that causes any exceptions to be logged by the
+ buildlogger fallback Logger instance.
+
+ Returns the wrapped function's return value, or None if an error
+ was encountered.
+ """
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except urllib2.HTTPError as err:
+ sb = [] # String builder.
+ sb.append("HTTP Error %s: %s" % (err.code, err.msg))
+ sb.append("POST %s" % (err.filename))
+
+ for name in err.hdrs:
+ value = err.hdrs[name]
+ sb.append(" %s: %s" % (name, value))
+
+ # Try to read the response back from the server.
+ if hasattr(err, "read"):
+ sb.append(err.read())
+
+ loggers._BUILDLOGGER_FALLBACK.exception("\n".join(sb))
+ except:
+ loggers._BUILDLOGGER_FALLBACK.exception("Encountered an error.")
+ return None
+
+ return wrapper
+
+@_log_on_error
+def get_config():
+ """
+ Returns the buildlogger configuration as evaluated from the
+ _BUILDLOGGER_CONFIG file.
+ """
+
+ tmp_globals = {} # Avoid conflicts with variables defined in the buildlogger config file.
+ config = {}
+ execfile(_BUILDLOGGER_CONFIG, tmp_globals, config)
+
+ # Rename "slavename" to "username" if present.
+ if "slavename" in config and "username" not in config:
+ config["username"] = config["slavename"]
+ del config["slavename"]
+ # Rename "passwd" to "password" if present.
+ if "passwd" in config and "password" not in config:
+ config["password"] = config["passwd"]
+ del config["passwd"]
+
+ return config
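+
+# The configuration file evaluated above needs to provide the keys consumed by
+# new_build_id() and new_test_id() below, e.g. (values are hypothetical):
+#   username = "builder-host"
+#   password = "secret"
+#   builder = "linux-64"
+#   build_num = 42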
+
+@_log_on_error
+def new_build_id(config):
+ """
+ Returns a new build id for sending global logs to.
+ """
+
+ if config is None:
+ return None
+
+ username = config["username"]
+ password = config["password"]
+ builder = config["builder"]
+ build_num = int(config["build_num"])
+
+ handler = handlers.HTTPHandler(
+ realm=_BUILDLOGGER_REALM,
+ url_root=_config.BUILDLOGGER_URL,
+ username=username,
+ password=password)
+
+ response = handler.post(CREATE_BUILD_ENDPOINT, data={
+ "builder": builder,
+ "buildnum": build_num,
+ })
+
+ return response["id"]
+
+@_log_on_error
+def new_test_id(build_id, build_config, test_filename, test_command):
+ """
+ Returns a new test id for sending test logs to.
+ """
+
+ if build_id is None or build_config is None:
+ return None
+
+ handler = handlers.HTTPHandler(
+ realm=_BUILDLOGGER_REALM,
+ url_root=_config.BUILDLOGGER_URL,
+ username=build_config["username"],
+ password=build_config["password"])
+
+ endpoint = CREATE_TEST_ENDPOINT % {"build_id": build_id}
+ response = handler.post(endpoint, data={
+ "test_filename": test_filename,
+ "command": test_command,
+ "phase": build_config.get("build_phase", "unknown"),
+ })
+
+ return response["id"]
+
+
+class _BaseBuildloggerHandler(handlers.BufferedHandler):
+ """
+ Base class of the buildlogger handler for the global logs and the
+ handler for the test logs.
+ """
+
+ def __init__(self,
+ build_id,
+ build_config,
+ capacity=_SEND_AFTER_LINES,
+ interval_secs=_SEND_AFTER_SECS):
+ """
+ Initializes the buildlogger handler with the build id and
+ credentials.
+ """
+
+ handlers.BufferedHandler.__init__(self, capacity, interval_secs)
+
+ username = build_config["username"]
+ password = build_config["password"]
+
+ self.http_handler = handlers.HTTPHandler(_BUILDLOGGER_REALM,
+ _config.BUILDLOGGER_URL,
+ username,
+ password)
+
+ self.build_id = build_id
+ self.retry_buffer = []
+
+ def process_record(self, record):
+ """
+ Returns a tuple of the time the log record was created and the
+ message, because the buildlogger server expects the log messages
+ to be formatted in JSON as:
+
+ [ [ <log-time-1>, <log-message-1> ],
+ [ <log-time-2>, <log-message-2> ],
+ ... ]
+ """
+ msg = self.format(record)
+ return (record.created, msg)
+
+ def post(self, *args, **kwargs):
+ """
+ Convenience method for subclasses to use when making POST requests.
+ """
+
+ return self.http_handler.post(*args, **kwargs)
+
+ def _append_logs(self, log_lines):
+ raise NotImplementedError("_append_logs must be implemented by _BaseBuildloggerHandler"
+ " subclasses")
+
+ def flush_with_lock(self, close_called):
+ """
+ Ensures all logging output has been flushed to the buildlogger
+ server.
+
+ If _append_logs() returns false, then the log messages are added
+ to a separate buffer and retried the next time flush() is
+ called.
+ """
+
+ self.retry_buffer.extend(self.buffer)
+
+ if self._append_logs(self.retry_buffer):
+ self.retry_buffer = []
+ elif close_called:
+ # Request to the buildlogger server returned an error, so use the fallback logger to
+ # avoid losing the log messages entirely.
+ for (_, message) in self.retry_buffer:
+ # TODO: construct a LogRecord instance equivalent to the one passed to the
+ # process_record() method if we ever decide to log the time when the
+ # LogRecord was created, e.g. using %(asctime)s in
+ # _fallback_buildlogger_handler().
+ loggers._BUILDLOGGER_FALLBACK.info(message)
+ self.retry_buffer = []
+
+ self.buffer = []
+
+
+class BuildloggerTestHandler(_BaseBuildloggerHandler):
+ """
+ Buildlogger handler for the test logs.
+ """
+
+ def __init__(self, build_id, build_config, test_id, **kwargs):
+ """
+ Initializes the buildlogger handler with the build id, test id,
+ and credentials.
+ """
+
+ _BaseBuildloggerHandler.__init__(self, build_id, build_config, **kwargs)
+
+ self.test_id = test_id
+
+ @_log_on_error
+ def _append_logs(self, log_lines):
+ """
+ Sends a POST request to the APPEND_TEST_LOGS_ENDPOINT with the
+ logs that have been captured.
+ """
+ endpoint = APPEND_TEST_LOGS_ENDPOINT % {
+ "build_id": self.build_id,
+ "test_id": self.test_id,
+ }
+
+ response = self.post(endpoint, data=log_lines)
+ return response is not None
+
+ @_log_on_error
+ def _finish_test(self, failed=False):
+ """
+ Sends a POST request to the APPEND_TEST_LOGS_ENDPOINT with the
+ test status.
+ """
+ endpoint = APPEND_TEST_LOGS_ENDPOINT % {
+ "build_id": self.build_id,
+ "test_id": self.test_id,
+ }
+
+ self.post(endpoint, headers={
+ "X-Sendlogs-Test-Done": "true",
+ "X-Sendlogs-Test-Failed": "true" if failed else "false",
+ })
+
+ def close(self):
+ """
+ Closes the buildlogger handler.
+ """
+
+ _BaseBuildloggerHandler.close(self)
+
+ # TODO: pass the test status (success/failure) to this method
+ self._finish_test()
+
+
+class BuildloggerGlobalHandler(_BaseBuildloggerHandler):
+ """
+ Buildlogger handler for the global logs.
+ """
+
+ @_log_on_error
+ def _append_logs(self, log_lines):
+ """
+ Sends a POST request to the APPEND_GLOBAL_LOGS_ENDPOINT with
+ the logs that have been captured.
+ """
+ endpoint = APPEND_GLOBAL_LOGS_ENDPOINT % {"build_id": self.build_id}
+ response = self.post(endpoint, data=log_lines)
+ return response is not None
diff --git a/test/qa-tests/buildscripts/resmokelib/logging/config.py b/test/qa-tests/buildscripts/resmokelib/logging/config.py
new file mode 100644
index 00000000000..c3960bbafd3
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/logging/config.py
@@ -0,0 +1,161 @@
+"""
+Configuration functions for the logging package.
+"""
+
+from __future__ import absolute_import
+
+import logging
+import sys
+
+from . import buildlogger
+from . import formatters
+from . import loggers
+
+
+_DEFAULT_FORMAT = "[%(name)s] %(message)s"
+
+
+def using_buildlogger(logging_config):
+ """
+ Returns true if buildlogger is set as a handler on the "fixture" or
+ "tests" loggers, and false otherwise.
+ """
+ for logger_name in (loggers.FIXTURE_LOGGER_NAME, loggers.TESTS_LOGGER_NAME):
+ logger_info = logging_config[logger_name]
+ if _get_buildlogger_handler_info(logger_info) is not None:
+ return True
+ return False
+
+
+def apply_config(logging_config):
+ """
+ Adds all handlers specified by the configuration to the "executor",
+ "fixture", and "tests" loggers.
+ """
+
+ logging_components = (loggers.EXECUTOR_LOGGER_NAME,
+ loggers.FIXTURE_LOGGER_NAME,
+ loggers.TESTS_LOGGER_NAME)
+
+ if not all(component in logging_config for component in logging_components):
+ raise ValueError("Logging configuration should contain %s, %s, and %s components"
+ % logging_components)
+
+ # Configure the executor, fixture, and tests loggers.
+ for component in logging_components:
+ logger = loggers.LOGGERS_BY_NAME[component]
+ logger_info = logging_config[component]
+ _configure_logger(logger, logger_info)
+
+ # Configure the buildlogger logger.
+ loggers._BUILDLOGGER_FALLBACK.addHandler(_fallback_buildlogger_handler())
+
+
+def apply_buildlogger_global_handler(logger, logging_config, build_id=None, build_config=None):
+ """
+ Adds a buildlogger.BuildloggerGlobalHandler to 'logger' if specified
+ to do so by the configuration.
+ """
+
+ logger_info = logging_config[loggers.FIXTURE_LOGGER_NAME]
+ handler_info = _get_buildlogger_handler_info(logger_info)
+ if handler_info is None:
+ # Not configured to use buildlogger.
+ return
+
+ if all(x is not None for x in (build_id, build_config)):
+ log_format = logger_info.get("format", _DEFAULT_FORMAT)
+ formatter = formatters.ISO8601Formatter(fmt=log_format)
+
+ handler = buildlogger.BuildloggerGlobalHandler(build_id,
+ build_config,
+ **handler_info)
+ handler.setFormatter(formatter)
+ else:
+ handler = _fallback_buildlogger_handler()
+ # Fallback handler already has formatting configured.
+
+ logger.addHandler(handler)
+
+
+def apply_buildlogger_test_handler(logger,
+ logging_config,
+ build_id=None,
+ build_config=None,
+ test_id=None):
+ """
+ Adds a buildlogger.BuildloggerTestHandler to 'logger' if specified
+ to do so by the configuration.
+ """
+
+ logger_info = logging_config[loggers.TESTS_LOGGER_NAME]
+ handler_info = _get_buildlogger_handler_info(logger_info)
+ if handler_info is None:
+ # Not configured to use buildlogger.
+ return
+
+ if all(x is not None for x in (build_id, build_config, test_id)):
+ log_format = logger_info.get("format", _DEFAULT_FORMAT)
+ formatter = formatters.ISO8601Formatter(fmt=log_format)
+
+ handler = buildlogger.BuildloggerTestHandler(build_id,
+ build_config,
+ test_id,
+ **handler_info)
+ handler.setFormatter(formatter)
+ else:
+ handler = _fallback_buildlogger_handler()
+ # Fallback handler already has formatting configured.
+
+ logger.addHandler(handler)
+
+
+def _configure_logger(logger, logger_info):
+ """
+ Adds the handlers specified by the configuration to 'logger'.
+ """
+
+ log_format = logger_info.get("format", _DEFAULT_FORMAT)
+ formatter = formatters.ISO8601Formatter(fmt=log_format)
+
+ for handler_info in logger_info.get("handlers", []):
+ handler_class = handler_info["class"]
+ if handler_class == "logging.FileHandler":
+ handler = logging.FileHandler(filename=handler_info["filename"],
+ mode=handler_info.get("mode", "w"))
+ elif handler_class == "logging.NullHandler":
+ handler = logging.NullHandler()
+ elif handler_class == "logging.StreamHandler":
+ handler = logging.StreamHandler(sys.stdout)
+ elif handler_class == "buildlogger":
+ continue # Buildlogger handlers are applied when running tests.
+ else:
+ raise ValueError("Unknown handler class '%s'" % (handler_class))
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+
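+# Illustrative shape of the per-logger configuration consumed by _configure_logger()
+# (the filename value is hypothetical):
+#   format: "[%(name)s] %(message)s"
+#   handlers:
+#     - class: logging.FileHandler
+#       filename: executor.log
+#       mode: w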
+
+def _fallback_buildlogger_handler():
+ """
+ Returns a handler that writes to stderr.
+ """
+
+ log_format = "[buildlogger:%(name)s] %(message)s"
+ formatter = formatters.ISO8601Formatter(fmt=log_format)
+
+ handler = logging.StreamHandler(sys.stderr)
+ handler.setFormatter(formatter)
+
+ return handler
+
+def _get_buildlogger_handler_info(logger_info):
+ """
+ Returns the buildlogger handler information if it exists, and None
+ otherwise.
+ """
+
+ for handler_info in logger_info["handlers"]:
+ handler_info = handler_info.copy()
+ if handler_info.pop("class") == "buildlogger":
+ return handler_info
+ return None
diff --git a/test/qa-tests/buildscripts/resmokelib/logging/flush.py b/test/qa-tests/buildscripts/resmokelib/logging/flush.py
new file mode 100644
index 00000000000..c45533f1e13
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/logging/flush.py
@@ -0,0 +1,97 @@
+"""
+Workaround for the excessive number of threads that would otherwise run
+on 32-bit systems when logging to buildlogger, while still allowing
+messages to be flushed to the buildlogger server periodically.
+
+This is because a utils.timer.AlarmClock instance is used for each
+buildlogger.BuildloggerTestHandler, but only dismiss()ed when the Python
+process is about to exit.
+"""
+
+from __future__ import absolute_import
+
+import threading
+
+from ..utils import queue
+
+
+_LOGGER_QUEUE = queue.Queue()
+
+_FLUSH_THREAD_LOCK = threading.Lock()
+_FLUSH_THREAD = None
+
+
+def start_thread():
+ """
+ Starts the flush thread.
+ """
+
+ global _FLUSH_THREAD
+ with _FLUSH_THREAD_LOCK:
+ if _FLUSH_THREAD is not None:
+ raise ValueError("FlushThread has already been started")
+
+ _FLUSH_THREAD = _FlushThread()
+ _FLUSH_THREAD.start()
+
+
+def stop_thread():
+ """
+ Signals the flush thread to stop and waits until it does.
+ """
+
+ with _FLUSH_THREAD_LOCK:
+ if _FLUSH_THREAD is None:
+ raise ValueError("FlushThread hasn't been started")
+
+ # Add sentinel value to indicate when there are no more loggers to process.
+ _LOGGER_QUEUE.put(None)
+ _FLUSH_THREAD.join()
+
+
+def close_later(logger):
+ """
+ Adds 'logger' to the queue so that it is closed later by the flush
+ thread.
+ """
+ _LOGGER_QUEUE.put(logger)
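+
+# Illustrative usage: callers are expected to invoke start_thread() once at startup,
+# close_later(logger) whenever a logger's handlers should be flushed and closed
+# asynchronously, and stop_thread() at shutdown so that the queued loggers are drained.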
+
+
+class _FlushThread(threading.Thread):
+ """
+ Asynchronously flushes and closes logging handlers.
+ """
+
+ def __init__(self):
+ """
+ Initializes the flush thread.
+ """
+
+ threading.Thread.__init__(self, name="FlushThread")
+ # Do not wait to flush the logs if interrupted by the user.
+ self.daemon = True
+
+ def run(self):
+ """
+ Continuously shuts down loggers from the queue.
+ """
+
+ while True:
+ logger = _LOGGER_QUEUE.get()
+ try:
+ if logger is None:
+ # Sentinel value received, so exit.
+ break
+ _FlushThread._shutdown_logger(logger)
+ finally:
+ _LOGGER_QUEUE.task_done()
+
+ @staticmethod
+ def _shutdown_logger(logger):
+ """
+ Flushes and closes all handlers of 'logger'.
+ """
+
+ for handler in logger.handlers:
+ handler.flush()
+ handler.close()
diff --git a/test/qa-tests/buildscripts/resmokelib/logging/formatters.py b/test/qa-tests/buildscripts/resmokelib/logging/formatters.py
new file mode 100644
index 00000000000..4cc36da32d4
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/logging/formatters.py
@@ -0,0 +1,50 @@
+"""
+Custom formatters for the logging handlers.
+"""
+
+from __future__ import absolute_import
+
+import logging
+import sys
+import time
+
+
+class ISO8601Formatter(logging.Formatter):
+ """
+ An ISO 8601 compliant formatter for log messages. It formats the
+ timezone as an hour/minute offset and uses a period as the
+ millisecond separator in order to match the log messages of MongoDB.
+ """
+
+ def formatTime(self, record, datefmt=None):
+ converted_time = self.converter(record.created)
+
+ if datefmt is not None:
+ return time.strftime(datefmt, converted_time)
+
+ formatted_time = time.strftime("%Y-%m-%dT%H:%M:%S", converted_time)
+ timezone = ISO8601Formatter._format_timezone_offset(converted_time)
+ return "%s.%03d%s" % (formatted_time, record.msecs, timezone)
+
+ @staticmethod
+ def _format_timezone_offset(converted_time):
+ """
+ Returns the timezone as an hour/minute offset in the form
+ "+HHMM" or "-HHMM".
+ """
+
+ # Windows treats %z in the format string as %Z, so we compute the hour/minute offset
+ # manually.
+ if converted_time.tm_isdst == 1 and time.daylight:
+ utc_offset_secs = time.altzone
+ else:
+ utc_offset_secs = time.timezone
+
+ # The offset is positive if the local timezone is behind (west of) UTC, and negative if it
+ # is ahead (east of) UTC.
+ utc_offset_prefix = "-" if utc_offset_secs > 0 else "+"
+ utc_offset_secs = abs(utc_offset_secs)
+
+ utc_offset_mins = (utc_offset_secs / 60) % 60
+ utc_offset_hours = utc_offset_secs / 3600
+ return "%s%02d%02d" % (utc_offset_prefix, utc_offset_hours, utc_offset_mins)
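+
+ # For example, for US Eastern Standard Time, time.timezone is 18000 (five hours west
+ # of UTC), so this method returns "-0500"; for Central European Time, time.timezone is
+ # -3600 and the result is "+0100".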
diff --git a/test/qa-tests/buildscripts/resmokelib/logging/handlers.py b/test/qa-tests/buildscripts/resmokelib/logging/handlers.py
new file mode 100644
index 00000000000..b688a1da68a
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/logging/handlers.py
@@ -0,0 +1,178 @@
+"""
+Additional handlers that are used as the base classes of the buildlogger
+handler.
+"""
+
+from __future__ import absolute_import
+
+import json
+import logging
+import threading
+import urllib2
+
+from .. import utils
+from ..utils import timer
+
+_TIMEOUT_SECS = 10
+
+class BufferedHandler(logging.Handler):
+ """
+ A handler class that buffers logging records in memory. Whenever a
+ record is added to the buffer, a check is made to see if the buffer
+ should be flushed. If it should be, then flush() is expected to do
+ whatever is needed.
+ """
+
+ def __init__(self, capacity, interval_secs):
+ """
+ Initializes the handler with the buffer size and timeout after
+ which the buffer is flushed regardless.
+ """
+
+ logging.Handler.__init__(self)
+
+ if not isinstance(capacity, int):
+ raise TypeError("capacity must be an integer")
+ elif capacity <= 0:
+ raise ValueError("capacity must be a positive integer")
+
+ if not isinstance(interval_secs, (int, float)):
+ raise TypeError("interval_secs must be a number")
+ elif interval_secs <= 0.0:
+ raise ValueError("interval_secs must be a positive number")
+
+ self.capacity = capacity
+ self.interval_secs = interval_secs
+ self.buffer = []
+
+ self._lock = threading.Lock()
+ self._timer = None # Defer creation until we actually begin to log messages.
+
+ def _new_timer(self):
+ """
+ Returns a new timer.AlarmClock instance that will call the
+ flush() method after 'interval_secs' seconds.
+ """
+
+ return timer.AlarmClock(self.interval_secs, self.flush, args=[self])
+
+ def process_record(self, record):
+ """
+ Applies a transformation to the record before it gets added to
+ the buffer.
+
+ The default implementation returns 'record' unmodified.
+ """
+
+ return record
+
+ def emit(self, record):
+ """
+ Emits a record.
+
+ Append the record to the buffer after it has been transformed by
+ process_record(). If the length of the buffer is greater than or
+ equal to its capacity, then flush() is called to process the
+ buffer.
+
+ After flushing the buffer, the timer is restarted so that it
+ will expire after another 'interval_secs' seconds.
+ """
+
+ with self._lock:
+ self.buffer.append(self.process_record(record))
+ if len(self.buffer) >= self.capacity:
+ if self._timer is not None:
+ self._timer.snooze()
+ self.flush_with_lock(False)
+ if self._timer is not None:
+ self._timer.reset()
+
+ if self._timer is None:
+ self._timer = self._new_timer()
+ self._timer.start()
+
+ def flush(self, close_called=False):
+ """
+ Ensures all logging output has been flushed.
+ """
+
+ with self._lock:
+ if self.buffer:
+ self.flush_with_lock(close_called)
+
+ def flush_with_lock(self, close_called):
+ """
+ Ensures all logging output has been flushed.
+
+ This version resets the buffers back to an empty list and is
+ intended to be overridden by subclasses.
+ """
+
+ self.buffer = []
+
+ def close(self):
+ """
+ Tidies up any resources used by the handler.
+
+ Stops the timer and flushes the buffer.
+ """
+
+ if self._timer is not None:
+ self._timer.dismiss()
+ self.flush(close_called=True)
+
+ logging.Handler.close(self)
+
+
+class HTTPHandler(object):
+ """
+ A class which sends data to a web server using POST requests.
+ """
+
+ def __init__(self, realm, url_root, username, password):
+ """
+ Initializes the handler with the necessary authentication
+ credentials.
+ """
+
+ digest_handler = urllib2.HTTPDigestAuthHandler()
+ digest_handler.add_password(
+ realm=realm,
+ uri=url_root,
+ user=username,
+ passwd=password)
+
+ self.url_root = url_root
+ self.url_opener = urllib2.build_opener(digest_handler, urllib2.HTTPErrorProcessor())
+
+ def _make_url(self, endpoint):
+ return "%s/%s/" % (self.url_root.rstrip("/"), endpoint.strip("/"))
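+
+ # Illustrative example (the URL is hypothetical): with url_root "https://logs.example.org/"
+ # and endpoint "/build/abc123", this returns "https://logs.example.org/build/abc123/".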
+
+ def post(self, endpoint, data=None, headers=None, timeout_secs=_TIMEOUT_SECS):
+ """
+ Sends a POST request to the specified endpoint with the supplied
+ data.
+
+ Returns the response, either as a string or a JSON object based
+ on the content type.
+ """
+
+ data = utils.default_if_none(data, [])
+ data = json.dumps(data, encoding="utf-8")
+
+ headers = utils.default_if_none(headers, {})
+ headers["Content-Type"] = "application/json; charset=utf-8"
+
+ url = self._make_url(endpoint)
+ request = urllib2.Request(url=url, data=data, headers=headers)
+
+ response = self.url_opener.open(request, timeout=timeout_secs)
+ headers = response.info()
+
+ content_type = headers.gettype()
+ if content_type == "application/json":
+ encoding = headers.getparam("charset") or "utf-8"
+ return json.load(response, encoding=encoding)
+
+ return response.read()
diff --git a/test/qa-tests/buildscripts/resmokelib/logging/loggers.py b/test/qa-tests/buildscripts/resmokelib/logging/loggers.py
new file mode 100644
index 00000000000..35f41512425
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/logging/loggers.py
@@ -0,0 +1,37 @@
+"""
+Module to hold the logger instances themselves.
+"""
+
+from __future__ import absolute_import
+
+import logging
+
+EXECUTOR_LOGGER_NAME = "executor"
+FIXTURE_LOGGER_NAME = "fixture"
+TESTS_LOGGER_NAME = "tests"
+
+def new_logger(logger_name, parent=None):
+ """
+ Returns a new logging.Logger instance with the specified name.
+ """
+
+ # Set up the logger to handle all messages it receives.
+ logger = logging.Logger(logger_name, level=logging.DEBUG)
+
+ if parent is not None:
+ logger.parent = parent
+ logger.propagate = True
+
+ return logger
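+
+# Illustrative usage (the child logger name is hypothetical): per-job loggers are created
+# as children of the module-level loggers defined below, e.g.
+#   job_logger = new_logger("MongoDFixture:job0", parent=FIXTURE)
+# so that their records propagate up to the handlers attached to FIXTURE.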
+
+EXECUTOR = new_logger(EXECUTOR_LOGGER_NAME)
+FIXTURE = new_logger(FIXTURE_LOGGER_NAME)
+TESTS = new_logger(TESTS_LOGGER_NAME)
+
+LOGGERS_BY_NAME = {
+ EXECUTOR_LOGGER_NAME: EXECUTOR,
+ FIXTURE_LOGGER_NAME: FIXTURE,
+ TESTS_LOGGER_NAME: TESTS,
+}
+
+_BUILDLOGGER_FALLBACK = new_logger("fallback")
diff --git a/test/qa-tests/buildscripts/resmokelib/parser.py b/test/qa-tests/buildscripts/resmokelib/parser.py
new file mode 100644
index 00000000000..4bcc7bfb137
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/parser.py
@@ -0,0 +1,368 @@
+"""
+Parser for command line arguments.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+import optparse
+
+from . import config as _config
+from . import testing
+from . import utils
+from .. import resmokeconfig
+
+
+# Mapping of the attribute of the parsed arguments (dest) to its key as it appears in the options
+# YAML configuration file. Most entries simply convert from snake_case to camelCase.
+DEST_TO_CONFIG = {
+ "base_port": "basePort",
+ "buildlogger_url": "buildloggerUrl",
+ "continue_on_failure": "continueOnFailure",
+ "dbpath_prefix": "dbpathPrefix",
+ "dbtest_executable": "dbtest",
+ "dry_run": "dryRun",
+ "exclude_with_all_tags": "excludeWithAllTags",
+ "exclude_with_any_tags": "excludeWithAnyTags",
+ "include_with_all_tags": "includeWithAllTags",
+ "include_with_any_tags": "includeWithAnyTags",
+ "jobs": "jobs",
+ "mongo_executable": "mongo",
+ "mongod_executable": "mongod",
+ "mongod_parameters": "mongodSetParameters",
+ "mongos_executable": "mongos",
+ "mongos_parameters": "mongosSetParameters",
+ "no_journal": "nojournal",
+ "prealloc_journal": "preallocJournal",
+ "repeat": "repeat",
+ "report_file": "reportFile",
+ "seed": "seed",
+ "shell_read_mode": "shellReadMode",
+ "shell_write_mode": "shellWriteMode",
+ "shuffle": "shuffle",
+ "storage_engine": "storageEngine",
+ "wt_coll_config": "wiredTigerCollectionConfigString",
+ "wt_engine_config": "wiredTigerEngineConfigString",
+ "wt_index_config": "wiredTigerIndexConfigString"
+}
+
+
+def parse_command_line():
+ """
+ Parses the command line arguments passed to resmoke.py.
+ """
+
+ parser = optparse.OptionParser()
+
+ parser.add_option("--suites", dest="suite_files", metavar="SUITE1,SUITE2",
+ help=("Comma separated list of YAML files that each specify the configuration"
+ " of a suite. If the file is located in the resmokeconfig/suites/"
+ " directory, then the basename without the .yml extension can be"
+ " specified, e.g. 'core'."))
+
+ parser.add_option("--executor", dest="executor_file", metavar="EXECUTOR",
+ help=("A YAML file that specifies the executor configuration. If the file is"
+ " located in the resmokeconfig/suites/ directory, then the basename"
+ " without the .yml extension can be specified, e.g. 'core_small_oplog'."
+ " If specified in combination with the --suites option, then the suite"
+ " configuration takes precedence."))
+
+ parser.add_option("--log", dest="logger_file", metavar="LOGGER",
+ help=("A YAML file that specifies the logging configuration. If the file is"
+ " located in the resmokeconfig/suites/ directory, then the basename"
+ " without the .yml extension can be specified, e.g. 'console'."))
+
+ parser.add_option("--options", dest="options_file", metavar="OPTIONS",
+ help="A YAML file that specifies global options to resmoke.py.")
+
+ parser.add_option("--basePort", dest="base_port", metavar="PORT",
+ help=("The starting port number to use for mongod and mongos processes"
+ " spawned by resmoke.py or the tests themselves. Each fixture and Job"
+ " allocates a contiguous range of ports."))
+
+ parser.add_option("--buildloggerUrl", action="store", dest="buildlogger_url", metavar="URL",
+ help="The root url of the buildlogger server.")
+
+ parser.add_option("--continueOnFailure", action="store_true", dest="continue_on_failure",
+ help="Executes all tests in all suites, even if some of them fail.")
+
+ parser.add_option("--dbpathPrefix", dest="dbpath_prefix", metavar="PATH",
+ help=("The directory which will contain the dbpaths of any mongod processes"
+ " started by resmoke.py or the tests themselves."))
+
+ parser.add_option("--dbtest", dest="dbtest_executable", metavar="PATH",
+ help="The path to the dbtest executable for resmoke to use.")
+
+ parser.add_option("--excludeWithAllTags", dest="exclude_with_all_tags", metavar="TAG1,TAG2",
+ help=("Comma separated list of tags. Any jstest that contains all of the"
+ " specified tags will be excluded from any suites that are run."))
+
+ parser.add_option("--excludeWithAnyTags", dest="exclude_with_any_tags", metavar="TAG1,TAG2",
+ help=("Comma separated list of tags. Any jstest that contains any of the"
+ " specified tags will be excluded from any suites that are run."))
+
+ parser.add_option("--includeWithAllTags", dest="include_with_all_tags", metavar="TAG1,TAG2",
+ help=("Comma separated list of tags. For the jstest portion of the suite(s),"
+ " only tests which have all of the specified tags will be run."))
+
+ parser.add_option("--includeWithAnyTags", dest="include_with_any_tags", metavar="TAG1,TAG2",
+ help=("Comma separated list of tags. For the jstest portion of the suite(s),"
+ " only tests which have at least one of the specified tags will be"
+ " run."))
+
+ parser.add_option("-n", action="store_const", const="tests", dest="dry_run",
+ help=("Output the tests that would be run."))
+
+ # TODO: add support for --dryRun=commands
+ parser.add_option("--dryRun", type="choice", action="store", dest="dry_run",
+ choices=("off", "tests"), metavar="MODE",
+ help=("Instead of running the tests, output the tests that would be run"
+ " (if MODE=tests). Defaults to MODE=%default."))
+
+ parser.add_option("-j", "--jobs", type="int", dest="jobs", metavar="JOBS",
+ help=("The number of Job instances to use. Each instance will receive its own"
+ " MongoDB deployment to dispatch tests to."))
+
+ parser.add_option("-l", "--listSuites", action="store_true", dest="list_suites",
+ help="List the names of the suites available to execute.")
+
+ parser.add_option("--mongo", dest="mongo_executable", metavar="PATH",
+ help="The path to the mongo shell executable for resmoke.py to use.")
+
+ parser.add_option("--mongod", dest="mongod_executable", metavar="PATH",
+ help="The path to the mongod executable for resmoke.py to use.")
+
+ parser.add_option("--mongodSetParameters", dest="mongod_parameters",
+ metavar="{key1: value1, key2: value2, ..., keyN: valueN}",
+ help=("Pass one or more --setParameter options to all mongod processes"
+ " started by resmoke.py. The argument is specified as bracketed YAML -"
+ " i.e. JSON with support for single quoted and unquoted keys."))
+
+ parser.add_option("--mongos", dest="mongos_executable", metavar="PATH",
+ help="The path to the mongos executable for resmoke.py to use.")
+
+ parser.add_option("--mongosSetParameters", dest="mongos_parameters",
+ metavar="{key1: value1, key2: value2, ..., keyN: valueN}",
+ help=("Pass one or more --setParameter options to all mongos processes"
+ " started by resmoke.py. The argument is specified as bracketed YAML -"
+ " i.e. JSON with support for single quoted and unquoted keys."))
+
+ parser.add_option("--nojournal", action="store_true", dest="no_journal",
+ help="Disable journaling for all mongod processes.")
+
+ parser.add_option("--nopreallocj", action="store_const", const="off", dest="prealloc_journal",
+ help="Disable preallocation of journal files for all mongod processes.")
+
+ parser.add_option("--preallocJournal", type="choice", action="store", dest="prealloc_journal",
+ choices=("on", "off"), metavar="ON|OFF",
+ help=("Enable or disable preallocation of journal files for all mongod"
+ " processes. Defaults to %default."))
+
+ parser.add_option("--repeat", type="int", dest="repeat", metavar="N",
+ help="Repeat the given suite(s) N times, or until one fails.")
+
+ parser.add_option("--reportFile", dest="report_file", metavar="REPORT",
+ help="Write a JSON file with test status and timing information.")
+
+ parser.add_option("--seed", type="int", dest="seed", metavar="SEED",
+ help=("Seed for the random number generator. Useful in combination with the"
+ " --shuffle option for producing a consistent test execution order."))
+
+ parser.add_option("--shellReadMode", type="choice", action="store", dest="shell_read_mode",
+ choices=("commands", "compatibility", "legacy"), metavar="READ_MODE",
+ help="The read mode used by the mongo shell.")
+
+ parser.add_option("--shellWriteMode", type="choice", action="store", dest="shell_write_mode",
+ choices=("commands", "compatibility", "legacy"), metavar="WRITE_MODE",
+ help="The write mode used by the mongo shell.")
+
+ parser.add_option("--shuffle", action="store_true", dest="shuffle",
+ help="Randomize the order in which tests are executed.")
+
+ parser.add_option("--storageEngine", dest="storage_engine", metavar="ENGINE",
+ help="The storage engine used by dbtests and jstests.")
+
+ parser.add_option("--wiredTigerCollectionConfigString", dest="wt_coll_config", metavar="CONFIG",
+ help="Set the WiredTiger collection configuration setting for all mongod processes.")
+
+ parser.add_option("--wiredTigerEngineConfigString", dest="wt_engine_config", metavar="CONFIG",
+ help="Set the WiredTiger engine configuration setting for all mongod processes.")
+
+ parser.add_option("--wiredTigerIndexConfigString", dest="wt_index_config", metavar="CONFIG",
+ help="Set the WiredTiger index configuration setting for all mongod processes.")
+
+ parser.set_defaults(executor_file="with_server",
+ logger_file="console",
+ dry_run="off",
+ list_suites=False,
+ prealloc_journal="off")
+
+ return parser.parse_args()
+
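+# Illustrative invocation using the options defined above (the option values are
+# hypothetical):
+#   resmoke.py --suites=core --storageEngine=wiredTiger --jobs=4 --continueOnFailure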
+
+def get_logging_config(values):
+ return _get_logging_config(values.logger_file)
+
+
+def update_config_vars(values):
+ options = _get_options_config(values.options_file)
+
+ config = _config.DEFAULTS.copy()
+ config.update(options)
+
+ values = vars(values)
+ for dest in values:
+ if dest not in DEST_TO_CONFIG:
+ continue
+ config_var = DEST_TO_CONFIG[dest]
+ if values[dest] is not None:
+ config[config_var] = values[dest]
+
+ _config.BASE_PORT = int(config.pop("basePort"))
+ _config.BUILDLOGGER_URL = config.pop("buildloggerUrl")
+ _config.DBPATH_PREFIX = _expand_user(config.pop("dbpathPrefix"))
+ _config.DBTEST_EXECUTABLE = _expand_user(config.pop("dbtest"))
+ _config.DRY_RUN = config.pop("dryRun")
+ _config.EXCLUDE_WITH_ALL_TAGS = config.pop("excludeWithAllTags")
+ _config.EXCLUDE_WITH_ANY_TAGS = config.pop("excludeWithAnyTags")
+ _config.FAIL_FAST = not config.pop("continueOnFailure")
+ _config.INCLUDE_WITH_ALL_TAGS = config.pop("includeWithAllTags")
+ _config.INCLUDE_WITH_ANY_TAGS = config.pop("includeWithAnyTags")
+ _config.JOBS = config.pop("jobs")
+ _config.MONGO_EXECUTABLE = _expand_user(config.pop("mongo"))
+ _config.MONGOD_EXECUTABLE = _expand_user(config.pop("mongod"))
+ _config.MONGOD_SET_PARAMETERS = config.pop("mongodSetParameters")
+ _config.MONGOS_EXECUTABLE = _expand_user(config.pop("mongos"))
+ _config.MONGOS_SET_PARAMETERS = config.pop("mongosSetParameters")
+ _config.NO_JOURNAL = config.pop("nojournal")
+ _config.NO_PREALLOC_JOURNAL = config.pop("preallocJournal") == "off"
+ _config.RANDOM_SEED = config.pop("seed")
+ _config.REPEAT = config.pop("repeat")
+ _config.REPORT_FILE = config.pop("reportFile")
+ _config.SHELL_READ_MODE = config.pop("shellReadMode")
+ _config.SHELL_WRITE_MODE = config.pop("shellWriteMode")
+ _config.SHUFFLE = config.pop("shuffle")
+ _config.STORAGE_ENGINE = config.pop("storageEngine")
+ _config.WT_COLL_CONFIG = config.pop("wiredTigerCollectionConfigString")
+ _config.WT_ENGINE_CONFIG = config.pop("wiredTigerEngineConfigString")
+ _config.WT_INDEX_CONFIG = config.pop("wiredTigerIndexConfigString")
+
+ if config:
+ raise optparse.OptionValueError("Unknown option(s): %s" % (config.keys()))
+
+
+def get_suites(values, args):
+ if (values.suite_files is None and not args) or (values.suite_files is not None and args):
+ raise optparse.OptionValueError("Must specify either --suites or a list of tests")
+
+ # If there are no suites specified, but there are args, assume they are jstests.
+ if args:
+ # No specified config, just use the following, and default the logging and executor.
+ suite_config = _make_jstests_config(args)
+ _ensure_executor(suite_config, values.executor_file)
+ suite = testing.suite.Suite("<jstests>", suite_config)
+ return [suite]
+
+ suite_files = values.suite_files.split(",")
+
+ suites = []
+ for suite_filename in suite_files:
+ suite_config = _get_suite_config(suite_filename)
+ _ensure_executor(suite_config, values.executor_file)
+ suite = testing.suite.Suite(suite_filename, suite_config)
+ suites.append(suite)
+ return suites
+
+
+def get_named_suites():
+ """
+ Returns the list of suites available to execute.
+ """
+
+ # Skip "with_server" and "no_server" because they do not define any test files to run.
+ executor_only = set(["with_server", "no_server"])
+ suite_names = [suite for suite in resmokeconfig.NAMED_SUITES if suite not in executor_only]
+ suite_names.sort()
+ return suite_names
+
+
+def _get_logging_config(pathname):
+ """
+ Attempts to read a YAML configuration from 'pathname' that describes
+ how resmoke.py should log the tests and fixtures.
+ """
+
+ # Named loggers are specified as the basename of the file, without the .yml extension.
+ if not utils.is_yaml_file(pathname) and not os.path.dirname(pathname):
+ if pathname not in resmokeconfig.NAMED_LOGGERS:
+ raise optparse.OptionValueError("Unknown logger '%s'" % (pathname))
+ pathname = resmokeconfig.NAMED_LOGGERS[pathname] # Expand 'pathname' to full path.
+
+ if not utils.is_yaml_file(pathname) or not os.path.isfile(pathname):
+ raise optparse.OptionValueError("Expected a logger YAML config, but got '%s'" % (pathname))
+
+ return utils.load_yaml_file(pathname).pop("logging")
+
+
+def _get_options_config(pathname):
+ """
+ Attempts to read a YAML configuration from 'pathname' that describes
+ any modifications to global options.
+ """
+
+ if pathname is None:
+ return {}
+
+ return utils.load_yaml_file(pathname).pop("options")
+
+
+def _get_suite_config(pathname):
+ """
+ Attempts to read a YAML configuration from 'pathname' that describes
+ what tests to run and how to run them.
+ """
+
+ # Named suites are specified as the basename of the file, without the .yml extension.
+ if not utils.is_yaml_file(pathname) and not os.path.dirname(pathname):
+ if pathname not in resmokeconfig.NAMED_SUITES:
+ raise optparse.OptionValueError("Unknown suite '%s'" % (pathname))
+ pathname = resmokeconfig.NAMED_SUITES[pathname] # Expand 'pathname' to full path.
+
+ if not utils.is_yaml_file(pathname) or not os.path.isfile(pathname):
+ raise optparse.OptionValueError("Expected a suite YAML config, but got '%s'" % (pathname))
+
+ return utils.load_yaml_file(pathname)
+
+
+def _make_jstests_config(js_files):
+ for pathname in js_files:
+ if not utils.is_js_file(pathname) or not os.path.isfile(pathname):
+ raise optparse.OptionValueError("Expected a list of JS files, but got '%s'"
+ % (pathname))
+
+ return {"selector": {"js_test": {"roots": js_files}}}
+
+
+def _ensure_executor(suite_config, executor_pathname):
+ if "executor" not in suite_config:
+ # Named executors are specified as the basename of the file, without the .yml extension.
+ if not utils.is_yaml_file(executor_pathname) and not os.path.dirname(executor_pathname):
+ if executor_pathname not in resmokeconfig.NAMED_SUITES:
+ raise optparse.OptionValueError("Unknown executor '%s'" % (executor_pathname))
+ executor_pathname = resmokeconfig.NAMED_SUITES[executor_pathname]
+
+ if not utils.is_yaml_file(executor_pathname) or not os.path.isfile(executor_pathname):
+ raise optparse.OptionValueError("Expected an executor YAML config, but got '%s'"
+ % (executor_pathname))
+
+ suite_config["executor"] = utils.load_yaml_file(executor_pathname).pop("executor")
+
+
+def _expand_user(pathname):
+ """
+ Wrapper around os.path.expanduser() to do nothing when given None.
+ """
+ if pathname is None:
+ return None
+ return os.path.expanduser(pathname)
diff --git a/test/qa-tests/buildscripts/resmokelib/selector.py b/test/qa-tests/buildscripts/resmokelib/selector.py
new file mode 100644
index 00000000000..c2dc0fca41b
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/selector.py
@@ -0,0 +1,291 @@
+"""
+Test selection utility.
+
+Defines filtering rules for what tests to include in a suite depending
+on whether they apply to C++ unit tests, dbtests, or JS tests.
+"""
+
+from __future__ import absolute_import
+
+import fnmatch
+import os.path
+import subprocess
+import sys
+
+from . import config
+from . import errors
+from . import utils
+from .utils import globstar
+from .utils import jscomment
+
+def _filter_cpp_tests(kind, root, include_files, exclude_files):
+ """
+ Generic filtering logic for C++ tests that are sourced from a list
+ of test executables.
+ """
+ include_files = utils.default_if_none(include_files, [])
+ exclude_files = utils.default_if_none(exclude_files, [])
+
+ tests = []
+ with open(root, "r") as fp:
+ for test_path in fp:
+ test_path = test_path.rstrip()
+ tests.append(test_path)
+
+ (remaining, included, _) = _filter_by_filename(kind,
+ tests,
+ include_files,
+ exclude_files)
+
+ if include_files:
+ return list(included)
+ elif exclude_files:
+ return list(remaining)
+ return tests
+
+def filter_cpp_unit_tests(root="build/unittests.txt", include_files=None, exclude_files=None):
+ """
+ Filters out what C++ unit tests to run.
+ """
+ return _filter_cpp_tests("C++ unit test", root, include_files, exclude_files)
+
+
+def filter_cpp_integration_tests(root="build/integration_tests.txt",
+ include_files=None,
+ exclude_files=None):
+ """
+ Filters out what C++ integration tests to run.
+ """
+ return _filter_cpp_tests("C++ integration test", root, include_files, exclude_files)
+
+
+def filter_dbtests(binary=None, include_suites=None):
+ """
+ Filters out what dbtests to run.
+ """
+
+ # Command line option overrides the YAML configuration.
+ binary = utils.default_if_none(config.DBTEST_EXECUTABLE, binary)
+ # Use the default if nothing specified.
+ binary = utils.default_if_none(binary, config.DEFAULT_DBTEST_EXECUTABLE)
+
+ include_suites = utils.default_if_none(include_suites, [])
+
+ if not utils.is_string_list(include_suites):
+ raise TypeError("include_suites must be a list of strings")
+
+ # Ensure that executable files on Windows have a ".exe" extension.
+ if sys.platform == "win32" and os.path.splitext(binary)[1] != ".exe":
+ binary += ".exe"
+
+ program = subprocess.Popen([binary, "--list"], stdout=subprocess.PIPE)
+ stdout = program.communicate()[0]
+
+ if program.returncode != 0:
+ raise errors.ResmokeError("Getting list of dbtest suites failed")
+
+ dbtests = stdout.splitlines()
+
+ if not include_suites:
+ return dbtests
+
+ dbtests = set(dbtests)
+
+ (verbatim, globbed) = _partition(include_suites, normpath=False)
+ included = _pop_all("dbtest suite", dbtests, verbatim)
+
+ for suite_pattern in globbed:
+ for suite_name in dbtests:
+ if fnmatch.fnmatchcase(suite_name, suite_pattern):
+ included.add(suite_name)
+
+ return list(included)
+
+
+def filter_jstests(roots,
+ include_files=None,
+ include_with_all_tags=None,
+ include_with_any_tags=None,
+ exclude_files=None,
+ exclude_with_all_tags=None,
+ exclude_with_any_tags=None):
+ """
+ Filters out what jstests to run.
+ """
+
+ include_files = utils.default_if_none(include_files, [])
+ exclude_files = utils.default_if_none(exclude_files, [])
+
+ # Command line options override the YAML options, and all should be defaulted to an empty list
+ # if not specified.
+ tags = {
+ "exclude_with_all_tags": exclude_with_all_tags,
+ "exclude_with_any_tags": exclude_with_any_tags,
+ "include_with_all_tags": include_with_all_tags,
+ "include_with_any_tags": include_with_any_tags,
+ }
+ cmd_line_values = (
+ ("exclude_with_all_tags", config.EXCLUDE_WITH_ALL_TAGS),
+ ("exclude_with_any_tags", config.EXCLUDE_WITH_ANY_TAGS),
+ ("include_with_all_tags", config.INCLUDE_WITH_ALL_TAGS),
+ ("include_with_any_tags", config.INCLUDE_WITH_ANY_TAGS),
+ )
+ for (tag_category, cmd_line_val) in cmd_line_values:
+ if cmd_line_val is not None:
+ # Ignore the empty string when it is used as a tag. Specifying an empty string on the
+ # command line allows a user to unset the list of tags specified in the YAML
+ # configuration.
+ tags[tag_category] = set([tag for tag in cmd_line_val.split(",") if tag != ""])
+ else:
+ tags[tag_category] = set(utils.default_if_none(tags[tag_category], []))
+
+ using_tags = 0
+ for name in tags:
+ if not utils.is_string_set(tags[name]):
+ raise TypeError("%s must be a list of strings" % (name))
+ if len(tags[name]) > 0:
+ using_tags += 1
+
+ if using_tags > 1:
+ raise ValueError("Can only specify one of 'include_with_all_tags', 'include_with_any_tags',"
+ " 'exclude_with_all_tags', and 'exclude_with_any_tags'. If you wish to"
+ " unset one of these options, use --includeWithAllTags='' or similar")
+
+ jstests = []
+ for root in roots:
+ jstests.extend(globstar.iglob(root))
+
+ (remaining, included, _) = _filter_by_filename("jstest",
+ jstests,
+ include_files,
+ exclude_files)
+
+ # Skip parsing comments if not using tags
+ if not using_tags:
+ if include_files:
+ return list(included)
+ elif exclude_files:
+ return list(remaining)
+ return jstests
+
+ jstests = set(remaining)
+ excluded = set()
+
+ for filename in jstests:
+ file_tags = set(jscomment.get_tags(filename))
+ if tags["include_with_all_tags"] and not tags["include_with_all_tags"] - file_tags:
+ included.add(filename)
+ elif tags["include_with_any_tags"] and tags["include_with_any_tags"] & file_tags:
+ included.add(filename)
+ elif tags["exclude_with_all_tags"] and not tags["exclude_with_all_tags"] - file_tags:
+ excluded.add(filename)
+ elif tags["exclude_with_any_tags"] and tags["exclude_with_any_tags"] & file_tags:
+ excluded.add(filename)
+
+ if tags["include_with_all_tags"] or tags["include_with_any_tags"]:
+ if exclude_files:
+ return list((included & jstests) - excluded)
+ return list(included)
+ else:
+ if include_files:
+ return list(included | (jstests - excluded))
+ return list(jstests - excluded)
+
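+# Illustrative example of the tag filtering above (the tag names are hypothetical): with
+# include_with_any_tags=["auth"], a jstest whose comment declares the tags
+# ["auth", "slow"] is included, while one declaring only ["slow"] is not.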
+
+def _filter_by_filename(kind, universe, include_files, exclude_files):
+ """
+ Filters out what tests to run solely by filename.
+
+ Returns the triplet (remaining, included, excluded), where
+ 'remaining' is 'universe' after 'included' and 'excluded' were
+ removed from it.
+ """
+
+ if not utils.is_string_list(include_files):
+ raise TypeError("include_files must be a list of strings")
+ elif not utils.is_string_list(exclude_files):
+ raise TypeError("exclude_files must be a list of strings")
+ elif include_files and exclude_files:
+ raise ValueError("Cannot specify both include_files and exclude_files")
+
+ universe = set(universe)
+ if include_files:
+ (verbatim, globbed) = _partition(include_files)
+ # Remove all matching files of 'verbatim' from 'universe'.
+ included_verbatim = _pop_all(kind, universe, verbatim)
+ included_globbed = set()
+
+ for file_pattern in globbed:
+ included_globbed.update(globstar.iglob(file_pattern))
+
+ # Remove all matching files of 'included_globbed' from 'universe' without checking whether
+ # the same file was expanded more than once. This implicitly takes an intersection
+ # between 'included_globbed' and 'universe'.
+ included_globbed = _pop_all(kind, universe, included_globbed, validate=False)
+ return (universe, included_verbatim | included_globbed, set())
+
+ elif exclude_files:
+ (verbatim, globbed) = _partition(exclude_files)
+
+ # Remove all matching files of 'verbatim' from 'universe'.
+ excluded_verbatim = _pop_all(kind, universe, verbatim)
+ excluded_globbed = set()
+
+ for file_pattern in globbed:
+ excluded_globbed.update(globstar.iglob(file_pattern))
+
+ # Remove all matching files of 'excluded_globbed' from 'universe' without checking whether
+ # the same file was expanded more than once. This implicitly takes an intersection
+ # between 'excluded_globbed' and 'universe'.
+ excluded_globbed = _pop_all(kind, universe, excluded_globbed, validate=False)
+ return (universe, set(), excluded_verbatim | excluded_globbed)
+
+ return (universe, set(), set())
+
+
+def _partition(pathnames, normpath=True):
+ """
+ Splits 'pathnames' into two separate lists based on whether they
+ use a glob pattern.
+
+ Returns the pair (non-globbed pathnames, globbed pathnames).
+ """
+
+ verbatim = []
+ globbed = []
+
+ for pathname in pathnames:
+ if globstar.is_glob_pattern(pathname):
+ globbed.append(pathname)
+ continue
+
+ # Normalize 'pathname' so exact string comparison can be used later.
+ if normpath:
+ pathname = os.path.normpath(pathname)
+ verbatim.append(pathname)
+
+ return (verbatim, globbed)
+
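+# Illustrative example of _partition() (the paths are hypothetical):
+#   _partition(["jstests/core/basic1.js", "jstests/core/*.js"])
+# returns (["jstests/core/basic1.js"], ["jstests/core/*.js"]), i.e. verbatim paths
+# versus glob patterns.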
+
+def _pop_all(kind, universe, iterable, validate=True):
+ """
+ Removes all elements of 'iterable' from 'universe' and returns them.
+
+ If 'validate' is true, then a ValueError is raised if an element
+ would be removed multiple times, or if an element of 'iterable' does
+ not appear in 'universe' at all.
+ """
+
+ members = set()
+
+ for elem in iterable:
+ if validate and elem in members:
+ raise ValueError("%s '%s' specified multiple times" % (kind, elem))
+
+ if elem in universe:
+ universe.remove(elem)
+ members.add(elem)
+ elif validate:
+ raise ValueError("Unrecognized %s '%s'" % (kind, elem))
+
+ return members
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/__init__.py b/test/qa-tests/buildscripts/resmokelib/testing/__init__.py
new file mode 100644
index 00000000000..e4acff00521
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/__init__.py
@@ -0,0 +1,9 @@
+"""
+Extension to the unittest package to support buildlogger and parallel
+test execution.
+"""
+
+from __future__ import absolute_import
+
+from . import executor
+from . import suite
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/executor.py b/test/qa-tests/buildscripts/resmokelib/testing/executor.py
new file mode 100644
index 00000000000..5d79abd6ac6
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/executor.py
@@ -0,0 +1,307 @@
+"""
+Driver of the test execution framework.
+"""
+
+from __future__ import absolute_import
+
+import threading
+
+from . import fixtures
+from . import hooks as _hooks
+from . import job as _job
+from . import report as _report
+from . import testcases
+from .. import config as _config
+from .. import errors
+from .. import logging
+from .. import utils
+from ..utils import queue as _queue
+
+
+class TestGroupExecutor(object):
+ """
+ Executes a test group.
+
+ Responsible for setting up and tearing down the fixtures that the
+ tests execute against.
+ """
+
+ _TIMEOUT = 24 * 60 * 60 # =1 day (a long time to have tests run)
+
+ def __init__(self,
+ exec_logger,
+ test_group,
+ logging_config,
+ config=None,
+ fixture=None,
+ hooks=None):
+ """
+ Initializes the TestGroupExecutor with the test group to run.
+ """
+
+ # Build a logger for executing this group of tests.
+ logger_name = "%s:%s" % (exec_logger.name, test_group.test_kind)
+ self.logger = logging.loggers.new_logger(logger_name, parent=exec_logger)
+
+ self.logging_config = logging_config
+ self.fixture_config = fixture
+ self.hooks_config = utils.default_if_none(hooks, [])
+ self.test_config = utils.default_if_none(config, {})
+
+ self._test_group = test_group
+
+ self._using_buildlogger = logging.config.using_buildlogger(logging_config)
+ self._build_config = None
+
+ if self._using_buildlogger:
+ self._build_config = logging.buildlogger.get_config()
+
+ # Must be done after getting buildlogger configuration.
+ self._jobs = [self._make_job(job_num) for job_num in xrange(_config.JOBS)]
+
+ def run(self):
+ """
+ Executes the test group.
+
+ Any exceptions that occur during setting up or tearing down a
+ fixture are propagated.
+ """
+
+ self.logger.info("Starting execution of %ss...", self._test_group.test_kind)
+
+ return_code = 0
+ try:
+ if not self._setup_fixtures():
+ return_code = 2
+ return
+
+ num_repeats = _config.REPEAT
+ while num_repeats > 0:
+ test_queue = self._make_test_queue()
+ self._test_group.record_start()
+ (report, interrupted) = self._run_tests(test_queue)
+ self._test_group.record_end(report)
+
+ # If the user triggered a KeyboardInterrupt, then we should stop.
+ if interrupted:
+ raise errors.UserInterrupt("Received interrupt from user")
+
+ sb = [] # String builder.
+ self._test_group.summarize_latest(sb)
+ self.logger.info("Summary: %s", "\n ".join(sb))
+
+ if not report.wasSuccessful():
+ return_code = 1
+ if _config.FAIL_FAST:
+ break
+
+ # Clear the report so it can be reused for the next execution.
+ for job in self._jobs:
+ job.report.reset()
+ num_repeats -= 1
+ finally:
+ if not self._teardown_fixtures():
+ return_code = 2
+ self._test_group.return_code = return_code
+
+ def _setup_fixtures(self):
+ """
+ Sets up a fixture for each job.
+ """
+
+ for job in self._jobs:
+ try:
+ job.fixture.setup()
+ except:
+ self.logger.exception("Encountered an error while setting up %s.", job.fixture)
+ return False
+
+ # Once they have all been started, wait for them to become available.
+ for job in self._jobs:
+ try:
+ job.fixture.await_ready()
+ except:
+ self.logger.exception("Encountered an error while waiting for %s to be ready",
+ job.fixture)
+ return False
+
+ return True
+
+ def _run_tests(self, test_queue):
+ """
+ Starts a thread for each Job instance and blocks until all of
+ the tests are run.
+
+ Returns a (combined report, user interrupted) pair, where the
+ report contains the status and timing information of tests run
+ by all of the threads.
+ """
+
+ threads = []
+ interrupt_flag = threading.Event()
+ user_interrupted = False
+ try:
+ # Run each Job instance in its own thread.
+ for job in self._jobs:
+ t = threading.Thread(target=job, args=(test_queue, interrupt_flag))
+ # Do not wait for tests to finish executing if interrupted by the user.
+ t.daemon = True
+ t.start()
+ threads.append(t)
+
+ joined = False
+ while not joined:
+ # Need to pass a timeout to join() so that KeyboardInterrupt exceptions
+ # are propagated.
+ joined = test_queue.join(TestGroupExecutor._TIMEOUT)
+ except (KeyboardInterrupt, SystemExit):
+ interrupt_flag.set()
+ user_interrupted = True
+ else:
+ # Only wait for all the Job instances if not interrupted by the user.
+ for t in threads:
+ t.join()
+
+ reports = [job.report for job in self._jobs]
+ combined_report = _report.TestReport.combine(*reports)
+
+ # We cannot return 'interrupt_flag.is_set()' because the interrupt flag can be set by a Job
+ # instance if a test fails and it decides to drain the queue. We only want to raise a
+ # StopExecution exception in TestGroupExecutor.run() if the user triggered the interrupt.
+ return (combined_report, user_interrupted)
+
+ def _teardown_fixtures(self):
+ """
+ Tears down all of the fixtures.
+
+ Returns true if all fixtures were torn down successfully, and
+ false otherwise.
+ """
+
+ success = True
+ for job in self._jobs:
+ try:
+ if not job.fixture.teardown():
+ self.logger.warn("Teardown of %s was not successful.", job.fixture)
+ success = False
+ except:
+ self.logger.exception("Encountered an error while tearing down %s.", job.fixture)
+ success = False
+
+ return success
+
+ def _get_build_id(self, job_num):
+ """
+ Returns a unique build id for a job.
+ """
+
+ build_config = self._build_config
+
+ if self._using_buildlogger:
+ # Use a distinct "builder" for each job in order to separate their logs.
+ if build_config is not None and "builder" in build_config:
+ build_config = build_config.copy()
+ build_config["builder"] = "%s_job%d" % (build_config["builder"], job_num)
+
+ build_id = logging.buildlogger.new_build_id(build_config)
+
+ if build_config is None or build_id is None:
+ self.logger.info("Encountered an error configuring buildlogger for job #%d, falling"
+ " back to stderr.", job_num)
+
+ return build_id, build_config
+
+ return None, build_config
+
+ def _make_fixture(self, job_num, build_id, build_config):
+ """
+ Creates a fixture for a job.
+ """
+
+ fixture_config = {}
+ fixture_class = fixtures.NOOP_FIXTURE_CLASS
+
+ if self.fixture_config is not None:
+ fixture_config = self.fixture_config.copy()
+ fixture_class = fixture_config.pop("class")
+
+ logger_name = "%s:job%d" % (fixture_class, job_num)
+ logger = logging.loggers.new_logger(logger_name, parent=logging.loggers.FIXTURE)
+ logging.config.apply_buildlogger_global_handler(logger,
+ self.logging_config,
+ build_id=build_id,
+ build_config=build_config)
+
+ return fixtures.make_fixture(fixture_class, logger, job_num, **fixture_config)
+
+ def _make_hooks(self, job_num, fixture):
+ """
+ Creates the custom behaviors for the job's fixture.
+ """
+
+ behaviors = []
+
+ for behavior_config in self.hooks_config:
+ behavior_config = behavior_config.copy()
+ behavior_class = behavior_config.pop("class")
+
+ logger_name = "%s:job%d" % (behavior_class, job_num)
+ logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+ behavior = _hooks.make_custom_behavior(behavior_class,
+ logger,
+ fixture,
+ **behavior_config)
+ behaviors.append(behavior)
+
+ return behaviors
+
+ def _make_job(self, job_num):
+ """
+ Returns a Job instance with its own fixture, hooks, and test
+ report.
+ """
+
+ build_id, build_config = self._get_build_id(job_num)
+ fixture = self._make_fixture(job_num, build_id, build_config)
+ hooks = self._make_hooks(job_num, fixture)
+
+ logger_name = "%s:job%d" % (self.logger.name, job_num)
+ logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ if build_id is not None:
+ endpoint = logging.buildlogger.APPEND_GLOBAL_LOGS_ENDPOINT % {"build_id": build_id}
+ url = "%s/%s/" % (_config.BUILDLOGGER_URL.rstrip("/"), endpoint.strip("/"))
+ logger.info("Writing output of job #%d to %s.", job_num, url)
+
+ report = _report.TestReport(logger,
+ self.logging_config,
+ build_id=build_id,
+ build_config=build_config)
+
+ return _job.Job(logger, fixture, hooks, report)
+
+ def _make_test_queue(self):
+ """
+ Returns a queue of TestCase instances.
+
+ Use a multi-consumer queue instead of a unittest.TestSuite so
+ that the test cases can be dispatched to multiple threads.
+ """
+
+ test_kind_logger = logging.loggers.new_logger(self._test_group.test_kind,
+ parent=logging.loggers.TESTS)
+
+ # Put all the test cases in a queue.
+ queue = _queue.Queue()
+ for test_name in self._test_group.tests:
+ test_case = testcases.make_test_case(self._test_group.test_kind,
+ test_kind_logger,
+ test_name,
+ **self.test_config)
+ queue.put(test_case)
+
+ # Add sentinel value for each job to indicate when there are no more items to process.
+ for _ in xrange(_config.JOBS):
+ queue.put(None)
+
+ return queue
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/fixtures/__init__.py b/test/qa-tests/buildscripts/resmokelib/testing/fixtures/__init__.py
new file mode 100644
index 00000000000..d68a66911d2
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/fixtures/__init__.py
@@ -0,0 +1,32 @@
+"""
+Fixtures for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+from .interface import Fixture, ReplFixture
+from .standalone import MongoDFixture
+from .replicaset import ReplicaSetFixture
+from .masterslave import MasterSlaveFixture
+from .shardedcluster import ShardedClusterFixture
+
+
+NOOP_FIXTURE_CLASS = "Fixture"
+
+_FIXTURES = {
+ "Fixture": Fixture,
+ "MongoDFixture": MongoDFixture,
+ "ReplicaSetFixture": ReplicaSetFixture,
+ "MasterSlaveFixture": MasterSlaveFixture,
+ "ShardedClusterFixture": ShardedClusterFixture,
+}
+
+
+def make_fixture(class_name, *args, **kwargs):
+ """
+ Factory function for creating Fixture instances.
+ """
+
+ if class_name not in _FIXTURES:
+ raise ValueError("Unknown fixture class '%s'" % (class_name))
+ return _FIXTURES[class_name](*args, **kwargs)
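
As a usage illustration, the executor's _make_fixture() pops the "class" key out of the fixture configuration and forwards everything else to this factory as keyword arguments. A self-contained sketch of that dispatch, with a stand-in class and made-up option names:

    class StubFixture(object):
        def __init__(self, logger, job_num, **options):
            self.logger = logger
            self.job_num = job_num
            self.options = options

    _STUB_FIXTURES = {"StubFixture": StubFixture}

    def make_stub_fixture(class_name, *args, **kwargs):
        if class_name not in _STUB_FIXTURES:
            raise ValueError("Unknown fixture class '%s'" % (class_name,))
        return _STUB_FIXTURES[class_name](*args, **kwargs)

    # Mirrors TestGroupExecutor._make_fixture(): pop "class", pass the rest through.
    fixture_config = {"class": "StubFixture", "verbose": True}
    class_name = fixture_config.pop("class")
    fixture = make_stub_fixture(class_name, None, 0, **fixture_config)
    print(fixture.options)  # {'verbose': True}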
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/fixtures/interface.py b/test/qa-tests/buildscripts/resmokelib/testing/fixtures/interface.py
new file mode 100644
index 00000000000..5fbf537c107
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/fixtures/interface.py
@@ -0,0 +1,128 @@
+"""
+Interface of the different fixtures for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import time
+
+import pymongo
+
+from ... import errors
+from ... import logging
+
+
+class Fixture(object):
+ """
+ Base class for all fixtures.
+ """
+
+ def __init__(self, logger, job_num):
+ """
+ Initializes the fixture with a logger instance.
+ """
+
+ if not isinstance(logger, logging.Logger):
+ raise TypeError("logger must be a Logger instance")
+
+ if not isinstance(job_num, int):
+ raise TypeError("job_num must be an integer")
+ elif job_num < 0:
+ raise ValueError("job_num must be a nonnegative integer")
+
+ self.logger = logger
+ self.job_num = job_num
+
+ self.port = None # Port that the mongo shell should connect to.
+
+ def setup(self):
+ """
+ Creates the fixture.
+ """
+ pass
+
+ def await_ready(self):
+ """
+ Blocks until the fixture can be used for testing.
+ """
+ pass
+
+ def teardown(self):
+ """
+ Destroys the fixture. Returns true if it was successful, and false otherwise.
+ """
+ return True
+
+ def is_running(self):
+ """
+ Returns true if the fixture is still operating and more tests
+ can be run, and false otherwise.
+ """
+ return True
+
+ def get_connection_string(self):
+ """
+ Returns the connection string for this fixture. This is NOT a
+ driver connection string, but a connection string of the format
+ expected by the mongo::ConnectionString class.
+ """
+ raise NotImplementedError("get_connection_string must be implemented by Fixture subclasses")
+
+ def __str__(self):
+ return "%s (Job #%d)" % (self.__class__.__name__, self.job_num)
+
+ def __repr__(self):
+ return "%r(%r, %r)" % (self.__class__.__name__, self.logger, self.job_num)
+
+
+class ReplFixture(Fixture):
+ """
+ Base class for all fixtures that support replication.
+ """
+
+ AWAIT_REPL_TIMEOUT_MINS = 5
+
+ def get_primary(self):
+ """
+ Returns the primary of a replica set, or the master of a
+ master-slave deployment.
+ """
+ raise NotImplementedError("get_primary must be implemented by ReplFixture subclasses")
+
+ def get_secondaries(self):
+ """
+ Returns a list containing the secondaries of a replica set, or
+ the slave of a master-slave deployment.
+ """
+ raise NotImplementedError("get_secondaries must be implemented by ReplFixture subclasses")
+
+ def await_repl(self):
+ """
+ Blocks until all operations on the primary/master have
+ replicated to all other nodes.
+ """
+ raise NotImplementedError("await_repl must be implemented by ReplFixture subclasses")
+
+ def retry_until_wtimeout(self, insert_fn):
+ """
+ Given a callback function representing an insert operation on
+ the primary, handle any connection failures, and keep retrying
+ the operation for up to 'AWAIT_REPL_TIMEOUT_MINS' minutes.
+
+ The insert operation callback should take an argument for the
+ number of remaining seconds to provide as the timeout for the
+ operation.
+ """
+
+ deadline = time.time() + ReplFixture.AWAIT_REPL_TIMEOUT_MINS * 60
+
+ while True:
+ try:
+ remaining = deadline - time.time()
+ insert_fn(remaining)
+ break
+ except pymongo.errors.ConnectionFailure:
+ remaining = deadline - time.time()
+ if remaining <= 0.0:
+ raise errors.ServerFailure("Failed to connect to the primary on port %d" %
+ self.port)
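
retry_until_wtimeout() above is a deadline-based retry: each attempt is handed only the time remaining before the overall timeout, and connection failures are retried until that deadline passes. A standalone sketch of the same shape, with a fake flaky operation in place of a real insert (all names here are illustrative):

    import random
    import time

    class FakeConnectionFailure(Exception):
        """Stand-in for pymongo.errors.ConnectionFailure."""

    def retry_until_deadline(operation_fn, timeout_secs):
        """Call operation_fn(remaining_secs) until it succeeds or the deadline passes."""
        deadline = time.time() + timeout_secs
        while True:
            try:
                remaining = deadline - time.time()
                operation_fn(remaining)
                return
            except FakeConnectionFailure:
                if deadline - time.time() <= 0.0:
                    raise RuntimeError("operation did not succeed within %.1f seconds"
                                       % (timeout_secs,))

    def flaky_operation(remaining_secs):
        # In the real callback, remaining_secs becomes the wtimeout of the write concern.
        if random.random() < 0.5:
            raise FakeConnectionFailure()
        print("succeeded with %.2f seconds to spare" % (remaining_secs,))

    retry_until_deadline(flaky_operation, timeout_secs=5.0)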
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/fixtures/masterslave.py b/test/qa-tests/buildscripts/resmokelib/testing/fixtures/masterslave.py
new file mode 100644
index 00000000000..f3dbf87eb91
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/fixtures/masterslave.py
@@ -0,0 +1,209 @@
+"""
+Master/slave fixture for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import os.path
+
+import pymongo
+
+from . import interface
+from . import standalone
+from ... import config
+from ... import logging
+from ... import utils
+
+
+class MasterSlaveFixture(interface.ReplFixture):
+ """
+ Fixture which provides JSTests with a master/slave deployment to
+ run against.
+ """
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongod_executable=None,
+ mongod_options=None,
+ master_options=None,
+ slave_options=None,
+ dbpath_prefix=None,
+ preserve_dbpath=False):
+
+ interface.ReplFixture.__init__(self, logger, job_num)
+
+ if "dbpath" in mongod_options:
+ raise ValueError("Cannot specify mongod_options.dbpath")
+
+ self.mongod_executable = mongod_executable
+ self.mongod_options = utils.default_if_none(mongod_options, {})
+ self.master_options = utils.default_if_none(master_options, {})
+ self.slave_options = utils.default_if_none(slave_options, {})
+ self.preserve_dbpath = preserve_dbpath
+
+ # Command line options override the YAML configuration.
+ dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
+ dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
+ self._dbpath_prefix = os.path.join(dbpath_prefix,
+ "job%d" % (self.job_num),
+ config.FIXTURE_SUBDIR)
+
+ self.master = None
+ self.slave = None
+
+ def setup(self):
+ if self.master is None:
+ self.master = self._new_mongod_master()
+ self.master.setup()
+ self.port = self.master.port
+
+ if self.slave is None:
+ self.slave = self._new_mongod_slave()
+ self.slave.setup()
+
+ def await_ready(self):
+ self.master.await_ready()
+ self.slave.await_ready()
+
+ # Do a replicated write to ensure that the slave has finished with its initial sync before
+ # starting to run any tests.
+ client = utils.new_mongo_client(self.port)
+
+ # Keep retrying this until it times out waiting for replication.
+ def insert_fn(remaining_secs):
+ remaining_millis = int(round(remaining_secs * 1000))
+ write_concern = pymongo.WriteConcern(w=2, wtimeout=remaining_millis)
+ coll = client.resmoke.get_collection("await_ready", write_concern=write_concern)
+ coll.insert_one({"awaiting": "ready"})
+
+ try:
+ self.retry_until_wtimeout(insert_fn)
+ except pymongo.errors.WTimeoutError:
+ self.logger.info("Replication of write operation timed out.")
+ raise
+
+ def teardown(self):
+ running_at_start = self.is_running()
+ success = True # Still a success if nothing is running.
+
+ if not running_at_start:
+ self.logger.info("Master-slave deployment was expected to be running in teardown(),"
+ " but wasn't.")
+
+ if self.slave is not None:
+ if running_at_start:
+ self.logger.info("Stopping slave...")
+
+ success = self.slave.teardown()
+
+ if running_at_start:
+ self.logger.info("Successfully stopped slave.")
+
+ if self.master is not None:
+ if running_at_start:
+ self.logger.info("Stopping master...")
+
+ success = self.master.teardown() and success
+
+ if running_at_start:
+ self.logger.info("Successfully stopped master.")
+
+ return success
+
+ def is_running(self):
+ return (self.master is not None and self.master.is_running() and
+ self.slave is not None and self.slave.is_running())
+
+ def get_primary(self):
+ return self.master
+
+ def get_secondaries(self):
+ return [self.slave]
+
+ def await_repl(self):
+ """
+ Inserts a document into each database on the master and waits
+ for all write operations to be acknowledged by the master-slave
+ deployment.
+ """
+
+ client = utils.new_mongo_client(self.port)
+
+ # We verify that each database has replicated to the slave because in the case of an initial
+ # sync, the slave may acknowledge writes to one database before it has finished syncing
+ # others.
+ db_names = client.database_names()
+ self.logger.info("Awaiting replication of inserts to each of the following databases on"
+ " master on port %d: %s",
+ self.port,
+ db_names)
+
+ for db_name in db_names:
+ if db_name == "local":
+ continue # The local database is expected to differ, ignore.
+
+ self.logger.info("Awaiting replication of insert to database %s (w=2, wtimeout=%d min)"
+ " to master on port %d",
+ db_name,
+ interface.ReplFixture.AWAIT_REPL_TIMEOUT_MINS,
+ self.port)
+
+ # Keep retrying this until it times out waiting for replication.
+ def insert_fn(remaining_secs):
+ remaining_millis = int(round(remaining_secs * 1000))
+ write_concern = pymongo.WriteConcern(w=2, wtimeout=remaining_millis)
+ coll = client[db_name].get_collection("await_repl", write_concern=write_concern)
+ coll.insert_one({"awaiting": "repl"})
+
+ try:
+ self.retry_until_wtimeout(insert_fn)
+ except pymongo.errors.WTimeoutError:
+ self.logger.info("Replication of write operation timed out.")
+ raise
+
+ self.logger.info("Replication of write operation completed for database %s.", db_name)
+
+ self.logger.info("Finished awaiting replication.")
+
+ def _new_mongod(self, mongod_logger, mongod_options):
+ """
+ Returns a standalone.MongoDFixture with the specified logger and
+ options.
+ """
+ return standalone.MongoDFixture(mongod_logger,
+ self.job_num,
+ mongod_executable=self.mongod_executable,
+ mongod_options=mongod_options,
+ preserve_dbpath=self.preserve_dbpath)
+
+ def _new_mongod_master(self):
+ """
+ Returns a standalone.MongoDFixture configured to be used as the
+ master of a master-slave deployment.
+ """
+
+ logger_name = "%s:master" % (self.logger.name)
+ mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongod_options = self.mongod_options.copy()
+ mongod_options.update(self.master_options)
+ mongod_options["master"] = ""
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "master")
+ return self._new_mongod(mongod_logger, mongod_options)
+
+ def _new_mongod_slave(self):
+ """
+ Returns a standalone.MongoDFixture configured to be used as the
+ slave of a master-slave deployment.
+ """
+
+ logger_name = "%s:slave" % (self.logger.name)
+ mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongod_options = self.mongod_options.copy()
+ mongod_options.update(self.slave_options)
+ mongod_options["slave"] = ""
+ mongod_options["source"] = "localhost:%d" % (self.port)
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "slave")
+ return self._new_mongod(mongod_logger, mongod_options)
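
Both await_ready() and await_repl() above funnel a single insert through retry_until_wtimeout() with a w=2 write concern, so the call only returns once the slave has applied the write. A minimal sketch of just that write-concern insert, assuming PyMongo 3.x and a two-node deployment already listening on port 27017:

    import pymongo

    # Assumes one primary/master plus one secondary/slave reachable via localhost:27017.
    client = pymongo.MongoClient(port=27017)

    write_concern = pymongo.WriteConcern(w=2, wtimeout=5 * 60 * 1000)
    coll = client.resmoke.get_collection("await_ready", write_concern=write_concern)

    try:
        coll.insert_one({"awaiting": "ready"})
        print("write acknowledged by both nodes")
    except pymongo.errors.WTimeoutError:
        print("write was not replicated within the wtimeout")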
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/fixtures/replicaset.py b/test/qa-tests/buildscripts/resmokelib/testing/fixtures/replicaset.py
new file mode 100644
index 00000000000..e9930627641
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/fixtures/replicaset.py
@@ -0,0 +1,211 @@
+"""
+Replica set fixture for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import os.path
+import time
+
+import pymongo
+
+from . import interface
+from . import standalone
+from ... import config
+from ... import logging
+from ... import utils
+
+
+class ReplicaSetFixture(interface.ReplFixture):
+ """
+ Fixture which provides JSTests with a replica set to run against.
+ """
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongod_executable=None,
+ mongod_options=None,
+ dbpath_prefix=None,
+ preserve_dbpath=False,
+ num_nodes=2,
+ auth_options=None,
+ replset_config_options=None):
+
+ interface.ReplFixture.__init__(self, logger, job_num)
+
+ self.mongod_executable = mongod_executable
+ self.mongod_options = utils.default_if_none(mongod_options, {})
+ self.preserve_dbpath = preserve_dbpath
+ self.num_nodes = num_nodes
+ self.auth_options = auth_options
+ self.replset_config_options = utils.default_if_none(replset_config_options, {})
+
+ # The dbpath in mongod_options is used as the dbpath prefix for replica set members and
+ # takes precedence over other settings. The ShardedClusterFixture uses this parameter to
+ # create replica sets and assign their dbpath structure explicitly.
+ if "dbpath" in self.mongod_options:
+ self._dbpath_prefix = self.mongod_options.pop("dbpath")
+ else:
+ # Command line options override the YAML configuration.
+ dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
+ dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
+ self._dbpath_prefix = os.path.join(dbpath_prefix,
+ "job%d" % (self.job_num),
+ config.FIXTURE_SUBDIR)
+
+ self.nodes = []
+ self.replset_name = None
+
+ def setup(self):
+ self.replset_name = self.mongod_options.get("replSet", "rs")
+
+ if not self.nodes:
+ for i in xrange(self.num_nodes):
+ node = self._new_mongod(i, self.replset_name)
+ self.nodes.append(node)
+
+ for node in self.nodes:
+ node.setup()
+
+ self.port = self.get_primary().port
+
+ # Call await_ready() on each of the nodes here because we want to start the election as
+ # soon as possible.
+ for node in self.nodes:
+ node.await_ready()
+
+ # Initiate the replica set.
+ members = []
+ for (i, node) in enumerate(self.nodes):
+ member_info = {"_id": i, "host": node.get_connection_string()}
+ if i > 0:
+ member_info["priority"] = 0
+ if i >= 7:
+ # Only 7 nodes in a replica set can vote, so the other members must be non-voting.
+ member_info["votes"] = 0
+ members.append(member_info)
+ initiate_cmd_obj = {"replSetInitiate": {"_id": self.replset_name, "members": members}}
+
+ client = utils.new_mongo_client(port=self.port)
+ if self.auth_options is not None:
+ auth_db = client[self.auth_options["authenticationDatabase"]]
+ auth_db.authenticate(self.auth_options["username"],
+ password=self.auth_options["password"],
+ mechanism=self.auth_options["authenticationMechanism"])
+
+ if self.replset_config_options.get("configsvr", False):
+ initiate_cmd_obj["replSetInitiate"]["configsvr"] = True
+
+ self.logger.info("Issuing replSetInitiate command...")
+ client.admin.command(initiate_cmd_obj)
+
+ def await_ready(self):
+ # Wait for the primary to be elected.
+ client = utils.new_mongo_client(port=self.port)
+ while True:
+ is_master = client.admin.command("isMaster")["ismaster"]
+ if is_master:
+ break
+ self.logger.info("Waiting for primary on port %d to be elected.", self.port)
+ time.sleep(0.1) # Wait a little bit before trying again.
+
+ # Wait for the secondaries to become available.
+ for secondary in self.get_secondaries():
+ client = utils.new_mongo_client(port=secondary.port,
+ read_preference=pymongo.ReadPreference.SECONDARY)
+ while True:
+ is_secondary = client.admin.command("isMaster")["secondary"]
+ if is_secondary:
+ break
+ self.logger.info("Waiting for secondary on port %d to become available.",
+ secondary.port)
+ time.sleep(0.1) # Wait a little bit before trying again.
+
+ def teardown(self):
+ running_at_start = self.is_running()
+ success = True # Still a success even if nothing is running.
+
+ if not running_at_start:
+ self.logger.info("Replica set was expected to be running in teardown(), but wasn't.")
+ else:
+ self.logger.info("Stopping all members of the replica set...")
+
+ # Terminate the secondaries first to reduce noise in the logs.
+ for node in reversed(self.nodes):
+ success = node.teardown() and success
+
+ if running_at_start:
+ self.logger.info("Successfully stopped all members of the replica set.")
+
+ return success
+
+ def is_running(self):
+ return all(node.is_running() for node in self.nodes)
+
+ def get_primary(self):
+ # The primary is always the first element of the 'nodes' list because all other members of
+ # the replica set are configured with priority=0.
+ return self.nodes[0]
+
+ def get_secondaries(self):
+ return self.nodes[1:]
+
+ def await_repl(self):
+ self.logger.info("Awaiting replication of insert (w=%d, wtimeout=%d min) to primary on port"
+ " %d", self.num_nodes, interface.ReplFixture.AWAIT_REPL_TIMEOUT_MINS,
+ self.port)
+ client = utils.new_mongo_client(port=self.port)
+
+ # Keep retrying this until it times out waiting for replication.
+ def insert_fn(remaining_secs):
+ remaining_millis = int(round(remaining_secs * 1000))
+ write_concern = pymongo.WriteConcern(w=self.num_nodes, wtimeout=remaining_millis)
+ coll = client.resmoke.get_collection("await_repl", write_concern=write_concern)
+ coll.insert_one({"awaiting": "repl"})
+
+ try:
+ self.retry_until_wtimeout(insert_fn)
+ except pymongo.errors.WTimeoutError:
+ self.logger.info("Replication of write operation timed out.")
+ raise
+
+ self.logger.info("Replication of write operation completed.")
+
+ def _new_mongod(self, index, replset_name):
+ """
+ Returns a standalone.MongoDFixture configured to be used as a
+ replica-set member of 'replset_name'.
+ """
+
+ mongod_logger = self._get_logger_for_mongod(index)
+ mongod_options = self.mongod_options.copy()
+ mongod_options["replSet"] = replset_name
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "node%d" % (index))
+
+ return standalone.MongoDFixture(mongod_logger,
+ self.job_num,
+ mongod_executable=self.mongod_executable,
+ mongod_options=mongod_options,
+ preserve_dbpath=self.preserve_dbpath)
+
+ def _get_logger_for_mongod(self, index):
+ """
+ Returns a new logging.Logger instance to use for the primary or a
+ secondary of the replica set.
+ """
+
+ if index == 0:
+ logger_name = "%s:primary" % (self.logger.name)
+ else:
+ suffix = str(index - 1) if self.num_nodes > 2 else ""
+ logger_name = "%s:secondary%s" % (self.logger.name, suffix)
+
+ return logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ def get_connection_string(self):
+ if self.replset_name is None:
+ raise ValueError("Must call setup() before calling get_connection_string()")
+
+ conn_strs = [node.get_connection_string() for node in self.nodes]
+ return self.replset_name + "/" + ",".join(conn_strs)
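
The member list built in setup() above gives every node after the first priority 0, so node 0 is always the electable primary, and strips votes from members past the seventh because a replica set allows at most seven voting members. A pure-Python sketch of the resulting replSetInitiate document for a hypothetical nine-node set:

    def build_initiate_doc(replset_name, hosts):
        members = []
        for i, host in enumerate(hosts):
            member = {"_id": i, "host": host}
            if i > 0:
                member["priority"] = 0   # Keep node 0 as the only electable member.
            if i >= 7:
                member["votes"] = 0      # At most 7 members may vote.
            members.append(member)
        return {"replSetInitiate": {"_id": replset_name, "members": members}}

    doc = build_initiate_doc("rs", ["localhost:%d" % (20000 + i) for i in range(9)])
    for member in doc["replSetInitiate"]["members"]:
        print(member)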
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/fixtures/shardedcluster.py b/test/qa-tests/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
new file mode 100644
index 00000000000..ab7b26bf372
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
@@ -0,0 +1,347 @@
+"""
+Sharded cluster fixture for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import copy
+import os.path
+import time
+
+import pymongo
+
+from . import interface
+from . import standalone
+from . import replicaset
+from ... import config
+from ... import core
+from ... import errors
+from ... import logging
+from ... import utils
+
+
+class ShardedClusterFixture(interface.Fixture):
+ """
+ Fixture which provides JSTests with a sharded cluster to run
+ against.
+ """
+
+ _CONFIGSVR_REPLSET_NAME = "config-rs"
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongos_executable=None,
+ mongos_options=None,
+ mongod_executable=None,
+ mongod_options=None,
+ dbpath_prefix=None,
+ preserve_dbpath=False,
+ num_shards=1,
+ separate_configsvr=True,
+ enable_sharding=None,
+ auth_options=None):
+ """
+ Initializes ShardedClusterFixture with the different options to
+ the mongod and mongos processes.
+ """
+
+ interface.Fixture.__init__(self, logger, job_num)
+
+ if "dbpath" in mongod_options:
+ raise ValueError("Cannot specify mongod_options.dbpath")
+
+ self.mongos_executable = mongos_executable
+ self.mongos_options = utils.default_if_none(mongos_options, {})
+ self.mongod_executable = mongod_executable
+ self.mongod_options = utils.default_if_none(mongod_options, {})
+ self.preserve_dbpath = preserve_dbpath
+ self.num_shards = num_shards
+ self.separate_configsvr = separate_configsvr
+ self.enable_sharding = utils.default_if_none(enable_sharding, [])
+ self.auth_options = auth_options
+
+ # Command line options override the YAML configuration.
+ dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
+ dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
+ self._dbpath_prefix = os.path.join(dbpath_prefix,
+ "job%d" % (self.job_num),
+ config.FIXTURE_SUBDIR)
+
+ self.configsvr = None
+ self.mongos = None
+ self.shards = []
+
+ def setup(self):
+ if self.separate_configsvr:
+ if self.configsvr is None:
+ self.configsvr = self._new_configsvr()
+ self.configsvr.setup()
+
+ if not self.shards:
+ for i in xrange(self.num_shards):
+ shard = self._new_shard(i)
+ self.shards.append(shard)
+
+ # Start up each of the shards
+ for shard in self.shards:
+ shard.setup()
+
+ def await_ready(self):
+ # Wait for the config server
+ if self.configsvr is not None:
+ self.configsvr.await_ready()
+
+ # Wait for each of the shards
+ for shard in self.shards:
+ shard.await_ready()
+
+ if self.mongos is None:
+ self.mongos = self._new_mongos()
+
+ # Start up the mongos
+ self.mongos.setup()
+
+ # Wait for the mongos
+ self.mongos.await_ready()
+ self.port = self.mongos.port
+
+ client = utils.new_mongo_client(port=self.port)
+ if self.auth_options is not None:
+ auth_db = client[self.auth_options["authenticationDatabase"]]
+ auth_db.authenticate(self.auth_options["username"],
+ password=self.auth_options["password"],
+ mechanism=self.auth_options["authenticationMechanism"])
+
+ # Inform mongos about each of the shards
+ for shard in self.shards:
+ self._add_shard(client, shard)
+
+ # Enable sharding on each of the specified databases
+ for db_name in self.enable_sharding:
+ self.logger.info("Enabling sharding for '%s' database...", db_name)
+ client.admin.command({"enablesharding": db_name})
+
+ def teardown(self):
+ """
+ Shuts down the sharded cluster.
+ """
+ running_at_start = self.is_running()
+ success = True # Still a success even if nothing is running.
+
+ if not running_at_start:
+ self.logger.info("Sharded cluster was expected to be running in teardown(), but"
+ " wasn't.")
+
+ if self.configsvr is not None:
+ if running_at_start:
+ self.logger.info("Stopping config server...")
+
+ success = self.configsvr.teardown() and success
+
+ if running_at_start:
+ self.logger.info("Successfully terminated the config server.")
+
+ if self.mongos is not None:
+ if running_at_start:
+ self.logger.info("Stopping mongos...")
+
+ success = self.mongos.teardown() and success
+
+ if running_at_start:
+ self.logger.info("Successfully terminated the mongos.")
+
+ if running_at_start:
+ self.logger.info("Stopping shards...")
+ for shard in self.shards:
+ success = shard.teardown() and success
+ if running_at_start:
+ self.logger.info("Successfully terminated all shards.")
+
+ return success
+
+ def is_running(self):
+ """
+ Returns true if the config server, all shards, and the mongos
+ are all still operating, and false otherwise.
+ """
+ return (self.configsvr is not None and self.configsvr.is_running() and
+ all(shard.is_running() for shard in self.shards) and
+ self.mongos is not None and self.mongos.is_running())
+
+ def _new_configsvr(self):
+ """
+ Returns a replicaset.ReplicaSetFixture configured to be used as
+ the config server of a sharded cluster.
+ """
+
+ logger_name = "%s:configsvr" % (self.logger.name)
+ mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongod_options = copy.deepcopy(self.mongod_options)
+ mongod_options["configsvr"] = ""
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "config")
+ mongod_options["replSet"] = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
+ mongod_options["storageEngine"] = "wiredTiger"
+
+ return replicaset.ReplicaSetFixture(mongod_logger,
+ self.job_num,
+ mongod_executable=self.mongod_executable,
+ mongod_options=mongod_options,
+ preserve_dbpath=self.preserve_dbpath,
+ num_nodes=3,
+ auth_options=self.auth_options,
+ replset_config_options={"configsvr": True})
+
+ def _new_shard(self, index):
+ """
+ Returns a standalone.MongoDFixture configured to be used as a
+ shard in a sharded cluster.
+ """
+
+ logger_name = "%s:shard%d" % (self.logger.name, index)
+ mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongod_options = copy.deepcopy(self.mongod_options)
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "shard%d" % (index))
+
+ return standalone.MongoDFixture(mongod_logger,
+ self.job_num,
+ mongod_executable=self.mongod_executable,
+ mongod_options=mongod_options,
+ preserve_dbpath=self.preserve_dbpath)
+
+ def _new_mongos(self):
+ """
+ Returns a _MongoSFixture configured to be used as the mongos for
+ a sharded cluster.
+ """
+
+ logger_name = "%s:mongos" % (self.logger.name)
+ mongos_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongos_options = copy.deepcopy(self.mongos_options)
+ if self.separate_configsvr:
+ configdb_replset = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
+ configdb_port = self.configsvr.port
+ mongos_options["configdb"] = "%s/localhost:%d" % (configdb_replset, configdb_port)
+ else:
+ mongos_options["configdb"] = "localhost:%d" % (self.shards[0].port)
+
+ return _MongoSFixture(mongos_logger,
+ self.job_num,
+ mongos_executable=self.mongos_executable,
+ mongos_options=mongos_options)
+
+ def _add_shard(self, client, shard):
+ """
+ Add the specified program as a shard by executing the addShard
+ command.
+
+ See https://docs.mongodb.org/manual/reference/command/addShard
+ for more details.
+ """
+
+ self.logger.info("Adding localhost:%d as a shard...", shard.port)
+ client.admin.command({"addShard": "localhost:%d" % (shard.port)})
+
+
+class _MongoSFixture(interface.Fixture):
+ """
+ Fixture which provides JSTests with a mongos to connect to.
+ """
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongos_executable=None,
+ mongos_options=None):
+
+ interface.Fixture.__init__(self, logger, job_num)
+
+ # Command line options override the YAML configuration.
+ self.mongos_executable = utils.default_if_none(config.MONGOS_EXECUTABLE, mongos_executable)
+
+ self.mongos_options = utils.default_if_none(mongos_options, {}).copy()
+
+ self.mongos = None
+
+ def setup(self):
+ if "chunkSize" not in self.mongos_options:
+ self.mongos_options["chunkSize"] = 50
+
+ if "port" not in self.mongos_options:
+ self.mongos_options["port"] = core.network.PortAllocator.next_fixture_port(self.job_num)
+ self.port = self.mongos_options["port"]
+
+ mongos = core.programs.mongos_program(self.logger,
+ executable=self.mongos_executable,
+ **self.mongos_options)
+ try:
+ self.logger.info("Starting mongos on port %d...\n%s", self.port, mongos.as_command())
+ mongos.start()
+ self.logger.info("mongos started on port %d with pid %d.", self.port, mongos.pid)
+ except:
+ self.logger.exception("Failed to start mongos on port %d.", self.port)
+ raise
+
+ self.mongos = mongos
+
+ def await_ready(self):
+ deadline = time.time() + standalone.MongoDFixture.AWAIT_READY_TIMEOUT_SECS
+
+ # Wait until the mongos is accepting connections. The retry logic is necessary to support
+ # versions of PyMongo <3.0 that immediately raise a ConnectionFailure if a connection cannot
+ # be established.
+ while True:
+ # Check whether the mongos exited for some reason.
+ exit_code = self.mongos.poll()
+ if exit_code is not None:
+ raise errors.ServerFailure("Could not connect to mongos on port %d, process ended"
+ " unexpectedly with code %d." % (self.port, exit_code))
+
+ try:
+ # Use a shorter connection timeout to more closely satisfy the requested deadline.
+ client = utils.new_mongo_client(self.port, timeout_millis=500)
+ client.admin.command("ping")
+ break
+ except pymongo.errors.ConnectionFailure:
+ remaining = deadline - time.time()
+ if remaining <= 0.0:
+ raise errors.ServerFailure(
+ "Failed to connect to mongos on port %d after %d seconds"
+ % (self.port, standalone.MongoDFixture.AWAIT_READY_TIMEOUT_SECS))
+
+ self.logger.info("Waiting to connect to mongos on port %d.", self.port)
+ time.sleep(0.1) # Wait a little bit before trying again.
+
+ self.logger.info("Successfully contacted the mongos on port %d.", self.port)
+
+ def teardown(self):
+ running_at_start = self.is_running()
+ success = True # Still a success even if nothing is running.
+
+ if not running_at_start and self.port is not None:
+ self.logger.info("mongos on port %d was expected to be running in teardown(), but"
+ " wasn't." % (self.port))
+
+ if self.mongos is not None:
+ if running_at_start:
+ self.logger.info("Stopping mongos on port %d with pid %d...",
+ self.port,
+ self.mongos.pid)
+ self.mongos.stop()
+
+ exit_code = self.mongos.wait()
+ success = exit_code == 0
+
+ if running_at_start:
+ self.logger.info("Successfully terminated the mongos on port %d, exited with code"
+ " %d",
+ self.port,
+ exit_code)
+
+ return success
+
+ def is_running(self):
+ return self.mongos is not None and self.mongos.poll() is None
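
Once the config server, shards, and mongos above are all reachable, the cluster is wired together with ordinary admin commands issued through the mongos, as in _add_shard() and the enable_sharding loop. A sketch of those calls, assuming PyMongo 3.x and a cluster already running on made-up localhost ports:

    import pymongo

    # Hypothetical ports; in the fixture they come from core.network.PortAllocator.
    mongos_client = pymongo.MongoClient(port=20006)
    shard_ports = [20001, 20002]

    for port in shard_ports:
        # The same command _add_shard() issues for each shard.
        mongos_client.admin.command({"addShard": "localhost:%d" % (port,)})

    # Enable sharding on a test database, as done for each entry in enable_sharding.
    mongos_client.admin.command({"enablesharding": "test"})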
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/fixtures/standalone.py b/test/qa-tests/buildscripts/resmokelib/testing/fixtures/standalone.py
new file mode 100644
index 00000000000..a8c1dc597c5
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/fixtures/standalone.py
@@ -0,0 +1,151 @@
+"""
+Standalone mongod fixture for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+import shutil
+import time
+
+import pymongo
+
+from . import interface
+from ... import config
+from ... import core
+from ... import errors
+from ... import utils
+
+
+class MongoDFixture(interface.Fixture):
+ """
+ Fixture which provides JSTests with a standalone mongod to run
+ against.
+ """
+
+ AWAIT_READY_TIMEOUT_SECS = 300
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongod_executable=None,
+ mongod_options=None,
+ dbpath_prefix=None,
+ preserve_dbpath=False):
+
+ interface.Fixture.__init__(self, logger, job_num)
+
+ if "dbpath" in mongod_options and dbpath_prefix is not None:
+ raise ValueError("Cannot specify both mongod_options.dbpath and dbpath_prefix")
+
+ # Command line options override the YAML configuration.
+ self.mongod_executable = utils.default_if_none(config.MONGOD_EXECUTABLE, mongod_executable)
+
+ self.mongod_options = utils.default_if_none(mongod_options, {}).copy()
+ self.preserve_dbpath = preserve_dbpath
+
+ # The dbpath in mongod_options takes precedence over other settings to make it easier for
+ # users to specify a dbpath containing data to test against.
+ if "dbpath" not in self.mongod_options:
+ # Command line options override the YAML configuration.
+ dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
+ dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
+ self.mongod_options["dbpath"] = os.path.join(dbpath_prefix,
+ "job%d" % (self.job_num),
+ config.FIXTURE_SUBDIR)
+ self._dbpath = self.mongod_options["dbpath"]
+
+ self.mongod = None
+
+ def setup(self):
+ if not self.preserve_dbpath:
+ shutil.rmtree(self._dbpath, ignore_errors=True)
+
+ try:
+ os.makedirs(self._dbpath)
+ except os.error:
+ # Directory already exists.
+ pass
+
+ if "port" not in self.mongod_options:
+ self.mongod_options["port"] = core.network.PortAllocator.next_fixture_port(self.job_num)
+ self.port = self.mongod_options["port"]
+
+ mongod = core.programs.mongod_program(self.logger,
+ executable=self.mongod_executable,
+ **self.mongod_options)
+ try:
+ self.logger.info("Starting mongod on port %d...\n%s", self.port, mongod.as_command())
+ mongod.start()
+ self.logger.info("mongod started on port %d with pid %d.", self.port, mongod.pid)
+ except:
+ self.logger.exception("Failed to start mongod on port %d.", self.port)
+ raise
+
+ self.mongod = mongod
+
+ def await_ready(self):
+ deadline = time.time() + MongoDFixture.AWAIT_READY_TIMEOUT_SECS
+
+ # Wait until the mongod is accepting connections. The retry logic is necessary to support
+ # versions of PyMongo <3.0 that immediately raise a ConnectionFailure if a connection cannot
+ # be established.
+ while True:
+ # Check whether the mongod exited for some reason.
+ exit_code = self.mongod.poll()
+ if exit_code is not None:
+ raise errors.ServerFailure("Could not connect to mongod on port %d, process ended"
+ " unexpectedly with code %d." % (self.port, exit_code))
+
+ try:
+ # Use a shorter connection timeout to more closely satisfy the requested deadline.
+ client = utils.new_mongo_client(self.port, timeout_millis=500)
+ client.admin.command("ping")
+ break
+ except pymongo.errors.ConnectionFailure:
+ remaining = deadline - time.time()
+ if remaining <= 0.0:
+ raise errors.ServerFailure(
+ "Failed to connect to mongod on port %d after %d seconds"
+ % (self.port, MongoDFixture.AWAIT_READY_TIMEOUT_SECS))
+
+ self.logger.info("Waiting to connect to mongod on port %d.", self.port)
+ time.sleep(0.1) # Wait a little bit before trying again.
+
+ self.logger.info("Successfully contacted the mongod on port %d.", self.port)
+
+ def teardown(self):
+ running_at_start = self.is_running()
+ success = True # Still a success even if nothing is running.
+
+ if not running_at_start and self.port is not None:
+ self.logger.info("mongod on port %d was expected to be running in teardown(), but"
+ " wasn't." % (self.port))
+
+ if self.mongod is not None:
+ if running_at_start:
+ self.logger.info("Stopping mongod on port %d with pid %d...",
+ self.port,
+ self.mongod.pid)
+ self.mongod.stop()
+
+ exit_code = self.mongod.wait()
+ success = exit_code == 0
+
+ if running_at_start:
+ self.logger.info("Successfully terminated the mongod on port %d, exited with code"
+ " %d.",
+ self.port,
+ exit_code)
+
+ return success
+
+ def is_running(self):
+ return self.mongod is not None and self.mongod.poll() is None
+
+ def get_connection_string(self):
+ if self.mongod is None:
+ raise ValueError("Must call setup() before calling get_connection_string()")
+
+ return "localhost:%d" % self.port
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/hooks.py b/test/qa-tests/buildscripts/resmokelib/testing/hooks.py
new file mode 100644
index 00000000000..4c580fa8392
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/hooks.py
@@ -0,0 +1,704 @@
+"""
+Customize the behavior of a fixture by allowing special code to be
+executed before or after each test, and before or after each suite.
+"""
+
+from __future__ import absolute_import
+
+import os
+import sys
+
+import bson
+import pymongo
+
+from . import fixtures
+from . import testcases
+from .. import errors
+from .. import logging
+from .. import utils
+
+
+def make_custom_behavior(class_name, *args, **kwargs):
+ """
+ Factory function for creating CustomBehavior instances.
+ """
+
+ if class_name not in _CUSTOM_BEHAVIORS:
+ raise ValueError("Unknown custom behavior class '%s'" % (class_name))
+ return _CUSTOM_BEHAVIORS[class_name](*args, **kwargs)
+
+
+class CustomBehavior(object):
+ """
+ The common interface all CustomBehaviors will inherit from.
+ """
+
+ @staticmethod
+ def start_dynamic_test(test_case, test_report):
+ """
+ If a CustomBehavior wants to add a test case that will show up
+ in the test report, it should use this method to add it to the
+ report, since we will need to count it as a dynamic test to get
+ the stats in the summary information right.
+ """
+ test_report.startTest(test_case, dynamic=True)
+
+ def __init__(self, logger, fixture):
+ """
+ Initializes the CustomBehavior with the specified fixture.
+ """
+
+ if not isinstance(logger, logging.Logger):
+ raise TypeError("logger must be a Logger instance")
+
+ self.logger = logger
+ self.fixture = fixture
+
+ def before_suite(self, test_report):
+ """
+ The test runner calls this exactly once before it starts
+ running the suite.
+ """
+ pass
+
+ def after_suite(self, test_report):
+ """
+ The test runner calls this exactly once after all tests have
+ finished executing. Be sure to reset the behavior back to its
+ original state so that it can be run again.
+ """
+ pass
+
+ def before_test(self, test_report):
+ """
+ Each test will call this before it executes.
+
+ Raises a TestFailure if the test should be marked as a failure,
+ or a ServerFailure if the fixture exits uncleanly or
+ unexpectedly.
+ """
+ pass
+
+ def after_test(self, test_report):
+ """
+ Each test will call this after it executes.
+
+ Raises a TestFailure if the test should be marked as a failure,
+ or a ServerFailure if the fixture exits uncleanly or
+ unexpectedly.
+ """
+ pass
+
+
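
A behavior only needs to override the lifecycle methods it cares about. Purely as an illustration (this is not one of the behaviors defined below), a hook that counts how many tests ran against its fixture could look like the following sketch, written against a stub base class so it runs on its own:

    class StubBehavior(object):
        def __init__(self, logger, fixture):
            self.logger, self.fixture = logger, fixture
        def before_suite(self, test_report): pass
        def after_suite(self, test_report): pass
        def before_test(self, test_report): pass
        def after_test(self, test_report): pass

    class CountTests(StubBehavior):
        """Counts how many tests ran against the fixture during the suite."""
        def before_suite(self, test_report):
            self.count = 0
        def after_test(self, test_report):
            self.count += 1
        def after_suite(self, test_report):
            print("%d test(s) ran against %s" % (self.count, self.fixture))

    hook = CountTests(logger=None, fixture="stub-fixture")
    hook.before_suite(None)
    hook.after_test(None)
    hook.after_suite(None)   # Prints: 1 test(s) ran against stub-fixture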
+class CleanEveryN(CustomBehavior):
+ """
+ Restarts the fixture after it has run 'n' tests.
+ On mongod-related fixtures, this will clear the dbpath.
+ """
+
+ DEFAULT_N = 20
+
+ def __init__(self, logger, fixture, n=DEFAULT_N):
+ CustomBehavior.__init__(self, logger, fixture)
+
+ # Try to isolate what test triggers the leak by restarting the fixture each time.
+ if "detect_leaks=1" in os.getenv("ASAN_OPTIONS", ""):
+ self.logger.info("ASAN_OPTIONS environment variable set to detect leaks, so restarting"
+ " the fixture after each test instead of after every %d.", n)
+ n = 1
+
+ self.n = n
+ self.tests_run = 0
+
+ def after_test(self, test_report):
+ self.tests_run += 1
+ if self.tests_run >= self.n:
+ self.logger.info("%d tests have been run against the fixture, stopping it...",
+ self.tests_run)
+ self.tests_run = 0
+
+ teardown_success = self.fixture.teardown()
+ self.logger.info("Starting the fixture back up again...")
+ self.fixture.setup()
+ self.fixture.await_ready()
+
+ # Raise this after calling setup in case --continueOnFailure was specified.
+ if not teardown_success:
+ raise errors.TestFailure("%s did not exit cleanly" % (self.fixture))
+
+
+class CheckReplDBHash(CustomBehavior):
+ """
+ Waits for replication after each test, then checks that the dbhashes
+ of all databases other than "local" match on the primary and all of
+ the secondaries. If any dbhashes do not match, logs information
+ about what was different (e.g. different numbers of collections,
+ missing documents in a collection, mismatching documents, etc.).
+
+ Compatible only with ReplFixture subclasses.
+ """
+
+ def __init__(self, logger, fixture):
+ if not isinstance(fixture, fixtures.ReplFixture):
+ raise TypeError("%s does not support replication" % (fixture.__class__.__name__))
+
+ CustomBehavior.__init__(self, logger, fixture)
+
+ self.test_case = testcases.TestCase(self.logger, "Hook", "#dbhash#")
+
+ self.started = False
+
+ def after_test(self, test_report):
+ """
+ After each test, check that the dbhash of the test database is
+ the same on all nodes in the replica set or master/slave
+ fixture.
+ """
+
+ try:
+ if not self.started:
+ CustomBehavior.start_dynamic_test(self.test_case, test_report)
+ self.started = True
+
+ # Wait until all operations have replicated.
+ self.fixture.await_repl()
+
+ success = True
+ sb = [] # String builder.
+
+ primary = self.fixture.get_primary()
+ primary_conn = utils.new_mongo_client(port=primary.port)
+
+ for secondary in self.fixture.get_secondaries():
+ read_preference = pymongo.ReadPreference.SECONDARY
+ secondary_conn = utils.new_mongo_client(port=secondary.port,
+ read_preference=read_preference)
+ # Skip arbiters.
+ if secondary_conn.admin.command("isMaster").get("arbiterOnly", False):
+ continue
+
+ all_matched = CheckReplDBHash._check_all_db_hashes(primary_conn,
+ secondary_conn,
+ sb)
+ if not all_matched:
+ sb.insert(0,
+ "One or more databases were different between the primary on port %d"
+ " and the secondary on port %d:"
+ % (primary.port, secondary.port))
+
+ success = all_matched and success
+
+ if not success:
+ # Adding failures to a TestReport requires traceback information, so we raise
+ # a 'self.test_case.failureException' that we will catch ourselves.
+ self.test_case.logger.info("\n ".join(sb))
+ raise self.test_case.failureException("The dbhashes did not match")
+ except self.test_case.failureException as err:
+ self.test_case.logger.exception("The dbhashes did not match.")
+ self.test_case.return_code = 1
+ test_report.addFailure(self.test_case, sys.exc_info())
+ test_report.stopTest(self.test_case)
+ raise errors.ServerFailure(err.args[0])
+ except pymongo.errors.WTimeoutError:
+ self.test_case.logger.exception("Awaiting replication timed out.")
+ self.test_case.return_code = 2
+ test_report.addError(self.test_case, sys.exc_info())
+ test_report.stopTest(self.test_case)
+ raise errors.StopExecution("Awaiting replication timed out")
+
+ def after_suite(self, test_report):
+ """
+ If we get to this point, the #dbhash# test must have been
+ successful, so add it to the test report.
+ """
+
+ if self.started:
+ self.test_case.logger.info("The dbhashes matched for all tests.")
+ self.test_case.return_code = 0
+ test_report.addSuccess(self.test_case)
+ # TestReport.stopTest() has already been called if there was a failure.
+ test_report.stopTest(self.test_case)
+
+ self.started = False
+
+ @staticmethod
+ def _check_all_db_hashes(primary_conn, secondary_conn, sb):
+ """
+ Returns true if for each non-local database, the dbhash command
+ returns the same MD5 hash on the primary as it does on the
+ secondary. Returns false otherwise.
+
+ Logs a message describing the differences if any database's
+ dbhash did not match.
+ """
+
+ # Overview of how we'll check that everything replicated correctly between these two nodes:
+ #
+ # - Check whether they have the same databases.
+ # - If not, log which databases are missing where, and dump the contents of any that are
+ # missing.
+ #
+ # - Check whether each database besides "local" gives the same md5 field as the result of
+ # running the dbhash command.
+ # - If not, check whether they have the same collections.
+ # - If not, log which collections are missing where, and dump the contents of any
+ # that are missing.
+ # - If so, check that the hash of each non-capped collection matches.
+ # - If any do not match, log the diff of the collection between the two nodes.
+
+ success = True
+
+ if not CheckReplDBHash._check_dbs_present(primary_conn, secondary_conn, sb):
+ return False
+
+ for db_name in primary_conn.database_names():
+ if db_name == "local":
+ continue # We don't expect this to match across different nodes.
+
+ matched = CheckReplDBHash._check_db_hash(primary_conn, secondary_conn, db_name, sb)
+ success = matched and success
+
+ return success
+
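
The comparison above ultimately reduces to running the dbhash command against both nodes and comparing the per-database md5 fields (and, when those differ, the per-collection hashes in the command's 'collections' sub-document). A bare-bones sketch, assuming PyMongo 3.x and a primary/secondary pair on hypothetical ports 20000 and 20001:

    import pymongo

    # Direct connections to each node of the deployment.
    primary = pymongo.MongoClient(port=20000)
    secondary = pymongo.MongoClient(port=20001, readPreference="secondary")

    for db_name in primary.database_names():
        if db_name == "local":
            continue  # The local database is expected to differ between nodes.
        primary_hash = primary[db_name].command("dbhash")
        secondary_hash = secondary[db_name].command("dbhash")
        if primary_hash["md5"] != secondary_hash["md5"]:
            print("%s differs: %s != %s"
                  % (db_name, primary_hash["md5"], secondary_hash["md5"]))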
+ @staticmethod
+ def _check_dbs_present(primary_conn, secondary_conn, sb):
+ """
+ Returns true if the list of databases on the primary is
+ identical to the list of databases on the secondary, and false
+ otherwise.
+ """
+
+ success = True
+ primary_dbs = primary_conn.database_names()
+
+ # Can't run database_names() on secondary, so instead use the listDatabases command.
+ # TODO: Use database_names() once PYTHON-921 is resolved.
+ list_db_output = secondary_conn.admin.command("listDatabases")
+ secondary_dbs = [db["name"] for db in list_db_output["databases"]]
+
+ # A difference in database names is not considered an error when the database only
+ # contains system collections. Such a difference is only logged when actual errors
+ # are encountered, i.e. when success is set to False.
+ missing_on_primary, missing_on_secondary = CheckReplDBHash._check_difference(
+ set(primary_dbs), set(secondary_dbs), "database")
+
+ for missing_db in missing_on_secondary:
+ db = primary_conn[missing_db]
+ coll_names = db.collection_names()
+ non_system_colls = [name for name in coll_names if not name.startswith("system.")]
+
+ # It is only an error if there are any non-system collections in the database,
+ # otherwise it's not well defined whether they should exist or not.
+ if non_system_colls:
+ sb.append("Database %s present on primary but not on secondary." % (missing_db))
+ CheckReplDBHash._dump_all_collections(db, non_system_colls, sb)
+ success = False
+
+ for missing_db in missing_on_primary:
+ db = secondary_conn[missing_db]
+
+ # Can't run collection_names() on secondary, so instead use the listCollections command.
+ # TODO: Always use collection_names() once PYTHON-921 is resolved. Then much of the
+ # logic that is duplicated here can be consolidated.
+ list_coll_output = db.command("listCollections")["cursor"]["firstBatch"]
+ coll_names = [coll["name"] for coll in list_coll_output]
+ non_system_colls = [name for name in coll_names if not name.startswith("system.")]
+
+ # It is only an error if there are any non-system collections in the database,
+ # otherwise it's not well defined whether they should exist or not.
+ if non_system_colls:
+ sb.append("Database %s present on secondary but not on primary." % (missing_db))
+ CheckReplDBHash._dump_all_collections(db, non_system_colls, sb)
+ success = False
+
+ return success
+
+ @staticmethod
+ def _check_db_hash(primary_conn, secondary_conn, db_name, sb):
+ """
+ Returns true if the dbhash for 'db_name' matches on the primary
+ and the secondary, and false otherwise.
+
+ Appends a message to 'sb' describing the differences if the
+ dbhashes do not match.
+ """
+
+ primary_hash = primary_conn[db_name].command("dbhash")
+ secondary_hash = secondary_conn[db_name].command("dbhash")
+
+ if primary_hash["md5"] == secondary_hash["md5"]:
+ return True
+
+ success = CheckReplDBHash._check_dbs_eq(
+ primary_conn, secondary_conn, primary_hash, secondary_hash, db_name, sb)
+
+ if not success:
+ sb.append("Database %s has a different hash on the primary and the secondary"
+ " ([ %s ] != [ %s ]):"
+ % (db_name, primary_hash["md5"], secondary_hash["md5"]))
+
+ return success
+
+ @staticmethod
+ def _check_dbs_eq(primary_conn, secondary_conn, primary_hash, secondary_hash, db_name, sb):
+ """
+ Returns true if all non-capped collections had the same hash in
+ the dbhash response, and false otherwise.
+
+ Appends information to 'sb' about the differences between the
+ 'db_name' database on the primary and the 'db_name' database on
+ the secondary, if any.
+ """
+
+ success = True
+
+ primary_db = primary_conn[db_name]
+ secondary_db = secondary_conn[db_name]
+
+ primary_coll_hashes = primary_hash["collections"]
+ secondary_coll_hashes = secondary_hash["collections"]
+
+ primary_coll_names = set(primary_coll_hashes.keys())
+ secondary_coll_names = set(secondary_coll_hashes.keys())
+
+ missing_on_primary, missing_on_secondary = CheckReplDBHash._check_difference(
+ primary_coll_names, secondary_coll_names, "collection", sb=sb)
+
+ if missing_on_primary or missing_on_secondary:
+
+ # 'sb' already describes which collections are missing where.
+ for coll_name in missing_on_primary:
+ CheckReplDBHash._dump_all_documents(secondary_db, coll_name, sb)
+ for coll_name in missing_on_secondary:
+ CheckReplDBHash._dump_all_documents(primary_db, coll_name, sb)
+ return
+
+ for coll_name in primary_coll_names & secondary_coll_names:
+ primary_coll_hash = primary_coll_hashes[coll_name]
+ secondary_coll_hash = secondary_coll_hashes[coll_name]
+
+ if primary_coll_hash == secondary_coll_hash:
+ continue
+
+ # Ignore capped collections because they are not expected to match on all nodes.
+ if primary_db.command({"collStats": coll_name})["capped"]:
+ # Still fail if the collection is not capped on the secondary.
+ if not secondary_db.command({"collStats": coll_name})["capped"]:
+ success = False
+ sb.append("%s.%s collection is capped on primary but not on secondary."
+ % (primary_db.name, coll_name))
+ sb.append("%s.%s collection is capped, ignoring." % (primary_db.name, coll_name))
+ continue
+ # Still fail if the collection is capped on the secondary, but not on the primary.
+ elif secondary_db.command({"collStats": coll_name})["capped"]:
+ success = False
+ sb.append("%s.%s collection is capped on secondary but not on primary."
+ % (primary_db.name, coll_name))
+ continue
+
+ success = False
+ sb.append("Collection %s.%s has a different hash on the primary and the secondary"
+ " ([ %s ] != [ %s ]):"
+ % (db_name, coll_name, primary_coll_hash, secondary_coll_hash))
+ CheckReplDBHash._check_colls_eq(primary_db, secondary_db, coll_name, sb)
+
+ if success:
+ sb.append("All collections that were expected to match did.")
+ return success
+
+ @staticmethod
+ def _check_colls_eq(primary_db, secondary_db, coll_name, sb):
+ """
+ Appends information to 'sb' about the differences, if any, between
+ the 'coll_name' collection on the primary and the 'coll_name'
+ collection on the secondary.
+ """
+
+ codec_options = bson.CodecOptions(document_class=TypeSensitiveSON)
+
+ primary_coll = primary_db.get_collection(coll_name, codec_options=codec_options)
+ secondary_coll = secondary_db.get_collection(coll_name, codec_options=codec_options)
+
+ primary_docs = CheckReplDBHash._extract_documents(primary_coll)
+ secondary_docs = CheckReplDBHash._extract_documents(secondary_coll)
+
+ CheckReplDBHash._get_collection_diff(primary_docs, secondary_docs, sb)
+
+ @staticmethod
+ def _extract_documents(collection):
+ """
+ Returns a list of all documents in the collection, sorted by
+ their _id.
+ """
+
+ return [doc for doc in collection.find().sort("_id", pymongo.ASCENDING)]
+
+ @staticmethod
+ def _get_collection_diff(primary_docs, secondary_docs, sb):
+ """
+ Compares the documents in 'primary_docs' against the documents
+ in 'secondary_docs'; both lists are expected to be sorted by _id.
+
+ Appends information to 'sb' about what matched or did not match.
+ """
+
+ matched = True
+
+ # These need to be lists instead of sets because documents aren't hashable.
+ missing_on_primary = []
+ missing_on_secondary = []
+
+ p_idx = 0 # Keep track of our position in 'primary_docs'.
+ s_idx = 0 # Keep track of our position in 'secondary_docs'.
+
+ while p_idx < len(primary_docs) and s_idx < len(secondary_docs):
+ primary_doc = primary_docs[p_idx]
+ secondary_doc = secondary_docs[s_idx]
+
+ if primary_doc == secondary_doc:
+ p_idx += 1
+ s_idx += 1
+ continue
+
+ # We have mismatching documents.
+ matched = False
+
+ if primary_doc["_id"] == secondary_doc["_id"]:
+ sb.append("Mismatching document:")
+ sb.append(" primary: %s" % (primary_doc))
+ sb.append(" secondary: %s" % (secondary_doc))
+ p_idx += 1
+ s_idx += 1
+
+ # One node was missing a document. Since the documents are sorted by _id, the doc with
+ # the smaller _id was the one that was skipped.
+ elif primary_doc["_id"] < secondary_doc["_id"]:
+ missing_on_secondary.append(primary_doc)
+
+ # Only move past the doc that we know was skipped.
+ p_idx += 1
+
+ else: # primary_doc["_id"] > secondary_doc["_id"]
+ missing_on_primary.append(secondary_doc)
+
+ # Only move past the doc that we know was skipped.
+ s_idx += 1
+
+ # Check if there are any unmatched documents left.
+ while p_idx < len(primary_docs):
+ matched = False
+ missing_on_secondary.append(primary_docs[p_idx])
+ p_idx += 1
+ while s_idx < len(secondary_docs):
+ matched = False
+ missing_on_primary.append(secondary_docs[s_idx])
+ s_idx += 1
+
+ if not matched:
+ CheckReplDBHash._append_differences(
+ missing_on_primary, missing_on_secondary, "document", sb)
+ else:
+ sb.append("All documents matched.")
+
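
The loop above is a standard two-pointer merge over the two _id-sorted document lists: equal documents advance both cursors, equal _ids with different bodies are a mismatch, and otherwise the side holding the smaller _id is the one the other node is missing. A self-contained sketch with plain dicts:

    def diff_sorted_docs(primary_docs, secondary_docs):
        """Return (missing_on_primary, missing_on_secondary, mismatched) for _id-sorted lists."""
        missing_on_primary, missing_on_secondary, mismatched = [], [], []
        p_idx = s_idx = 0
        while p_idx < len(primary_docs) and s_idx < len(secondary_docs):
            p_doc, s_doc = primary_docs[p_idx], secondary_docs[s_idx]
            if p_doc == s_doc:
                p_idx += 1
                s_idx += 1
            elif p_doc["_id"] == s_doc["_id"]:
                mismatched.append((p_doc, s_doc))
                p_idx += 1
                s_idx += 1
            elif p_doc["_id"] < s_doc["_id"]:
                missing_on_secondary.append(p_doc)   # Secondary skipped this _id.
                p_idx += 1
            else:
                missing_on_primary.append(s_doc)     # Primary skipped this _id.
                s_idx += 1
        missing_on_secondary.extend(primary_docs[p_idx:])
        missing_on_primary.extend(secondary_docs[s_idx:])
        return missing_on_primary, missing_on_secondary, mismatched

    print(diff_sorted_docs([{"_id": 1, "x": 1}, {"_id": 2, "x": 2}],
                           [{"_id": 1, "x": 1}, {"_id": 3, "x": 3}]))
    # ([{'_id': 3, 'x': 3}], [{'_id': 2, 'x': 2}], [])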
+ @staticmethod
+ def _check_difference(primary_set, secondary_set, item_type_name, sb=None):
+ """
+ Returns true if the contents of 'primary_set' and
+ 'secondary_set' are identical, and false otherwise. The sets
+ contain information about the primary and secondary,
+ respectively, e.g. the database names that exist on each node.
+
+ Appends information about anything that differed to 'sb'.
+ """
+
+ missing_on_primary = set()
+ missing_on_secondary = set()
+
+ for item in primary_set - secondary_set:
+ missing_on_secondary.add(item)
+
+ for item in secondary_set - primary_set:
+ missing_on_primary.add(item)
+
+ if sb is not None:
+ CheckReplDBHash._append_differences(
+ missing_on_primary, missing_on_secondary, item_type_name, sb)
+
+ return (missing_on_primary, missing_on_secondary)
+
+ @staticmethod
+ def _append_differences(missing_on_primary, missing_on_secondary, item_type_name, sb):
+ """
+ Given two iterables representing items that were missing on the
+ primary or the secondary respectively, append the information
+ about which items were missing to 'sb', if any.
+ """
+
+ if missing_on_primary:
+ sb.append("The following %ss were present on the secondary, but not on the"
+ " primary:" % (item_type_name))
+ for item in missing_on_primary:
+ sb.append(str(item))
+
+ if missing_on_secondary:
+ sb.append("The following %ss were present on the primary, but not on the"
+ " secondary:" % (item_type_name))
+ for item in missing_on_secondary:
+ sb.append(str(item))
+
+ @staticmethod
+ def _dump_all_collections(database, coll_names, sb):
+ """
+ Appends the contents of each of the collections in 'coll_names'
+ to 'sb'.
+ """
+
+ if coll_names:
+ sb.append("Database %s contains the following collections: %s"
+ % (database.name, coll_names))
+ for coll_name in coll_names:
+ CheckReplDBHash._dump_all_documents(database, coll_name, sb)
+ else:
+ sb.append("No collections in database %s." % (database.name))
+
+ @staticmethod
+ def _dump_all_documents(database, coll_name, sb):
+ """
+ Appends the contents of 'coll_name' to 'sb'.
+ """
+
+ docs = CheckReplDBHash._extract_documents(database[coll_name])
+ if docs:
+ sb.append("Documents in %s.%s:" % (database.name, coll_name))
+ for doc in docs:
+ sb.append(" %s" % (doc))
+ else:
+ sb.append("No documents in %s.%s." % (database.name, coll_name))
+
+
+class TypeSensitiveSON(bson.SON):
+ """
+ Extends bson.SON to perform additional type-checking of document values
+ to differentiate BSON types.
+ """
+
+ def items_with_types(self):
+ """
+ Returns a list of triples. Each triple consists of a field name, a
+ field value, and a field type for each field in the document.
+ """
+
+ return [(key, self[key], type(self[key])) for key in self]
+
+ def __eq__(self, other):
+ """
+ Comparison to another TypeSensitiveSON is order-sensitive and
+ type-sensitive while comparison to a regular dictionary ignores order
+ and type mismatches.
+ """
+
+ if isinstance(other, TypeSensitiveSON):
+ return (len(self) == len(other) and
+ self.items_with_types() == other.items_with_types())
+
+ raise TypeError("TypeSensitiveSON objects cannot be compared to other types")
+
+class ValidateCollections(CustomBehavior):
+ """
+ Runs full validation (db.collection.validate(true)) on all collections
+ in all databases on every standalone or primary mongod. If validation
+ fails (i.e. validate.valid is false), then the validate return object is logged.
+
+ Compatible with all subclasses.
+ """
+ DEFAULT_FULL = True
+ DEFAULT_SCANDATA = True
+
+ def __init__(self, logger, fixture, full=DEFAULT_FULL, scandata=DEFAULT_SCANDATA):
+ CustomBehavior.__init__(self, logger, fixture)
+
+ if not isinstance(full, bool):
+ raise TypeError("Fixture option full is not specified as type bool")
+
+ if not isinstance(scandata, bool):
+ raise TypeError("Fixture option scandata is not specified as type bool")
+
+ self.test_case = testcases.TestCase(self.logger, "Hook", "#validate#")
+ self.started = False
+ self.full = full
+ self.scandata = scandata
+
+ def after_test(self, test_report):
+ """
+ After each test, run a full validation on all collections.
+ """
+
+ try:
+ if not self.started:
+ CustomBehavior.start_dynamic_test(self.test_case, test_report)
+ self.started = True
+
+ sb = [] # String builder.
+
+ # The self.fixture.port can be used for client connection to a
+ # standalone mongod, a replica-set primary, or mongos.
+ # TODO: Run collection validation on all nodes in a replica-set.
+ port = self.fixture.port
+ conn = utils.new_mongo_client(port=port)
+
+ success = ValidateCollections._check_all_collections(
+ conn, sb, self.full, self.scandata)
+
+ if not success:
+ # Adding failures to a TestReport requires traceback information, so we raise
+ # a 'self.test_case.failureException' that we will catch ourselves.
+ self.test_case.logger.info("\n ".join(sb))
+ raise self.test_case.failureException("Collection validation failed")
+ except self.test_case.failureException as err:
+ self.test_case.logger.exception("Collection validation failed")
+ self.test_case.return_code = 1
+ test_report.addFailure(self.test_case, sys.exc_info())
+ test_report.stopTest(self.test_case)
+ raise errors.ServerFailure(err.args[0])
+
+ def after_suite(self, test_report):
+ """
+ If we get to this point, the #validate# test must have been
+ successful, so add it to the test report.
+ """
+
+ if self.started:
+ self.test_case.logger.info("Collection validation passed for all tests.")
+ self.test_case.return_code = 0
+ test_report.addSuccess(self.test_case)
+ # TestReport.stopTest() has already been called if there was a failure.
+ test_report.stopTest(self.test_case)
+
+ self.started = False
+
+ @staticmethod
+ def _check_all_collections(conn, sb, full, scandata):
+ """
+ Returns true if validate_collection succeeds for every collection in
+ every database, and false otherwise.
+
+ Logs a message if any database's collection fails validate_collection.
+ """
+
+ success = True
+
+ for db_name in conn.database_names():
+ for coll_name in conn[db_name].collection_names():
+ try:
+ conn[db_name].validate_collection(coll_name, full=full, scandata=scandata)
+ except pymongo.errors.CollectionInvalid as err:
+ sb.append("Database %s, collection %s failed to validate:\n%s"
+ % (db_name, coll_name, err.args[0]))
+ success = False
+ return success
+
+
+_CUSTOM_BEHAVIORS = {
+ "CleanEveryN": CleanEveryN,
+ "CheckReplDBHash": CheckReplDBHash,
+ "ValidateCollections": ValidateCollections,
+}
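
The type-sensitive comparison above exists because bson.SON (and plain dict) equality treats BSON numeric types as interchangeable. A minimal standalone sketch of the distinction, assuming the pymongo "bson" package is installed:

    import bson

    plain = {"x": 1}
    son_int = bson.SON([("x", 1)])
    son_float = bson.SON([("x", 1.0)])

    # Regular SON/dict comparison treats 1 (int) and 1.0 (double) as equal...
    assert son_int == plain and son_int == son_float

    # ...while comparing (key, value, type) triples, as items_with_types() does,
    # tells the two documents apart.
    assert ([(k, son_int[k], type(son_int[k])) for k in son_int] !=
            [(k, son_float[k], type(son_float[k])) for k in son_float])
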
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/job.py b/test/qa-tests/buildscripts/resmokelib/testing/job.py
new file mode 100644
index 00000000000..bc5705ffdfb
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/job.py
@@ -0,0 +1,195 @@
+"""
+Enables support for running tests simultaneously by processing them
+from a multi-consumer queue.
+"""
+
+from __future__ import absolute_import
+
+import sys
+
+from .. import config
+from .. import errors
+from ..utils import queue as _queue
+
+
+class Job(object):
+ """
+ Runs tests from a queue.
+ """
+
+ def __init__(self, logger, fixture, hooks, report):
+ """
+ Initializes the job with the specified fixture and custom
+ behaviors.
+ """
+
+ self.logger = logger
+ self.fixture = fixture
+ self.hooks = hooks
+ self.report = report
+
+ def __call__(self, queue, interrupt_flag):
+ """
+ Continuously executes tests from 'queue' and records their
+ details in 'report'.
+ """
+
+ should_stop = False
+ try:
+ self._run(queue, interrupt_flag)
+ except errors.StopExecution as err:
+ # Stop running tests immediately.
+ self.logger.error("Received a StopExecution exception: %s.", err)
+ should_stop = True
+ except:
+ # Unknown error, stop execution.
+ self.logger.exception("Encountered an error during test execution.")
+ should_stop = True
+
+ if should_stop:
+ # Set the interrupt flag so that other jobs do not start running more tests.
+ interrupt_flag.set()
+ # Drain the queue to unblock the main thread.
+ Job._drain_queue(queue)
+
+ def _run(self, queue, interrupt_flag):
+ """
+ Calls the before/after suite hooks and continuously executes
+ tests from 'queue'.
+ """
+
+ for hook in self.hooks:
+ hook.before_suite(self.report)
+
+ while not interrupt_flag.is_set():
+ test = queue.get_nowait()
+ try:
+ if test is None:
+ # Sentinel value received, so exit.
+ break
+ self._execute_test(test)
+ finally:
+ queue.task_done()
+
+ for hook in self.hooks:
+ hook.after_suite(self.report)
+
+ def _execute_test(self, test):
+ """
+ Calls the before/after test hooks and executes 'test'.
+ """
+
+ test.configure(self.fixture)
+ self._run_hooks_before_tests(test)
+
+ test(self.report)
+ if config.FAIL_FAST and not self.report.wasSuccessful():
+ test.logger.info("%s failed, so stopping..." % (test.shortDescription()))
+ raise errors.StopExecution("%s failed" % (test.shortDescription()))
+
+ if not self.fixture.is_running():
+ self.logger.error("%s marked as a failure because the fixture crashed during the test.",
+ test.shortDescription())
+ self.report.setFailure(test, return_code=2)
+ # Always fail fast if the fixture fails.
+ raise errors.StopExecution("%s not running after %s" %
+ (self.fixture, test.shortDescription()))
+
+ self._run_hooks_after_tests(test)
+
+ def _run_hooks_before_tests(self, test):
+ """
+ Runs the before_test method on each of the hooks.
+
+ Swallows any TestFailure exceptions if set to continue on
+ failure, and reraises any other exceptions.
+ """
+
+ try:
+ for hook in self.hooks:
+ hook.before_test(self.report)
+
+ except errors.StopExecution:
+ raise
+
+ except errors.ServerFailure:
+ self.logger.exception("%s marked as a failure by a hook's before_test.",
+ test.shortDescription())
+ self._fail_test(test, sys.exc_info(), return_code=2)
+ raise errors.StopExecution("A hook's before_test failed")
+
+ except errors.TestFailure:
+ self.logger.exception("%s marked as a failure by a hook's before_test.",
+ test.shortDescription())
+ self._fail_test(test, sys.exc_info(), return_code=1)
+ if config.FAIL_FAST:
+ raise errors.StopExecution("A hook's before_test failed")
+
+ except:
+ # Record the before_test() error in 'self.report'.
+ self.report.startTest(test)
+ self.report.addError(test, sys.exc_info())
+ self.report.stopTest(test)
+ raise
+
+ def _run_hooks_after_tests(self, test):
+ """
+ Runs the after_test method on each of the hooks.
+
+ Swallows any TestFailure exceptions if set to continue on
+ failure, and reraises any other exceptions.
+ """
+ try:
+ for hook in self.hooks:
+ hook.after_test(self.report)
+
+ except errors.StopExecution:
+ raise
+
+ except errors.ServerFailure:
+ self.logger.exception("%s marked as a failure by a hook's after_test.",
+ test.shortDescription())
+ self.report.setFailure(test, return_code=2)
+ raise errors.StopExecution("A hook's after_test failed")
+
+ except errors.TestFailure:
+ self.logger.exception("%s marked as a failure by a hook's after_test.",
+ test.shortDescription())
+ self.report.setFailure(test, return_code=1)
+ if config.FAIL_FAST:
+ raise errors.StopExecution("A hook's after_test failed")
+
+ except:
+ self.report.setError(test)
+ raise
+
+ def _fail_test(self, test, exc_info, return_code=1):
+ """
+ Helper to record a test as a failure with the provided return
+ code.
+
+ This method should not be used if 'test' has already been
+ started, instead use TestReport.setFailure().
+ """
+
+ self.report.startTest(test)
+ test.return_code = return_code
+ self.report.addFailure(test, exc_info)
+ self.report.stopTest(test)
+
+ @staticmethod
+ def _drain_queue(queue):
+ """
+ Removes all elements from 'queue' without actually doing
+ anything to them. Necessary to unblock the main thread that is
+ waiting for 'queue' to be empty.
+ """
+
+ try:
+ while not queue.empty():
+ queue.get_nowait()
+ queue.task_done()
+ except _queue.Empty:
+ # Multiple threads may be draining the queue simultaneously, so just ignore the
+ # exception from the race between queue.empty() being false and failing to get an item.
+ pass
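
The queue/sentinel/interrupt pattern that Job.__call__() and _drain_queue() rely on can be sketched standalone with the standard-library Queue and threading modules (resmokelib's wrapper adds a join timeout but behaves the same way here); the lambda "tests" below are placeholders:

    import threading
    import Queue

    def worker(queue, interrupt_flag):
        while not interrupt_flag.is_set():
            test = queue.get_nowait()
            try:
                if test is None:
                    break  # Sentinel value received, so exit.
                test()     # Stand-in for Job._execute_test().
            finally:
                queue.task_done()

    queue = Queue.Queue()
    interrupt_flag = threading.Event()

    for _ in range(2):
        queue.put(lambda: None)  # Two trivial "tests".
    for _ in range(2):
        queue.put(None)          # One sentinel per worker thread.

    workers = [threading.Thread(target=worker, args=(queue, interrupt_flag))
               for _ in range(2)]
    for thread in workers:
        thread.start()

    queue.join()  # Returns once every item, sentinels included, is task_done().
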
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/report.py b/test/qa-tests/buildscripts/resmokelib/testing/report.py
new file mode 100644
index 00000000000..61468e1dd41
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/report.py
@@ -0,0 +1,330 @@
+"""
+Extension to the unittest.TestResult to support additional test status
+and timing information for the report.json file.
+"""
+
+from __future__ import absolute_import
+
+import copy
+import time
+import unittest
+
+from .. import config
+from .. import logging
+
+
+class TestReport(unittest.TestResult):
+ """
+ Records test status and timing information.
+ """
+
+ def __init__(self, logger, logging_config, build_id=None, build_config=None):
+ """
+ Initializes the TestReport with the buildlogger configuration.
+ """
+
+ unittest.TestResult.__init__(self)
+
+ self.logger = logger
+ self.logging_config = logging_config
+ self.build_id = build_id
+ self.build_config = build_config
+
+ self.reset()
+
+ @classmethod
+ def combine(cls, *reports):
+ """
+ Merges the results from multiple TestReport instances into one.
+
+ If the same test is present in multiple reports, then one that
+ failed or errored is preferred over one that succeeded.
+ This behavior is useful when running multiple jobs that
+ dynamically add a #dbhash# test case.
+ """
+
+ combined_report = cls(logging.loggers.EXECUTOR, {})
+ combining_time = time.time()
+
+ for report in reports:
+ if not isinstance(report, TestReport):
+ raise TypeError("reports must be a list of TestReport instances")
+
+ for test_info in report.test_infos:
+ # If the user triggers a KeyboardInterrupt exception while a test is running, then
+ # it is possible for 'test_info' to be modified by a job thread later on. We make a
+ # shallow copy in order to ensure 'num_failed' is consistent with the actual number
+ # of tests that have status equal to "failed".
+ test_info = copy.copy(test_info)
+
+ # TestReport.addXX() may not have been called.
+ if test_info.status is None or test_info.return_code is None:
+ # Mark the test as having failed if it was interrupted. It might have passed if
+ # the suite ran to completion, but we wouldn't know for sure.
+ test_info.status = "fail"
+ test_info.return_code = -2
+
+ # TestReport.stopTest() may not have been called.
+ if test_info.end_time is None:
+ # Use the current time as the time that the test finished running.
+ test_info.end_time = combining_time
+
+ combined_report.test_infos.append(test_info)
+
+ combined_report.num_dynamic += report.num_dynamic
+
+ # Recompute number of success, failures, and errors.
+ combined_report.num_succeeded = len(combined_report.get_successful())
+ combined_report.num_failed = len(combined_report.get_failed())
+ combined_report.num_errored = len(combined_report.get_errored())
+
+ return combined_report
+
+ def startTest(self, test, dynamic=False):
+ """
+ Called immediately before 'test' is run.
+ """
+
+ unittest.TestResult.startTest(self, test)
+
+ test_info = _TestInfo(test.id(), dynamic)
+ test_info.start_time = time.time()
+ self.test_infos.append(test_info)
+
+ basename = test.basename()
+ if dynamic:
+ command = "(dynamic test case)"
+ self.num_dynamic += 1
+ else:
+ command = test.as_command()
+ self.logger.info("Running %s...\n%s", basename, command)
+
+ test_id = logging.buildlogger.new_test_id(self.build_id,
+ self.build_config,
+ basename,
+ command)
+
+ if self.build_id is not None:
+ endpoint = logging.buildlogger.APPEND_TEST_LOGS_ENDPOINT % {
+ "build_id": self.build_id,
+ "test_id": test_id,
+ }
+
+ test_info.url_endpoint = "%s/%s/" % (config.BUILDLOGGER_URL.rstrip("/"),
+ endpoint.strip("/"))
+
+ self.logger.info("Writing output of %s to %s.",
+ test.shortDescription(),
+ test_info.url_endpoint)
+
+ # Set up the test-specific logger.
+ logger_name = "%s:%s" % (test.logger.name, test.short_name())
+ logger = logging.loggers.new_logger(logger_name, parent=test.logger)
+ logging.config.apply_buildlogger_test_handler(logger,
+ self.logging_config,
+ build_id=self.build_id,
+ build_config=self.build_config,
+ test_id=test_id)
+
+ self.__original_loggers[test_info.test_id] = test.logger
+ test.logger = logger
+
+ def stopTest(self, test):
+ """
+ Called immediately after 'test' has run.
+ """
+
+ unittest.TestResult.stopTest(self, test)
+
+ test_info = self._find_test_info(test)
+ test_info.end_time = time.time()
+
+ time_taken = test_info.end_time - test_info.start_time
+ self.logger.info("%s ran in %0.2f seconds.", test.basename(), time_taken)
+
+ # Asynchronously closes the buildlogger test handler to avoid having too many threads open
+ # on 32-bit systems.
+ logging.flush.close_later(test.logger)
+
+ # Restore the original logger for the test.
+ test.logger = self.__original_loggers.pop(test.id())
+
+ def addError(self, test, err):
+ """
+ Called when a non-failureException was raised during the
+ execution of 'test'.
+ """
+
+ unittest.TestResult.addError(self, test, err)
+ self.num_errored += 1
+
+ test_info = self._find_test_info(test)
+ test_info.status = "error"
+ test_info.return_code = test.return_code
+
+ def setError(self, test):
+ """
+ Used to change the outcome of an existing test to an error.
+ """
+
+ test_info = self._find_test_info(test)
+ if test_info.end_time is None:
+ raise ValueError("stopTest was not called on %s" % (test.basename()))
+
+ test_info.status = "error"
+ test_info.return_code = 2
+
+ # Recompute number of success, failures, and errors.
+ self.num_succeeded = len(self.get_successful())
+ self.num_failed = len(self.get_failed())
+ self.num_errored = len(self.get_errored())
+
+ def addFailure(self, test, err):
+ """
+ Called when a failureException was raised during the execution
+ of 'test'.
+ """
+
+ unittest.TestResult.addFailure(self, test, err)
+ self.num_failed += 1
+
+ test_info = self._find_test_info(test)
+ test_info.status = "fail"
+ test_info.return_code = test.return_code
+
+ def setFailure(self, test, return_code=1):
+ """
+ Used to change the outcome of an existing test to a failure.
+ """
+
+ test_info = self._find_test_info(test)
+ if test_info.end_time is None:
+ raise ValueError("stopTest was not called on %s" % (test.basename()))
+
+ test_info.status = "fail"
+ test_info.return_code = return_code
+
+ # Recompute number of success, failures, and errors.
+ self.num_succeeded = len(self.get_successful())
+ self.num_failed = len(self.get_failed())
+ self.num_errored = len(self.get_errored())
+
+ def addSuccess(self, test):
+ """
+ Called when 'test' executed successfully.
+ """
+
+ unittest.TestResult.addSuccess(self, test)
+ self.num_succeeded += 1
+
+ test_info = self._find_test_info(test)
+ test_info.status = "pass"
+ test_info.return_code = test.return_code
+
+ def wasSuccessful(self):
+ """
+ Returns true if all tests executed successfully.
+ """
+ return self.num_failed == self.num_errored == 0
+
+ def get_successful(self):
+ """
+ Returns the status and timing information of the tests that
+ executed successfully.
+ """
+ return [test_info for test_info in self.test_infos if test_info.status == "pass"]
+
+ def get_failed(self):
+ """
+ Returns the status and timing information of the tests that
+ raised a failureException during their execution.
+ """
+ return [test_info for test_info in self.test_infos if test_info.status == "fail"]
+
+ def get_errored(self):
+ """
+ Returns the status and timing information of the tests that
+ raised a non-failureException during their execution.
+ """
+ return [test_info for test_info in self.test_infos if test_info.status == "error"]
+
+ def as_dict(self):
+ """
+ Return the test result information as a dictionary.
+
+ Used to create the report.json file.
+ """
+
+ results = []
+ for test_info in self.test_infos:
+ # Don't distinguish between failures and errors.
+ status = "pass" if test_info.status == "pass" else "fail"
+
+ result = {
+ "test_file": test_info.test_id,
+ "status": status,
+ "exit_code": test_info.return_code,
+ "start": test_info.start_time,
+ "end": test_info.end_time,
+ "elapsed": test_info.end_time - test_info.start_time,
+ }
+
+ if test_info.url_endpoint is not None:
+ result["url"] = test_info.url_endpoint
+
+ results.append(result)
+
+ return {
+ "results": results,
+ "failures": self.num_failed + self.num_errored,
+ }
+
+ def reset(self):
+ """
+ Resets the test report back to its initial state.
+ """
+
+ self.test_infos = []
+
+ self.num_dynamic = 0
+ self.num_succeeded = 0
+ self.num_failed = 0
+ self.num_errored = 0
+
+ self.__original_loggers = {}
+
+ def _find_test_info(self, test):
+ """
+ Returns the status and timing information associated with
+ 'test'.
+ """
+
+ test_id = test.id()
+
+ # Search the list backwards to efficiently find the status and timing information of a test
+ # that was recently started.
+ for test_info in reversed(self.test_infos):
+ if test_info.test_id == test_id:
+ return test_info
+
+ raise ValueError("Details for %s not found in the report" % (test.basename()))
+
+
+class _TestInfo(object):
+ """
+ Holder for the test status and timing information.
+ """
+
+ def __init__(self, test_id, dynamic):
+ """
+ Initializes the _TestInfo instance.
+ """
+
+ self.test_id = test_id
+ self.dynamic = dynamic
+
+ self.start_time = None
+ self.end_time = None
+ self.status = None
+ self.return_code = None
+ self.url_endpoint = None
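
For orientation, the report.json payload produced by TestReport.as_dict() has the following shape; the values below are made up, and "url" only appears when a buildlogger endpoint was configured for the test:

    import json

    report = {
        "results": [
            {
                "test_file": "jstests/core/example.js",
                "status": "pass",   # failures and errors are both reported as "fail"
                "exit_code": 0,
                "start": 1455120000.0,
                "end": 1455120004.2,
                "elapsed": 4.2,
            },
        ],
        "failures": 0,              # num_failed + num_errored
    }
    print(json.dumps(report, indent=4))
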
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/suite.py b/test/qa-tests/buildscripts/resmokelib/testing/suite.py
new file mode 100644
index 00000000000..65503b85e8b
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/suite.py
@@ -0,0 +1,140 @@
+"""
+Holder for a set of TestGroup instances.
+"""
+
+from __future__ import absolute_import
+
+import time
+
+from . import summary as _summary
+from . import testgroup
+from .. import selector as _selector
+
+
+class Suite(object):
+ """
+ A suite of tests.
+ """
+
+ TESTS_ORDER = ("cpp_unit_test", "cpp_integration_test", "db_test", "js_test", "mongos_test")
+
+ def __init__(self, suite_name, suite_config):
+ """
+ Initializes the suite with the specified name and configuration.
+ """
+
+ self._suite_name = suite_name
+ self._suite_config = suite_config
+
+ self.test_groups = []
+ for test_kind in Suite.TESTS_ORDER:
+ if test_kind not in suite_config["selector"]:
+ continue
+ tests = self._get_tests_for_group(test_kind)
+ test_group = testgroup.TestGroup(test_kind, tests)
+ self.test_groups.append(test_group)
+
+ self.return_code = None
+
+ self._start_time = None
+ self._end_time = None
+
+ def _get_tests_for_group(self, test_kind):
+ """
+ Returns the tests to run based on the 'test_kind'-specific
+ filtering policy.
+ """
+
+ test_info = self.get_selector_config()[test_kind]
+
+ # The mongos_test doesn't have to filter anything; the test_info is just the arguments to
+ # the mongos program to be used as the test case.
+ if test_kind == "mongos_test":
+ mongos_options = test_info # Just for easier reading.
+ if not isinstance(mongos_options, dict):
+ raise TypeError("Expected dictionary of arguments to mongos")
+ return [mongos_options]
+ elif test_kind == "cpp_integration_test":
+ tests = _selector.filter_cpp_integration_tests(**test_info)
+ elif test_kind == "cpp_unit_test":
+ tests = _selector.filter_cpp_unit_tests(**test_info)
+ elif test_kind == "db_test":
+ tests = _selector.filter_dbtests(**test_info)
+ else: # test_kind == "js_test":
+ tests = _selector.filter_jstests(**test_info)
+
+ return sorted(tests, key=str.lower)
+
+ def get_name(self):
+ """
+ Returns the name of the test suite.
+ """
+ return self._suite_name
+
+ def get_selector_config(self):
+ """
+ Returns the "selector" section of the YAML configuration.
+ """
+ return self._suite_config["selector"]
+
+ def get_executor_config(self):
+ """
+ Returns the "executor" section of the YAML configuration.
+ """
+ return self._suite_config["executor"]
+
+ def record_start(self):
+ """
+ Records the start time of the suite.
+ """
+ self._start_time = time.time()
+
+ def record_end(self):
+ """
+ Records the end time of the suite.
+
+ Sets the 'return_code' of the suite based on the return codes of
+ each of the individual test groups.
+ """
+
+ self._end_time = time.time()
+
+ # Only set 'return_code' if it hasn't been set already. It may have been set if there was
+ # an exception that happened during the execution of the suite.
+ if self.return_code is None:
+ # The return code of the suite should be 2 if any test group has a return code of 2.
+ # The return code of the suite should be 1 if any test group has a return code of 1,
+ # and none have a return code of 2. Otherwise, the return code should be 0.
+ self.return_code = max(test_group.return_code for test_group in self.test_groups)
+
+ def summarize(self, sb):
+ """
+ Appends a summary of each individual test group onto the string
+ builder 'sb'.
+ """
+
+ combined_summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)
+
+ summarized_groups = []
+ for group in self.test_groups:
+ group_sb = []
+ summary = group.summarize(group_sb)
+ summarized_groups.append(" %ss: %s" % (group.test_kind, "\n ".join(group_sb)))
+
+ combined_summary = _summary.combine(combined_summary, summary)
+
+ if combined_summary.num_run == 0:
+ sb.append("Suite did not run any tests.")
+ return
+
+ # Override the 'time_taken' attribute of the summary if we have more accurate timing
+ # information available.
+ if self._start_time is not None and self._end_time is not None:
+ time_taken = self._end_time - self._start_time
+ combined_summary = combined_summary._replace(time_taken=time_taken)
+
+ sb.append("%d test(s) ran in %0.2f seconds"
+ " (%d succeeded, %d were skipped, %d failed, %d errored)" % combined_summary)
+
+ for summary_text in summarized_groups:
+ sb.append(summary_text)
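
The Suite class above only reads two top-level sections of the loaded suite YAML: "selector" and "executor". A sketch of that structure as a Python dict; the "roots" key under js_test is illustrative only, since the accepted keys are whatever selector.filter_jstests() and friends take:

    suite_config = {
        "selector": {
            # One entry per test kind; Suite builds a TestGroup for each kind
            # found here, walking Suite.TESTS_ORDER.
            "js_test": {
                "roots": ["jstests/core/*.js"],  # illustrative filter options
            },
        },
        "executor": {
            # Returned as-is by get_executor_config(); consumed by the executor
            # rather than by Suite itself, so the shape shown here is illustrative.
            "js_test": {},
        },
    }
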
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/summary.py b/test/qa-tests/buildscripts/resmokelib/testing/summary.py
new file mode 100644
index 00000000000..1dae9ca81d6
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/summary.py
@@ -0,0 +1,22 @@
+"""
+Holder for summary information about a test group or suite.
+"""
+
+from __future__ import absolute_import
+
+import collections
+
+
+
+Summary = collections.namedtuple("Summary", ["num_run", "time_taken", "num_succeeded",
+ "num_skipped", "num_failed", "num_errored"])
+
+
+def combine(summary1, summary2):
+ """
+ Returns a summary representing the sum of 'summary1' and 'summary2'.
+ """
+ args = []
+ for i in xrange(len(Summary._fields)):
+ args.append(summary1[i] + summary2[i])
+ return Summary._make(args)
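
The combine() helper is an index-wise sum over the namedtuple fields; a quick self-contained check:

    import collections

    Summary = collections.namedtuple("Summary", ["num_run", "time_taken", "num_succeeded",
                                                 "num_skipped", "num_failed", "num_errored"])

    first = Summary(10, 12.5, 9, 0, 1, 0)
    second = Summary(4, 3.0, 4, 0, 0, 0)

    combined = Summary._make(a + b for (a, b) in zip(first, second))
    assert combined == Summary(14, 15.5, 13, 0, 1, 0)
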
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/testcases.py b/test/qa-tests/buildscripts/resmokelib/testing/testcases.py
new file mode 100644
index 00000000000..3b068c3b80f
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/testcases.py
@@ -0,0 +1,407 @@
+"""
+Subclasses of unittest.TestCase.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+import shutil
+import unittest
+
+from .. import config
+from .. import core
+from .. import logging
+from .. import utils
+
+
+def make_test_case(test_kind, *args, **kwargs):
+ """
+ Factory function for creating TestCase instances.
+ """
+
+ if test_kind not in _TEST_CASES:
+ raise ValueError("Unknown test kind '%s'" % (test_kind))
+ return _TEST_CASES[test_kind](*args, **kwargs)
+
+
+class TestCase(unittest.TestCase):
+ """
+ A test case to execute.
+ """
+
+ def __init__(self, logger, test_kind, test_name):
+ """
+ Initializes the TestCase with the name of the test.
+ """
+
+ unittest.TestCase.__init__(self, methodName="run_test")
+
+ if not isinstance(logger, logging.Logger):
+ raise TypeError("logger must be a Logger instance")
+
+ if not isinstance(test_kind, basestring):
+ raise TypeError("test_kind must be a string")
+
+ if not isinstance(test_name, basestring):
+ raise TypeError("test_name must be a string")
+
+ self.logger = logger
+ self.test_kind = test_kind
+ self.test_name = test_name
+
+ self.fixture = None
+ self.return_code = None
+
+ def long_name(self):
+ """
+ Returns the path to the test, relative to the current working directory.
+ """
+ return os.path.relpath(self.test_name)
+
+ def basename(self):
+ """
+ Returns the basename of the test.
+ """
+ return os.path.basename(self.test_name)
+
+ def short_name(self):
+ """
+ Returns the basename of the test without the file extension.
+ """
+ return os.path.splitext(self.basename())[0]
+
+ def id(self):
+ return self.test_name
+
+ def shortDescription(self):
+ return "%s %s" % (self.test_kind, self.test_name)
+
+ def configure(self, fixture):
+ """
+ Stores 'fixture' as an attribute for later use during execution.
+ """
+ self.fixture = fixture
+
+ def run_test(self):
+ """
+ Runs the specified test.
+ """
+ raise NotImplementedError("run_test must be implemented by TestCase subclasses")
+
+ def as_command(self):
+ """
+ Returns the command invocation used to run the test.
+ """
+ return self._make_process().as_command()
+
+ def _execute(self, process):
+ """
+ Runs the specified process.
+ """
+
+ self.logger.info("Starting %s...\n%s", self.shortDescription(), process.as_command())
+ process.start()
+ self.logger.info("%s started with pid %s.", self.shortDescription(), process.pid)
+
+ self.return_code = process.wait()
+ if self.return_code != 0:
+ raise self.failureException("%s failed" % (self.shortDescription()))
+
+ self.logger.info("%s finished.", self.shortDescription())
+
+ def _make_process(self):
+ """
+ Returns a new Process instance that could be used to run the
+ test or log the command.
+ """
+ raise NotImplementedError("_make_process must be implemented by TestCase subclasses")
+
+
+class CPPUnitTestCase(TestCase):
+ """
+ A C++ unit test to execute.
+ """
+
+ def __init__(self,
+ logger,
+ program_executable,
+ program_options=None):
+ """
+ Initializes the CPPUnitTestCase with the executable to run.
+ """
+
+ TestCase.__init__(self, logger, "Program", program_executable)
+
+ self.program_executable = program_executable
+ self.program_options = utils.default_if_none(program_options, {}).copy()
+
+ def run_test(self):
+ try:
+ program = self._make_process()
+ self._execute(program)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running C++ unit test %s.", self.basename())
+ raise
+
+ def _make_process(self):
+ return core.process.Process(self.logger,
+ [self.program_executable],
+ **self.program_options)
+
+
+class CPPIntegrationTestCase(TestCase):
+ """
+ A C++ integration test to execute.
+ """
+
+ def __init__(self,
+ logger,
+ program_executable,
+ program_options=None):
+ """
+ Initializes the CPPIntegrationTestCase with the executable to run.
+ """
+
+ TestCase.__init__(self, logger, "Program", program_executable)
+
+ self.program_executable = program_executable
+ self.program_options = utils.default_if_none(program_options, {}).copy()
+
+ def configure(self, fixture):
+ TestCase.configure(self, fixture)
+
+ self.program_options["connectionString"] = self.fixture.get_connection_string()
+
+ def run_test(self):
+ try:
+ program = self._make_process()
+ self._execute(program)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running C++ integration test %s.",
+ self.basename())
+ raise
+
+ def _make_process(self):
+ return core.programs.generic_program(self.logger,
+ [self.program_executable],
+ **self.program_options)
+
+
+class DBTestCase(TestCase):
+ """
+ A dbtest to execute.
+ """
+
+ def __init__(self,
+ logger,
+ dbtest_suite,
+ dbtest_executable=None,
+ dbtest_options=None):
+ """
+ Initializes the DBTestCase with the dbtest suite to run.
+ """
+
+ TestCase.__init__(self, logger, "DBTest", dbtest_suite)
+
+ # Command line options override the YAML configuration.
+ self.dbtest_executable = utils.default_if_none(config.DBTEST_EXECUTABLE, dbtest_executable)
+
+ self.dbtest_suite = dbtest_suite
+ self.dbtest_options = utils.default_if_none(dbtest_options, {}).copy()
+
+ def configure(self, fixture):
+ TestCase.configure(self, fixture)
+
+ # If a dbpath was specified, then use it as a container for all other dbpaths.
+ dbpath_prefix = self.dbtest_options.pop("dbpath", DBTestCase._get_dbpath_prefix())
+ dbpath = os.path.join(dbpath_prefix, "job%d" % (self.fixture.job_num), "unittest")
+ self.dbtest_options["dbpath"] = dbpath
+
+ shutil.rmtree(dbpath, ignore_errors=True)
+
+ try:
+ os.makedirs(dbpath)
+ except os.error:
+ # Directory already exists.
+ pass
+
+ def run_test(self):
+ try:
+ dbtest = self._make_process()
+ self._execute(dbtest)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running dbtest suite %s.", self.basename())
+ raise
+
+ def _make_process(self):
+ return core.programs.dbtest_program(self.logger,
+ executable=self.dbtest_executable,
+ suites=[self.dbtest_suite],
+ **self.dbtest_options)
+
+ @staticmethod
+ def _get_dbpath_prefix():
+ """
+ Returns the prefix of the dbpath to use for the dbtest
+ executable.
+
+ Order of preference:
+ 1. The --dbpathPrefix specified at the command line.
+ 2. Value of the TMPDIR environment variable.
+ 3. Value of the TEMP environment variable.
+ 4. Value of the TMP environment variable.
+ 5. The /tmp directory.
+ """
+
+ if config.DBPATH_PREFIX is not None:
+ return config.DBPATH_PREFIX
+
+ for env_var in ("TMPDIR", "TEMP", "TMP"):
+ if env_var in os.environ:
+ return os.environ[env_var]
+ return os.path.normpath("/tmp")
+
+
+class JSTestCase(TestCase):
+ """
+ A jstest to execute.
+ """
+
+ def __init__(self,
+ logger,
+ js_filename,
+ shell_executable=None,
+ shell_options=None):
+ "Initializes the JSTestCase with the JS file to run."
+
+ TestCase.__init__(self, logger, "JSTest", js_filename)
+
+ # Command line options override the YAML configuration.
+ self.shell_executable = utils.default_if_none(config.MONGO_EXECUTABLE, shell_executable)
+
+ self.js_filename = js_filename
+ self.shell_options = utils.default_if_none(shell_options, {}).copy()
+
+ def configure(self, fixture):
+ TestCase.configure(self, fixture)
+
+ if self.fixture.port is not None:
+ self.shell_options["port"] = self.fixture.port
+
+ global_vars = self.shell_options.get("global_vars", {}).copy()
+ data_dir = self._get_data_dir(global_vars)
+
+ # Set MongoRunner.dataPath if overridden at command line or not specified in YAML.
+ if config.DBPATH_PREFIX is not None or "MongoRunner.dataPath" not in global_vars:
+ # dataPath property is the dataDir property with a trailing slash.
+ data_path = os.path.join(data_dir, "")
+ else:
+ data_path = global_vars["MongoRunner.dataPath"]
+
+ global_vars["MongoRunner.dataDir"] = data_dir
+ global_vars["MongoRunner.dataPath"] = data_path
+
+ test_data = global_vars.get("TestData", {}).copy()
+ test_data["minPort"] = core.network.PortAllocator.min_test_port(fixture.job_num)
+ test_data["maxPort"] = core.network.PortAllocator.max_test_port(fixture.job_num)
+
+ global_vars["TestData"] = test_data
+ self.shell_options["global_vars"] = global_vars
+
+ shutil.rmtree(data_dir, ignore_errors=True)
+
+ try:
+ os.makedirs(data_dir)
+ except os.error:
+ # Directory already exists.
+ pass
+
+ def _get_data_dir(self, global_vars):
+ """
+ Returns the value that the mongo shell should set for the
+ MongoRunner.dataDir property.
+ """
+
+ # Command line options override the YAML configuration.
+ data_dir_prefix = utils.default_if_none(config.DBPATH_PREFIX,
+ global_vars.get("MongoRunner.dataDir"))
+ data_dir_prefix = utils.default_if_none(data_dir_prefix, config.DEFAULT_DBPATH_PREFIX)
+ return os.path.join(data_dir_prefix,
+ "job%d" % (self.fixture.job_num),
+ config.MONGO_RUNNER_SUBDIR)
+
+ def run_test(self):
+ try:
+ shell = self._make_process()
+ self._execute(shell)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running jstest %s.", self.basename())
+ raise
+
+ def _make_process(self):
+ return core.programs.mongo_shell_program(self.logger,
+ executable=self.shell_executable,
+ filename=self.js_filename,
+ **self.shell_options)
+
+
+class MongosTestCase(TestCase):
+ """
+ A TestCase which runs a mongos binary with the given parameters.
+ """
+
+ def __init__(self,
+ logger,
+ mongos_options):
+ """
+ Initializes the mongos test and saves the options.
+ """
+
+ self.mongos_executable = utils.default_if_none(config.MONGOS_EXECUTABLE,
+ config.DEFAULT_MONGOS_EXECUTABLE)
+ # Use the executable as the test name.
+ TestCase.__init__(self, logger, "mongos", self.mongos_executable)
+ self.options = mongos_options.copy()
+
+ def configure(self, fixture):
+ """
+ Ensures the --test option is present in the mongos options.
+ """
+
+ TestCase.configure(self, fixture)
+ # Always specify test option to ensure the mongos will terminate.
+ if "test" not in self.options:
+ self.options["test"] = ""
+
+ def run_test(self):
+ try:
+ mongos = self._make_process()
+ self._execute(mongos)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running %s.", mongos.as_command())
+ raise
+
+ def _make_process(self):
+ return core.programs.mongos_program(self.logger,
+ executable=self.mongos_executable,
+ **self.options)
+
+
+_TEST_CASES = {
+ "cpp_unit_test": CPPUnitTestCase,
+ "cpp_integration_test": CPPIntegrationTestCase,
+ "db_test": DBTestCase,
+ "js_test": JSTestCase,
+ "mongos_test": MongosTestCase,
+}
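
The dbpath resolution that DBTestCase._get_dbpath_prefix() documents reduces to a small preference chain; a standalone sketch, with the --dbpathPrefix option (config.DBPATH_PREFIX) modeled as an optional argument:

    import os

    def get_dbpath_prefix(dbpath_prefix_option=None):
        # 1. An explicit --dbpathPrefix wins.
        if dbpath_prefix_option is not None:
            return dbpath_prefix_option
        # 2-4. Otherwise fall back through the usual temp-dir environment variables.
        for env_var in ("TMPDIR", "TEMP", "TMP"):
            if env_var in os.environ:
                return os.environ[env_var]
        # 5. Last resort.
        return os.path.normpath("/tmp")

    print(get_dbpath_prefix())            # environment-driven fallback
    print(get_dbpath_prefix("/data/db"))  # explicit prefix wins
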
diff --git a/test/qa-tests/buildscripts/resmokelib/testing/testgroup.py b/test/qa-tests/buildscripts/resmokelib/testing/testgroup.py
new file mode 100644
index 00000000000..688d56c296d
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/testing/testgroup.py
@@ -0,0 +1,132 @@
+"""
+Holder for the (test kind, list of tests) pair with additional metadata
+about when and how they execute.
+"""
+
+from __future__ import absolute_import
+
+import time
+
+from . import summary as _summary
+
+
+class TestGroup(object):
+ """
+ A class to encapsulate the results of running a group of tests
+ of a particular kind (e.g. C++ unit tests, dbtests, jstests).
+ """
+
+ def __init__(self, test_kind, tests):
+ """
+ Initializes the TestGroup with a list of tests.
+ """
+
+ self.test_kind = test_kind
+ self.tests = tests
+
+ self.return_code = None # Set by the executor.
+
+ self._start_times = []
+ self._end_times = []
+ self._reports = []
+
+ def get_reports(self):
+ """
+ Returns the list of reports.
+ """
+ return self._reports
+
+ def record_start(self):
+ """
+ Records the start time of an execution.
+ """
+ self._start_times.append(time.time())
+
+ def record_end(self, report):
+ """
+ Records the end time of an execution.
+ """
+ self._end_times.append(time.time())
+ self._reports.append(report)
+
+ def summarize_latest(self, sb):
+ """
+ Returns a summary of the latest execution of the group and appends a
+ summary of that execution onto the string builder 'sb'.
+ """
+ return self._summarize_execution(-1, sb)
+
+ def summarize(self, sb):
+ """
+ Returns a summary of the execution(s) of the group and appends a
+ summary of the execution(s) onto the string builder 'sb'.
+ """
+
+ if not self._reports:
+ sb.append("No tests ran.")
+ return _summary.Summary(0, 0.0, 0, 0, 0, 0)
+
+ if len(self._reports) == 1:
+ return self._summarize_execution(0, sb)
+
+ return self._summarize_repeated(sb)
+
+ def _summarize_repeated(self, sb):
+ """
+ Returns the summary information of all executions and appends
+ each execution's summary onto the string builder 'sb'. Also
+ appends information of how many repetitions there were.
+ """
+
+ num_iterations = len(self._reports)
+ total_time_taken = self._end_times[-1] - self._start_times[0]
+ sb.append("Executed %d times in %0.2f seconds:" % (num_iterations, total_time_taken))
+
+ combined_summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)
+ for iteration in xrange(num_iterations):
+ # Summarize each execution as a bulleted list of results.
+ bulleter_sb = []
+ summary = self._summarize_execution(iteration, bulleter_sb)
+ combined_summary = _summary.combine(combined_summary, summary)
+
+ for (i, line) in enumerate(bulleter_sb):
+ # Only bullet first line, indent others.
+ prefix = "* " if i == 0 else " "
+ sb.append(prefix + line)
+
+ return combined_summary
+
+ def _summarize_execution(self, iteration, sb):
+ """
+ Returns the summary information of the execution given by
+ 'iteration' and appends a summary of that execution onto the
+ string builder 'sb'.
+ """
+
+ report = self._reports[iteration]
+ time_taken = self._end_times[iteration] - self._start_times[iteration]
+
+ num_run = report.num_succeeded + report.num_errored + report.num_failed
+ num_skipped = len(self.tests) + report.num_dynamic - num_run
+
+ if report.num_succeeded == num_run and num_skipped == 0:
+ sb.append("All %d test(s) passed in %0.2f seconds." % (num_run, time_taken))
+ return _summary.Summary(num_run, time_taken, num_run, 0, 0, 0)
+
+ summary = _summary.Summary(num_run, time_taken, report.num_succeeded, num_skipped,
+ report.num_failed, report.num_errored)
+
+ sb.append("%d test(s) ran in %0.2f seconds"
+ " (%d succeeded, %d were skipped, %d failed, %d errored)" % summary)
+
+ if report.num_failed > 0:
+ sb.append("The following tests failed (with exit code):")
+ for test_info in report.get_failed():
+ sb.append(" %s (%d)" % (test_info.test_id, test_info.return_code))
+
+ if report.num_errored > 0:
+ sb.append("The following tests had errors:")
+ for test_info in report.get_errored():
+ sb.append(" %s" % (test_info.test_id))
+
+ return summary
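
The "skipped" count in _summarize_execution() is derived rather than tracked directly: everything that was selected (plus any dynamically added test cases) but produced no result. With made-up numbers:

    num_tests = 5        # len(self.tests): tests selected for the group
    num_dynamic = 1      # e.g. a #dbhash# test case added by a hook
    num_succeeded, num_failed, num_errored = 3, 1, 0

    num_run = num_succeeded + num_failed + num_errored
    num_skipped = num_tests + num_dynamic - num_run
    assert (num_run, num_skipped) == (4, 2)
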
diff --git a/test/qa-tests/buildscripts/resmokelib/utils/__init__.py b/test/qa-tests/buildscripts/resmokelib/utils/__init__.py
new file mode 100644
index 00000000000..df387cc3323
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/utils/__init__.py
@@ -0,0 +1,88 @@
+"""
+Helper functions.
+"""
+
+from __future__ import absolute_import
+
+import os.path
+
+import pymongo
+import yaml
+
+
+def default_if_none(value, default):
+ return value if value is not None else default
+
+
+def is_string_list(lst):
+ """
+ Returns true if 'lst' is a list of strings, and false otherwise.
+ """
+ return isinstance(lst, list) and all(isinstance(x, basestring) for x in lst)
+
+
+def is_string_set(value):
+ """
+ Returns true if 'value' is a set of strings, and false otherwise.
+ """
+ return isinstance(value, set) and all(isinstance(x, basestring) for x in value)
+
+
+def is_js_file(filename):
+ """
+ Returns true if 'filename' ends in .js, and false otherwise.
+ """
+ return os.path.splitext(filename)[1] == ".js"
+
+
+def is_yaml_file(filename):
+ """
+ Returns true if 'filename' ends in .yml or .yaml, and false
+ otherwise.
+ """
+ return os.path.splitext(filename)[1] in (".yaml", ".yml")
+
+
+def load_yaml_file(filename):
+ """
+ Attempts to read 'filename' as YAML.
+ """
+ try:
+ with open(filename, "r") as fp:
+ return yaml.safe_load(fp)
+ except yaml.YAMLError as err:
+ raise ValueError("File '%s' contained invalid YAML: %s" % (filename, err))
+
+
+def dump_yaml(value):
+ """
+ Returns 'value' formatted as YAML.
+ """
+ # Use block (indented) style for formatting YAML.
+ return yaml.safe_dump(value, default_flow_style=False).rstrip()
+
+def load_yaml(value):
+ """
+ Attempts to parse 'value' as YAML.
+ """
+ try:
+ return yaml.safe_load(value)
+ except yaml.YAMLError as err:
+ raise ValueError("Attempted to parse invalid YAML value '%s': %s" % (value, err))
+
+
+def new_mongo_client(port, read_preference=pymongo.ReadPreference.PRIMARY, timeout_millis=30000):
+ """
+ Returns a pymongo.MongoClient connected on 'port' with a read
+ preference of 'read_preference'.
+
+ The PyMongo driver will wait up to 'timeout_millis' milliseconds
+ before concluding that the server is unavailable.
+ """
+
+ kwargs = {"connectTimeoutMS": timeout_millis}
+ if pymongo.version_tuple[0] >= 3:
+ kwargs["serverSelectionTimeoutMS"] = timeout_millis
+ kwargs["connect"] = True
+
+ return pymongo.MongoClient(port=port, read_preference=read_preference, **kwargs)
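
A round trip through the YAML helpers above, mirrored with PyYAML directly so it runs standalone (PyYAML assumed installed; the value is arbitrary):

    import yaml

    value = {"selector": {"js_test": {"roots": ["jstests/core/*.js"]}}}

    # dump_yaml(): block (indented) style with the trailing newline stripped.
    text = yaml.safe_dump(value, default_flow_style=False).rstrip()
    # load_yaml() is the inverse.
    assert yaml.safe_load(text) == value
    print(text)
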
diff --git a/test/qa-tests/buildscripts/resmokelib/utils/globstar.py b/test/qa-tests/buildscripts/resmokelib/utils/globstar.py
new file mode 100644
index 00000000000..644ebfe3e38
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/utils/globstar.py
@@ -0,0 +1,202 @@
+"""
+Filename globbing utility.
+"""
+
+from __future__ import absolute_import
+
+import glob as _glob
+import os
+import os.path
+import re
+
+
+_GLOBSTAR = "**"
+_CONTAINS_GLOB_PATTERN = re.compile("[*?[]")
+
+
+def is_glob_pattern(s):
+ """
+ Returns true if 's' represents a glob pattern, and false otherwise.
+ """
+
+ # Copied from glob.has_magic().
+ return _CONTAINS_GLOB_PATTERN.search(s) is not None
+
+
+def glob(globbed_pathname):
+ """
+ Return a list of pathnames matching the 'globbed_pathname' pattern.
+
+ In addition to containing simple shell-style wildcards a la fnmatch,
+ the pattern may also contain globstars ("**"), which are recursively
+ expanded to match zero or more subdirectories.
+ """
+
+ return list(iglob(globbed_pathname))
+
+
+def iglob(globbed_pathname):
+ """
+ Emit a list of pathnames matching the 'globbed_pathname' pattern.
+
+ In addition to containing simple shell-style wildcards a la fnmatch,
+ the pattern may also contain globstars ("**"), which are recursively
+ expanded to match zero or more subdirectories.
+ """
+
+ parts = _split_path(globbed_pathname)
+ parts = _canonicalize(parts)
+
+ index = _find_globstar(parts)
+ if index == -1:
+ for pathname in _glob.iglob(globbed_pathname):
+ # Normalize 'pathname' so exact string comparison can be used later.
+ yield os.path.normpath(pathname)
+ return
+
+ # **, **/, or **/a
+ if index == 0:
+ expand = _expand_curdir
+
+ # a/** or a/**/ or a/**/b
+ else:
+ expand = _expand
+
+ prefix_parts = parts[:index]
+ suffix_parts = parts[index + 1:]
+
+ prefix = os.path.join(*prefix_parts) if prefix_parts else os.curdir
+ suffix = os.path.join(*suffix_parts) if suffix_parts else ""
+
+ for (kind, path) in expand(prefix):
+ if not suffix_parts:
+ yield path
+
+ # Avoid following symlinks to avoid an infinite loop
+ elif suffix_parts and kind == "dir" and not os.path.islink(path):
+ path = os.path.join(path, suffix)
+ for pathname in iglob(path):
+ yield pathname
+
+
+def _split_path(pathname):
+ """
+ Return 'pathname' as a list of path components.
+ """
+
+ parts = []
+
+ while True:
+ (dirname, basename) = os.path.split(pathname)
+ parts.append(basename)
+ if pathname == dirname:
+ parts.append(dirname)
+ break
+ if not dirname:
+ break
+ pathname = dirname
+
+ parts.reverse()
+ return parts
+
+
+def _canonicalize(parts):
+ """
+ Return a copy of 'parts' with consecutive "**"s coalesced.
+ Raise a ValueError for unsupported uses of "**".
+ """
+
+ res = []
+
+ prev_was_globstar = False
+ for p in parts:
+ if p == _GLOBSTAR:
+ # Skip consecutive **'s
+ if not prev_was_globstar:
+ prev_was_globstar = True
+ res.append(p)
+ elif _GLOBSTAR in p: # a/b**/c or a/**b/c
+ raise ValueError("Can only specify glob patterns of the form a/**/b")
+ else:
+ prev_was_globstar = False
+ res.append(p)
+
+ return res
+
+
+def _find_globstar(parts):
+ """
+ Return the index of the first occurrence of "**" in 'parts'.
+ Return -1 if "**" is not found in the list.
+ """
+
+ for (i, p) in enumerate(parts):
+ if p == _GLOBSTAR:
+ return i
+ return -1
+
+
+def _list_dir(pathname):
+ """
+ Return a pair of the subdirectory names and filenames immediately
+ contained within the 'pathname' directory.
+
+ If 'pathname' does not exist, then None is returned.
+ """
+
+ try:
+ (_root, dirs, files) = os.walk(pathname).next()
+ return (dirs, files)
+ except StopIteration:
+ return None # 'pathname' directory does not exist
+
+
+def _expand(pathname):
+ """
+ Emit tuples of the form ("dir", dirname) and ("file", filename)
+ of all directories and files contained within the 'pathname' directory.
+ """
+
+ res = _list_dir(pathname)
+ if res is None:
+ return
+
+ (dirs, files) = res
+
+ # Zero expansion: "**" may match zero directories, so yield 'pathname' itself.
+ if os.path.basename(pathname):
+ yield ("dir", os.path.join(pathname, ""))
+
+ for f in files:
+ path = os.path.join(pathname, f)
+ yield ("file", path)
+
+ for d in dirs:
+ path = os.path.join(pathname, d)
+ for x in _expand(path):
+ yield x
+
+
+def _expand_curdir(pathname):
+ """
+ Emit tuples of the form ("dir", dirname) and ("file", filename)
+ of all directories and files contained within the 'pathname' directory.
+
+ The returned pathnames omit a "./" prefix.
+ """
+
+ res = _list_dir(pathname)
+ if res is None:
+ return
+
+ (dirs, files) = res
+
+ # Zero expansion: "**" may match zero directories, so yield the current directory.
+ yield ("dir", "")
+
+ for f in files:
+ yield ("file", f)
+
+ for d in dirs:
+ for x in _expand(d):
+ yield x
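
To make the intended "**" semantics concrete, the following self-contained check builds a small tree and lists what a pattern like <root>/**/*.js should match: .js files at any depth, including the top level. It uses os.walk and fnmatch directly so it runs without the globstar module on the path:

    import fnmatch
    import os
    import os.path
    import shutil
    import tempfile

    root = tempfile.mkdtemp()
    for rel in ("a.js", "sub/b.js", "sub/deep/c.js"):
        full = os.path.join(root, rel)
        if not os.path.isdir(os.path.dirname(full)):
            os.makedirs(os.path.dirname(full))
        open(full, "w").close()

    matches = []
    for (dirpath, _dirs, files) in os.walk(root):
        matches.extend(os.path.join(dirpath, f) for f in fnmatch.filter(files, "*.js"))

    # globstar.glob(os.path.join(root, "**/*.js")) is expected to find the same three files.
    assert len(matches) == 3
    shutil.rmtree(root)
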
diff --git a/test/qa-tests/buildscripts/resmokelib/utils/jscomment.py b/test/qa-tests/buildscripts/resmokelib/utils/jscomment.py
new file mode 100644
index 00000000000..18da7885820
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/utils/jscomment.py
@@ -0,0 +1,78 @@
+"""
+Utility for parsing JS comments.
+"""
+
+from __future__ import absolute_import
+
+import re
+
+import yaml
+
+
+# TODO: use a more robust regular expression for matching tags
+_JSTEST_TAGS_RE = re.compile(r".*@tags\s*:\s*(\[[^\]]*\])", re.DOTALL)
+
+
+def get_tags(pathname):
+ """
+ Returns the list of tags found in the (JS-style) comments of
+ 'pathname'. The definition can span multiple lines, use unquoted,
+ single-quoted, or double-quoted strings, and use the '#' character
+ for inline commenting.
+
+ e.g.
+
+ /**
+ * @tags: [ "tag1", # double quoted
+ * 'tag2' # single quoted
+ * # line with only a comment
+ * , tag3 # no quotes
+ * tag4, # trailing comma
+ * ]
+ */
+ """
+
+ with open(pathname) as fp:
+ match = _JSTEST_TAGS_RE.match(fp.read())
+ if match:
+ try:
+ # TODO: it might be worth supporting the block (indented) style of YAML lists in
+ # addition to the flow (bracketed) style
+ tags = yaml.safe_load(_strip_jscomments(match.group(1)))
+ if not isinstance(tags, list) or not all(isinstance(tag, basestring) for tag in tags):
+ raise TypeError("Expected a list of string tags, but got '%s'" % (tags))
+ return tags
+ except yaml.YAMLError as err:
+ raise ValueError("File '%s' contained invalid tags (expected YAML): %s"
+ % (pathname, err))
+
+ return []
+
+
+def _strip_jscomments(s):
+ """
+ Given a string 's' that represents the contents after the "@tags:"
+ annotation in the JS file, this function returns a string that can
+ be converted to YAML.
+
+ e.g.
+
+ [ "tag1", # double quoted
+ * 'tag2' # single quoted
+ * # line with only a comment
+ * , tag3 # no quotes
+ * tag4, # trailing comma
+ * ]
+
+ If //-style JS comments were used, then the example remains the
+ same except that the '*' character is replaced by '//'.
+ """
+
+ yaml_lines = []
+
+ for line in s.splitlines():
+ # Remove leading whitespace and symbols that commonly appear in JS comments.
+ line = line.lstrip("\t ").lstrip("*/")
+ yaml_lines.append(line)
+
+ return "\n".join(yaml_lines)
diff --git a/test/qa-tests/buildscripts/resmokelib/utils/queue.py b/test/qa-tests/buildscripts/resmokelib/utils/queue.py
new file mode 100644
index 00000000000..80da5e2cc66
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/utils/queue.py
@@ -0,0 +1,52 @@
+"""
+Extension to the Queue.Queue class.
+
+Added support for the join() method to take a timeout. This is necessary
+in order for KeyboardInterrupt exceptions to get propagated.
+
+See https://bugs.python.org/issue1167930 for more details.
+"""
+
+from __future__ import absolute_import
+
+import Queue
+import time
+
+
+# Exception that is raised when get_nowait() is called on an empty Queue.
+Empty = Queue.Empty
+
+
+class Queue(Queue.Queue):
+ """
+ A multi-producer, multi-consumer queue.
+ """
+
+ def join(self, timeout=None):
+ """
+ Wait until all items in the queue have been retrieved and processed,
+ or until 'timeout' seconds have passed.
+
+ The count of unfinished tasks is incremented whenever an item is added
+ to the queue. The count is decremented whenever task_done() is called
+ to indicate that all work on the retrieved item was completed.
+
+ When the number of unfinished tasks reaches zero, True is returned.
+ If the number of unfinished tasks remains nonzero after 'timeout'
+ seconds have passed, then False is returned.
+ """
+ with self.all_tasks_done:
+ if timeout is None:
+ while self.unfinished_tasks:
+ self.all_tasks_done.wait()
+ elif timeout < 0:
+ raise ValueError("timeout must be a nonnegative number")
+ else:
+ # Pass timeout down to lock acquisition
+ deadline = time.time() + timeout
+ while self.unfinished_tasks:
+ remaining = deadline - time.time()
+ if remaining <= 0.0:
+ return False
+ self.all_tasks_done.wait(remaining)
+ return True
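
The point of the timeout is that the consumer (resmoke's executor) can poll instead of blocking forever, so a Ctrl-C is actually delivered. A sketch of that usage, assuming buildscripts/ is importable (e.g. run from test/qa-tests/) along with its pymongo and PyYAML dependencies:

    import threading
    import time

    from buildscripts.resmokelib.utils import queue as _queue

    work_queue = _queue.Queue()
    for item in (1, 2):
        work_queue.put(item)

    def drain():
        while True:
            try:
                work_queue.get_nowait()
            except _queue.Empty:
                return
            time.sleep(0.05)
            work_queue.task_done()

    threading.Thread(target=drain).start()

    # join() returns False on each timeout, so a KeyboardInterrupt lands between
    # calls instead of being swallowed by an uninterruptible wait.
    while not work_queue.join(timeout=0.1):
        pass
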
diff --git a/test/qa-tests/buildscripts/resmokelib/utils/timer.py b/test/qa-tests/buildscripts/resmokelib/utils/timer.py
new file mode 100644
index 00000000000..80531d5db5c
--- /dev/null
+++ b/test/qa-tests/buildscripts/resmokelib/utils/timer.py
@@ -0,0 +1,125 @@
+"""
+Alternative to the threading.Timer class.
+
+Enables a timer to be restarted without needing to construct a new thread
+each time. This is necessary to execute periodic actions, e.g. flushing
+log messages to buildlogger, while avoiding errors related to "can't start
+new thread" that would otherwise occur on Windows.
+"""
+
+from __future__ import absolute_import
+
+import threading
+
+
+class AlarmClock(threading.Thread):
+ """
+ Calls a function after a specified number of seconds.
+ """
+
+ def __init__(self, interval, func, args=None, kwargs=None):
+ """
+ Initializes the timer with a function to periodically execute.
+ """
+
+ threading.Thread.__init__(self)
+
+ # A non-dismissed timer should not prevent the program from exiting
+ self.daemon = True
+
+ self.interval = interval
+ self.func = func
+ self.args = args if args is not None else []
+ self.kwargs = kwargs if kwargs is not None else {}
+
+ self.lock = threading.Lock()
+ self.cond = threading.Condition(self.lock)
+
+ self.snoozed = False # canceled for one execution
+ self.dismissed = False # canceled for all time
+ self.restarted = False
+
+ def dismiss(self):
+ """
+ Disables the timer.
+ """
+
+ with self.lock:
+ self.dismissed = True
+ self.cond.notify_all()
+
+ self.join() # Tidy up the started thread.
+
+ cancel = dismiss # Expose API compatible with that of threading.Timer.
+
+ def snooze(self):
+ """
+ Skips the next execution of 'func' if it has not already started.
+ """
+
+ with self.lock:
+ if self.dismissed:
+ raise ValueError("Timer cannot be snoozed if it has been dismissed")
+
+ self.snoozed = True
+ self.restarted = False
+ self.cond.notify_all()
+
+ def reset(self):
+ """
+ Restarts the timer, causing it to wait 'interval' seconds before calling
+ 'func' again.
+ """
+
+ with self.lock:
+ if self.dismissed:
+ raise ValueError("Timer cannot be reset if it has been dismissed")
+
+ if not self.snoozed:
+ raise ValueError("Timer cannot be reset if it has not been snoozed")
+
+ self.restarted = True
+ self.cond.notify_all()
+
+ def run(self):
+ """
+ Repeatedly calls 'func' with a delay of 'interval' seconds between executions.
+
+ If the timer is snoozed before 'func' is called, then it waits to be reset.
+ After it has been reset, the timer will again wait 'interval' seconds and
+ then try to call 'func'.
+
+ If the timer is dismissed, then no subsequent executions of 'func' are made.
+ """
+
+ while True:
+ with self.lock:
+ if self.dismissed:
+ return
+
+ # Wait for the specified amount of time.
+ self.cond.wait(self.interval)
+
+ if self.dismissed:
+ return
+
+ # If the timer was snoozed, then it should wait to be reset.
+ if self.snoozed:
+ while not self.restarted:
+ self.cond.wait()
+
+ if self.dismissed:
+ return
+
+ self.restarted = False
+ self.snoozed = False
+ continue
+
+ # Execute the function after the lock has been released to prevent potential deadlocks
+ # with the invoked function.
+ self.func(*self.args, **self.kwargs)
+
+ # Reacquire the lock.
+ with self.lock:
+ # Ignore snoozes that took place while the function was being executed.
+ self.snoozed = False
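
A sketch of driving the AlarmClock from calling code, in the spirit of how the periodic buildlogger flushing is expected to use it: the timer fires every 'interval' seconds until snoozed, waits for reset(), and stops for good on dismiss(). It assumes the resmokelib package and its dependencies are importable; flush() here is a stand-in for the real periodic work:

    import time

    from buildscripts.resmokelib.utils.timer import AlarmClock

    def flush():
        print("flushing buffered log output")  # placeholder action

    alarm = AlarmClock(1.0, flush)
    alarm.start()

    time.sleep(2.5)   # flush() fires roughly once per second
    alarm.snooze()    # skip the next firing and hold the timer...
    alarm.reset()     # ...then arm it for another full interval
    time.sleep(1.5)
    alarm.dismiss()   # cancel permanently; also joins the timer thread
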