author     Jonathan Abrahams <jonathan@mongodb.com>       2018-09-22 15:46:43 -0400
committer  Max Hirschhorn <max.hirschhorn@mongodb.com>    2018-09-22 15:46:43 -0400
commit     1b0a962b03696784741ff9f4e83b2f084d1d88d7 (patch)
tree       1550a86f34f6e2b0bbda9106fcb3fef7af7c49a8
parent     189dd0a7a0a15efb98d5a0349964ac4ca92e06bb (diff)
download   mongo-1b0a962b03696784741ff9f4e83b2f084d1d88d7.tar.gz
SERVER-36076 Create new resmoke.py test suite for running mongoebench on a desktop
(cherry picked from commit a444720202d97795b71cf2daaaa2bea94b430ef9)
-rw-r--r--  buildscripts/resmokeconfig/suites/benchrun_embedded_aggregation.yml                   |   9
-rw-r--r--  buildscripts/resmokeconfig/suites/benchrun_embedded_commands.yml                      |   9
-rw-r--r--  buildscripts/resmokeconfig/suites/benchrun_embedded_insert.yml                        |   9
-rw-r--r--  buildscripts/resmokeconfig/suites/benchrun_embedded_misc.yml                          |  18
-rw-r--r--  buildscripts/resmokeconfig/suites/benchrun_embedded_mixed_and_multi.yml               |  10
-rw-r--r--  buildscripts/resmokeconfig/suites/benchrun_embedded_queries.yml                       |   9
-rw-r--r--  buildscripts/resmokeconfig/suites/benchrun_embedded_remove.yml                        |   9
-rw-r--r--  buildscripts/resmokeconfig/suites/benchrun_embedded_update.yml                        |   9
-rw-r--r--  buildscripts/resmokelib/config.py                                                     |   8
-rw-r--r--  buildscripts/resmokelib/parser.py                                                     |  25
-rw-r--r--  buildscripts/resmokelib/selector.py                                                   |  10
-rw-r--r--  buildscripts/resmokelib/testing/hooks/combine_benchrun_embedded_results.py            | 150
-rw-r--r--  buildscripts/resmokelib/testing/testcases/benchrun_embedded_test.py                   |  96
-rwxr-xr-x  buildscripts/tests/resmokelib/testing/hooks/test_combine_benchrun_embedded_results.py | 163
-rw-r--r--  etc/evergreen.yml                                                                     | 134
15 files changed, 653 insertions(+), 15 deletions(-)
diff --git a/buildscripts/resmokeconfig/suites/benchrun_embedded_aggregation.yml b/buildscripts/resmokeconfig/suites/benchrun_embedded_aggregation.yml
new file mode 100644
index 00000000000..65ed34dafcb
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/benchrun_embedded_aggregation.yml
@@ -0,0 +1,9 @@
+test_kind: benchrun_embedded_test
+
+selector:
+ roots:
+ - benchrun_embedded/**/aggregation*.json
+
+executor:
+ hooks:
+ - class: CombineBenchrunEmbeddedResults
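
This suite file, and the seven that follow, share a three-part shape: a test_kind that maps to the new benchrun_embedded_test selector, a roots glob that picks out a slice of the mongoebench JSON workloads, and the CombineBenchrunEmbeddedResults hook. As a rough sketch of how a '**' root pattern selects files — resmoke does this with its own glob handling, so fnmatch below is only a stand-in, and the sample paths are hypothetical:

import fnmatch

def matches_root(pattern, path):
    # Approximate resmoke's '**' handling: fnmatch's '*' is not path-aware,
    # so after collapsing '**/' it happily spans directory separators.
    return fnmatch.fnmatch(path, pattern.replace("**/", "*"))

# Hypothetical workload files under benchrun_embedded/.
candidates = [
    "benchrun_embedded/simple/aggregation_lookup.json",
    "benchrun_embedded/simple/insert_single_doc.json",
]
selected = [p for p in candidates
            if matches_root("benchrun_embedded/**/aggregation*.json", p)]
assert selected == ["benchrun_embedded/simple/aggregation_lookup.json"]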
diff --git a/buildscripts/resmokeconfig/suites/benchrun_embedded_commands.yml b/buildscripts/resmokeconfig/suites/benchrun_embedded_commands.yml
new file mode 100644
index 00000000000..c1812defd37
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/benchrun_embedded_commands.yml
@@ -0,0 +1,9 @@
+test_kind: benchrun_embedded_test
+
+selector:
+ roots:
+ - benchrun_embedded/**/commands*.json
+
+executor:
+ hooks:
+ - class: CombineBenchrunEmbeddedResults
diff --git a/buildscripts/resmokeconfig/suites/benchrun_embedded_insert.yml b/buildscripts/resmokeconfig/suites/benchrun_embedded_insert.yml
new file mode 100644
index 00000000000..ec25d3147c3
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/benchrun_embedded_insert.yml
@@ -0,0 +1,9 @@
+test_kind: benchrun_embedded_test
+
+selector:
+ roots:
+ - benchrun_embedded/**/insert*.json
+
+executor:
+ hooks:
+ - class: CombineBenchrunEmbeddedResults
diff --git a/buildscripts/resmokeconfig/suites/benchrun_embedded_misc.yml b/buildscripts/resmokeconfig/suites/benchrun_embedded_misc.yml
new file mode 100644
index 00000000000..5e0877699fb
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/benchrun_embedded_misc.yml
@@ -0,0 +1,18 @@
+test_kind: benchrun_embedded_test
+
+selector:
+ roots:
+ - benchrun_embedded/**/*.json
+ exclude_files:
+ - benchrun_embedded/**/aggregation*.json
+ - benchrun_embedded/**/commands*.json
+ - benchrun_embedded/**/insert*.json
+ - benchrun_embedded/**/mixed*.json
+ - benchrun_embedded/**/multi*.json
+ - benchrun_embedded/**/queries*.json
+ - benchrun_embedded/**/remove*.json
+ - benchrun_embedded/**/update*.json
+
+executor:
+ hooks:
+ - class: CombineBenchrunEmbeddedResults
diff --git a/buildscripts/resmokeconfig/suites/benchrun_embedded_mixed_and_multi.yml b/buildscripts/resmokeconfig/suites/benchrun_embedded_mixed_and_multi.yml
new file mode 100644
index 00000000000..d6909a10b53
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/benchrun_embedded_mixed_and_multi.yml
@@ -0,0 +1,10 @@
+test_kind: benchrun_embedded_test
+
+selector:
+ roots:
+ - benchrun_embedded/**/mixed*.json
+ - benchrun_embedded/**/multi*.json
+
+executor:
+ hooks:
+ - class: CombineBenchrunEmbeddedResults
diff --git a/buildscripts/resmokeconfig/suites/benchrun_embedded_queries.yml b/buildscripts/resmokeconfig/suites/benchrun_embedded_queries.yml
new file mode 100644
index 00000000000..5c4e1ed1fe6
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/benchrun_embedded_queries.yml
@@ -0,0 +1,9 @@
+test_kind: benchrun_embedded_test
+
+selector:
+ roots:
+ - benchrun_embedded/**/queries*.json
+
+executor:
+ hooks:
+ - class: CombineBenchrunEmbeddedResults
diff --git a/buildscripts/resmokeconfig/suites/benchrun_embedded_remove.yml b/buildscripts/resmokeconfig/suites/benchrun_embedded_remove.yml
new file mode 100644
index 00000000000..eecc24ff2b0
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/benchrun_embedded_remove.yml
@@ -0,0 +1,9 @@
+test_kind: benchrun_embedded_test
+
+selector:
+ roots:
+ - benchrun_embedded/**/remove*.json
+
+executor:
+ hooks:
+ - class: CombineBenchrunEmbeddedResults
diff --git a/buildscripts/resmokeconfig/suites/benchrun_embedded_update.yml b/buildscripts/resmokeconfig/suites/benchrun_embedded_update.yml
new file mode 100644
index 00000000000..3ea83564dd7
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/benchrun_embedded_update.yml
@@ -0,0 +1,9 @@
+test_kind: benchrun_embedded_test
+
+selector:
+ roots:
+ - benchrun_embedded/**/update*.json
+
+executor:
+ hooks:
+ - class: CombineBenchrunEmbeddedResults
diff --git a/buildscripts/resmokelib/config.py b/buildscripts/resmokelib/config.py
index 66753c389db..06d4fd9942e 100644
--- a/buildscripts/resmokelib/config.py
+++ b/buildscripts/resmokelib/config.py
@@ -26,6 +26,7 @@ MONGO_RUNNER_SUBDIR = "mongorunner"
# Default path for where to look for executables.
DEFAULT_DBTEST_EXECUTABLE = os.path.join(os.curdir, "dbtest")
DEFAULT_MONGO_EXECUTABLE = os.path.join(os.curdir, "mongo")
+DEFAULT_MONGOEBENCH_EXECUTABLE = os.path.join(os.curdir, "mongoebench")
DEFAULT_MONGOD_EXECUTABLE = os.path.join(os.curdir, "mongod")
DEFAULT_MONGOS_EXECUTABLE = os.path.join(os.curdir, "mongos")
@@ -53,6 +54,7 @@ DEFAULTS = {
"mongo_executable": None,
"mongod_executable": None,
"mongod_set_parameters": None,
+ "mongoebench_executable": None,
"mongos_executable": None,
"mongos_set_parameters": None,
"no_journal": False,
@@ -271,6 +273,9 @@ MONGOD_EXECUTABLE = None
# The --setParameter options passed to mongod.
MONGOD_SET_PARAMETERS = None
+# The path to the mongoebench executable used by resmoke.py.
+MONGOEBENCH_EXECUTABLE = None
+
# The path to the mongos executable used by resmoke.py.
MONGOS_EXECUTABLE = None
@@ -377,4 +382,5 @@ DEFAULT_INTEGRATION_TEST_LIST = "build/integration_tests.txt"
# External files or executables, used as suite selectors, that are created during the build and
# therefore might not be available when creating a test membership map.
EXTERNAL_SUITE_SELECTORS = (DEFAULT_BENCHMARK_TEST_LIST, DEFAULT_UNIT_TEST_LIST,
- DEFAULT_INTEGRATION_TEST_LIST, DEFAULT_DBTEST_EXECUTABLE)
+ DEFAULT_INTEGRATION_TEST_LIST, DEFAULT_DBTEST_EXECUTABLE,
+ DEFAULT_MONGOEBENCH_EXECUTABLE)
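
The new MONGOEBENCH_EXECUTABLE / DEFAULT_MONGOEBENCH_EXECUTABLE pair follows the same resolution pattern as the other executables: an explicit command-line path wins, otherwise the ./mongoebench default applies. A minimal sketch of that pattern; the helper mirrors buildscripts.resmokelib.utils.default_if_none and the /opt path is illustrative:

import os

DEFAULT_MONGOEBENCH_EXECUTABLE = os.path.join(os.curdir, "mongoebench")

def default_if_none(value, default):
    # Mirrors buildscripts.resmokelib.utils.default_if_none.
    return value if value is not None else default

# No --mongoebench flag given: fall back to ./mongoebench.
assert (default_if_none(None, DEFAULT_MONGOEBENCH_EXECUTABLE)
        == DEFAULT_MONGOEBENCH_EXECUTABLE)

# Flag given: the explicit path wins.
assert (default_if_none("/opt/mongo/bin/mongoebench", DEFAULT_MONGOEBENCH_EXECUTABLE)
        == "/opt/mongo/bin/mongoebench")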
diff --git a/buildscripts/resmokelib/parser.py b/buildscripts/resmokelib/parser.py
index 4798e9525dc..76a7376ded9 100644
--- a/buildscripts/resmokelib/parser.py
+++ b/buildscripts/resmokelib/parser.py
@@ -112,6 +112,10 @@ def _make_parser(): # pylint: disable=too-many-statements
" started by resmoke.py. The argument is specified as bracketed YAML -"
" i.e. JSON with support for single quoted and unquoted keys."))
+ parser.add_option("--mongoebench", dest="mongoebench_executable", metavar="PATH",
+ help=("The path to the mongoebench (benchrun embedded) executable for"
+ " resmoke.py to use."))
+
parser.add_option("--mongos", dest="mongos_executable", metavar="PATH",
help="The path to the mongos executable for resmoke.py to use.")
@@ -264,31 +268,33 @@ def _make_parser(): # pylint: disable=too-many-statements
evergreen_options.add_option("--versionId", dest="version_id", metavar="VERSION_ID",
help="Sets the version ID of the task.")
- benchmark_options = optparse.OptionGroup(parser, title="Benchmark test options",
- description="Options for running Benchmark tests")
+ benchmark_options = optparse.OptionGroup(
+ parser, title="Benchmark/Benchrun test options",
+ description="Options for running Benchmark/Benchrun tests")
parser.add_option_group(benchmark_options)
benchmark_options.add_option("--benchmarkFilter", type="string", dest="benchmark_filter",
metavar="BENCHMARK_FILTER",
- help="Regex to filter benchmark tests to run.")
+ help="Regex to filter Google benchmark tests to run.")
benchmark_options.add_option("--benchmarkListTests", dest="benchmark_list_tests",
action="store_true", metavar="BENCHMARK_LIST_TESTS",
- help="Lists all benchmark test configurations in each test file.")
+ help=("Lists all Google benchmark test configurations in each"
+ " test file."))
benchmark_min_time_help = (
- "Minimum time to run each benchmark test for. Use this option instead of "
+ "Minimum time to run each benchmark/benchrun test for. Use this option instead of "
"--benchmarkRepetitions to make a test run for a longer or shorter duration.")
benchmark_options.add_option("--benchmarkMinTimeSecs", type="int",
dest="benchmark_min_time_secs", metavar="BENCHMARK_MIN_TIME",
help=benchmark_min_time_help)
benchmark_repetitions_help = (
- "Set --benchmarkRepetitions=1 if you'd like to run the benchmark tests only once. By "
- "default, each test is run multiple times to provide statistics on the variance between "
- "runs; use --benchmarkMinTimeSecs if you'd like to run a test for a longer or shorter "
- "duration.")
+ "Set --benchmarkRepetitions=1 if you'd like to run the benchmark/benchrun tests only once."
+ " By default, each test is run multiple times to provide statistics on the variance"
+ " between runs; use --benchmarkMinTimeSecs if you'd like to run a test for a longer or"
+ " shorter duration.")
benchmark_options.add_option("--benchmarkRepetitions", type="int", dest="benchmark_repetitions",
metavar="BENCHMARK_REPETITIONS", help=benchmark_repetitions_help)
@@ -370,6 +376,7 @@ def _update_config_vars(values): # pylint: disable=too-many-statements
_config.MONGO_EXECUTABLE = _expand_user(config.pop("mongo_executable"))
_config.MONGOD_EXECUTABLE = _expand_user(config.pop("mongod_executable"))
_config.MONGOD_SET_PARAMETERS = config.pop("mongod_set_parameters")
+ _config.MONGOEBENCH_EXECUTABLE = _expand_user(config.pop("mongoebench_executable"))
_config.MONGOS_EXECUTABLE = _expand_user(config.pop("mongos_executable"))
_config.MONGOS_SET_PARAMETERS = config.pop("mongos_set_parameters")
_config.NO_JOURNAL = config.pop("no_journal")
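
In isolation, the new --mongoebench flag behaves like any other optparse option with a PATH destination: when unset it stays None, so the default from config.py applies later. A self-contained sketch, with a bare OptionParser standing in for resmoke's full _make_parser:

import optparse

parser = optparse.OptionParser()
parser.add_option("--mongoebench", dest="mongoebench_executable", metavar="PATH",
                  help="The path to the mongoebench executable for resmoke.py to use.")

# Flag present: the destination holds the given path.
options, _ = parser.parse_args(["--mongoebench", "/opt/mongo/bin/mongoebench"])
assert options.mongoebench_executable == "/opt/mongo/bin/mongoebench"

# Flag absent: the destination stays None and the config default applies later.
options, _ = parser.parse_args([])
assert options.mongoebench_executable is None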
diff --git a/buildscripts/resmokelib/selector.py b/buildscripts/resmokelib/selector.py
index d878710f1d7..197102b16b0 100644
--- a/buildscripts/resmokelib/selector.py
+++ b/buildscripts/resmokelib/selector.py
@@ -633,11 +633,11 @@ class _DbTestSelector(_Selector):
return test_files.get_tests()
-class _JsonSchemaTestSelectorConfig(_SelectorConfig):
+class _JsonTestSelectorConfig(_SelectorConfig):
"""_SelectorConfig subclass for json_schema_test tests."""
def __init__(self, roots, include_files=None, exclude_files=None):
- """Initialize _JsonSchemaTestSelectorConfig."""
+ """Initialize _JsonTestSelectorConfig."""
_SelectorConfig.__init__(self, roots=roots, include_files=include_files,
exclude_files=exclude_files)
@@ -676,10 +676,11 @@ _SELECTOR_REGISTRY = {
"cpp_integration_test": (_CppTestSelectorConfig, _CppTestSelector),
"cpp_unit_test": (_CppTestSelectorConfig, _CppTestSelector),
"benchmark_test": (_CppTestSelectorConfig, _CppTestSelector),
+ "benchrun_embedded_test": (_JsonTestSelectorConfig, _Selector),
"db_test": (_DbTestSelectorConfig, _DbTestSelector),
"fsm_workload_test": (_JSTestSelectorConfig, _JSTestSelector),
"parallel_fsm_workload_test": (_MultiJSTestSelectorConfig, _MultiJSTestSelector),
- "json_schema_test": (_JsonSchemaTestSelectorConfig, _Selector),
+ "json_schema_test": (_JsonTestSelectorConfig, _Selector),
"js_test": (_JSTestSelectorConfig, _JSTestSelector),
"multi_stmt_txn_passthrough": (_JSTestSelectorConfig, _JSTestSelector),
"py_test": (_PyTestCaseSelectorConfig, _Selector),
@@ -691,8 +692,7 @@ def filter_tests(test_kind, selector_config, test_file_explorer=_DEFAULT_TEST_FI
"""Filter the tests according to a specified configuration.
Args:
- test_kind: the test kind, one of 'cpp_integration_test', 'cpp_unit_test', 'db_test',
- 'json_schema_test', 'js_test'.
+ test_kind: the test kind, from _SELECTOR_REGISTRY.
selector_config: a dict containing the selector configuration.
test_file_explorer: the TestFileExplorer to use. Using a TestFileExplorer other than
the default one should not be needed except for mocking purposes.
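
With this registry entry, filter_tests("benchrun_embedded_test", ...) dispatches to the generic _Selector paired with the renamed _JsonTestSelectorConfig — the same pairing json_schema_test uses. A reduced model of the dispatch; the class bodies and constructor signatures are stubbed for illustration and simpler than the real ones in selector.py:

# Stub stand-ins for the real classes in buildscripts/resmokelib/selector.py.
class _JsonTestSelectorConfig(object):
    def __init__(self, roots, include_files=None, exclude_files=None):
        self.roots = roots
        self.include_files = include_files or []
        self.exclude_files = exclude_files or []

class _Selector(object):
    def __init__(self, selector_config):
        self.config = selector_config

_SELECTOR_REGISTRY = {
    "benchrun_embedded_test": (_JsonTestSelectorConfig, _Selector),
    "json_schema_test": (_JsonTestSelectorConfig, _Selector),
}

# Look up the (config, selector) pair for the new test kind.
config_class, selector_class = _SELECTOR_REGISTRY["benchrun_embedded_test"]
selector = selector_class(config_class(roots=["benchrun_embedded/**/insert*.json"]))
assert selector.config.roots == ["benchrun_embedded/**/insert*.json"]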
diff --git a/buildscripts/resmokelib/testing/hooks/combine_benchrun_embedded_results.py b/buildscripts/resmokelib/testing/hooks/combine_benchrun_embedded_results.py
new file mode 100644
index 00000000000..bf7af69244b
--- /dev/null
+++ b/buildscripts/resmokelib/testing/hooks/combine_benchrun_embedded_results.py
@@ -0,0 +1,150 @@
+"""Module for generating the test results file fed into the perf plugin."""
+
+from __future__ import absolute_import
+from __future__ import division
+
+import collections
+import datetime
+import glob
+import json
+import os
+
+from buildscripts.resmokelib import config as _config
+from buildscripts.resmokelib.testing.hooks import combine_benchmark_results as cbr
+
+
+class CombineBenchrunEmbeddedResults(cbr.CombineBenchmarkResults):
+ """CombineBenchrunEmbeddedResults class.
+
+ The CombineBenchrunEmbeddedResults hook combines test results from
+ individual benchrun embedded (mongoebench) report files into a single
+ file. This is useful for generating the JSON file fed into the
+ Evergreen performance visualization plugin.
+ """
+
+ DESCRIPTION = "Combine JSON results from embedded benchrun"
+
+ def before_test(self, test, test_report):
+ """Remove any existing mongoebench reports for this test."""
+ for bm_report in self._test_result_files(test):
+ os.remove(bm_report)
+
+ def after_test(self, test, test_report):
+ """Update test report."""
+ for bm_report in self._test_result_files(test):
+ test_name, thread_count = self._parse_report_name(bm_report)
+ with open(bm_report, "r") as report_file:
+ report_dict = json.load(report_file)
+ if test_name not in self.benchmark_reports:
+ self.benchmark_reports[test_name] = _BenchrunEmbeddedThreadsReport()
+ self.benchmark_reports[test_name].add_report(thread_count, report_dict)
+
+ def before_suite(self, test_report):
+ """Set suite start time."""
+ self.create_time = datetime.datetime.now()
+ # Remove any existing perf reports.
+ if self.report_file and os.path.isfile(self.report_file):
+ os.remove(self.report_file)
+
+ def _generate_perf_plugin_report(self):
+ """Format the data to look like a perf plugin report."""
+ perf_report = {
+ "start": self._strftime(self.create_time),
+ "end": self._strftime(self.end_time),
+ "errors": [], # There are no errors if we have gotten this far.
+ "results": []
+ }
+
+ for name, report in self.benchmark_reports.items():
+ test_report = {"name": name, "results": report.generate_perf_plugin_dict()}
+
+ perf_report["results"].append(test_report)
+
+ return perf_report
+
+ @staticmethod
+ def _test_result_files(test):
+ """Return a list of existing test result files based on the test.short_name()."""
+ return glob.glob("mongoebench[.]{}[.]*[.]json".format(test.short_name()))
+
+ @staticmethod
+ def _parse_report_name(report_path):
+ """Parse mongoebench report path and return test_name and thread_count.
+
+ The format of the mongoebench report file name is defined in
+ ../testing/testcases/benchrun_embedded_test.py
+ as mongoebench.<test_name>.<num threads>.<iteration num>.json
+ """
+ report_base = os.path.basename(report_path)
+ _, test_name, thread_count, _, _ = report_base.split(".")
+ return test_name, thread_count
+
+
+class _BenchrunEmbeddedThreadsReport(object):
+ """_BenchrunEmbeddedThreadsReport class.
+
+ Class representation of a report for all thread levels of a single
+ benchmark test. Each report is designed to correspond to one graph
+ in the Evergreen perf plugin.
+
+ A raw mongoebench report looks like the following:
+ {
+ "note" : "values per second",
+ "errCount" : { "$numberLong" : "0" },
+ "trapped" : "error: not implemented",
+ "insertLatencyAverageMicros" : 389.4926654182272,
+ "totalOps" : { "$numberLong" : "12816" },
+ "totalOps/s" : 2563.095938304905,
+ "findOne" : 0,
+ "insert" : 2563.095938304905,
+ "delete" : 0,
+ "update" : 0,
+ "query" : 0,
+ "command" : 0,
+ "findOnes" : { "$numberLong" : "0" },
+ "inserts" : { "$numberLong" : "12816" },
+ "deletes" : { "$numberLong" : "0" },
+ "updates" : { "$numberLong" : "0" },
+ "queries" : { "$numberLong" : "0" },
+ "commands" : { "$numberLong" : "0" }
+ }
+ """
+
+ def __init__(self):
+ # List of benchmark runs for each thread.
+ self.thread_benchmark_map = collections.defaultdict(list)
+
+ def add_report(self, thread_count, report):
+ """Add to report."""
+ self.thread_benchmark_map[str(thread_count)].append(report)
+
+ def generate_perf_plugin_dict(self):
+ """Generate perf plugin data points of the following format.
+
+ "1": {
+ "error_values": [
+ 0,
+ 0,
+ 0
+ ],
+ "ops_per_sec": 9552.108279243452,
+ "ops_per_sec_values": [
+ 9574.812658450564,
+ 9522.642340821469,
+ 9536.252775275878
+ ]
+ },
+ """
+
+ res = {}
+ for thread_count, reports in self.thread_benchmark_map.items():
+ thread_report = {"error_values": [], "ops_per_sec_values": []}
+
+ for report in reports:
+ thread_report["error_values"].append(report["errCount"]["$numberLong"])
+ thread_report["ops_per_sec_values"].append(report["totalOps/s"])
+ thread_report["ops_per_sec"] = sum(thread_report["ops_per_sec_values"]) / len(reports)
+
+ res[thread_count] = thread_report
+
+ return res
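
Two behaviors of this hook are easy to check in isolation: splitting a report filename into its test name and thread count, and averaging totalOps/s across repetitions the way generate_perf_plugin_dict does. A worked sketch with illustrative filenames and numbers:

import os

def parse_report_name(report_path):
    # Split mongoebench.<test_name>.<num threads>.<iteration num>.json.
    report_base = os.path.basename(report_path)
    _, test_name, thread_count, _, _ = report_base.split(".")
    return test_name, thread_count

name, threads = parse_report_name("reports/mongoebench.insert_single.4.0.json")
assert (name, threads) == ("insert_single", "4")

# Averaging mirrors generate_perf_plugin_dict: ops_per_sec is the mean of the
# per-repetition totalOps/s values at one thread level.
ops_per_sec_values = [9574.81, 9522.64, 9536.25]
ops_per_sec = sum(ops_per_sec_values) / len(ops_per_sec_values)
assert round(ops_per_sec, 2) == 9544.57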
diff --git a/buildscripts/resmokelib/testing/testcases/benchrun_embedded_test.py b/buildscripts/resmokelib/testing/testcases/benchrun_embedded_test.py
new file mode 100644
index 00000000000..5e9e0a09c08
--- /dev/null
+++ b/buildscripts/resmokelib/testing/testcases/benchrun_embedded_test.py
@@ -0,0 +1,96 @@
+"""The unittest.TestCase for tests using benchrun embedded (mongoebench)."""
+
+from __future__ import absolute_import
+
+import os
+
+from buildscripts.resmokelib import config as _config
+from buildscripts.resmokelib import core
+from buildscripts.resmokelib import parser
+from buildscripts.resmokelib import utils
+from buildscripts.resmokelib.testing.testcases import interface
+
+
+class BenchrunEmbeddedTestCase(interface.ProcessTestCase):
+ """A Benchrun embedded test to execute."""
+
+ REGISTERED_NAME = "benchrun_embedded_test"
+
+ def __init__(self, logger, mongoebench_config_file, program_options=None):
+ """Initialize the BenchrunEmbeddedTestCase with the executable to run."""
+
+ interface.ProcessTestCase.__init__(self, logger, "Benchmark embedded test",
+ mongoebench_config_file)
+ parser.validate_benchmark_options()
+
+ self.benchrun_config_file = mongoebench_config_file
+
+ # Command line options override the YAML configuration.
+ self.benchrun_executable = utils.default_if_none(_config.MONGOEBENCH_EXECUTABLE,
+ _config.DEFAULT_MONGOEBENCH_EXECUTABLE)
+ self.benchrun_repetitions = utils.default_if_none(_config.BENCHMARK_REPETITIONS,
+ _config.DEFAULT_BENCHMARK_REPETITIONS)
+ self.suite_benchrun_options = program_options
+ self.benchrun_threads = 1
+ if program_options and "threads" in program_options:
+ self.benchrun_threads = program_options["threads"]
+ self.benchrun_options = {}
+
+ # Set the dbpath.
+ dbpath = utils.default_if_none(_config.DBPATH_PREFIX, _config.DEFAULT_DBPATH_PREFIX)
+ self.dbpath = os.path.join(dbpath, "mongoebench")
+
+ def configure(self, fixture, *args, **kwargs):
+ """Configure BenchrunEmbeddedTestCase."""
+ interface.ProcessTestCase.configure(self, fixture, *args, **kwargs)
+
+ # 1. Set the default benchmark options.
+ benchrun_options = {"time": _config.DEFAULT_BENCHMARK_MIN_TIME.total_seconds()}
+
+ # 2. Override Benchmark options with options set through `program_options` in the suite
+ # configuration.
+ suite_benchrun_options = utils.default_if_none(self.suite_benchrun_options, {})
+ benchrun_options.update(suite_benchrun_options)
+
+ # 3. Override Benchmark options with options set through resmoke's command line.
+ resmoke_benchrun_options = {"dbpath": self.dbpath, "time": _config.BENCHMARK_MIN_TIME}
+
+ for key, value in resmoke_benchrun_options.items():
+ if value is not None:
+ # 4. Sanitize options before passing them to Benchmark's command line.
+ if key == "time":
+ value = value.total_seconds()
+ benchrun_options[key] = value
+
+ self.benchrun_options = benchrun_options
+
+ # Create the dbpath.
+ self._clear_dbpath()
+ try:
+ os.makedirs(self.dbpath)
+ except os.error:
+ # Directory already exists.
+ pass
+
+ def run_test(self):
+ """Run the test for specified number of iterations."""
+ for it_num in xrange(self.benchrun_repetitions):
+ # Set the output file for each iteration.
+ self.benchrun_options["output"] = self._report_name(it_num)
+ interface.ProcessTestCase.run_test(self)
+
+ def _clear_dbpath(self):
+ utils.rmtree(self.dbpath, ignore_errors=True)
+
+ def _report_name(self, iter_num):
+ """Return the constructed report name.
+
+ The report name is of the form mongoebench.<test_name>.<num threads>.<iteration num>.json.
+ """
+ return "mongoebench.{}.{}.{}.json".format(self.short_name(), self.benchrun_threads,
+ iter_num)
+
+ def _make_process(self):
+ return core.programs.generic_program(self.logger,
+ [self.benchrun_executable, self.benchrun_config_file],
+ **self.benchrun_options)
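
configure() above layers options in three passes: built-in defaults, then the suite's program_options, then resmoke command-line values (where None means "not set"). A compact sketch of that precedence using plain dicts; the 300-second default is an illustrative placeholder, not the actual DEFAULT_BENCHMARK_MIN_TIME value:

# Layer 1: built-in defaults (300 stands in for
# DEFAULT_BENCHMARK_MIN_TIME.total_seconds()).
benchrun_options = {"time": 300}

# Layer 2: program_options from the suite YAML configuration.
suite_options = {"threads": 4, "time": 60}
benchrun_options.update(suite_options)

# Layer 3: resmoke command-line values; None means "not set, keep layer 2".
resmoke_options = {"dbpath": "/data/db/mongoebench", "time": None}
for key, value in resmoke_options.items():
    if value is not None:
        benchrun_options[key] = value

assert benchrun_options == {"time": 60, "threads": 4, "dbpath": "/data/db/mongoebench"}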
diff --git a/buildscripts/tests/resmokelib/testing/hooks/test_combine_benchrun_embedded_results.py b/buildscripts/tests/resmokelib/testing/hooks/test_combine_benchrun_embedded_results.py
new file mode 100755
index 00000000000..06744381706
--- /dev/null
+++ b/buildscripts/tests/resmokelib/testing/hooks/test_combine_benchrun_embedded_results.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+"""Unit tests for the resmokelib.testing.hooks.combine_benchrun_embedded_results module."""
+
+from __future__ import absolute_import
+
+import datetime
+import unittest
+
+import mock
+
+import buildscripts.resmokelib.testing.hooks.combine_benchrun_embedded_results as cber
+
+# pylint: disable=missing-docstring,protected-access,attribute-defined-outside-init
+
+_BM_REPORT_INSERT_1 = {
+ "note": "values per second", "errCount": {"$numberLong": "0"},
+ "trapped": "error: not implemented", "insertLatencyAverageMicros": 389.4926654182272,
+ "totalOps": {"$numberLong": "12816"}, "totalOps/s": 2563.095938304905, "findOne": 0,
+ "insert": 2563.095938304905, "delete": 0, "update": 0, "query": 0, "command": 0, "findOnes": {
+ "$numberLong": "0"
+ }, "inserts": {"$numberLong": "12816"}, "deletes": {"$numberLong": "0"}, "updates": {
+ "$numberLong": "0"
+ }, "queries": {"$numberLong": "0"}, "commands": {"$numberLong": "0"}
+}
+
+_BM_REPORT_INSERT_2 = {
+ "note": "values per second", "errCount": {"$numberLong": "0"},
+ "trapped": "error: not implemented", "insertLatencyAverageMicros": 2563.095938304905,
+ "totalOps": {"$numberLong": "7404"}, "totalOps/s": 2409.05, "findOne": 0, "insert": 2409.05,
+ "delete": 0, "update": 0, "query": 0, "command": 0, "findOnes": {"$numberLong": "0"},
+ "inserts": {"$numberLong": "7404"}, "deletes": {"$numberLong": "0"}, "updates": {
+ "$numberLong": "0"
+ }, "queries": {"$numberLong": "0"}, "commands": {"$numberLong": "0"}
+}
+
+_BM_REPORT_DELETE = {
+ "note": "values per second", "errCount": {"$numberLong": "0"},
+ "trapped": "error: not implemented", "insertLatencyAverageMicros": "1234.56", "totalOps": {
+ "$numberLong": "2345"
+ }, "totalOps/s": 1234.56, "findOne": 0, "insert": 0, "delete": 1234.56, "update": 0, "query": 0,
+ "command": 0, "findOnes": {"$numberLong": "0"}, "inserts": {"$numberLong": "0"}, "deletes": {
+ "$numberLong": "2345"
+ }, "updates": {"$numberLong": "0"}, "queries": {"$numberLong": "0"},
+ "commands": {"$numberLong": "0"}
+}
+
+_BM_REPORT_UPDATE = {
+ "note": "values per second", "errCount": {"$numberLong": "0"},
+ "trapped": "error: not implemented", "insertLatencyAverageMicros": 654.321, "totalOps": {
+ "$numberLong": "4521"
+ }, "totalOps/s": 4521.00, "findOne": 0, "insert": 0, "delete": 0, "update": 4521.00, "query": 0,
+ "command": 0, "findOnes": {"$numberLong": "0"}, "inserts": {"$numberLong": "0"}, "deletes": {
+ "$numberLong": "0"
+ }, "updates": {"$numberLong": "4521"}, "queries": {"$numberLong": "0"},
+ "commands": {"$numberLong": "0"}
+}
+
+_BM_REPORT_MULTI = {
+ "note": "values per second", "errCount": {"$numberLong": "0"},
+ "trapped": "error: not implemented", "insertLatencyAverageMicros": 111.111, "totalOps": {
+ "$numberLong": "11532"
+ }, "totalOps/s": 5766.00, "findOne": 0, "insert": 2490.00, "delete": 0, "update": 9042.00,
+ "query": 0, "command": 0, "findOnes": {"$numberLong": "0"}, "inserts": {
+ "$numberLong": "2490.00"
+ }, "deletes": {"$numberLong": "0"}, "updates": {"$numberLong": "9042"},
+ "queries": {"$numberLong": "0"}, "commands": {"$numberLong": "0"}
+}
+
+_BM_ALL_REPORTS = [
+ _BM_REPORT_INSERT_1, _BM_REPORT_INSERT_2, _BM_REPORT_DELETE, _BM_REPORT_UPDATE, _BM_REPORT_MULTI
+]
+
+# 12/31/2999 @ 11:59pm (UTC)
+_START_TIME = 32503679999
+
+# 01/01/3000 @ 12:00am (UTC)
+_END_TIME = 32503680000
+
+
+class CombineBenchrunEmbeddedResultsFixture(unittest.TestCase):
+
+ # Mock the hook's parent class because we're testing only functionality of this hook and
+ # not anything related to or inherited from the parent class.
+ @mock.patch("buildscripts.resmokelib.testing.hooks.interface.Hook", autospec=True)
+ def setUp(self, MockHook): # pylint: disable=arguments-differ,unused-argument
+ self.cber_hook = cber.CombineBenchrunEmbeddedResults(None, None)
+ self.cber_hook.create_time = datetime.datetime.utcfromtimestamp(_START_TIME)
+ self.cber_hook.end_time = datetime.datetime.utcfromtimestamp(_END_TIME)
+
+
+class TestCombineBenchmarkResults(CombineBenchrunEmbeddedResultsFixture):
+ def _setup_reports(self, reports, test_name, num_threads):
+ self.total_ops_per_sec = 0
+ self.num_tests = len(reports)
+ self.cber_hook.benchmark_reports[test_name] = cber._BenchrunEmbeddedThreadsReport()
+ for rep in reports:
+ self.cber_hook.benchmark_reports[test_name].add_report(num_threads, rep)
+ self.total_ops_per_sec += rep["totalOps/s"]
+ self.ops_per_sec = self.total_ops_per_sec / self.num_tests
+ self.report = self.cber_hook._generate_perf_plugin_report()
+
+ def test_generate_one_report(self):
+ test_name = "test_cber1"
+ num_threads = "2"
+ self._setup_reports([_BM_REPORT_MULTI], test_name, num_threads)
+ report_0 = self.report["results"][0]
+ self.assertEqual(report_0["name"], test_name)
+ self.assertEqual(report_0["results"][str(num_threads)]["ops_per_sec"], self.ops_per_sec)
+
+ def test_generate_all_reports(self):
+ test_name = "test_cber2"
+ thread_num = "1"
+ self._setup_reports(_BM_ALL_REPORTS, test_name, thread_num)
+ self.assertEqual(len(self.report.keys()), 4)
+ report_0 = self.report["results"][0]
+ self.assertEqual(report_0["name"], test_name)
+ self.assertEqual(report_0["results"][thread_num]["ops_per_sec"], self.ops_per_sec)
+ self.assertEqual(self.report["start"], "2999-12-31T23:59:59Z")
+ self.assertEqual(self.report["end"], "3000-01-01T00:00:00Z")
+
+ def test_parse_report_name(self):
+ test_name = "test1"
+ thread_num = "4"
+ file_name = "mongoebench.{}.{}.iter0.json".format(test_name, thread_num)
+ report_name, report_threads = self.cber_hook._parse_report_name(file_name)
+ self.assertEqual(report_name, test_name)
+ self.assertEqual(report_threads, thread_num)
+
+
+class TestBenchrunEmbeddedThreadsReport(CombineBenchrunEmbeddedResultsFixture):
+ def test_generate_single_thread_perf_plugin_dict(self):
+ thread_report = cber._BenchrunEmbeddedThreadsReport()
+ thread_num = "1"
+ thread_report.add_report(thread_num, _BM_REPORT_INSERT_1)
+ perf_report = thread_report.generate_perf_plugin_dict()
+ self.assertEqual(len(perf_report.keys()), 1)
+ self.assertEqual(perf_report[thread_num]["ops_per_sec"], _BM_REPORT_INSERT_1["totalOps/s"])
+ self.assertEqual(len(perf_report[thread_num]["ops_per_sec_values"]), 1)
+
+ thread_report.add_report(thread_num, _BM_REPORT_INSERT_2)
+ perf_report = thread_report.generate_perf_plugin_dict()
+ self.assertEqual(len(perf_report.keys()), 1)
+ ops_per_sec = (_BM_REPORT_INSERT_1["totalOps/s"] + _BM_REPORT_INSERT_2["totalOps/s"]) / 2
+ self.assertEqual(perf_report[thread_num]["ops_per_sec"], ops_per_sec)
+ self.assertEqual(len(perf_report[thread_num]["ops_per_sec_values"]), 2)
+
+ def test_generate_multi_thread_perf_plugin_dict(self):
+ thread_report = cber._BenchrunEmbeddedThreadsReport()
+ thread_num = "1"
+ thread_report.add_report(thread_num, _BM_REPORT_INSERT_1)
+ perf_report = thread_report.generate_perf_plugin_dict()
+ self.assertEqual(len(perf_report.keys()), 1)
+ self.assertEqual(perf_report[thread_num]["ops_per_sec"], _BM_REPORT_INSERT_1["totalOps/s"])
+ self.assertEqual(len(perf_report[thread_num]["ops_per_sec_values"]), 1)
+
+ thread_num = "2"
+ thread_report.add_report(thread_num, _BM_REPORT_INSERT_2)
+ perf_report = thread_report.generate_perf_plugin_dict()
+ self.assertEqual(len(perf_report.keys()), 2)
+ self.assertEqual(perf_report["1"]["ops_per_sec"], _BM_REPORT_INSERT_1["totalOps/s"])
+ self.assertEqual(len(perf_report["1"]["ops_per_sec_values"]), 1)
+ self.assertEqual(perf_report[thread_num]["ops_per_sec"], _BM_REPORT_INSERT_2["totalOps/s"])
+ self.assertEqual(len(perf_report[thread_num]["ops_per_sec_values"]), 1)
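
Assuming the mock package is installed and the repository root is on the import path, these unit tests can be loaded and run directly; a minimal sketch:

import unittest

# Load the hook's unit tests by dotted module name and run them.
suite = unittest.defaultTestLoader.loadTestsFromName(
    "buildscripts.tests.resmokelib.testing.hooks."
    "test_combine_benchrun_embedded_results")
unittest.TextTestRunner(verbosity=2).run(suite)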
diff --git a/etc/evergreen.yml b/etc/evergreen.yml
index 83e3ffd1869..6e937dc4d12 100644
--- a/etc/evergreen.yml
+++ b/etc/evergreen.yml
@@ -175,6 +175,18 @@ variables:
mongod_options: --mongodUsablePorts ${standard_port} ${secret_port} --dbPath=${db_path} --logPath=${log_path}
mongod_extra_options: --mongodOptions=\"--setParameter enableTestCommands=1 --setParameter logComponentVerbosity='{storage:{recovery:2}}'\"
+- &benchrun_embedded
+ name: benchrun_embedded
+ execution_tasks:
+ - benchrun_embedded_aggregation
+ - benchrun_embedded_commands
+ - benchrun_embedded_insert
+ - benchrun_embedded_misc
+ - benchrun_embedded_mixed_and_multi
+ - benchrun_embedded_queries
+ - benchrun_embedded_remove
+ - benchrun_embedded_update
+
- &replica_sets_auth
name: replica_sets_auth
execution_tasks:
@@ -766,6 +778,15 @@ functions:
bucket: mciuploads
extract_to: src
+ "fetch benchmark embedded files" : &fetch_benchrun_embedded_files
+ command: s3.get
+ params:
+ aws_key: ${aws_key}
+ aws_secret: ${aws_secret}
+ remote_file: ${project}/benchrun_embedded/benchrun_json_files.tgz
+ bucket: mciuploads
+ extract_to: src/benchrun_embedded
+
"get buildnumber" : &get_buildnumber
command: keyval.inc
params:
@@ -1197,6 +1218,13 @@ functions:
- *set_up_credentials
- *fetch_benchmarks
+ "do benchmark embedded setup" :
+ - *git_get_project
+ - *fetch_artifacts
+ - *get_buildnumber
+ - *set_up_credentials
+ - *fetch_benchrun_embedded_files
+
"set up virtualenv" :
command: shell.exec
type: test
@@ -3215,6 +3243,7 @@ timeout:
test_lifecycle_excluded_tasks:
- burn_in_tests
- compile*
+- benchmarks*
- dbtest*
- idl_tests
- integration*
@@ -3953,6 +3982,86 @@ tasks:
run_multiple_jobs: false
- func: "send benchmark results"
+- <<: *task_template
+ name: benchrun_embedded_aggregation
+ commands:
+ - func: "do benchmark embedded setup"
+ - func: "run tests"
+ vars:
+ resmoke_args: --suites=benchrun_embedded_aggregation
+ run_multiple_jobs: false
+ - func: "send benchmark results"
+
+- <<: *task_template
+ name: benchrun_embedded_commands
+ commands:
+ - func: "do benchmark embedded setup"
+ - func: "run tests"
+ vars:
+ resmoke_args: --suites=benchrun_embedded_commands
+ run_multiple_jobs: false
+ - func: "send benchmark results"
+
+- <<: *task_template
+ name: benchrun_embedded_insert
+ commands:
+ - func: "do benchmark embedded setup"
+ - func: "run tests"
+ vars:
+ resmoke_args: --suites=benchrun_embedded_insert
+ run_multiple_jobs: false
+ - func: "send benchmark results"
+
+- <<: *task_template
+ name: benchrun_embedded_misc
+ commands:
+ - func: "do benchmark embedded setup"
+ - func: "run tests"
+ vars:
+ resmoke_args: --suites=benchrun_embedded_misc
+ run_multiple_jobs: false
+ - func: "send benchmark results"
+
+- <<: *task_template
+ name: benchrun_embedded_mixed_and_multi
+ commands:
+ - func: "do benchmark embedded setup"
+ - func: "run tests"
+ vars:
+ resmoke_args: --suites=benchrun_embedded_mixed_and_multi
+ run_multiple_jobs: false
+ - func: "send benchmark results"
+
+- <<: *task_template
+ name: benchrun_embedded_queries
+ commands:
+ - func: "do benchmark embedded setup"
+ - func: "run tests"
+ vars:
+ resmoke_args: --suites=benchrun_embedded_queries
+ run_multiple_jobs: false
+ - func: "send benchmark results"
+
+- <<: *task_template
+ name: benchrun_embedded_remove
+ commands:
+ - func: "do benchmark embedded setup"
+ - func: "run tests"
+ vars:
+ resmoke_args: --suites=benchrun_embedded_remove
+ run_multiple_jobs: false
+ - func: "send benchmark results"
+
+- <<: *task_template
+ name: benchrun_embedded_update
+ commands:
+ - func: "do benchmark embedded setup"
+ - func: "run tests"
+ vars:
+ resmoke_args: --suites=benchrun_embedded_update
+ run_multiple_jobs: false
+ - func: "send benchmark results"
+
- <<: *run_jepsen_template
name: jepsen_register_findAndModify_WT
commands:
@@ -12429,6 +12538,7 @@ buildvariants:
build_mongoreplay: true
additional_targets: mongoebench mongoed
display_tasks:
+ - *benchrun_embedded
- *unittests
tasks:
- name: compile_all_run_unittests_TG
@@ -12439,6 +12549,30 @@ buildvariants:
- name: aggregation_facet_unwind_passthrough
- name: auth
- name: concurrency
+ - name: benchrun_embedded_aggregation
+ distros:
+ - centos6-perf
+ - name: benchrun_embedded_commands
+ distros:
+ - centos6-perf
+ - name: benchrun_embedded_insert
+ distros:
+ - centos6-perf
+ - name: benchrun_embedded_misc
+ distros:
+ - centos6-perf
+ - name: benchrun_embedded_mixed_and_multi
+ distros:
+ - centos6-perf
+ - name: benchrun_embedded_queries
+ distros:
+ - centos6-perf
+ - name: benchrun_embedded_remove
+ distros:
+ - centos6-perf
+ - name: benchrun_embedded_update
+ distros:
+ - centos6-perf
- name: dbtest
- name: disk_mobile
- name: failpoints