author      Robert Guo <robert.guo@10gen.com>    2018-02-23 13:47:29 -0500
committer   Robert Guo <robert.guo@10gen.com>    2018-04-26 09:28:43 -0400
commit      ea6f63582215843b984ba1ce0d25174ee85cc84d (patch)
tree        120c1cb2890f58273e4b4406de2e380f46ca5a0a /buildscripts
parent      cbf74aafb6eb8f256e24a014273cf275872ab941 (diff)
download    mongo-ea6f63582215843b984ba1ce0d25174ee85cc84d.tar.gz
SERVER-33201 add resmoke.py testcase for Benchmark tests
(cherry picked from commit a47c30b73e686fb4b5743a969e4c79386bd26c7b)
Diffstat (limited to 'buildscripts')

 -rw-r--r--  buildscripts/resmokeconfig/suites/benchmarks.yml             |  8
 -rw-r--r--  buildscripts/resmokelib/config.py                            | 61
 -rw-r--r--  buildscripts/resmokelib/parser.py                            | 89
 -rw-r--r--  buildscripts/resmokelib/testing/testcases/benchmark_test.py  | 68

 4 files changed, 197 insertions(+), 29 deletions(-)
diff --git a/buildscripts/resmokeconfig/suites/benchmarks.yml b/buildscripts/resmokeconfig/suites/benchmarks.yml
new file mode 100644
index 00000000000..9537e51cedc
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/benchmarks.yml
@@ -0,0 +1,8 @@
+test_kind: benchmark_test
+
+selector:
+  root: build/benchmarks.txt
+  # TODO: SERVER-33203 Add path glob for canary tests.
+
+executor:
+  config: {}
diff --git a/buildscripts/resmokelib/config.py b/buildscripts/resmokelib/config.py
index c3e58c80aa3..48a9fa452a0 100644
--- a/buildscripts/resmokelib/config.py
+++ b/buildscripts/resmokelib/config.py
@@ -5,14 +5,25 @@ Configuration options for resmoke.py.
 from __future__ import absolute_import

 import collections
+import datetime
 import itertools
-import os
 import os.path
 import time

+# Subdirectory under the dbpath prefix that contains directories with data files of mongod's started
+# by resmoke.py.
+FIXTURE_SUBDIR = "resmoke"
+
+# Subdirectory under the dbpath prefix that contains directories with data files of mongod's started
+# by individual tests.
+MONGO_RUNNER_SUBDIR = "mongorunner"
+

 ##
-# Default values.
+# Default values. There are two types of default values: "DEFAULT_" prefixed module variables,
+# and values in the "DEFAULTS" dictionary. The former is used to set the default value manually.
+# (e.g. if the default value needs to be reconciled with suite-level configuration)
+# The latter is set automatically as part of resmoke's option parsing on startup.
 ##

 # Default path for where to look for executables.
@@ -21,18 +32,13 @@ DEFAULT_MONGO_EXECUTABLE = os.path.join(os.curdir, "mongo")
 DEFAULT_MONGOD_EXECUTABLE = os.path.join(os.curdir, "mongod")
 DEFAULT_MONGOS_EXECUTABLE = os.path.join(os.curdir, "mongos")

+DEFAULT_BENCHMARK_REPETITIONS = 3
+DEFAULT_BENCHMARK_MIN_TIME = datetime.timedelta(seconds=5)
+
 # Default root directory for where resmoke.py puts directories containing data files of mongod's it
 # starts, as well as those started by individual tests.
 DEFAULT_DBPATH_PREFIX = os.path.normpath("/data/db")

-# Subdirectory under the dbpath prefix that contains directories with data files of mongod's started
-# by resmoke.py.
-FIXTURE_SUBDIR = "resmoke"
-
-# Subdirectory under the dbpath prefix that contains directories with data files of mongod's started
-# by individual tests.
-MONGO_RUNNER_SUBDIR = "mongorunner"
-
 # Names below correspond to how they are specified via the command line or in the options YAML file.
 DEFAULTS = {
     "base_port": 20000,
@@ -40,11 +46,8 @@ DEFAULTS = {
     "continue_on_failure": False,
     "dbpath_prefix": None,
     "dbtest_executable": None,
-    "distro_id": None,
     "dry_run": None,
     "exclude_with_any_tags": None,
-    "execution_number": 0,
-    "git_revision": None,
     "include_with_any_tags": None,
     "jobs": 1,
     "mongo_executable": None,
@@ -54,9 +57,7 @@ DEFAULTS = {
     "mongos_set_parameters": None,
     "no_journal": False,
     "num_clients_per_fixture": 1,
-    "patch_build": False,
     "prealloc_journal": None,  # Default is set on the commandline.
-    "project_name": "mongodb-mongo-master",
     "repeat": 1,
     "report_failure_status": "fail",
     "report_file": None,
@@ -71,15 +72,29 @@ DEFAULTS = {
     "storage_engine": None,
     "storage_engine_cache_size_gb": None,
     "tag_file": None,
+    "transport_layer": None,
+
+    # Evergreen options.
+    "distro_id": None,
+    "execution_number": 0,
+    "git_revision": None,
+    "patch_build": False,
+    "project_name": "mongodb-mongo-master",
     "task_id": None,
     "task_name": None,
-    "transport_layer": None,
     "variant_name": None,
+
+    # WiredTiger options.
     "wt_coll_config": None,
     "wt_engine_config": None,
-    "wt_index_config": None
-}
+    "wt_index_config": None,
+    # Benchmark options.
+    "benchmark_filter": None,
+    "benchmark_list_tests": None,
+    "benchmark_min_time_secs": None,
+    "benchmark_repetitions": None
+}


 _SuiteOptions = collections.namedtuple("_SuiteOptions", [
     "description",
@@ -164,7 +179,6 @@ class SuiteOptions(_SuiteOptions):
 SuiteOptions.ALL_INHERITED = SuiteOptions(**dict(zip(SuiteOptions._fields,
                                                      itertools.repeat(SuiteOptions.INHERIT))))

-
 ##
 # Variables that are set by the user at the command line or with --options.
 ##
@@ -301,10 +315,19 @@ WT_ENGINE_CONFIG = None

 # WiredTiger index configuration settings.
 WT_INDEX_CONFIG = None

+# Benchmark options that map to Google Benchmark options when converted to lowercase.
+BENCHMARK_FILTER = None
+BENCHMARK_LIST_TESTS = None
+BENCHMARK_MIN_TIME = None
+BENCHMARK_REPETITIONS = None
+
 ##
 # Internally used configuration options that aren't exposed to the user
 ##

+# Benchmark options set internally by resmoke.py
+BENCHMARK_OUT_FORMAT = "json"
+
 # Default sort order for test execution. Will only be changed if --suites wasn't specified.
 ORDER_TESTS_BY_NAME = True
diff --git a/buildscripts/resmokelib/parser.py b/buildscripts/resmokelib/parser.py
index edd27491f1d..642ba4f8f34 100644
--- a/buildscripts/resmokelib/parser.py
+++ b/buildscripts/resmokelib/parser.py
@@ -7,6 +7,8 @@ from __future__ import absolute_import
 import collections
 import os
 import os.path
+
+import datetime
 import optparse

 from . import config as _config
@@ -180,9 +182,9 @@ def parse_command_line():
     parser.add_option("--storageEngine", dest="storage_engine", metavar="ENGINE",
                       help="The storage engine used by dbtests and jstests.")

-    parser.add_option("--storageEngineCacheSizeGB", dest="storage_engine_cache_size",
+    parser.add_option("--storageEngineCacheSizeGB", dest="storage_engine_cache_size_gb",
                       metavar="CONFIG", help="Set the storage engine cache size configuration"
-                      " setting for all mongod's.")
+                      " setting for all mongod's.")

     parser.add_option("--tagFile", dest="tag_file", metavar="OPTIONS",
                       help="A YAML file that associates tests and tags.")
@@ -225,6 +227,39 @@ def parse_command_line():
                            help=("Set the name of the Evergreen build variant running the"
                                  " tests."))

+    benchmark_options = optparse.OptionGroup(
+        parser,
+        title="Benchmark test options",
+        description="Options for running Benchmark tests"
+    )
+
+    parser.add_option_group(benchmark_options)
+
+    benchmark_options.add_option("--benchmarkFilter", type="string", dest="benchmark_filter",
+                                 metavar="BENCHMARK_FILTER",
+                                 help="Regex to filter benchmark tests to run.")
+
+    benchmark_options.add_option("--benchmarkListTests", dest="benchmark_list_tests",
+                                 action="store_true",
+                                 metavar="BENCHMARK_LIST_TESTS",
+                                 help="Lists all benchmark test configurations in each test file.")
+
+    benchmark_min_time_help = (
+        "Minimum time to run each benchmark test for. Use this option instead of "
+        "--benchmarkRepetitions to make a test run for a longer or shorter duration.")
+    benchmark_options.add_option("--benchmarkMinTimeSecs", type="int",
+                                 dest="benchmark_min_time_secs", metavar="BENCHMARK_MIN_TIME",
+                                 help=benchmark_min_time_help)
+
+    benchmark_repetitions_help = (
+        "Set --benchmarkRepetitions=1 if you'd like to run the benchmark tests only once. By "
+        "default, each test is run multiple times to provide statistics on the variance between "
+        "runs; use --benchmarkMinTimeSecs if you'd like to run a test for a longer or shorter "
+        "duration.")
+    benchmark_options.add_option("--benchmarkRepetitions", type="int", dest="benchmark_repetitions",
+                                 metavar="BENCHMARK_REPETITIONS",
+                                 help=benchmark_repetitions_help)
+
     parser.set_defaults(logger_file="console",
                         dry_run="off",
                         find_suites=False,
@@ -254,6 +289,28 @@ def validate_options(parser, options, args):
                      .format(options.executor_file, " ".join(args)))


+def validate_benchmark_options():
+    """
+    Some options are incompatible with benchmark test suites, we error out early if any of
+    these options are specified.
+
+    :return: None
+    """
+
+    if _config.REPEAT > 1:
+        raise optparse.OptionValueError(
+            "--repeat cannot be used with benchmark tests. Please use --benchmarkMinTimeSecs to "
+            "increase the runtime of a single benchmark configuration.")
+
+    if _config.JOBS > 1:
+        raise optparse.OptionValueError(
+            "--jobs=%d cannot be used for benchmark tests. Parallel jobs affect CPU cache access "
+            "patterns and cause additional context switching, which lead to inaccurate benchmark "
+            "results. Please use --jobs=1"
+            % _config.JOBS
+        )
+
+
 def get_logging_config(values):
     return _get_logging_config(values.logger_file)

@@ -275,14 +332,6 @@ def update_config_vars(values):
     _config.DBPATH_PREFIX = _expand_user(config.pop("dbpath_prefix"))
     _config.DBTEST_EXECUTABLE = _expand_user(config.pop("dbtest_executable"))
     _config.DRY_RUN = config.pop("dry_run")
-    _config.EVERGREEN_DISTRO_ID = config.pop("distro_id")
-    _config.EVERGREEN_EXECUTION = config.pop("execution_number")
-    _config.EVERGREEN_PATCH_BUILD = config.pop("patch_build")
-    _config.EVERGREEN_PROJECT_NAME = config.pop("project_name")
-    _config.EVERGREEN_REVISION = config.pop("git_revision")
-    _config.EVERGREEN_TASK_ID = config.pop("task_id")
-    _config.EVERGREEN_TASK_NAME = config.pop("task_name")
-    _config.EVERGREEN_VARIANT_NAME = config.pop("variant_name")
     _config.EXCLUDE_WITH_ANY_TAGS = _tags_from_list(config.pop("exclude_with_any_tags"))
     _config.FAIL_FAST = not config.pop("continue_on_failure")
     _config.INCLUDE_WITH_ANY_TAGS = _tags_from_list(config.pop("include_with_any_tags"))
@@ -307,10 +356,30 @@ def update_config_vars(values):
     _config.STORAGE_ENGINE_CACHE_SIZE = config.pop("storage_engine_cache_size_gb")
     _config.TAG_FILE = config.pop("tag_file")
     _config.TRANSPORT_LAYER = config.pop("transport_layer")
+
+    # Evergreen options.
+    _config.EVERGREEN_DISTRO_ID = config.pop("distro_id")
+    _config.EVERGREEN_EXECUTION = config.pop("execution_number")
+    _config.EVERGREEN_PATCH_BUILD = config.pop("patch_build")
+    _config.EVERGREEN_PROJECT_NAME = config.pop("project_name")
+    _config.EVERGREEN_REVISION = config.pop("git_revision")
+    _config.EVERGREEN_TASK_ID = config.pop("task_id")
+    _config.EVERGREEN_TASK_NAME = config.pop("task_name")
+    _config.EVERGREEN_VARIANT_NAME = config.pop("variant_name")
+
+    # Wiredtiger options.
     _config.WT_COLL_CONFIG = config.pop("wt_coll_config")
     _config.WT_ENGINE_CONFIG = config.pop("wt_engine_config")
     _config.WT_INDEX_CONFIG = config.pop("wt_index_config")

+    # Benchmark options.
+    _config.BENCHMARK_FILTER = config.pop("benchmark_filter")
+    _config.BENCHMARK_LIST_TESTS = config.pop("benchmark_list_tests")
+    benchmark_min_time = config.pop("benchmark_min_time_secs")
+    if benchmark_min_time is not None:
+        _config.BENCHMARK_MIN_TIME = datetime.timedelta(seconds=benchmark_min_time)
+    _config.BENCHMARK_REPETITIONS = config.pop("benchmark_repetitions")
+
     shuffle = config.pop("shuffle")
     if shuffle == "auto":
         # If the user specified a value for --jobs > 1 (or -j > 1), then default to randomize
diff --git a/buildscripts/resmokelib/testing/testcases/benchmark_test.py b/buildscripts/resmokelib/testing/testcases/benchmark_test.py
new file mode 100644
index 00000000000..874ed85e745
--- /dev/null
+++ b/buildscripts/resmokelib/testing/testcases/benchmark_test.py
@@ -0,0 +1,68 @@
+"""
+unittest.TestCase for tests using a MongoDB vendored version of Google Benchmark.
+"""
+
+from __future__ import absolute_import
+
+from buildscripts.resmokelib import config as _config
+from buildscripts.resmokelib import core
+from buildscripts.resmokelib import parser
+from buildscripts.resmokelib import utils
+from buildscripts.resmokelib.testing.testcases import interface
+
+
+class BenchmarkTestCase(interface.ProcessTestCase):
+    """
+    A Benchmark test to execute.
+    """
+
+    REGISTERED_NAME = "benchmark_test"
+
+    def __init__(self,
+                 logger,
+                 program_executable,
+                 program_options=None):
+        """
+        Initializes the BenchmarkTestCase with the executable to run.
+        """
+
+        interface.ProcessTestCase.__init__(self, logger, "Benchmark test", program_executable)
+
+        parser.validate_benchmark_options()
+
+        # 1. Set the default benchmark options, including the out file path, which is based on the
+        # executable path. Keep the existing extension (if any) to simplify parsing.
+        bm_options = {
+            "benchmark_out": program_executable + ".json",
+            "benchmark_min_time": _config.DEFAULT_BENCHMARK_MIN_TIME.total_seconds(),
+            "benchmark_repetitions": _config.DEFAULT_BENCHMARK_REPETITIONS
+        }
+
+        # 2. Override Benchmark options with options set through `program_options` in the suite
+        # configuration.
+        suite_bm_options = utils.default_if_none(program_options, {})
+        bm_options.update(suite_bm_options)
+
+        # 3. Override Benchmark options with options set through resmoke's command line.
+        resmoke_bm_options = {
+            "benchmark_filter": _config.BENCHMARK_FILTER,
+            "benchmark_list_tests": _config.BENCHMARK_LIST_TESTS,
+            "benchmark_min_time": _config.BENCHMARK_MIN_TIME,
+            "benchmark_out_format": _config.BENCHMARK_OUT_FORMAT,
+            "benchmark_repetitions": _config.BENCHMARK_REPETITIONS
+        }
+
+        for key, value in resmoke_bm_options.items():
+            if value is not None:
+                # 4. sanitize options before passing them to Benchmark's command line.
+                if key == "benchmark_min_time":
+                    value = value.total_seconds()
+                bm_options[key] = value
+
+        self.program_options = bm_options
+        self.program_executable = program_executable
+
+    def _make_process(self):
+        return core.programs.generic_program(self.logger,
+                                             [self.program_executable],
+                                             **self.program_options)
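
For reference, the options introduced above suggest invocations along the following lines. This is an illustrative sketch, not part of the commit: the suite name "benchmarks" is assumed from the new benchmarks.yml filename, "buildscripts/resmoke.py" is assumed to be the usual resmoke entry point at this revision, and BM_Example is a placeholder filter pattern.

    # List the benchmark configurations registered in each compiled test binary (assumed usage).
    python buildscripts/resmoke.py --suites=benchmarks --benchmarkListTests

    # Run the suite serially (validate_benchmark_options rejects --jobs > 1 and --repeat > 1),
    # filtering by name and running each configuration once for at least 30 seconds.
    python buildscripts/resmoke.py --suites=benchmarks --jobs=1 \
        --benchmarkFilter=BM_Example --benchmarkRepetitions=1 --benchmarkMinTimeSecs=30

Per BenchmarkTestCase, resmoke-level flags like these override a suite's program_options, which in turn override the built-in defaults (3 repetitions, a 5-second minimum time, and JSON output written next to the test executable).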