-rw-r--r--  buildscripts/burn_in_tags.py                                76
-rw-r--r--  buildscripts/burn_in_tests.py                              760
-rw-r--r--  buildscripts/ciconfig/evergreen.py                           8
-rwxr-xr-x  buildscripts/evergreen_generate_resmoke_tasks.py            34
-rw-r--r--  buildscripts/patch_builds/task_generation.py               161
-rw-r--r--  buildscripts/resmokelib/testing/suite.py                     4
-rw-r--r--  buildscripts/tests/patch_builds/__init__.py                  1
-rw-r--r--  buildscripts/tests/patch_builds/test_task_generation.py    223
-rw-r--r--  buildscripts/tests/test_burn_in_tags.py                     73
-rw-r--r--  buildscripts/tests/test_burn_in_tests.py                   927
-rw-r--r--  buildscripts/util/taskname.py                                5
-rw-r--r--  etc/evergreen.yml                                           16
12 files changed, 1230 insertions, 1058 deletions
diff --git a/buildscripts/burn_in_tags.py b/buildscripts/burn_in_tags.py
index 3f99edd82e2..1ca028ec741 100644
--- a/buildscripts/burn_in_tags.py
+++ b/buildscripts/burn_in_tags.py
@@ -19,7 +19,8 @@ if __name__ == "__main__" and __package__ is None:
# pylint: disable=wrong-import-position
import buildscripts.util.read_config as read_config
from buildscripts.ciconfig import evergreen
-from buildscripts.burn_in_tests import create_generate_tasks_config, create_tests_by_task
+from buildscripts.burn_in_tests import create_generate_tasks_config, create_tests_by_task, \
+ GenerateConfig, RepeatConfig
# pylint: enable=wrong-import-position
CONFIG_DIRECTORY = "generated_burn_in_tags_config"
@@ -28,8 +29,8 @@ EVERGREEN_FILE = "etc/evergreen.yml"
EVG_CONFIG_FILE = ".evergreen.yml"
ConfigOptions = namedtuple("ConfigOptions", [
- "buildvariant",
- "run_buildvariant",
+ "build_variant",
+ "run_build_variant",
"base_commit",
"max_revisions",
"branch",
@@ -42,14 +43,14 @@ ConfigOptions = namedtuple("ConfigOptions", [
])
-def _get_config_options(expansions_file_data, buildvariant, run_buildvariant):
+def _get_config_options(expansions_file_data, build_variant, run_build_variant):
"""
Get the configuration to use.
:param expansions_file_data: Config data file to use.
- :param buildvariant: The buildvariant the current patch should be compared against to figure
+ :param build_variant: The buildvariant the current patch should be compared against to figure
out which tests have changed.
- :param run_buildvariant: The buildvariant the generated task should be run on.
+ :param run_build_variant: The buildvariant the generated task should be run on.
:return: ConfigOptions for the generated task to use.
"""
base_commit = expansions_file_data["revision"]
@@ -60,54 +61,55 @@ def _get_config_options(expansions_file_data, buildvariant, run_buildvariant):
distro = expansions_file_data["distro_id"]
repeat_tests_min = int(expansions_file_data["repeat_tests_min"])
repeat_tests_max = int(expansions_file_data["repeat_tests_max"])
- repeat_tests_secs = float(expansions_file_data["repeat_tests_secs"])
+ repeat_tests_secs = int(expansions_file_data["repeat_tests_secs"])
project = expansions_file_data["project"]
- return ConfigOptions(buildvariant, run_buildvariant, base_commit, max_revisions, branch,
+ return ConfigOptions(build_variant, run_build_variant, base_commit, max_revisions, branch,
check_evergreen, distro, repeat_tests_secs, repeat_tests_min,
repeat_tests_max, project)
-def _create_evg_buildvariant_map(expansions_file_data):
+def _create_evg_build_variant_map(expansions_file_data, evergreen_conf):
"""
Generate relationship of base buildvariant to generated buildvariant.
:param expansions_file_data: Config data file to use.
+ :param evergreen_conf: Evergreen configuration.
:return: Map of base buildvariants to their generated buildvariants.
"""
burn_in_tags_gen_variant = expansions_file_data["build_variant"]
- evergreen_conf = evergreen.parse_evergreen_file(EVERGREEN_FILE)
burn_in_tags_gen_variant_config = evergreen_conf.get_variant(burn_in_tags_gen_variant)
- burn_in_tag_buildvariants = burn_in_tags_gen_variant_config.expansions.get(
+ burn_in_tag_build_variants = burn_in_tags_gen_variant_config.expansions.get(
"burn_in_tag_buildvariants")
- if burn_in_tag_buildvariants:
+
+ if burn_in_tag_build_variants:
return {
base_variant: f"{base_variant}-required"
- for base_variant in burn_in_tag_buildvariants.split(" ")
+ for base_variant in burn_in_tag_build_variants.split(" ")
}
+
return {}
-def _generate_evg_buildvariant(shrub_config, buildvariant, run_buildvariant,
- burn_in_tags_gen_variant):
+def _generate_evg_build_variant(shrub_config, build_variant, run_build_variant,
+ burn_in_tags_gen_variant, evg_conf):
"""
Generate buildvariants for a given shrub config.
:param shrub_config: Shrub config object that the generated buildvariant will be built upon.
- :param buildvariant: The base variant that the generated run_buildvariant will be based on.
- :param run_buildvariant: The generated buildvariant.
+ :param build_variant: The base variant that the generated run_buildvariant will be based on.
+ :param run_build_variant: The generated buildvariant.
:param burn_in_tags_gen_variant: The buildvariant on which the burn_in_tags_gen task runs.
"""
- evergreen_conf = evergreen.parse_evergreen_file(EVERGREEN_FILE)
- base_variant_config = evergreen_conf.get_variant(buildvariant)
+ base_variant_config = evg_conf.get_variant(build_variant)
new_variant_display_name = f"! {base_variant_config.display_name}"
new_variant_run_on = base_variant_config.run_on[0]
task_spec = TaskSpec("compile_TG")
- new_variant = shrub_config.variant(run_buildvariant).expansion("burn_in_bypass",
- burn_in_tags_gen_variant)
+ new_variant = shrub_config.variant(run_build_variant).expansion("burn_in_bypass",
+ burn_in_tags_gen_variant)
new_variant.display_name(new_variant_display_name)
new_variant.run_on(new_variant_run_on)
new_variant.task(task_spec)
@@ -119,24 +121,32 @@ def _generate_evg_buildvariant(shrub_config, buildvariant, run_buildvariant,
new_variant.modules(modules)
-def _generate_evg_tasks(evergreen_api, shrub_config, expansions_file_data, buildvariant_map, repo):
+# pylint: disable=too-many-arguments
+def _generate_evg_tasks(evergreen_api, shrub_config, expansions_file_data, build_variant_map, repo,
+ evg_conf):
"""
Generate burn in tests tasks for a given shrub config and group of buildvariants.
:param evergreen_api: Evergreen.py object.
:param shrub_config: Shrub config object that the build variants will be built upon.
:param expansions_file_data: Config data file to use.
- :param buildvariant_map: Map of base buildvariants to their generated buildvariant.
+ :param build_variant_map: Map of base buildvariants to their generated buildvariant.
:param repo: Git repository.
"""
- for buildvariant, run_buildvariant in buildvariant_map.items():
- config_options = _get_config_options(expansions_file_data, buildvariant, run_buildvariant)
- tests_by_task = create_tests_by_task(config_options, repo)
+ for build_variant, run_build_variant in build_variant_map.items():
+ config_options = _get_config_options(expansions_file_data, build_variant, run_build_variant)
+ tests_by_task = create_tests_by_task(build_variant, repo, evg_conf)
if tests_by_task:
- _generate_evg_buildvariant(shrub_config, buildvariant, run_buildvariant,
- expansions_file_data["build_variant"])
- create_generate_tasks_config(evergreen_api, shrub_config, config_options, tests_by_task,
- False)
+ _generate_evg_build_variant(shrub_config, build_variant, run_build_variant,
+ expansions_file_data["build_variant"], evg_conf)
+ gen_config = GenerateConfig(build_variant, config_options.project, run_build_variant,
+ config_options.distro).validate(evg_conf)
+ repeat_config = RepeatConfig(repeat_tests_min=config_options.repeat_tests_min,
+ repeat_tests_max=config_options.repeat_tests_max,
+ repeat_tests_secs=config_options.repeat_tests_secs)
+
+ create_generate_tasks_config(shrub_config, tests_by_task, gen_config, repeat_config,
+ evergreen_api, include_gen_task=False)
def _write_to_file(shrub_config):
@@ -162,8 +172,10 @@ def main(evergreen_api, repo):
expansions_file_data = read_config.read_config_file(cmd_line_options.expansion_file)
shrub_config = Configuration()
- buildvariant_map = _create_evg_buildvariant_map(expansions_file_data)
- _generate_evg_tasks(evergreen_api, shrub_config, expansions_file_data, buildvariant_map, repo)
+ evg_conf = evergreen.parse_evergreen_file(EVERGREEN_FILE)
+ build_variant_map = _create_evg_build_variant_map(expansions_file_data, evg_conf)
+ _generate_evg_tasks(evergreen_api, shrub_config, expansions_file_data, build_variant_map, repo,
+ evg_conf)
_write_to_file(shrub_config)
diff --git a/buildscripts/burn_in_tests.py b/buildscripts/burn_in_tests.py
index 0d9abca5f43..5125c904dbe 100644
--- a/buildscripts/burn_in_tests.py
+++ b/buildscripts/burn_in_tests.py
@@ -1,33 +1,28 @@
#!/usr/bin/env python3
"""Command line utility for determining what jstests have been added or modified."""
-import collections
import copy
+import datetime
import json
-import optparse
+import logging
import os.path
-import subprocess
import shlex
+import subprocess
import sys
-import datetime
-import logging
from math import ceil
+from collections import defaultdict
+from typing import Optional, Set, Tuple, List, Dict
-from git import Repo
+import click
import requests
import structlog
from structlog.stdlib import LoggerFactory
import yaml
+from git import Repo
+from evergreen.api import RetryingEvergreenApi, EvergreenApi
from shrub.config import Configuration
-from shrub.command import CommandDefinition
-from shrub.task import TaskDependency
-from shrub.variant import DisplayTaskDefinition
-from shrub.variant import TaskSpec
-from shrub.operations import CmdTimeoutUpdate
-
-from evergreen.api import RetryingEvergreenApi
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
@@ -35,9 +30,13 @@ if __name__ == "__main__" and __package__ is None:
# pylint: disable=wrong-import-position
from buildscripts.patch_builds.change_data import find_changed_files
-from buildscripts import resmokelib
-from buildscripts.ciconfig import evergreen
+from buildscripts.resmokelib.suitesconfig import create_test_membership_map, get_suites
+from buildscripts.resmokelib.utils import default_if_none, globstar
+from buildscripts.ciconfig.evergreen import parse_evergreen_file, ResmokeArgs, \
+ EvergreenProjectConfig
from buildscripts.util import teststats
+from buildscripts.util.taskname import name_generated_task
+from buildscripts.patch_builds.task_generation import resmoke_commands, TimeoutInfo, TaskList
# pylint: enable=wrong-import-position
structlog.configure(logger_factory=LoggerFactory())
@@ -51,6 +50,7 @@ EXTERNAL_LOGGERS = {
AVG_TEST_RUNTIME_ANALYSIS_DAYS = 14
AVG_TEST_TIME_MULTIPLIER = 3
CONFIG_FILE = ".evergreen.yml"
+DEFAULT_PROJECT = "mongodb-mongo-master"
REPEAT_SUITES = 2
EVERGREEN_FILE = "etc/evergreen.yml"
MAX_TASKS_TO_CREATE = 1000
@@ -68,129 +68,113 @@ BURN_IN_TESTS_GEN_TASK = "burn_in_tests_gen"
BURN_IN_TESTS_TASK = "burn_in_tests"
-def parse_command_line():
- """Parse command line options."""
-
- parser = optparse.OptionParser(usage="Usage: %prog [options] [resmoke command]")
-
- parser.add_option(
- "--maxRevisions", dest="max_revisions", type=int, default=25,
- help=("Maximum number of revisions to check for changes. Default is"
- " %default."))
-
- parser.add_option(
- "--branch", dest="branch", default="master",
- help=("The name of the branch the working branch was based on. Default is"
- " '%default'."))
-
- parser.add_option("--baseCommit", dest="base_commit", default=None,
- help="The base commit to compare to for determining changes.")
-
- parser.add_option(
- "--buildVariant", dest="buildvariant", default=None,
- help=("The buildvariant to select the tasks. Required when"
- " generating the JSON file with test executor information"))
-
- parser.add_option(
- "--runBuildVariant", dest="run_buildvariant", default=None,
- help=("The buildvariant the tasks will execute on. If not specified then tasks"
- " will execute on the the buildvariant specified in --buildVariant."))
-
- parser.add_option(
- "--distro", dest="distro", default=None,
- help=("The distro the tasks will execute on. Can only be specified"
- " with --generateTasksFile."))
-
- parser.add_option(
- "--checkEvergreen", dest="check_evergreen", default=False, action="store_true",
- help=("Checks Evergreen for the last commit that was scheduled."
- " This way all the tests that haven't been burned in will be run."))
-
- parser.add_option(
- "--generateTasksFile", dest="generate_tasks_file", default=None,
- help=("Write an Evergreen generate.tasks JSON file. If this option is"
- " specified then no tests will be executed."))
-
- parser.add_option("--noExec", dest="no_exec", default=False, action="store_true",
- help="Do not run resmoke loop on new tests.")
-
- parser.add_option("--reportFile", dest="report_file", default="report.json",
- help="Write a JSON file with test results. Default is '%default'.")
-
- parser.add_option("--project", dest="project", default="mongodb-mongo-master",
- help="The project the test history will be requested for.")
-
- parser.add_option("--testListFile", dest="test_list_file", default=None, metavar="TESTLIST",
- help="Load a JSON file with tests to run.")
-
- parser.add_option("--testListOutfile", dest="test_list_outfile", default=None,
- help="Write a JSON file with test executor information.")
-
- parser.add_option(
- "--repeatTests", dest="repeat_tests_num", default=None, type=int,
- help="The number of times to repeat each test. If --repeatTestsSecs is not"
- " specified then this will be set to {}.".format(REPEAT_SUITES))
-
- parser.add_option(
- "--repeatTestsMin", dest="repeat_tests_min", default=None, type=int,
- help="The minimum number of times to repeat each test when --repeatTestsSecs"
- " is specified.")
-
- parser.add_option(
- "--repeatTestsMax", dest="repeat_tests_max", default=None, type=int,
- help="The maximum number of times to repeat each test when --repeatTestsSecs"
- " is specified.")
-
- parser.add_option(
- "--repeatTestsSecs", dest="repeat_tests_secs", default=None, type=float,
- help="Time, in seconds, to repeat each test. Note that this option is"
- " mutually exclusive with with --repeatTests.")
-
- # This disables argument parsing on the first unrecognized parameter. This allows us to pass
- # a complete resmoke.py command line without accidentally parsing its options.
- parser.disable_interspersed_args()
-
- options, args = parser.parse_args()
- validate_options(parser, options)
-
- return options, args
-
-
-def check_variant(buildvariant, parser):
- """Check if the buildvariant is found in the evergreen file."""
- evg_conf = evergreen.parse_evergreen_file(EVERGREEN_FILE)
- if not evg_conf.get_variant(buildvariant):
- parser.error("Buildvariant '{}' not found in {}, select from:\n\t{}".format(
- buildvariant, EVERGREEN_FILE, "\n\t".join(sorted(evg_conf.variant_names))))
-
-
-def validate_options(parser, options):
- """Validate command line options."""
-
- if options.repeat_tests_max:
- if options.repeat_tests_secs is None:
- parser.error("Must specify --repeatTestsSecs with --repeatTestsMax")
-
- if options.repeat_tests_min and options.repeat_tests_min > options.repeat_tests_max:
- parser.error("--repeatTestsSecsMin is greater than --repeatTestsMax")
-
- if options.repeat_tests_min and options.repeat_tests_secs is None:
- parser.error("Must specify --repeatTestsSecs with --repeatTestsMin")
-
- if options.repeat_tests_num and options.repeat_tests_secs:
- parser.error("Cannot specify --repeatTests and --repeatTestsSecs")
-
- if options.test_list_file is None and options.buildvariant is None:
- parser.error("Must specify --buildVariant to find changed tests")
-
- if options.buildvariant:
- check_variant(options.buildvariant, parser)
-
- if options.run_buildvariant:
- check_variant(options.run_buildvariant, parser)
-
-
-def _is_file_a_test_file(file_path):
+class RepeatConfig(object):
+ """Configuration for how tests should be repeated."""
+
+ def __init__(self, repeat_tests_secs: Optional[int] = None,
+ repeat_tests_min: Optional[int] = None, repeat_tests_max: Optional[int] = None,
+ repeat_tests_num: Optional[int] = None):
+ """
+ Create a Repeat Config.
+
+ :param repeat_tests_secs: Repeat test for this number of seconds.
+ :param repeat_tests_min: Repeat the test at least this many times.
+ :param repeat_tests_max: At most repeat the test this many times.
+ :param repeat_tests_num: Repeat the test exactly this many times.
+ """
+ self.repeat_tests_secs = repeat_tests_secs
+ self.repeat_tests_min = repeat_tests_min
+ self.repeat_tests_max = repeat_tests_max
+ self.repeat_tests_num = repeat_tests_num
+
+ def validate(self):
+ """
+ Raise an exception if this configuration is invalid.
+
+ :return: self.
+ """
+ if self.repeat_tests_num and self.repeat_tests_secs:
+ raise ValueError("Cannot specify --repeat-tests and --repeat-tests-secs")
+
+ if self.repeat_tests_max:
+ if not self.repeat_tests_secs:
+ raise ValueError("Must specify --repeat-tests-secs with --repeat-tests-max")
+
+ if self.repeat_tests_min and self.repeat_tests_min > self.repeat_tests_max:
+ raise ValueError("--repeat-tests-secs-min is greater than --repeat-tests-max")
+
+ if self.repeat_tests_min and not self.repeat_tests_secs:
+ raise ValueError("Must specify --repeat-tests-secs with --repeat-tests-min")
+
+ return self
+
+ def generate_resmoke_options(self) -> str:
+ """
+ Generate the resmoke options to repeat a test.
+
+ :return: Resmoke options to repeat a test.
+ """
+ if self.repeat_tests_secs:
+ repeat_options = f" --repeatTestsSecs={self.repeat_tests_secs} "
+ if self.repeat_tests_min:
+ repeat_options += f" --repeatTestsMin={self.repeat_tests_min} "
+ if self.repeat_tests_max:
+ repeat_options += f" --repeatTestsMax={self.repeat_tests_max} "
+ return repeat_options
+
+ repeat_suites = self.repeat_tests_num if self.repeat_tests_num else REPEAT_SUITES
+ return f" --repeatSuites={repeat_suites} "
+
+
+class GenerateConfig(object):
+ """Configuration for how to generate tasks."""
+
+ def __init__(self, build_variant: str, project: str, run_build_variant: Optional[str] = None,
+ distro: Optional[str] = None):
+ """
+ Create a GenerateConfig.
+
+ :param build_variant: Build variant to get tasks from.
+ :param project: Project to run tasks on.
+ :param run_build_variant: Build variant to run new tasks on.
+ :param distro: Distro to run tasks on.
+ """
+ self.build_variant = build_variant
+ self._run_build_variant = run_build_variant
+ self.distro = distro
+ self.project = project
+
+ @property
+ def run_build_variant(self):
+ """Build variant tasks should run against."""
+ if self._run_build_variant:
+ return self._run_build_variant
+ return self.build_variant
+
+ def validate(self, evg_conf: EvergreenProjectConfig):
+ """
+ Raise an exception if this configuration is invalid.
+
+ :param evg_conf: Evergreen configuration.
+ :return: self.
+ """
+ self._check_variant(self.build_variant, evg_conf)
+
+ return self
+
+ @staticmethod
+ def _check_variant(build_variant: str, evg_conf: EvergreenProjectConfig):
+ """
+ Check if the build_variant is found in the evergreen file.
+
+ :param build_variant: Build variant to check.
+ :param evg_conf: Evergreen configuration to check against.
+ """
+ if not evg_conf.get_variant(build_variant):
+ raise ValueError(f"Build variant '{build_variant}' not found in Evergreen file")
+
+
+def _is_file_a_test_file(file_path: str) -> bool:
"""
Check if the given path points to a test file.
@@ -207,7 +191,7 @@ def _is_file_a_test_file(file_path):
return True
-def find_changed_tests(repo: Repo):
+def find_changed_tests(repo: Repo) -> Set[str]:
"""
Find the changed tests.
@@ -224,42 +208,44 @@ def find_changed_tests(repo: Repo):
return changed_tests
-def find_excludes(selector_file):
+def find_excludes(selector_file: str) -> Tuple[List, List, List]:
"""Parse etc/burn_in_tests.yml. Returns lists of excluded suites, tasks & tests."""
if not selector_file:
- return ([], [], [])
+ return [], [], []
+ LOGGER.debug("reading configuration", config_file=selector_file)
with open(selector_file, "r") as fstream:
yml = yaml.safe_load(fstream)
try:
js_test = yml["selector"]["js_test"]
except KeyError:
- raise Exception("The selector file " + selector_file +
- " is missing the 'selector.js_test' key")
-
- return (resmokelib.utils.default_if_none(js_test.get("exclude_suites"), []),
- resmokelib.utils.default_if_none(js_test.get("exclude_tasks"), []),
- resmokelib.utils.default_if_none(js_test.get("exclude_tests"), []))
+ raise Exception(f"The selector file {selector_file} is missing the 'selector.js_test' key")
+ return (default_if_none(js_test.get("exclude_suites"), []),
+ default_if_none(js_test.get("exclude_tasks"), []),
+ default_if_none(js_test.get("exclude_tests"), []))
-def filter_tests(tests, exclude_tests):
- """Exclude tests which have been blacklisted.
- A test is in the tests list, i.e., ['jstests/core/a.js']
- The tests paths must be in normalized form (see os.path.normpath(path)).
+def filter_tests(tests: Set[str], exclude_tests: [str]) -> Set[str]:
"""
+ Exclude tests which have been blacklisted.
+ :param tests: Set of tests to filter.
+ :param exclude_tests: Tests to filter out.
+ :return: Set of tests with exclude_tests filtered out.
+ """
if not exclude_tests or not tests:
return tests
# The exclude_tests can be specified using * and ** to specify directory and file patterns.
excluded_globbed = set()
for exclude_test_pattern in exclude_tests:
- excluded_globbed.update(resmokelib.utils.globstar.iglob(exclude_test_pattern))
+ excluded_globbed.update(globstar.iglob(exclude_test_pattern))
- return set(tests) - excluded_globbed
+ LOGGER.debug("Excluding test pattern", excluded=excluded_globbed)
+ return tests - excluded_globbed
def create_executor_list(suites, exclude_suites):
@@ -269,13 +255,13 @@ def create_executor_list(suites, exclude_suites):
parameter. Returns a dict keyed by suite name / executor, value is tests
to run under that executor.
"""
+ test_membership = create_test_membership_map(test_kind=SUPPORTED_TEST_KINDS)
- test_membership = resmokelib.suitesconfig.create_test_membership_map(
- test_kind=SUPPORTED_TEST_KINDS)
-
- memberships = collections.defaultdict(list)
+ memberships = defaultdict(list)
for suite in suites:
+ LOGGER.debug("Adding tests for suite", suite=suite, tests=suite.tests)
for test in suite.tests:
+ LOGGER.debug("membership for test", test=test, membership=test_membership[test])
for executor in set(test_membership[test]) - set(exclude_suites):
if test not in memberships[executor]:
memberships[executor].append(test)
@@ -283,7 +269,11 @@ def create_executor_list(suites, exclude_suites):
def _get_task_name(task):
- """Return the task var from a "generate resmoke task" instead of the task name."""
+ """
+ Return the task var from a "generate resmoke task" instead of the task name.
+
+ :param task: task to get name of.
+ """
if task.is_generate_resmoke_task:
return task.generated_task_name
@@ -292,23 +282,25 @@ def _get_task_name(task):
def _set_resmoke_args(task):
- """Set the resmoke args to include the --suites option.
+ """
+ Set the resmoke args to include the --suites option.
The suite name from "generate resmoke tasks" can be specified as a var or directly in the
resmoke_args.
"""
resmoke_args = task.combined_resmoke_args
- suite_name = evergreen.ResmokeArgs.get_arg(resmoke_args, "suites")
+ suite_name = ResmokeArgs.get_arg(resmoke_args, "suites")
if task.is_generate_resmoke_task:
suite_name = task.get_vars_suite_name(task.generate_resmoke_tasks_command["vars"])
- return evergreen.ResmokeArgs.get_updated_arg(resmoke_args, "suites", suite_name)
+ return ResmokeArgs.get_updated_arg(resmoke_args, "suites", suite_name)
-def create_task_list( #pylint: disable=too-many-locals
- evergreen_conf, buildvariant, suites, exclude_tasks):
- """Find associated tasks for the specified buildvariant and suites.
+def create_task_list(evergreen_conf: EvergreenProjectConfig, build_variant: str, suites: Dict,
+ exclude_tasks: [str]):
+ """
+ Find associated tasks for the specified build_variant and suites.
Returns a dict keyed by task_name, with executor, resmoke_args & tests, i.e.,
{'jsCore_small_oplog':
@@ -316,30 +308,41 @@ def create_task_list( #pylint: disable=too-many-locals
'tests': ['jstests/core/all2.js', 'jstests/core/all3.js'],
'use_multiversion': '/data/multiversion'}
}
+
+ :param evergreen_conf: Evergreen configuration for project.
+ :param build_variant: Build variant to select tasks from.
+ :param suites: Suites to be run.
+ :param exclude_tasks: Tasks to exclude.
+ :return: Dict of tasks to run with run configuration.
"""
+ log = LOGGER.bind(build_variant=build_variant)
- evg_buildvariant = evergreen_conf.get_variant(buildvariant)
- if not evg_buildvariant:
- print("Buildvariant '{}' not found in {}".format(buildvariant, evergreen_conf.path))
- sys.exit(1)
+ log.debug("creating task list for suites", suites=suites, exclude_tasks=exclude_tasks)
+ evg_build_variant = evergreen_conf.get_variant(build_variant)
+ if not evg_build_variant:
+ log.warning("Buildvariant not found in evergreen config")
+ raise ValueError(f"Buildvariant ({build_variant} not found in evergreen configuration")
- # Find all the buildvariant tasks.
+ # Find all the build variant tasks.
exclude_tasks_set = set(exclude_tasks)
- variant_task = {
+ all_variant_tasks = {
_get_task_name(task): task
- for task in evg_buildvariant.tasks
+ for task in evg_build_variant.tasks
if task.name not in exclude_tasks_set and task.combined_resmoke_args
}
# Return the list of tasks to run for the specified suite.
- return {
+ task_list = {
task_name: {
"resmoke_args": _set_resmoke_args(task), "tests": suites[task.resmoke_suite],
"use_multiversion": task.multiversion_path
}
- for task_name, task in variant_task.items() if task.resmoke_suite in suites
+ for task_name, task in all_variant_tasks.items() if task.resmoke_suite in suites
}
+ log.debug("Found task list", task_list=task_list)
+ return task_list
+
def _write_json_file(json_data, pathname):
"""Write out a JSON file."""
@@ -348,79 +351,17 @@ def _write_json_file(json_data, pathname):
json.dump(json_data, fstream, indent=4)
-def _load_tests_file(pathname):
- """Load the list of tests and executors from the specified file.
-
- The file might not exist, and this is fine. The task running this becomes a noop.
- """
-
- if not os.path.isfile(pathname):
- return None
- with open(pathname, "r") as fstream:
- return json.load(fstream)
-
-
-def _update_report_data(data_to_update, pathname, task):
- """Read in the report file from the previous resmoke.py run, if it exists.
-
- We'll concat it to the data_to_update dict.
- """
-
- if not os.path.isfile(pathname):
- return
-
- with open(pathname, "r") as fstream:
- report_data = json.load(fstream)
-
- for result in report_data["results"]:
- result["test_file"] += ":" + task
-
- data_to_update["failures"] += report_data["failures"]
- data_to_update["results"] += report_data["results"]
-
-
-def get_resmoke_repeat_options(options):
- """Build the resmoke repeat options."""
-
- if options.repeat_tests_secs:
- repeat_options = "--repeatTestsSecs={}".format(options.repeat_tests_secs)
- if options.repeat_tests_min:
- repeat_options += " --repeatTestsMin={}".format(options.repeat_tests_min)
- if options.repeat_tests_max:
- repeat_options += " --repeatTestsMax={}".format(options.repeat_tests_max)
- else:
- # To maintain previous default behavior, we set repeat_suites to 2 if
- # options.repeat_tests_secs and options.repeat_tests_num are both not specified.
- repeat_suites = options.repeat_tests_num if options.repeat_tests_num else REPEAT_SUITES
- repeat_options = "--repeatSuites={}".format(repeat_suites)
-
- return repeat_options
-
-
-def _set_resmoke_cmd(options, args):
+def _set_resmoke_cmd(repeat_config: RepeatConfig, resmoke_args: [str]) -> [str]:
"""Build the resmoke command, if a resmoke.py command wasn't passed in."""
+ new_args = [sys.executable, "buildscripts/resmoke.py"]
+ if resmoke_args:
+ new_args = copy.deepcopy(resmoke_args)
- new_args = copy.deepcopy(args) if args else [sys.executable, "buildscripts/resmoke.py"]
- new_args += get_resmoke_repeat_options(options).split()
-
+ new_args += repeat_config.generate_resmoke_options().split()
+ LOGGER.debug("set resmoke command", new_args=new_args)
return new_args
-def _sub_task_name(options, task, task_num):
- """Return the generated sub-task name."""
- task_name_prefix = options.buildvariant
- if options.run_buildvariant:
- task_name_prefix = options.run_buildvariant
- return "burn_in:{}_{}_{}".format(task_name_prefix, task, task_num)
-
-
-def _get_run_buildvariant(options):
- """Return the build variant to execute the tasks on."""
- if options.run_buildvariant:
- return options.run_buildvariant
- return options.buildvariant
-
-
def _parse_avg_test_runtime(test, task_avg_test_runtime_stats):
"""
Parse list of teststats to find runtime for particular test.
@@ -446,7 +387,7 @@ def _calculate_timeout(avg_test_runtime):
return max(MIN_AVG_TEST_TIME_SEC, ceil(avg_test_runtime * AVG_TEST_TIME_MULTIPLIER))
-def _calculate_exec_timeout(options, avg_test_runtime):
+def _calculate_exec_timeout(repeat_tests_secs, avg_test_runtime):
"""
Calculate exec_timeout_secs for the Evergreen task.
@@ -454,38 +395,37 @@ def _calculate_exec_timeout(options, avg_test_runtime):
:return: repeat_tests_secs + an amount of padding time so that the test has time to finish on
its final run.
"""
- test_execution_time_over_limit = avg_test_runtime - (
- options.repeat_tests_secs % avg_test_runtime)
+ test_execution_time_over_limit = avg_test_runtime - (repeat_tests_secs % avg_test_runtime)
test_execution_time_over_limit = max(MIN_AVG_TEST_OVERFLOW_SEC, test_execution_time_over_limit)
- return ceil(options.repeat_tests_secs +
- (test_execution_time_over_limit * AVG_TEST_TIME_MULTIPLIER))
+ return ceil(repeat_tests_secs + (test_execution_time_over_limit * AVG_TEST_TIME_MULTIPLIER))
-def _generate_timeouts(options, commands, test, task_avg_test_runtime_stats):
+def _generate_timeouts(repeat_tests_secs, test, task_avg_test_runtime_stats) -> TimeoutInfo:
"""
Add timeout.update command to list of commands for a burn in execution task.
- :param options: Command line options.
- :param commands: List of commands for a burn in execution task.
+ :param repeat_tests_secs: How long test will repeat for.
:param test: Test name.
:param task_avg_test_runtime_stats: Teststat data.
+ :return: TimeoutInfo to use.
"""
if task_avg_test_runtime_stats:
avg_test_runtime = _parse_avg_test_runtime(test, task_avg_test_runtime_stats)
if avg_test_runtime:
- cmd_timeout = CmdTimeoutUpdate()
LOGGER.debug("Avg test runtime", test=test, runtime=avg_test_runtime)
timeout = _calculate_timeout(avg_test_runtime)
- cmd_timeout.timeout(timeout)
+ exec_timeout = _calculate_exec_timeout(repeat_tests_secs, avg_test_runtime)
+ timeout_info = TimeoutInfo.overridden(exec_timeout, timeout)
- exec_timeout = _calculate_exec_timeout(options, avg_test_runtime)
- cmd_timeout.exec_timeout(exec_timeout)
+ LOGGER.debug("Override runtime for test", test=test, timeout=timeout_info)
+ return timeout_info
- commands.append(cmd_timeout.validate().resolve())
+ return TimeoutInfo.default_timeout()
-def _get_task_runtime_history(evg_api, project, task, variant):
+def _get_task_runtime_history(evg_api: Optional[EvergreenApi], project: str, task: str,
+ variant: str):
"""
Fetch historical average runtime for all tests in a task from Evergreen API.
@@ -495,6 +435,9 @@ def _get_task_runtime_history(evg_api, project, task, variant):
:param variant: Variant name.
:return: Test historical runtimes, parsed into teststat objects.
"""
+ if not evg_api:
+ return []
+
try:
end_date = datetime.datetime.utcnow().replace(microsecond=0)
start_date = end_date - datetime.timedelta(days=AVG_TEST_RUNTIME_ANALYSIS_DAYS)
@@ -514,155 +457,274 @@ def _get_task_runtime_history(evg_api, project, task, variant):
raise
-def create_generate_tasks_config(evg_api, evg_config, options, tests_by_task, include_gen_task):
- """Create the config for the Evergreen generate.tasks file."""
- # pylint: disable=too-many-locals
- task_specs = []
- task_names = []
- if include_gen_task:
- task_names.append(BURN_IN_TESTS_GEN_TASK)
+def create_generate_tasks_config(evg_config: Configuration, tests_by_task: Dict,
+ generate_config: GenerateConfig, repeat_config: RepeatConfig,
+ evg_api: Optional[EvergreenApi], include_gen_task: bool = True,
+ task_prefix: str = "burn_in") -> Configuration:
+ # pylint: disable=too-many-arguments,too-many-locals
+ """
+ Create the config for the Evergreen generate.tasks file.
+
+ :param evg_config: Shrub configuration to add to.
+ :param tests_by_task: Dictionary of tests to generate tasks for.
+ :param generate_config: Configuration of what to generate.
+ :param repeat_config: Configuration of how to repeat tests.
+ :param evg_api: Evergreen API.
+    :param include_gen_task: Should the generating task be included in the display task.
+ :param task_prefix: Prefix all task names with this.
+ :return: Shrub configuration with added tasks.
+ """
+ task_list = TaskList(evg_config)
+ resmoke_options = repeat_config.generate_resmoke_options()
for task in sorted(tests_by_task):
multiversion_path = tests_by_task[task].get("use_multiversion")
- task_avg_test_runtime_stats = _get_task_runtime_history(evg_api, options.project, task,
- options.buildvariant)
- for test_num, test in enumerate(tests_by_task[task]["tests"]):
- sub_task_name = _sub_task_name(options, task, test_num)
- task_names.append(sub_task_name)
- evg_sub_task = evg_config.task(sub_task_name)
- evg_sub_task.dependency(TaskDependency("compile"))
- task_spec = TaskSpec(sub_task_name)
- if options.distro:
- task_spec.distro(options.distro)
- task_specs.append(task_spec)
- run_tests_vars = {
- "resmoke_args":
- "{} {} {}".format(tests_by_task[task]["resmoke_args"],
- get_resmoke_repeat_options(options), test),
- }
- commands = []
- _generate_timeouts(options, commands, test, task_avg_test_runtime_stats)
- commands.append(CommandDefinition().function("do setup"))
+ task_runtime_stats = _get_task_runtime_history(evg_api, generate_config.project, task,
+ generate_config.build_variant)
+ resmoke_args = tests_by_task[task]["resmoke_args"]
+ test_list = tests_by_task[task]["tests"]
+ for index, test in enumerate(test_list):
+ sub_task_name = name_generated_task(f"{task_prefix}:{task}", index, len(test_list),
+ generate_config.run_build_variant)
+ LOGGER.debug("Generating sub-task", sub_task=sub_task_name)
+
+ run_tests_vars = {"resmoke_args": f"{resmoke_args} {resmoke_options} {test}"}
if multiversion_path:
run_tests_vars["task_path_suffix"] = multiversion_path
- commands.append(CommandDefinition().function("do multiversion setup"))
- commands.append(CommandDefinition().function("run tests").vars(run_tests_vars))
- evg_sub_task.commands(commands)
+ timeout = _generate_timeouts(repeat_config.repeat_tests_secs, test, task_runtime_stats)
+ commands = resmoke_commands("run tests", run_tests_vars, timeout, multiversion_path)
- display_task = DisplayTaskDefinition(BURN_IN_TESTS_TASK).execution_tasks(task_names)
- evg_config.variant(_get_run_buildvariant(options)).tasks(task_specs).display_task(display_task)
+ task_list.add_task(sub_task_name, commands, ["compile"], generate_config.distro)
+
+ existing_tasks = [BURN_IN_TESTS_GEN_TASK] if include_gen_task else None
+ task_list.add_to_variant(generate_config.run_build_variant, BURN_IN_TESTS_TASK, existing_tasks)
return evg_config
-def create_tests_by_task(options, repo):
+def create_task_list_for_tests(
+ changed_tests: Set[str], build_variant: str, evg_conf: EvergreenProjectConfig,
+ exclude_suites: Optional[List] = None, exclude_tasks: Optional[List] = None) -> Dict:
+ """
+ Create a list of tests by task for the given tests.
+
+ :param changed_tests: Set of test that have changed.
+ :param build_variant: Build variant to collect tasks from.
+ :param evg_conf: Evergreen configuration.
+ :param exclude_suites: Suites to exclude.
+ :param exclude_tasks: Tasks to exclude.
+ :return: Tests by task.
+ """
+ if not exclude_suites:
+ exclude_suites = []
+ if not exclude_tasks:
+ exclude_tasks = []
+
+ suites = get_suites(suite_files=SUITE_FILES, test_files=changed_tests)
+ LOGGER.debug("Found suites to run", suites=suites)
+
+ tests_by_executor = create_executor_list(suites, exclude_suites)
+ LOGGER.debug("tests_by_executor", tests_by_executor=tests_by_executor)
+
+ return create_task_list(evg_conf, build_variant, tests_by_executor, exclude_tasks)
+
+
+def create_tests_by_task(build_variant: str, repo: Repo, evg_conf: EvergreenProjectConfig) -> Dict:
"""
Create a list of tests by task.
- :param options: Options.
+ :param build_variant: Build variant to collect tasks from.
:param repo: Git repo being tracked.
- :return: Tests by task
+ :param evg_conf: Evergreen configuration.
+ :return: Tests by task.
"""
- # Parse the Evergreen project configuration file.
- evergreen_conf = evergreen.parse_evergreen_file(EVERGREEN_FILE)
-
changed_tests = find_changed_tests(repo)
exclude_suites, exclude_tasks, exclude_tests = find_excludes(SELECTOR_FILE)
changed_tests = filter_tests(changed_tests, exclude_tests)
if changed_tests:
- suites = resmokelib.suitesconfig.get_suites(suite_files=SUITE_FILES,
- test_files=changed_tests)
- tests_by_executor = create_executor_list(suites, exclude_suites)
- tests_by_task = create_task_list(evergreen_conf, options.buildvariant, tests_by_executor,
- exclude_tasks)
- else:
- print("No new or modified tests found.")
- tests_by_task = {}
-
- return tests_by_task
+ return create_task_list_for_tests(changed_tests, build_variant, evg_conf, exclude_suites,
+ exclude_tasks)
+ LOGGER.info("No new or modified tests found.")
+ return {}
-def create_generate_tasks_file(evg_api, options, tests_by_task):
- """Create the Evergreen generate.tasks file."""
+# pylint: disable=too-many-arguments
+def create_generate_tasks_file(tests_by_task: Dict, generate_config: GenerateConfig,
+ repeat_config: RepeatConfig, evg_api: Optional[EvergreenApi],
+ task_prefix: str = 'burn_in', include_gen_task: bool = True) -> Dict:
+ """
+ Create an Evergreen generate.tasks file to run the given tasks and tests.
+
+ :param tests_by_task: Dictionary of tests and tasks to run.
+ :param generate_config: Information about how burn_in should generate tasks.
+ :param repeat_config: Information about how burn_in should repeat tests.
+ :param evg_api: Evergreen api.
+ :param task_prefix: Prefix to start generated task's name with.
+ :param include_gen_task: Should the generating task be included in the display task.
+ :returns: Configuration to pass to 'generate.tasks'.
+ """
evg_config = Configuration()
- evg_config = create_generate_tasks_config(evg_api, evg_config, options, tests_by_task,
- include_gen_task=True)
+ evg_config = create_generate_tasks_config(
+ evg_config, tests_by_task, generate_config, repeat_config, evg_api,
+ include_gen_task=include_gen_task, task_prefix=task_prefix)
json_config = evg_config.to_map()
tasks_to_create = len(json_config.get('tasks', []))
if tasks_to_create > MAX_TASKS_TO_CREATE:
LOGGER.warning("Attempting to create more tasks than max, aborting", tasks=tasks_to_create,
max=MAX_TASKS_TO_CREATE)
sys.exit(1)
- _write_json_file(json_config, options.generate_tasks_file)
-
+ return json_config
-def run_tests(no_exec, tests_by_task, resmoke_cmd, report_file):
- """Run the tests if not in no_exec mode."""
- if no_exec:
- return
+def run_tests(tests_by_task: Dict, resmoke_cmd: [str]):
+ """
+ Run the given tests locally.
- test_results = {"failures": 0, "results": []}
+ This function will exit with a non-zero return code on test failure.
+ :param tests_by_task: Dictionary of tests to run.
+ :param resmoke_cmd: Parameter to use when calling resmoke.
+ """
for task in sorted(tests_by_task):
+ log = LOGGER.bind(task=task)
new_resmoke_cmd = copy.deepcopy(resmoke_cmd)
new_resmoke_cmd.extend(shlex.split(tests_by_task[task]["resmoke_args"]))
new_resmoke_cmd.extend(tests_by_task[task]["tests"])
+ log.debug("starting execution of task")
try:
subprocess.check_call(new_resmoke_cmd, shell=False)
except subprocess.CalledProcessError as err:
- print("Resmoke returned an error with task:", task)
- _update_report_data(test_results, report_file, task)
- _write_json_file(test_results, report_file)
+ log.warning("Resmoke returned an error with task", error=err.returncode)
sys.exit(err.returncode)
- # Note - _update_report_data concatenates to test_results the current results to the
- # previously saved results.
- _update_report_data(test_results, report_file, task)
-
- _write_json_file(test_results, report_file)
+def _configure_logging(verbose: bool):
+ """
+ Configure logging for the application.
-def configure_logging():
- """Configure logging for the application."""
+ :param verbose: If True set log level to DEBUG.
+ """
+ level = logging.DEBUG if verbose else logging.INFO
logging.basicConfig(
format="[%(asctime)s - %(name)s - %(levelname)s] %(message)s",
- level=logging.DEBUG,
+ level=level,
stream=sys.stdout,
)
for log_name in EXTERNAL_LOGGERS:
logging.getLogger(log_name).setLevel(logging.WARNING)
-def main(evg_api):
- """Execute Main program."""
-
- configure_logging()
- options, args = parse_command_line()
- resmoke_cmd = _set_resmoke_cmd(options, args)
-
- # Load the dict of tests to run.
- if options.test_list_file:
- tests_by_task = _load_tests_file(options.test_list_file)
- # If there are no tests to run, carry on.
- if tests_by_task is None:
- test_results = {"failures": 0, "results": []}
- _write_json_file(test_results, options.report_file)
- sys.exit(0)
+def _get_evg_api(evg_api_config: str, local_mode: bool) -> Optional[EvergreenApi]:
+ """
+ Get an instance of the Evergreen Api.
- # Run the executor finder.
- else:
- repo = Repo(".")
- tests_by_task = create_tests_by_task(options, repo)
+ :param evg_api_config: Config file with evg auth information.
+ :param local_mode: If true, do not connect to Evergreen API.
+ :return: Evergreen Api instance.
+ """
+ if not local_mode:
+ return RetryingEvergreenApi.get_api(config_file=evg_api_config)
+ return None
- if options.test_list_outfile:
- _write_json_file(tests_by_task, options.test_list_outfile)
- if options.generate_tasks_file:
- create_generate_tasks_file(evg_api, options, tests_by_task)
+@click.command()
+@click.option("--no-exec", "no_exec", default=False, is_flag=True,
+ help="Do not execute the found tests.")
+@click.option("--generate-tasks-file", "generate_tasks_file", default=None, metavar='FILE',
+ help="Run in 'generate.tasks' mode. Store task config to given file.")
+@click.option("--build-variant", "build_variant", default=None, metavar='BUILD_VARIANT',
+ help="Tasks to run will be selected from this build variant.")
+@click.option("--run-build-variant", "run_build_variant", default=None, metavar='BUILD_VARIANT',
+ help="Burn in tasks will be generated on this build variant.")
+@click.option("--distro", "distro", default=None, metavar='DISTRO',
+ help="The distro the tasks will execute on.")
+@click.option("--project", "project", default=DEFAULT_PROJECT, metavar='PROJECT',
+ help="The evergreen project the tasks will execute on.")
+@click.option("--repeat-tests", "repeat_tests_num", default=None, type=int,
+ help="Number of times to repeat tests.")
+@click.option("--repeat-tests-min", "repeat_tests_min", default=None, type=int,
+ help="The minimum number of times to repeat tests if time option is specified.")
+@click.option("--repeat-tests-max", "repeat_tests_max", default=None, type=int,
+ help="The maximum number of times to repeat tests if time option is specified.")
+@click.option("--repeat-tests-secs", "repeat_tests_secs", default=None, type=int, metavar="SECONDS",
+ help="Repeat tests for the given time (in secs).")
+@click.option("--evg-api-config", "evg_api_config", default=CONFIG_FILE, metavar="FILE",
+ help="Configuration file with connection info for Evergreen API.")
+@click.option("--local", "local_mode", default=False, is_flag=True,
+ help="Local mode. Do not call out to evergreen api.")
+@click.option("--verbose", "verbose", default=False, is_flag=True, help="Enable extra logging.")
+@click.argument("resmoke_args", nargs=-1, type=click.UNPROCESSED)
+# pylint: disable=too-many-arguments,too-many-locals
+def main(build_variant, run_build_variant, distro, project, generate_tasks_file, no_exec,
+ repeat_tests_num, repeat_tests_min, repeat_tests_max, repeat_tests_secs, resmoke_args,
+ local_mode, evg_api_config, verbose):
+ """
+ Run new or changed tests in repeated mode to validate their stability.
+
+    burn_in_tests detects jstests that are new or changed since the last git commit and then
+ runs those tests in a loop to validate their reliability.
+
+ The `--repeat-*` arguments allow configuration of how burn_in_tests repeats tests. Tests can
+ either be repeated a specified number of times with the `--repeat-tests` option, or they can
+ be repeated for a certain time period with the `--repeat-tests-secs` option.
+
+ There are two modes that burn_in_tests can run in:
+
+ (1) Normal mode: by default burn_in_tests will attempt to run all detected tests the
+ configured number of times. This is useful if you have a test or tests you would like to
+ check before submitting a patch to evergreen.
+
+    (2) By specifying the `--generate-tasks-file` option, burn_in_tests will generate a configuration
+ file that can then be sent to the Evergreen 'generate.tasks' command to create evergreen tasks
+ to do all the test executions. This is the mode used to run tests in patch builds.
+
+    NOTE: There is currently a limit on the number of tasks burn_in_tests will attempt to generate
+    in evergreen. The limit is 1000. If you change enough tests that more than 1000 tasks would
+    be generated, burn_in_tests will fail. This is to avoid generating more tasks than evergreen
+    can handle.
+ \f
+
+ :param build_variant: Build variant to query tasks from.
+    :param run_build_variant: Build variant to actually run against.
+ :param distro: Distro to run tests on.
+ :param project: Project to run tests on.
+ :param generate_tasks_file: Create a generate tasks configuration in this file.
+    :param no_exec: Just perform test discovery, do not execute the tests.
+ :param repeat_tests_num: Repeat each test this number of times.
+ :param repeat_tests_min: Repeat each test at least this number of times.
+ :param repeat_tests_max: Once this number of repetitions has been reached, stop repeating.
+ :param repeat_tests_secs: Continue repeating tests for this number of seconds.
+ :param resmoke_args: Arguments to pass through to resmoke.
+ :param local_mode: Don't call out to the evergreen API (used for testing).
+ :param evg_api_config: Location of configuration file to connect to evergreen.
+ :param verbose: Log extra debug information.
+ """
+ _configure_logging(verbose)
+
+ evg_conf = parse_evergreen_file(EVERGREEN_FILE)
+ repeat_config = RepeatConfig(repeat_tests_secs=repeat_tests_secs,
+ repeat_tests_min=repeat_tests_min,
+ repeat_tests_max=repeat_tests_max,
+ repeat_tests_num=repeat_tests_num).validate() # yapf: disable
+ generate_config = GenerateConfig(build_variant=build_variant,
+ run_build_variant=run_build_variant,
+ distro=distro,
+ project=project).validate(evg_conf) # yapf: disable
+ evg_api = _get_evg_api(evg_api_config, local_mode)
+ repo = Repo(".")
+ resmoke_cmd = _set_resmoke_cmd(repeat_config, list(resmoke_args))
+
+ tests_by_task = create_tests_by_task(build_variant, repo, evg_conf)
+
+ if generate_tasks_file:
+ json_config = create_generate_tasks_file(tests_by_task, generate_config, repeat_config,
+ evg_api)
+ _write_json_file(json_config, generate_tasks_file)
+ elif not no_exec:
+ run_tests(tests_by_task, resmoke_cmd)
else:
- run_tests(options.no_exec, tests_by_task, resmoke_cmd, options.report_file)
+ LOGGER.info("Not running tests due to 'no_exec' option.")
if __name__ == "__main__":
- main(RetryingEvergreenApi.get_api(config_file=CONFIG_FILE))
+ main() # pylint: disable=no-value-for-parameter
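For reference, a minimal sketch of how the new RepeatConfig maps its options onto resmoke arguments; the values below are made up, but the behaviour follows generate_resmoke_options() as added above:

    from buildscripts.burn_in_tests import RepeatConfig

    # Time-based repetition, bounded between 2 and 10 executions per test (example values).
    repeat = RepeatConfig(repeat_tests_secs=600, repeat_tests_min=2, repeat_tests_max=10).validate()
    repeat.generate_resmoke_options()
    # -> " --repeatTestsSecs=600  --repeatTestsMin=2  --repeatTestsMax=10 "

    # Count-based repetition; with no options at all the REPEAT_SUITES default (2) is used.
    RepeatConfig(repeat_tests_num=5).generate_resmoke_options()
    # -> " --repeatSuites=5 "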
diff --git a/buildscripts/ciconfig/evergreen.py b/buildscripts/ciconfig/evergreen.py
index 987e1b95a61..47cbd289b62 100644
--- a/buildscripts/ciconfig/evergreen.py
+++ b/buildscripts/ciconfig/evergreen.py
@@ -251,6 +251,10 @@ class Variant(object):
for task in self.tasks:
self.distro_names.update(task.run_on)
+ def __repr__(self):
+ """Create a string version of object for debugging."""
+ return self.name
+
@property
def name(self):
"""Get the build variant name."""
@@ -331,6 +335,10 @@ class VariantTask(Task):
self.run_on = run_on
self.variant = variant
+ def __repr__(self):
+ """Create a string representation of object for debugging."""
+ return f"{self.variant}: {self.name}"
+
@property
def combined_resmoke_args(self):
"""Get the combined resmoke arguments.
diff --git a/buildscripts/evergreen_generate_resmoke_tasks.py b/buildscripts/evergreen_generate_resmoke_tasks.py
index 80ef5cb8e2b..ab1b6984926 100755
--- a/buildscripts/evergreen_generate_resmoke_tasks.py
+++ b/buildscripts/evergreen_generate_resmoke_tasks.py
@@ -13,7 +13,6 @@ import math
import os
import re
import sys
-from collections import defaultdict
from collections import namedtuple
from distutils.util import strtobool # pylint: disable=no-name-in-module
@@ -35,9 +34,12 @@ if __name__ == "__main__" and __package__ is None:
import buildscripts.resmokelib.suitesconfig as suitesconfig # pylint: disable=wrong-import-position
import buildscripts.util.read_config as read_config # pylint: disable=wrong-import-position
import buildscripts.util.taskname as taskname # pylint: disable=wrong-import-position
-import buildscripts.util.testname as testname # pylint: disable=wrong-import-position
import buildscripts.util.teststats as teststats # pylint: disable=wrong-import-position
+# pylint: disable=wrong-import-position
+from buildscripts.patch_builds.task_generation import TimeoutInfo, resmoke_commands
+# pylint: enable=wrong-import-position
+
LOGGER = logging.getLogger(__name__)
TEST_SUITE_DIR = os.path.join("buildscripts", "resmokeconfig", "suites")
@@ -359,28 +361,30 @@ class EvergreenConfigGenerator(object):
return variables
- def _add_timeout_command(self, commands, max_test_runtime, expected_suite_runtime):
+ def _get_timeout_command(self, max_test_runtime, expected_suite_runtime, use_default):
"""
Add an evergreen command to override the default timeouts to the list of commands.
- :param commands: List of commands to add timeout command to.
:param max_test_runtime: Maximum runtime of any test in the sub-suite.
:param expected_suite_runtime: Expected runtime of the entire sub-suite.
+ :param use_default: Use default timeouts.
+ :return: Timeout information.
"""
repeat_factor = self.options.repeat_suites
- if max_test_runtime or expected_suite_runtime:
- cmd_timeout = CmdTimeoutUpdate()
+ if (max_test_runtime or expected_suite_runtime) and not use_default:
+ timeout = None
+ exec_timeout = None
if max_test_runtime:
timeout = calculate_timeout(max_test_runtime, 3) * repeat_factor
LOGGER.debug("Setting timeout to: %d (max=%d, repeat=%d)", timeout,
max_test_runtime, repeat_factor)
- cmd_timeout.timeout(timeout)
if expected_suite_runtime:
exec_timeout = calculate_timeout(expected_suite_runtime, 3) * repeat_factor
LOGGER.debug("Setting exec_timeout to: %d (runtime=%d, repeat=%d)", exec_timeout,
expected_suite_runtime, repeat_factor)
- cmd_timeout.exec_timeout(exec_timeout)
- commands.append(cmd_timeout.validate().resolve())
+ return TimeoutInfo.overridden(timeout=timeout, exec_timeout=exec_timeout)
+
+ return TimeoutInfo.default_timeout()
@staticmethod
def _is_task_dependency(task, possible_dependency):
@@ -418,13 +422,11 @@ class EvergreenConfigGenerator(object):
target_suite_file = os.path.join(CONFIG_DIR, sub_suite_name)
run_tests_vars = self._get_run_tests_vars(target_suite_file)
- commands = []
- if not self.options.use_default_timeouts:
- self._add_timeout_command(commands, max_test_runtime, expected_suite_runtime)
- commands.append(CommandDefinition().function("do setup"))
- if self.options.use_multiversion:
- commands.append(CommandDefinition().function("do multiversion setup"))
- commands.append(CommandDefinition().function("run generated tests").vars(run_tests_vars))
+ use_multiversion = self.options.use_multiversion
+ timeout_info = self._get_timeout_command(max_test_runtime, expected_suite_runtime,
+ self.options.use_default_timeouts)
+ commands = resmoke_commands("run generated tests", run_tests_vars, timeout_info,
+ use_multiversion)
self._add_dependencies(task).commands(commands)
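As a rough illustration of the refactor above (all values hypothetical), the generator now builds a TimeoutInfo object and lets resmoke_commands, defined in the new buildscripts/patch_builds/task_generation.py below, assemble the command list instead of appending CmdTimeoutUpdate and CommandDefinition objects by hand:

    from buildscripts.patch_builds.task_generation import TimeoutInfo, resmoke_commands

    # Hypothetical timeouts and run_tests_vars for one generated sub-suite.
    timeout_info = TimeoutInfo.overridden(timeout=900, exec_timeout=5400)
    commands = resmoke_commands("run generated tests", {"resmoke_args": "--suites=core_0"},
                                timeout_info, use_multiversion=None)
    # -> [timeout override command, "do setup", "run generated tests"]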
diff --git a/buildscripts/patch_builds/task_generation.py b/buildscripts/patch_builds/task_generation.py
new file mode 100644
index 00000000000..576ac95b15a
--- /dev/null
+++ b/buildscripts/patch_builds/task_generation.py
@@ -0,0 +1,161 @@
+"""Utilities to help generate evergreen tasks."""
+from typing import Optional, List
+
+from shrub.command import CommandDefinition
+from shrub.config import Configuration
+from shrub.operations import CmdTimeoutUpdate
+from shrub.task import TaskDependency
+from shrub.variant import TaskSpec, DisplayTaskDefinition
+
+
+def _cmd_by_name(cmd_name):
+ """
+ Create a command definition of a function with the given name.
+
+ :param cmd_name: Name of function.
+ :return: Command Definition for function.
+ """
+ return CommandDefinition().function(cmd_name)
+
+
+def resmoke_commands(run_tests_fn_name, run_tests_vars, timeout_info, use_multiversion=None):
+ """
+ Create a list of commands to run a resmoke task.
+
+ :param run_tests_fn_name: Name of function to run resmoke tests.
+ :param run_tests_vars: Dictionary of variables to pass to run_tests function.
+ :param timeout_info: Timeout info for task.
+ :param use_multiversion: If True include multiversion setup.
+ :return: List of commands to run a resmoke task.
+ """
+ commands = [
+ timeout_info.cmd,
+ _cmd_by_name("do setup"),
+ _cmd_by_name("do multiversion setup") if use_multiversion else None,
+ _cmd_by_name(run_tests_fn_name).vars(run_tests_vars),
+ ]
+
+ return [cmd for cmd in commands if cmd]
+
+
+class TimeoutInfo(object):
+ """Timeout information for a generated task."""
+
+ def __init__(self, use_defaults, exec_timeout=None, timeout=None):
+ """
+ Create timeout information.
+
+ :param use_defaults: Don't overwrite any timeouts.
+ :param exec_timeout: Exec timeout value to overwrite.
+ :param timeout: Timeout value to overwrite.
+ """
+ self.use_defaults = use_defaults
+ self.exec_timeout = exec_timeout
+ self.timeout = timeout
+
+ @classmethod
+ def default_timeout(cls):
+ """Create an instance of TimeoutInfo that uses default timeouts."""
+ return cls(True)
+
+ @classmethod
+ def overridden(cls, exec_timeout=None, timeout=None):
+ """
+ Create an instance of TimeoutInfo that overwrites timeouts.
+
+ :param exec_timeout: Exec timeout value to overwrite.
+ :param timeout: Timeout value to overwrite.
+ :return: TimeoutInfo that overwrites given timeouts.
+ """
+ if not exec_timeout and not timeout:
+ raise ValueError("Must override either 'exec_timeout' or 'timeout'")
+ return cls(False, exec_timeout=exec_timeout, timeout=timeout)
+
+ @property
+ def cmd(self):
+ """Create a command that sets timeouts as specified."""
+ if not self.use_defaults:
+ timeout_cmd = CmdTimeoutUpdate()
+ if self.timeout:
+ timeout_cmd.timeout(self.timeout)
+
+ if self.exec_timeout:
+ timeout_cmd.exec_timeout(self.exec_timeout)
+ return timeout_cmd.validate().resolve()
+
+ return None
+
+ def __repr__(self):
+ """Create a string representation for debugging."""
+ if self.use_defaults:
+ return "<No Timeout Override>"
+ return f"<exec_timeout={self.exec_timeout}, timeout={self.timeout}>"
+
+
+class TaskList(object):
+ """A list of evergreen tasks to be generated together."""
+
+ def __init__(self, evg_config: Configuration):
+ """
+ Create a list of evergreen tasks to create.
+
+ :param evg_config: Evergreen configuration to add tasks to.
+ """
+ self.evg_config = evg_config
+ self.task_specs = []
+ self.task_names = []
+
+ def add_task(self, name: str, commands: [CommandDefinition],
+ depends_on: Optional[List[str]] = None, distro: Optional[str] = None):
+ """
+ Add a new task to the task list.
+
+ :param name: Name of task to add.
+ :param commands: List of commands comprising task.
+ :param depends_on: Any dependencies for the task.
+ :param distro: Distro task should be run on.
+ """
+ task = self.evg_config.task(name)
+ task.commands(commands)
+
+ if depends_on:
+ for dep in depends_on:
+ task.dependency(TaskDependency(dep))
+
+ task_spec = TaskSpec(name)
+ if distro:
+ task_spec.distro(distro)
+ self.task_specs.append(task_spec)
+ self.task_names.append(name)
+
+ def display_task(self, display_name: str, existing_tasks: Optional[List[str]] = None) \
+ -> DisplayTaskDefinition:
+ """
+ Create a display task for the list of tasks.
+
+ Note: This function should be called after all calls to `add_task` have been done.
+
+ :param display_name: Name of the display task.
+ :param existing_tasks: Any existing tasks that should be part of the display task.
+ :return: Display task object.
+ """
+ execution_tasks = list(self.task_names)  # copy so existing tasks do not mutate task_names
+ if existing_tasks:
+ execution_tasks.extend(existing_tasks)
+
+ display_task = DisplayTaskDefinition(display_name).execution_tasks(execution_tasks)
+ return display_task
+
+ def add_to_variant(self, variant_name: str, display_name: Optional[str] = None,
+ existing_tasks: Optional[List[str]] = None):
+ """
+ Add this task list to a build variant.
+
+ :param variant_name: Variant to add to.
+ :param display_name: Display name to add tasks under.
+ :param existing_tasks: Any existing tasks that should be added to the display group.
+ """
+ variant = self.evg_config.variant(variant_name)
+ variant.tasks(self.task_specs)
+ if display_name:
+ variant.display_task(self.display_task(display_name, existing_tasks))
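How the pieces compose into a generated-tasks payload, as an illustrative sketch (the task, variant, and dependency names below are invented for the example, not taken from etc/evergreen.yml):

    from shrub.config import Configuration

    from buildscripts.patch_builds.task_generation import (TaskList, TimeoutInfo,
                                                            resmoke_commands)

    evg_config = Configuration()
    task_list = TaskList(evg_config)

    # One generated task: do setup, timeout override, then run tests.
    commands = resmoke_commands("run tests",
                                {"resmoke_args": "--suites=core jstests/core/example.js"},
                                TimeoutInfo.overridden(timeout=600))
    task_list.add_task("burn_in:example_task_0_my_variant", commands, depends_on=["compile"])

    # Attach the generated tasks to a build variant, grouped under a display task.
    task_list.add_to_variant("my_variant", display_name="burn_in_tests")
    print(evg_config.to_map())  # dict that can be dumped as the generated task config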
diff --git a/buildscripts/resmokelib/testing/suite.py b/buildscripts/resmokelib/testing/suite.py
index 02f45f832ed..a8f78057a9a 100644
--- a/buildscripts/resmokelib/testing/suite.py
+++ b/buildscripts/resmokelib/testing/suite.py
@@ -86,6 +86,10 @@ class Suite(object): # pylint: disable=too-many-instance-attributes
# report intermediate results.
self._partial_reports = None
+ def __repr__(self):
+ """Create a string representation of object for debugging."""
+ return f"{self.test_kind}:{self._suite_name}"
+
def _get_tests_for_kind(self, test_kind):
"""Return the tests to run based on the 'test_kind'-specific filtering policy."""
selector_config = self.get_selector_config()
diff --git a/buildscripts/tests/patch_builds/__init__.py b/buildscripts/tests/patch_builds/__init__.py
new file mode 100644
index 00000000000..32d40680e1c
--- /dev/null
+++ b/buildscripts/tests/patch_builds/__init__.py
@@ -0,0 +1 @@
+"""Unit tests for buildscripts.patch_builds patckage."""
diff --git a/buildscripts/tests/patch_builds/test_task_generation.py b/buildscripts/tests/patch_builds/test_task_generation.py
new file mode 100644
index 00000000000..8c015e12406
--- /dev/null
+++ b/buildscripts/tests/patch_builds/test_task_generation.py
@@ -0,0 +1,223 @@
+"""Unittests for buildscripts.patch_builds.task_generation.py"""
+import unittest
+
+from shrub.config import Configuration
+
+import buildscripts.patch_builds.task_generation as under_test
+
+# pylint: disable=missing-docstring,protected-access,too-many-lines,no-self-use
+
+
+class TestResmokeCommand(unittest.TestCase):
+ def test_basic_command(self):
+ run_tests = "run tests"
+ test_vars = {}
+ timeout_info = under_test.TimeoutInfo.default_timeout()
+
+ commands = under_test.resmoke_commands(run_tests, test_vars, timeout_info)
+
+ # 2 expected commands: 1 for setup + 1 for running tests.
+ self.assertEqual(2, len(commands))
+
+ def test_with_multiversion(self):
+ run_tests = "run tests"
+ test_vars = {}
+ timeout_info = under_test.TimeoutInfo.default_timeout()
+
+ commands = under_test.resmoke_commands(run_tests, test_vars, timeout_info,
+ use_multiversion="multiversion")
+
+ # 3 expected commands: 1 for setup + 1 for running tests + 1 for multiversion setup.
+ self.assertEqual(3, len(commands))
+
+ def test_with_timeout(self):
+ run_tests = "run tests"
+ test_vars = {}
+ timeout_info = under_test.TimeoutInfo.overridden(timeout=5)
+
+ commands = under_test.resmoke_commands(run_tests, test_vars, timeout_info)
+
+ # 3 expected commands: 1 for setup + 1 for running tests + 1 for timeout.
+ self.assertEqual(3, len(commands))
+
+ def test_with_everything(self):
+ run_tests = "run tests"
+ test_vars = {}
+ timeout_info = under_test.TimeoutInfo.overridden(timeout=5)
+
+ commands = under_test.resmoke_commands(run_tests, test_vars, timeout_info,
+ use_multiversion="multiversion")
+
+ # 4 expected commands: 1 for setup + 1 for running tests + 1 for multiversion setup +
+ # 1 for timeout.
+ self.assertEqual(4, len(commands))
+
+
+class TestTimeoutInfo(unittest.TestCase):
+ def test_default_timeout(self):
+ timeout_info = under_test.TimeoutInfo.default_timeout()
+
+ self.assertIsNone(timeout_info.cmd)
+
+ def test_timeout_only_set(self):
+ timeout = 5
+ timeout_info = under_test.TimeoutInfo.overridden(timeout=timeout)
+
+ cmd = timeout_info.cmd.to_map()
+
+ self.assertEqual("timeout.update", cmd["command"])
+ self.assertEqual(timeout, cmd["params"]["timeout_secs"])
+ self.assertNotIn("exec_timeout_secs", cmd["params"])
+
+ def test_exec_timeout_only_set(self):
+ exec_timeout = 5
+ timeout_info = under_test.TimeoutInfo.overridden(exec_timeout=exec_timeout)
+
+ cmd = timeout_info.cmd.to_map()
+
+ self.assertEqual("timeout.update", cmd["command"])
+ self.assertEqual(exec_timeout, cmd["params"]["exec_timeout_secs"])
+ self.assertNotIn("timeout_secs", cmd["params"])
+
+ def test_both_timeouts_set(self):
+ timeout = 3
+ exec_timeout = 5
+ timeout_info = under_test.TimeoutInfo.overridden(exec_timeout=exec_timeout, timeout=timeout)
+
+ cmd = timeout_info.cmd.to_map()
+
+ self.assertEqual("timeout.update", cmd["command"])
+ self.assertEqual(exec_timeout, cmd["params"]["exec_timeout_secs"])
+ self.assertEqual(timeout, cmd["params"]["timeout_secs"])
+
+ def test_override_with_no_values(self):
+ with self.assertRaises(ValueError):
+ under_test.TimeoutInfo.overridden()
+
+
+class TestTaskList(unittest.TestCase):
+ def test_adding_a_task(self):
+ config = Configuration()
+ task_list = under_test.TaskList(config)
+
+ func = "test"
+ task = "task 1"
+ variant = "variant 1"
+ task_list.add_task(task, [under_test._cmd_by_name(func)])
+ task_list.add_to_variant(variant)
+
+ cfg_dict = config.to_map()
+
+ cmd_dict = cfg_dict["tasks"][0]
+ self.assertEqual(task, cmd_dict["name"])
+ self.assertEqual(func, cmd_dict["commands"][0]["func"])
+
+ self.assertEqual(task, cfg_dict["buildvariants"][0]["tasks"][0]["name"])
+
+ def test_adding_a_task_with_distro(self):
+ config = Configuration()
+ task_list = under_test.TaskList(config)
+
+ func = "test"
+ task = "task 1"
+ variant = "variant 1"
+ distro = "distro 1"
+ task_list.add_task(task, [under_test._cmd_by_name(func)], distro=distro)
+ task_list.add_to_variant(variant)
+
+ cfg_dict = config.to_map()
+
+ cmd_dict = cfg_dict["tasks"][0]
+ self.assertEqual(task, cmd_dict["name"])
+ self.assertEqual(func, cmd_dict["commands"][0]["func"])
+
+ self.assertEqual(task, cfg_dict["buildvariants"][0]["tasks"][0]["name"])
+ self.assertIn(distro, cfg_dict["buildvariants"][0]["tasks"][0]["distros"])
+
+ def test_adding_a_task_with_dependencies(self):
+ config = Configuration()
+ task_list = under_test.TaskList(config)
+
+ func = "test"
+ task = "task 1"
+ variant = "variant 1"
+ dependencies = ["dep task 1", "dep task 2"]
+ task_list.add_task(task, [under_test._cmd_by_name(func)], depends_on=dependencies)
+ task_list.add_to_variant(variant)
+
+ cfg_dict = config.to_map()
+
+ cmd_dict = cfg_dict["tasks"][0]
+ self.assertEqual(task, cmd_dict["name"])
+ self.assertEqual(func, cmd_dict["commands"][0]["func"])
+ for dep in dependencies:
+ self.assertIn(dep, {d["name"] for d in cmd_dict["depends_on"]})
+
+ task_dict = cfg_dict["buildvariants"][0]["tasks"][0]
+ self.assertEqual(task, task_dict["name"])
+
+ def test_adding_multiple_tasks(self):
+ config = Configuration()
+ task_list = under_test.TaskList(config)
+
+ func = "test"
+ variant = "variant 1"
+ tasks = ["task 1", "task 2"]
+ for task in tasks:
+ task_list.add_task(task, [under_test._cmd_by_name(func)])
+
+ task_list.add_to_variant(variant)
+
+ cfg_dict = config.to_map()
+
+ self.assertEqual(len(tasks), len(cfg_dict["tasks"]))
+ self.assertEqual(len(tasks), len(cfg_dict["buildvariants"][0]["tasks"]))
+
+ def test_using_display_task(self):
+ config = Configuration()
+ task_list = under_test.TaskList(config)
+
+ func = "test"
+ variant = "variant 1"
+ tasks = ["task 1", "task 2"]
+ for task in tasks:
+ task_list.add_task(task, [under_test._cmd_by_name(func)])
+
+ display_task = "display_task"
+ task_list.add_to_variant(variant, display_task)
+
+ cfg_dict = config.to_map()
+
+ self.assertEqual(len(tasks), len(cfg_dict["tasks"]))
+ variant_dict = cfg_dict["buildvariants"][0]
+ self.assertEqual(len(tasks), len(variant_dict["tasks"]))
+ dt_dict = variant_dict["display_tasks"][0]
+ self.assertEqual(display_task, dt_dict["name"])
+ for task in tasks:
+ self.assertIn(task, dt_dict["execution_tasks"])
+
+ def test_using_display_task_with_existing_tasks(self):
+ config = Configuration()
+ task_list = under_test.TaskList(config)
+
+ func = "test"
+ variant = "variant 1"
+ tasks = ["task 1", "task 2"]
+ for task in tasks:
+ task_list.add_task(task, [under_test._cmd_by_name(func)])
+
+ display_task = "display_task"
+ existing_tasks = ["other task 1", "other task 2"]
+ task_list.add_to_variant(variant, display_task, existing_tasks)
+
+ cfg_dict = config.to_map()
+
+ self.assertEqual(len(tasks), len(cfg_dict["tasks"]))
+ variant_dict = cfg_dict["buildvariants"][0]
+ self.assertEqual(len(tasks), len(variant_dict["tasks"]))
+ dt_dict = variant_dict["display_tasks"][0]
+ self.assertEqual(display_task, dt_dict["name"])
+ for task in tasks:
+ self.assertIn(task, dt_dict["execution_tasks"])
+ for task in existing_tasks:
+ self.assertIn(task, dt_dict["execution_tasks"])
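For reference, the dictionaries these assertions walk have roughly the following shape; this is a hand-written sketch of Configuration.to_map() output combining the scenarios above, not output captured from a real run:

    expected_shape = {
        "tasks": [
            {"name": "task 1",
             "commands": [{"func": "test"}],
             "depends_on": [{"name": "dep task 1"}, {"name": "dep task 2"}]},
        ],
        "buildvariants": [
            {"name": "variant 1",
             "tasks": [{"name": "task 1", "distros": ["distro 1"]}],
             "display_tasks": [{"name": "display_task",
                                "execution_tasks": ["task 1", "other task 1"]}]},
        ],
    }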
diff --git a/buildscripts/tests/test_burn_in_tags.py b/buildscripts/tests/test_burn_in_tags.py
index eccb1ad4740..3fb48259975 100644
--- a/buildscripts/tests/test_burn_in_tags.py
+++ b/buildscripts/tests/test_burn_in_tags.py
@@ -1,15 +1,12 @@
"""Unit tests for the burn_in_tags.py script."""
-import datetime
import os
import unittest
-import mock
-
-from mock import Mock
+from unittest.mock import MagicMock, patch
from shrub.config import Configuration
-from buildscripts import burn_in_tags
+import buildscripts.burn_in_tags as under_test
import buildscripts.ciconfig.evergreen as _evergreen
@@ -39,16 +36,19 @@ def get_expansions_data():
"project": "fake_project",
} # yapf: disable
+
def get_evergreen_config():
return _evergreen.parse_evergreen_file(TEST_FILE_PATH, evergreen_binary=None)
class TestCreateEvgBuildVariantMap(unittest.TestCase):
- @mock.patch(ns("evergreen"))
- def test_create_evg_buildvariant_map(self, evergreen_mock):
- evergreen_mock.parse_evergreen_file.return_value = get_evergreen_config()
+ def test_create_evg_buildvariant_map(self):
+ evg_conf_mock = get_evergreen_config()
expansions_file_data = {"build_variant": "enterprise-rhel-62-64-bit"}
- buildvariant_map = burn_in_tags._create_evg_buildvariant_map(expansions_file_data)
+
+ buildvariant_map = under_test._create_evg_build_variant_map(expansions_file_data,
+ evg_conf_mock)
+
expected_buildvariant_map = {
"enterprise-rhel-62-64-bit-majority-read-concern-off":
"enterprise-rhel-62-64-bit-majority-read-concern-off-required",
@@ -57,24 +57,27 @@ class TestCreateEvgBuildVariantMap(unittest.TestCase):
}
self.assertEqual(buildvariant_map, expected_buildvariant_map)
- @mock.patch(ns("evergreen"))
- def test_create_evg_buildvariant_map_no_base_variants(self, evergreen_mock):
- evergreen_mock.parse_evergreen_file.return_value = get_evergreen_config()
+ def test_create_evg_buildvariant_map_no_base_variants(self):
+ evg_conf_mock = MagicMock()
+ evg_conf_mock.parse_evergreen_file.return_value = get_evergreen_config()
expansions_file_data = {"build_variant": "buildvariant-without-burn-in-tag-buildvariants"}
- buildvariant_map = burn_in_tags._create_evg_buildvariant_map(expansions_file_data)
+
+ buildvariant_map = under_test._create_evg_build_variant_map(expansions_file_data,
+ evg_conf_mock)
+
self.assertEqual(buildvariant_map, {})
class TestGenerateEvgBuildVariants(unittest.TestCase):
- @mock.patch(ns("evergreen"))
- def test_generate_evg_buildvariant_one_base_variant(self, evergreen_mock):
- evergreen_mock.parse_evergreen_file.return_value = get_evergreen_config()
+ def test_generate_evg_buildvariant_one_base_variant(self):
+ evg_conf_mock = get_evergreen_config()
base_variant = "enterprise-rhel-62-64-bit-inmem"
generated_variant = "enterprise-rhel-62-64-bit-inmem-required"
burn_in_tags_gen_variant = "enterprise-rhel-62-64-bit"
shrub_config = Configuration()
- burn_in_tags._generate_evg_buildvariant(shrub_config, base_variant, generated_variant,
- burn_in_tags_gen_variant)
+
+ under_test._generate_evg_build_variant(shrub_config, base_variant, generated_variant,
+ burn_in_tags_gen_variant, evg_conf_mock)
expected_variant_data = get_evergreen_config().get_variant(base_variant)
generated_buildvariants = shrub_config.to_map()["buildvariants"]
@@ -89,10 +92,9 @@ class TestGenerateEvgBuildVariants(unittest.TestCase):
class TestGenerateEvgTasks(unittest.TestCase):
- @mock.patch(ns("evergreen"))
- @mock.patch(ns("create_tests_by_task"))
- def test_generate_evg_tasks_no_tests_changed(self, create_tests_by_task_mock, evergreen_mock):
- evergreen_mock.parse_evergreen_file.return_value = get_evergreen_config()
+ @patch(ns("create_tests_by_task"))
+ def test_generate_evg_tasks_no_tests_changed(self, create_tests_by_task_mock):
+ evg_conf_mock = get_evergreen_config()
create_tests_by_task_mock.return_value = {}
expansions_file_data = get_expansions_data()
buildvariant_map = {
@@ -101,17 +103,16 @@ class TestGenerateEvgTasks(unittest.TestCase):
"enterprise-rhel-62-64-bit-majority-read-concern-off-required",
} # yapf: disable
shrub_config = Configuration()
- evergreen_api = Mock()
- repo = Mock()
- burn_in_tags._generate_evg_tasks(evergreen_api, shrub_config, expansions_file_data,
- buildvariant_map, repo)
+ evergreen_api = MagicMock()
+ repo = MagicMock()
+ under_test._generate_evg_tasks(evergreen_api, shrub_config, expansions_file_data,
+ buildvariant_map, repo, evg_conf_mock)
self.assertEqual(shrub_config.to_map(), {})
- @mock.patch(ns("evergreen"))
- @mock.patch(ns("create_tests_by_task"))
- def test_generate_evg_tasks_one_test_changed(self, create_tests_by_task_mock, evergreen_mock):
- evergreen_mock.parse_evergreen_file.return_value = get_evergreen_config()
+ @patch(ns("create_tests_by_task"))
+ def test_generate_evg_tasks_one_test_changed(self, create_tests_by_task_mock):
+ evg_conf_mock = get_evergreen_config()
create_tests_by_task_mock.return_value = {
"aggregation_mongos_passthrough": {
"resmoke_args":
@@ -127,13 +128,13 @@ class TestGenerateEvgTasks(unittest.TestCase):
"enterprise-rhel-62-64-bit-majority-read-concern-off-required",
} # yapf: disable
shrub_config = Configuration()
- evergreen_api = Mock()
- repo = Mock()
+ evergreen_api = MagicMock()
+ repo = MagicMock()
evergreen_api.test_stats_by_project.return_value = [
- Mock(test_file="dir/test2.js", avg_duration_pass=10)
+ MagicMock(test_file="dir/test2.js", avg_duration_pass=10)
]
- burn_in_tags._generate_evg_tasks(evergreen_api, shrub_config, expansions_file_data,
- buildvariant_map, repo)
+ under_test._generate_evg_tasks(evergreen_api, shrub_config, expansions_file_data,
+ buildvariant_map, repo, evg_conf_mock)
generated_config = shrub_config.to_map()
self.assertEqual(len(generated_config["buildvariants"]), 2)
@@ -141,4 +142,4 @@ class TestGenerateEvgTasks(unittest.TestCase):
self.assertEqual(first_generated_build_variant["display_tasks"][0]["name"], "burn_in_tests")
self.assertEqual(
first_generated_build_variant["display_tasks"][0]["execution_tasks"][0],
- "burn_in:enterprise-rhel-62-64-bit-inmem-required_aggregation_mongos_passthrough_0")
+ "burn_in:aggregation_mongos_passthrough_0_enterprise-rhel-62-64-bit-inmem-required")
diff --git a/buildscripts/tests/test_burn_in_tests.py b/buildscripts/tests/test_burn_in_tests.py
index e6ee3544c5b..5701db2cbea 100644
--- a/buildscripts/tests/test_burn_in_tests.py
+++ b/buildscripts/tests/test_burn_in_tests.py
@@ -10,21 +10,30 @@ import subprocess
import unittest
from math import ceil
-from mock import Mock, mock_open, patch, MagicMock
+from mock import Mock, patch, MagicMock
import requests
-import buildscripts.burn_in_tests as burn_in
+from shrub.config import Configuration
+
+import buildscripts.burn_in_tests as under_test
import buildscripts.util.teststats as teststats_utils
import buildscripts.ciconfig.evergreen as evg
-# pylint: disable=missing-docstring,protected-access,too-many-lines
+# pylint: disable=missing-docstring,protected-access,too-many-lines,no-self-use
+
+
+def create_tests_by_task_mock(n_tasks, n_tests):
+ return {
+ f"task_{i}": {
+ "resmoke_args": f"--suites=suite_{i}",
+ "tests": [f"jstests/tests_{j}" for j in range(n_tests)]
+ }
+ for i in range(n_tasks)
+ }
+
-BURN_IN = "buildscripts.burn_in_tests"
-EVG_CI = "buildscripts.ciconfig.evergreen"
-EVG_CLIENT = "buildscripts.client.evergreen"
_DATE = datetime.datetime(2018, 7, 15)
-GIT = "buildscripts.git"
RESMOKELIB = "buildscripts.resmokelib"
GENERATE_RESMOKE_TASKS_BASENAME = "this_is_a_gen_task"
@@ -45,7 +54,6 @@ GENERATE_RESMOKE_TASKS_MULTIVERSION_COMMAND = {
"vars": {"resmoke_args": "--shellWriteMode=commands", "use_multiversion": MULTIVERSION_PATH}
}
-MULTIVERSION_COMMAND = {"func": "do multiversion setup"}
RUN_TESTS_MULTIVERSION_COMMAND = {
"func": "run tests",
"vars": {"resmoke_args": "--shellWriteMode=commands", "task_path_suffix": MULTIVERSION_PATH}
@@ -135,270 +143,122 @@ EVERGREEN_CONF.get_variant = VARIANTS.get
EVERGREEN_CONF.variant_names = VARIANTS.keys()
-def _mock_parser():
- parser = Mock()
- parser.error = Mock()
- return parser
-
-
-def _mock_evergreen_api():
- evergreen_api = Mock()
- evergreen_api.test_stats_by_project.return_value = [
- Mock(
- test_file="jstests/test1.js",
- task_name="task1",
- variant="variant1",
- distro="distro1",
- date=_DATE,
- num_pass=1,
- num_fail=0,
- avg_duration_pass=10,
- )
- ]
- return evergreen_api
-
-
-class TestValidateOptions(unittest.TestCase):
- @staticmethod
- def _mock_options():
- options = Mock()
- options.repeat_tests_num = None
- options.repeat_tests_max = None
- options.repeat_tests_min = None
- options.repeat_tests_secs = None
- options.buildvariant = None
- options.run_buildvariant = None
- options.test_list_file = None
- return options
-
- def test_validate_options_listfile_buildvariant(self):
- mock_parser = _mock_parser()
- options = self._mock_options()
- options.test_list_file = "list_file.json"
- options.buildvariant = "variant1"
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.validate_options(mock_parser, options)
- mock_parser.error.assert_not_called()
-
- def test_validate_options_nolistfile_buildvariant(self):
- mock_parser = _mock_parser()
- options = self._mock_options()
- options.buildvariant = "variant1"
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.validate_options(mock_parser, options)
- mock_parser.error.assert_not_called()
-
- def test_validate_options_listfile_nobuildvariant(self):
- mock_parser = _mock_parser()
- options = self._mock_options()
- options.test_list_file = "list_file.json"
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.validate_options(mock_parser, options)
- mock_parser.error.assert_not_called()
-
- def test_validate_options_no_listfile_no_buildvariant(self):
- mock_parser = _mock_parser()
- options = self._mock_options()
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.validate_options(mock_parser, options)
- mock_parser.error.assert_called()
-
- def test_validate_options_buildvariant(self):
- mock_parser = _mock_parser()
- options = self._mock_options()
- options.buildvariant = "variant1"
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.validate_options(mock_parser, options)
- mock_parser.error.assert_not_called()
-
- def test_validate_options_run_buildvariant(self):
- mock_parser = _mock_parser()
- options = self._mock_options()
- options.buildvariant = "variant1"
- options.run_buildvariant = "variant1"
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.validate_options(mock_parser, options)
- mock_parser.error.assert_not_called()
-
- def test_validate_options_bad_buildvariant(self):
- mock_parser = _mock_parser()
- options = self._mock_options()
- options.buildvariant = "badvariant1"
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.validate_options(mock_parser, options)
- mock_parser.error.assert_called()
-
- def test_validate_options_bad_run_buildvariant(self):
- mock_parser = _mock_parser()
- options = self._mock_options()
- options.run_buildvariant = "badvariant1"
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.validate_options(mock_parser, options)
- mock_parser.error.assert_called()
-
- def test_validate_options_tests_max_no_tests_secs(self):
- mock_parser = _mock_parser()
- options = self._mock_options()
- options.repeat_tests_max = 3
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.validate_options(mock_parser, options)
- mock_parser.error.assert_called()
-
- def test_validate_options_tests_min_no_tests_secs(self):
- mock_parser = _mock_parser()
- options = self._mock_options()
- options.repeat_tests_min = 3
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.validate_options(mock_parser, options)
- mock_parser.error.assert_called()
-
- def test_validate_options_tests_min_gt_tests_max(self):
- mock_parser = _mock_parser()
- options = self._mock_options()
- options.repeat_tests_min = 3
- options.repeat_tests_max = 2
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.validate_options(mock_parser, options)
- mock_parser.error.assert_called()
-
- def test_validate_options_tests_secs(self):
- mock_parser = _mock_parser()
- options = self._mock_options()
- options.buildvariant = "variant1"
- options.repeat_tests_min = 2
- options.repeat_tests_max = 1000
- options.repeat_tests_secs = 3
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.validate_options(mock_parser, options)
- mock_parser.error.assert_not_called()
-
- def test_validate_options_tests_secs_and_tests_num(self):
- mock_parser = _mock_parser()
- options = self._mock_options()
- options.buildvariant = "variant1"
- options.repeat_tests_num = 1
- options.repeat_tests_min = 1
- options.repeat_tests_max = 3
- options.repeat_tests_secs = 3
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.validate_options(mock_parser, options)
- mock_parser.error.assert_called()
-
- def test_validate_options_tests_secs_no_buildvariant(self):
- mock_parser = _mock_parser()
- options = self._mock_options()
- options.repeat_tests_min = 1
- options.repeat_tests_max = 3
- options.repeat_tests_secs = 3
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.validate_options(mock_parser, options)
- mock_parser.error.assert_called()
-
-
-class TestGetResmokeRepeatOptions(unittest.TestCase):
- @staticmethod
- def _options_mock():
- options = Mock()
- options.repeat_tests_secs = None
- options.repeat_tests_min = None
- options.repeat_tests_max = None
- options.repeat_tests_num = None
- return options
-
- def test_get_resmoke_repeat_options_default(self):
- options = self._options_mock()
- repeat_options = burn_in.get_resmoke_repeat_options(options)
- self.assertEqual(repeat_options, "--repeatSuites={}".format(burn_in.REPEAT_SUITES))
+class TestRepeatConfig(unittest.TestCase):
+ def test_validate_no_args(self):
+ repeat_config = under_test.RepeatConfig()
+
+ self.assertEqual(repeat_config, repeat_config.validate())
+
+ def test_validate_with_both_repeat_options_specified(self):
+ repeat_config = under_test.RepeatConfig(repeat_tests_secs=10, repeat_tests_num=5)
+
+ with self.assertRaises(ValueError):
+ repeat_config.validate()
+
+ def test_validate_with_repeat_max_with_no_secs(self):
+ repeat_config = under_test.RepeatConfig(repeat_tests_max=10)
+
+ with self.assertRaises(ValueError):
+ repeat_config.validate()
+
+ def test_validate_with_repeat_min_greater_than_max(self):
+ repeat_config = under_test.RepeatConfig(repeat_tests_max=10, repeat_tests_min=100,
+ repeat_tests_secs=15)
+
+ with self.assertRaises(ValueError):
+ repeat_config.validate()
+
+ def test_validate_with_repeat_min_with_no_secs(self):
+ repeat_config = under_test.RepeatConfig(repeat_tests_min=10)
+
+ with self.assertRaises(ValueError):
+ repeat_config.validate()
def test_get_resmoke_repeat_options_num(self):
- options = self._options_mock()
- options.repeat_tests_num = 5
- repeat_options = burn_in.get_resmoke_repeat_options(options)
- self.assertEqual(repeat_options, "--repeatSuites={}".format(options.repeat_tests_num))
+ repeat_config = under_test.RepeatConfig(repeat_tests_num=5)
+ repeat_options = repeat_config.generate_resmoke_options()
+
+ self.assertEqual(repeat_options.strip(), "--repeatSuites=5")
def test_get_resmoke_repeat_options_secs(self):
- options = self._options_mock()
- options.repeat_tests_secs = 5
- repeat_options = burn_in.get_resmoke_repeat_options(options)
- self.assertEqual(repeat_options, "--repeatTestsSecs={}".format(options.repeat_tests_secs))
+ repeat_config = under_test.RepeatConfig(repeat_tests_secs=5)
+ repeat_options = repeat_config.generate_resmoke_options()
+
+ self.assertEqual(repeat_options.strip(), "--repeatTestsSecs=5")
def test_get_resmoke_repeat_options_secs_min(self):
- options = self._options_mock()
- options.repeat_tests_secs = 5
- options.repeat_tests_min = 2
- repeat_options = burn_in.get_resmoke_repeat_options(options)
- self.assertIn("--repeatTestsSecs={}".format(options.repeat_tests_secs), repeat_options)
- self.assertIn("--repeatTestsMin={}".format(options.repeat_tests_min), repeat_options)
+ repeat_config = under_test.RepeatConfig(repeat_tests_secs=5, repeat_tests_min=2)
+ repeat_options = repeat_config.generate_resmoke_options()
+
+ self.assertIn("--repeatTestsSecs=5", repeat_options)
+ self.assertIn("--repeatTestsMin=2", repeat_options)
self.assertNotIn("--repeatTestsMax", repeat_options)
self.assertNotIn("--repeatSuites", repeat_options)
def test_get_resmoke_repeat_options_secs_max(self):
- options = self._options_mock()
- options.repeat_tests_secs = 5
- options.repeat_tests_max = 2
- repeat_options = burn_in.get_resmoke_repeat_options(options)
- self.assertIn("--repeatTestsSecs={}".format(options.repeat_tests_secs), repeat_options)
- self.assertIn("--repeatTestsMax={}".format(options.repeat_tests_max), repeat_options)
+ repeat_config = under_test.RepeatConfig(repeat_tests_secs=5, repeat_tests_max=2)
+ repeat_options = repeat_config.generate_resmoke_options()
+
+ self.assertIn("--repeatTestsSecs=5", repeat_options)
+ self.assertIn("--repeatTestsMax=2", repeat_options)
self.assertNotIn("--repeatTestsMin", repeat_options)
self.assertNotIn("--repeatSuites", repeat_options)
def test_get_resmoke_repeat_options_secs_min_max(self):
- options = self._options_mock()
- options.repeat_tests_secs = 5
- options.repeat_tests_min = 2
- options.repeat_tests_max = 2
- repeat_options = burn_in.get_resmoke_repeat_options(options)
- self.assertIn("--repeatTestsSecs={}".format(options.repeat_tests_secs), repeat_options)
- self.assertIn("--repeatTestsMin={}".format(options.repeat_tests_min), repeat_options)
- self.assertIn("--repeatTestsMax={}".format(options.repeat_tests_max), repeat_options)
+ repeat_config = under_test.RepeatConfig(repeat_tests_secs=5, repeat_tests_min=2,
+ repeat_tests_max=2)
+ repeat_options = repeat_config.generate_resmoke_options()
+
+ self.assertIn("--repeatTestsSecs=5", repeat_options)
+ self.assertIn("--repeatTestsMin=2", repeat_options)
+ self.assertIn("--repeatTestsMax=2", repeat_options)
self.assertNotIn("--repeatSuites", repeat_options)
def test_get_resmoke_repeat_options_min(self):
- options = self._options_mock()
- options.repeat_tests_min = 2
- repeat_options = burn_in.get_resmoke_repeat_options(options)
- self.assertEqual(repeat_options, "--repeatSuites={}".format(burn_in.REPEAT_SUITES))
+ repeat_config = under_test.RepeatConfig(repeat_tests_min=2)
+ repeat_options = repeat_config.generate_resmoke_options()
+
+ self.assertEqual(repeat_options.strip(), "--repeatSuites=2")
def test_get_resmoke_repeat_options_max(self):
- options = self._options_mock()
- options.repeat_tests_max = 2
- repeat_options = burn_in.get_resmoke_repeat_options(options)
- self.assertEqual(repeat_options, "--repeatSuites={}".format(burn_in.REPEAT_SUITES))
-
-
-class TestCheckVariant(unittest.TestCase):
- @staticmethod
- def test_check_variant():
- mock_parser = _mock_parser()
- buildvariant = "variant1"
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.check_variant(buildvariant, mock_parser)
- mock_parser.error.assert_not_called()
-
- @staticmethod
- def test_check_variant_badvariant():
- mock_parser = _mock_parser()
- buildvariant = "badvariant"
- with patch(EVG_CI + ".parse_evergreen_file", return_value=EVERGREEN_CONF):
- burn_in.check_variant(buildvariant, mock_parser)
- mock_parser.error.assert_called()
-
-
-class TestGetRunBuildvariant(unittest.TestCase):
- def test__get_run_buildvariant_rb(self):
- run_buildvariant = "variant1"
- buildvariant = "variant2"
- options = Mock()
- options.run_buildvariant = run_buildvariant
- options.buildvariant = buildvariant
- self.assertEqual(run_buildvariant, burn_in._get_run_buildvariant(options))
-
- def test__get_run_buildvariant_bv(self):
- buildvariant = "variant2"
- options = Mock()
- options.run_buildvariant = None
- options.buildvariant = buildvariant
- self.assertEqual(buildvariant, burn_in._get_run_buildvariant(options))
+ repeat_config = under_test.RepeatConfig(repeat_tests_max=2)
+ repeat_options = repeat_config.generate_resmoke_options()
+
+ self.assertEqual(repeat_options.strip(), "--repeatSuites=2")
+
+
+class TestGenerateConfig(unittest.TestCase):
+ def test_run_build_variant_with_no_run_build_variant(self):
+ gen_config = under_test.GenerateConfig("build_variant", "project")
+
+ self.assertEqual(gen_config.build_variant, gen_config.run_build_variant)
+
+ def test_run_build_variant_with_run_build_variant(self):
+ gen_config = under_test.GenerateConfig("build_variant", "project", "run_build_variant")
+
+ self.assertNotEqual(gen_config.build_variant, gen_config.run_build_variant)
+ self.assertEqual(gen_config.run_build_variant, "run_build_variant")
+
+ def test_validate_non_existing_build_variant(self):
+ evg_conf_mock = MagicMock()
+ evg_conf_mock.get_variant.return_value = None
+
+ gen_config = under_test.GenerateConfig("build_variant", "project", "run_build_variant")
+
+ with self.assertRaises(ValueError):
+ gen_config.validate(evg_conf_mock)
+
+ def test_validate_existing_build_variant(self):
+ evg_conf_mock = MagicMock()
+
+ gen_config = under_test.GenerateConfig("build_variant", "project", "run_build_variant")
+ gen_config.validate(evg_conf_mock)
+
+ def test_validate_non_existing_run_build_variant(self):
+ evg_conf_mock = MagicMock()
+
+ gen_config = under_test.GenerateConfig("build_variant", "project")
+ gen_config.validate(evg_conf_mock)
class TestParseAvgTestRuntime(unittest.TestCase):
@@ -407,71 +267,70 @@ class TestParseAvgTestRuntime(unittest.TestCase):
teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=30.2),
teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=455.1)
]
- result = burn_in._parse_avg_test_runtime("dir/test2.js", task_avg_test_runtime_stats)
+ result = under_test._parse_avg_test_runtime("dir/test2.js", task_avg_test_runtime_stats)
self.assertEqual(result, 455.1)
class TestCalculateTimeout(unittest.TestCase):
def test__calculate_timeout(self):
avg_test_runtime = 455.1
- expected_result = ceil(avg_test_runtime * burn_in.AVG_TEST_TIME_MULTIPLIER)
- self.assertEqual(expected_result, burn_in._calculate_timeout(avg_test_runtime))
+ expected_result = ceil(avg_test_runtime * under_test.AVG_TEST_TIME_MULTIPLIER)
+ self.assertEqual(expected_result, under_test._calculate_timeout(avg_test_runtime))
def test__calculate_timeout_avg_is_less_than_min(self):
avg_test_runtime = 10
- self.assertEqual(burn_in.MIN_AVG_TEST_TIME_SEC,
- burn_in._calculate_timeout(avg_test_runtime))
+ self.assertEqual(under_test.MIN_AVG_TEST_TIME_SEC,
+ under_test._calculate_timeout(avg_test_runtime))
class TestCalculateExecTimeout(unittest.TestCase):
def test__calculate_exec_timeout(self):
avg_test_runtime = 455.1
repeat_tests_secs = 600
- options = Mock(repeat_tests_secs=repeat_tests_secs)
- expected_result = repeat_tests_secs + (
- (avg_test_runtime -
- (repeat_tests_secs % avg_test_runtime)) * burn_in.AVG_TEST_TIME_MULTIPLIER)
- self.assertEqual(
- ceil(expected_result), burn_in._calculate_exec_timeout(options, avg_test_runtime))
+
+ exec_timeout = under_test._calculate_exec_timeout(repeat_tests_secs, avg_test_runtime)
+
+ self.assertEqual(1531, exec_timeout)
class TestGenerateTimeouts(unittest.TestCase):
def test__generate_timeouts(self):
- shrub_commands = []
- task_avg_test_runtime_stats = [
- teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=455.1)
- ]
- options = Mock(repeat_tests_secs=600)
+ repeat_tests_secs = 600
+ runtime_stats = [teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=455.1)]
test_name = "dir/test2.js"
- burn_in._generate_timeouts(options, shrub_commands, test_name, task_avg_test_runtime_stats)
- self.assertEqual(len(shrub_commands), 1)
- command_definition = shrub_commands[0]
- self.assertEqual(command_definition.to_map()["params"]["exec_timeout_secs"], 1531)
- self.assertEqual(command_definition.to_map()["params"]["timeout_secs"], 1366)
+ timeout_info = under_test._generate_timeouts(repeat_tests_secs, test_name, runtime_stats)
+
+ self.assertEqual(timeout_info.exec_timeout, 1531)
+ self.assertEqual(timeout_info.timeout, 1366)
def test__generate_timeouts_no_results(self):
- shrub_commands = []
- task_avg_test_runtime_stats = []
- options = Mock(repeat_tests_secs=600)
+ repeat_tests_secs = 600
+ runtime_stats = []
test_name = "dir/new_test.js"
- burn_in._generate_timeouts(options, shrub_commands, test_name, task_avg_test_runtime_stats)
- self.assertEqual(len(shrub_commands), 0)
+ timeout_info = under_test._generate_timeouts(repeat_tests_secs, test_name, runtime_stats)
+
+ self.assertIsNone(timeout_info.cmd)
def test__generate_timeouts_avg_runtime_is_zero(self):
- shrub_commands = []
- task_avg_test_runtime_stats = [
+ repeat_tests_secs = 600
+ runtime_stats = [
teststats_utils.TestRuntime(test_name="dir/test_with_zero_runtime.js", runtime=0)
]
- options = Mock(repeat_tests_secs=600)
test_name = "dir/test_with_zero_runtime.js"
- burn_in._generate_timeouts(options, shrub_commands, test_name, task_avg_test_runtime_stats)
- self.assertEqual(len(shrub_commands), 0)
+ timeout_info = under_test._generate_timeouts(repeat_tests_secs, test_name, runtime_stats)
+
+ self.assertIsNone(timeout_info.cmd)
class TestGetTaskRuntimeHistory(unittest.TestCase):
+ def test_get_task_runtime_history_with_no_api(self):
+ self.assertListEqual([],
+ under_test._get_task_runtime_history(None, "project", "task",
+ "variant"))
+
def test__get_task_runtime_history(self):
evergreen_api = Mock()
evergreen_api.test_stats_by_project.return_value = [
@@ -486,11 +345,12 @@ class TestGetTaskRuntimeHistory(unittest.TestCase):
avg_duration_pass=10.1,
)
]
- analysis_duration = burn_in.AVG_TEST_RUNTIME_ANALYSIS_DAYS
+ analysis_duration = under_test.AVG_TEST_RUNTIME_ANALYSIS_DAYS
end_date = datetime.datetime.utcnow().replace(microsecond=0)
start_date = end_date - datetime.timedelta(days=analysis_duration)
- result = burn_in._get_task_runtime_history(evergreen_api, "project1", "task1", "variant1")
+ result = under_test._get_task_runtime_history(evergreen_api, "project1", "task1",
+ "variant1")
self.assertEqual(result, [("dir/test2.js", 10.1)])
evergreen_api.test_stats_by_project.assert_called_with(
"project1", after_date=start_date.strftime("%Y-%m-%d"),
@@ -503,7 +363,8 @@ class TestGetTaskRuntimeHistory(unittest.TestCase):
evergreen_api = Mock()
evergreen_api.test_stats_by_project.side_effect = requests.HTTPError(response=response)
- result = burn_in._get_task_runtime_history(evergreen_api, "project1", "task1", "variant1")
+ result = under_test._get_task_runtime_history(evergreen_api, "project1", "task1",
+ "variant1")
self.assertEqual(result, [])
@@ -513,12 +374,12 @@ class TestGetTaskName(unittest.TestCase):
task = Mock()
task.is_generate_resmoke_task = False
task.name = name
- self.assertEqual(name, burn_in._get_task_name(task))
+ self.assertEqual(name, under_test._get_task_name(task))
def test__get_task_name_generate_resmoke_task(self):
task_name = "mytask"
task = Mock(is_generate_resmoke_task=True, generated_task_name=task_name)
- self.assertEqual(task_name, burn_in._get_task_name(task))
+ self.assertEqual(task_name, under_test._get_task_name(task))
class TestSetResmokeArgs(unittest.TestCase):
@@ -527,7 +388,7 @@ class TestSetResmokeArgs(unittest.TestCase):
task = Mock()
task.combined_resmoke_args = resmoke_args
task.is_generate_resmoke_task = False
- self.assertEqual(resmoke_args, burn_in._set_resmoke_args(task))
+ self.assertEqual(resmoke_args, under_test._set_resmoke_args(task))
def test__set_resmoke_args_gen_resmoke_task(self):
resmoke_args = "--suites=suite1 test1.js"
@@ -538,7 +399,7 @@ class TestSetResmokeArgs(unittest.TestCase):
task.is_generate_resmoke_task = True
task.get_vars_suite_name = lambda cmd_vars: cmd_vars["suite"]
task.generate_resmoke_tasks_command = {"vars": {"suite": new_suite}}
- self.assertEqual(new_resmoke_args, burn_in._set_resmoke_args(task))
+ self.assertEqual(new_resmoke_args, under_test._set_resmoke_args(task))
def test__set_resmoke_args_gen_resmoke_task_no_suite(self):
suite = "suite1"
@@ -548,41 +409,32 @@ class TestSetResmokeArgs(unittest.TestCase):
task.is_generate_resmoke_task = True
task.get_vars_suite_name = lambda cmd_vars: cmd_vars["task"]
task.generate_resmoke_tasks_command = {"vars": {"task": suite}}
- self.assertEqual(resmoke_args, burn_in._set_resmoke_args(task))
+ self.assertEqual(resmoke_args, under_test._set_resmoke_args(task))
class TestSetResmokeCmd(unittest.TestCase):
def test__set_resmoke_cmd_no_opts_no_args(self):
- with patch(BURN_IN + ".get_resmoke_repeat_options", return_value=""):
- self.assertListEqual([sys.executable, "buildscripts/resmoke.py"],
- burn_in._set_resmoke_cmd(None, None))
+ repeat_config = under_test.RepeatConfig()
+ resmoke_cmds = under_test._set_resmoke_cmd(repeat_config, [])
+
+ self.assertListEqual(resmoke_cmds,
+ [sys.executable, "buildscripts/resmoke.py", '--repeatSuites=2'])
def test__set_resmoke_cmd_no_opts(self):
- args = ["arg1", "arg2"]
- with patch(BURN_IN + ".get_resmoke_repeat_options", return_value=""):
- self.assertListEqual(args, burn_in._set_resmoke_cmd(None, args))
+ repeat_config = under_test.RepeatConfig()
+ resmoke_args = ["arg1", "arg2"]
- def test__set_resmoke_cmd(self):
- opts = "myopt1 myopt2"
- args = ["arg1", "arg2"]
- new_cmd = args + opts.split()
- with patch(BURN_IN + ".get_resmoke_repeat_options", return_value=opts):
- self.assertListEqual(new_cmd, burn_in._set_resmoke_cmd(opts, args))
+ resmoke_cmd = under_test._set_resmoke_cmd(repeat_config, resmoke_args)
+ self.assertListEqual(resmoke_args + ['--repeatSuites=2'], resmoke_cmd)
+
+ def test__set_resmoke_cmd(self):
+ repeat_config = under_test.RepeatConfig(repeat_tests_num=3)
+ resmoke_args = ["arg1", "arg2"]
-class TestSubTaskName(unittest.TestCase):
- def test__sub_task_name(self):
- options = MagicMock(buildvariant="myvar", run_buildvariant=None)
- task = "mytask"
- task_num = 0
- self.assertEqual("burn_in:myvar_mytask_0", burn_in._sub_task_name(options, task, task_num))
+ resmoke_cmd = under_test._set_resmoke_cmd(repeat_config, resmoke_args)
- def test__sub_task_name_with_run_bv(self):
- options = MagicMock(buildvariant="myvar", run_buildvariant="run_var")
- task = "mytask"
- task_num = 0
- self.assertEqual("burn_in:run_var_mytask_0", burn_in._sub_task_name(
- options, task, task_num))
+ self.assertListEqual(resmoke_args + ['--repeatSuites=3'], resmoke_cmd)
TESTS_BY_TASK = {
@@ -604,197 +456,101 @@ TESTS_BY_TASK = {
} # yapf: disable
+class TestCreateGenerateTasksConfig(unittest.TestCase):
+ def test_no_tasks_given(self):
+ evg_config = Configuration()
+ gen_config = MagicMock(run_build_variant="variant")
+ repeat_config = MagicMock()
+
+ evg_config = under_test.create_generate_tasks_config(evg_config, {}, gen_config,
+ repeat_config, None)
+
+ evg_config_dict = evg_config.to_map()
+ self.assertNotIn("tasks", evg_config_dict)
+
+ def test_one_task_one_test(self):
+ n_tasks = 1
+ n_tests = 1
+ resmoke_options = "options for resmoke"
+ evg_config = Configuration()
+ gen_config = MagicMock(run_build_variant="variant", distro=None)
+ repeat_config = MagicMock()
+ repeat_config.generate_resmoke_options.return_value = resmoke_options
+ tests_by_task = create_tests_by_task_mock(n_tasks, n_tests)
+
+ evg_config = under_test.create_generate_tasks_config(evg_config, tests_by_task, gen_config,
+ repeat_config, None)
+
+ evg_config_dict = evg_config.to_map()
+ tasks = evg_config_dict["tasks"]
+ self.assertEqual(n_tasks * n_tests, len(tasks))
+ cmd = tasks[0]["commands"]
+ self.assertIn(resmoke_options, cmd[1]["vars"]["resmoke_args"])
+ self.assertIn("--suites=suite_0", cmd[1]["vars"]["resmoke_args"])
+ self.assertIn("tests_0", cmd[1]["vars"]["resmoke_args"])
+
+ def test_n_task_m_test(self):
+ n_tasks = 3
+ n_tests = 5
+ evg_config = Configuration()
+ gen_config = MagicMock(run_build_variant="variant", distro=None)
+ repeat_config = MagicMock()
+ tests_by_task = create_tests_by_task_mock(n_tasks, n_tests)
+
+ evg_config = under_test.create_generate_tasks_config(evg_config, tests_by_task, gen_config,
+ repeat_config, None)
+
+ evg_config_dict = evg_config.to_map()
+ self.assertEqual(n_tasks * n_tests, len(evg_config_dict["tasks"]))
+
+ def test_multiversion_path_is_used(self):
+ n_tasks = 1
+ n_tests = 1
+ evg_config = Configuration()
+ gen_config = MagicMock(run_build_variant="variant", distro=None)
+ repeat_config = MagicMock()
+ tests_by_task = create_tests_by_task_mock(n_tasks, n_tests)
+ first_task = "task_0"
+ multiversion_path = "multiversion_path"
+ tests_by_task[first_task]["use_multiversion"] = multiversion_path
+
+ evg_config = under_test.create_generate_tasks_config(evg_config, tests_by_task, gen_config,
+ repeat_config, None)
+
+ evg_config_dict = evg_config.to_map()
+ tasks = evg_config_dict["tasks"]
+ self.assertEqual(n_tasks * n_tests, len(tasks))
+ self.assertEqual(multiversion_path, tasks[0]["commands"][2]["vars"]["task_path_suffix"])
+
+
class TestCreateGenerateTasksFile(unittest.TestCase):
- @staticmethod
- def _options_mock():
- options = Mock()
- options.buildvariant = None
- options.run_buildvariant = None
- options.repeat_tests_secs = 600
- options.distro = None
- options.branch = "master"
- return options
-
- @staticmethod
- def _get_tests(tests_by_task):
- tests = []
- for task in tests_by_task:
- tests.extend(tests_by_task[task]["tests"])
- return tests
-
- def test_create_generate_tasks_file_tasks(self):
- evergreen_api = Mock()
- evergreen_api.test_stats_by_project.return_value = [
- Mock(
- test_file="jstests/test1.js",
- task_name="task1",
- variant="variant1",
- distro="distro1",
- date=_DATE,
- num_pass=1,
- num_fail=0,
- avg_duration_pass=10,
- ),
- Mock(
- test_file="jstests/test2.js",
- task_name="task1",
- variant="variant1",
- distro="distro1",
- date=_DATE,
- num_pass=1,
- num_fail=0,
- avg_duration_pass=10,
- ),
- Mock(
- test_file="jstests/multi1.js",
- task_name="task1",
- variant="variant1",
- distro="distro1",
- date=_DATE,
- num_pass=1,
- num_fail=0,
- avg_duration_pass=10,
- )
- ]
- options = self._options_mock()
- options.buildvariant = "myvariant"
- tests_by_task = TESTS_BY_TASK
- test_tasks = self._get_tests(tests_by_task)
- with patch(BURN_IN + "._write_json_file") as mock_write_json:
- burn_in.create_generate_tasks_file(evergreen_api, options, tests_by_task)
- evg_config = mock_write_json.call_args_list[0][0][0]
- evg_tasks = evg_config["tasks"]
- self.assertEqual(len(evg_tasks), len(test_tasks))
- # Check task1 - test1.js
- task = evg_tasks[0]
- self.assertEqual(task["name"], "burn_in:myvariant_task1_0")
- self.assertEqual(len(task["depends_on"]), 1)
- self.assertEqual(task["depends_on"][0]["name"], "compile")
- self.assertEqual(len(task["commands"]), 3)
- self.assertEqual(task["commands"][1]["func"], "do setup")
- self.assertEqual(task["commands"][2]["func"], "run tests")
- resmoke_args = task["commands"][2]["vars"]["resmoke_args"]
- self.assertIn("--suites=suite1", resmoke_args)
- self.assertIn("jstests/test1.js", resmoke_args)
- # Check task1 - test2.js
- task = evg_tasks[1]
- self.assertEqual(task["name"], "burn_in:myvariant_task1_1")
- self.assertEqual(len(task["depends_on"]), 1)
- self.assertEqual(task["depends_on"][0]["name"], "compile")
- self.assertEqual(len(task["commands"]), 3)
- self.assertEqual(task["commands"][1]["func"], "do setup")
- self.assertEqual(task["commands"][2]["func"], "run tests")
- resmoke_args = task["commands"][2]["vars"]["resmoke_args"]
- self.assertIn("--suites=suite1", resmoke_args)
- self.assertIn("jstests/test2.js", resmoke_args)
- # task[2] - task[5] are similar to task[0] & task[1]
- # Check taskmulti - multi1.js
- taskmulti = evg_tasks[6]
- self.assertEqual(taskmulti["name"], "burn_in:myvariant_taskmulti_0")
- self.assertEqual(len(taskmulti["depends_on"]), 1)
- self.assertEqual(taskmulti["depends_on"][0]["name"], "compile")
- self.assertEqual(len(taskmulti["commands"]), 4)
- self.assertEqual(taskmulti["commands"][1]["func"], "do setup")
- self.assertEqual(taskmulti["commands"][2]["func"], "do multiversion setup")
- self.assertEqual(taskmulti["commands"][3]["func"], "run tests")
- resmoke_args = taskmulti["commands"][3]["vars"]["resmoke_args"]
- self.assertIn("--suites=suite4", resmoke_args)
- self.assertIn("jstests/multi1.js", resmoke_args)
- self.assertEqual(taskmulti["commands"][3]["vars"]["task_path_suffix"], "/data/multi")
-
- def test_create_generate_tasks_file_variants(self):
- evergreen_api = _mock_evergreen_api()
- options = self._options_mock()
- options.buildvariant = "myvariant"
- tests_by_task = TESTS_BY_TASK
- with patch(BURN_IN + "._write_json_file") as mock_write_json:
- burn_in.create_generate_tasks_file(evergreen_api, options, tests_by_task)
- evg_config = mock_write_json.call_args_list[0][0][0]
- self.assertEqual(len(evg_config["buildvariants"]), 1)
- self.assertEqual(evg_config["buildvariants"][0]["name"], "myvariant")
- self.assertEqual(len(evg_config["buildvariants"][0]["tasks"]), 7)
- self.assertEqual(len(evg_config["buildvariants"][0]["display_tasks"]), 1)
- display_task = evg_config["buildvariants"][0]["display_tasks"][0]
- self.assertEqual(display_task["name"], burn_in.BURN_IN_TESTS_TASK)
- execution_tasks = display_task["execution_tasks"]
- self.assertEqual(len(execution_tasks), 8)
- self.assertEqual(execution_tasks[0], burn_in.BURN_IN_TESTS_GEN_TASK)
- self.assertEqual(execution_tasks[1], "burn_in:myvariant_task1_0")
- self.assertEqual(execution_tasks[2], "burn_in:myvariant_task1_1")
- self.assertEqual(execution_tasks[3], "burn_in:myvariant_task2_0")
- self.assertEqual(execution_tasks[4], "burn_in:myvariant_task2_1")
- self.assertEqual(execution_tasks[5], "burn_in:myvariant_task3_0")
- self.assertEqual(execution_tasks[6], "burn_in:myvariant_task3_1")
- self.assertEqual(execution_tasks[7], "burn_in:myvariant_taskmulti_0")
-
- def test_create_generate_tasks_file_run_variants(self):
- evergreen_api = _mock_evergreen_api()
- options = self._options_mock()
- options.buildvariant = "myvariant"
- options.run_buildvariant = "run_variant"
- tests_by_task = TESTS_BY_TASK
- with patch(BURN_IN + "._write_json_file") as mock_write_json:
- burn_in.create_generate_tasks_file(evergreen_api, options, tests_by_task)
- evg_config = mock_write_json.call_args_list[0][0][0]
- self.assertEqual(len(evg_config["buildvariants"]), 1)
- self.assertEqual(evg_config["buildvariants"][0]["name"], "run_variant")
- self.assertEqual(len(evg_config["buildvariants"][0]["tasks"]), 7)
- self.assertEqual(len(evg_config["buildvariants"][0]["display_tasks"]), 1)
- display_task = evg_config["buildvariants"][0]["display_tasks"][0]
- self.assertEqual(display_task["name"], burn_in.BURN_IN_TESTS_TASK)
- execution_tasks = display_task["execution_tasks"]
- self.assertEqual(len(execution_tasks), 8)
- self.assertEqual(execution_tasks[0], burn_in.BURN_IN_TESTS_GEN_TASK)
- self.assertEqual(execution_tasks[1], "burn_in:run_variant_task1_0")
- self.assertEqual(execution_tasks[2], "burn_in:run_variant_task1_1")
- self.assertEqual(execution_tasks[3], "burn_in:run_variant_task2_0")
- self.assertEqual(execution_tasks[4], "burn_in:run_variant_task2_1")
- self.assertEqual(execution_tasks[5], "burn_in:run_variant_task3_0")
- self.assertEqual(execution_tasks[6], "burn_in:run_variant_task3_1")
- self.assertEqual(execution_tasks[7], "burn_in:run_variant_taskmulti_0")
-
- def test_create_generate_tasks_file_distro(self):
- evergreen_api = _mock_evergreen_api()
- options = self._options_mock()
- options.buildvariant = "myvariant"
- options.distro = "mydistro"
- tests_by_task = TESTS_BY_TASK
- test_tasks = self._get_tests(tests_by_task)
- with patch(BURN_IN + "._write_json_file") as mock_write_json:
- burn_in.create_generate_tasks_file(evergreen_api, options, tests_by_task)
- evg_config = mock_write_json.call_args_list[0][0][0]
- self.assertEqual(len(evg_config["tasks"]), len(test_tasks))
- self.assertEqual(len(evg_config["buildvariants"]), 1)
- for variant in evg_config["buildvariants"]:
- for task in variant.get("tasks", []):
- self.assertEqual(len(task["distros"]), 1)
- self.assertEqual(task["distros"][0], options.distro)
-
- def test_create_generate_tasks_file_no_tasks(self):
- evergreen_api = _mock_evergreen_api()
- variant = "myvariant"
- options = self._options_mock()
- options.buildvariant = variant
- tests_by_task = {}
- with patch(BURN_IN + "._write_json_file") as mock_write_json:
- burn_in.create_generate_tasks_file(evergreen_api, options, tests_by_task)
- evg_config = mock_write_json.call_args_list[0][0][0]
- self.assertEqual(len(evg_config), 1)
- self.assertEqual(len(evg_config["buildvariants"]), 1)
- self.assertEqual(evg_config["buildvariants"][0]["name"], variant)
- display_tasks = evg_config["buildvariants"][0]["display_tasks"]
- self.assertEqual(len(display_tasks), 1)
- self.assertEqual(display_tasks[0]["name"], burn_in.BURN_IN_TESTS_TASK)
- execution_tasks = display_tasks[0]["execution_tasks"]
- self.assertEqual(len(execution_tasks), 1)
- self.assertEqual(execution_tasks[0], burn_in.BURN_IN_TESTS_GEN_TASK)
-
- @patch("buildscripts.burn_in_tests._write_json_file")
+ @patch("buildscripts.burn_in_tests.create_generate_tasks_config")
+ def test_gen_tasks_configuration_is_returned(self, gen_tasks_config_mock):
+ evg_api = MagicMock()
+ gen_config = MagicMock()
+ repeat_config = MagicMock()
+ tests_by_task = MagicMock()
+
+ task_list = [f"task_{i}" for i in range(10)]
+
+ evg_config = MagicMock()
+ evg_config.to_map.return_value = {
+ "tasks": task_list,
+ }
+
+ gen_tasks_config_mock.return_value = evg_config
+
+ config = under_test.create_generate_tasks_file(tests_by_task, gen_config, repeat_config,
+ evg_api)
+
+ self.assertEqual(config, evg_config.to_map.return_value)
+
@patch("buildscripts.burn_in_tests.sys.exit")
@patch("buildscripts.burn_in_tests.create_generate_tasks_config")
- def test_cap_on_task_generate(self, gen_tasks_config_mock, exit_mock, write_mock):
+ def test_cap_on_task_generate(self, gen_tasks_config_mock, exit_mock):
evg_api = MagicMock()
- options = MagicMock()
+ gen_config = MagicMock()
+ repeat_config = MagicMock()
tests_by_task = MagicMock()
task_list = [f"task_{i}" for i in range(1005)]
@@ -808,96 +564,46 @@ class TestCreateGenerateTasksFile(unittest.TestCase):
exit_mock.side_effect = ValueError("exiting")
with self.assertRaises(ValueError):
- burn_in.create_generate_tasks_file(evg_api, options, tests_by_task)
+ under_test.create_generate_tasks_file(tests_by_task, gen_config, repeat_config, evg_api)
exit_mock.assert_called_once()
- write_mock.assert_not_called()
-
-
-class UpdateReportDataTests(unittest.TestCase):
- def test_update_report_data_nofile(self):
- data = {}
- task = ""
- pathname = "file_exists"
- with patch("os.path.isfile", return_value=False) as mock_isfile,\
- patch("json.load", return_value=data) as mock_json:
- burn_in._update_report_data(data, pathname, task)
- self.assertEqual(mock_isfile.call_count, 1)
- self.assertEqual(mock_json.call_count, 0)
-
- def test_update_report_data(self):
- task1 = "task1"
- task2 = "task2"
- data = {
- "failures": 1,
- "results": [
- {"test_file": "test1:" + task1},
- {"test_file": "test2:" + task1}]
- } # yapf: disable
- new_data = {
- "failures": 1,
- "results": [
- {"test_file": "test3"},
- {"test_file": "test4"}]
- } # yapf: disable
-
- pathname = "file_exists"
- with patch("os.path.isfile", return_value=True),\
- patch("builtins.open", mock_open()),\
- patch("json.load", return_value=new_data):
- burn_in._update_report_data(data, pathname, task2)
- self.assertEqual(len(data["results"]), 4)
- self.assertEqual(data["failures"], 2)
- self.assertIn({"test_file": "test1:" + task1}, data["results"])
- self.assertIn({"test_file": "test3:" + task2}, data["results"])
class RunTests(unittest.TestCase):
- class SysExit(Exception):
- pass
-
- def _test_run_tests(self, no_exec, tests_by_task, resmoke_cmd):
- with patch("subprocess.check_call", return_value=None) as mock_subproc,\
- patch(BURN_IN + "._update_report_data", return_value=None),\
- patch(BURN_IN + "._write_json_file", return_value=None):
- burn_in.run_tests(no_exec, tests_by_task, resmoke_cmd, None)
- self.assertEqual(mock_subproc.call_count, len(tests_by_task.keys()))
- for idx, task in enumerate(sorted(tests_by_task)):
- for task_test in tests_by_task[task].get("tests", []):
- self.assertIn(task_test, mock_subproc.call_args_list[idx][0][0])
-
- def test_run_tests_noexec(self):
- no_exec = True
- resmoke_cmd = None
- with patch("subprocess.check_call", return_value=None) as mock_subproc,\
- patch(BURN_IN + "._write_json_file", return_value=None) as mock_write_json:
- burn_in.run_tests(no_exec, TESTS_BY_TASK, resmoke_cmd, None)
- self.assertEqual(mock_subproc.call_count, 0)
- self.assertEqual(mock_write_json.call_count, 0)
-
- def test_run_tests_notests(self):
- no_exec = False
+ @patch(ns('subprocess.check_call'))
+ def test_run_tests_no_tests(self, check_call_mock):
tests_by_task = {}
resmoke_cmd = ["python", "buildscripts/resmoke.py", "--continueOnFailure"]
- self._test_run_tests(no_exec, tests_by_task, resmoke_cmd)
- def test_run_tests_tests(self):
- no_exec = False
+ under_test.run_tests(tests_by_task, resmoke_cmd)
+
+ check_call_mock.assert_not_called()
+
+ @patch(ns('subprocess.check_call'))
+ def test_run_tests_some_test(self, check_call_mock):
+ n_tasks = 3
+ tests_by_task = create_tests_by_task_mock(n_tasks, 5)
resmoke_cmd = ["python", "buildscripts/resmoke.py", "--continueOnFailure"]
- self._test_run_tests(no_exec, TESTS_BY_TASK, resmoke_cmd)
- def test_run_tests_tests_resmoke_failure(self):
- no_exec = False
+ under_test.run_tests(tests_by_task, resmoke_cmd)
+
+ self.assertEqual(n_tasks, check_call_mock.call_count)
+
+ @patch(ns('sys.exit'))
+ @patch(ns('subprocess.check_call'))
+ def test_run_tests_tests_resmoke_failure(self, check_call_mock, exit_mock):
+ error_code = 42
+ n_tasks = 3
+ tests_by_task = create_tests_by_task_mock(n_tasks, 5)
resmoke_cmd = ["python", "buildscripts/resmoke.py", "--continueOnFailure"]
- error_code = -1
- with patch("subprocess.check_call", return_value=None) as mock_subproc,\
- patch("sys.exit", return_value=error_code) as mock_exit,\
- patch(BURN_IN + "._update_report_data", return_value=None),\
- patch(BURN_IN + "._write_json_file", return_value=None):
- mock_subproc.side_effect = subprocess.CalledProcessError(error_code, "err1")
- mock_exit.side_effect = self.SysExit(error_code)
- with self.assertRaises(self.SysExit):
- burn_in.run_tests(no_exec, TESTS_BY_TASK, resmoke_cmd, None)
+ check_call_mock.side_effect = subprocess.CalledProcessError(error_code, "err1")
+ exit_mock.side_effect = ValueError('exiting')
+
+ with self.assertRaises(ValueError):
+ under_test.run_tests(tests_by_task, resmoke_cmd)
+
+ self.assertEqual(1, check_call_mock.call_count)
+ exit_mock.assert_called_with(error_code)
MEMBERS_MAP = {
@@ -914,8 +620,8 @@ SUITE3.tests = ["test2.js", "test4.js"]
def _create_executor_list(suites, exclude_suites):
- with patch(RESMOKELIB + ".suitesconfig.create_test_membership_map", return_value=MEMBERS_MAP):
- return burn_in.create_executor_list(suites, exclude_suites)
+ with patch(ns("create_test_membership_map"), return_value=MEMBERS_MAP):
+ return under_test.create_executor_list(suites, exclude_suites)
class CreateExecutorList(unittest.TestCase):
@@ -944,7 +650,7 @@ class CreateExecutorList(unittest.TestCase):
def test_create_executor_list_runs_core_suite(self, mock_get_named_suites, mock_suite_class):
mock_get_named_suites.return_value = ["core"]
- burn_in.create_executor_list([], [])
+ under_test.create_executor_list([], [])
self.assertEqual(mock_suite_class.call_count, 1)
@patch(RESMOKELIB + ".testing.suite.Suite")
@@ -953,7 +659,7 @@ class CreateExecutorList(unittest.TestCase):
mock_suite_class):
mock_get_named_suites.return_value = ["dbtest"]
- burn_in.create_executor_list([], [])
+ under_test.create_executor_list([], [])
self.assertEqual(mock_suite_class.call_count, 0)
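
# Editor's note: a rough sketch of the grouping these tests exercise, assuming
# create_test_membership_map returns a test-path -> suite-names mapping shaped like the
# MEMBERS_MAP fixture above. Argument handling and the exact return structure are assumptions.
#
#   import collections
#   from buildscripts.resmokelib.suitesconfig import create_test_membership_map
#
#   def create_executor_list(suites, exclude_suites):
#       """Group the changed tests by the resmoke suites (executors) that run them (sketch)."""
#       test_membership = create_test_membership_map()
#       memberships = collections.defaultdict(list)
#       for suite in suites:
#           for test in suite.tests:
#               for executor in test_membership[test]:
#                   if executor not in exclude_suites:
#                       memberships[executor].append(test)
#       return memberships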
@@ -963,7 +669,9 @@ class CreateTaskList(unittest.TestCase):
suites = [SUITE1, SUITE2, SUITE3]
exclude_suites = []
suite_list = _create_executor_list(suites, exclude_suites)
- task_list = burn_in.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
+
+ task_list = under_test.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
+
self.assertEqual(len(task_list), len(VARIANTS["variantall"].tasks))
self.assertIn("task1", task_list)
self.assertEqual(task_list["task1"]["resmoke_args"], "--suites=suite1 var1arg1")
@@ -983,7 +691,7 @@ class CreateTaskList(unittest.TestCase):
suites = [SUITE1, SUITE2, SUITE3]
exclude_suites = []
suite_list = _create_executor_list(suites, exclude_suites)
- task_list = burn_in.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
+ task_list = under_test.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
self.assertEqual(len(task_list), len(VARIANTS["variant_multiversion"].tasks))
self.assertEqual(task_list["multiversion_task"]["use_multiversion"], MULTIVERSION_PATH)
@@ -992,7 +700,7 @@ class CreateTaskList(unittest.TestCase):
suites = [SUITE3]
exclude_suites = []
suite_list = _create_executor_list(suites, exclude_suites)
- task_list = burn_in.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
+ task_list = under_test.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
self.assertEqual(len(task_list), len(VARIANTS["variant_generate_tasks"].tasks))
self.assertIn(GENERATE_RESMOKE_TASKS_BASENAME, task_list)
self.assertEqual(task_list[GENERATE_RESMOKE_TASKS_BASENAME]["tests"], SUITE3.tests)
@@ -1003,7 +711,7 @@ class CreateTaskList(unittest.TestCase):
suites = [SUITE3]
exclude_suites = []
suite_list = _create_executor_list(suites, exclude_suites)
- task_list = burn_in.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
+ task_list = under_test.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
self.assertEqual(len(task_list), len(VARIANTS["variant_generate_tasks_multiversion"].tasks))
self.assertEqual(task_list[GENERATE_RESMOKE_TASKS_BASENAME]["use_multiversion"],
MULTIVERSION_PATH)
@@ -1013,7 +721,7 @@ class CreateTaskList(unittest.TestCase):
suites = [SUITE3]
exclude_suites = []
suite_list = _create_executor_list(suites, exclude_suites)
- task_list = burn_in.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
+ task_list = under_test.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
self.assertEqual(len(task_list), len(VARIANTS["variant_generate_tasks_no_suite"].tasks))
self.assertIn(GENERATE_RESMOKE_TASKS_BASENAME, task_list)
self.assertEqual(task_list[GENERATE_RESMOKE_TASKS_BASENAME]["tests"], SUITE3.tests)
@@ -1023,7 +731,7 @@ class CreateTaskList(unittest.TestCase):
suites = [SUITE1, SUITE2]
exclude_suites = []
suite_list = _create_executor_list(suites, exclude_suites)
- task_list = burn_in.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
+ task_list = under_test.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
self.assertEqual(len(task_list), 1)
self.assertIn("task1", task_list)
self.assertEqual(task_list["task1"]["resmoke_args"], "--suites=suite1 var1arg1")
@@ -1036,7 +744,7 @@ class CreateTaskList(unittest.TestCase):
suites = [SUITE1, SUITE2, SUITE3]
suite_list = _create_executor_list(suites, [])
exclude_suites = ["suite2"]
- task_list = burn_in.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
+ task_list = under_test.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
self.assertEqual(len(task_list), 1)
self.assertIn("task3", task_list)
self.assertEqual(task_list["task3"]["resmoke_args"], "--suites=suite3 var2arg3")
@@ -1046,25 +754,22 @@ class CreateTaskList(unittest.TestCase):
def test_create_task_list_no_suites(self):
variant = "variant2"
+ evg_conf_mock = MagicMock()
suite_list = {}
- exclude_suites = ["suite2"]
- task_list = burn_in.create_task_list(EVERGREEN_CONF, variant, suite_list, exclude_suites)
- self.assertEqual(len(task_list), 0)
- self.assertEqual(task_list, {})
- def test_create_task_list_novariant(self):
- class BadVariant(Exception):
- pass
+ task_list = under_test.create_task_list(evg_conf_mock, variant, suite_list, [])
- def _raise_bad_variant(code=0):
- raise BadVariant("Bad variant {}".format(code))
+ self.assertEqual(len(task_list), 0)
+ self.assertEqual(task_list, {})
+ def test_create_task_list_no_variant(self):
variant = "novariant"
- suites = [SUITE1, SUITE2, SUITE3]
- suite_list = _create_executor_list(suites, [])
- with patch("sys.exit", _raise_bad_variant):
- with self.assertRaises(BadVariant):
- burn_in.create_task_list(EVERGREEN_CONF, variant, suite_list, [])
+ evg_conf_mock = MagicMock()
+ evg_conf_mock.get_variant.return_value = None
+ suites = {}
+
+ with self.assertRaises(ValueError):
+ under_test.create_task_list(EVERGREEN_CONF, variant, suites, [])
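+
+# Editor's note: a sketch of the validation these tests expect from create_task_list. The
+# ValueError guard and the empty-dict result are taken from the assertions above; the task
+# attributes used in the loop (resmoke_suite, resmoke_args) are assumptions for illustration.
+#
+#   def create_task_list(evergreen_conf, build_variant, suite_list, exclude_suites):
+#       """Find the tasks on the given variant that run any of the changed suites (sketch)."""
+#       evg_build_variant = evergreen_conf.get_variant(build_variant)
+#       if not evg_build_variant:
+#           # The tests above expect a ValueError here instead of the old sys.exit() path.
+#           raise ValueError(f"Build variant '{build_variant}' not found in the evergreen config")
+#       task_list = {}
+#       for task in evg_build_variant.tasks:
+#           suite_name = getattr(task, "resmoke_suite", None)  # attribute name is an assumption
+#           if suite_name in suite_list and suite_name not in exclude_suites:
+#               task_list[task.name] = {"resmoke_args": getattr(task, "resmoke_args", "")}
+#       return task_list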
class TestFindChangedTests(unittest.TestCase):
@@ -1073,7 +778,7 @@ class TestFindChangedTests(unittest.TestCase):
repo_mock = MagicMock()
changed_files_mock.return_value = set()
- self.assertEqual(0, len(burn_in.find_changed_tests(repo_mock)))
+ self.assertEqual(0, len(under_test.find_changed_tests(repo_mock)))
@patch(ns("find_changed_files"))
@patch(ns("os.path.isfile"))
@@ -1087,7 +792,7 @@ class TestFindChangedTests(unittest.TestCase):
changed_files_mock.return_value = set(file_list)
is_file_mock.return_value = True
- found_tests = burn_in.find_changed_tests(repo_mock)
+ found_tests = under_test.find_changed_tests(repo_mock)
self.assertIn(file_list[0], found_tests)
self.assertIn(file_list[2], found_tests)
@@ -1105,7 +810,7 @@ class TestFindChangedTests(unittest.TestCase):
changed_files_mock.return_value = set(file_list)
is_file_mock.return_value = False
- found_tests = burn_in.find_changed_tests(repo_mock)
+ found_tests = under_test.find_changed_tests(repo_mock)
self.assertEqual(0, len(found_tests))
@@ -1121,7 +826,7 @@ class TestFindChangedTests(unittest.TestCase):
changed_files_mock.return_value = set(file_list)
is_file_mock.return_value = True
- found_tests = burn_in.find_changed_tests(repo_mock)
+ found_tests = under_test.find_changed_tests(repo_mock)
self.assertIn(file_list[0], found_tests)
self.assertIn(file_list[2], found_tests)
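
# Editor's note: a heavily hedged sketch of the filtering these find_changed_tests assertions
# imply. Only the os.path.isfile check is directly visible in this diff; the jstests/ prefix and
# .js-extension checks are assumptions used to explain why some entries of file_list are dropped.
#
#   import os
#
#   def _filter_changed_tests(changed_files):
#       """Keep only changed paths that are existing JavaScript tests (sketch)."""
#       return [
#           path for path in sorted(changed_files)
#           if path.startswith("jstests") and path.endswith(".js") and os.path.isfile(path)
#       ]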
diff --git a/buildscripts/util/taskname.py b/buildscripts/util/taskname.py
index ee9852e2295..c7b3141cf20 100644
--- a/buildscripts/util/taskname.py
+++ b/buildscripts/util/taskname.py
@@ -10,11 +10,12 @@ def name_generated_task(parent_name, task_index, total_tasks, variant=None):
:param parent_name: Name of the parent task.
:param task_index: Index of this sub-task.
:param total_tasks: Total number of sub-tasks being generated.
+ :param variant: Build variant to run task in.
:return: Zero-padded name of sub-task.
"""
suffix = ""
if variant:
- suffix = "_{0}".format(variant)
+ suffix = f"_{variant}"
index_width = int(math.ceil(math.log10(total_tasks)))
- return "{0}_{1}{2}".format(parent_name, str(task_index).zfill(index_width), suffix)
+ return f"{parent_name}_{str(task_index).zfill(index_width)}{suffix}"
diff --git a/etc/evergreen.yml b/etc/evergreen.yml
index b0551714084..dd3d8814eae 100644
--- a/etc/evergreen.yml
+++ b/etc/evergreen.yml
@@ -4831,23 +4831,17 @@ tasks:
set -o verbose
${activate_virtualenv}
- # If this is a scheduled build, we check for changes against the last scheduled commit.
- if [ "${is_patch}" != "true" ]; then
- burn_in_args="--checkEvergreen"
- fi
-
# Capture a list of new and modified tests. The expansion macro burn_in_tests_build_variant
# is used for finding the associated tasks from a different build variant than the
# burn_in_tests_gen task executes on.
- build_variant_opts="--buildVariant=${build_variant}"
+ build_variant_opts="--build-variant=${build_variant}"
if [ -n "${burn_in_tests_build_variant|}" ]; then
- build_variant_opts="--buildVariant=${burn_in_tests_build_variant} --runBuildVariant=${build_variant}"
+ build_variant_opts="--build-variant=${burn_in_tests_build_variant} --run-build-variant=${build_variant}"
fi
- # Increase the burn_in repetition from 2 to 1000 executions or 10 minutes
- burn_in_args="$burn_in_args --repeatTestsMin=2 --repeatTestsMax=1000 --repeatTestsSecs=600"
+ burn_in_args="$burn_in_args --repeat-tests-min=2 --repeat-tests-max=1000 --repeat-tests-secs=600"
# Evergreen executable is in $HOME.
- PATH=$PATH:$HOME $python buildscripts/burn_in_tests.py --branch=${branch_name} --project=${project} $build_variant_opts --distro=${distro_id} --generateTasksFile=burn_in_tests_gen.json --noExec $burn_in_args
+ PATH=$PATH:$HOME $python buildscripts/burn_in_tests.py --project=${project} $build_variant_opts --distro=${distro_id} --generate-tasks-file=burn_in_tests_gen.json $burn_in_args
- command: archive.targz_pack
params:
target: src/burn_in_tests_gen.tgz
@@ -10258,8 +10252,6 @@ buildvariants:
- name: lint_fuzzer_sanity_patch
- name: lint_yaml
- name: burn_in_tests_gen
- distros:
- - rhel62-large
- name: .aggfuzzer
- name: .aggregation
- name: audit