author     David Bradford <david.bradford@mongodb.com>    2021-03-25 11:29:10 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2021-03-25 20:24:55 +0000
commit     65b7837eee6b6b2554e277e81775f2cf09c698fc (patch)
tree       51e2a00e403cd52101efb47cab3f48dfcbce9a7f /buildscripts
parent     b6b932cdf0cfebad564722c9bf41f60e0b8f5464 (diff)
download   mongo-65b7837eee6b6b2554e277e81775f2cf09c698fc.tar.gz
SERVER-54814: Separate evergreen burn_in_tests from local mode
Diffstat (limited to 'buildscripts')
-rw-r--r--  buildscripts/burn_in_tags.py                                    |  18
-rw-r--r--  buildscripts/burn_in_tags_bypass_compile_and_fetch_binaries.py  |  59
-rwxr-xr-x  buildscripts/burn_in_tests.py                                   | 643
-rw-r--r--  buildscripts/burn_in_tests_multiversion.py                      |  72
-rw-r--r--  buildscripts/evergreen_burn_in_tests.py                         | 495
-rw-r--r--  buildscripts/selected_tests.py                                  |   6
-rw-r--r--  buildscripts/tests/test_burn_in_tags.py                         |  27
-rw-r--r--  buildscripts/tests/test_burn_in_tests.py                        | 385
-rw-r--r--  buildscripts/tests/test_burn_in_tests_multiversion.py           |  55
-rw-r--r--  buildscripts/tests/test_evergreen_burn_in_tests.py              | 384
-rw-r--r--  buildscripts/tests/test_selected_tests.py                       |  27
11 files changed, 1207 insertions(+), 964 deletions(-)
diff --git a/buildscripts/burn_in_tags.py b/buildscripts/burn_in_tags.py
index badc8e413ee..fba9a1e1d42 100644
--- a/buildscripts/burn_in_tags.py
+++ b/buildscripts/burn_in_tags.py
@@ -13,16 +13,18 @@ from git import Repo
from shrub.v2 import ShrubProject, BuildVariant, ExistingTask
# Get relative imports to work when the package is not installed on the PYTHONPATH.
+
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# pylint: disable=wrong-import-position
+from buildscripts.evergreen_burn_in_tests import GenerateBurnInExecutor, GenerateConfig, \
+ EvergreenFileChangeDetector
from buildscripts.util.fileops import write_file_to_dir
import buildscripts.util.read_config as read_config
from buildscripts.ciconfig import evergreen
from buildscripts.ciconfig.evergreen import EvergreenProjectConfig, Variant
-from buildscripts.burn_in_tests import create_generate_tasks_config, create_tests_by_task, \
- find_changed_tests, GenerateConfig, RepeatConfig, DEFAULT_REPO_LOCATIONS
+from buildscripts.burn_in_tests import create_tests_by_task, RepeatConfig, DEFAULT_REPO_LOCATIONS
# pylint: enable=wrong-import-position
EXTERNAL_LOGGERS = {
@@ -126,7 +128,7 @@ def _generate_evg_build_variant(
return build_variant
-# pylint: disable=too-many-arguments
+# pylint: disable=too-many-arguments,too-many-locals
def _generate_evg_tasks(evergreen_api: EvergreenApi, shrub_project: ShrubProject,
task_expansions: Dict[str, Any], build_variant_map: Dict[str, str],
repos: List[Repo], evg_conf: EvergreenProjectConfig) -> None:
@@ -142,20 +144,22 @@ def _generate_evg_tasks(evergreen_api: EvergreenApi, shrub_project: ShrubProject
for build_variant, run_build_variant in build_variant_map.items():
config_options = _get_config_options(task_expansions, build_variant, run_build_variant)
task_id = task_expansions[TASK_ID_EXPANSION]
- changed_tests = find_changed_tests(repos, evg_api=evergreen_api, task_id=task_id)
+ change_detector = EvergreenFileChangeDetector(task_id, evergreen_api)
+ changed_tests = change_detector.find_changed_tests(repos)
tests_by_task = create_tests_by_task(build_variant, evg_conf, changed_tests)
if tests_by_task:
shrub_build_variant = _generate_evg_build_variant(
evg_conf.get_variant(build_variant), run_build_variant,
task_expansions["build_variant"])
gen_config = GenerateConfig(build_variant, config_options.project, run_build_variant,
- config_options.distro).validate(evg_conf)
+ config_options.distro,
+ include_gen_task=False).validate(evg_conf)
repeat_config = RepeatConfig(repeat_tests_min=config_options.repeat_tests_min,
repeat_tests_max=config_options.repeat_tests_max,
repeat_tests_secs=config_options.repeat_tests_secs)
- create_generate_tasks_config(shrub_build_variant, tests_by_task, gen_config,
- repeat_config, evergreen_api, include_gen_task=False)
+ burn_in_generator = GenerateBurnInExecutor(gen_config, repeat_config, evergreen_api)
+ burn_in_generator.add_config_for_build_variant(shrub_build_variant, tests_by_task)
shrub_project.add_build_variant(shrub_build_variant)
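
For illustration, a minimal sketch of the call pattern this hunk adopts; it assumes a valid Evergreen auth config, and the task id, variant names, and repeat value below are placeholders:

    import os
    from git import Repo
    from shrub.v2 import BuildVariant
    from evergreen.api import RetryingEvergreenApi

    from buildscripts.burn_in_tests import (DEFAULT_REPO_LOCATIONS, RepeatConfig,
                                            create_tests_by_task)
    from buildscripts.ciconfig.evergreen import parse_evergreen_file
    from buildscripts.evergreen_burn_in_tests import (EvergreenFileChangeDetector,
                                                      GenerateBurnInExecutor, GenerateConfig)

    evg_api = RetryingEvergreenApi.get_api(config_file=".evergreen.yml")
    evg_conf = parse_evergreen_file("etc/evergreen.yml")
    repos = [Repo(path) for path in DEFAULT_REPO_LOCATIONS if os.path.isdir(path)]

    # Detect changed tests from the Evergreen manifest, then map them to tasks.
    detector = EvergreenFileChangeDetector("placeholder_task_id", evg_api)
    changed_tests = detector.find_changed_tests(repos)
    tests_by_task = create_tests_by_task("enterprise-rhel-80-64-bit-dynamic-required",
                                         evg_conf, changed_tests)

    # Attach the generated burn_in sub-tasks to a shrub build variant.
    gen_config = GenerateConfig("enterprise-rhel-80-64-bit-dynamic-required",
                                "mongodb-mongo-master",
                                include_gen_task=False).validate(evg_conf)
    repeat_config = RepeatConfig(repeat_tests_secs=600)
    shrub_variant = BuildVariant("placeholder-run-variant")
    GenerateBurnInExecutor(gen_config, repeat_config, evg_api).add_config_for_build_variant(
        shrub_variant, tests_by_task)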
diff --git a/buildscripts/burn_in_tags_bypass_compile_and_fetch_binaries.py b/buildscripts/burn_in_tags_bypass_compile_and_fetch_binaries.py
deleted file mode 100644
index 8a7e0678941..00000000000
--- a/buildscripts/burn_in_tags_bypass_compile_and_fetch_binaries.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python3
-"""Bypass compile and fetch binaries for burn_in_tags."""
-
-import logging
-import sys
-
-import click
-import structlog
-from structlog.stdlib import LoggerFactory
-
-from evergreen.api import RetryingEvergreenApi
-from buildscripts.bypass_compile_and_fetch_binaries import TargetBuild, gather_artifacts_and_update_expansions
-
-structlog.configure(logger_factory=LoggerFactory())
-LOGGER = structlog.get_logger(__name__)
-
-EVG_CONFIG_FILE = ".evergreen.yml"
-
-
-@click.command()
-@click.option("--project", required=True, help="The evergreen project.")
-@click.option("--build-variant", required=True, help="Build variant where compile is running.")
-@click.option("--revision", required=True, help="Base revision of the build.")
-@click.option("--out-file", required=True, help="File to write macros expansions to.")
-@click.option("--version-id", required=True, help="Evergreen version id of the current build.")
-@click.option("--json-artifact", required=True,
- help="The JSON file to write out the metadata of files to attach to task.")
-def main( # pylint: disable=too-many-arguments,too-many-locals
- project, build_variant, revision, out_file, version_id, json_artifact):
- """
- Create a file with expansions that can be used to bypass compile.
-
- This is used for dynamically generated build variants that can use a base build variants
- compile artifacts to run against.
- \f
-
- :param project: The evergreen project.
- :param build_variant: The build variant whose artifacts we want to use.
- :param revision: The base revision being run against.
- :param out_file: File to write expansions to.
- :param version_id: Evergreen version id being run against.
- """
- logging.basicConfig(
- format="[%(asctime)s - %(name)s - %(levelname)s] %(message)s",
- level=logging.DEBUG,
- stream=sys.stdout,
- )
-
- evg_api = RetryingEvergreenApi.get_api(config_file=EVG_CONFIG_FILE)
-
- version = evg_api.version_by_id(version_id)
- build = version.build_by_variant(build_variant)
-
- target = TargetBuild(project=project, revision=revision, build_variant=build_variant)
- gather_artifacts_and_update_expansions(build, target, json_artifact, out_file)
-
-
-if __name__ == "__main__":
- main() # pylint: disable=no-value-for-parameter
diff --git a/buildscripts/burn_in_tests.py b/buildscripts/burn_in_tests.py
index e139528d918..e0f829d6365 100755
--- a/buildscripts/burn_in_tests.py
+++ b/buildscripts/burn_in_tests.py
@@ -1,44 +1,33 @@
#!/usr/bin/env python3
"""Command line utility for determining what jstests have been added or modified."""
import copy
-import datetime
import logging
import os.path
import shlex
import subprocess
import sys
+from abc import ABC, abstractmethod
from collections import defaultdict
-from math import ceil
-from typing import Optional, Set, Tuple, List, Dict
+from typing import Optional, Set, Tuple, List, Dict, NamedTuple
import click
-import requests
import yaml
-from evergreen.api import RetryingEvergreenApi, EvergreenApi
from git import Repo
import structlog
from structlog.stdlib import LoggerFactory
-from shrub.v2 import Task, TaskDependency, BuildVariant, ShrubProject, ExistingTask
-
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# pylint: disable=wrong-import-position
from buildscripts.patch_builds.change_data import generate_revision_map, \
- generate_revision_map_from_manifest, RevisionMap, find_changed_files_in_repos
+ RevisionMap, find_changed_files_in_repos
import buildscripts.resmokelib.parser
from buildscripts.resmokelib.suitesconfig import create_test_membership_map, get_suites
from buildscripts.resmokelib.utils import default_if_none, globstar
from buildscripts.ciconfig.evergreen import parse_evergreen_file, ResmokeArgs, \
EvergreenProjectConfig, VariantTask
-from buildscripts.util.fileops import write_file
-from buildscripts.util.teststats import HistoricTaskData, TestRuntime
-from buildscripts.util.taskname import name_generated_task
-from buildscripts.patch_builds.task_generation import (resmoke_commands, TimeoutInfo,
- validate_task_generation_limit)
-
# pylint: enable=wrong-import-position
structlog.configure(logger_factory=LoggerFactory())
@@ -49,18 +38,11 @@ EXTERNAL_LOGGERS = {
"urllib3",
}
-AVG_TEST_RUNTIME_ANALYSIS_DAYS = 14
-AVG_TEST_SETUP_SEC = 4 * 60
-AVG_TEST_TIME_MULTIPLIER = 3
-CONFIG_FILE = ".evergreen.yml"
-DEFAULT_PROJECT = "mongodb-mongo-master"
DEFAULT_VARIANT = "enterprise-rhel-80-64-bit-dynamic-required"
ENTERPRISE_MODULE_PATH = "src/mongo/db/modules/enterprise"
DEFAULT_REPO_LOCATIONS = [".", f"./{ENTERPRISE_MODULE_PATH}"]
REPEAT_SUITES = 2
EVERGREEN_FILE = "etc/evergreen.yml"
-MIN_AVG_TEST_OVERFLOW_SEC = float(60)
-MIN_AVG_TEST_TIME_SEC = 5 * 60
# The executor_file and suite_files defaults are required to make the suite resolver work
# correctly.
SELECTOR_FILE = "etc/burn_in_tests.yml"
@@ -69,9 +51,6 @@ SUITE_FILES = ["with_server"]
SUPPORTED_TEST_KINDS = ("fsm_workload_test", "js_test", "json_schema_test",
"multi_stmt_txn_passthrough", "parallel_fsm_workload_test")
-BURN_IN_TESTS_GEN_TASK = "burn_in_tests_gen"
-BURN_IN_TESTS_TASK = "burn_in_tests"
-
class RepeatConfig(object):
"""Configuration for how tests should be repeated."""
@@ -137,56 +116,6 @@ class RepeatConfig(object):
])
-class GenerateConfig(object):
- """Configuration for how to generate tasks."""
-
- def __init__(self, build_variant: str, project: str, run_build_variant: Optional[str] = None,
- distro: Optional[str] = None, task_id: Optional[str] = None):
- # pylint: disable=too-many-arguments,too-many-locals
- """
- Create a GenerateConfig.
-
- :param build_variant: Build variant to get tasks from.
- :param project: Project to run tasks on.
- :param run_build_variant: Build variant to run new tasks on.
- :param distro: Distro to run tasks on.
- :param task_id: Evergreen task being run under.
- """
- self.build_variant = build_variant
- self._run_build_variant = run_build_variant
- self.distro = distro
- self.project = project
- self.task_id = task_id
-
- @property
- def run_build_variant(self):
- """Build variant tasks should run against."""
- if self._run_build_variant:
- return self._run_build_variant
- return self.build_variant
-
- def validate(self, evg_conf: EvergreenProjectConfig):
- """
- Raise an exception if this configuration is invalid.
-
- :param evg_conf: Evergreen configuration.
- :return: self.
- """
- self._check_variant(self.build_variant, evg_conf)
- return self
-
- @staticmethod
- def _check_variant(build_variant: str, evg_conf: EvergreenProjectConfig):
- """
- Check if the build_variant is found in the evergreen file.
-
- :param build_variant: Build variant to check.
- :param evg_conf: Evergreen configuration to check against.
- """
- if not evg_conf.get_variant(build_variant):
- raise ValueError(f"Build variant '{build_variant}' not found in Evergreen file")
-
-
def is_file_a_test_file(file_path: str) -> bool:
"""
Check if the given path points to a test file.
@@ -204,47 +133,6 @@ def is_file_a_test_file(file_path: str) -> bool:
return True
-def _create_revision_map(repos: List[Repo], origin_rev: Optional[str], evg_api: EvergreenApi,
- task_id: Optional[str]) -> RevisionMap:
- """
- Create a map of the repos and the given revisions to diff against.
-
- :param repos: Repositories to include in the map.
- :param origin_rev: User specified revision to compare against.
- :param evg_api: Evergreen API client.
- :param task_id: Evergreen task ID.
- :return: Map of repositories to revisions.
- """
- if origin_rev:
- return generate_revision_map(repos, {"mongo": origin_rev})
-
- if evg_api and task_id:
- return generate_revision_map_from_manifest(repos, task_id, evg_api)
-
- return {}
-
-
-def find_changed_tests(repos: List[Repo], origin_rev: Optional[str] = None,
- evg_api: Optional[EvergreenApi] = None,
- task_id: Optional[str] = None) -> Set[str]:
- """
- Find the changed tests.
-
- Use git to find which test files have changed in this patch.
- The returned file paths are in normalized form (see os.path.normpath(path)).
-
- :param repos: List of repos containing changed files.
- :param origin_rev: The revision that local changes will be compared against.
- :param evg_api: Evergreen API client.
- :param task_id: Evergreen task ID.
- :return: Set of changed tests.
- """
- revision_map = _create_revision_map(repos, origin_rev, evg_api, task_id)
- LOGGER.info("Calculated revision map", revision_map=revision_map)
- changed_files = find_changed_files_in_repos(repos, revision_map)
- return {os.path.normpath(path) for path in changed_files if is_file_a_test_file(path)}
-
-
def find_excludes(selector_file: str) -> Tuple[List, List, List]:
"""Parse etc/burn_in_tests.yml. Returns lists of excluded suites, tasks & tests."""
@@ -362,38 +250,47 @@ def _distro_to_run_task_on(task: VariantTask, evg_proj_config: EvergreenProjectC
return task.run_on[0]
-def _gather_task_info(task: VariantTask, tests_by_suite: Dict,
- evg_proj_config: EvergreenProjectConfig, build_variant: str) -> Dict:
+class TaskInfo(NamedTuple):
"""
- Gather the information needed to run the given task.
+ Information about tests to run under a specific Task.
- :param task: Task to be run.
- :param tests_by_suite: Dict of suites.
- :param evg_proj_config: Evergreen project configuration.
- :param build_variant: Build variant task will be run on.
- :return: Dictionary of information needed to run task.
+ display_task_name: Display name of task.
+ resmoke_args: Arguments to provide to resmoke on task invocation.
+ tests: List of tests to run as part of task.
+ use_multiversion: If running under multiversion, path to multiversion binaries.
+ distro: Evergreen distro task runs on.
"""
- return {
- "display_task_name": _get_task_name(task),
- "resmoke_args": _set_resmoke_args(task),
- "tests": tests_by_suite[task.resmoke_suite],
- "use_multiversion": task.multiversion_path,
- "distro": _distro_to_run_task_on(task, evg_proj_config, build_variant)
- } # yapf: disable
+
+ display_task_name: str
+ resmoke_args: str
+ tests: List[str]
+ use_multiversion: Optional[str]
+ distro: str
+
+ @classmethod
+ def from_task(cls, task: VariantTask, tests_by_suite: Dict[str, List[str]],
+ evg_proj_config: EvergreenProjectConfig, build_variant: str) -> "TaskInfo":
+ """
+ Gather the information needed to run the given task.
+
+ :param task: Task to be run.
+        :param tests_by_suite: Mapping of suite names to tests to run.
+        :param evg_proj_config: Evergreen project configuration.
+        :param build_variant: Build variant task will be run on.
+        :return: Information needed to run the given task.
+ """
+ return cls(
+ display_task_name=_get_task_name(task), resmoke_args=_set_resmoke_args(task),
+ tests=tests_by_suite[task.resmoke_suite], use_multiversion=task.multiversion_path,
+ distro=_distro_to_run_task_on(task, evg_proj_config, build_variant))
def create_task_list(evergreen_conf: EvergreenProjectConfig, build_variant: str,
- tests_by_suite: Dict[str, List[str]], exclude_tasks: [str]):
+ tests_by_suite: Dict[str, List[str]],
+ exclude_tasks: [str]) -> Dict[str, TaskInfo]:
"""
Find associated tasks for the specified build_variant and suites.
- Returns a dict keyed by task_name, with executor, resmoke_args & tests, i.e.,
- {'jsCore_small_oplog':
- {'resmoke_args': '--suites=core_small_oplog --storageEngine=inMemory',
- 'tests': ['jstests/core/all2.js', 'jstests/core/all3.js'],
- 'use_multiversion': '/data/multiversion'}
- }
-
:param evergreen_conf: Evergreen configuration for project.
:param build_variant: Build variant to select tasks from.
:param tests_by_suite: Suites to be run.
@@ -418,7 +315,7 @@ def create_task_list(evergreen_conf: EvergreenProjectConfig, build_variant: str,
# Return the list of tasks to run for the specified suite.
task_list = {
- task_name: _gather_task_info(task, tests_by_suite, evergreen_conf, build_variant)
+ task_name: TaskInfo.from_task(task, tests_by_suite, evergreen_conf, build_variant)
for task_name, task in all_variant_tasks.items() if task.resmoke_suite in tests_by_suite
}
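
Callers that previously indexed the returned dicts now use attribute access on TaskInfo. A small sketch with made-up field values:

    from buildscripts.burn_in_tests import TaskInfo

    task_info = TaskInfo(
        display_task_name="jsCore",
        resmoke_args="--suites=core --storageEngine=inMemory",
        tests=["jstests/core/all2.js", "jstests/core/all3.js"],
        use_multiversion=None,  # or a path such as "/data/multiversion"
        distro="rhel80-small",
    )

    for test in task_info.tests:  # previously: task_info["tests"]
        print(f"{task_info.display_task_name}: {test}")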
@@ -437,202 +334,10 @@ def _set_resmoke_cmd(repeat_config: RepeatConfig, resmoke_args: [str]) -> [str]:
return new_args
-def _parse_avg_test_runtime(test: str,
- task_avg_test_runtime_stats: List[TestRuntime]) -> Optional[float]:
- """
- Parse list of test runtimes to find runtime for particular test.
-
- :param task_avg_test_runtime_stats: List of average historic runtimes of tests.
- :param test: Test name.
- :return: Historical average runtime of the test.
- """
- for test_stat in task_avg_test_runtime_stats:
- if test_stat.test_name == test:
- return test_stat.runtime
- return None
-
-
-def _calculate_timeout(avg_test_runtime: float) -> int:
- """
- Calculate timeout_secs for the Evergreen task.
-
- :param avg_test_runtime: How long a test has historically taken to run.
- :return: The test runtime times AVG_TEST_TIME_MULTIPLIER, or MIN_AVG_TEST_TIME_SEC (whichever
- is higher).
- """
- return max(MIN_AVG_TEST_TIME_SEC, ceil(avg_test_runtime * AVG_TEST_TIME_MULTIPLIER))
-
-
-def _calculate_exec_timeout(repeat_config: RepeatConfig, avg_test_runtime: float) -> int:
- """
- Calculate exec_timeout_secs for the Evergreen task.
-
- :param repeat_config: Information about how the test will repeat.
- :param avg_test_runtime: How long a test has historically taken to run.
- :return: repeat_tests_secs + an amount of padding time so that the test has time to finish on
- its final run.
- """
- LOGGER.debug("Calculating exec timeout", repeat_config=repeat_config,
- avg_test_runtime=avg_test_runtime)
- repeat_tests_secs = repeat_config.repeat_tests_secs
- if avg_test_runtime > repeat_tests_secs and repeat_config.repeat_tests_min:
- # If a single execution of the test takes longer than the repeat time, then we don't
- # have to worry about the repeat time at all and can just use the average test runtime
- # and minimum number of executions to calculate the exec timeout value.
- return ceil(avg_test_runtime * AVG_TEST_TIME_MULTIPLIER * repeat_config.repeat_tests_min)
-
- test_execution_time_over_limit = avg_test_runtime - (repeat_tests_secs % avg_test_runtime)
- test_execution_time_over_limit = max(MIN_AVG_TEST_OVERFLOW_SEC, test_execution_time_over_limit)
- return ceil(repeat_tests_secs + (test_execution_time_over_limit * AVG_TEST_TIME_MULTIPLIER) +
- AVG_TEST_SETUP_SEC)
-
-
-def _generate_timeouts(repeat_config: RepeatConfig, test: str,
- task_avg_test_runtime_stats: [TestRuntime]) -> TimeoutInfo:
- """
- Add timeout.update command to list of commands for a burn in execution task.
-
- :param repeat_config: Information on how the test will repeat.
- :param test: Test name.
- :param task_avg_test_runtime_stats: Average historic runtimes of tests.
- :return: TimeoutInfo to use.
- """
- if task_avg_test_runtime_stats:
- avg_test_runtime = _parse_avg_test_runtime(test, task_avg_test_runtime_stats)
- if avg_test_runtime:
- LOGGER.debug("Avg test runtime", test=test, runtime=avg_test_runtime)
-
- timeout = _calculate_timeout(avg_test_runtime)
- exec_timeout = _calculate_exec_timeout(repeat_config, avg_test_runtime)
- LOGGER.debug("Using timeout overrides", exec_timeout=exec_timeout, timeout=timeout)
- timeout_info = TimeoutInfo.overridden(exec_timeout, timeout)
-
- LOGGER.debug("Override runtime for test", test=test, timeout=timeout_info)
- return timeout_info
-
- return TimeoutInfo.default_timeout()
-
-
-def _get_task_runtime_history(evg_api: Optional[EvergreenApi], project: str, task: str,
- variant: str) -> List[TestRuntime]:
- """
- Fetch historical average runtime for all tests in a task from Evergreen API.
-
- :param evg_api: Evergreen API.
- :param project: Project name.
- :param task: Task name.
- :param variant: Variant name.
- :return: Test historical runtimes, parsed into teststat objects.
- """
- if not evg_api:
- return []
-
- try:
- end_date = datetime.datetime.utcnow().replace(microsecond=0)
- start_date = end_date - datetime.timedelta(days=AVG_TEST_RUNTIME_ANALYSIS_DAYS)
- test_stats = HistoricTaskData.from_evg(evg_api, project, start_date=start_date,
- end_date=end_date, task=task, variant=variant)
- return test_stats.get_tests_runtimes()
- except requests.HTTPError as err:
- if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
- # Evergreen may return a 503 when the service is degraded.
- # We fall back to returning no test history
- return []
- else:
- raise
-
-
-def _create_task(index: int, test_count: int, test: str, task_data: Dict,
- task_runtime_stats: List[TestRuntime], generate_config: GenerateConfig,
- repeat_config: RepeatConfig, task_prefix: str) -> Task:
- # pylint: disable=too-many-arguments,too-many-locals
- """
- Create the described shrub sub task.
-
- :param index: Index of task being created.
- :param test_count: Total number of testing being created.
- :param test: Test task is being generated for.
- :param task_data: Data about task to create.
- :param task_runtime_stats: Historical runtime of test.
- :param generate_config: Configuration of how to generate the task.
- :param repeat_config: Configuration of how the task should be repeated.
- :param task_prefix: String to prefix generated task with.
- :return: Shrub task for given configuration.
- """
- multiversion_path = task_data.get("use_multiversion")
- display_task_name = task_data["display_task_name"]
- resmoke_args = task_data["resmoke_args"]
- sub_task_name = name_generated_task(f"{task_prefix}:{display_task_name}", index, test_count,
- generate_config.run_build_variant)
- LOGGER.debug("Generating sub-task", sub_task=sub_task_name)
-
- test_unix_style = test.replace('\\', '/')
- run_tests_vars = {
- "resmoke_args":
- f"{resmoke_args} {repeat_config.generate_resmoke_options()} {test_unix_style}"
- }
- if multiversion_path:
- run_tests_vars["task_path_suffix"] = multiversion_path
- timeout = _generate_timeouts(repeat_config, test, task_runtime_stats)
- commands = resmoke_commands("run tests", run_tests_vars, timeout, multiversion_path)
- dependencies = {TaskDependency("archive_dist_test_debug")}
-
- return Task(sub_task_name, commands, dependencies)
-
-
-def create_generated_tasks(tests_by_task: Dict, task_prefix: str, generate_config: GenerateConfig,
- repeat_config: RepeatConfig, evg_api: EvergreenApi) -> Set[Task]:
- """
- Create the set of tasks to run the given tests_by_task.
-
- :param tests_by_task: Dictionary of tests to generate tasks for.
- :param task_prefix: Prefix all task names with this.
- :param generate_config: Configuration of what to generate.
- :param repeat_config: Configuration of how to repeat tests.
- :param evg_api: Evergreen API.
- :return: Set of shrub tasks to run tests_by_task.
- """
- tasks: Set[Task] = set()
- for task in sorted(tests_by_task):
- task_info = tests_by_task[task]
- test_list = task_info["tests"]
- task_runtime_stats = _get_task_runtime_history(evg_api, generate_config.project,
- task_info["display_task_name"],
- generate_config.build_variant)
- test_count = len(test_list)
- for index, test in enumerate(test_list):
- tasks.add(
- _create_task(index, test_count, test, task_info, task_runtime_stats,
- generate_config, repeat_config, task_prefix))
-
- return tasks
-
-
-def create_generate_tasks_config(build_variant: BuildVariant, tests_by_task: Dict,
- generate_config: GenerateConfig, repeat_config: RepeatConfig,
- evg_api: Optional[EvergreenApi], include_gen_task: bool = True,
- task_prefix: str = "burn_in") -> None:
- # pylint: disable=too-many-arguments,too-many-locals
- """
- Create the config for the Evergreen generate.tasks file.
-
- :param build_variant: Shrub configuration to add to.
- :param tests_by_task: Dictionary of tests to generate tasks for.
- :param generate_config: Configuration of what to generate.
- :param repeat_config: Configuration of how to repeat tests.
- :param evg_api: Evergreen API.
- :param include_gen_task: Should generating task be include in display task.
- :param task_prefix: Prefix all task names with this.
- """
- tasks = create_generated_tasks(tests_by_task, task_prefix, generate_config, repeat_config,
- evg_api)
- existing_tasks = {ExistingTask(BURN_IN_TESTS_GEN_TASK)} if include_gen_task else None
- build_variant.display_task(BURN_IN_TESTS_TASK, tasks, execution_existing_tasks=existing_tasks)
-
-
-def create_task_list_for_tests(
- changed_tests: Set[str], build_variant: str, evg_conf: EvergreenProjectConfig,
- exclude_suites: Optional[List] = None, exclude_tasks: Optional[List] = None) -> Dict:
+def create_task_list_for_tests(changed_tests: Set[str], build_variant: str,
+ evg_conf: EvergreenProjectConfig,
+ exclude_suites: Optional[List] = None,
+ exclude_tasks: Optional[List] = None) -> Dict[str, TaskInfo]:
"""
Create a list of tests by task for the given tests.
@@ -658,7 +363,7 @@ def create_task_list_for_tests(
def create_tests_by_task(build_variant: str, evg_conf: EvergreenProjectConfig,
- changed_tests: Set[str]) -> Dict:
+ changed_tests: Set[str]) -> Dict[str, TaskInfo]:
"""
Create a list of tests by task.
@@ -681,36 +386,7 @@ def create_tests_by_task(build_variant: str, evg_conf: EvergreenProjectConfig,
return {}
-# pylint: disable=too-many-arguments
-def create_generate_tasks_file(tests_by_task: Dict, generate_config: GenerateConfig,
- repeat_config: RepeatConfig, evg_api: Optional[EvergreenApi],
- task_prefix: str = 'burn_in', include_gen_task: bool = True) -> str:
- """
- Create an Evergreen generate.tasks file to run the given tasks and tests.
-
- :param tests_by_task: Dictionary of tests and tasks to run.
- :param generate_config: Information about how burn_in should generate tasks.
- :param repeat_config: Information about how burn_in should repeat tests.
- :param evg_api: Evergreen api.
- :param task_prefix: Prefix to start generated task's name with.
- :param include_gen_task: Should the generating task be included in the display task.
- :returns: Configuration to pass to 'generate.tasks'.
- """
- build_variant = BuildVariant(generate_config.run_build_variant)
- create_generate_tasks_config(build_variant, tests_by_task, generate_config, repeat_config,
- evg_api, include_gen_task=include_gen_task,
- task_prefix=task_prefix)
-
- shrub_project = ShrubProject.empty()
- shrub_project.add_build_variant(build_variant)
-
- if not validate_task_generation_limit(shrub_project):
- sys.exit(1)
-
- return shrub_project.json()
-
-
-def run_tests(tests_by_task: Dict, resmoke_cmd: [str]):
+def run_tests(tests_by_task: Dict[str, TaskInfo], resmoke_cmd: [str]) -> None:
"""
Run the given tests locally.
@@ -722,8 +398,8 @@ def run_tests(tests_by_task: Dict, resmoke_cmd: [str]):
for task in sorted(tests_by_task):
log = LOGGER.bind(task=task)
new_resmoke_cmd = copy.deepcopy(resmoke_cmd)
- new_resmoke_cmd.extend(shlex.split(tests_by_task[task]["resmoke_args"]))
- new_resmoke_cmd.extend(tests_by_task[task]["tests"])
+ new_resmoke_cmd.extend(shlex.split(tests_by_task[task].resmoke_args))
+ new_resmoke_cmd.extend(tests_by_task[task].tests)
log.debug("starting execution of task")
try:
subprocess.check_call(new_resmoke_cmd, shell=False)
@@ -748,68 +424,150 @@ def _configure_logging(verbose: bool):
logging.getLogger(log_name).setLevel(logging.WARNING)
-def _get_evg_api(evg_api_config: str, local_mode: bool) -> Optional[EvergreenApi]:
- """
- Get an instance of the Evergreen Api.
+class FileChangeDetector(ABC):
+ """Interface to detect changes to files."""
- :param evg_api_config: Config file with evg auth information.
- :param local_mode: If true, do not connect to Evergreen API.
- :return: Evergreen Api instance.
- """
- if not local_mode:
- return RetryingEvergreenApi.get_api(config_file=evg_api_config)
- return None
+ @abstractmethod
+ def create_revision_map(self, repos: List[Repo]) -> RevisionMap:
+ """
+ Create a map of the repos and the given revisions to diff against.
+ :param repos: List of repos being tracked.
+ :return: Map of repositories and revisions to diff against.
+ """
+ raise NotImplementedError()
-def burn_in(repeat_config: RepeatConfig, generate_config: GenerateConfig, resmoke_args: str,
- generate_tasks_file: str, no_exec: bool, evg_conf: EvergreenProjectConfig,
- repos: List[Repo], evg_api: EvergreenApi, origin_rev: Optional[str]) -> None:
- """
- Run burn_in_tests with the given configuration.
+ def find_changed_tests(self, repos: List[Repo]) -> Set[str]:
+ """
+ Find the changed tests.
- :param repeat_config: Config on how much to repeat tests.
- :param generate_config: Config on how to generate tests.
- :param resmoke_args: Arguments to pass to resmoke.
- :param generate_tasks_file: File to write generated config to.
- :param no_exec: Do not execute tests, just discover tests to run.
- :param evg_conf: Evergreen configuration.
- :param repos: Git repositories to check.
- :param evg_api: Evergreen API client.
- :param project: Evergreen project to query.
- :param origin_rev: The revision that local changes will be compared against.
- """
- changed_tests = find_changed_tests(repos, origin_rev, evg_api, generate_config.task_id)
- LOGGER.info("Found changed tests", files=changed_tests)
+ Use git to find which test files have changed in this patch.
+ The returned file paths are in normalized form (see os.path.normpath(path)).
- # Populate the config values in order to use the helpers from resmokelib.suitesconfig.
- resmoke_cmd = _set_resmoke_cmd(repeat_config, list(resmoke_args))
+ :param repos: List of repos containing changed files.
+ :return: Set of changed tests.
+ """
+ revision_map = self.create_revision_map(repos)
+ LOGGER.info("Calculated revision map", revision_map=revision_map)
- tests_by_task = create_tests_by_task(generate_config.build_variant, evg_conf, changed_tests)
- LOGGER.debug("tests and tasks found", tests_by_task=tests_by_task)
+ changed_files = find_changed_files_in_repos(repos, revision_map)
+ return {os.path.normpath(path) for path in changed_files if is_file_a_test_file(path)}
- if generate_tasks_file:
- json_text = create_generate_tasks_file(tests_by_task, generate_config, repeat_config,
- evg_api)
- write_file(generate_tasks_file, json_text)
- elif not no_exec:
- run_tests(tests_by_task, resmoke_cmd)
- else:
+
+class LocalFileChangeDetector(FileChangeDetector):
+ """A change detector for detecting changes in a local repository."""
+
+ def __init__(self, origin_rev: Optional[str]) -> None:
+ """
+ Create a local file change detector.
+
+ :param origin_rev: Git revision to diff against.
+ """
+ self.origin_rev = origin_rev
+
+ def create_revision_map(self, repos: List[Repo]) -> RevisionMap:
+ """
+ Create a map of the repos and the given revisions to diff against.
+
+ :param repos: List of repos being tracked.
+ :return: Map of repositories and revisions to diff against.
+ """
+ if self.origin_rev:
+ return generate_revision_map(repos, {"mongo": self.origin_rev})
+
+ return {}
+
+
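
Concrete detectors only supply create_revision_map; the shared find_changed_tests does the filtering. As a sketch, a hypothetical detector pinned to a single revision:

    from typing import List

    from git import Repo

    from buildscripts.burn_in_tests import FileChangeDetector
    from buildscripts.patch_builds.change_data import RevisionMap, generate_revision_map


    class PinnedRevisionChangeDetector(FileChangeDetector):
        """Hypothetical detector that always diffs the mongo repo against one revision."""

        def __init__(self, revision: str) -> None:
            self.revision = revision

        def create_revision_map(self, repos: List[Repo]) -> RevisionMap:
            # Same shape as LocalFileChangeDetector, without the optional fallback.
            return generate_revision_map(repos, {"mongo": self.revision})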
+class BurnInExecutor(ABC):
+ """An interface to execute discovered tests."""
+
+ @abstractmethod
+ def execute(self, tests_by_task: Dict[str, TaskInfo]) -> None:
+ """
+ Execute the given tests in the given tasks.
+
+ :param tests_by_task: Dictionary of tasks to run with tests to run in each.
+ """
+ raise NotImplementedError()
+
+
+class NopBurnInExecutor(BurnInExecutor):
+ """A burn-in executor that displays results, but doesn't execute."""
+
+ def execute(self, tests_by_task: Dict[str, TaskInfo]) -> None:
+ """
+ Execute the given tests in the given tasks.
+
+ :param tests_by_task: Dictionary of tasks to run with tests to run in each.
+ """
LOGGER.info("Not running tests due to 'no_exec' option.")
+ for task_name, task_info in tests_by_task.items():
+ print(task_name)
+ for test_name in task_info.tests:
+ print(f"- {test_name}")
+
+
+class LocalBurnInExecutor(BurnInExecutor):
+ """A burn-in executor that runs tests on the local machine."""
+
+ def __init__(self, resmoke_args: str, repeat_config: RepeatConfig) -> None:
+ """
+ Create a new local burn-in executor.
+
+ :param resmoke_args: Resmoke arguments to use for execution.
+ :param repeat_config: How tests should be repeated.
+ """
+ self.resmoke_args = resmoke_args
+ self.repeat_config = repeat_config
+
+ def execute(self, tests_by_task: Dict[str, TaskInfo]) -> None:
+ """
+ Execute the given tests in the given tasks.
+
+ :param tests_by_task: Dictionary of tasks to run with tests to run in each.
+ """
+ # Populate the config values in order to use the helpers from resmokelib.suitesconfig.
+ resmoke_cmd = _set_resmoke_cmd(self.repeat_config, list(self.resmoke_args))
+ run_tests(tests_by_task, resmoke_cmd)
+
+
+class BurnInOrchestrator:
+ """Orchestrate the execution of burn_in_tests."""
+
+ def __init__(self, change_detector: FileChangeDetector, burn_in_executor: BurnInExecutor,
+ evg_conf: EvergreenProjectConfig) -> None:
+ """
+ Create a new orchestrator.
+
+ :param change_detector: Component to use to detect test changes.
+        :param burn_in_executor: Component to execute tests.
+ :param evg_conf: Evergreen project configuration.
+ """
+ self.change_detector = change_detector
+ self.burn_in_executor = burn_in_executor
+ self.evg_conf = evg_conf
+
+ def burn_in(self, repos: List[Repo], build_variant: str) -> None:
+ """
+ Execute burn in tests for the given git repositories.
+
+ :param repos: Repositories to check for changes.
+ :param build_variant: Build variant to use for task definitions.
+ """
+ changed_tests = self.change_detector.find_changed_tests(repos)
+ LOGGER.info("Found changed tests", files=changed_tests)
+
+ tests_by_task = create_tests_by_task(build_variant, self.evg_conf, changed_tests)
+ LOGGER.debug("tests and tasks found", tests_by_task=tests_by_task)
+
+ self.burn_in_executor.execute(tests_by_task)
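
The orchestrator itself is mode-agnostic; the Evergreen wrapper added below (evergreen_burn_in_tests.py) composes it with its own detector and executor. A condensed sketch of that wiring, reusing evg_api, evg_conf and repos built as in main() above, with placeholder ids and file names:

    from buildscripts.evergreen_burn_in_tests import (EvergreenFileChangeDetector,
                                                      GenerateBurnInExecutor, GenerateConfig)

    gen_config = GenerateConfig("some-build-variant", "mongodb-mongo-master",
                                task_id="placeholder_task_id").validate(evg_conf)
    executor = GenerateBurnInExecutor(gen_config, RepeatConfig(), evg_api,
                                      generate_tasks_file="burn_in_tests_gen.json")
    BurnInOrchestrator(EvergreenFileChangeDetector("placeholder_task_id", evg_api),
                       executor, evg_conf).burn_in(repos, gen_config.build_variant)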
@click.command()
@click.option("--no-exec", "no_exec", default=False, is_flag=True,
help="Do not execute the found tests.")
-@click.option("--generate-tasks-file", "generate_tasks_file", default=None, metavar='FILE',
- help="Run in 'generate.tasks' mode. Store task config to given file.")
@click.option("--build-variant", "build_variant", default=DEFAULT_VARIANT, metavar='BUILD_VARIANT',
help="Tasks to run will be selected from this build variant.")
-@click.option("--run-build-variant", "run_build_variant", default=None, metavar='BUILD_VARIANT',
- help="Burn in tasks will be generated on this build variant.")
-@click.option("--distro", "distro", default=None, metavar='DISTRO',
- help="The distro the tasks will execute on.")
-@click.option("--project", "project", default=DEFAULT_PROJECT, metavar='PROJECT',
- help="The evergreen project the tasks will execute on.")
@click.option("--repeat-tests", "repeat_tests_num", default=None, type=int,
help="Number of times to repeat tests.")
@click.option("--repeat-tests-min", "repeat_tests_min", default=None, type=int,
@@ -818,21 +576,16 @@ def burn_in(repeat_config: RepeatConfig, generate_config: GenerateConfig, resmok
help="The maximum number of times to repeat tests if time option is specified.")
@click.option("--repeat-tests-secs", "repeat_tests_secs", default=None, type=int, metavar="SECONDS",
help="Repeat tests for the given time (in secs).")
-@click.option("--evg-api-config", "evg_api_config", default=CONFIG_FILE, metavar="FILE",
- help="Configuration file with connection info for Evergreen API.")
-@click.option("--local", "local_mode", default=False, is_flag=True,
- help="Local mode. Do not call out to evergreen api.")
@click.option("--verbose", "verbose", default=False, is_flag=True, help="Enable extra logging.")
-@click.option("--task_id", "task_id", default=None, metavar='TASK_ID',
- help="The evergreen task id.")
@click.option(
"--origin-rev", "origin_rev", default=None,
help="The revision in the mongo repo that changes will be compared against if specified.")
@click.argument("resmoke_args", nargs=-1, type=click.UNPROCESSED)
# pylint: disable=too-many-arguments,too-many-locals
-def main(build_variant, run_build_variant, distro, project, generate_tasks_file, no_exec,
- repeat_tests_num, repeat_tests_min, repeat_tests_max, repeat_tests_secs, resmoke_args,
- local_mode, evg_api_config, verbose, task_id, origin_rev):
+def main(build_variant: str, no_exec: bool, repeat_tests_num: Optional[int],
+ repeat_tests_min: Optional[int], repeat_tests_max: Optional[int],
+ repeat_tests_secs: Optional[int], resmoke_args: str, verbose: bool,
+ origin_rev: Optional[str]) -> None:
"""
Run new or changed tests in repeated mode to validate their stability.
@@ -848,61 +601,35 @@ def main(build_variant, run_build_variant, distro, project, generate_tasks_file,
The `--repeat-*` arguments allow configuration of how burn_in_tests repeats tests. Tests can
either be repeated a specified number of times with the `--repeat-tests` option, or they can
be repeated for a certain time period with the `--repeat-tests-secs` option.
-
- There are two modes that burn_in_tests can run in:
-
- (1) Normal mode: by default burn_in_tests will attempt to run all detected tests the
- configured number of times. This is useful if you have a test or tests you would like to
- check before submitting a patch to evergreen.
-
- (2) By specifying the `--generate-tasks-file`, burn_in_tests will run generate a configuration
- file that can then be sent to the Evergreen 'generate.tasks' command to create evergreen tasks
- to do all the test executions. This is the mode used to run tests in patch builds.
-
- NOTE: There is currently a limit of the number of tasks burn_in_tests will attempt to generate
- in evergreen. The limit is 1000. If you change enough tests that more than 1000 tasks would
- be generated, burn_in_test will fail. This is to avoid generating more tasks than evergreen
- can handle.
\f
:param build_variant: Build variant to query tasks from.
- :param run_build_variant:Build variant to actually run against.
- :param distro: Distro to run tests on.
- :param project: Project to run tests on.
- :param generate_tasks_file: Create a generate tasks configuration in this file.
:param no_exec: Just perform test discovery, do not execute the tests.
:param repeat_tests_num: Repeat each test this number of times.
:param repeat_tests_min: Repeat each test at least this number of times.
:param repeat_tests_max: Once this number of repetitions has been reached, stop repeating.
:param repeat_tests_secs: Continue repeating tests for this number of seconds.
:param resmoke_args: Arguments to pass through to resmoke.
- :param local_mode: Don't call out to the evergreen API (used for testing).
- :param evg_api_config: Location of configuration file to connect to evergreen.
:param verbose: Log extra debug information.
- :param task_id: Id of evergreen task being run in.
:param origin_rev: The revision that local changes will be compared against.
"""
_configure_logging(verbose)
- evg_conf = parse_evergreen_file(EVERGREEN_FILE)
repeat_config = RepeatConfig(repeat_tests_secs=repeat_tests_secs,
repeat_tests_min=repeat_tests_min,
repeat_tests_max=repeat_tests_max,
repeat_tests_num=repeat_tests_num) # yapf: disable
- generate_config = GenerateConfig(build_variant=build_variant,
- run_build_variant=run_build_variant,
- distro=distro,
- project=project,
- task_id=task_id) # yapf: disable
- if generate_tasks_file:
- generate_config.validate(evg_conf)
-
- evg_api = _get_evg_api(evg_api_config, local_mode)
repos = [Repo(x) for x in DEFAULT_REPO_LOCATIONS if os.path.isdir(x)]
+ evg_conf = parse_evergreen_file(EVERGREEN_FILE)
+
+ change_detector = LocalFileChangeDetector(origin_rev)
+ executor = LocalBurnInExecutor(resmoke_args, repeat_config)
+ if no_exec:
+ executor = NopBurnInExecutor()
- burn_in(repeat_config, generate_config, resmoke_args, generate_tasks_file, no_exec, evg_conf,
- repos, evg_api, origin_rev)
+ burn_in_orchestrator = BurnInOrchestrator(change_detector, executor, evg_conf)
+ burn_in_orchestrator.burn_in(repos, build_variant)
if __name__ == "__main__":
diff --git a/buildscripts/burn_in_tests_multiversion.py b/buildscripts/burn_in_tests_multiversion.py
index 43943728f36..1cff5566347 100644
--- a/buildscripts/burn_in_tests_multiversion.py
+++ b/buildscripts/burn_in_tests_multiversion.py
@@ -6,7 +6,7 @@ import sys
from typing import Dict
import click
-from evergreen.api import EvergreenApi
+from evergreen.api import EvergreenApi, RetryingEvergreenApi
from git import Repo
from shrub.v2 import BuildVariant, ExistingTask, ShrubProject
import structlog
@@ -14,10 +14,11 @@ from structlog.stdlib import LoggerFactory
import buildscripts.evergreen_gen_multiversion_tests as gen_multiversion
import buildscripts.evergreen_generate_resmoke_tasks as gen_resmoke
-from buildscripts.burn_in_tests import GenerateConfig, DEFAULT_PROJECT, CONFIG_FILE, _configure_logging, RepeatConfig, \
- _get_evg_api, EVERGREEN_FILE, DEFAULT_REPO_LOCATIONS, _set_resmoke_cmd, create_tests_by_task, \
- find_changed_tests, run_tests
+from buildscripts.burn_in_tests import _configure_logging, EVERGREEN_FILE, \
+ DEFAULT_REPO_LOCATIONS, create_tests_by_task, TaskInfo
from buildscripts.ciconfig.evergreen import parse_evergreen_file
+from buildscripts.evergreen_burn_in_tests import GenerateConfig, DEFAULT_PROJECT, CONFIG_FILE, \
+ EvergreenFileChangeDetector
from buildscripts.patch_builds.task_generation import validate_task_generation_limit
from buildscripts.resmokelib.suitesconfig import get_named_suites_with_root_level_key
from buildscripts.util.fileops import write_file
@@ -32,7 +33,8 @@ BURN_IN_MULTIVERSION_TASK = gen_multiversion.BURN_IN_TASK
TASK_PATH_SUFFIX = "/data/multiversion"
-def create_multiversion_generate_tasks_config(tests_by_task: Dict, evg_api: EvergreenApi,
+def create_multiversion_generate_tasks_config(tests_by_task: Dict[str, TaskInfo],
+ evg_api: EvergreenApi,
generate_config: GenerateConfig) -> BuildVariant:
"""
Create the multiversion config for the Evergreen generate.tasks file.
@@ -72,7 +74,7 @@ def create_multiversion_generate_tasks_config(tests_by_task: Dict, evg_api: Ever
config_generator = gen_multiversion.EvergreenMultiversionConfigGenerator(
evg_api, gen_resmoke.ConfigOptions(config_options))
- test_list = tests_by_task[suite["origin"]]["tests"]
+ test_list = tests_by_task[suite["origin"]].tests
for test in test_list:
# Generate the multiversion tasks for each test.
sub_tasks = config_generator.get_burn_in_tasks(test, idx)
@@ -86,9 +88,7 @@ def create_multiversion_generate_tasks_config(tests_by_task: Dict, evg_api: Ever
@click.command()
-@click.option("--no-exec", "no_exec", default=False, is_flag=True,
- help="Do not execute the found tests.")
-@click.option("--generate-tasks-file", "generate_tasks_file", default=None, metavar='FILE',
+@click.option("--generate-tasks-file", "generate_tasks_file", required=True, metavar='FILE',
help="Run in 'generate.tasks' mode. Store task config to given file.")
@click.option("--build-variant", "build_variant", default=None, metavar='BUILD_VARIANT',
help="Tasks to run will be selected from this build variant.")
@@ -103,10 +103,9 @@ def create_multiversion_generate_tasks_config(tests_by_task: Dict, evg_api: Ever
@click.option("--verbose", "verbose", default=False, is_flag=True, help="Enable extra logging.")
@click.option("--task_id", "task_id", default=None, metavar='TASK_ID',
help="The evergreen task id.")
-@click.argument("resmoke_args", nargs=-1, type=click.UNPROCESSED)
# pylint: disable=too-many-arguments,too-many-locals
-def main(build_variant, run_build_variant, distro, project, generate_tasks_file, no_exec,
- resmoke_args, evg_api_config, verbose, task_id):
+def main(build_variant, run_build_variant, distro, project, generate_tasks_file, evg_api_config,
+ verbose, task_id):
"""
Run new or changed tests in repeated mode to validate their stability.
@@ -137,56 +136,45 @@ def main(build_variant, run_build_variant, distro, project, generate_tasks_file,
:param distro: Distro to run tests on.
:param project: Project to run tests on.
:param generate_tasks_file: Create a generate tasks configuration in this file.
- :param no_exec: Just perform test discover, do not execute the tests.
- :param resmoke_args: Arguments to pass through to resmoke.
:param evg_api_config: Location of configuration file to connect to evergreen.
:param verbose: Log extra debug information.
"""
_configure_logging(verbose)
evg_conf = parse_evergreen_file(EVERGREEN_FILE)
- repeat_config = RepeatConfig() # yapf: disable
generate_config = GenerateConfig(build_variant=build_variant,
run_build_variant=run_build_variant,
distro=distro,
project=project,
task_id=task_id) # yapf: disable
- if generate_tasks_file:
- generate_config.validate(evg_conf)
-
- evg_api = _get_evg_api(evg_api_config, False)
+ generate_config.validate(evg_conf)
repos = [Repo(x) for x in DEFAULT_REPO_LOCATIONS if os.path.isdir(x)]
+ evg_api = RetryingEvergreenApi.get_api(config_file=evg_api_config)
- resmoke_cmd = _set_resmoke_cmd(repeat_config, list(resmoke_args))
-
- changed_tests = find_changed_tests(repos, evg_api=evg_api, task_id=task_id)
+ change_detector = EvergreenFileChangeDetector(task_id, evg_api)
+ changed_tests = change_detector.find_changed_tests(repos)
tests_by_task = create_tests_by_task(generate_config.build_variant, evg_conf, changed_tests)
LOGGER.debug("tests and tasks found", tests_by_task=tests_by_task)
- if generate_tasks_file:
- multiversion_tasks = evg_conf.get_task_names_by_tag(MULTIVERSION_PASSTHROUGH_TAG)
- LOGGER.debug("Multiversion tasks by tag", tasks=multiversion_tasks,
- tag=MULTIVERSION_PASSTHROUGH_TAG)
- # We expect the number of suites with MULTIVERSION_PASSTHROUGH_TAG to be the same as in
- # multiversion_suites. Multiversion passthrough suites must include
- # MULTIVERSION_CONFIG_KEY as a root level key and must be set to true.
- multiversion_suites = get_named_suites_with_root_level_key(MULTIVERSION_CONFIG_KEY)
- assert len(multiversion_tasks) == len(multiversion_suites)
+ multiversion_tasks = evg_conf.get_task_names_by_tag(MULTIVERSION_PASSTHROUGH_TAG)
+ LOGGER.debug("Multiversion tasks by tag", tasks=multiversion_tasks,
+ tag=MULTIVERSION_PASSTHROUGH_TAG)
+ # We expect the number of suites with MULTIVERSION_PASSTHROUGH_TAG to be the same as in
+ # multiversion_suites. Multiversion passthrough suites must include
+ # MULTIVERSION_CONFIG_KEY as a root level key and must be set to true.
+ multiversion_suites = get_named_suites_with_root_level_key(MULTIVERSION_CONFIG_KEY)
+ assert len(multiversion_tasks) == len(multiversion_suites)
- build_variant = create_multiversion_generate_tasks_config(tests_by_task, evg_api,
- generate_config)
- shrub_project = ShrubProject.empty()
- shrub_project.add_build_variant(build_variant)
+ build_variant = create_multiversion_generate_tasks_config(tests_by_task, evg_api,
+ generate_config)
+ shrub_project = ShrubProject.empty()
+ shrub_project.add_build_variant(build_variant)
- if not validate_task_generation_limit(shrub_project):
- sys.exit(1)
+ if not validate_task_generation_limit(shrub_project):
+ sys.exit(1)
- write_file(generate_tasks_file, shrub_project.json())
- elif not no_exec:
- run_tests(tests_by_task, resmoke_cmd)
- else:
- LOGGER.info("Not running tests due to 'no_exec' option.")
+ write_file(generate_tasks_file, shrub_project.json())
if __name__ == "__main__":
diff --git a/buildscripts/evergreen_burn_in_tests.py b/buildscripts/evergreen_burn_in_tests.py
new file mode 100644
index 00000000000..909ff3f1e0c
--- /dev/null
+++ b/buildscripts/evergreen_burn_in_tests.py
@@ -0,0 +1,495 @@
+#!/usr/bin/env python3
+"""Wrapper around burn_in_tests for evergreen execution."""
+import logging
+import os
+import sys
+from datetime import datetime, timedelta
+from math import ceil
+from typing import Optional, List, Dict, Set
+
+import click
+import requests
+import structlog
+from evergreen import RetryingEvergreenApi, EvergreenApi
+from git import Repo
+from shrub.v2 import ShrubProject, BuildVariant, Task, TaskDependency, ExistingTask
+
+from buildscripts.burn_in_tests import RepeatConfig, BurnInExecutor, TaskInfo, FileChangeDetector, \
+ DEFAULT_REPO_LOCATIONS, BurnInOrchestrator
+from buildscripts.ciconfig.evergreen import parse_evergreen_file, EvergreenProjectConfig
+from buildscripts.patch_builds.change_data import RevisionMap, generate_revision_map_from_manifest
+from buildscripts.patch_builds.task_generation import TimeoutInfo, resmoke_commands, \
+ validate_task_generation_limit
+from buildscripts.util.fileops import write_file
+from buildscripts.util.taskname import name_generated_task
+from buildscripts.util.teststats import TestRuntime, HistoricTaskData
+
+CONFIG_FILE = ".evergreen.yml"
+DEFAULT_PROJECT = "mongodb-mongo-master"
+DEFAULT_VARIANT = "enterprise-rhel-80-64-bit-dynamic-required"
+EVERGREEN_FILE = "etc/evergreen.yml"
+BURN_IN_TESTS_GEN_TASK = "burn_in_tests_gen"
+BURN_IN_TESTS_TASK = "burn_in_tests"
+TASK_WITH_ARTIFACTS = "archive_dist_test_debug"
+AVG_TEST_RUNTIME_ANALYSIS_DAYS = 14
+AVG_TEST_SETUP_SEC = 4 * 60
+AVG_TEST_TIME_MULTIPLIER = 3
+MIN_AVG_TEST_OVERFLOW_SEC = float(60)
+MIN_AVG_TEST_TIME_SEC = 5 * 60
+
+LOGGER = structlog.getLogger(__name__)
+EXTERNAL_LOGGERS = {
+ "evergreen",
+ "git",
+ "urllib3",
+}
+
+
+def _configure_logging(verbose: bool):
+ """
+ Configure logging for the application.
+
+ :param verbose: If True set log level to DEBUG.
+ """
+ level = logging.DEBUG if verbose else logging.INFO
+ logging.basicConfig(
+ format="[%(asctime)s - %(name)s - %(levelname)s] %(message)s",
+ level=level,
+ stream=sys.stdout,
+ )
+ for log_name in EXTERNAL_LOGGERS:
+ logging.getLogger(log_name).setLevel(logging.WARNING)
+
+
+class GenerateConfig(object):
+ """Configuration for how to generate tasks."""
+
+ def __init__(self, build_variant: str, project: str, run_build_variant: Optional[str] = None,
+ distro: Optional[str] = None, task_id: Optional[str] = None,
+ task_prefix: str = "burn_in", include_gen_task: bool = True) -> None:
+ # pylint: disable=too-many-arguments,too-many-locals
+ """
+ Create a GenerateConfig.
+
+ :param build_variant: Build variant to get tasks from.
+ :param project: Project to run tasks on.
+ :param run_build_variant: Build variant to run new tasks on.
+ :param distro: Distro to run tasks on.
+ :param task_id: Evergreen task being run under.
+ :param task_prefix: Prefix to include in generated task names.
+ :param include_gen_task: Indicates the "_gen" task should be grouped in the display task.
+ """
+ self.build_variant = build_variant
+ self._run_build_variant = run_build_variant
+ self.distro = distro
+ self.project = project
+ self.task_id = task_id
+ self.task_prefix = task_prefix
+ self.include_gen_task = include_gen_task
+
+ @property
+ def run_build_variant(self):
+ """Build variant tasks should run against."""
+ if self._run_build_variant:
+ return self._run_build_variant
+ return self.build_variant
+
+ def validate(self, evg_conf: EvergreenProjectConfig):
+ """
+ Raise an exception if this configuration is invalid.
+
+ :param evg_conf: Evergreen configuration.
+ :return: self.
+ """
+ self._check_variant(self.build_variant, evg_conf)
+ return self
+
+ @staticmethod
+ def _check_variant(build_variant: str, evg_conf: EvergreenProjectConfig):
+ """
+ Check if the build_variant is found in the evergreen file.
+
+ :param build_variant: Build variant to check.
+ :param evg_conf: Evergreen configuration to check against.
+ """
+ if not evg_conf.get_variant(build_variant):
+ raise ValueError(f"Build variant '{build_variant}' not found in Evergreen file")
+
+
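
Both wrappers call validate() before generating anything; a short sketch of the failure mode (variant and project names are placeholders):

    from buildscripts.ciconfig.evergreen import parse_evergreen_file
    from buildscripts.evergreen_burn_in_tests import GenerateConfig

    evg_conf = parse_evergreen_file("etc/evergreen.yml")
    try:
        GenerateConfig("no-such-variant", "mongodb-mongo-master").validate(evg_conf)
    except ValueError as err:
        print(err)  # Build variant 'no-such-variant' not found in Evergreen file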
+def _parse_avg_test_runtime(test: str,
+ task_avg_test_runtime_stats: List[TestRuntime]) -> Optional[float]:
+ """
+ Parse list of test runtimes to find runtime for particular test.
+
+ :param task_avg_test_runtime_stats: List of average historic runtimes of tests.
+ :param test: Test name.
+ :return: Historical average runtime of the test.
+ """
+ for test_stat in task_avg_test_runtime_stats:
+ if test_stat.test_name == test:
+ return test_stat.runtime
+ return None
+
+
+def _calculate_timeout(avg_test_runtime: float) -> int:
+ """
+ Calculate timeout_secs for the Evergreen task.
+
+ :param avg_test_runtime: How long a test has historically taken to run.
+ :return: The test runtime times AVG_TEST_TIME_MULTIPLIER, or MIN_AVG_TEST_TIME_SEC (whichever
+ is higher).
+ """
+ return max(MIN_AVG_TEST_TIME_SEC, ceil(avg_test_runtime * AVG_TEST_TIME_MULTIPLIER))
+
+
+def _calculate_exec_timeout(repeat_config: RepeatConfig, avg_test_runtime: float) -> int:
+ """
+ Calculate exec_timeout_secs for the Evergreen task.
+
+ :param repeat_config: Information about how the test will repeat.
+ :param avg_test_runtime: How long a test has historically taken to run.
+ :return: repeat_tests_secs + an amount of padding time so that the test has time to finish on
+ its final run.
+ """
+ LOGGER.debug("Calculating exec timeout", repeat_config=repeat_config,
+ avg_test_runtime=avg_test_runtime)
+ repeat_tests_secs = repeat_config.repeat_tests_secs
+ if avg_test_runtime > repeat_tests_secs and repeat_config.repeat_tests_min:
+ # If a single execution of the test takes longer than the repeat time, then we don't
+ # have to worry about the repeat time at all and can just use the average test runtime
+ # and minimum number of executions to calculate the exec timeout value.
+ return ceil(avg_test_runtime * AVG_TEST_TIME_MULTIPLIER * repeat_config.repeat_tests_min)
+
+ test_execution_time_over_limit = avg_test_runtime - (repeat_tests_secs % avg_test_runtime)
+ test_execution_time_over_limit = max(MIN_AVG_TEST_OVERFLOW_SEC, test_execution_time_over_limit)
+ return ceil(repeat_tests_secs + (test_execution_time_over_limit * AVG_TEST_TIME_MULTIPLIER) +
+ AVG_TEST_SETUP_SEC)
+
+
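
To make the padding concrete, a worked example with assumed numbers:

    # Assume repeat_tests_secs=600 and a 240s average runtime, no repeat_tests_min:
    #   overflow     = 240 - (600 % 240) = 120; max(MIN_AVG_TEST_OVERFLOW_SEC, 120) = 120
    #   exec_timeout = ceil(600 + 120 * 3 + 240) = 1200 seconds
    # If instead the average runtime (say 700s) exceeds repeat_tests_secs=600 and
    # repeat_tests_min=2, the first branch applies:
    #   exec_timeout = ceil(700 * 3 * 2) = 4200 seconds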
+class TaskGenerator:
+ """Class to generate task configurations."""
+
+ def __init__(self, generate_config: GenerateConfig, repeat_config: RepeatConfig,
+ task_info: TaskInfo, task_runtime_stats: List[TestRuntime]) -> None:
+ """
+ Create a new task generator.
+
+ :param generate_config: Generate configuration to use.
+ :param repeat_config: Repeat configuration to use.
+ :param task_info: Information about how tasks should be generated.
+ :param task_runtime_stats: Historic runtime of tests associated with task.
+ """
+ self.generate_config = generate_config
+ self.repeat_config = repeat_config
+ self.task_info = task_info
+ self.task_runtime_stats = task_runtime_stats
+
+ def generate_timeouts(self, test: str) -> TimeoutInfo:
+ """
+ Add timeout.update command to list of commands for a burn in execution task.
+
+ :param test: Test name.
+ :return: TimeoutInfo to use.
+ """
+ if self.task_runtime_stats:
+ avg_test_runtime = _parse_avg_test_runtime(test, self.task_runtime_stats)
+ if avg_test_runtime:
+ LOGGER.debug("Avg test runtime", test=test, runtime=avg_test_runtime)
+
+ timeout = _calculate_timeout(avg_test_runtime)
+ exec_timeout = _calculate_exec_timeout(self.repeat_config, avg_test_runtime)
+ LOGGER.debug("Using timeout overrides", exec_timeout=exec_timeout, timeout=timeout)
+ timeout_info = TimeoutInfo.overridden(exec_timeout, timeout)
+
+ LOGGER.debug("Override runtime for test", test=test, timeout=timeout_info)
+ return timeout_info
+
+ return TimeoutInfo.default_timeout()
+
+ def generate_name(self, index: int) -> str:
+ """
+ Generate a subtask name.
+
+ :param index: Index of subtask.
+ :return: Name to use for generated sub-task.
+ """
+ prefix = self.generate_config.task_prefix
+ task_name = self.task_info.display_task_name
+ return name_generated_task(f"{prefix}:{task_name}", index, len(self.task_info.tests),
+ self.generate_config.run_build_variant)
+
+ def create_task(self, index: int, test_name: str) -> Task:
+ """
+ Create the task configuration for the given test using the given index.
+
+ :param index: Index of sub-task being created.
+ :param test_name: Name of test that should be executed.
+ :return: Configuration for generating the specified task.
+ """
+ multiversion_path = self.task_info.use_multiversion
+ resmoke_args = self.task_info.resmoke_args
+
+ sub_task_name = self.generate_name(index)
+ LOGGER.debug("Generating sub-task", sub_task=sub_task_name)
+
+ test_unix_style = test_name.replace('\\', '/')
+ run_tests_vars = {
+ "resmoke_args":
+ f"{resmoke_args} {self.repeat_config.generate_resmoke_options()} {test_unix_style}"
+ }
+ if multiversion_path:
+ run_tests_vars["task_path_suffix"] = multiversion_path
+
+ timeout = self.generate_timeouts(test_name)
+ commands = resmoke_commands("run tests", run_tests_vars, timeout, multiversion_path)
+ dependencies = {TaskDependency(TASK_WITH_ARTIFACTS)}
+
+ return Task(sub_task_name, commands, dependencies)
+
+
+class EvergreenFileChangeDetector(FileChangeDetector):
+ """A file changes detector for detecting test change in evergreen."""
+
+ def __init__(self, task_id: str, evg_api: EvergreenApi) -> None:
+ """
+ Create a new evergreen file change detector.
+
+        :param task_id: Id of the task currently being run.
+ :param evg_api: Evergreen API client.
+ """
+ self.task_id = task_id
+ self.evg_api = evg_api
+
+ def create_revision_map(self, repos: List[Repo]) -> RevisionMap:
+ """
+        Create a map of each repo to the revision to diff against.
+
+ :param repos: List of repos being tracked.
+ :return: Map of repositories and revisions to diff against.
+ """
+ return generate_revision_map_from_manifest(repos, self.task_id, self.evg_api)
+
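+# A minimal usage sketch (mirroring burn_in() below), with task_id, evg_api, and repos
+# assumed to be in scope:
+#
+#   change_detector = EvergreenFileChangeDetector(task_id, evg_api)
+#   changed_tests = change_detector.find_changed_tests(repos)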
+
+class GenerateBurnInExecutor(BurnInExecutor):
+ """A burn-in executor that generates tasks."""
+
+ def __init__(self, generate_config: GenerateConfig, repeat_config: RepeatConfig,
+ evg_api: EvergreenApi, generate_tasks_file: Optional[str] = None) -> None:
+ """
+ Create a new generate burn-in executor.
+
+ :param generate_config: Configuration for how to generate tasks.
+ :param repeat_config: Configuration for how tests should be repeated.
+ :param evg_api: Evergreen API client.
+ :param generate_tasks_file: File to write generated task configuration to.
+ """
+ self.generate_config = generate_config
+ self.repeat_config = repeat_config
+ self.evg_api = evg_api
+ self.generate_tasks_file = generate_tasks_file
+
+ def get_task_runtime_history(self, task: str) -> List[TestRuntime]:
+ """
+ Query the runtime history of the specified task.
+
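+        Entries are teststats TestRuntime tuples, e.g. TestRuntime(test_name="dir/test2.js",
+        runtime=10.1). If Evergreen reports a degraded mode (HTTP 503), an empty list is
+        returned instead of raising.
+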
+ :param task: Task to query.
+        :return: List of runtime histories for all tests in the specified task.
+ """
+ try:
+ project = self.generate_config.project
+ variant = self.generate_config.build_variant
+ end_date = datetime.utcnow().replace(microsecond=0)
+ start_date = end_date - timedelta(days=AVG_TEST_RUNTIME_ANALYSIS_DAYS)
+ test_stats = HistoricTaskData.from_evg(self.evg_api, project, start_date=start_date,
+ end_date=end_date, task=task, variant=variant)
+ return test_stats.get_tests_runtimes()
+ except requests.HTTPError as err:
+ if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
+                # Evergreen may return a 503 when the service is degraded.
+                # We fall back to returning no test history.
+ return []
+ else:
+ raise
+
+ def create_generated_tasks(self, tests_by_task: Dict[str, TaskInfo]) -> Set[Task]:
+ """
+        Create generate.tasks configuration for the given tests and tasks.
+
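+        One sub-task is created per (task, test) pair, so n tasks with m tests each yield
+        n * m generated sub-tasks.
+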
+        :param tests_by_task: Dictionary of tasks and tests to generate configuration for.
+ :return: Shrub tasks containing the configuration for generating specified tasks.
+ """
+ tasks: Set[Task] = set()
+ for task in sorted(tests_by_task):
+ task_info = tests_by_task[task]
+ task_runtime_stats = self.get_task_runtime_history(task_info.display_task_name)
+ task_generator = TaskGenerator(self.generate_config, self.repeat_config, task_info,
+ task_runtime_stats)
+
+ for index, test_name in enumerate(task_info.tests):
+ tasks.add(task_generator.create_task(index, test_name))
+
+ return tasks
+
+ def get_existing_tasks(self) -> Optional[Set[ExistingTask]]:
+ """Get any existing tasks that should be included in the generated display task."""
+ if self.generate_config.include_gen_task:
+ return {ExistingTask(BURN_IN_TESTS_GEN_TASK)}
+ return None
+
+ def add_config_for_build_variant(self, build_variant: BuildVariant,
+ tests_by_task: Dict[str, TaskInfo]) -> None:
+ """
+ Add configuration for generating tasks to the given build variant.
+
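+        All generated sub-tasks are grouped under a single display task; when the generate
+        config sets include_gen_task, the task named by BURN_IN_TESTS_GEN_TASK is included
+        in the display task as well.
+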
+ :param build_variant: Build variant to update.
+ :param tests_by_task: Tasks and tests to update.
+ """
+ tasks = self.create_generated_tasks(tests_by_task)
+ build_variant.display_task(BURN_IN_TESTS_TASK, tasks,
+ execution_existing_tasks=self.get_existing_tasks())
+
+ def create_generate_tasks_configuration(self, tests_by_task: Dict[str, TaskInfo]) -> str:
+ """
+        Create the configuration used to generate the burn_in tasks.
+
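+        The returned JSON is the document consumed by 'generate.tasks'; abbreviated, it
+        looks roughly like {"tasks": [...], "buildvariants": [{"name": ..., "tasks": [...],
+        "display_tasks": [{"execution_tasks": [...]}]}]}.
+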
+        :param tests_by_task: Dictionary of tasks and tests to generate.
+ :return: Configuration to use to create generated tasks.
+ """
+ build_variant = BuildVariant(self.generate_config.run_build_variant)
+ self.add_config_for_build_variant(build_variant, tests_by_task)
+
+ shrub_project = ShrubProject.empty()
+ shrub_project.add_build_variant(build_variant)
+
+ if not validate_task_generation_limit(shrub_project):
+ sys.exit(1)
+
+ return shrub_project.json()
+
+ def execute(self, tests_by_task: Dict[str, TaskInfo]) -> None:
+ """
+ Execute the given tests in the given tasks.
+
+ :param tests_by_task: Dictionary of tasks to run with tests to run in each.
+ """
+ json_text = self.create_generate_tasks_configuration(tests_by_task)
+        if self.generate_tasks_file:
+ write_file(self.generate_tasks_file, json_text)
+
+
+# pylint: disable=too-many-arguments
+def burn_in(task_id: str, build_variant: str, generate_config: GenerateConfig,
+ repeat_config: RepeatConfig, evg_api: EvergreenApi, evg_conf: EvergreenProjectConfig,
+ repos: List[Repo], generate_tasks_file: str) -> None:
+ """
+ Run burn_in_tests.
+
+    :param task_id: Id of the task being run.
+ :param build_variant: Build variant to run against.
+ :param generate_config: Configuration for how to generate tasks.
+ :param repeat_config: Configuration for how to repeat tests.
+ :param evg_api: Evergreen API client.
+ :param evg_conf: Evergreen project configuration.
+ :param repos: Git repos containing changes.
+ :param generate_tasks_file: File to write generate tasks configuration to.
+ """
+ change_detector = EvergreenFileChangeDetector(task_id, evg_api)
+ executor = GenerateBurnInExecutor(generate_config, repeat_config, evg_api, generate_tasks_file)
+
+ burn_in_orchestrator = BurnInOrchestrator(change_detector, executor, evg_conf)
+ burn_in_orchestrator.burn_in(repos, build_variant)
+
+
+@click.command()
+@click.option("--generate-tasks-file", "generate_tasks_file", default=None, metavar='FILE',
+ help="Run in 'generate.tasks' mode. Store task config to given file.")
+@click.option("--build-variant", "build_variant", default=DEFAULT_VARIANT, metavar='BUILD_VARIANT',
+ help="Tasks to run will be selected from this build variant.")
+@click.option("--run-build-variant", "run_build_variant", default=None, metavar='BUILD_VARIANT',
+ help="Burn in tasks will be generated on this build variant.")
+@click.option("--distro", "distro", default=None, metavar='DISTRO',
+ help="The distro the tasks will execute on.")
+@click.option("--project", "project", default=DEFAULT_PROJECT, metavar='PROJECT',
+ help="The evergreen project the tasks will execute on.")
+@click.option("--repeat-tests", "repeat_tests_num", default=None, type=int,
+ help="Number of times to repeat tests.")
+@click.option("--repeat-tests-min", "repeat_tests_min", default=None, type=int,
+ help="The minimum number of times to repeat tests if time option is specified.")
+@click.option("--repeat-tests-max", "repeat_tests_max", default=None, type=int,
+ help="The maximum number of times to repeat tests if time option is specified.")
+@click.option("--repeat-tests-secs", "repeat_tests_secs", default=None, type=int, metavar="SECONDS",
+ help="Repeat tests for the given time (in secs).")
+@click.option("--evg-api-config", "evg_api_config", default=CONFIG_FILE, metavar="FILE",
+ help="Configuration file with connection info for Evergreen API.")
+@click.option("--verbose", "verbose", default=False, is_flag=True, help="Enable extra logging.")
+@click.option("--task_id", "task_id", required=True, metavar='TASK_ID',
+ help="The evergreen task id.")
+# pylint: disable=too-many-arguments,too-many-locals
+def main(build_variant: str, run_build_variant: str, distro: str, project: str,
+ generate_tasks_file: str, repeat_tests_num: Optional[int], repeat_tests_min: Optional[int],
+ repeat_tests_max: Optional[int], repeat_tests_secs: Optional[int], evg_api_config: str,
+ verbose: bool, task_id: str):
+ """
+ Run new or changed tests in repeated mode to validate their stability.
+
+    burn_in_tests detects jstests that are new or changed since the last git commit and then
+    runs those tests in a loop to validate their reliability.
+
+    The `--origin-rev` argument allows users to specify which revision should be used as the
+    base commit to compare against when finding changed files. If the `--origin-rev` argument
+    is provided, we find changed files by comparing your latest changes to this revision. If
+    not provided, we find changed test files by comparing your latest changes to HEAD. The
+    revision provided must exist in the mongodb repository.
+
+ The `--repeat-*` arguments allow configuration of how burn_in_tests repeats tests. Tests can
+ either be repeated a specified number of times with the `--repeat-tests` option, or they can
+ be repeated for a certain time period with the `--repeat-tests-secs` option.
+
+    When the `--generate-tasks-file` option is specified, burn_in_tests will generate a
+    configuration file that can then be sent to the Evergreen 'generate.tasks' command to
+    create evergreen tasks to do all the test executions. This is the mode used to run tests
+    in patch builds.
+
+    NOTE: There is currently a limit on the number of tasks burn_in_tests will attempt to
+    generate in evergreen. The limit is 1000. If you change enough tests that more than 1000
+    tasks would be generated, burn_in_tests will fail. This is to avoid generating more tasks
+    than evergreen can handle.
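+
+    Example invocation (hypothetical values):
+        python buildscripts/evergreen_burn_in_tests.py --build-variant enterprise-rhel-80-64-bit --generate-tasks-file burn_in_gen.json --task_id <task_id>
+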
+ \f
+
+ :param build_variant: Build variant to query tasks from.
+    :param run_build_variant: Build variant to actually run against.
+ :param distro: Distro to run tests on.
+ :param project: Project to run tests on.
+ :param generate_tasks_file: Create a generate tasks configuration in this file.
+ :param repeat_tests_num: Repeat each test this number of times.
+ :param repeat_tests_min: Repeat each test at least this number of times.
+ :param repeat_tests_max: Once this number of repetitions has been reached, stop repeating.
+ :param repeat_tests_secs: Continue repeating tests for this number of seconds.
+ :param evg_api_config: Location of configuration file to connect to evergreen.
+ :param verbose: Log extra debug information.
+ :param task_id: Id of evergreen task being run in.
+ """
+ _configure_logging(verbose)
+
+ repeat_config = RepeatConfig(repeat_tests_secs=repeat_tests_secs,
+ repeat_tests_min=repeat_tests_min,
+ repeat_tests_max=repeat_tests_max,
+ repeat_tests_num=repeat_tests_num) # yapf: disable
+
+ repos = [Repo(x) for x in DEFAULT_REPO_LOCATIONS if os.path.isdir(x)]
+ evg_conf = parse_evergreen_file(EVERGREEN_FILE)
+ evg_api = RetryingEvergreenApi.get_api(config_file=evg_api_config)
+
+ generate_config = GenerateConfig(build_variant=build_variant,
+ run_build_variant=run_build_variant,
+ distro=distro,
+ project=project,
+ task_id=task_id) # yapf: disable
+ generate_config.validate(evg_conf)
+
+ burn_in(task_id, build_variant, generate_config, repeat_config, evg_api, evg_conf, repos,
+ generate_tasks_file)
+
+
+if __name__ == "__main__":
+ main() # pylint: disable=no-value-for-parameter
diff --git a/buildscripts/selected_tests.py b/buildscripts/selected_tests.py
index b3bb018c314..147625a53b6 100644
--- a/buildscripts/selected_tests.py
+++ b/buildscripts/selected_tests.py
@@ -25,7 +25,7 @@ if __name__ == "__main__" and __package__ is None:
import buildscripts.resmokelib.parser
import buildscripts.util.read_config as read_config
from buildscripts.burn_in_tests import DEFAULT_REPO_LOCATIONS, create_task_list_for_tests, \
- is_file_a_test_file
+ is_file_a_test_file, TaskInfo
from buildscripts.ciconfig.evergreen import (
EvergreenProjectConfig,
ResmokeArgs,
@@ -296,7 +296,7 @@ def _update_config_with_task(evg_api: EvergreenApi, build_variant: BuildVariant,
def _get_task_configs_for_test_mappings(selected_tests_variant_expansions: Dict[str, str],
- tests_by_task: Dict[str, Any],
+ tests_by_task: Dict[str, TaskInfo],
build_variant_config: Variant) -> Dict[str, dict]:
"""
For test mappings, generate a dict containing task names and their config settings.
@@ -312,7 +312,7 @@ def _get_task_configs_for_test_mappings(selected_tests_variant_expansions: Dict[
if task and not _exclude_task(task):
evg_task_config = _get_evg_task_config(selected_tests_variant_expansions, task,
build_variant_config)
- evg_task_config.update({"selected_tests_to_run": set(test_list_info["tests"])})
+ evg_task_config.update({"selected_tests_to_run": set(test_list_info.tests)})
evg_task_configs[task.name] = evg_task_config
return evg_task_configs
diff --git a/buildscripts/tests/test_burn_in_tags.py b/buildscripts/tests/test_burn_in_tags.py
index e0abc86a686..dc8672bc350 100644
--- a/buildscripts/tests/test_burn_in_tags.py
+++ b/buildscripts/tests/test_burn_in_tags.py
@@ -9,6 +9,7 @@ from unittest.mock import MagicMock, patch
from shrub.v2 import ShrubProject
import buildscripts.ciconfig.evergreen as _evergreen
+from buildscripts.burn_in_tests import TaskInfo
from buildscripts.tests.test_burn_in_tests import ns as burn_in_tests_ns
from buildscripts.ciconfig.evergreen import EvergreenProjectConfig
@@ -121,13 +122,13 @@ class TestGenerateEvgTasks(unittest.TestCase):
def test_generate_evg_tasks_one_test_changed(self, create_tests_by_task_mock):
evg_conf_mock = get_evergreen_config()
create_tests_by_task_mock.return_value = {
- "aggregation_mongos_passthrough": {
- "display_task_name": "aggregation_mongos_passthrough",
- "resmoke_args":
- "--suites=aggregation_mongos_passthrough --storageEngine=wiredTiger",
- "tests": ["jstests/aggregation/ifnull.js"],
- "use_multiversion": None
- }
+ "aggregation_mongos_passthrough": TaskInfo(
+ display_task_name="aggregation_mongos_passthrough",
+ resmoke_args="--suites=aggregation_mongos_passthrough --storageEngine=wiredTiger",
+ tests=["jstests/aggregation/ifnull.js"],
+ use_multiversion=None,
+ distro="",
+ )
} # yapf: disable
expansions_file_data = get_expansions_data()
buildvariant_map = {
@@ -204,7 +205,7 @@ CREATE_TEST_MEMBERSHIP_MAP = {
class TestAcceptance(unittest.TestCase):
@patch(ns("write_file_to_dir"))
@patch(ns("_create_evg_build_variant_map"))
- @patch(ns("find_changed_tests"))
+ @patch(ns("EvergreenFileChangeDetector"))
def test_no_tests_run_if_none_changed(self, find_changed_tests_mock,
create_evg_build_variant_map_mock, write_to_file_mock):
"""
@@ -214,11 +215,11 @@ class TestAcceptance(unittest.TestCase):
"""
repos = [MagicMock(working_dir=os.getcwd())]
evg_conf_mock = MagicMock()
- find_changed_tests_mock.return_value = {}
+ find_changed_tests_mock.return_value.find_changed_tests.return_value = {}
create_evg_build_variant_map_mock.return_value = CREATE_EVG_BUILD_VARIANT_MAP
- under_test.burn_in(EXPANSIONS_FILE_DATA, evg_conf_mock, None, repos)
+ under_test.burn_in(EXPANSIONS_FILE_DATA, evg_conf_mock, MagicMock(), repos)
write_to_file_mock.assert_called_once()
shrub_config = write_to_file_mock.call_args[0][2]
@@ -227,7 +228,7 @@ class TestAcceptance(unittest.TestCase):
@unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
@patch(ns("write_file_to_dir"))
@patch(ns("_create_evg_build_variant_map"))
- @patch(ns("find_changed_tests"))
+ @patch(ns("EvergreenFileChangeDetector"))
@patch(burn_in_tests_ns("create_test_membership_map"))
def test_tests_generated_if_a_file_changed(
self, create_test_membership_map_mock, find_changed_tests_mock,
@@ -242,12 +243,12 @@ class TestAcceptance(unittest.TestCase):
repos = [MagicMock(working_dir=os.getcwd())]
evg_conf = get_evergreen_config()
create_evg_build_variant_map_mock.return_value = CREATE_EVG_BUILD_VARIANT_MAP
- find_changed_tests_mock.return_value = {
+ find_changed_tests_mock.return_value.find_changed_tests.return_value = {
'jstests/slow1/large_role_chain.js',
'jstests/aggregation/accumulators/accumulator_js.js'
}
- under_test.burn_in(EXPANSIONS_FILE_DATA, evg_conf, None, repos)
+ under_test.burn_in(EXPANSIONS_FILE_DATA, evg_conf, MagicMock(), repos)
write_to_file_mock.assert_called_once()
written_config = write_to_file_mock.call_args[0][2]
diff --git a/buildscripts/tests/test_burn_in_tests.py b/buildscripts/tests/test_burn_in_tests.py
index cc90831405b..a0cdf321904 100644
--- a/buildscripts/tests/test_burn_in_tests.py
+++ b/buildscripts/tests/test_burn_in_tests.py
@@ -4,24 +4,16 @@ from __future__ import absolute_import
import collections
import datetime
-import json
import os
import sys
import subprocess
import unittest
-from math import ceil
from mock import Mock, patch, MagicMock
-import requests
-
-from shrub.v2 import ShrubProject, BuildVariant
-
import buildscripts.burn_in_tests as under_test
from buildscripts.ciconfig.evergreen import parse_evergreen_file
-import buildscripts.util.teststats as teststats_utils
import buildscripts.resmokelib.parser as _parser
-import buildscripts.resmokelib.config as _config
_parser.set_run_options()
# pylint: disable=missing-docstring,protected-access,too-many-lines,no-self-use
@@ -29,10 +21,10 @@ _parser.set_run_options()
def create_tests_by_task_mock(n_tasks, n_tests):
return {
- f"task_{i}_gen": {
- "display_task_name": f"task_{i}", "resmoke_args": f"--suites=suite_{i}",
- "tests": [f"jstests/tests_{j}" for j in range(n_tests)]
- }
+ f"task_{i}_gen":
+ under_test.TaskInfo(display_task_name=f"task_{i}", resmoke_args=f"--suites=suite_{i}",
+ tests=[f"jstests/tests_{j}" for j in range(n_tests)],
+ use_multiversion=None, distro=f"distro_{i}")
for i in range(n_tasks)
}
@@ -75,77 +67,6 @@ def get_evergreen_config(config_file_path):
return parse_evergreen_file(config_file_path)
-class TestAcceptance(unittest.TestCase):
- def tearDown(self):
- _parser.set_run_options()
-
- @patch(ns("write_file"))
- def test_no_tests_run_if_none_changed(self, write_json_mock):
- """
- Given a git repository with no changes,
- When burn_in_tests is run,
- Then no tests are discovered to run.
- """
- variant = "build_variant"
- repos = [mock_changed_git_files([])]
- repeat_config = under_test.RepeatConfig()
- gen_config = under_test.GenerateConfig(
- variant,
- "project",
- ) # yapf: disable
- evg_conf_mock = MagicMock()
- evg_conf_mock.get_task_names_by_tag.return_value = set()
-
- under_test.burn_in(repeat_config, gen_config, "", "testfile.json", False, evg_conf_mock,
- repos, None, None)
-
- write_json_mock.assert_called_once()
- written_config = json.loads(write_json_mock.call_args[0][1])
- display_task = written_config["buildvariants"][0]["display_tasks"][0]
- self.assertEqual(1, len(display_task["execution_tasks"]))
- self.assertEqual(under_test.BURN_IN_TESTS_GEN_TASK, display_task["execution_tasks"][0])
-
- @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
- @patch(ns("write_file"))
- def test_tests_generated_if_a_file_changed(self, write_json_mock):
- """
- Given a git repository with changes,
- When burn_in_tests is run,
- Then tests are discovered to run.
- """
- # Note: this test is using actual tests and suites. So changes to those suites could
- # introduce failures and require this test to be updated.
- # You can see the test file it is using below. This test is used in the 'auth' and
- # 'auth_audit' test suites. It needs to be in at least one of those for the test to pass.
- _config.NAMED_SUITES = None
- variant = "enterprise-rhel-80-64-bit"
- repos = [mock_changed_git_files(["jstests/auth/auth1.js"])]
- repeat_config = under_test.RepeatConfig()
- gen_config = under_test.GenerateConfig(
- variant,
- "project",
- ) # yapf: disable
- evg_config = get_evergreen_config("etc/evergreen.yml")
-
- under_test.burn_in(repeat_config, gen_config, "", "testfile.json", False, evg_config, repos,
- None, None)
-
- write_json_mock.assert_called_once()
- written_config = json.loads(write_json_mock.call_args[0][1])
- n_tasks = len(written_config["tasks"])
- # Ensure we are generating at least one task for the test.
- self.assertGreaterEqual(n_tasks, 1)
-
- written_build_variant = written_config["buildvariants"][0]
- self.assertEqual(variant, written_build_variant["name"])
- self.assertEqual(n_tasks, len(written_build_variant["tasks"]))
-
- display_task = written_build_variant["display_tasks"][0]
- # The display task should contain all the generated tasks as well as 1 extra task for
- # the burn_in_test_gen task.
- self.assertEqual(n_tasks + 1, len(display_task["execution_tasks"]))
-
-
class TestRepeatConfig(unittest.TestCase):
def test_validate_no_args(self):
repeat_config = under_test.RepeatConfig()
@@ -230,157 +151,6 @@ class TestRepeatConfig(unittest.TestCase):
self.assertEqual(repeat_options.strip(), "--repeatSuites=2")
-class TestGenerateConfig(unittest.TestCase):
- def test_run_build_variant_with_no_run_build_variant(self):
- gen_config = under_test.GenerateConfig("build_variant", "project")
-
- self.assertEqual(gen_config.build_variant, gen_config.run_build_variant)
-
- def test_run_build_variant_with_run_build_variant(self):
- gen_config = under_test.GenerateConfig("build_variant", "project", "run_build_variant")
-
- self.assertNotEqual(gen_config.build_variant, gen_config.run_build_variant)
- self.assertEqual(gen_config.run_build_variant, "run_build_variant")
-
- def test_validate_non_existing_build_variant(self):
- evg_conf_mock = MagicMock()
- evg_conf_mock.get_variant.return_value = None
-
- gen_config = under_test.GenerateConfig("build_variant", "project", "run_build_variant")
-
- with self.assertRaises(ValueError):
- gen_config.validate(evg_conf_mock)
-
- def test_validate_existing_build_variant(self):
- evg_conf_mock = MagicMock()
-
- gen_config = under_test.GenerateConfig("build_variant", "project", "run_build_variant")
- gen_config.validate(evg_conf_mock)
-
- def test_validate_non_existing_run_build_variant(self):
- evg_conf_mock = MagicMock()
-
- gen_config = under_test.GenerateConfig("build_variant", "project")
- gen_config.validate(evg_conf_mock)
-
-
-class TestParseAvgTestRuntime(unittest.TestCase):
- def test__parse_avg_test_runtime(self):
- task_avg_test_runtime_stats = [
- teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=30.2),
- teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=455.1)
- ]
- result = under_test._parse_avg_test_runtime("dir/test2.js", task_avg_test_runtime_stats)
- self.assertEqual(result, 455.1)
-
-
-class TestCalculateTimeout(unittest.TestCase):
- def test__calculate_timeout(self):
- avg_test_runtime = 455.1
- expected_result = ceil(avg_test_runtime * under_test.AVG_TEST_TIME_MULTIPLIER)
- self.assertEqual(expected_result, under_test._calculate_timeout(avg_test_runtime))
-
- def test__calculate_timeout_avg_is_less_than_min(self):
- avg_test_runtime = 10
- self.assertEqual(under_test.MIN_AVG_TEST_TIME_SEC,
- under_test._calculate_timeout(avg_test_runtime))
-
-
-class TestCalculateExecTimeout(unittest.TestCase):
- def test__calculate_exec_timeout(self):
- repeat_config = under_test.RepeatConfig(repeat_tests_secs=600)
- avg_test_runtime = 455.1
-
- exec_timeout = under_test._calculate_exec_timeout(repeat_config, avg_test_runtime)
-
- self.assertEqual(1771, exec_timeout)
-
- def test_average_timeout_greater_than_execution_time(self):
- repeat_config = under_test.RepeatConfig(repeat_tests_secs=600, repeat_tests_min=2)
- avg_test_runtime = 750
-
- exec_timeout = under_test._calculate_exec_timeout(repeat_config, avg_test_runtime)
-
- # The timeout needs to be greater than the number of the test * the minimum number of runs.
- minimum_expected_timeout = avg_test_runtime * repeat_config.repeat_tests_min
-
- self.assertGreater(exec_timeout, minimum_expected_timeout)
-
-
-class TestGenerateTimeouts(unittest.TestCase):
- def test__generate_timeouts(self):
- repeat_config = under_test.RepeatConfig(repeat_tests_secs=600)
- runtime_stats = [teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=455.1)]
- test_name = "dir/test2.js"
-
- timeout_info = under_test._generate_timeouts(repeat_config, test_name, runtime_stats)
-
- self.assertEqual(timeout_info.exec_timeout, 1771)
- self.assertEqual(timeout_info.timeout, 1366)
-
- def test__generate_timeouts_no_results(self):
- repeat_config = under_test.RepeatConfig(repeat_tests_secs=600)
- runtime_stats = []
- test_name = "dir/new_test.js"
-
- timeout_info = under_test._generate_timeouts(repeat_config, test_name, runtime_stats)
-
- self.assertIsNone(timeout_info.cmd)
-
- def test__generate_timeouts_avg_runtime_is_zero(self):
- repeat_config = under_test.RepeatConfig(repeat_tests_secs=600)
- runtime_stats = [
- teststats_utils.TestRuntime(test_name="dir/test_with_zero_runtime.js", runtime=0)
- ]
- test_name = "dir/test_with_zero_runtime.js"
-
- timeout_info = under_test._generate_timeouts(repeat_config, test_name, runtime_stats)
-
- self.assertIsNone(timeout_info.cmd)
-
-
-class TestGetTaskRuntimeHistory(unittest.TestCase):
- def test_get_task_runtime_history_with_no_api(self):
- self.assertListEqual([],
- under_test._get_task_runtime_history(None, "project", "task",
- "variant"))
-
- def test__get_task_runtime_history(self):
- evergreen_api = Mock()
- evergreen_api.test_stats_by_project.return_value = [
- Mock(
- test_file="dir/test2.js",
- task_name="task1",
- variant="variant1",
- distro="distro1",
- date=_DATE,
- num_pass=1,
- num_fail=0,
- avg_duration_pass=10.1,
- )
- ]
- analysis_duration = under_test.AVG_TEST_RUNTIME_ANALYSIS_DAYS
- end_date = datetime.datetime.utcnow().replace(microsecond=0)
- start_date = end_date - datetime.timedelta(days=analysis_duration)
-
- result = under_test._get_task_runtime_history(evergreen_api, "project1", "task1",
- "variant1")
- self.assertEqual(result, [("dir/test2.js", 10.1)])
- evergreen_api.test_stats_by_project.assert_called_with(
- "project1", after_date=start_date, before_date=end_date, group_by="test",
- group_num_days=14, tasks=["task1"], variants=["variant1"])
-
- def test__get_task_runtime_history_evg_degraded_mode_error(self): # pylint: disable=invalid-name
- response = Mock()
- response.status_code = requests.codes.SERVICE_UNAVAILABLE
- evergreen_api = Mock()
- evergreen_api.test_stats_by_project.side_effect = requests.HTTPError(response=response)
-
- result = under_test._get_task_runtime_history(evergreen_api, "project1", "task1",
- "variant1")
- self.assertEqual(result, [])
-
-
class TestGetTaskName(unittest.TestCase):
def test__get_task_name(self):
name = "mytask"
@@ -450,92 +220,6 @@ class TestSetResmokeCmd(unittest.TestCase):
self.assertListEqual(resmoke_args + ['--repeatSuites=3'], resmoke_cmd)
-TESTS_BY_TASK = {
- "task1": {
- "resmoke_args": "--suites=suite1",
- "tests": ["jstests/test1.js", "jstests/test2.js"]},
- "task2": {
- "resmoke_args": "--suites=suite1",
- "tests": ["jstests/test1.js", "jstests/test3.js"]},
- "task3": {
- "resmoke_args": "--suites=suite3",
- "tests": ["jstests/test4.js", "jstests/test5.js"]},
- "task4": {
- "resmoke_args": "--suites=suite4", "tests": []},
-} # yapf: disable
-
-
-class TestCreateGenerateTasksConfig(unittest.TestCase):
- @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
- def test_no_tasks_given(self):
- build_variant = BuildVariant("build variant")
- gen_config = MagicMock(run_build_variant="variant")
- repeat_config = MagicMock()
-
- under_test.create_generate_tasks_config(build_variant, {}, gen_config, repeat_config, None)
-
- evg_config_dict = build_variant.as_dict()
- self.assertEqual(0, len(evg_config_dict["tasks"]))
-
- @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
- def test_one_task_one_test(self):
- n_tasks = 1
- n_tests = 1
- resmoke_options = "options for resmoke"
- build_variant = BuildVariant("build variant")
- gen_config = MagicMock(run_build_variant="variant", distro=None)
- repeat_config = MagicMock()
- repeat_config.generate_resmoke_options.return_value = resmoke_options
- tests_by_task = create_tests_by_task_mock(n_tasks, n_tests)
-
- under_test.create_generate_tasks_config(build_variant, tests_by_task, gen_config,
- repeat_config, None)
-
- shrub_config = ShrubProject.empty().add_build_variant(build_variant)
- evg_config_dict = shrub_config.as_dict()
- tasks = evg_config_dict["tasks"]
- self.assertEqual(n_tasks * n_tests, len(tasks))
- cmd = tasks[0]["commands"]
- self.assertIn(resmoke_options, cmd[1]["vars"]["resmoke_args"])
- self.assertIn("--suites=suite_0", cmd[1]["vars"]["resmoke_args"])
- self.assertIn("tests_0", cmd[1]["vars"]["resmoke_args"])
-
- @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
- def test_n_task_m_test(self):
- n_tasks = 3
- n_tests = 5
- build_variant = BuildVariant("build variant")
- gen_config = MagicMock(run_build_variant="variant", distro=None)
- repeat_config = MagicMock()
- tests_by_task = create_tests_by_task_mock(n_tasks, n_tests)
-
- under_test.create_generate_tasks_config(build_variant, tests_by_task, gen_config,
- repeat_config, None)
-
- evg_config_dict = build_variant.as_dict()
- self.assertEqual(n_tasks * n_tests, len(evg_config_dict["tasks"]))
-
-
-class TestCreateGenerateTasksFile(unittest.TestCase):
- @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
- @patch(ns("sys.exit"))
- @patch(ns("create_generate_tasks_config"))
- @patch(ns("validate_task_generation_limit"))
- def test_cap_on_task_generate(self, validate_mock, _, exit_mock):
- evg_api = MagicMock()
- gen_config = MagicMock(use_multiversion=False)
- repeat_config = MagicMock()
- tests_by_task = MagicMock()
-
- validate_mock.return_value = False
-
- exit_mock.side_effect = ValueError("exiting")
- with self.assertRaises(ValueError):
- under_test.create_generate_tasks_file(tests_by_task, gen_config, repeat_config, evg_api)
-
- exit_mock.assert_called_once()
-
-
class RunTests(unittest.TestCase):
@patch(ns('subprocess.check_call'))
def test_run_tests_no_tests(self, check_call_mock):
@@ -642,7 +326,7 @@ def create_variant_task_mock(task_name, suite_name, distro="distro"):
return variant_task
-class TestGatherTaskInfo(unittest.TestCase):
+class TestTaskInfo(unittest.TestCase):
def test_non_generated_task(self):
suite_name = "suite_1"
distro_name = "distro_1"
@@ -657,13 +341,13 @@ class TestGatherTaskInfo(unittest.TestCase):
"suite 3": [f"test{i}.js" for i in range(2)],
}
- task_info = under_test._gather_task_info(task_mock, tests_by_suite, evg_conf_mock, variant)
+ task_info = under_test.TaskInfo.from_task(task_mock, tests_by_suite, evg_conf_mock, variant)
- self.assertIn(suite_name, task_info["resmoke_args"])
+ self.assertIn(suite_name, task_info.resmoke_args)
for test in test_list:
- self.assertIn(test, task_info["tests"])
- self.assertIsNone(task_info["use_multiversion"])
- self.assertEqual(distro_name, task_info["distro"])
+ self.assertIn(test, task_info.tests)
+ self.assertIsNone(task_info.use_multiversion)
+ self.assertEqual(distro_name, task_info.distro)
def test_generated_task_no_large_on_task(self):
suite_name = "suite_1"
@@ -681,13 +365,13 @@ class TestGatherTaskInfo(unittest.TestCase):
"suite 3": [f"test{i}.js" for i in range(2)],
}
- task_info = under_test._gather_task_info(task_mock, tests_by_suite, evg_conf_mock, variant)
+ task_info = under_test.TaskInfo.from_task(task_mock, tests_by_suite, evg_conf_mock, variant)
- self.assertIn(suite_name, task_info["resmoke_args"])
+ self.assertIn(suite_name, task_info.resmoke_args)
for test in test_list:
- self.assertIn(test, task_info["tests"])
- self.assertIsNone(task_info["use_multiversion"])
- self.assertEqual(distro_name, task_info["distro"])
+ self.assertIn(test, task_info.tests)
+ self.assertIsNone(task_info.use_multiversion)
+ self.assertEqual(distro_name, task_info.distro)
def test_generated_task_no_large_on_build_variant(self):
suite_name = "suite_1"
@@ -705,13 +389,13 @@ class TestGatherTaskInfo(unittest.TestCase):
"suite 3": [f"test{i}.js" for i in range(2)],
}
- task_info = under_test._gather_task_info(task_mock, tests_by_suite, evg_conf_mock, variant)
+ task_info = under_test.TaskInfo.from_task(task_mock, tests_by_suite, evg_conf_mock, variant)
- self.assertIn(suite_name, task_info["resmoke_args"])
+ self.assertIn(suite_name, task_info.resmoke_args)
for test in test_list:
- self.assertIn(test, task_info["tests"])
- self.assertIsNone(task_info["use_multiversion"])
- self.assertEqual(distro_name, task_info["distro"])
+ self.assertIn(test, task_info.tests)
+ self.assertIsNone(task_info.use_multiversion)
+ self.assertEqual(distro_name, task_info.distro)
def test_generated_task_large_distro(self):
suite_name = "suite_1"
@@ -735,13 +419,13 @@ class TestGatherTaskInfo(unittest.TestCase):
"suite 3": [f"test{i}.js" for i in range(2)],
}
- task_info = under_test._gather_task_info(task_mock, tests_by_suite, evg_conf_mock, variant)
+ task_info = under_test.TaskInfo.from_task(task_mock, tests_by_suite, evg_conf_mock, variant)
- self.assertIn(suite_name, task_info["resmoke_args"])
+ self.assertIn(suite_name, task_info.resmoke_args)
for test in test_list:
- self.assertIn(test, task_info["tests"])
- self.assertIsNone(task_info["use_multiversion"])
- self.assertEqual(large_distro_name, task_info["distro"])
+ self.assertIn(test, task_info.tests)
+ self.assertIsNone(task_info.use_multiversion)
+ self.assertEqual(large_distro_name, task_info.distro)
class TestCreateTaskList(unittest.TestCase):
@@ -785,11 +469,11 @@ class TestCreateTaskList(unittest.TestCase):
self.assertIn("task 1", task_list)
task_info = task_list["task 1"]
- self.assertIn("suite_1", task_info["resmoke_args"])
+ self.assertIn("suite_1", task_info.resmoke_args)
for i in range(3):
- self.assertIn(f"test{i}.js", task_info["tests"])
- self.assertIsNone(task_info["use_multiversion"])
- self.assertEqual("distro 1", task_info["distro"])
+ self.assertIn(f"test{i}.js", task_info.tests)
+ self.assertIsNone(task_info.use_multiversion)
+ self.assertEqual("distro 1", task_info.distro)
def test_create_task_list_with_excludes(self):
variant = "variant name"
@@ -833,7 +517,7 @@ class TestCreateTaskList(unittest.TestCase):
under_test.create_task_list(evg_conf_mock, variant, suite_dict, [])
-class TestFindChangedTests(unittest.TestCase):
+class TestLocalFileChangeDetector(unittest.TestCase):
@patch(ns("find_changed_files_in_repos"))
@patch(ns("os.path.isfile"))
def test_non_js_files_filtered(self, is_file_mock, changed_files_mock):
@@ -846,7 +530,8 @@ class TestFindChangedTests(unittest.TestCase):
changed_files_mock.return_value = set(file_list)
is_file_mock.return_value = True
- found_tests = under_test.find_changed_tests([repo_mock])
+ file_change_detector = under_test.LocalFileChangeDetector(None)
+ found_tests = file_change_detector.find_changed_tests([repo_mock])
self.assertIn(file_list[0], found_tests)
self.assertIn(file_list[2], found_tests)
@@ -864,7 +549,8 @@ class TestFindChangedTests(unittest.TestCase):
changed_files_mock.return_value = set(file_list)
is_file_mock.return_value = False
- found_tests = under_test.find_changed_tests([repo_mock])
+ file_change_detector = under_test.LocalFileChangeDetector(None)
+ found_tests = file_change_detector.find_changed_tests([repo_mock])
self.assertEqual(0, len(found_tests))
@@ -880,7 +566,8 @@ class TestFindChangedTests(unittest.TestCase):
changed_files_mock.return_value = set(file_list)
is_file_mock.return_value = True
- found_tests = under_test.find_changed_tests([repo_mock])
+ file_change_detector = under_test.LocalFileChangeDetector(None)
+ found_tests = file_change_detector.find_changed_tests([repo_mock])
self.assertIn(file_list[0], found_tests)
self.assertIn(file_list[2], found_tests)
diff --git a/buildscripts/tests/test_burn_in_tests_multiversion.py b/buildscripts/tests/test_burn_in_tests_multiversion.py
index 090c9e84358..9cd304a7bf0 100644
--- a/buildscripts/tests/test_burn_in_tests_multiversion.py
+++ b/buildscripts/tests/test_burn_in_tests_multiversion.py
@@ -12,10 +12,12 @@ from mock import MagicMock, patch
from shrub.v2 import BuildVariant, ShrubProject
import buildscripts.burn_in_tests_multiversion as under_test
-from buildscripts.burn_in_tests import _gather_task_info, create_generate_tasks_config
+from buildscripts.burn_in_tests import TaskInfo, RepeatConfig
from buildscripts.ciconfig.evergreen import parse_evergreen_file
import buildscripts.resmokelib.parser as _parser
import buildscripts.evergreen_gen_multiversion_tests as gen_multiversion
+from buildscripts.evergreen_burn_in_tests import GenerateBurnInExecutor
+
_parser.set_run_options()
MONGO_4_2_HASH = "d94888c0d0a8065ca57d354ece33b3c2a1a5a6d6"
@@ -23,12 +25,17 @@ MONGO_4_2_HASH = "d94888c0d0a8065ca57d354ece33b3c2a1a5a6d6"
# pylint: disable=missing-docstring,protected-access,too-many-lines,no-self-use
-def create_tests_by_task_mock(n_tasks, n_tests):
+def create_tests_by_task_mock(n_tasks, n_tests, multiversion_values=None):
+ if multiversion_values is None:
+ multiversion_values = [None for _ in range(n_tasks)]
return {
- f"task_{i}_gen": {
- "display_task_name": f"task_{i}", "resmoke_args": f"--suites=suite_{i}",
- "tests": [f"jstests/tests_{j}" for j in range(n_tests)]
- }
+ f"task_{i}_gen": TaskInfo(
+ display_task_name=f"task_{i}",
+ resmoke_args=f"--suites=suite_{i}",
+ tests=[f"jstests/tests_{j}" for j in range(n_tests)],
+ use_multiversion=multiversion_values[i],
+ distro="",
+ )
for i in range(n_tasks)
}
@@ -39,10 +46,13 @@ MV_MOCK_SUITES = ["replica_sets_jscore_passthrough", "sharding_jscore_passthroug
def create_multiversion_tests_by_task_mock(n_tasks, n_tests):
assert n_tasks <= len(MV_MOCK_SUITES)
return {
- f"{MV_MOCK_SUITES[i % len(MV_MOCK_SUITES)]}": {
- "resmoke_args": f"--suites=suite_{i}",
- "tests": [f"jstests/tests_{j}" for j in range(n_tests)]
- }
+ f"{MV_MOCK_SUITES[i % len(MV_MOCK_SUITES)]}": TaskInfo(
+ display_task_name=f"task_{i}",
+ resmoke_args=f"--suites=suite_{i}",
+ tests=[f"jstests/tests_{j}" for j in range(n_tests)],
+ use_multiversion=None,
+ distro="",
+ )
for i in range(n_tasks)
}
@@ -201,13 +211,6 @@ class TestCreateMultiversionGenerateTasksConfig(unittest.TestCase):
(NUM_REPL_MIXED_VERSION_CONFIGS + NUM_SHARDED_MIXED_VERSION_CONFIGS) * n_tests)
-class TestRepeatConfig(unittest.TestCase):
- def test_get_resmoke_repeat_options_use_multiversion(self):
- repeat_config = under_test.RepeatConfig()
-
- self.assertEqual(repeat_config, repeat_config.validate())
-
-
class TestGenerateConfig(unittest.TestCase):
def test_validate_use_multiversion(self):
evg_conf_mock = MagicMock()
@@ -225,12 +228,12 @@ class TestCreateGenerateTasksConfig(unittest.TestCase):
build_variant = BuildVariant("variant")
gen_config = MagicMock(run_build_variant="variant", distro=None)
repeat_config = MagicMock()
- tests_by_task = create_tests_by_task_mock(n_tasks, n_tests)
- first_task = "task_0_gen"
multiversion_path = "multiversion_path"
- tests_by_task[first_task]["use_multiversion"] = multiversion_path
+ tests_by_task = create_tests_by_task_mock(n_tasks, n_tests, [multiversion_path])
+ mock_evg_api = MagicMock()
- create_generate_tasks_config(build_variant, tests_by_task, gen_config, repeat_config, None)
+ executor = GenerateBurnInExecutor(gen_config, repeat_config, mock_evg_api)
+ executor.add_config_for_build_variant(build_variant, tests_by_task)
shrub_project = ShrubProject.empty().add_build_variant(build_variant)
evg_config_dict = shrub_project.as_dict()
@@ -255,10 +258,10 @@ class TestGatherTaskInfo(unittest.TestCase):
"suite 3": [f"test{i}.js" for i in range(2)],
}
- task_info = _gather_task_info(task_mock, tests_by_suite, evg_conf_mock, variant)
+ task_info = TaskInfo.from_task(task_mock, tests_by_suite, evg_conf_mock, variant)
- self.assertIn(suite_name, task_info["resmoke_args"])
+ self.assertIn(suite_name, task_info.resmoke_args)
for test in test_list:
- self.assertIn(test, task_info["tests"])
- self.assertEqual(task_mock.multiversion_path, task_info["use_multiversion"])
- self.assertEqual(distro_name, task_info["distro"])
+ self.assertIn(test, task_info.tests)
+ self.assertEqual(task_mock.multiversion_path, task_info.use_multiversion)
+ self.assertEqual(distro_name, task_info.distro)
diff --git a/buildscripts/tests/test_evergreen_burn_in_tests.py b/buildscripts/tests/test_evergreen_burn_in_tests.py
new file mode 100644
index 00000000000..64ce3b6c51d
--- /dev/null
+++ b/buildscripts/tests/test_evergreen_burn_in_tests.py
@@ -0,0 +1,384 @@
+"""Unit tests for buildscripts/burn_in_tests.py."""
+
+from __future__ import absolute_import
+
+import json
+import os
+import sys
+import unittest
+from datetime import datetime, timedelta
+from math import ceil
+
+import requests
+from mock import patch, MagicMock
+from shrub.v2 import BuildVariant, ShrubProject
+
+import buildscripts.evergreen_burn_in_tests as under_test
+from buildscripts.ciconfig.evergreen import parse_evergreen_file
+import buildscripts.resmokelib.parser as _parser
+import buildscripts.resmokelib.config as _config
+import buildscripts.util.teststats as teststats_utils
+_parser.set_run_options()
+
+# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access
+
+NS = "buildscripts.evergreen_burn_in_tests"
+
+
+def ns(relative_name): # pylint: disable=invalid-name
+ """Return a full name from a name relative to the test module"s name space."""
+ return NS + "." + relative_name
+
+
+def mock_a_file(filename):
+ change = MagicMock(a_path=filename)
+ return change
+
+
+def mock_git_diff(change_list):
+ diff = MagicMock()
+ diff.iter_change_type.return_value = change_list
+ return diff
+
+
+def mock_changed_git_files(add_files):
+ repo = MagicMock()
+ repo.index.diff.return_value = mock_git_diff([mock_a_file(f) for f in add_files])
+ repo.working_dir = "."
+ return repo
+
+
+def get_evergreen_config(config_file_path):
+ evergreen_home = os.path.expanduser(os.path.join("~", "evergreen"))
+ if os.path.exists(evergreen_home):
+ return parse_evergreen_file(config_file_path, evergreen_home)
+ return parse_evergreen_file(config_file_path)
+
+
+class TestAcceptance(unittest.TestCase):
+ def tearDown(self):
+ _parser.set_run_options()
+
+ @patch(ns("write_file"))
+ def test_no_tests_run_if_none_changed(self, write_json_mock):
+ """
+ Given a git repository with no changes,
+ When burn_in_tests is run,
+ Then no tests are discovered to run.
+ """
+ variant = "build_variant"
+ repos = [mock_changed_git_files([])]
+ repeat_config = under_test.RepeatConfig()
+ gen_config = under_test.GenerateConfig(
+ variant,
+ "project",
+ ) # yapf: disable
+ mock_evg_conf = MagicMock()
+ mock_evg_conf.get_task_names_by_tag.return_value = set()
+ mock_evg_api = MagicMock()
+
+ under_test.burn_in("task_id", variant, gen_config, repeat_config, mock_evg_api,
+ mock_evg_conf, repos, "testfile.json")
+
+ write_json_mock.assert_called_once()
+ written_config = json.loads(write_json_mock.call_args[0][1])
+ display_task = written_config["buildvariants"][0]["display_tasks"][0]
+ self.assertEqual(1, len(display_task["execution_tasks"]))
+ self.assertEqual(under_test.BURN_IN_TESTS_GEN_TASK, display_task["execution_tasks"][0])
+
+ @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
+ @patch(ns("write_file"))
+ def test_tests_generated_if_a_file_changed(self, write_json_mock):
+ """
+ Given a git repository with changes,
+ When burn_in_tests is run,
+ Then tests are discovered to run.
+ """
+ # Note: this test is using actual tests and suites. So changes to those suites could
+ # introduce failures and require this test to be updated.
+ # You can see the test file it is using below. This test is used in the 'auth' and
+ # 'auth_audit' test suites. It needs to be in at least one of those for the test to pass.
+ _config.NAMED_SUITES = None
+ variant = "enterprise-rhel-80-64-bit"
+ repos = [mock_changed_git_files(["jstests/auth/auth1.js"])]
+ repeat_config = under_test.RepeatConfig()
+ gen_config = under_test.GenerateConfig(
+ variant,
+ "project",
+ ) # yapf: disable
+ mock_evg_conf = get_evergreen_config("etc/evergreen.yml")
+ mock_evg_api = MagicMock()
+
+ under_test.burn_in("task_id", variant, gen_config, repeat_config, mock_evg_api,
+ mock_evg_conf, repos, "testfile.json")
+
+ write_json_mock.assert_called_once()
+ written_config = json.loads(write_json_mock.call_args[0][1])
+ n_tasks = len(written_config["tasks"])
+ # Ensure we are generating at least one task for the test.
+ self.assertGreaterEqual(n_tasks, 1)
+
+ written_build_variant = written_config["buildvariants"][0]
+ self.assertEqual(variant, written_build_variant["name"])
+ self.assertEqual(n_tasks, len(written_build_variant["tasks"]))
+
+ display_task = written_build_variant["display_tasks"][0]
+ # The display task should contain all the generated tasks as well as 1 extra task for
+ # the burn_in_test_gen task.
+ self.assertEqual(n_tasks + 1, len(display_task["execution_tasks"]))
+
+
+class TestGenerateConfig(unittest.TestCase):
+ def test_run_build_variant_with_no_run_build_variant(self):
+ gen_config = under_test.GenerateConfig("build_variant", "project")
+
+ self.assertEqual(gen_config.build_variant, gen_config.run_build_variant)
+
+ def test_run_build_variant_with_run_build_variant(self):
+ gen_config = under_test.GenerateConfig("build_variant", "project", "run_build_variant")
+
+ self.assertNotEqual(gen_config.build_variant, gen_config.run_build_variant)
+ self.assertEqual(gen_config.run_build_variant, "run_build_variant")
+
+ def test_validate_non_existing_build_variant(self):
+ evg_conf_mock = MagicMock()
+ evg_conf_mock.get_variant.return_value = None
+
+ gen_config = under_test.GenerateConfig("build_variant", "project", "run_build_variant")
+
+ with self.assertRaises(ValueError):
+ gen_config.validate(evg_conf_mock)
+
+ def test_validate_existing_build_variant(self):
+ evg_conf_mock = MagicMock()
+
+ gen_config = under_test.GenerateConfig("build_variant", "project", "run_build_variant")
+ gen_config.validate(evg_conf_mock)
+
+ def test_validate_non_existing_run_build_variant(self):
+ evg_conf_mock = MagicMock()
+
+ gen_config = under_test.GenerateConfig("build_variant", "project")
+ gen_config.validate(evg_conf_mock)
+
+
+class TestParseAvgTestRuntime(unittest.TestCase):
+ def test__parse_avg_test_runtime(self):
+ task_avg_test_runtime_stats = [
+ teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=30.2),
+ teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=455.1)
+ ]
+ result = under_test._parse_avg_test_runtime("dir/test2.js", task_avg_test_runtime_stats)
+ self.assertEqual(result, 455.1)
+
+
+class TestCalculateTimeout(unittest.TestCase):
+ def test__calculate_timeout(self):
+ avg_test_runtime = 455.1
+ expected_result = ceil(avg_test_runtime * under_test.AVG_TEST_TIME_MULTIPLIER)
+ self.assertEqual(expected_result, under_test._calculate_timeout(avg_test_runtime))
+
+ def test__calculate_timeout_avg_is_less_than_min(self):
+ avg_test_runtime = 10
+ self.assertEqual(under_test.MIN_AVG_TEST_TIME_SEC,
+ under_test._calculate_timeout(avg_test_runtime))
+
+
+class TestCalculateExecTimeout(unittest.TestCase):
+ def test__calculate_exec_timeout(self):
+ repeat_config = under_test.RepeatConfig(repeat_tests_secs=600)
+ avg_test_runtime = 455.1
+
+ exec_timeout = under_test._calculate_exec_timeout(repeat_config, avg_test_runtime)
+
+ self.assertEqual(1771, exec_timeout)
+
+ def test_average_timeout_greater_than_execution_time(self):
+ repeat_config = under_test.RepeatConfig(repeat_tests_secs=600, repeat_tests_min=2)
+ avg_test_runtime = 750
+
+ exec_timeout = under_test._calculate_exec_timeout(repeat_config, avg_test_runtime)
+
+ # The timeout needs to be greater than the number of the test * the minimum number of runs.
+ minimum_expected_timeout = avg_test_runtime * repeat_config.repeat_tests_min
+
+ self.assertGreater(exec_timeout, minimum_expected_timeout)
+
+
+class TestGenerateTimeouts(unittest.TestCase):
+ def test__generate_timeouts(self):
+ repeat_config = under_test.RepeatConfig(repeat_tests_secs=600)
+ runtime_stats = [teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=455.1)]
+ test_name = "dir/test2.js"
+
+ task_generator = under_test.TaskGenerator(MagicMock(), repeat_config, MagicMock(),
+ runtime_stats)
+ timeout_info = task_generator.generate_timeouts(test_name)
+
+ self.assertEqual(timeout_info.exec_timeout, 1771)
+ self.assertEqual(timeout_info.timeout, 1366)
+
+ def test__generate_timeouts_no_results(self):
+ repeat_config = under_test.RepeatConfig(repeat_tests_secs=600)
+ runtime_stats = []
+ test_name = "dir/new_test.js"
+
+ task_generator = under_test.TaskGenerator(MagicMock(), repeat_config, MagicMock(),
+ runtime_stats)
+ timeout_info = task_generator.generate_timeouts(test_name)
+
+ self.assertIsNone(timeout_info.cmd)
+
+ def test__generate_timeouts_avg_runtime_is_zero(self):
+ repeat_config = under_test.RepeatConfig(repeat_tests_secs=600)
+ runtime_stats = [
+ teststats_utils.TestRuntime(test_name="dir/test_with_zero_runtime.js", runtime=0)
+ ]
+ test_name = "dir/test_with_zero_runtime.js"
+
+ task_generator = under_test.TaskGenerator(MagicMock(), repeat_config, MagicMock(),
+ runtime_stats)
+ timeout_info = task_generator.generate_timeouts(test_name)
+
+ self.assertIsNone(timeout_info.cmd)
+
+
+class TestGetTaskRuntimeHistory(unittest.TestCase):
+ def test_get_task_runtime_history(self):
+ mock_evg_api = MagicMock()
+ mock_evg_api.test_stats_by_project.return_value = [
+ MagicMock(
+ test_file="dir/test2.js",
+ task_name="task1",
+ variant="variant1",
+ distro="distro1",
+ date=datetime.utcnow().date(),
+ num_pass=1,
+ num_fail=0,
+ avg_duration_pass=10.1,
+ )
+ ]
+ analysis_duration = under_test.AVG_TEST_RUNTIME_ANALYSIS_DAYS
+ end_date = datetime.utcnow().replace(microsecond=0)
+ start_date = end_date - timedelta(days=analysis_duration)
+ mock_gen_config = MagicMock(project="project1", build_variant="variant1")
+
+ executor = under_test.GenerateBurnInExecutor(mock_gen_config, MagicMock(), mock_evg_api)
+ result = executor.get_task_runtime_history("task1")
+
+ self.assertEqual(result, [("dir/test2.js", 10.1)])
+ mock_evg_api.test_stats_by_project.assert_called_with(
+ "project1", after_date=start_date, before_date=end_date, group_by="test",
+ group_num_days=14, tasks=["task1"], variants=["variant1"])
+
+ def test_get_task_runtime_history_evg_degraded_mode_error(self):
+ mock_response = MagicMock(status_code=requests.codes.SERVICE_UNAVAILABLE)
+ mock_evg_api = MagicMock()
+ mock_evg_api.test_stats_by_project.side_effect = requests.HTTPError(response=mock_response)
+ mock_gen_config = MagicMock(project="project1", build_variant="variant1")
+
+ executor = under_test.GenerateBurnInExecutor(mock_gen_config, MagicMock(), mock_evg_api)
+ result = executor.get_task_runtime_history("task1")
+
+ self.assertEqual(result, [])
+
+
+TESTS_BY_TASK = {
+ "task1": {
+ "resmoke_args": "--suites=suite1",
+ "tests": ["jstests/test1.js", "jstests/test2.js"]},
+ "task2": {
+ "resmoke_args": "--suites=suite1",
+ "tests": ["jstests/test1.js", "jstests/test3.js"]},
+ "task3": {
+ "resmoke_args": "--suites=suite3",
+ "tests": ["jstests/test4.js", "jstests/test5.js"]},
+ "task4": {
+ "resmoke_args": "--suites=suite4", "tests": []},
+} # yapf: disable
+
+
+def create_tests_by_task_mock(n_tasks, n_tests):
+ return {
+ f"task_{i}_gen":
+ under_test.TaskInfo(display_task_name=f"task_{i}", resmoke_args=f"--suites=suite_{i}",
+ tests=[f"jstests/tests_{j}" for j in range(n_tests)],
+ use_multiversion=None, distro=f"distro_{i}")
+ for i in range(n_tasks)
+ }
+
+
+class TestCreateGenerateTasksConfig(unittest.TestCase):
+ @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
+ def test_no_tasks_given(self):
+ build_variant = BuildVariant("build variant")
+ gen_config = MagicMock(run_build_variant="variant")
+ repeat_config = MagicMock()
+ mock_evg_api = MagicMock()
+
+ executor = under_test.GenerateBurnInExecutor(gen_config, repeat_config, mock_evg_api)
+ executor.add_config_for_build_variant(build_variant, {})
+
+ evg_config_dict = build_variant.as_dict()
+ self.assertEqual(0, len(evg_config_dict["tasks"]))
+
+ @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
+ def test_one_task_one_test(self):
+ n_tasks = 1
+ n_tests = 1
+ resmoke_options = "options for resmoke"
+ build_variant = BuildVariant("build variant")
+ gen_config = MagicMock(run_build_variant="variant", distro=None)
+ repeat_config = MagicMock()
+ repeat_config.generate_resmoke_options.return_value = resmoke_options
+ mock_evg_api = MagicMock()
+ tests_by_task = create_tests_by_task_mock(n_tasks, n_tests)
+
+ executor = under_test.GenerateBurnInExecutor(gen_config, repeat_config, mock_evg_api)
+ executor.add_config_for_build_variant(build_variant, tests_by_task)
+
+ shrub_config = ShrubProject.empty().add_build_variant(build_variant)
+ evg_config_dict = shrub_config.as_dict()
+ tasks = evg_config_dict["tasks"]
+ self.assertEqual(n_tasks * n_tests, len(tasks))
+ cmd = tasks[0]["commands"]
+ self.assertIn(resmoke_options, cmd[1]["vars"]["resmoke_args"])
+ self.assertIn("--suites=suite_0", cmd[1]["vars"]["resmoke_args"])
+ self.assertIn("tests_0", cmd[1]["vars"]["resmoke_args"])
+
+ @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
+ def test_n_task_m_test(self):
+ n_tasks = 3
+ n_tests = 5
+ build_variant = BuildVariant("build variant")
+ gen_config = MagicMock(run_build_variant="variant", distro=None)
+ repeat_config = MagicMock()
+ tests_by_task = create_tests_by_task_mock(n_tasks, n_tests)
+ mock_evg_api = MagicMock()
+
+ executor = under_test.GenerateBurnInExecutor(gen_config, repeat_config, mock_evg_api)
+ executor.add_config_for_build_variant(build_variant, tests_by_task)
+
+ evg_config_dict = build_variant.as_dict()
+ self.assertEqual(n_tasks * n_tests, len(evg_config_dict["tasks"]))
+
+
+class TestCreateGenerateTasksFile(unittest.TestCase):
+ @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
+ @patch(ns("sys.exit"))
+ @patch(ns("validate_task_generation_limit"))
+ def test_cap_on_task_generate(self, validate_mock, exit_mock):
+ gen_config = MagicMock(use_multiversion=False)
+ repeat_config = MagicMock()
+ tests_by_task = MagicMock()
+ mock_evg_api = MagicMock()
+
+ validate_mock.return_value = False
+
+ exit_mock.side_effect = ValueError("exiting")
+ with self.assertRaises(ValueError):
+ executor = under_test.GenerateBurnInExecutor(gen_config, repeat_config, mock_evg_api,
+ "gen_file.json")
+ executor.execute(tests_by_task)
+
+ exit_mock.assert_called_once()
diff --git a/buildscripts/tests/test_selected_tests.py b/buildscripts/tests/test_selected_tests.py
index fd9e02d88fb..b1adad986f8 100644
--- a/buildscripts/tests/test_selected_tests.py
+++ b/buildscripts/tests/test_selected_tests.py
@@ -9,6 +9,7 @@ from shrub.v2 import BuildVariant, ShrubProject
# pylint: disable=wrong-import-position
import buildscripts.ciconfig.evergreen as _evergreen
+from buildscripts.burn_in_tests import TaskInfo
from buildscripts.evergreen_generate_resmoke_tasks import Suite
from buildscripts.tests.test_burn_in_tests import get_evergreen_config, mock_changed_git_files
from buildscripts import selected_tests as under_test
@@ -421,13 +422,25 @@ class TestGetTaskConfigsForTestMappings(unittest.TestCase):
]
exclude_task_mock.return_value = False
tests_by_task = {
- "jsCore_auth": {
- "tests": [
- "jstests/core/currentop_waiting_for_latch.js",
- "jstests/core/latch_analyzer.js",
- ],
- },
- "auth_gen": {"tests": ["jstests/auth/auth3.js"], },
+ "jsCore_auth":
+ TaskInfo(
+ display_task_name="task 1",
+ tests=[
+ "jstests/core/currentop_waiting_for_latch.js",
+ "jstests/core/latch_analyzer.js",
+ ],
+ resmoke_args="",
+ use_multiversion=None,
+ distro="",
+ ),
+ "auth_gen":
+ TaskInfo(
+ display_task_name="task 2",
+ tests=["jstests/auth/auth3.js"],
+ resmoke_args="",
+ use_multiversion=None,
+ distro="",
+ ),
}
get_evg_task_config_mock.side_effect = [{"task_config_key": "task_config_value_1"},
{"task_config_key": "task_config_value_2"}]