author     David Bradford <david.bradford@mongodb.com>       2021-05-14 18:52:08 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-05-28 21:27:10 +0000
commit     cd4f55bc0bc90046b35c71eeb9e4fb18dad9b379 (patch)
tree       e3b94e3e9fb66248ecb47c71da4fe38f1626c257
parent     e779656ef9c8509ec8838d4019b95c5fc4a9e81a (diff)
download   mongo-cd4f55bc0bc90046b35c71eeb9e4fb18dad9b379.tar.gz
SERVER-57002: Refactor dynamic task creation
-rw-r--r--  buildscripts/burn_in_tests_multiversion.py | 285
-rw-r--r--  buildscripts/ciconfig/evergreen.py | 37
-rwxr-xr-x  buildscripts/evergreen_gen_fuzzer_tests.py | 358
-rwxr-xr-x  buildscripts/evergreen_gen_multiversion_tests.py | 479
-rwxr-xr-x  buildscripts/evergreen_generate_resmoke_tasks.py | 1200
-rw-r--r--  buildscripts/patch_builds/selected_tests/__init__.py | 1
-rw-r--r--  buildscripts/patch_builds/selected_tests/selected_tests_client.py | 171
-rw-r--r--  buildscripts/patch_builds/selected_tests/selected_tests_service.py | 52
-rw-r--r--  buildscripts/patch_builds/selected_tests_service.py | 84
-rw-r--r--  buildscripts/selected_tests.py | 688
-rw-r--r--  buildscripts/task_generation/__init__.py | 1
-rw-r--r--  buildscripts/task_generation/evg_config_builder.py | 155
-rw-r--r--  buildscripts/task_generation/gen_config.py | 22
-rw-r--r--  buildscripts/task_generation/gen_task_service.py | 163
-rw-r--r--  buildscripts/task_generation/gen_task_validation.py | 36
-rw-r--r--  buildscripts/task_generation/generated_config.py | 43
-rw-r--r--  buildscripts/task_generation/multiversion_util.py | 44
-rw-r--r--  buildscripts/task_generation/resmoke_proxy.py | 147
-rw-r--r--  buildscripts/task_generation/suite_split.py | 451
-rw-r--r--  buildscripts/task_generation/suite_split_strategies.py | 118
-rw-r--r--  buildscripts/task_generation/task_types/__init__.py | 1
-rw-r--r--  buildscripts/task_generation/task_types/fuzzer_tasks.py | 116
-rw-r--r--  buildscripts/task_generation/task_types/gentask_options.py | 41
-rw-r--r--  buildscripts/task_generation/task_types/multiversion_tasks.py | 140
-rw-r--r--  buildscripts/task_generation/task_types/resmoke_tasks.py | 177
-rw-r--r--  buildscripts/task_generation/timeout.py | 105
-rw-r--r--  buildscripts/tests/patch_builds/selected_tests/__init__.py | 1
-rw-r--r--  buildscripts/tests/patch_builds/selected_tests/test_selected_tests_client.py | 87
-rw-r--r--  buildscripts/tests/patch_builds/selected_tests/test_selected_tests_service.py | 87
-rw-r--r--  buildscripts/tests/patch_builds/test_selected_tests_service.py | 95
-rw-r--r--  buildscripts/tests/task_generation/__init__.py | 1
-rw-r--r--  buildscripts/tests/task_generation/task_types/__init__.py | 1
-rw-r--r--  buildscripts/tests/task_generation/task_types/test_fuzzer_tasks.py | 76
-rw-r--r--  buildscripts/tests/task_generation/task_types/test_gentask_options.py | 27
-rw-r--r--  buildscripts/tests/task_generation/task_types/test_resmoke_tasks.py | 147
-rw-r--r--  buildscripts/tests/task_generation/test_gen_task_service.py | 127
-rw-r--r--  buildscripts/tests/task_generation/test_gen_task_validation.py | 62
-rw-r--r--  buildscripts/tests/task_generation/test_resmoke_proxy.py | 49
-rw-r--r--  buildscripts/tests/task_generation/test_suite_split.py | 314
-rw-r--r--  buildscripts/tests/task_generation/test_suite_split_strategies.py | 128
-rw-r--r--  buildscripts/tests/task_generation/test_timeout.py | 51
-rw-r--r--  buildscripts/tests/test_burn_in_tests_multiversion.py | 230
-rw-r--r--  buildscripts/tests/test_evergreen_gen_fuzzer_tests.py | 85
-rw-r--r--  buildscripts/tests/test_evergreen_gen_multiversion_tests.py | 62
-rw-r--r--  buildscripts/tests/test_evergreen_generate_resmoke_tasks.py | 1163
-rw-r--r--  buildscripts/tests/test_selected_tests.py | 653
-rw-r--r--  buildscripts/util/cmdutils.py | 29
-rw-r--r--  etc/evergreen.yml | 140
-rw-r--r--  etc/pip/components/evergreen.req | 1
-rwxr-xr-x  evergreen/burn_in_tests_multiversion.sh | 2
-rw-r--r--  evergreen/resmoke_tasks_generate.sh | 5
51 files changed, 4767 insertions, 3971 deletions
diff --git a/buildscripts/burn_in_tests_multiversion.py b/buildscripts/burn_in_tests_multiversion.py
index 256ef6cb57e..e382ca83ac3 100644
--- a/buildscripts/burn_in_tests_multiversion.py
+++ b/buildscripts/burn_in_tests_multiversion.py
@@ -2,26 +2,36 @@
"""Command line utility for running newly added or modified jstests under the appropriate multiversion passthrough suites."""
import os
-import sys
-from typing import Dict
+from datetime import datetime
+from functools import partial
+from typing import List, Dict, NamedTuple
import click
+import inject
from git import Repo
-from shrub.v2 import BuildVariant, ExistingTask, ShrubProject
import structlog
from structlog.stdlib import LoggerFactory
from evergreen.api import EvergreenApi, RetryingEvergreenApi
import buildscripts.evergreen_gen_multiversion_tests as gen_multiversion
import buildscripts.evergreen_generate_resmoke_tasks as gen_resmoke
-from buildscripts.burn_in_tests import _configure_logging, EVERGREEN_FILE, \
+from buildscripts.burn_in_tests import EVERGREEN_FILE, \
DEFAULT_REPO_LOCATIONS, create_tests_by_task, TaskInfo
-from buildscripts.ciconfig.evergreen import parse_evergreen_file
+from buildscripts.ciconfig.evergreen import parse_evergreen_file, EvergreenProjectConfig
from buildscripts.evergreen_burn_in_tests import GenerateConfig, DEFAULT_PROJECT, CONFIG_FILE, \
EvergreenFileChangeDetector
-from buildscripts.patch_builds.task_generation import validate_task_generation_limit
from buildscripts.resmokelib.suitesconfig import get_named_suites_with_root_level_key
-from buildscripts.util.fileops import write_file
+from buildscripts.task_generation.evg_config_builder import EvgConfigBuilder
+from buildscripts.task_generation.gen_config import GenerationConfiguration
+from buildscripts.task_generation.generated_config import GeneratedConfiguration
+from buildscripts.task_generation.multiversion_util import MultiversionUtilService
+from buildscripts.task_generation.resmoke_proxy import ResmokeProxyConfig
+from buildscripts.task_generation.suite_split import SuiteSplitConfig, SuiteSplitParameters
+from buildscripts.task_generation.suite_split_strategies import SplitStrategy, greedy_division, \
+ FallbackStrategy, round_robin_fallback
+from buildscripts.task_generation.task_types.gentask_options import GenTaskOptions
+from buildscripts.task_generation.task_types.multiversion_tasks import MultiversionGenTaskParams
+from buildscripts.util.cmdutils import enable_logging
structlog.configure(logger_factory=LoggerFactory())
LOGGER = structlog.getLogger(__name__)
@@ -31,60 +41,156 @@ MULTIVERSION_PASSTHROUGH_TAG = gen_multiversion.PASSTHROUGH_TAG
RANDOM_MULTIVERSION_REPLSETS_TAG = gen_multiversion.RANDOM_REPLSETS_TAG
BURN_IN_MULTIVERSION_TASK = gen_multiversion.BURN_IN_TASK
TASK_PATH_SUFFIX = "/data/multiversion"
+DEFAULT_CONFIG_DIR = "generated_resmoke_config"
+DEFAULT_TEST_SUITE_DIR = os.path.join("buildscripts", "resmokeconfig", "suites")
-def create_multiversion_generate_tasks_config(tests_by_task: Dict[str, TaskInfo],
- evg_api: EvergreenApi,
- generate_config: GenerateConfig) -> BuildVariant:
+def filter_list(item: str, input_list: List[str]) -> bool:
"""
- Create the multiversion config for the Evergreen generate.tasks file.
+ Filter to determine if the given item is in the given list.
- :param tests_by_task: Dictionary of tests to generate tasks for.
- :param evg_api: Evergreen API.
- :param generate_config: Configuration of what to generate.
- :return: Shrub configuration with added tasks.
+ :param item: Item to search for.
+ :param input_list: List to search.
+ :return: True if the item is contained in the list.
"""
- build_variant = BuildVariant(generate_config.build_variant)
- tasks = set()
- if tests_by_task:
- # Get the multiversion suites that will run in as part of burn_in_multiversion.
+ return item in input_list
+
+
+class BurnInConfig(NamedTuple):
+ """Configuration for generating build in."""
+
+ build_id: str
+ build_variant: str
+ revision: str
+
+ def build_config_location(self) -> str:
+ """Build the configuration location for the generated configuration."""
+ return f"{self.build_variant}/{self.revision}/generate_tasks/burn_in_tests_multiversion_gen-{self.build_id}.tgz"
+
+
+class MultiversionBurnInOrchestrator:
+ """Orchestrator for generating multiversion burn_in_tests."""
+
+ @inject.autoparams()
+ def __init__(self, change_detector: EvergreenFileChangeDetector,
+ evg_conf: EvergreenProjectConfig, multiversion_util: MultiversionUtilService,
+ burn_in_config: BurnInConfig) -> None:
+ """
+ Initialize the orchestrator.
+
+ :param change_detector: Service to find changed files.
+ :param evg_conf: Evergreen project configuration.
+ :param multiversion_util: Multiversion utilities.
+ :param burn_in_config: Configuration for generating burn in.
+ """
+ self.change_detector = change_detector
+ self.evg_config = evg_conf
+ self.multiversion_util = multiversion_util
+ self.burn_in_config = burn_in_config
+
+ def validate_multiversion_tasks_and_suites(self) -> None:
+ """
+ Validate that the multiversion suites and tasks match up.
+
+        We expect the number of tasks tagged with MULTIVERSION_PASSTHROUGH_TAG to match the
+        number of multiversion suites. Multiversion passthrough suites must include
+ MULTIVERSION_CONFIG_KEY as a root level key and must be set to true.
+
+ Throws an exception if there are inconsistencies.
+ """
+ multiversion_tasks = self.evg_config.get_task_names_by_tag(MULTIVERSION_PASSTHROUGH_TAG)
+ LOGGER.debug("Multiversion tasks by tag", tasks=multiversion_tasks,
+ tag=MULTIVERSION_PASSTHROUGH_TAG)
+
multiversion_suites = get_named_suites_with_root_level_key(MULTIVERSION_CONFIG_KEY)
- for suite in multiversion_suites:
- idx = 0
- if suite["origin"] not in tests_by_task.keys():
- # Only generate burn in multiversion tasks for suites that would run the detected
- # changed tests.
- continue
- LOGGER.debug("Generating multiversion suite", suite=suite["multiversion_name"])
-
- # We hardcode the number of fallback sub suites and the target resmoke time here
- # since burn_in_tests cares about individual tests and not entire suites. The config
- # options here are purely used to generate the proper multiversion suites to run
- # tests against.
- config_options = {
- "suite": suite["origin"],
- "fallback_num_sub_suites": 1,
- "project": generate_config.project,
- "build_variant": generate_config.build_variant,
- "task_id": generate_config.task_id,
- "task_name": suite["multiversion_name"],
- "target_resmoke_time": 60,
- }
- config_options.update(gen_resmoke.DEFAULT_CONFIG_VALUES)
-
- config_generator = gen_multiversion.EvergreenMultiversionConfigGenerator(
- evg_api, gen_resmoke.ConfigOptions(config_options))
- test_list = tests_by_task[suite["origin"]].tests
- for test in test_list:
- # Generate the multiversion tasks for each test.
- sub_tasks = config_generator.get_burn_in_tasks(test, idx)
- tasks = tasks.union(sub_tasks)
- idx += 1
-
- existing_tasks = {ExistingTask(f"{BURN_IN_MULTIVERSION_TASK}_gen")}
- build_variant.display_task(BURN_IN_MULTIVERSION_TASK, tasks,
- execution_existing_tasks=existing_tasks)
- return build_variant
+ assert len(multiversion_tasks) == len(multiversion_suites)
+
+ def generate_tests(self, repos: List[Repo], generate_config: GenerateConfig,
+ target_file: str) -> None:
+ """
+ Generate evergreen configuration to run any changed tests and save them to disk.
+
+ :param repos: List of repos to check for changed tests.
+ :param generate_config: Configuration for how to generate tasks.
+ :param target_file: File to write configuration to.
+ """
+ tests_by_task = self.find_changes(repos, generate_config)
+ generated_config = self.generate_configuration(tests_by_task, target_file,
+ generate_config.build_variant)
+ generated_config.write_all_to_dir(DEFAULT_CONFIG_DIR)
+
+ def find_changes(self, repos: List[Repo],
+ generate_config: GenerateConfig) -> Dict[str, TaskInfo]:
+ """
+ Find tests and tasks to run based on test changes.
+
+ :param repos: List of repos to check for changed tests.
+ :param generate_config: Configuration for how to generate tasks.
+ :return: Dictionary of tasks with tests to run in them.
+ """
+ changed_tests = self.change_detector.find_changed_tests(repos)
+ tests_by_task = create_tests_by_task(generate_config.build_variant, self.evg_config,
+ changed_tests)
+ LOGGER.debug("tests and tasks found", tests_by_task=tests_by_task)
+ return tests_by_task
+
+ # pylint: disable=too-many-locals
+ def generate_configuration(self, tests_by_task: Dict[str, TaskInfo], target_file: str,
+ build_variant: str) -> GeneratedConfiguration:
+ """
+ Generate configuration for the given tasks and tests.
+
+ :param tests_by_task: Map of what to generate.
+ :param target_file: Location to write generated configuration.
+ :param build_variant: Name of build variant being generated on.
+ :return: Generated configuration to create requested tasks and tests.
+ """
+ builder = EvgConfigBuilder() # pylint: disable=no-value-for-parameter
+ build_variant_config = self.evg_config.get_variant(build_variant)
+ is_asan = build_variant_config.is_asan_build()
+ tasks = set()
+ if tests_by_task:
+            # Get the multiversion suites that will run as part of burn_in_multiversion.
+ multiversion_suites = get_named_suites_with_root_level_key(MULTIVERSION_CONFIG_KEY)
+ for suite in multiversion_suites:
+ task_name = suite["origin"]
+ if task_name not in tests_by_task.keys():
+ # Only generate burn in multiversion tasks for suites that would run the
+ # detected changed tests.
+ continue
+
+ LOGGER.debug("Generating multiversion suite", suite=suite["multiversion_name"])
+ test_list = tests_by_task[task_name].tests
+ split_params = SuiteSplitParameters(
+ task_name=suite["multiversion_name"], suite_name=task_name, filename=task_name,
+ test_file_filter=partial(filter_list, input_list=test_list),
+ build_variant=build_variant, is_asan=is_asan)
+ version_configs = self.multiversion_util.get_version_configs_for_suite(task_name)
+ gen_params = MultiversionGenTaskParams(
+ mixed_version_configs=version_configs,
+ is_sharded=self.multiversion_util.is_suite_sharded(task_name),
+ resmoke_args="",
+ parent_task_name="burn_in_tests_multiversion",
+ origin_suite=task_name,
+ use_large_distro=False,
+ large_distro_name=None,
+ name_prefix="burn_in_multiversion",
+ create_misc_suite=False,
+ add_to_display_task=False,
+ config_location=self.burn_in_config.build_config_location(),
+ )
+
+ tasks = tasks.union(builder.add_multiversion_burn_in_test(split_params, gen_params))
+
+ if len(tasks) == 0:
+ builder.get_build_variant(build_variant)
+
+ executions_tasks = {task.name for task in tasks}
+ executions_tasks.add("burn_in_tests_multiversion_gen")
+ builder.add_display_task(display_task_name="burn_in_multiversion",
+ execution_task_names=executions_tasks, build_variant=build_variant)
+
+ return builder.build(target_file)
@click.command()
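
The hunk above replaces the ad-hoc per-suite loop with a test_file_filter predicate built from filter_list and functools.partial, which is then handed to SuiteSplitParameters. A minimal standalone sketch of that pattern (the test names are hypothetical):

from functools import partial

def filter_list(item: str, input_list) -> bool:
    """Return True if the given item is in the given list."""
    return item in input_list

# Bind the list of changed tests once; the result is a one-argument
# predicate suitable for passing to the suite splitter.
keep_test = partial(filter_list, input_list=["jstests/core/a.js", "jstests/core/b.js"])
print(keep_test("jstests/core/a.js"))  # True
print(keep_test("jstests/core/c.js"))  # False
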
@@ -100,12 +206,14 @@ def create_multiversion_generate_tasks_config(tests_by_task: Dict[str, TaskInfo]
help="The evergreen project the tasks will execute on.")
@click.option("--evg-api-config", "evg_api_config", default=CONFIG_FILE, metavar="FILE",
help="Configuration file with connection info for Evergreen API.")
+@click.option("--revision", required=True, help="Revision generation is being run against.")
+@click.option("--build-id", required=True, help="ID of build being run against.")
@click.option("--verbose", "verbose", default=False, is_flag=True, help="Enable extra logging.")
@click.option("--task_id", "task_id", default=None, metavar='TASK_ID',
help="The evergreen task id.")
# pylint: disable=too-many-arguments,too-many-locals
def main(build_variant, run_build_variant, distro, project, generate_tasks_file, evg_api_config,
- verbose, task_id):
+ verbose, task_id, revision, build_id):
"""
Run new or changed tests in repeated mode to validate their stability.
@@ -139,8 +247,9 @@ def main(build_variant, run_build_variant, distro, project, generate_tasks_file,
:param evg_api_config: Location of configuration file to connect to evergreen.
:param verbose: Log extra debug information.
"""
- _configure_logging(verbose)
+ enable_logging(verbose)
+ burn_in_config = BurnInConfig(build_variant=build_variant, build_id=build_id, revision=revision)
evg_conf = parse_evergreen_file(EVERGREEN_FILE)
generate_config = GenerateConfig(build_variant=build_variant,
run_build_variant=run_build_variant,
@@ -149,32 +258,44 @@ def main(build_variant, run_build_variant, distro, project, generate_tasks_file,
task_id=task_id) # yapf: disable
generate_config.validate(evg_conf)
+ gen_task_options = GenTaskOptions(
+ create_misc_suite=False,
+ is_patch=True,
+ generated_config_dir=DEFAULT_CONFIG_DIR,
+ use_default_timeouts=False,
+ )
+ split_task_options = SuiteSplitConfig(
+ evg_project=project,
+ target_resmoke_time=60,
+ max_sub_suites=100,
+ max_tests_per_suite=1,
+ start_date=datetime.utcnow(),
+ end_date=datetime.utcnow(),
+ default_to_fallback=True,
+ )
+
repos = [Repo(x) for x in DEFAULT_REPO_LOCATIONS if os.path.isdir(x)]
- evg_api = RetryingEvergreenApi.get_api(config_file=evg_api_config)
-
- change_detector = EvergreenFileChangeDetector(task_id, evg_api)
- changed_tests = change_detector.find_changed_tests(repos)
- tests_by_task = create_tests_by_task(generate_config.build_variant, evg_conf, changed_tests)
- LOGGER.debug("tests and tasks found", tests_by_task=tests_by_task)
-
- multiversion_tasks = evg_conf.get_task_names_by_tag(MULTIVERSION_PASSTHROUGH_TAG)
- LOGGER.debug("Multiversion tasks by tag", tasks=multiversion_tasks,
- tag=MULTIVERSION_PASSTHROUGH_TAG)
- # We expect the number of suites with MULTIVERSION_PASSTHROUGH_TAG to be the same as in
- # multiversion_suites. Multiversion passthrough suites must include
- # MULTIVERSION_CONFIG_KEY as a root level key and must be set to true.
- multiversion_suites = get_named_suites_with_root_level_key(MULTIVERSION_CONFIG_KEY)
- assert len(multiversion_tasks) == len(multiversion_suites)
-
- build_variant = create_multiversion_generate_tasks_config(tests_by_task, evg_api,
- generate_config)
- shrub_project = ShrubProject.empty()
- shrub_project.add_build_variant(build_variant)
-
- if not validate_task_generation_limit(shrub_project):
- sys.exit(1)
-
- write_file(generate_tasks_file, shrub_project.json())
+
+ def dependencies(binder: inject.Binder) -> None:
+ evg_api = RetryingEvergreenApi.get_api(config_file=evg_api_config)
+ binder.bind(SuiteSplitConfig, split_task_options)
+ binder.bind(SplitStrategy, greedy_division)
+ binder.bind(FallbackStrategy, round_robin_fallback)
+ binder.bind(EvergreenProjectConfig, evg_conf)
+ binder.bind(GenTaskOptions, gen_task_options)
+ binder.bind(EvergreenApi, evg_api)
+ binder.bind(GenerationConfiguration,
+ GenerationConfiguration.from_yaml_file(gen_resmoke.GENERATE_CONFIG_FILE))
+ binder.bind(ResmokeProxyConfig,
+ ResmokeProxyConfig(resmoke_suite_dir=DEFAULT_TEST_SUITE_DIR))
+ binder.bind(EvergreenFileChangeDetector, EvergreenFileChangeDetector(task_id, evg_api))
+ binder.bind(BurnInConfig, burn_in_config)
+
+ inject.configure(dependencies)
+
+ burn_in_orchestrator = MultiversionBurnInOrchestrator() # pylint: disable=no-value-for-parameter
+ burn_in_orchestrator.validate_multiversion_tasks_and_suites()
+ burn_in_orchestrator.generate_tests(repos, generate_config, generate_tasks_file)
if __name__ == "__main__":
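
This file now wires its collaborators through the python-inject library: main() registers concrete instances in a dependencies() function, and classes such as MultiversionBurnInOrchestrator receive them via @inject.autoparams() on their constructors. A minimal sketch of that pattern, with hypothetical Config/Orchestrator names:

import inject

class Config:
    def __init__(self, project: str) -> None:
        self.project = project

class Orchestrator:
    @inject.autoparams()
    def __init__(self, config: Config) -> None:
        # Resolved from the binder based on the type annotation.
        self.config = config

def dependencies(binder: inject.Binder) -> None:
    binder.bind(Config, Config("mongodb-mongo-master"))

inject.configure(dependencies)
orchestrator = Orchestrator()  # pylint: disable=no-value-for-parameter
print(orchestrator.config.project)

The pylint disable mirrors the one used throughout this commit: pylint cannot see that autoparams supplies the constructor arguments at runtime.
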
diff --git a/buildscripts/ciconfig/evergreen.py b/buildscripts/ciconfig/evergreen.py
index 9f9d6d82be5..e4e517a9647 100644
--- a/buildscripts/ciconfig/evergreen.py
+++ b/buildscripts/ciconfig/evergreen.py
@@ -8,13 +8,14 @@ from __future__ import annotations
import datetime
import distutils.spawn # pylint: disable=no-name-in-module
import re
-from typing import Set
+from typing import Set, List
import yaml
import buildscripts.util.runcommand as runcommand
ENTERPRISE_MODULE_NAME = "enterprise"
+ASAN_SIGNATURE = "detect_leaks=1"
def parse_evergreen_file(path, evergreen_binary="evergreen"):
@@ -61,11 +62,11 @@ class EvergreenProjectConfig(object): # pylint: disable=too-many-instance-attri
self.distro_names.update(variant.distro_names)
@property
- def task_names(self):
+ def task_names(self) -> List[str]:
"""Get the list of task names."""
return list(self._tasks_by_name.keys())
- def get_task(self, task_name):
+ def get_task(self, task_name: str) -> Task:
"""Return the task with the given name as a Task instance."""
return self._tasks_by_name.get(task_name)
@@ -113,7 +114,7 @@ class Task(object):
"""Get the list of task names this task depends on."""
return self.raw.get("depends_on", [])
- def _find_func_command(self, func_command):
+ def find_func_command(self, func_command):
"""Return the 'func_command' if found, or None."""
for command in self.raw.get("commands", []):
if command.get("func") == func_command:
@@ -123,25 +124,34 @@ class Task(object):
@property
def generate_resmoke_tasks_command(self):
"""Return the 'generate resmoke tasks' command if found, or None."""
- func = self._find_func_command("generate resmoke tasks")
- return func if func is not None else self._find_func_command(
+ func = self.find_func_command("generate resmoke tasks")
+ return func if func is not None else self.find_func_command(
"generate randomized multiversion tasks")
@property
def generate_randomized_multiversion_command(self):
"""Return the 'generate resmoke tasks' command if found, or None."""
- return self._find_func_command("generate randomized multiversion tasks")
+ return self.find_func_command("generate randomized multiversion tasks")
@property
- def is_generate_resmoke_task(self):
+ def is_generate_resmoke_task(self) -> bool:
"""Return True if 'generate resmoke tasks' command is found."""
return (self.generate_resmoke_tasks_command is not None
or self.generate_randomized_multiversion_command is not None)
+ def generate_fuzzer_tasks_command(self):
+ """Return the 'generate fuzzer tasks' command if found, or None."""
+ func = self.find_func_command("generate fuzzer tasks")
+ return func
+
+ def is_generate_fuzzer_task(self) -> bool:
+ """Return True if 'generate fuzzer tasks' command is found."""
+ return self.generate_fuzzer_tasks_command() is not None
+
@property
def run_tests_command(self):
"""Return the 'run tests' command if found, or None."""
- return self._find_func_command("run tests")
+ return self.find_func_command("run tests")
@property
def is_run_tests_task(self):
@@ -151,7 +161,7 @@ class Task(object):
@property
def multiversion_setup_command(self):
"""Return the 'do multiversion setup' command if found, or None."""
- return self._find_func_command("do multiversion setup")
+ return self.find_func_command("do multiversion setup")
@property
def is_multiversion_task(self):
@@ -353,6 +363,13 @@ class Variant(object):
"""Get the value of the num_jobs_available expansion or None if not found."""
return self.expansion("num_jobs_available")
+ def is_asan_build(self) -> bool:
+ """Determine if this task is an ASAN build."""
+ san_options = self.expansion("san_options")
+ if san_options:
+ return ASAN_SIGNATURE in san_options
+ return False
+
class VariantTask(Task):
"""Represent a task definition in the context of a build variant."""
diff --git a/buildscripts/evergreen_gen_fuzzer_tests.py b/buildscripts/evergreen_gen_fuzzer_tests.py
index 4b5f4e8ed85..ddaf298df92 100755
--- a/buildscripts/evergreen_gen_fuzzer_tests.py
+++ b/buildscripts/evergreen_gen_fuzzer_tests.py
@@ -1,222 +1,178 @@
#!/usr/bin/env python3
"""Generate fuzzer tests to run in evergreen in parallel."""
-import argparse
-from typing import Set, Optional, List, NamedTuple
-
-from shrub.v2 import ShrubProject, FunctionCall, Task, TaskDependency, BuildVariant, ExistingTask
-
-from buildscripts.evergreen_generate_resmoke_tasks import NO_LARGE_DISTRO_ERR, GenerationConfiguration, GENERATE_CONFIG_FILE
-from buildscripts.util.fileops import write_file_to_dir
-import buildscripts.util.read_config as read_config
-import buildscripts.util.taskname as taskname
+import os
+from typing import Optional
+
+import click
+import inject
+from pydantic import BaseModel
+from evergreen import EvergreenApi, RetryingEvergreenApi
+
+from buildscripts.task_generation.evg_config_builder import EvgConfigBuilder
+from buildscripts.task_generation.gen_config import GenerationConfiguration
+from buildscripts.task_generation.gen_task_service import GenTaskOptions, FuzzerGenTaskParams
+from buildscripts.task_generation.gen_task_validation import GenTaskValidationService
+from buildscripts.task_generation.generated_config import GeneratedConfiguration
+from buildscripts.task_generation.resmoke_proxy import ResmokeProxyConfig
+from buildscripts.task_generation.suite_split import SuiteSplitService
+from buildscripts.util.cmdutils import enable_logging
+from buildscripts.util.fileops import read_yaml_file
CONFIG_DIRECTORY = "generated_resmoke_config"
GEN_PARENT_TASK = "generator_tasks"
+GENERATE_CONFIG_FILE = "etc/generate_subtasks_config.yml"
+DEFAULT_TEST_SUITE_DIR = os.path.join("buildscripts", "resmokeconfig", "suites")
+EVG_CONFIG_FILE = "./.evergreen.yml"
-class ConfigOptions(NamedTuple):
- """Configuration options populated by Evergreen expansions."""
+class EvgExpansions(BaseModel):
+ """
+ Evergreen expansions to read for configuration.
+
+ build_id: ID of build being run.
+ build_variant: Build Variant being run on.
+ continue_on_failure: Should tests continue after encountering a failure.
+ is_patch: Are tests being run in a patch build.
+ jstestfuzz_vars: Variable to pass to jstestfuzz command.
+ large_distro_name: Name of "large" distro to use.
+ name: Name of task to generate.
+ npm_command: NPM command to generate fuzzer tests.
+ num_files: Number of fuzzer files to generate.
+ num_tasks: Number of sub-tasks to generate.
+ resmoke_args: Arguments to pass to resmoke.
+ resmoke_jobs_max: Max number of jobs resmoke should execute in parallel.
+ revision: git revision being run against.
+    should_shuffle: Should the tests be shuffled before executing.
+ suite: Resmoke suite to run the tests.
+ task_id: ID of task currently being executed.
+ task_path_suffix: Multiversion configuration if needed.
+ timeout_secs: Timeout to set for task execution.
+ use_large_distro: Should tasks be generated to run on a large distro.
+ """
+ build_id: str
+ build_variant: str
+ continue_on_failure: bool
+ is_patch: Optional[bool]
+ jstestfuzz_vars: Optional[str]
+ large_distro_name: Optional[str]
+ name: str
+ npm_command: Optional[str]
num_files: int
num_tasks: int
resmoke_args: str
- npm_command: str
- jstestfuzz_vars: str
- name: str
- variant: str
- continue_on_failure: bool
resmoke_jobs_max: int
+ revision: str
should_shuffle: bool
- timeout_secs: int
- use_multiversion: str
suite: str
- large_distro_name: str
- use_large_distro: bool
-
-
-def _get_config_options(cmd_line_options, config_file): # pylint: disable=too-many-locals
- """
- Get the configuration to use.
-
- Command line options override config files options.
-
- :param cmd_line_options: Command line options specified.
- :param config_file: config file to use.
- :return: ConfigOptions to use.
- """
- config_file_data = read_config.read_config_file(config_file)
-
- num_files = int(
- read_config.get_config_value("num_files", cmd_line_options, config_file_data,
- required=True))
- num_tasks = int(
- read_config.get_config_value("num_tasks", cmd_line_options, config_file_data,
- required=True))
- resmoke_args = read_config.get_config_value("resmoke_args", cmd_line_options, config_file_data,
- default="")
- npm_command = read_config.get_config_value("npm_command", cmd_line_options, config_file_data,
- default="jstestfuzz")
- jstestfuzz_vars = read_config.get_config_value("jstestfuzz_vars", cmd_line_options,
- config_file_data, default="")
- name = read_config.get_config_value("name", cmd_line_options, config_file_data, required=True)
- variant = read_config.get_config_value("build_variant", cmd_line_options, config_file_data,
- required=True)
- continue_on_failure = read_config.get_config_value("continue_on_failure", cmd_line_options,
- config_file_data, default="false")
- resmoke_jobs_max = read_config.get_config_value("resmoke_jobs_max", cmd_line_options,
- config_file_data, default="0")
- should_shuffle = read_config.get_config_value("should_shuffle", cmd_line_options,
- config_file_data, default="false")
- timeout_secs = read_config.get_config_value("timeout_secs", cmd_line_options, config_file_data,
- default="1800")
- use_multiversion = read_config.get_config_value("task_path_suffix", cmd_line_options,
- config_file_data, default=False)
-
- suite = read_config.get_config_value("suite", cmd_line_options, config_file_data, required=True)
-
- large_distro_name = read_config.get_config_value("large_distro_name", cmd_line_options,
- config_file_data, default="")
- use_large_distro = read_config.get_config_value("use_large_distro", cmd_line_options,
- config_file_data, default=False)
-
- return ConfigOptions(num_files, num_tasks, resmoke_args, npm_command, jstestfuzz_vars, name,
- variant, continue_on_failure, resmoke_jobs_max, should_shuffle,
- timeout_secs, use_multiversion, suite, large_distro_name, use_large_distro)
-
-
-def build_fuzzer_sub_task(task_name: str, task_index: int, options: ConfigOptions) -> Task:
- """
- Build a shrub task to run the fuzzer.
-
- :param task_name: Parent name of task.
- :param task_index: Index of sub task being generated.
- :param options: Options to use for task.
- :return: Shrub task to run the fuzzer.
- """
- sub_task_name = taskname.name_generated_task(task_name, task_index, options.num_tasks,
- options.variant)
-
- run_jstestfuzz_vars = {
- "jstestfuzz_vars":
- "--numGeneratedFiles {0} {1}".format(options.num_files, options.jstestfuzz_vars),
- "npm_command":
- options.npm_command,
- }
- suite_arg = f"--suites={options.suite}"
- run_tests_vars = {
- "continue_on_failure": options.continue_on_failure,
- "resmoke_args": f"{suite_arg} {options.resmoke_args}",
- "resmoke_jobs_max": options.resmoke_jobs_max,
- "should_shuffle": options.should_shuffle,
- "task_path_suffix": options.use_multiversion,
- "timeout_secs": options.timeout_secs,
- "task": options.name
- } # yapf: disable
-
- commands = [
- FunctionCall("do setup"),
- FunctionCall("configure evergreen api credentials") if options.use_multiversion else None,
- FunctionCall("do multiversion setup") if options.use_multiversion else None,
- FunctionCall("setup jstestfuzz"),
- FunctionCall("run jstestfuzz", run_jstestfuzz_vars),
- FunctionCall("run generated tests", run_tests_vars)
- ]
- commands = [command for command in commands if command is not None]
-
- return Task(sub_task_name, commands, {TaskDependency("archive_dist_test_debug")})
-
-
-def generate_fuzzer_sub_tasks(task_name: str, options: ConfigOptions) -> Set[Task]:
- """
- Generate evergreen tasks for fuzzers based on the options given.
-
- :param task_name: Parent name for tasks being generated.
- :param options: task options.
- :return: Set of shrub tasks.
- """
- sub_tasks = {
- build_fuzzer_sub_task(task_name, index, options)
- for index in range(options.num_tasks)
- }
- return sub_tasks
-
-
-def get_distro(options: ConfigOptions, build_variant: str) -> Optional[List[str]]:
- """
- Get the distros that the tasks should be run on.
-
- :param options: ConfigOptions instance
- :param build_variant: Name of build variant being generated.
- :return: List of distros to run on.
- """
- if options.use_large_distro:
- if options.large_distro_name:
- return [options.large_distro_name]
-
- generate_config = GenerationConfiguration.from_yaml_file(GENERATE_CONFIG_FILE)
- if build_variant not in generate_config.build_variant_large_distro_exceptions:
- print(NO_LARGE_DISTRO_ERR.format(build_variant=build_variant))
- raise ValueError("Invalid Evergreen Configuration")
-
- return None
-
-
-def create_fuzzer_task(options: ConfigOptions, build_variant: BuildVariant) -> None:
- """
- Generate an evergreen configuration for fuzzers and add it to the given build variant.
-
- :param options: task options.
- :param build_variant: Build variant to add tasks to.
- """
- task_name = options.name
- sub_tasks = generate_fuzzer_sub_tasks(task_name, options)
+ task_id: str
+ timeout_secs: int
+ use_large_distro: Optional[bool]
+ task_path_suffix: Optional[str]
+
+ @classmethod
+ def from_yaml_file(cls, path: str) -> "EvgExpansions":
+ """
+ Read the generation configuration from the given file.
+
+ :param path: Path to file.
+        :return: Parsed evergreen expansions.
+ """
+ return cls(**read_yaml_file(path))
+
+ def gen_task_options(self) -> GenTaskOptions:
+ """Determine the options for generating tasks based on the given expansions."""
+ return GenTaskOptions(
+ is_patch=self.is_patch,
+ create_misc_suite=True,
+ generated_config_dir=CONFIG_DIRECTORY,
+ use_default_timeouts=False,
+ )
+
+ def fuzzer_gen_task_params(self) -> FuzzerGenTaskParams:
+ """Determine the parameters for generating fuzzer tasks based on the given expansions."""
+ return FuzzerGenTaskParams(
+ task_name=self.name, num_files=self.num_files, num_tasks=self.num_tasks,
+ resmoke_args=self.resmoke_args, npm_command=self.npm_command or "jstestfuzz",
+ jstestfuzz_vars=self.jstestfuzz_vars, variant=self.build_variant,
+ continue_on_failure=self.continue_on_failure, resmoke_jobs_max=self.resmoke_jobs_max,
+ should_shuffle=self.should_shuffle, timeout_secs=self.timeout_secs,
+ use_multiversion=self.task_path_suffix, suite=self.suite,
+ use_large_distro=self.use_large_distro, large_distro_name=self.large_distro_name,
+ config_location=
+ f"{self.build_variant}/{self.revision}/generate_tasks/{self.name}_gen-{self.build_id}.tgz"
+ )
+
+
+class EvgGenFuzzerOrchestrator:
+ """Orchestrate the generation of fuzzer tasks."""
+
+ @inject.autoparams()
+ def __init__(self, validation_service: GenTaskValidationService) -> None:
+ """
+ Initialize the orchestrator.
+
+ :param validation_service: Validation Service for generating tasks.
+ """
+ self.validation_service = validation_service
+
+ @staticmethod
+ def generate_config(fuzzer_params: FuzzerGenTaskParams) -> GeneratedConfiguration:
+ """
+ Generate a fuzzer task based on the given parameters.
+
+ :param fuzzer_params: Parameters describing how fuzzer should be generated.
+ :return: Configuration to generate the specified fuzzer.
+ """
+ builder = EvgConfigBuilder() # pylint: disable=no-value-for-parameter
+
+ builder.generate_fuzzer(fuzzer_params)
+ builder.add_display_task(GEN_PARENT_TASK, {f"{fuzzer_params.task_name}_gen"},
+ fuzzer_params.variant)
+ return builder.build(fuzzer_params.task_name + ".json")
+
+ def generate_fuzzer(self, task_id: str, fuzzer_params: FuzzerGenTaskParams) -> None:
+ """
+ Save the configuration to generate the specified fuzzer to disk.
+
+ :param task_id: ID of task doing the generation.
+ :param fuzzer_params: Parameters describing how fuzzer should be generated.
+ """
+ if not self.validation_service.should_task_be_generated(task_id):
+ print("Not generating configuration due to previous successful generation.")
+ return
+
+ generated_config = self.generate_config(fuzzer_params)
+ generated_config.write_all_to_dir(CONFIG_DIRECTORY)
+
+
+@click.command()
+@click.option("--expansion-file", type=str, required=True,
+ help="Location of expansions file generated by evergreen.")
+@click.option("--evergreen-config", type=str, default=EVG_CONFIG_FILE,
+ help="Location of evergreen configuration file.")
+@click.option("--verbose", is_flag=True, default=False, help="Enable verbose logging.")
+def main(expansion_file: str, evergreen_config: str, verbose: bool) -> None:
+ """Generate fuzzer tests to run in evergreen."""
+ enable_logging(verbose)
- build_variant.display_task(GEN_PARENT_TASK,
- execution_existing_tasks={ExistingTask(f"{options.name}_gen")})
+ evg_expansions = EvgExpansions.from_yaml_file(expansion_file)
- distros = get_distro(options, build_variant.name)
- build_variant.display_task(task_name, sub_tasks, distros=distros)
+ def dependencies(binder: inject.Binder) -> None:
+ binder.bind(SuiteSplitService, None)
+ binder.bind(GenTaskOptions, evg_expansions.gen_task_options())
+ binder.bind(EvergreenApi, RetryingEvergreenApi.get_api(config_file=evergreen_config))
+ binder.bind(GenerationConfiguration,
+ GenerationConfiguration.from_yaml_file(GENERATE_CONFIG_FILE))
+ binder.bind(ResmokeProxyConfig,
+ ResmokeProxyConfig(resmoke_suite_dir=DEFAULT_TEST_SUITE_DIR))
+ inject.configure(dependencies)
-def main():
- """Generate fuzzer tests to run in evergreen."""
- parser = argparse.ArgumentParser(description=main.__doc__)
-
- parser.add_argument("--expansion-file", dest="expansion_file", type=str,
- help="Location of expansions file generated by evergreen.")
- parser.add_argument("--num-files", dest="num_files", type=int,
- help="Number of files to generate per task.")
- parser.add_argument("--num-tasks", dest="num_tasks", type=int,
- help="Number of tasks to generate.")
- parser.add_argument("--resmoke-args", dest="resmoke_args", help="Arguments to pass to resmoke.")
- parser.add_argument("--npm-command", dest="npm_command", help="npm command to run for fuzzer.")
- parser.add_argument("--jstestfuzz-vars", dest="jstestfuzz_vars",
- help="options to pass to jstestfuzz.")
- parser.add_argument("--name", dest="name", help="name of task to generate.")
- parser.add_argument("--variant", dest="build_variant", help="build variant to generate.")
- parser.add_argument("--use-multiversion", dest="task_path_suffix",
- help="Task path suffix for multiversion generated tasks.")
- parser.add_argument("--continue-on-failure", dest="continue_on_failure",
- help="continue_on_failure value for generated tasks.")
- parser.add_argument("--resmoke-jobs-max", dest="resmoke_jobs_max",
- help="resmoke_jobs_max value for generated tasks.")
- parser.add_argument("--should-shuffle", dest="should_shuffle",
- help="should_shuffle value for generated tasks.")
- parser.add_argument("--timeout-secs", dest="timeout_secs",
- help="timeout_secs value for generated tasks.")
- parser.add_argument("--suite", dest="suite", help="Suite to run using resmoke.")
-
- options = parser.parse_args()
-
- config_options = _get_config_options(options, options.expansion_file)
- build_variant = BuildVariant(config_options.variant)
- create_fuzzer_task(config_options, build_variant)
-
- shrub_project = ShrubProject.empty()
- shrub_project.add_build_variant(build_variant)
-
- write_file_to_dir(CONFIG_DIRECTORY, f"{config_options.name}.json", shrub_project.json())
+ gen_fuzzer_orchestrator = EvgGenFuzzerOrchestrator() # pylint: disable=no-value-for-parameter
+ gen_fuzzer_orchestrator.generate_fuzzer(evg_expansions.task_id,
+ evg_expansions.fuzzer_gen_task_params())
if __name__ == '__main__':
- main()
+ main() # pylint: disable=no-value-for-parameter
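
The fuzzer generator now reads its inputs from the evergreen expansions file through a pydantic model instead of argparse plus read_config. A trimmed sketch of that approach, assuming pydantic v1 semantics and covering only a few of the fields above:

from typing import Optional

import yaml
from pydantic import BaseModel

class Expansions(BaseModel):
    build_variant: str
    revision: str
    build_id: str
    name: str
    num_tasks: int
    large_distro_name: Optional[str] = None  # optional expansions may be absent

    @classmethod
    def from_yaml_file(cls, path: str) -> "Expansions":
        # Type coercion and required-field validation happen in the constructor.
        with open(path, encoding="utf8") as fh:
            return cls(**yaml.safe_load(fh))

A missing or mistyped expansion now fails fast with a ValidationError at startup instead of surfacing as a None deep inside task generation.
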
diff --git a/buildscripts/evergreen_gen_multiversion_tests.py b/buildscripts/evergreen_gen_multiversion_tests.py
index 85ef4aa3f2c..7095af871e6 100755
--- a/buildscripts/evergreen_gen_multiversion_tests.py
+++ b/buildscripts/evergreen_gen_multiversion_tests.py
@@ -1,30 +1,41 @@
#!/usr/bin/env python3
"""Generate multiversion tests to run in evergreen in parallel."""
-import datetime
-import logging
+from datetime import datetime, timedelta
import os
import re
-import sys
import tempfile
-from typing import Optional, List, Set
+from typing import Optional, List
from collections import defaultdict
from subprocess import check_output
+import inject
import requests
import click
import structlog
+from pydantic import BaseModel
-from shrub.v2 import ShrubProject, FunctionCall, Task, TaskDependency, BuildVariant, ExistingTask
+from shrub.v2 import ExistingTask
from evergreen.api import RetryingEvergreenApi, EvergreenApi
from buildscripts.resmokelib.multiversionconstants import (
LAST_LTS_MONGO_BINARY, LAST_CONTINUOUS_MONGO_BINARY, REQUIRES_FCV_TAG)
-import buildscripts.util.taskname as taskname
-from buildscripts.util.fileops import write_file_to_dir, read_yaml_file
+from buildscripts.task_generation.evg_config_builder import EvgConfigBuilder
+from buildscripts.task_generation.gen_config import GenerationConfiguration
+from buildscripts.task_generation.generated_config import GeneratedConfiguration
+from buildscripts.task_generation.multiversion_util import MultiversionUtilService
+from buildscripts.task_generation.resmoke_proxy import ResmokeProxyConfig
+from buildscripts.task_generation.suite_split import SuiteSplitConfig, SuiteSplitParameters, \
+ remove_gen_suffix
+from buildscripts.task_generation.suite_split_strategies import SplitStrategy, FallbackStrategy, \
+ greedy_division, round_robin_fallback
+from buildscripts.task_generation.task_types.fuzzer_tasks import FuzzerGenTaskParams
+from buildscripts.task_generation.task_types.gentask_options import GenTaskOptions
+from buildscripts.task_generation.task_types.multiversion_tasks import MultiversionGenTaskParams
+from buildscripts.util.cmdutils import enable_logging
+from buildscripts.util.fileops import read_yaml_file
import buildscripts.evergreen_generate_resmoke_tasks as generate_resmoke
-from buildscripts.evergreen_generate_resmoke_tasks import Suite, ConfigOptions
import buildscripts.evergreen_gen_fuzzer_tests as gen_fuzzer
import buildscripts.ciconfig.tags as _tags
@@ -32,17 +43,11 @@ import buildscripts.ciconfig.tags as _tags
LOGGER = structlog.getLogger(__name__)
-REQUIRED_CONFIG_KEYS = {
- "build_variant", "fallback_num_sub_suites", "project", "task_id", "task_name",
- "use_multiversion"
-}
-
-DEFAULT_CONFIG_VALUES = generate_resmoke.DEFAULT_CONFIG_VALUES
-CONFIG_DIR = DEFAULT_CONFIG_VALUES["generated_config_dir"]
-DEFAULT_CONFIG_VALUES["is_jstestfuzz"] = False
-TEST_SUITE_DIR = DEFAULT_CONFIG_VALUES["test_suites_dir"]
+DEFAULT_CONFIG_DIR = "generated_resmoke_config"
+CONFIG_DIR = DEFAULT_CONFIG_DIR
+DEFAULT_TEST_SUITE_DIR = os.path.join("buildscripts", "resmokeconfig", "suites")
+LOOKBACK_DURATION_DAYS = 14
CONFIG_FILE = generate_resmoke.EVG_CONFIG_FILE
-CONFIG_FORMAT_FN = generate_resmoke.CONFIG_FORMAT_FN
REPL_MIXED_VERSION_CONFIGS = ["new-old-new", "new-new-old", "old-new-new"]
SHARDED_MIXED_VERSION_CONFIGS = ["new-old-old-new"]
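
These constants enumerate the mixed-binary orderings each multiversion task is generated against, and get_version_configs(is_sharded), kept later in this file, selects between them. A sketch of that selection; the body is inferred from the constants rather than quoted from the implementation:

REPL_MIXED_VERSION_CONFIGS = ["new-old-new", "new-new-old", "old-new-new"]
SHARDED_MIXED_VERSION_CONFIGS = ["new-old-old-new"]

def get_version_configs(is_sharded: bool):
    """Return the mixed version configurations to generate tasks for."""
    if is_sharded:
        return SHARDED_MIXED_VERSION_CONFIGS
    return REPL_MIXED_VERSION_CONFIGS
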
@@ -53,6 +58,9 @@ RANDOM_REPLSETS_TAG = "random_multiversion_ds"
BACKPORT_REQUIRED_TAG = "backport_required_multiversion"
EXCLUDE_TAGS = f"{REQUIRES_FCV_TAG},multiversion_incompatible,{BACKPORT_REQUIRED_TAG}"
EXCLUDE_TAGS_FILE = "multiversion_exclude_tags.yml"
+GEN_SUFFIX = "_gen"
+GEN_PARENT_TASK = "generator_tasks"
+ASAN_SIGNATURE = "detect_leaks=1"
# The directory in which BACKPORTS_REQUIRED_FILE resides.
ETC_DIR = "etc"
@@ -60,20 +68,145 @@ BACKPORTS_REQUIRED_FILE = "backports_required_for_multiversion_tests.yml"
BACKPORTS_REQUIRED_BASE_URL = "https://raw.githubusercontent.com/mongodb/mongo"
-def enable_logging():
- """Enable INFO level logging."""
- logging.basicConfig(
- format="[%(asctime)s - %(name)s - %(levelname)s] %(message)s",
- level=logging.INFO,
- stream=sys.stdout,
- )
- structlog.configure(logger_factory=structlog.stdlib.LoggerFactory())
+class EvgExpansions(BaseModel):
+ """Evergreen expansions file contents."""
+
+ project: str
+ target_resmoke_time: int = 60
+ max_sub_suites: int = 5
+ max_tests_per_suite: int = 100
+ san_options: Optional[str]
+ task_name: str
+ suite: Optional[str]
+ num_files: Optional[int]
+ num_tasks: Optional[int]
+ resmoke_args: Optional[str]
+ npm_command: Optional[str]
+ jstestfuzz_vars: Optional[str]
+ build_variant: str
+ continue_on_failure: Optional[bool]
+ resmoke_jobs_max: Optional[int]
+ should_shuffle: Optional[bool]
+ timeout_secs: Optional[int]
+ use_multiversion: Optional[str]
+ use_large_distro: Optional[bool]
+ large_distro_name: Optional[str]
+ revision: str
+ build_id: str
+ create_misc_suite: bool = True
+ is_patch: bool = False
+ is_jstestfuzz: bool = False
+
+ @property
+ def task(self) -> str:
+ """Get the name of the task."""
+ return self.task_name
+
+ @classmethod
+ def from_yaml_file(cls, path: str) -> "EvgExpansions":
+ """Read the evergreen expansions from the given file."""
+ return cls(**read_yaml_file(path))
+
+ def config_location(self) -> str:
+ """Get the location to store the configuration."""
+ return f"{self.build_variant}/{self.revision}/generate_tasks/{self.task}_gen-{self.build_id}.tgz"
+
+ def is_asan_build(self) -> bool:
+ """Determine if this task is an ASAN build."""
+ san_options = self.san_options
+ if san_options:
+ return ASAN_SIGNATURE in san_options
+ return False
+
+ def get_generation_options(self) -> GenTaskOptions:
+ """Get options for how tasks should be generated."""
+ return GenTaskOptions(
+ create_misc_suite=self.create_misc_suite,
+ is_patch=self.is_patch,
+ generated_config_dir=DEFAULT_CONFIG_DIR,
+ use_default_timeouts=False,
+ )
+
+ def get_fuzzer_params(self, version_config: str, is_sharded: bool) -> FuzzerGenTaskParams:
+ """
+        Get parameters to generate fuzzer tasks.
+
+ :param version_config: Version configuration to generate for.
+ :param is_sharded: If configuration is for sharded tests.
+ :return: Parameters to generate fuzzer tasks.
+ """
+ name = f"{self.suite}_multiversion_{version_config}"
+ add_resmoke_args = get_multiversion_resmoke_args(is_sharded)
+ resmoke_args = f"{self.resmoke_args or ''} --mixedBinVersions={version_config} {add_resmoke_args}"
+
+ return FuzzerGenTaskParams(
+ num_files=self.num_files,
+ num_tasks=self.num_tasks,
+ resmoke_args=resmoke_args,
+ npm_command=self.npm_command,
+ jstestfuzz_vars=self.jstestfuzz_vars,
+ task_name=name,
+ variant=self.build_variant,
+ continue_on_failure=self.continue_on_failure,
+ resmoke_jobs_max=self.resmoke_jobs_max,
+ should_shuffle=self.should_shuffle,
+ timeout_secs=self.timeout_secs,
+ use_multiversion=self.use_multiversion,
+ suite=self.suite or self.task,
+ use_large_distro=self.use_large_distro,
+ large_distro_name=self.large_distro_name,
+ config_location=self.config_location(),
+ )
+
+ def get_split_params(self) -> SuiteSplitParameters:
+ """Get the parameters specified to split suites."""
+ task = remove_gen_suffix(self.task)
+
+ return SuiteSplitParameters(
+ task_name=self.task,
+ suite_name=self.suite or task,
+ filename=self.suite or task,
+ test_file_filter=None,
+ build_variant=self.build_variant,
+ is_asan=self.is_asan_build(),
+ )
+
+ def get_split_config(self, start_date: datetime, end_date: datetime) -> SuiteSplitConfig:
+ """
+        Get the configuration specified to split suites.
+
+ :param start_date: Start date for historic results query.
+ :param end_date: End date for historic results query.
+ :return: Configuration to use for splitting suites.
+ """
+ return SuiteSplitConfig(
+ evg_project=self.project,
+ target_resmoke_time=self.target_resmoke_time,
+ max_sub_suites=self.max_sub_suites,
+ max_tests_per_suite=self.max_tests_per_suite,
+ start_date=start_date,
+ end_date=end_date,
+ )
+
+ def get_generation_params(self, is_sharded: bool) -> MultiversionGenTaskParams:
+ """
+        Get the parameters to use for generating multiversion tasks.
-def is_suite_sharded(suite_dir: str, suite_name: str) -> bool:
- """Return true if a suite uses ShardedClusterFixture."""
- source_config = read_yaml_file(os.path.join(suite_dir, suite_name + ".yml"))
- return source_config["executor"]["fixture"]["class"] == "ShardedClusterFixture"
+    :param is_sharded: True if a sharded suite is being generated.
+ :return: Parameters to use for generating multiversion tasks.
+ """
+ version_config_list = get_version_configs(is_sharded)
+ task = remove_gen_suffix(self.task)
+ return MultiversionGenTaskParams(
+ mixed_version_configs=version_config_list,
+ is_sharded=is_sharded,
+ resmoke_args=self.resmoke_args,
+ parent_task_name=task,
+ origin_suite=self.suite or task,
+ use_large_distro=self.use_large_distro,
+ large_distro_name=self.large_distro_name,
+ config_location=self.config_location(),
+ )
def get_version_configs(is_sharded: bool) -> List[str]:
@@ -126,220 +259,77 @@ def get_last_lts_yaml(commit_hash):
return backports_required_last_lts
-def _generate_resmoke_args(suite_file: str, mixed_version_config: str, is_sharded: bool, options,
- burn_in_test: Optional[str]) -> str:
- return (
- f"{options.resmoke_args} --suite={suite_file} --mixedBinVersions={mixed_version_config}"
- f" --excludeWithAnyTags={EXCLUDE_TAGS},{generate_resmoke.remove_gen_suffix(options.task)}_{BACKPORT_REQUIRED_TAG} --tagFile={os.path.join(CONFIG_DIR, EXCLUDE_TAGS_FILE)} --originSuite={options.suite} "
- f" {get_multiversion_resmoke_args(is_sharded)} {burn_in_test if burn_in_test else ''}")
-
+class MultiVersionGenerateOrchestrator:
+ """An orchestrator for generating multiversion tasks."""
-class EvergreenMultiversionConfigGenerator(object):
- """Generate evergreen configurations for multiversion tests."""
-
- def __init__(self, evg_api: EvergreenApi, options):
- """Create new EvergreenMultiversionConfigGenerator object."""
- self.evg_api = evg_api
- self.options = options
- # Strip the "_gen" suffix appended to the name of tasks generated by evergreen.
- self.task = generate_resmoke.remove_gen_suffix(self.options.task)
-
- def _generate_sub_task(self, mixed_version_config: str, task: str, task_index: int, suite: str,
- num_suites: int, is_sharded: bool,
- burn_in_test: Optional[str] = None) -> Task:
- # pylint: disable=too-many-arguments
- """
- Generate a sub task to be run with the provided suite and mixed version config.
-
- :param mixed_version_config: mixed version configuration.
- :param task: Name of task.
- :param task_index: Index of task to generate.
- :param suite: Name of suite being generated.
- :param num_suites: Number os suites being generated.
- :param is_sharded: If this is being generated for a sharded configuration.
- :param burn_in_test: If generation is for burn_in, burn_in options to use.
- :return: Shrub configuration for task specified.
- """
- # Create a sub task name appended with the task_index and build variant name.
- task_name = f"{task}_{mixed_version_config}"
- sub_task_name = taskname.name_generated_task(task_name, task_index, num_suites,
- self.options.variant)
- gen_task_name = BURN_IN_TASK if burn_in_test is not None else self.task
-
- run_tests_vars = {
- "resmoke_args":
- _generate_resmoke_args(suite, mixed_version_config, is_sharded, self.options,
- burn_in_test),
- "task":
- gen_task_name,
- }
-
- commands = [
- FunctionCall("do setup"),
- # Fetch and download the proper mongod binaries before running multiversion tests.
- FunctionCall("configure evergreen api credentials"),
- FunctionCall("do multiversion setup"),
- FunctionCall("run generated tests", run_tests_vars),
- ]
-
- return Task(sub_task_name, commands, {TaskDependency("archive_dist_test_debug")})
-
- def _generate_burn_in_execution_tasks(self, version_configs: List[str], suites: List[Suite],
- burn_in_test: str, burn_in_idx: int,
- is_sharded: bool) -> Set[Task]:
- """
- Generate shrub tasks for burn_in executions.
-
- :param version_configs: Version configs to generate for.
- :param suites: Suites to generate.
- :param burn_in_test: burn_in_test configuration.
- :param burn_in_idx: Index of burn_in task being generated.
- :param is_sharded: If configuration should be generated for sharding tests.
- :return: Set of generated shrub tasks.
- """
- # pylint: disable=too-many-arguments
- burn_in_prefix = "burn_in_multiversion"
- task = f"{burn_in_prefix}:{self.task}"
-
- # For burn in tasks, it doesn't matter which generated suite yml to use as all the
- # yaml configurations are the same.
- source_suite = os.path.join(CONFIG_DIR, suites[0].name + ".yml")
- tasks = {
- self._generate_sub_task(version_config, task, burn_in_idx, source_suite, 1, is_sharded,
- burn_in_test)
- for version_config in version_configs
- }
-
- return tasks
-
- def _get_fuzzer_options(self, version_config: str, is_sharded: bool) -> ConfigOptions:
+ @inject.autoparams()
+ def __init__(self, evg_api: EvergreenApi, multiversion_util: MultiversionUtilService,
+ gen_task_options: GenTaskOptions) -> None:
"""
- Get options to generate fuzzer tasks.
+ Initialize the orchestrator.
- :param version_config: Version configuration to generate for.
- :param is_sharded: If configuration is for sharded tests.
- :return: Configuration options to generate fuzzer tasks.
+ :param evg_api: Evergreen API client.
+    :param multiversion_util: Multiversion utilities service.
+ :param gen_task_options: Options to use for generating tasks.
"""
- fuzzer_config = ConfigOptions(self.options.config)
- fuzzer_config.name = f"{self.options.suite}_multiversion"
- fuzzer_config.num_files = int(self.options.num_files)
- fuzzer_config.num_tasks = int(self.options.num_tasks)
- add_resmoke_args = get_multiversion_resmoke_args(is_sharded)
- fuzzer_config.resmoke_args = f"{self.options.resmoke_args} "\
- f"--mixedBinVersions={version_config} {add_resmoke_args}"
- return fuzzer_config
+ self.evg_api = evg_api
+ self.multiversion_util = multiversion_util
+ self.gen_task_options = gen_task_options
- def _generate_fuzzer_tasks(self, build_variant: BuildVariant, version_configs: List[str],
- is_sharded: bool) -> None:
+ def generate_fuzzer(self, evg_expansions: EvgExpansions) -> GeneratedConfiguration:
"""
- Generate fuzzer tasks and add them to the given build variant.
+ Generate configuration for the fuzzer task specified by the expansions.
- :param build_variant: Build variant to add tasks to.
- :param version_configs: Version configurations to generate.
- :param is_sharded: Should configuration be generated for sharding.
- """
- tasks = set()
- for version_config in version_configs:
- fuzzer_config = self._get_fuzzer_options(version_config, is_sharded)
- task_name = f"{fuzzer_config.name}_{version_config}"
- sub_tasks = gen_fuzzer.generate_fuzzer_sub_tasks(task_name, fuzzer_config)
- tasks = tasks.union(sub_tasks)
-
- existing_tasks = {ExistingTask(f"{self.options.suite}_multiversion_gen")}
- build_variant.display_task(self.task, tasks, execution_existing_tasks=existing_tasks)
-
- def generate_resmoke_suites(self) -> List[Suite]:
- """Generate the resmoke configuration files for this generator."""
- # Divide tests into suites based on run-time statistics for the last
- # LOOKBACK_DURATION_DAYS. Tests without enough run-time statistics will be placed
- # in the misc suite.
- gen_suites = generate_resmoke.GenerateSubSuites(self.evg_api, self.options)
- end_date = datetime.datetime.utcnow().replace(microsecond=0)
- start_date = end_date - datetime.timedelta(days=generate_resmoke.LOOKBACK_DURATION_DAYS)
- suites = gen_suites.calculate_suites(start_date, end_date)
- # Render the given suites into yml files that can be used by resmoke.py.
- config_file_dict = generate_resmoke.render_suite_files(
- suites, self.options.suite, self.options.generated_suite_filename, gen_suites.test_list,
- TEST_SUITE_DIR, self.options.create_misc_suite)
- generate_resmoke.write_file_dict(CONFIG_DIR, config_file_dict)
-
- return suites
-
- def get_burn_in_tasks(self, burn_in_test: str, burn_in_idx: int) -> Set[Task]:
+ :param evg_expansions: Evergreen expansions describing what to generate.
+ :return: Configuration to generate the specified task.
"""
- Get the burn_in tasks being generated.
+ suite = evg_expansions.suite
+ is_sharded = self.multiversion_util.is_suite_sharded(suite)
+ version_config_list = get_version_configs(is_sharded)
- :param burn_in_test: Burn in test configuration.
- :param burn_in_idx: Index of burn_in configuration being generated.
- :return: Set of shrub tasks for the specified burn_in.
+ builder = EvgConfigBuilder() # pylint: disable=no-value-for-parameter
+
+ fuzzer_task_set = set()
+ for version_config in version_config_list:
+ fuzzer_params = evg_expansions.get_fuzzer_params(version_config, is_sharded)
+ fuzzer_task = builder.generate_fuzzer(fuzzer_params)
+ fuzzer_task_set = fuzzer_task_set.union(fuzzer_task.sub_tasks)
+
+ existing_tasks = {ExistingTask(task) for task in fuzzer_task_set}
+        existing_tasks.add(ExistingTask(f"{suite}_multiversion_gen"))
+ builder.add_display_task(evg_expansions.task, existing_tasks, evg_expansions.build_variant)
+ return builder.build(f"{evg_expansions.task}.json")
+
+ def generate_resmoke_suite(self, evg_expansions: EvgExpansions) -> GeneratedConfiguration:
"""
- is_sharded = is_suite_sharded(TEST_SUITE_DIR, self.options.suite)
- version_configs = get_version_configs(is_sharded)
- suites = self.generate_resmoke_suites()
-
- # Generate the subtasks to run burn_in_test against the appropriate mixed version
- # configurations. The display task is defined later as part of generating the burn
- # in tests.
- tasks = self._generate_burn_in_execution_tasks(version_configs, suites, burn_in_test,
- burn_in_idx, is_sharded)
- return tasks
-
- def generate_evg_tasks(self, build_variant: BuildVariant) -> None:
- # pylint: disable=too-many-locals
+ Generate configuration for the resmoke task specified by the expansions.
+
+ :param evg_expansions: Evergreen expansions describing what to generate.
+ :return: Configuration to generate the specified task.
"""
- Generate evergreen tasks for multiversion tests.
+ suite = evg_expansions.suite or evg_expansions.task
+ is_sharded = self.multiversion_util.is_suite_sharded(suite)
- The number of tasks generated equals
- (the number of version configs) * (the number of generated suites).
+ split_params = evg_expansions.get_split_params()
+ gen_params = evg_expansions.get_generation_params(is_sharded)
- :param build_variant: Build variant to add generated configuration to.
+ builder = EvgConfigBuilder() # pylint: disable=no-value-for-parameter
+ builder.add_multiversion_suite(split_params, gen_params)
+ builder.add_display_task(GEN_PARENT_TASK, {f"{split_params.task_name}{GEN_SUFFIX}"},
+ evg_expansions.build_variant)
+ return builder.build(f"{evg_expansions.task}.json")
+
+ def generate(self, evg_expansions: EvgExpansions) -> None:
+ """
+ Generate configuration for the specified task and save it to disk.
+
+ :param evg_expansions: Evergreen expansions describing what to generate.
"""
- is_sharded = is_suite_sharded(TEST_SUITE_DIR, self.options.suite)
- version_configs = get_version_configs(is_sharded)
-
- if self.options.is_jstestfuzz:
- self._generate_fuzzer_tasks(build_variant, version_configs, is_sharded)
- return
-
- suites = self.generate_resmoke_suites()
- sub_tasks = set()
- for version_config in version_configs:
- idx = 0
- for suite in suites:
- # Generate the newly divided test suites
- source_suite = os.path.join(CONFIG_DIR, suite.name + ".yml")
- sub_tasks.add(
- self._generate_sub_task(version_config, self.task, idx, source_suite,
- len(suites), is_sharded))
- idx += 1
-
- # Also generate the misc task.
- misc_suite_name = "{0}_misc".format(self.options.suite)
- misc_suite = os.path.join(CONFIG_DIR, misc_suite_name + ".yml")
- sub_tasks.add(
- self._generate_sub_task(version_config, self.task, idx, misc_suite, 1, is_sharded))
- idx += 1
-
- build_variant.display_task(self.task, sub_tasks,
- execution_existing_tasks={ExistingTask(f"{self.task}_gen")})
-
- def run(self) -> None:
- """Generate multiversion suites that run within a specified target execution time."""
- if not generate_resmoke.should_tasks_be_generated(self.evg_api, self.options.task_id):
- LOGGER.info("Not generating configuration due to previous successful generation.")
- return
-
- build_variant = BuildVariant(self.options.variant)
- self.generate_evg_tasks(build_variant)
-
- shrub_project = ShrubProject.empty()
- shrub_project.add_build_variant(build_variant)
- write_file_to_dir(CONFIG_DIR, f"{self.task}.json", shrub_project.json())
-
- if len(os.listdir(CONFIG_DIR)) == 0:
- raise RuntimeError(
- f"Multiversion suite generator unexpectedly yielded no configuration in '{CONFIG_DIR}'"
- )
+ if evg_expansions.is_jstestfuzz:
+ generated_config = self.generate_fuzzer(evg_expansions)
+ else:
+ generated_config = self.generate_resmoke_suite(evg_expansions)
+ generated_config.write_all_to_dir(DEFAULT_CONFIG_DIR)
@click.group()
@@ -367,11 +357,28 @@ def run_generate_tasks(expansion_file: str, evergreen_config: Optional[str] = No
:param expansion_file: Configuration file.
:param evergreen_config: Evergreen configuration file.
"""
- evg_api = RetryingEvergreenApi.get_api(config_file=evergreen_config)
- config_options = generate_resmoke.ConfigOptions.from_file(
- expansion_file, REQUIRED_CONFIG_KEYS, DEFAULT_CONFIG_VALUES, CONFIG_FORMAT_FN)
- config_generator = EvergreenMultiversionConfigGenerator(evg_api, config_options)
- config_generator.run()
+ enable_logging(False)
+
+ end_date = datetime.utcnow().replace(microsecond=0)
+ start_date = end_date - timedelta(days=LOOKBACK_DURATION_DAYS)
+
+ evg_expansions = EvgExpansions.from_yaml_file(expansion_file)
+
+ def dependencies(binder: inject.Binder) -> None:
+ binder.bind(SuiteSplitConfig, evg_expansions.get_split_config(start_date, end_date))
+ binder.bind(SplitStrategy, greedy_division)
+ binder.bind(FallbackStrategy, round_robin_fallback)
+ binder.bind(GenTaskOptions, evg_expansions.get_generation_options())
+ binder.bind(EvergreenApi, RetryingEvergreenApi.get_api(config_file=evergreen_config))
+ binder.bind(GenerationConfiguration,
+ GenerationConfiguration.from_yaml_file(gen_fuzzer.GENERATE_CONFIG_FILE))
+ binder.bind(ResmokeProxyConfig,
+ ResmokeProxyConfig(resmoke_suite_dir=DEFAULT_TEST_SUITE_DIR))
+
+ inject.configure(dependencies)
+
+ generate_orchestrator = MultiVersionGenerateOrchestrator() # pylint: disable=no-value-for-parameter
+ generate_orchestrator.generate(evg_expansions)
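
These bindings are what let the orchestrator be built with no constructor arguments: inject.autoparams resolves each annotated parameter from the injector. A self-contained sketch of the pattern with toy classes (not the real services):

import inject

class Settings:
    def __init__(self, retries: int) -> None:
        self.retries = retries

class Service:
    @inject.autoparams()
    def __init__(self, settings: Settings) -> None:
        # `settings` is resolved from the injector, not passed by the caller.
        self.settings = settings

def dependencies(binder: inject.Binder) -> None:
    binder.bind(Settings, Settings(retries=5))

inject.configure(dependencies)
service = Service()  # pylint: disable=no-value-for-parameter
assert service.settings.retries == 5
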
@main.command("generate-exclude-tags")
@@ -386,7 +393,7 @@ def generate_exclude_yaml(output: str) -> None:
last-lts branch to determine which tests should be denylisted.
"""
- enable_logging()
+ enable_logging(False)
location, _ = os.path.split(os.path.abspath(output))
if not os.path.isdir(location):
diff --git a/buildscripts/evergreen_generate_resmoke_tasks.py b/buildscripts/evergreen_generate_resmoke_tasks.py
index 5d6f47b6afb..18e6682f353 100755
--- a/buildscripts/evergreen_generate_resmoke_tasks.py
+++ b/buildscripts/evergreen_generate_resmoke_tasks.py
@@ -5,274 +5,164 @@ Resmoke Test Suite Generator.
Analyze the evergreen history for tests run under the given task and create new evergreen tasks
to attempt to keep the task runtime under a specified amount.
"""
-# pylint: disable=too-many-lines
-from copy import deepcopy
-import datetime
-from datetime import timedelta
-from inspect import getframeinfo, currentframe
-import logging
-import math
+from datetime import timedelta, datetime
import os
-import re
import sys
-from distutils.util import strtobool # pylint: disable=no-name-in-module
-from typing import Dict, List, Set, Sequence, Optional, Any, Match, NamedTuple
+from typing import Optional
import click
-import requests
+import inject
import structlog
-import yaml
from pydantic.main import BaseModel
-
-from shrub.v2 import Task, TaskDependency, BuildVariant, ExistingTask, ShrubProject
from evergreen.api import EvergreenApi, RetryingEvergreenApi
# Get relative imports to work when the package is not installed on the PYTHONPATH.
+from buildscripts.task_generation.gen_task_validation import GenTaskValidationService
+
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# pylint: disable=wrong-import-position
-import buildscripts.resmokelib.parser as _parser
-import buildscripts.resmokelib.suitesconfig as suitesconfig
-from buildscripts.util.fileops import write_file_to_dir, read_yaml_file
-import buildscripts.util.read_config as read_config
-import buildscripts.util.taskname as taskname
-from buildscripts.util.teststats import HistoricTaskData, TestRuntime, normalize_test_name
-from buildscripts.patch_builds.task_generation import TimeoutInfo, resmoke_commands
+from buildscripts.task_generation.evg_config_builder import EvgConfigBuilder
+from buildscripts.task_generation.gen_config import GenerationConfiguration
+from buildscripts.task_generation.gen_task_service import GenTaskOptions, ResmokeGenTaskParams
+from buildscripts.task_generation.suite_split_strategies import SplitStrategy, FallbackStrategy, \
+ greedy_division, round_robin_fallback
+from buildscripts.task_generation.resmoke_proxy import ResmokeProxyConfig
+from buildscripts.task_generation.suite_split import SuiteSplitConfig, \
+ SuiteSplitParameters
+from buildscripts.util.cmdutils import enable_logging
+from buildscripts.util.fileops import read_yaml_file
# pylint: enable=wrong-import-position
LOGGER = structlog.getLogger(__name__)
-AVG_SETUP_TIME = int(timedelta(minutes=5).total_seconds())
DEFAULT_TEST_SUITE_DIR = os.path.join("buildscripts", "resmokeconfig", "suites")
EVG_CONFIG_FILE = "./.evergreen.yml"
GENERATE_CONFIG_FILE = "etc/generate_subtasks_config.yml"
-MIN_TIMEOUT_SECONDS = int(timedelta(minutes=5).total_seconds())
-MAX_EXPECTED_TIMEOUT = int(timedelta(hours=48).total_seconds())
LOOKBACK_DURATION_DAYS = 14
GEN_SUFFIX = "_gen"
GEN_PARENT_TASK = "generator_tasks"
-CLEAN_EVERY_N_HOOK = "CleanEveryN"
+GENERATED_CONFIG_DIR = "generated_resmoke_config"
ASAN_SIGNATURE = "detect_leaks=1"
-HEADER_TEMPLATE = """# DO NOT EDIT THIS FILE. All manual edits will be lost.
-# This file was generated by {file} from
-# {suite_file}.
-"""
-
-NO_LARGE_DISTRO_ERR = """
-***************************************************************************************
-It appears we are trying to generate a task marked as requiring a large distro, but the
-build variant has not specified a large build variant. In order to resolve this error,
-you need to:
-
-(1) add a "large_distro_name" expansion to this build variant ("{build_variant}").
-
- -- or --
-
-(2) add this build variant ("{build_variant}") to the "build_variant_large_distro_exception"
-list in the "etc/generate_subtasks_config.yml" file.
-***************************************************************************************
-"""
-
-REQUIRED_CONFIG_KEYS = {
- "build_variant",
- "fallback_num_sub_suites",
- "project",
- "task_id",
- "task_name",
-}
-
-DEFAULT_CONFIG_VALUES = {
- "generated_config_dir": "generated_resmoke_config",
- "max_tests_per_suite": 100,
- "max_sub_suites": 3,
- "resmoke_args": "",
- "resmoke_repeat_suites": 1,
- "run_multiple_jobs": "true",
- "target_resmoke_time": 60,
- "test_suites_dir": DEFAULT_TEST_SUITE_DIR,
- "use_default_timeouts": False,
- "use_large_distro": False,
-}
-
-CONFIG_FORMAT_FN = {
- "fallback_num_sub_suites": int,
- "max_sub_suites": int,
- "max_tests_per_suite": int,
- "target_resmoke_time": int,
-}
+DEFAULT_MAX_SUB_SUITES = 5
+DEFAULT_MAX_TESTS_PER_SUITE = 100
+DEFAULT_TARGET_RESMOKE_TIME = 60
-class GenerationConfiguration(BaseModel):
- """Configuration for generating sub-tasks."""
+class EvgExpansions(BaseModel):
+ """
+ Evergreen expansions file contents.
+
+ build_id: ID of build being run.
+ build_variant: Build variant task is being generated under.
+ is_patch: Is this part of a patch build.
+ large_distro_name: Name of distro to use for 'large' tasks.
+ max_sub_suites: Max number of sub-suites to create for a single task.
+ max_tests_per_suite: Max number of tests to include in a single sub-suite.
+ project: Evergreen project being run in.
+ resmoke_args: Arguments to pass to resmoke for generated tests.
+ resmoke_jobs_max: Max number of jobs for resmoke to run in parallel.
+ resmoke_repeat_suites: Number of times resmoke should repeat each suite.
+ revision: Git revision being run against.
+ san_options: SAN options build variant is running under.
+ suite: Name of test suite being generated.
+ target_resmoke_time: Target time (in minutes) to keep sub-suite under.
+ task_id: ID of task creating the generated configuration.
+ task_name: Name of task creating the generated configuration.
+ use_large_distro: Should the generated tasks run on "large" distros.
+ use_multiversion: Multiversion task path suffix, if the generated tasks are multiversion.
+ """
- build_variant_large_distro_exceptions: Set[str]
+ build_id: str
+ build_variant: str
+ is_patch: Optional[bool]
+ large_distro_name: Optional[str]
+ max_sub_suites: int = DEFAULT_MAX_SUB_SUITES
+ max_tests_per_suite: int = DEFAULT_MAX_TESTS_PER_SUITE
+ project: str
+ resmoke_args: str = ""
+ resmoke_jobs_max: Optional[int]
+ resmoke_repeat_suites: int = 1
+ revision: str
+ san_options: Optional[str]
+ suite: Optional[str]
+ target_resmoke_time: int = DEFAULT_TARGET_RESMOKE_TIME
+ task_id: str
+ task_name: str
+ use_large_distro: bool = False
+ use_multiversion: Optional[str]
@classmethod
- def from_yaml_file(cls, path: str) -> "GenerationConfiguration":
+ def from_yaml_file(cls, path: str) -> "EvgExpansions":
"""Read the generation configuration from the given file."""
return cls(**read_yaml_file(path))
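
For illustration, a hypothetical expansions file that satisfies this model, with only the required fields plus one override (all values invented):

import yaml

sample_expansions = {
    "build_id": "build_abc123",
    "build_variant": "enterprise-rhel-80-64-bit",
    "project": "mongodb-mongo-master",
    "revision": "abc123",
    "task_id": "task_abc123",
    "task_name": "jsCore_gen",
    "max_sub_suites": 10,
}
with open("expansions.yml", "w") as fh:
    yaml.safe_dump(sample_expansions, fh)

expansions = EvgExpansions.from_yaml_file("expansions.yml")
assert expansions.task == "jsCore"  # "_gen" suffix stripped by the property below.
assert expansions.max_sub_suites == 10  # Explicit value overrides the default of 5.
assert expansions.resmoke_repeat_suites == 1  # Unset fields fall back to defaults.
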
- @classmethod
- def default_config(cls) -> "GenerationConfiguration":
- """Create a default configuration."""
- return cls(build_variant_large_distro_exceptions=set())
-
-
-class ConfigOptions(object):
- """Retrieve configuration from a config file."""
-
- def __init__(self, config, required_keys=None, defaults=None, formats=None):
- """
- Create an instance of ConfigOptions.
-
- :param config: Dictionary of configuration to use.
- :param required_keys: Set of keys required by this config.
- :param defaults: Dict of default values for keys.
- :param formats: Dict with functions to format values before returning.
- """
- self.config = config
- self.required_keys = required_keys if required_keys else set()
- self.default_values = defaults if defaults else {}
- self.formats = formats if formats else {}
-
- @classmethod
- def from_file(cls, filepath, required_keys, defaults, formats):
- """
- Create an instance of ConfigOptions based on the given config file.
-
- :param filepath: Path to file containing configuration.
- :param required_keys: Set of keys required by this config.
- :param defaults: Dict of default values for keys.
- :param formats: Dict with functions to format values before returning.
- :return: Instance of ConfigOptions.
- """
- return cls(read_config.read_config_file(filepath), required_keys, defaults, formats)
-
- @property
- def depends_on(self):
- """List of dependencies specified."""
- return split_if_exists(self._lookup(self.config, "depends_on"))
-
- @property
- def is_patch(self):
- """Is this running in a patch build."""
- patch = self.config.get("is_patch")
- if patch:
- return strtobool(patch)
- return None
-
- @property
- def repeat_suites(self):
- """How many times should the suite be repeated."""
- return int(self.resmoke_repeat_suites)
-
- @property
- def suite(self):
- """Return test suite is being run."""
- return self.config.get("suite", self.task)
-
@property
- def task(self):
- """Return task being run."""
+ def task(self) -> str:
+ """Get the task being generated."""
return remove_gen_suffix(self.task_name)
- @property
- def run_tests_task(self):
- """Return name of task name for s3 folder containing generated tasks config."""
- return self.task
-
- @property
- def run_tests_build_variant(self):
- """Return name of build_variant for s3 folder containing generated tasks config."""
- return self.build_variant
-
- @property
- def run_tests_build_id(self):
- """Return name of build_id for s3 folder containing generated tasks config."""
- return self.build_id
-
- @property
- def create_misc_suite(self):
- """Whether or not a _misc suite file should be created."""
- return True
-
- @property
- def display_task_name(self):
- """Return the name to use as the display task."""
- return self.task
-
- @property
- def generated_suite_filename(self):
- """Filename for the generated suite file."""
- return self.suite
-
- @property
- def gen_task_set(self):
- """Return the set of tasks used to generate this configuration."""
- return {self.task_name}
-
- @property
- def variant(self):
- """Return build variant is being run on."""
- return self.build_variant
-
- def _lookup(self, config, item):
- if item not in config:
- if item in self.required_keys:
- raise KeyError(f"{item} must be specified in configuration.")
- return self.default_values.get(item, None)
-
- if item in self.formats and item in config:
- return self.formats[item](config[item])
-
- return config.get(item, None)
-
- def __getattr__(self, item):
- """Determine the value of the given attribute."""
- return self._lookup(self.config, item)
-
- def __repr__(self):
- """Provide a string representation of this object for debugging."""
- required_values = [f"{key}: {self.config[key]}" for key in REQUIRED_CONFIG_KEYS]
- return f"ConfigOptions({', '.join(required_values)})"
-
-
-def enable_logging(verbose):
- """Enable verbose logging for execution."""
-
- level = logging.DEBUG if verbose else logging.INFO
- logging.basicConfig(
- format="[%(asctime)s - %(name)s - %(levelname)s] %(message)s",
- level=level,
- stream=sys.stdout,
- )
- structlog.configure(logger_factory=structlog.stdlib.LoggerFactory())
-
-
-def write_file_dict(directory: str, file_dict: Dict[str, str]) -> None:
- """
- Write files in the given dictionary to disk.
-
- The keys of the dictionary should be the filenames to write and the values should be
- the contents to write to each file.
-
- If the given directory does not exist, it will be created.
-
- :param directory: Directory to write files to.
- :param file_dict: Dictionary of files to write.
- """
- for name, contents in file_dict.items():
- write_file_to_dir(directory, name, contents)
-
+ def is_asan_build(self) -> bool:
+ """Determine if this task is an ASAN build."""
+ san_options = self.san_options
+ if san_options:
+ return ASAN_SIGNATURE in san_options
+ return False
-def split_if_exists(str_to_split):
- """Split the given string on "," if it is not None."""
- if str_to_split:
- return str_to_split.split(",")
- return None
+ def get_suite_split_config(self, start_date: datetime, end_date: datetime) -> SuiteSplitConfig:
+ """
+ Get the configuration for splitting suites based on Evergreen expansions.
+
+ :param start_date: Start date for historic stats lookup.
+ :param end_date: End date for historic stats lookup.
+ :return: Configuration to use for splitting suites.
+ """
+ return SuiteSplitConfig(
+ evg_project=self.project,
+ target_resmoke_time=self.target_resmoke_time,
+ max_sub_suites=self.max_sub_suites,
+ max_tests_per_suite=self.max_tests_per_suite,
+ start_date=start_date,
+ end_date=end_date,
+ )
+
+ def get_evg_config_gen_options(self, generated_config_dir: str) -> GenTaskOptions:
+ """
+ Get the configuration for generating tasks from Evergreen expansions.
+
+ :param generated_config_dir: Directory to write generated configuration.
+ :return: Options to use for generating tasks.
+ """
+ return GenTaskOptions(
+ create_misc_suite=True,
+ is_patch=self.is_patch,
+ generated_config_dir=generated_config_dir,
+ use_default_timeouts=False,
+ )
+
+ def get_suite_split_params(self) -> SuiteSplitParameters:
+ """Get the parameters to use for splitting suites."""
+ task = remove_gen_suffix(self.task_name)
+ return SuiteSplitParameters(
+ build_variant=self.build_variant,
+ task_name=task,
+ suite_name=self.suite or task,
+ filename=self.suite or task,
+ test_file_filter=None,
+ is_asan=self.is_asan_build(),
+ )
+
+ def get_gen_params(self) -> "ResmokeGenTaskParams":
+ """Get the parameters to use for generating tasks."""
+ return ResmokeGenTaskParams(
+ use_large_distro=self.use_large_distro, large_distro_name=self.large_distro_name,
+ use_multiversion=self.use_multiversion, repeat_suites=self.resmoke_repeat_suites,
+ resmoke_args=self.resmoke_args, resmoke_jobs_max=self.resmoke_jobs_max,
+ config_location=f"{self.build_variant}/{self.revision}/generate_tasks/"
+ f"{self.task}_gen-{self.build_id}.tgz")
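
config_location points at the archive of generated configuration that the _gen task uploads; with hypothetical expansion values it composes like this:

build_variant, revision = "enterprise-rhel-80-64-bit", "abc123"
task, build_id = "jsCore", "mongodb_mongo_master_xyz"
config_location = f"{build_variant}/{revision}/generate_tasks/{task}_gen-{build_id}.tgz"
assert config_location == (
    "enterprise-rhel-80-64-bit/abc123/generate_tasks/jsCore_gen-mongodb_mongo_master_xyz.tgz")
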
def remove_gen_suffix(task_name):
@@ -282,815 +172,42 @@ def remove_gen_suffix(task_name):
return task_name
-def string_contains_any_of_args(string, args):
- """
- Return whether array contains any of a group of args.
-
- :param string: String being checked.
- :param args: Args being analyzed.
- :return: True if any args are found in the string.
- """
- return any(arg in string for arg in args)
-
-
-def divide_remaining_tests_among_suites(remaining_tests_runtimes, suites):
- """Divide the list of tests given among the suites given."""
- suite_idx = 0
- for test_file, runtime in remaining_tests_runtimes:
- current_suite = suites[suite_idx]
- current_suite.add_test(test_file, runtime)
- suite_idx += 1
- if suite_idx >= len(suites):
- suite_idx = 0
-
-
-def _new_suite_needed(current_suite, test_runtime, max_suite_runtime, max_tests_per_suite):
- """
- Check if a new suite should be created for the given suite.
-
- :param current_suite: Suite currently being added to.
- :param test_runtime: Runtime of test being added.
- :param max_suite_runtime: Max runtime of a single suite.
- :param max_tests_per_suite: Max number of tests in a suite.
- :return: True if a new test suite should be created.
- """
- if current_suite.get_runtime() + test_runtime > max_suite_runtime:
- # Will adding this test put us over the target runtime?
- return True
-
- if max_tests_per_suite and current_suite.get_test_count() + 1 > max_tests_per_suite:
- # Will adding this test put us over the max number of tests?
- return True
-
- return False
-
-
-def divide_tests_into_suites(suite_name, tests_runtimes: List[TestRuntime], max_time_seconds,
- max_suites=None, max_tests_per_suite=None):
- """
- Divide the given tests into suites.
-
- Each suite should be able to execute in less than the max time specified. If a single
- test has a runtime greater than `max_time_seconds`, it will be run in a suite on its own.
-
- If max_suites is reached before assigning all tests to a suite, the remaining tests will be
- divided up among the created suites.
-
- Note: If `max_suites` is hit, suites may have more tests than `max_tests_per_suite` and may have
- runtimes longer than `max_time_seconds`.
-
- :param suite_name: Name of suite being split.
- :param tests_runtimes: List of tuples containing test names and test runtimes.
- :param max_time_seconds: Maximum runtime to add to a single bucket.
- :param max_suites: Maximum number of suites to create.
- :param max_tests_per_suite: Maximum number of tests to add to a single suite.
- :return: List of Suite objects representing grouping of tests.
- """
- suites = []
- Suite.reset_current_index()
- current_suite = Suite(suite_name)
- last_test_processed = len(tests_runtimes)
- LOGGER.debug("Determines suites for runtime", max_runtime_seconds=max_time_seconds,
- max_suites=max_suites, max_tests_per_suite=max_tests_per_suite)
- for idx, (test_file, runtime) in enumerate(tests_runtimes):
- LOGGER.debug("Adding test", test=test_file, test_runtime=runtime)
- if _new_suite_needed(current_suite, runtime, max_time_seconds, max_tests_per_suite):
- LOGGER.debug("Finished suite", suite_runtime=current_suite.get_runtime(),
- test_runtime=runtime, max_time=max_time_seconds)
- if current_suite.get_test_count() > 0:
- suites.append(current_suite)
- current_suite = Suite(suite_name)
- if max_suites and len(suites) >= max_suites:
- last_test_processed = idx
- break
-
- current_suite.add_test(test_file, runtime)
-
- if current_suite.get_test_count() > 0:
- suites.append(current_suite)
-
- if max_suites and last_test_processed < len(tests_runtimes):
- # We must have hit the max suite limit, just randomly add the remaining tests to suites.
- divide_remaining_tests_among_suites(tests_runtimes[last_test_processed:], suites)
-
- return suites
-
-
-def read_suite_config(suite_dir, suite_name) -> Dict[str, Any]:
- """
- Read the given resmoke suite configuration.
-
- :param suite_dir: Directory containing suite configurations.
- :param suite_name: Name of suite to read.
- :return: Configuration of specified suite.
- """
- return read_yaml_file(os.path.join(suite_dir, f"{suite_name}.yml"))
-
-
-def update_suite_config(suite_config, roots=None, excludes=None):
- """
- Update suite config based on the roots and excludes passed in.
-
- :param suite_config: suite_config to update.
- :param roots: new roots to run, or None if roots should not be updated.
- :param excludes: excludes to add, or None if excludes should not be include.
- :return: updated suite_config
- """
- if roots:
- suite_config["selector"]["roots"] = roots
-
- if excludes:
- # This must be a misc file, if the exclude_files section exists, extend it, otherwise,
- # create it.
- if "exclude_files" in suite_config["selector"] and \
- suite_config["selector"]["exclude_files"]:
- suite_config["selector"]["exclude_files"] += excludes
- else:
- suite_config["selector"]["exclude_files"] = excludes
- else:
- # if excludes was not specified this must not a misc file, so don"t exclude anything.
- if "exclude_files" in suite_config["selector"]:
- del suite_config["selector"]["exclude_files"]
-
- return suite_config
-
-
-def generate_resmoke_suite_config(source_config, source_file, roots=None, excludes=None):
- """
- Read and evaluate the yaml suite file.
-
- Override selector.roots and selector.excludes with the provided values. Write the results to
- target_suite_name.
-
- :param source_config: Config of suite to base generated config on.
- :param source_file: Filename of source suite.
- :param roots: Roots used to select tests for split suite.
- :param excludes: Tests that should be excluded from split suite.
- """
- suite_config = update_suite_config(deepcopy(source_config), roots, excludes)
-
- contents = HEADER_TEMPLATE.format(file=__file__, suite_file=source_file)
- contents += yaml.safe_dump(suite_config, default_flow_style=False)
- return contents
-
-
-def render_suite_files(suites: List, suite_name: str, generated_suite_filename: str,
- test_list: List[str], suite_dir, create_misc_suite: bool) -> Dict:
- """
- Render the given list of suites.
-
- This will create a dictionary of all the resmoke config files to create with the
- filename of each file as the key and the contents as the value.
-
- :param suites: List of suites to render.
- :param suite_name: Base name of suites.
- :param generated_suite_filename: The name to use as the file name for generated suite file.
- :param test_list: List of tests used in suites.
- :param suite_dir: Directory containing test suite configurations.
- :param create_misc_suite: Whether or not a _misc suite file should be created.
- :return: Dictionary of rendered resmoke config files.
- """
- # pylint: disable=too-many-arguments
- source_config = read_suite_config(suite_dir, suite_name)
- suite_configs = {
- f"{os.path.basename(suite.name)}.yml": suite.generate_resmoke_config(source_config)
- for suite in suites
- }
- if create_misc_suite:
- suite_configs[f"{generated_suite_filename}_misc.yml"] = generate_resmoke_suite_config(
- source_config, generated_suite_filename, excludes=test_list)
- return suite_configs
-
-
-def calculate_timeout(avg_runtime: float, scaling_factor: int) -> int:
- """
- Determine how long a runtime to set based on average runtime and a scaling factor.
-
- :param avg_runtime: Average runtime of previous runs.
- :param scaling_factor: scaling factor for timeout.
- :return: timeout to use (in seconds).
- """
-
- def round_to_minute(runtime):
- """Round the given seconds up to the nearest minute."""
- distance_to_min = 60 - (runtime % 60)
- return int(math.ceil(runtime + distance_to_min))
-
- return max(MIN_TIMEOUT_SECONDS, round_to_minute(avg_runtime)) * scaling_factor + AVG_SETUP_TIME
-
-
-def should_tasks_be_generated(evg_api, task_id):
- """
- Determine if we should attempt to generate tasks.
-
- If an evergreen task that calls 'generate.tasks' is restarted, the 'generate.tasks' command
- will no-op. So, if we are in that state, we should avoid generating new configuration files
- that will just be confusing to the user (since that would not be used).
-
- :param evg_api: Evergreen API object.
- :param task_id: Id of the task being run.
- :return: Boolean of whether to generate tasks.
- """
- task = evg_api.task_by_id(task_id, fetch_all_executions=True)
- # If any previous execution was successful, do not generate more tasks.
- for i in range(task.execution):
- task_execution = task.get_execution(i)
- if task_execution.is_success():
- return False
-
- return True
-
-
-class TimeoutEstimate(NamedTuple):
- """Runtime estimates used to calculate timeouts."""
-
- max_test_runtime: Optional[float]
- expected_task_runtime: Optional[float]
-
- @classmethod
- def no_timeouts(cls) -> "TimeoutEstimate":
- """Create an instance with no estimation data."""
- return cls(max_test_runtime=None, expected_task_runtime=None)
-
- def calculate_test_timeout(self, repeat_factor: int) -> Optional[int]:
- """
- Calculate the timeout to use for tests.
-
- :param repeat_factor: How many times the suite will be repeated.
- :return: Timeout value to use for tests.
- """
- if self.max_test_runtime is None:
- return None
-
- timeout = calculate_timeout(self.max_test_runtime, 3) * repeat_factor
- LOGGER.debug("Setting timeout", timeout=timeout, max_runtime=self.max_test_runtime,
- factor=repeat_factor)
- return timeout
-
- def calculate_task_timeout(self, repeat_factor: int) -> Optional[int]:
- """
- Calculate the timeout to use for tasks.
-
- :param repeat_factor: How many times the suite will be repeated.
- :return: Timeout value to use for tasks.
- """
- if self.expected_task_runtime is None:
- return None
-
- exec_timeout = calculate_timeout(self.expected_task_runtime, 3) * repeat_factor
- LOGGER.debug("Setting exec_timeout", exec_timeout=exec_timeout,
- suite_runtime=self.expected_task_runtime, factor=repeat_factor)
- return exec_timeout
-
- def generate_timeout_cmd(self, is_patch: bool, repeat_factor: int,
- use_default: bool = False) -> TimeoutInfo:
- """
- Create the timeout info to use to create a timeout shrub command.
-
- :param is_patch: Whether the command is being created in a patch build.
- :param repeat_factor: How many times the suite will be repeated.
- :param use_default: Should the default timeout be used.
- :return: Timeout info for the task.
- """
-
- if (self.max_test_runtime is None and self.expected_task_runtime is None) or use_default:
- return TimeoutInfo.default_timeout()
-
- test_timeout = self.calculate_test_timeout(repeat_factor)
- task_timeout = self.calculate_task_timeout(repeat_factor)
-
- if is_patch and (test_timeout > MAX_EXPECTED_TIMEOUT
- or task_timeout > MAX_EXPECTED_TIMEOUT):
- frameinfo = getframeinfo(currentframe())
- LOGGER.error(
- "This task looks like it is expected to run far longer than normal. This is "
- "likely due to setting the suite 'repeat' value very high. If you are sure "
- "this is something you want to do, comment this check out in your patch build "
- "and resubmit", repeat_value=repeat_factor, timeout=test_timeout,
- exec_timeout=task_timeout, code_file=frameinfo.filename, code_line=frameinfo.lineno,
- max_timeout=MAX_EXPECTED_TIMEOUT)
- raise ValueError("Failing due to expected runtime.")
-
- return TimeoutInfo.overridden(timeout=test_timeout, exec_timeout=task_timeout)
-
-
-class Suite(object):
- """A suite of tests that can be run by evergreen."""
-
- _current_index = 0
-
- def __init__(self, source_name: str) -> None:
- """
- Initialize the object.
-
- :param source_name: Base name of suite.
- """
- self.tests = []
- self.total_runtime = 0
- self.max_runtime = 0
- self.tests_with_runtime_info = 0
- self.source_name = source_name
- self.task_overhead = 0
-
- self.index = Suite._current_index
- Suite._current_index += 1
-
- @classmethod
- def reset_current_index(cls):
- """Reset the current index."""
- Suite._current_index = 0
-
- def add_test(self, test_file: str, runtime: float):
- """Add the given test to this suite."""
-
- self.tests.append(test_file)
- self.total_runtime += runtime
-
- if runtime != 0:
- self.tests_with_runtime_info += 1
-
- if runtime > self.max_runtime:
- self.max_runtime = runtime
-
- def should_overwrite_timeout(self) -> bool:
- """
- Whether the timeout for this suite should be overwritten.
-
- We should only overwrite the timeout if we have runtime info for all tests.
- """
- return len(self.tests) == self.tests_with_runtime_info
-
- def get_timeout_estimate(self) -> TimeoutEstimate:
- """Get the estimated runtime of this task to for timeouts."""
- if self.should_overwrite_timeout():
- return TimeoutEstimate(max_test_runtime=self.max_runtime,
- expected_task_runtime=self.total_runtime + self.task_overhead)
- return TimeoutEstimate.no_timeouts()
-
- def get_runtime(self):
- """Get the current average runtime of all the tests currently in this suite."""
- return self.total_runtime
-
- def get_test_count(self):
- """Get the number of tests currently in this suite."""
- return len(self.tests)
-
- @property
- def name(self) -> str:
- """Get the name of this suite."""
- return taskname.name_generated_task(self.source_name, self.index, Suite._current_index)
-
- def generate_resmoke_config(self, source_config: Dict) -> str:
- """
- Generate the contents of resmoke config for this suite.
-
- :param source_config: Resmoke config to base generate config on.
- :return: Resmoke config to run this suite.
- """
- suite_config = update_suite_config(deepcopy(source_config), roots=self.tests)
- contents = HEADER_TEMPLATE.format(file=__file__, suite_file=self.source_name)
- contents += yaml.safe_dump(suite_config, default_flow_style=False)
- return contents
-
-
-class EvergreenConfigGenerator(object):
- """Generate evergreen configurations."""
-
- def __init__(self, suites: List[Suite], options: ConfigOptions, evg_api: EvergreenApi,
- generate_config: Optional[GenerationConfiguration] = None) -> None:
- """
- Create new EvergreenConfigGenerator object.
-
- :param suites: The suite the Evergreen config will be generated for.
- :param options: The ConfigOptions object containing the config file values.
- :param evg_api: Evergreen API object.
- :param generate_config: Configuration about how generation should be done.
- """
- self.suites = suites
- self.options = options
- self.gen_config = GenerationConfiguration.default_config()
- if generate_config:
- self.gen_config = generate_config
- self.evg_api = evg_api
- self.task_specs = []
- self.task_names = []
- self.build_tasks = None
-
- def _get_distro(self, build_variant: str) -> Optional[Sequence[str]]:
- """
- Get the distros that the tasks should be run on.
-
- :param build_variant: Name of build variant being generated.
- :return: List of distros to run on.
- """
- if self.options.use_large_distro:
- if self.options.large_distro_name:
- return [self.options.large_distro_name]
-
- if build_variant not in self.gen_config.build_variant_large_distro_exceptions:
- print(NO_LARGE_DISTRO_ERR.format(build_variant=build_variant))
- raise ValueError("Invalid Evergreen Configuration")
- return None
-
- def _generate_resmoke_args(self, suite_file: str) -> str:
- """
- Generate the resmoke args for the given suite.
-
- :param suite_file: File containing configuration for test suite.
- :return: arguments to pass to resmoke.
- """
- resmoke_args = (f"--suite={suite_file}.yml --originSuite={self.options.suite} "
- f" {self.options.resmoke_args}")
- if self.options.repeat_suites and not string_contains_any_of_args(
- resmoke_args, ["repeatSuites", "repeat"]):
- resmoke_args += f" --repeatSuites={self.options.repeat_suites} "
-
- return resmoke_args
-
- def _get_run_tests_vars(self, suite_file: str) -> Dict[str, Any]:
- """
- Generate a dictionary of the variables to pass to the task.
-
- :param suite_file: Suite being run.
- :return: Dictionary containing variables and value to pass to generated task.
- """
- variables = {
- "resmoke_args": self._generate_resmoke_args(suite_file),
- "run_multiple_jobs": self.options.run_multiple_jobs,
- "task": self.options.run_tests_task,
- "build_variant": self.options.run_tests_build_variant,
- "build_id": self.options.run_tests_build_id,
- }
-
- if self.options.resmoke_jobs_max:
- variables["resmoke_jobs_max"] = self.options.resmoke_jobs_max
-
- if self.options.use_multiversion:
- variables["task_path_suffix"] = self.options.use_multiversion
-
- return variables
-
- @staticmethod
- def _is_task_dependency(task: str, possible_dependency: str) -> Optional[Match[str]]:
- """
- Determine if the given possible_dependency belongs to the given task.
-
- :param task: Name of dependency being checked.
- :param possible_dependency: Task to check if dependency.
- :return: None is task is not a dependency.
- """
- return re.match(f"{task}_(\\d|misc)", possible_dependency)
-
- def _get_tasks_for_depends_on(self, dependent_task: str) -> List[str]:
- """
- Get a list of tasks that belong to the given dependency.
-
- :param dependent_task: Dependency to check.
- :return: List of tasks that are a part of the given dependency.
- """
- return [
- str(task.display_name) for task in self.build_tasks
- if self._is_task_dependency(dependent_task, str(task.display_name))
- ]
-
- def _get_dependencies(self) -> Set[TaskDependency]:
- """Get the set of dependency tasks for these suites."""
- dependencies = {TaskDependency("archive_dist_test_debug")}
- if not self.options.is_patch:
- # Don"t worry about task dependencies in patch builds, only mainline.
- if self.options.depends_on:
- for dep in self.options.depends_on:
- depends_on_tasks = self._get_tasks_for_depends_on(dep)
- for dependency in depends_on_tasks:
- dependencies.add(TaskDependency(dependency))
-
- return dependencies
-
- def _generate_task(self, sub_suite_name: str, sub_task_name: str, target_dir: str,
- timeout_est: TimeoutEstimate) -> Task:
- """
- Generate a shrub evergreen config for a resmoke task.
-
- :param sub_suite_name: Name of suite being generated.
- :param sub_task_name: Name of task to generate.
- :param target_dir: Directory containing generated suite files.
- :param timeout_est: Estimated runtime to use for calculating timeouts.
- :return: Shrub configuration for the described task.
- """
- # pylint: disable=too-many-arguments
- LOGGER.debug("Generating task", sub_suite=sub_suite_name)
-
- # Evergreen always uses a unix shell, even on Windows, so instead of using os.path.join
- # here, just use the forward slash; otherwise the path separator will be treated as
- # the escape character on Windows.
- target_suite_file = '/'.join([target_dir, os.path.basename(sub_suite_name)])
- run_tests_vars = self._get_run_tests_vars(target_suite_file)
-
- use_multiversion = self.options.use_multiversion
- timeout_cmd = timeout_est.generate_timeout_cmd(
- self.options.is_patch, self.options.repeat_suites, self.options.use_default_timeouts)
- commands = resmoke_commands("run generated tests", run_tests_vars, timeout_cmd,
- use_multiversion)
-
- return Task(sub_task_name, commands, self._get_dependencies())
-
- def _create_sub_task(self, idx: int, suite: Suite) -> Task:
- """
- Create the sub task for the given suite.
-
- :param idx: Index of suite to created.
- :param suite: Suite to create.
- :return: Shrub configuration for the suite.
- """
- sub_task_name = taskname.name_generated_task(self.options.task, idx, len(self.suites),
- self.options.variant)
- return self._generate_task(suite.name, sub_task_name, self.options.generated_config_dir,
- suite.get_timeout_estimate())
-
- def _generate_all_tasks(self) -> Set[Task]:
- """Get a set of shrub task for all the sub tasks."""
- tasks = {self._create_sub_task(idx, suite) for idx, suite in enumerate(self.suites)}
-
- if self.options.create_misc_suite:
- # Add the misc suite
- misc_suite_name = f"{os.path.basename(self.options.generated_suite_filename)}_misc"
- misc_task_name = f"{self.options.task}_misc_{self.options.variant}"
- tasks.add(
- self._generate_task(misc_suite_name,
- misc_task_name, self.options.generated_config_dir,
- TimeoutEstimate.no_timeouts()))
-
- return tasks
-
- def generate_config(self, build_variant: BuildVariant) -> None:
- """
- Generate evergreen configuration.
-
- :param build_variant: Build variant to add generated configuration to.
- """
- self.build_tasks = self.evg_api.tasks_by_build(self.options.build_id)
-
- tasks = self._generate_all_tasks()
- generating_task = {ExistingTask(task_name) for task_name in self.options.gen_task_set}
- distros = self._get_distro(build_variant.name)
- build_variant.display_task(GEN_PARENT_TASK, execution_existing_tasks=generating_task)
- build_variant.display_task(self.options.display_task_name, execution_tasks=tasks,
- distros=distros)
-
-
-class GenerateSubSuites(object):
- """Orchestrate the execution of generate_resmoke_suites."""
-
- def __init__(self, evergreen_api: EvergreenApi, config_options: ConfigOptions,
- generate_config: Optional[GenerationConfiguration] = None) -> None:
- """
- Initialize the object.
-
- :param evergreen_api: Evergreen API client.
- :param config_options: Generation configuration options.
- :param generate_config: Configuration for how generate tasks.
- """
- self.evergreen_api = evergreen_api
- self.config_options = config_options
- self.generate_options = GenerationConfiguration.default_config()
- if generate_config:
- self.generate_options = generate_config
- self.test_list = []
-
- # Populate config values for methods like list_tests()
- _parser.set_run_options()
-
- def get_suite_config(self) -> Dict[str, Any]:
- """Get the configuration of the suite being generated."""
- return read_suite_config(self.config_options.test_suites_dir, self.config_options.suite)
-
- def calculate_suites(self, start_date: datetime, end_date: datetime) -> List[Suite]:
- """
- Divide tests into suites based on statistics for the provided period.
-
- :param start_date: Time to start historical analysis.
- :param end_date: Time to end historical analysis.
- :return: List of sub suites to be generated.
- """
- try:
- evg_stats = HistoricTaskData.from_evg(self.evergreen_api, self.config_options.project,
- start_date, end_date, self.config_options.task,
- self.config_options.variant)
- if not evg_stats:
- LOGGER.debug("No test history, using fallback suites")
- # This is probably a new suite, since there is no test history, just use the
- # fallback values.
- return self.calculate_fallback_suites()
- target_execution_time_secs = self.config_options.target_resmoke_time * 60
- return self.calculate_suites_from_evg_stats(evg_stats, target_execution_time_secs)
- except requests.HTTPError as err:
- if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
- # Evergreen may return a 503 when the service is degraded.
- # We fall back to splitting the tests into a fixed number of suites.
- LOGGER.warning("Received 503 from Evergreen, "
- "dividing the tests evenly among suites")
- return self.calculate_fallback_suites()
- else:
- raise
-
- def calculate_suites_from_evg_stats(self, test_stats: HistoricTaskData,
- execution_time_secs: int) -> List[Suite]:
- """
- Divide tests into suites that can be run in less than the specified execution time.
-
- :param test_stats: Historical test results for task being split.
- :param execution_time_secs: Target execution time of each suite (in seconds).
- :return: List of sub suites calculated.
- """
- tests_runtimes = self.filter_tests(test_stats.get_tests_runtimes())
- if not tests_runtimes:
- LOGGER.debug("No test runtimes after filter, using fallback")
- return self.calculate_fallback_suites()
-
- self.test_list = [info.test_name for info in tests_runtimes]
- suites = divide_tests_into_suites(
- self.config_options.generated_suite_filename, tests_runtimes, execution_time_secs,
- self.config_options.max_sub_suites, self.config_options.max_tests_per_suite)
-
- self.add_task_hook_overhead(suites, test_stats)
-
- return suites
-
- def _get_hook_config(self, hook_name: str) -> Optional[Dict[str, Any]]:
- """
- Get the configuration for the given hook.
-
- :param hook_name: Name of hook to query.
- :return: Configuration for hook, if it exists.
- """
- hooks_config = self.get_suite_config().get("executor", {}).get("hooks")
- if hooks_config:
- for hook in hooks_config:
- if hook.get("class") == hook_name:
- return hook
-
- return None
-
- def _is_asan_build(self) -> bool:
- """Determine if this task is being generated on a ASAN build."""
- san_options = self.config_options.san_options
- if san_options:
- return ASAN_SIGNATURE in san_options
- return False
-
- def _get_clean_every_n_cadence(self) -> int:
- """Get the N value for the CleanEveryN hook."""
- # Default to 1, which is the worst case meaning CleanEveryN would run for every test.
- clean_every_n_cadence = 1
- if self._is_asan_build():
- # ASAN runs hard-code N to 1. See `resmokelib/testing/hooks/cleanup.py`.
- return clean_every_n_cadence
-
- clean_every_n_config = self._get_hook_config(CLEAN_EVERY_N_HOOK)
- if clean_every_n_config:
- clean_every_n_cadence = clean_every_n_config.get("n", 1)
-
- return clean_every_n_cadence
-
- def add_task_hook_overhead(self, suites: List[Suite], historic_stats: HistoricTaskData) -> None:
- """
- Add how much overhead task-level hooks each suite should account for.
-
- Certain test hooks need to be accounted for on the task level instead of the test level
- in order to calculate accurate timeouts. So we will add details about those hooks to
- each suite here.
-
- :param suites: List of suites that were created.
- :param historic_stats: Historic runtime data of the suite.
- """
- # The CleanEveryN hook is run every 'N' tests. The runtime of the
- # hook will be associated with whichever test happens to be running, which could be
- # different every run. So we need to take its runtime into account at the task level.
- clean_every_n_cadence = self._get_clean_every_n_cadence()
- avg_clean_every_n_runtime = historic_stats.get_avg_hook_runtime(CLEAN_EVERY_N_HOOK)
- LOGGER.info("task hook overhead", cadence=clean_every_n_cadence,
- runtime=avg_clean_every_n_runtime)
- if avg_clean_every_n_runtime != 0:
- for suite in suites:
- n_expected_runs = suite.get_test_count() / clean_every_n_cadence
- suite.task_overhead += n_expected_runs * avg_clean_every_n_runtime
-
- def filter_tests(self, tests_runtimes: List[TestRuntime]) -> List[TestRuntime]:
- """
- Filter relevant tests.
-
- :param tests_runtimes: List of tuples containing test names and test runtimes.
- :return: Filtered TestRuntime objects indicating tests to be run.
- """
- tests_runtimes = self.filter_existing_tests(tests_runtimes)
- if self.config_options.selected_tests_to_run:
- tests_runtimes = filter_specified_tests(self.config_options.selected_tests_to_run,
- tests_runtimes)
- return tests_runtimes
-
- def filter_existing_tests(self, tests_runtimes: List[TestRuntime]) -> List[TestRuntime]:
- """Filter out tests that do not exist in the filesystem."""
- all_tests = [normalize_test_name(test) for test in self.list_tests()]
- return [
- info for info in tests_runtimes
- if os.path.exists(info.test_name) and info.test_name in all_tests
- ]
-
- def calculate_fallback_suites(self) -> List[Suite]:
- """Divide tests into a fixed number of suites."""
- LOGGER.debug("Splitting tasks based on fallback",
- fallback=self.config_options.fallback_num_sub_suites)
- self.test_list = self.list_tests()
- num_suites = min(self.config_options.fallback_num_sub_suites, len(self.test_list),
- self.config_options.max_sub_suites)
- suites = [Suite(self.config_options.generated_suite_filename) for _ in range(num_suites)]
- for idx, test_file in enumerate(self.test_list):
- suites[idx % num_suites].add_test(test_file, 0)
- return suites
-
- def list_tests(self) -> List[str]:
- """List the test files that are part of the suite being split."""
- suite_config = suitesconfig.get_suite(self.config_options.suite)
- test_list = []
- for tests in suite_config.tests:
- # `tests` could return individual tests or lists of tests, we need to handle both.
- if isinstance(tests, list):
- test_list.extend(tests)
- else:
- test_list.append(tests)
-
- return test_list
-
- def add_suites_to_build_variant(self, suites: List[Suite], build_variant: BuildVariant) -> None:
- """
- Add the given suites to the build variant specified.
+class EvgGenResmokeTaskOrchestrator:
+ """Orchestrator for generating an resmoke tasks."""
- :param suites: Suites to add.
- :param build_variant: Build variant to add suite to.
+ @inject.autoparams()
+ def __init__(self, gen_task_validation: GenTaskValidationService,
+ gen_task_options: GenTaskOptions) -> None:
"""
- EvergreenConfigGenerator(suites, self.config_options, self.evergreen_api,
- self.generate_options).generate_config(build_variant)
+ Initialize the orchestrator.
- def generate_task_config(self, suites: List[Suite]) -> BuildVariant:
+ :param gen_task_validation: Generate tasks validation service.
+ :param gen_task_options: Options for how tasks are generated.
"""
- Generate the evergreen configuration for the new suite.
+ self.gen_task_validation = gen_task_validation
+ self.gen_task_options = gen_task_options
- :param suites: The suite the generated Evergreen config will be generated for.
+ def generate_task(self, task_id: str, split_params: SuiteSplitParameters,
+ gen_params: ResmokeGenTaskParams) -> None:
"""
- build_variant = BuildVariant(self.config_options.variant)
- self.add_suites_to_build_variant(suites, build_variant)
- return build_variant
+ Generate the specified resmoke task.
- def generate_suites_config(self, suites: List[Suite]) -> Dict:
+ :param task_id: Task ID of generating task.
+ :param split_params: Parameters describing how the task should be split.
+ :param gen_params: Parameters describing how the task should be generated.
"""
- Generate the suites files and evergreen configuration for the generated task.
-
- :return: The suites files and evergreen configuration for the generated task.
- """
- return render_suite_files(suites, self.config_options.suite,
- self.config_options.generated_suite_filename, self.test_list,
- self.config_options.test_suites_dir,
- self.config_options.create_misc_suite)
-
- def get_suites(self) -> List[Suite]:
- """
- Generate the suites files and evergreen configuration for the generated task.
-
- :return: The suites files and evergreen configuration for the generated task.
- """
- end_date = datetime.datetime.utcnow().replace(microsecond=0)
- start_date = end_date - datetime.timedelta(days=LOOKBACK_DURATION_DAYS)
- return self.calculate_suites(start_date, end_date)
-
- def run(self):
- """Generate resmoke suites that run within a target execution time and write to disk."""
- LOGGER.debug("config options", config_options=self.config_options)
- if not should_tasks_be_generated(self.evergreen_api, self.config_options.task_id):
+ LOGGER.debug("config options", split_params=split_params, gen_params=gen_params)
+ if not self.gen_task_validation.should_task_be_generated(task_id):
LOGGER.info("Not generating configuration due to previous successful generation.")
return
- suites = self.get_suites()
- LOGGER.debug("Creating suites", num_suites=len(suites), task=self.config_options.task,
- dir=self.config_options.generated_config_dir)
-
- config_dict_of_suites = self.generate_suites_config(suites)
-
- shrub_config = ShrubProject.empty()
- shrub_config.add_build_variant(self.generate_task_config(suites))
+ builder = EvgConfigBuilder() # pylint: disable=no-value-for-parameter
- config_dict_of_suites[self.config_options.task + ".json"] = shrub_config.json()
- write_file_dict(self.config_options.generated_config_dir, config_dict_of_suites)
-
-
-def filter_specified_tests(specified_tests: Set[str], tests_runtimes: List[TestRuntime]):
- """
- Filter out tests that have not been specified in the specified tests config option.
-
- :param specified_tests: List of test files that should be run.
- :param tests_runtimes: List of tuples containing test names and test runtimes.
- :return: List of TestRuntime tuples that match specified_tests.
- """
- return [info for info in tests_runtimes if info.test_name in specified_tests]
+ builder.generate_suite(split_params, gen_params)
+ builder.add_display_task(GEN_PARENT_TASK, {f"{split_params.task_name}{GEN_SUFFIX}"},
+ split_params.build_variant)
+ generated_config = builder.build(split_params.task_name + ".json")
+ generated_config.write_all_to_dir(self.gen_task_options.generated_config_dir)
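
A hedged usage sketch of the orchestrator, assuming the injector has already been configured as in main() below; all values are invented, and the parameter fields simply mirror the EvgExpansions helpers above:

orchestrator = EvgGenResmokeTaskOrchestrator()  # pylint: disable=no-value-for-parameter
orchestrator.generate_task(
    "task_abc123",
    SuiteSplitParameters(build_variant="enterprise-rhel-80-64-bit", task_name="jsCore",
                         suite_name="core", filename="core", test_file_filter=None,
                         is_asan=False),
    ResmokeGenTaskParams(use_large_distro=False, large_distro_name=None,
                         use_multiversion=None, repeat_suites=1, resmoke_args="",
                         resmoke_jobs_max=None,
                         config_location="variant/abc123/generate_tasks/jsCore_gen-b.tgz"))
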
@click.command()
@@ -1099,7 +216,7 @@ def filter_specified_tests(specified_tests: Set[str], tests_runtimes: List[TestR
@click.option("--evergreen-config", type=str, default=EVG_CONFIG_FILE,
help="Location of evergreen configuration file.")
@click.option("--verbose", is_flag=True, default=False, help="Enable verbose logging.")
-def main(expansion_file, evergreen_config, verbose):
+def main(expansion_file: str, evergreen_config: str, verbose: bool) -> None:
"""
Create a configuration for generate tasks to create sub suites for the specified resmoke suite.
@@ -1110,12 +227,29 @@ def main(expansion_file, evergreen_config, verbose):
:param verbose: Use verbose logging.
"""
enable_logging(verbose)
- evg_api = RetryingEvergreenApi.get_api(config_file=evergreen_config)
- generate_config = GenerationConfiguration.from_yaml_file(GENERATE_CONFIG_FILE)
- config_options = ConfigOptions.from_file(expansion_file, REQUIRED_CONFIG_KEYS,
- DEFAULT_CONFIG_VALUES, CONFIG_FORMAT_FN)
- GenerateSubSuites(evg_api, config_options, generate_config).run()
+ end_date = datetime.utcnow().replace(microsecond=0)
+ start_date = end_date - timedelta(days=LOOKBACK_DURATION_DAYS)
+
+ evg_expansions = EvgExpansions.from_yaml_file(expansion_file)
+
+ def dependencies(binder: inject.Binder) -> None:
+ binder.bind(SuiteSplitConfig, evg_expansions.get_suite_split_config(start_date, end_date))
+ binder.bind(SplitStrategy, greedy_division)
+ binder.bind(FallbackStrategy, round_robin_fallback)
+ binder.bind(GenTaskOptions, evg_expansions.get_evg_config_gen_options(GENERATED_CONFIG_DIR))
+ binder.bind(EvergreenApi, RetryingEvergreenApi.get_api(config_file=evergreen_config))
+ binder.bind(GenerationConfiguration,
+ GenerationConfiguration.from_yaml_file(GENERATE_CONFIG_FILE))
+ binder.bind(ResmokeProxyConfig,
+ ResmokeProxyConfig(resmoke_suite_dir=DEFAULT_TEST_SUITE_DIR))
+
+ inject.configure(dependencies)
+
+ gen_task_orchestrator = EvgGenResmokeTaskOrchestrator() # pylint: disable=no-value-for-parameter
+ gen_task_orchestrator.generate_task(evg_expansions.task_id,
+ evg_expansions.get_suite_split_params(),
+ evg_expansions.get_gen_params())
if __name__ == "__main__":
diff --git a/buildscripts/patch_builds/selected_tests/__init__.py b/buildscripts/patch_builds/selected_tests/__init__.py
new file mode 100644
index 00000000000..4b7a2bb941b
--- /dev/null
+++ b/buildscripts/patch_builds/selected_tests/__init__.py
@@ -0,0 +1 @@
+"""Empty."""
diff --git a/buildscripts/patch_builds/selected_tests/selected_tests_client.py b/buildscripts/patch_builds/selected_tests/selected_tests_client.py
new file mode 100644
index 00000000000..afb50730df4
--- /dev/null
+++ b/buildscripts/patch_builds/selected_tests/selected_tests_client.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python3
+"""Client for accessing selected test app."""
+
+from typing import Set, List, Dict, Any
+from urllib.parse import urlparse
+
+import requests
+import yaml
+from pydantic import BaseModel
+
+
+class TestFileInstance(BaseModel):
+ """
+ Frequency of how often a test file was seen.
+
+ name: Name of test file.
+ test_file_seen_count: Occurrences of test file.
+ """
+
+ name: str
+ test_file_seen_count: int
+
+
+class TestMapping(BaseModel):
+ """
+ How tests map to the specified source files.
+
+ branch: Git branch being analyzed.
+ project: Evergreen project being analyzed.
+ repo: Git repo being analyzed.
+ source_file: Source file of mappings.
+ source_file_seen_count: Number of occurrences of source file.
+ test_files: Test files that have been changed with the source file.
+ """
+
+ branch: str
+ project: str
+ repo: str
+ source_file: str
+ source_file_seen_count: int
+ test_files: List[TestFileInstance]
+
+
+class TestMappingsResponse(BaseModel):
+ """
+ Response from the test mappings end point.
+
+ test_mappings: List of source files with correlated test files.
+ """
+
+ test_mappings: List[TestMapping]
+
+
+class TaskMapInstance(BaseModel):
+ """
+ Frequency of how often a task is impacted by a source file change.
+
+ name: Name of task that was impacted.
+ variant: Name of build variant task was run on.
+ flip_count: Number of times the task was impacted by the source file.
+ """
+
+ name: str
+ variant: str
+ flip_count: int
+
+
+class TaskMapping(BaseModel):
+ """
+ How tasks map to the specified source files.
+
+ branch: Git branch being analyzed.
+ project: Evergreen project being analyzed.
+ repo: Git repo being analyzed.
+ source_file: Source file of mappings.
+ source_file_seen_count: Number of occurrences of source file.
+ tasks: Tasks that have been impacted by the source file.
+ """
+
+ branch: str
+ project: str
+ repo: str
+ source_file: str
+ source_file_seen_count: int
+ tasks: List[TaskMapInstance]
+
+
+class TaskMappingsResponse(BaseModel):
+ """
+ Response from the task mappings end point.
+
+ task_mappings: List of source files with correlated tasks.
+ """
+
+ task_mappings: List[TaskMapping]
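
For illustration, a hypothetical task-mappings payload as the service might return it, validated through these pydantic models (all values invented):

payload = {
    "task_mappings": [{
        "branch": "master", "project": "mongodb-mongo-master", "repo": "mongo",
        "source_file": "src/mongo/db/commands/find_cmd.cpp",
        "source_file_seen_count": 12,
        "tasks": [{"name": "aggregation", "variant": "enterprise-rhel-80-64-bit",
                   "flip_count": 3}],
    }]
}
response = TaskMappingsResponse(**payload)
assert response.task_mappings[0].tasks[0].name == "aggregation"
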
+
+
+class SelectedTestsClient(object):
+ """Selected-tests client object."""
+
+ def __init__(self, url: str, project: str, auth_user: str, auth_token: str) -> None:
+ """
+ Create selected-tests client object.
+
+ :param url: Selected-tests service url.
+ :param project: Selected-tests service project.
+ :param auth_user: Selected-tests service auth user to authenticate request.
+ :param auth_token: Selected-tests service auth token to authenticate request.
+ """
+ self.url = url
+ self.project = project
+ self.session = requests.Session()
+ adapter = requests.adapters.HTTPAdapter()
+ self.session.mount(f"{urlparse(self.url).scheme}://", adapter)
+ self.session.cookies.update({"auth_user": auth_user, "auth_token": auth_token})
+ self.session.headers.update(
+ {"Content-type": "application/json", "Accept": "application/json"})
+
+ @classmethod
+ def from_file(cls, filename: str) -> "SelectedTestsClient":
+ """
+ Read config from given filename.
+
+ :param filename: Filename to read config.
+ :return: Config read from file.
+ """
+ with open(filename, 'r') as fstream:
+ config = yaml.safe_load(fstream)
+ if config:
+ return cls(config["url"], config["project"], config["auth_user"],
+ config["auth_token"])
+
+ raise ValueError(f"Could not determine selected tests configuration from {filename}")
+
+ def _call_api(self, endpoint: str, payload: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Make a call to the selected tests service and return the response.
+
+ :param endpoint: Endpoint to call.
+ :param payload: Payload to call with.
+ :return: Response from service.
+ """
+ url = f"{self.url}{endpoint}"
+ response = self.session.get(url, params=payload)
+ response.raise_for_status()
+
+ return response.json()
+
+ def get_test_mappings(self, threshold: float, changed_files: Set[str]) -> TestMappingsResponse:
+ """
+ Request related test files from selected-tests service.
+
+ :param threshold: Threshold for test file correlation.
+        :param changed_files: Set of changed files.
+ :return: Related test files returned by selected-tests service.
+ """
+ payload = {"threshold": threshold, "changed_files": ",".join(changed_files)}
+ response = self._call_api(f"/projects/{self.project}/test-mappings", payload)
+ return TestMappingsResponse(**response)
+
+ def get_task_mappings(self, threshold: float, changed_files: Set[str]) -> TaskMappingsResponse:
+ """
+ Request related tasks from selected-tests service.
+
+        :param threshold: Threshold for task correlation.
+        :param changed_files: Set of changed files.
+ :return: Related tasks returned by selected-tests service.
+ """
+ payload = {"threshold": threshold, "changed_files": ",".join(changed_files)}
+ response = self._call_api(f"/projects/{self.project}/task-mappings", payload)
+ return TaskMappingsResponse(**response)
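
For illustration, a minimal sketch of how this client might be exercised; the URL, project name, and credentials below are hypothetical, and real values come from the YAML file consumed by from_file():

from buildscripts.patch_builds.selected_tests.selected_tests_client import SelectedTestsClient

client = SelectedTestsClient(url="https://selected-tests.example.com",
                             project="mongodb-mongo-master", auth_user="me", auth_token="secret")

# Ask which test files historically change together with these source files.
response = client.get_test_mappings(threshold=0.1, changed_files={"src/mongo/db/commands.cpp"})
for mapping in response.test_mappings:
    for test_file in mapping.test_files:
        print(test_file.name, test_file.test_file_seen_count)

Because the responses are now pydantic models rather than raw dicts, malformed payloads fail fast at parse time instead of surfacing later as KeyErrors.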
diff --git a/buildscripts/patch_builds/selected_tests/selected_tests_service.py b/buildscripts/patch_builds/selected_tests/selected_tests_service.py
new file mode 100644
index 00000000000..88e1355b33a
--- /dev/null
+++ b/buildscripts/patch_builds/selected_tests/selected_tests_service.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+"""Selected Tests service."""
+from typing import Set
+
+import inject
+
+from buildscripts.burn_in_tests import is_file_a_test_file
+from buildscripts.patch_builds.selected_tests.selected_tests_client import SelectedTestsClient
+
+DEFAULT_THRESHOLD = 0
+
+
+class SelectedTestsService:
+ """A service for interacting with selected tests."""
+
+ @inject.autoparams()
+ def __init__(self, selected_tests_client: SelectedTestsClient) -> None:
+ """
+ Initialize the service.
+
+ :param selected_tests_client: Client to query selected tests.
+ """
+ self.selected_tests_client = selected_tests_client
+
+ def find_selected_test_files(self, changed_files: Set[str]) -> Set[str]:
+ """
+        Request related test files from the selected-tests service and filter out invalid files.
+
+        :param changed_files: Set of changed files.
+        :return: Set of valid test files returned by the selected-tests service.
+ """
+ test_mappings = self.selected_tests_client.get_test_mappings(DEFAULT_THRESHOLD,
+ changed_files)
+ return {
+ test_file.name
+ for test_mapping in test_mappings.test_mappings for test_file in test_mapping.test_files
+ if is_file_a_test_file(test_file.name)
+ }
+
+ def find_selected_tasks(self, changed_files: Set[str]) -> Set[str]:
+ """
+        Request related tasks from the selected-tests service.
+
+        :param changed_files: Set of changed files.
+        :return: Set of task names returned by the selected-tests service.
+ """
+ task_mappings = self.selected_tests_client.get_task_mappings(DEFAULT_THRESHOLD,
+ changed_files)
+ return {
+ task.name
+ for task_mapping in task_mappings.task_mappings for task in task_mapping.tasks
+ }
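
A sketch of how the service resolves its client through inject; the config filename is hypothetical, and this assumes it runs from a mongo repo checkout:

import inject

from buildscripts.patch_builds.selected_tests.selected_tests_client import SelectedTestsClient
from buildscripts.patch_builds.selected_tests.selected_tests_service import SelectedTestsService

def dependencies(binder: inject.Binder) -> None:
    # Bind the client; @inject.autoparams() on the service constructor picks it up.
    binder.bind(SelectedTestsClient, SelectedTestsClient.from_file("selected_tests_config.yml"))

inject.configure(dependencies)

service = SelectedTestsService()  # pylint: disable=no-value-for-parameter
print(service.find_selected_test_files({"src/mongo/db/commands.cpp"}))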
diff --git a/buildscripts/patch_builds/selected_tests_service.py b/buildscripts/patch_builds/selected_tests_service.py
deleted file mode 100644
index 72292601c6f..00000000000
--- a/buildscripts/patch_builds/selected_tests_service.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python3
-"""Selected Tests service."""
-
-from typing import Any, Dict, Set
-
-import requests
-import yaml
-
-# pylint: disable=wrong-import-position
-from buildscripts.burn_in_tests import is_file_a_test_file
-
-
-class SelectedTestsService(object):
- """Selected-tests client object."""
-
- def __init__(self, url: str, project: str, auth_user: str, auth_token: str):
- """
- Create selected-tests client object.
-
- :param url: Selected-tests service url.
- :param project: Selected-tests service project.
- :param auth_user: Selected-tests service auth user to authenticate request.
- :param auth_token: Selected-tests service auth token to authenticate request.
- """
- self.url = url
- self.project = project
- self.auth_user = auth_user
- self.auth_token = auth_token
- self.headers = {"Content-type": "application/json", "Accept": "application/json"}
- self.cookies = {"auth_user": auth_user, "auth_token": auth_token}
-
- @classmethod
- def from_file(cls, filename: str):
- """
- Read config from given filename.
-
- :param filename: Filename to read config.
- :return: Config read from file.
- """
- with open(filename, 'r') as fstream:
- config = yaml.safe_load(fstream)
- if config:
- return cls(config["url"], config["project"], config["auth_user"],
- config["auth_token"])
-
- return None
-
- def get_test_mappings(self, threshold: float, changed_files: Set[str]) -> Dict[str, Any]:
- """
- Request related test files from selected-tests service.
-
- :param threshold: Threshold for test file correlation.
- :param changed_files: Set of changed_files.
- :return: Related test files returned by selected-tests service.
- """
- payload = {"threshold": threshold, "changed_files": ",".join(changed_files)}
- response = requests.get(
- self.url + f"/projects/{self.project}/test-mappings",
- params=payload,
- headers=self.headers,
- cookies=self.cookies,
- )
- response.raise_for_status()
-
- return response.json()["test_mappings"]
-
- def get_task_mappings(self, threshold: float, changed_files: Set[str]) -> Dict[str, Any]:
- """
- Request related tasks from selected-tests service.
-
- :param threshold: Threshold for test file correlation.
- :param changed_files: Set of changed_files.
- :return: Related tasks returned by selected-tests service.
- """
- payload = {"threshold": threshold, "changed_files": ",".join(changed_files)}
- response = requests.get(
- self.url + f"/projects/{self.project}/task-mappings",
- params=payload,
- headers=self.headers,
- cookies=self.cookies,
- )
- response.raise_for_status()
-
- return response.json()["task_mappings"]
diff --git a/buildscripts/selected_tests.py b/buildscripts/selected_tests.py
index 845c8a65f70..ffdec868bae 100644
--- a/buildscripts/selected_tests.py
+++ b/buildscripts/selected_tests.py
@@ -1,31 +1,41 @@
#!/usr/bin/env python3
"""Command line utility for determining what jstests should run for the given changed files."""
-import logging
import os
import re
import sys
-from typing import Any, Dict, List, Set
+from datetime import datetime, timedelta
+from functools import partial
+from typing import Any, Dict, List, Set, Optional
import click
+import inject
import structlog
+from pydantic import BaseModel
from structlog.stdlib import LoggerFactory
from git import Repo
-
-from shrub.v2 import ShrubProject, BuildVariant
from evergreen.api import EvergreenApi, RetryingEvergreenApi
-# Get relative imports to work when the package is not installed on the PYTHONPATH.
-from buildscripts.patch_builds.change_data import find_changed_files_in_repos
-from buildscripts.patch_builds.evg_change_data import generate_revision_map_from_manifest
-
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# pylint: disable=wrong-import-position
-import buildscripts.resmokelib.parser
-import buildscripts.util.read_config as read_config
+# Get relative imports to work when the package is not installed on the PYTHONPATH.
+from buildscripts.patch_builds.change_data import find_changed_files_in_repos
+from buildscripts.patch_builds.evg_change_data import generate_revision_map_from_manifest
+from buildscripts.patch_builds.selected_tests.selected_tests_client import SelectedTestsClient
+from buildscripts.task_generation.evg_config_builder import EvgConfigBuilder
+from buildscripts.task_generation.gen_config import GenerationConfiguration
+from buildscripts.task_generation.generated_config import GeneratedConfiguration
+from buildscripts.task_generation.resmoke_proxy import ResmokeProxyConfig
+from buildscripts.task_generation.suite_split import SuiteSplitParameters, SuiteSplitConfig
+from buildscripts.task_generation.suite_split_strategies import SplitStrategy, FallbackStrategy, \
+ greedy_division, round_robin_fallback
+from buildscripts.task_generation.task_types.gentask_options import GenTaskOptions
+from buildscripts.task_generation.task_types.resmoke_tasks import ResmokeGenTaskParams
+from buildscripts.util.cmdutils import enable_logging
+from buildscripts.util.fileops import read_yaml_file
from buildscripts.burn_in_tests import DEFAULT_REPO_LOCATIONS, create_task_list_for_tests, \
- is_file_a_test_file, TaskInfo
+ TaskInfo
from buildscripts.ciconfig.evergreen import (
EvergreenProjectConfig,
ResmokeArgs,
@@ -34,16 +44,10 @@ from buildscripts.ciconfig.evergreen import (
Variant,
)
from buildscripts.evergreen_generate_resmoke_tasks import (
- CONFIG_FORMAT_FN,
- DEFAULT_CONFIG_VALUES,
- REQUIRED_CONFIG_KEYS,
- ConfigOptions,
- GenerateSubSuites,
- remove_gen_suffix,
- write_file_dict,
- Suite,
+ DEFAULT_TEST_SUITE_DIR,
+ GENERATE_CONFIG_FILE,
)
-from buildscripts.patch_builds.selected_tests_service import SelectedTestsService
+from buildscripts.patch_builds.selected_tests.selected_tests_service import SelectedTestsService
structlog.configure(logger_factory=LoggerFactory())
LOGGER = structlog.getLogger(__name__)
@@ -51,13 +55,9 @@ LOGGER = structlog.getLogger(__name__)
TASK_ID_EXPANSION = "task_id"
EVERGREEN_FILE = "etc/evergreen.yml"
EVG_CONFIG_FILE = ".evergreen.yml"
-EXTERNAL_LOGGERS = {
- "evergreen",
- "git",
- "urllib3",
-}
-SELECTED_TESTS_CONFIG_DIR = "selected_tests_config"
+SELECTED_TESTS_CONFIG_DIR = "generated_resmoke_config"
RELATION_THRESHOLD = 0
+LOOKBACK_DURATION_DAYS = 14
COMPILE_TASK_PATTERN = re.compile(".*compile.*")
CONCURRENCY_TASK_PATTERN = re.compile("concurrency.*")
@@ -75,7 +75,7 @@ EXCLUDE_TASK_PATTERNS = [
CPP_TASK_NAMES = [
"dbtest",
"idl_tests",
- "unittests",
+ "run_unittests",
]
PUBLISH_TASK_NAMES = [
"package",
@@ -88,97 +88,145 @@ EXCLUDE_TASK_LIST = [
*PYTHON_TESTS,
*PUBLISH_TASK_NAMES,
]
+POSSIBLE_RUN_TASK_FUNCS = [
+ "generate resmoke tasks",
+ "generate randomized multiversion tasks",
+ "run tests",
+ "generate explicit multiversion tasks",
+]
-class SelectedTestsConfigOptions(ConfigOptions):
- """Retrieve configuration from a config file."""
+class EvgExpansions(BaseModel):
+ """
+ Evergreen expansions needed for selected tests.
+
+ task_id: ID of task being run.
+ task_name: Name of task being run.
+ build_variant: Name of build variant being run on.
+ build_id: ID of build being run.
+    is_patch: Whether this task is running as part of a patch build.
+ project: Evergreen project being run.
+ revision: git revision being run against.
+ version_id: ID of version being run.
+ """
+
+ task_id: str
+ task_name: str
+ build_variant: str
+ build_id: str
+ is_patch: Optional[bool] = None
+ project: str
+ revision: str
+ version_id: str
@classmethod
- # pylint: disable=too-many-arguments,W0221
- def from_file(cls, origin_variant_expansions: Dict[str, str],
- selected_tests_variant_expansions: Dict[str, str], overwrites: Dict[str, Any],
- required_keys: Set[str], defaults: Dict[str, Any], formats: Dict[str, type]):
+ def from_yaml_file(cls, path: str) -> "EvgExpansions":
+ """Read the generation configuration from the given file."""
+ return cls(**read_yaml_file(path))
+
+ def build_gen_task_options(self) -> GenTaskOptions:
+ """Build options needed to generate tasks."""
+        return GenTaskOptions(
+            create_misc_suite=False,
+            generated_config_dir=SELECTED_TESTS_CONFIG_DIR,
+            is_patch=self.is_patch or False,
+            use_default_timeouts=False,
+        )
+
+ def build_suite_split_config(self, start_date: datetime,
+ end_date: datetime) -> SuiteSplitConfig:
"""
- Create an instance of SelectedTestsConfigOptions based on the given config file.
-
- :param origin_variant_expansions: Expansions of the origin build variant.
- :param selected_tests_variant_expansions: Expansions of the selected-tests variant.
- :param overwrites: Dict of configuration values to overwrite those listed in expansions.
- :param required_keys: Set of keys required by this config.
- :param defaults: Dict of default values for keys.
- :param formats: Dict with functions to format values before returning.
- :return: Instance of SelectedTestsConfigOptions.
+        Build options needed to split suites into sub-suites.
+
+ :param start_date: Start date to look at historic results.
+ :param end_date: End date to look at historic results.
+ :return: Options for splitting suites.
"""
- return cls({**origin_variant_expansions, **selected_tests_variant_expansions, **overwrites},
- required_keys, defaults, formats)
-
- @property
- def run_tests_task(self):
- """Return name of task name for s3 folder containing generated tasks config."""
- return remove_gen_suffix(self.name_of_generating_task)
-
- @property
- def run_tests_build_variant(self):
- """Return name of build_variant for s3 folder containing generated tasks config."""
- return self.name_of_generating_build_variant
-
- @property
- def run_tests_build_id(self):
- """Return name of build_id for s3 folder containing generated tasks config."""
- return self.name_of_generating_build_id
-
- @property
- def create_misc_suite(self):
- """Whether or not a _misc suite file should be created."""
- return not self.selected_tests_to_run
-
- @property
- def display_task_name(self):
- """Return the name to use as the display task."""
- return f"{self.task}_{self.variant}"
-
- @property
- def generated_suite_filename(self):
- """Filename for the generated suite file."""
- return f"{self.suite}_{self.variant}"
-
- @property
- def gen_task_set(self):
- """Return the set of tasks used to generate this configuration."""
- return set()
+ return SuiteSplitConfig(
+ evg_project=self.project,
+ target_resmoke_time=60,
+ max_sub_suites=5,
+ max_tests_per_suite=100,
+ start_date=start_date,
+ end_date=end_date,
+ include_build_variant_in_name=True,
+ )
+
+ def get_config_location(self) -> str:
+ """Get the location the generated configuration will be stored."""
+ return f"{self.build_variant}/{self.revision}/generate_tasks/{self.task_name}-{self.build_id}.tgz"
+
+
+class TaskConfigService:
+ """Service for generating selected tests task configuration."""
+
+ @staticmethod
+ def get_evg_task_config(task: Task, build_variant_config: Variant) -> Dict[str, Any]:
+ """
+        Look up task config of the task to be generated.
+
+ :param task: Task to get info for.
+ :param build_variant_config: Config of build variant to collect task info from.
+ :return: Task configuration values.
+ """
+ LOGGER.info("Calculating evg_task_config values for task", task=task.name)
+ task_vars = {}
+ for run_task_func in POSSIBLE_RUN_TASK_FUNCS:
+ task_def = task.find_func_command(run_task_func)
+ if task_def:
+ task_vars = task_def["vars"]
+ break
+
+ suite_name = ResmokeArgs.get_arg(task_vars["resmoke_args"], "suites")
+ if suite_name:
+ task_vars.update({"suite": suite_name})
+
+ # the suites argument will run all tests in a suite even when individual
+ # tests are specified in resmoke_args, so we remove it
+ resmoke_args_without_suites = ResmokeArgs.remove_arg(task_vars["resmoke_args"], "suites")
+ task_vars["resmoke_args"] = resmoke_args_without_suites
+
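+        # Strip the "_gen" suffix so the generated task is named after the task it generates.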
+ task_name = task.name[:-4] if task.name.endswith("_gen") else task.name
+ return {
+ "task_name": task_name,
+ "build_variant": build_variant_config.name,
+ **task_vars,
+ "large_distro_name": build_variant_config.expansion("large_distro_name"),
+ }
+
+ def get_task_configs_for_test_mappings(self, tests_by_task: Dict[str, TaskInfo],
+ build_variant_config: Variant) -> Dict[str, dict]:
+ """
+ For test mappings, generate a dict containing task names and their config settings.
-def _configure_logging(verbose: bool):
- """
- Configure logging for the application.
+ :param tests_by_task: Dictionary of tests and tasks to run.
+ :param build_variant_config: Config of build variant to collect task info from.
+ :return: Dict of task names and their config settings.
+ """
+ evg_task_configs = {}
+ for task_name, test_list_info in tests_by_task.items():
+ task = _find_task(build_variant_config, task_name)
+ if task and not _exclude_task(task):
+ evg_task_config = self.get_evg_task_config(task, build_variant_config)
+ evg_task_config.update({"selected_tests_to_run": set(test_list_info.tests)})
+ evg_task_configs[task.name] = evg_task_config
+
+ return evg_task_configs
+
+ def get_task_configs_for_task_mappings(self, related_tasks: List[str],
+ build_variant_config: Variant) -> Dict[str, dict]:
+ """
+ For task mappings, generate a dict containing task names and their config settings.
- :param verbose: If True set log level to DEBUG.
- """
- level = logging.DEBUG if verbose else logging.INFO
- logging.basicConfig(
- format="[%(asctime)s - %(name)s - %(levelname)s] %(message)s",
- level=level,
- stream=sys.stdout,
- )
- for log_name in EXTERNAL_LOGGERS:
- logging.getLogger(log_name).setLevel(logging.WARNING)
-
-
-def _find_selected_test_files(selected_tests_service: SelectedTestsService,
- changed_files: Set[str]) -> Set[str]:
- """
- Request related test files from selected-tests service and filter invalid files.
+ :param related_tasks: List of tasks to run.
+ :param build_variant_config: Config of build variant to collect task info from.
+ :return: Dict of task names and their config settings.
+ """
+ evg_task_configs = {}
+ for task_name in related_tasks:
+ task = _find_task(build_variant_config, task_name)
+ if task and not _exclude_task(task):
+ evg_task_config = self.get_evg_task_config(task, build_variant_config)
+ evg_task_configs[task.name] = evg_task_config
- :param selected_tests_service: Selected-tests service.
- :param changed_files: Set of changed_files.
- :return: Set of test files returned by selected-tests service that are valid test files.
- """
- test_mappings = selected_tests_service.get_test_mappings(RELATION_THRESHOLD, changed_files)
- return {
- test_file["name"]
- for test_mapping in test_mappings for test_file in test_mapping["test_files"]
- if is_file_a_test_file(test_file["name"])
- }
+ return evg_task_configs
def _exclude_task(task: Task) -> bool:
@@ -195,19 +243,6 @@ def _exclude_task(task: Task) -> bool:
return False
-def _find_selected_tasks(selected_tests_service: SelectedTestsService, changed_files: Set[str]) -> \
- Set[str]:
- """
- Request tasks from selected-tests and filter out tasks that don't exist or should be excluded.
-
- :param selected_tests_service: Selected-tests service.
- :param changed_files: Set of changed_files.
- :return: Set of tasks returned by selected-tests service that should not be excluded.
- """
- task_mappings = selected_tests_service.get_task_mappings(RELATION_THRESHOLD, changed_files)
- return {task["name"] for task_mapping in task_mappings for task in task_mapping["tasks"]}
-
-
def _find_task(build_variant_config: Variant, task_name: str) -> Task:
"""
Look up shrub config for task.
@@ -222,124 +257,6 @@ def _find_task(build_variant_config: Variant, task_name: str) -> Task:
return task
-def _get_selected_tests_task_config(
- selected_tests_variant_expansions: Dict[str, str]) -> Dict[str, str]:
- """
- Look up task config of the selected tests task.
-
- :param selected_tests_variant_expansions: Expansions of the selected-tests variant.
- :return: Task configuration values.
- """
- return {
- "name_of_generating_task": selected_tests_variant_expansions["task_name"],
- "name_of_generating_build_variant": selected_tests_variant_expansions["build_variant"],
- "name_of_generating_build_id": selected_tests_variant_expansions["build_id"]
- }
-
-
-def _get_evg_task_config(
- selected_tests_variant_expansions: Dict[str, str],
- task: Task,
- build_variant_config: Variant,
-) -> Dict[str, Any]:
- """
- Look up task config of the task to be generated.
-
- :param selected_tests_variant_expansions: Expansions of the selected-tests variant.
- :param task: Task to get info for.
- :param build_variant_config: Config of build variant to collect task info from.
- :return: Task configuration values.
- """
- LOGGER.info("Calculating evg_task_config values for task", task=task.name)
- if task.is_generate_resmoke_task:
- task_vars = task.generate_resmoke_tasks_command["vars"]
- else:
- task_vars = task.run_tests_command["vars"]
- task_vars.update({"fallback_num_sub_suites": "1"})
-
- suite_name = ResmokeArgs.get_arg(task_vars["resmoke_args"], "suites")
- if suite_name:
- task_vars.update({"suite": suite_name})
-
- # the suites argument will run all tests in a suite even when individual
- # tests are specified in resmoke_args, so we remove it
- resmoke_args_without_suites = ResmokeArgs.remove_arg(task_vars["resmoke_args"], "suites")
- task_vars["resmoke_args"] = resmoke_args_without_suites
-
- selected_tests_task_config = _get_selected_tests_task_config(selected_tests_variant_expansions)
-
- return {
- "task_name": task.name, "build_variant": build_variant_config.name, **task_vars,
- **selected_tests_task_config
- }
-
-
-def _update_config_with_task(evg_api: EvergreenApi, build_variant: BuildVariant,
- config_options: SelectedTestsConfigOptions,
- config_dict_of_suites_and_tasks: Dict[str, str]) -> None:
- """
- Generate the suites config and the task shrub config for a given task config.
-
- :param evg_api: Evergreen API object.
- :param build_variant: Build variant to add tasks to.
- :param shrub_project: Shrub configuration for task.
- :param config_options: Task configuration options.
- :param config_dict_of_suites_and_tasks: Dict of shrub configs and suite file contents.
- """
- task_generator = GenerateSubSuites(evg_api, config_options)
- suites = task_generator.get_suites()
-
- config_dict_of_suites = task_generator.generate_suites_config(suites)
- config_dict_of_suites_and_tasks.update(config_dict_of_suites)
-
- task_generator.add_suites_to_build_variant(suites, build_variant)
-
-
-def _get_task_configs_for_test_mappings(selected_tests_variant_expansions: Dict[str, str],
- tests_by_task: Dict[str, TaskInfo],
- build_variant_config: Variant) -> Dict[str, dict]:
- """
- For test mappings, generate a dict containing task names and their config settings.
-
- :param selected_tests_variant_expansions: Expansions of the selected-tests variant.
- :param tests_by_task: Dictionary of tests and tasks to run.
- :param build_variant_config: Config of build variant to collect task info from.
- :return: Dict of task names and their config settings.
- """
- evg_task_configs = {}
- for task_name, test_list_info in tests_by_task.items():
- task = _find_task(build_variant_config, task_name)
- if task and not _exclude_task(task):
- evg_task_config = _get_evg_task_config(selected_tests_variant_expansions, task,
- build_variant_config)
- evg_task_config.update({"selected_tests_to_run": set(test_list_info.tests)})
- evg_task_configs[task.name] = evg_task_config
-
- return evg_task_configs
-
-
-def _get_task_configs_for_task_mappings(selected_tests_variant_expansions: Dict[str, str],
- related_tasks: List[str],
- build_variant_config: Variant) -> Dict[str, dict]:
- """
- For task mappings, generate a dict containing task names and their config settings.
-
- :param selected_tests_variant_expansions: Expansions of the selected-tests variant.
- :param related_tasks: List of tasks to run.
- :param build_variant_config: Config of build variant to collect task info from.
- :return: Dict of task names and their config settings.
- """
- evg_task_configs = {}
- for task_name in related_tasks:
- task = _find_task(build_variant_config, task_name)
- if task and not _exclude_task(task):
- evg_task_config = _get_evg_task_config(selected_tests_variant_expansions, task,
- build_variant_config)
- evg_task_configs[task.name] = evg_task_config
-
- return evg_task_configs
-
-
def _remove_repo_path_prefix(file_path: str) -> str:
"""
Remove the repo path prefix from the filepath.
@@ -355,129 +272,184 @@ def _remove_repo_path_prefix(file_path: str) -> str:
return file_path
-def _get_task_configs(evg_conf: EvergreenProjectConfig,
- selected_tests_service: SelectedTestsService,
- selected_tests_variant_expansions: Dict[str, str],
- build_variant_config: Variant, changed_files: Set[str]) -> Dict[str, Dict]:
+def filter_set(item: str, input_set: Set[str]) -> bool:
"""
- Get task configurations for the tasks to be generated.
+ Filter to determine if the given item is in the given set.
- :param evg_conf: Evergreen configuration.
- :param selected_tests_service: Selected-tests service.
- :param selected_tests_variant_expansions: Expansions of the selected-tests variant.
- :param build_variant_config: Config of build variant to collect task info from.
- :param changed_files: Set of changed_files.
- :return: Task configurations.
+ :param item: Item to search for.
+ :param input_set: Set to search.
+    :return: True if the item is contained in the set.
"""
- task_configs = {}
+ return item in input_set
- related_test_files = _find_selected_test_files(selected_tests_service, changed_files)
- LOGGER.info("related test files found", related_test_files=related_test_files,
- variant=build_variant_config.name)
- if related_test_files:
- tests_by_task = create_task_list_for_tests(related_test_files, build_variant_config.name,
- evg_conf)
- LOGGER.info("tests and tasks found", tests_by_task=tests_by_task)
+class SelectedTestsOrchestrator:
+ """Orchestrator for generating selected test builds."""
- test_mapping_task_configs = _get_task_configs_for_test_mappings(
- selected_tests_variant_expansions, tests_by_task, build_variant_config)
- task_configs.update(test_mapping_task_configs)
+ # pylint: disable=too-many-arguments
+ @inject.autoparams()
+ def __init__(self, evg_api: EvergreenApi, evg_conf: EvergreenProjectConfig,
+ selected_tests_service: SelectedTestsService,
+ task_config_service: TaskConfigService, evg_expansions: EvgExpansions) -> None:
+ """
+ Initialize the orchestrator.
- related_tasks = _find_selected_tasks(selected_tests_service, changed_files)
- LOGGER.info("related tasks found", related_tasks=related_tasks,
- variant=build_variant_config.name)
- if related_tasks:
- task_mapping_task_configs = _get_task_configs_for_task_mappings(
- selected_tests_variant_expansions, related_tasks, build_variant_config)
- # task_mapping_task_configs will overwrite test_mapping_task_configs
- # because task_mapping_task_configs will run all tests rather than a subset of tests and we
- # should err on the side of running all tests
- task_configs.update(task_mapping_task_configs)
+ :param evg_api: Evergreen API client.
+ :param evg_conf: Evergreen Project configuration.
+ :param selected_tests_service: Selected tests service.
+ :param task_config_service: Task Config service.
+ :param evg_expansions: Evergreen expansions.
+ """
+ self.evg_api = evg_api
+ self.evg_conf = evg_conf
+ self.selected_tests_service = selected_tests_service
+ self.task_config_service = task_config_service
+ self.evg_expansions = evg_expansions
- return task_configs
+ def find_changed_files(self, repos: List[Repo], task_id: str) -> Set[str]:
+ """
+ Determine what files have changed in the given repos.
+
+ :param repos: List of git repos to query.
+ :param task_id: ID of task being run.
+ :return: Set of files that contain changes.
+ """
+ revision_map = generate_revision_map_from_manifest(repos, task_id, self.evg_api)
+ changed_files = find_changed_files_in_repos(repos, revision_map)
+ changed_files = {_remove_repo_path_prefix(file_path) for file_path in changed_files}
+ changed_files = {
+ file_path
+ for file_path in changed_files if not file_path.startswith("src/third_party")
+ }
+ LOGGER.info("Found changed files", files=changed_files)
+ return changed_files
+
+ def get_task_config(self, build_variant_config: Variant,
+ changed_files: Set[str]) -> Dict[str, Dict]:
+ """
+        Get task configurations for the tasks to be generated.
+
+        :param build_variant_config: Config of build variant to collect task info from.
+        :param changed_files: Set of changed files.
+ :return: Task configurations.
+ """
+ existing_tasks = self.get_existing_tasks(self.evg_expansions.version_id,
+ build_variant_config.name)
+ task_configs = {}
+
+ related_test_files = self.selected_tests_service.find_selected_test_files(changed_files)
+ LOGGER.info("related test files found", related_test_files=related_test_files,
+ variant=build_variant_config.name)
+
+ if related_test_files:
+ tests_by_task = create_task_list_for_tests(related_test_files,
+ build_variant_config.name, self.evg_conf)
+ LOGGER.info("tests and tasks found", tests_by_task=tests_by_task)
+ tests_by_task = {
+ task: tests
+ for task, tests in tests_by_task.items() if task not in existing_tasks
+ }
+
+ test_mapping_task_configs = self.task_config_service.get_task_configs_for_test_mappings(
+ tests_by_task, build_variant_config)
+ task_configs.update(test_mapping_task_configs)
+
+ related_tasks = self.selected_tests_service.find_selected_tasks(changed_files)
+ LOGGER.info("related tasks found", related_tasks=related_tasks,
+ variant=build_variant_config.name)
+ related_tasks = {task for task in related_tasks if task not in existing_tasks}
+ if related_tasks:
+ task_mapping_task_configs = self.task_config_service.get_task_configs_for_task_mappings(
+ list(related_tasks), build_variant_config)
+ # task_mapping_task_configs will overwrite test_mapping_task_configs
+ # because task_mapping_task_configs will run all tests rather than a subset of tests
+ # and we should err on the side of running all tests
+ task_configs.update(task_mapping_task_configs)
+
+ return task_configs
+
+ def get_existing_tasks(self, version_id: str, build_variant: str) -> Set[str]:
+ """
+ Get the set of tasks that already exist in the given build.
-def remove_task_configs_already_in_build(task_configs: Dict[str, Dict], evg_api: EvergreenApi,
- build_variant_config: Variant, version_id: str) -> None:
- """
- Remove the task_configs that exist for tasks that have been pulled into the build manually.
+ :param version_id: ID of version to query.
+ :param build_variant: Name of build variant to query.
+ :return: Set of task names that already exist in the specified build.
+ """
+ version = self.evg_api.version_by_id(version_id)
- :param task_configs: The task configurations for the tasks to be generated.
- :param evg_api: Evergreen API object.
- :param build_variant_config: Config of build variant to collect task info from.
- :param version_id: The version_id of the version running selected tests.
- """
- version = evg_api.version_by_id(version_id)
-
- try:
- build = version.build_by_variant(build_variant_config.name)
- except KeyError:
- LOGGER.debug("No build exists on this build variant for this version yet",
- variant=build_variant_config.name)
- build = None
-
- if build:
- tasks_already_in_build = build.get_tasks()
- for task in tasks_already_in_build:
- if task.display_name in task_configs:
- LOGGER.info(
- "Will not generate task that has already been pulled into the build manually",
- variant=build_variant_config.name, task_already_in_build=task.display_name)
- del task_configs[task.display_name]
-
-
-def run(evg_api: EvergreenApi, evg_conf: EvergreenProjectConfig,
- selected_tests_service: SelectedTestsService,
- selected_tests_variant_expansions: Dict[str, str], repos: List[Repo]) -> Dict[str, str]:
- # pylint: disable=too-many-locals
- """
- Run code to select tasks to run based on test and task mappings for each of the build variants.
-
- :param evg_api: Evergreen API object.
- :param evg_conf: Evergreen configuration.
- :param selected_tests_service: Selected-tests service.
- :param selected_tests_variant_expansions: Expansions of the selected-tests variant.
- :param repos: List of repos containing changed files.
- :return: Dict of files and file contents for generated tasks.
- """
- config_dict_of_suites_and_tasks = {}
+ try:
+ build = version.build_by_variant(build_variant)
+ except KeyError:
+ LOGGER.debug("No build exists on this build variant for this version yet",
+ variant=build_variant)
+ return set()
- task_id = selected_tests_variant_expansions[TASK_ID_EXPANSION]
- revision_map = generate_revision_map_from_manifest(repos, task_id, evg_api)
- changed_files = find_changed_files_in_repos(repos, revision_map)
- changed_files = {_remove_repo_path_prefix(file_path) for file_path in changed_files}
- LOGGER.info("Found changed files", files=changed_files)
+ if build:
+ tasks_already_in_build = build.get_tasks()
+ return {task.display_name for task in tasks_already_in_build}
- shrub_project = ShrubProject()
- for build_variant_config in evg_conf.get_required_variants():
- shrub_build_variant = BuildVariant(build_variant_config.name)
- origin_variant_expansions = build_variant_config.expansions
+ return set()
- task_configs = _get_task_configs(evg_conf, selected_tests_service,
- selected_tests_variant_expansions, build_variant_config,
- changed_files)
+ def generate_build_variant(self, build_variant_config: Variant, changed_files: Set[str],
+ builder: EvgConfigBuilder) -> None:
+ """
+ Generate the selected tasks on the specified build variant.
- remove_task_configs_already_in_build(task_configs, evg_api, build_variant_config,
- selected_tests_variant_expansions["version_id"])
+ :param build_variant_config: Configuration of build variant to generate.
+        :param changed_files: Set of changed files used to determine what to run.
+ :param builder: Builder to create new configuration.
+ """
+ build_variant_name = build_variant_config.name
+ LOGGER.info("Generating build variant", build_variant=build_variant_name)
+ task_configs = self.get_task_config(build_variant_config, changed_files)
for task_config in task_configs.values():
- Suite.reset_current_index()
- config_options = SelectedTestsConfigOptions.from_file(
- origin_variant_expansions,
- selected_tests_variant_expansions,
- task_config,
- REQUIRED_CONFIG_KEYS,
- DEFAULT_CONFIG_VALUES,
- CONFIG_FORMAT_FN,
+ test_filter = None
+ if "selected_tests_to_run" in task_config:
+ test_filter = partial(filter_set, input_set=task_config["selected_tests_to_run"])
+ split_params = SuiteSplitParameters(
+ build_variant=build_variant_name,
+ task_name=task_config["task_name"],
+ suite_name=task_config.get("suite", task_config["task_name"]),
+ filename=task_config.get("suite", task_config["task_name"]),
+ test_file_filter=test_filter,
+ is_asan=build_variant_config.is_asan_build(),
)
- _update_config_with_task(evg_api, shrub_build_variant, config_options,
- config_dict_of_suites_and_tasks)
+ gen_params = ResmokeGenTaskParams(
+ use_large_distro=task_config.get("use_large_distro", False),
+ large_distro_name=task_config.get("large_distro_name"),
+ use_multiversion=task_config.get("use_multiversion"),
+ repeat_suites=task_config.get("repeat_suites", 1),
+ resmoke_args=task_config["resmoke_args"],
+ resmoke_jobs_max=task_config.get("resmoke_jobs_max"),
+ config_location=self.evg_expansions.get_config_location(),
+ )
+ builder.generate_suite(split_params, gen_params)
+
+ def generate(self, repos: List[Repo], task_id: str) -> None:
+ """
+ Build and generate the configuration to create selected tests.
- shrub_project.add_build_variant(shrub_build_variant)
+ :param repos: List of git repos containing changes to check.
+ :param task_id: ID of task being run.
+ """
+ changed_files = self.find_changed_files(repos, task_id)
+ generated_config = self.generate_version(changed_files)
+ generated_config.write_all_to_dir(SELECTED_TESTS_CONFIG_DIR)
- config_dict_of_suites_and_tasks["selected_tests_config.json"] = shrub_project.json()
- return config_dict_of_suites_and_tasks
+ def generate_version(self, changed_files: Set[str]) -> GeneratedConfiguration:
+ """
+ Generate selected tests configuration for the given file changes.
+
+ :param changed_files: Set of files that contain changes.
+ :return: Configuration to generate selected-tests tasks.
+ """
+ builder = EvgConfigBuilder() # pylint: disable=no-value-for-parameter
+ for build_variant_config in self.evg_conf.get_required_variants():
+ self.generate_build_variant(build_variant_config, changed_files, builder)
+
+ return builder.build("selected_tests_config.json")
@click.command()
@@ -517,20 +489,32 @@ def main(
:param evg_api_config: Location of configuration file to connect to evergreen.
:param selected_tests_config: Location of config file to connect to selected-tests service.
"""
- _configure_logging(verbose)
+ enable_logging(verbose)
- evg_api = RetryingEvergreenApi.get_api(config_file=evg_api_config)
- evg_conf = parse_evergreen_file(EVERGREEN_FILE)
- selected_tests_service = SelectedTestsService.from_file(selected_tests_config)
- repos = [Repo(x) for x in DEFAULT_REPO_LOCATIONS if os.path.isdir(x)]
+ end_date = datetime.utcnow().replace(microsecond=0)
+ start_date = end_date - timedelta(days=LOOKBACK_DURATION_DAYS)
- buildscripts.resmokelib.parser.set_run_options()
+ evg_expansions = EvgExpansions.from_yaml_file(expansion_file)
- task_expansions = read_config.read_config_file(expansion_file)
+ def dependencies(binder: inject.Binder) -> None:
+ binder.bind(EvgExpansions, evg_expansions)
+ binder.bind(EvergreenApi, RetryingEvergreenApi.get_api(config_file=evg_api_config))
+ binder.bind(EvergreenProjectConfig, parse_evergreen_file(EVERGREEN_FILE))
+ binder.bind(SelectedTestsClient, SelectedTestsClient.from_file(selected_tests_config))
+ binder.bind(SuiteSplitConfig, evg_expansions.build_suite_split_config(start_date, end_date))
+ binder.bind(SplitStrategy, greedy_division)
+ binder.bind(FallbackStrategy, round_robin_fallback)
+ binder.bind(GenTaskOptions, evg_expansions.build_gen_task_options())
+ binder.bind(GenerationConfiguration,
+ GenerationConfiguration.from_yaml_file(GENERATE_CONFIG_FILE))
+ binder.bind(ResmokeProxyConfig,
+ ResmokeProxyConfig(resmoke_suite_dir=DEFAULT_TEST_SUITE_DIR))
- config_dict_of_suites_and_tasks = run(evg_api, evg_conf, selected_tests_service,
- task_expansions, repos)
- write_file_dict(SELECTED_TESTS_CONFIG_DIR, config_dict_of_suites_and_tasks)
+ inject.configure(dependencies)
+
+ repos = [Repo(x) for x in DEFAULT_REPO_LOCATIONS if os.path.isdir(x)]
+ selected_tests = SelectedTestsOrchestrator() # pylint: disable=no-value-for-parameter
+ selected_tests.generate(repos, evg_expansions.task_id)
if __name__ == "__main__":
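
The test_file_filter passed to SuiteSplitParameters above is just set membership curried with functools.partial; a self-contained sketch of the pattern:

from functools import partial
from typing import Set

def filter_set(item: str, input_set: Set[str]) -> bool:
    return item in input_set

selected = {"jstests/core/foo.js", "jstests/core/bar.js"}  # hypothetical test files
test_filter = partial(filter_set, input_set=selected)

assert test_filter("jstests/core/foo.js")
assert not test_filter("jstests/core/baz.js")

Binding the set once up front hands the suite splitter a plain one-argument predicate, so it never needs to know where the selection came from.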
diff --git a/buildscripts/task_generation/__init__.py b/buildscripts/task_generation/__init__.py
new file mode 100644
index 00000000000..4b7a2bb941b
--- /dev/null
+++ b/buildscripts/task_generation/__init__.py
@@ -0,0 +1 @@
+"""Empty."""
diff --git a/buildscripts/task_generation/evg_config_builder.py b/buildscripts/task_generation/evg_config_builder.py
new file mode 100644
index 00000000000..b0e83f3f245
--- /dev/null
+++ b/buildscripts/task_generation/evg_config_builder.py
@@ -0,0 +1,155 @@
+"""Builder for generating evergreen configuration."""
+from threading import Lock
+from typing import Optional, Set, List, Dict
+
+import inject
+from shrub.v2 import ShrubProject, BuildVariant, ExistingTask, Task
+
+from buildscripts.patch_builds.task_generation import validate_task_generation_limit
+from buildscripts.task_generation.gen_task_service import GenTaskService, \
+ GenTaskOptions, ResmokeGenTaskParams, FuzzerGenTaskParams
+from buildscripts.task_generation.generated_config import GeneratedFile, GeneratedConfiguration
+from buildscripts.task_generation.resmoke_proxy import ResmokeProxyService
+from buildscripts.task_generation.suite_split import SuiteSplitService, GeneratedSuite, \
+ SuiteSplitParameters
+from buildscripts.task_generation.task_types.fuzzer_tasks import FuzzerTask
+from buildscripts.task_generation.task_types.multiversion_tasks import MultiversionGenTaskParams
+
+
+# pylint: disable=too-many-instance-attributes
+class EvgConfigBuilder:
+ """A builder class for building evergreen configuration."""
+
+ @inject.autoparams()
+ def __init__(
+ self,
+ resmoke_proxy: ResmokeProxyService,
+ suite_split_service: SuiteSplitService,
+ evg_config_gen_service: GenTaskService,
+ gen_options: GenTaskOptions,
+ ) -> None:
+ """
+ Initialize a new builder.
+
+ :param resmoke_proxy: Proxy to access resmoke data.
+ :param suite_split_service: Service to split suites into sub-suites.
+ :param evg_config_gen_service: Service to generate evergreen configuration.
+ :param gen_options: Global options for generating evergreen configuration.
+ """
+ self.resmoke_proxy = resmoke_proxy
+ self.suite_split_service = suite_split_service
+ self.evg_config_gen_service = evg_config_gen_service
+ self.gen_options = gen_options
+
+ self.shrub_config = ShrubProject.empty()
+ self.build_variants: Dict[str, BuildVariant] = {}
+ self.generated_files: List[GeneratedFile] = []
+ self.lock = Lock()
+
+ def get_build_variant(self, build_variant: str) -> BuildVariant:
+ """
+ Get the build variant object, creating it if it doesn't exist.
+
+ NOTE: The `lock` should be held by any functions calling this one.
+
+ :param build_variant: Name of build variant.
+ :return: BuildVariant object being created.
+ """
+ if build_variant not in self.build_variants:
+ self.build_variants[build_variant] = BuildVariant(build_variant)
+ return self.build_variants[build_variant]
+
+ def _generate_suites_config(self, generated_suite: GeneratedSuite) -> List[GeneratedFile]:
+ """
+        Generate the suite files and evergreen configuration for the generated task.
+
+        :param generated_suite: Suite that was split into sub-suites.
+        :return: The suite files and evergreen configuration for the generated task.
+ """
+ test_list = self.resmoke_proxy.list_tests(generated_suite.suite_name)
+ return self.resmoke_proxy.render_suite_files(
+ generated_suite.sub_suites, generated_suite.suite_name, generated_suite.filename,
+ test_list, self.gen_options.create_misc_suite, generated_suite.build_variant)
+
+ def generate_suite(self, split_params: SuiteSplitParameters,
+ gen_params: ResmokeGenTaskParams) -> None:
+ """
+ Add configuration to generate a split version of the specified resmoke suite.
+
+ :param split_params: Parameters of how resmoke suite should be split.
+ :param gen_params: Parameters of how evergreen configuration should be generated.
+ """
+ generated_suite = self.suite_split_service.split_suite(split_params)
+ with self.lock:
+ build_variant = self.get_build_variant(generated_suite.build_variant)
+ self.evg_config_gen_service.generate_task(generated_suite, build_variant, gen_params)
+ self.generated_files.extend(self._generate_suites_config(generated_suite))
+
+ def add_multiversion_suite(self, split_params: SuiteSplitParameters,
+ gen_params: MultiversionGenTaskParams) -> None:
+ """
+ Add a multiversion suite to the builder.
+
+ :param split_params: Parameters for how suite should be split.
+ :param gen_params: Parameters for how subtasks should be generated.
+ """
+ generated_suite = self.suite_split_service.split_suite(split_params)
+ with self.lock:
+ build_variant = self.get_build_variant(generated_suite.build_variant)
+ self.evg_config_gen_service.generate_multiversion_task(generated_suite, build_variant,
+ gen_params)
+ self.generated_files.extend(self._generate_suites_config(generated_suite))
+
+ def add_multiversion_burn_in_test(self, split_params: SuiteSplitParameters,
+ gen_params: MultiversionGenTaskParams) -> Set[Task]:
+ """
+ Add a multiversion burn_in suite to the builder.
+
+ :param split_params: Parameters for how suite should be split.
+        :param gen_params: Parameters for how subtasks should be generated.
+        :return: Set of shrub tasks that were generated.
+ """
+ generated_suite = self.suite_split_service.split_suite(split_params)
+ with self.lock:
+ build_variant = self.get_build_variant(generated_suite.build_variant)
+ tasks = self.evg_config_gen_service.generate_multiversion_burnin_task(
+ generated_suite, gen_params, build_variant)
+ self.generated_files.extend(self._generate_suites_config(generated_suite))
+ return tasks
+
+ def generate_fuzzer(self, fuzzer_params: FuzzerGenTaskParams) -> FuzzerTask:
+ """
+ Add configuration to generate the specified fuzzer task.
+
+        :param fuzzer_params: Parameters of how the fuzzer suite should be generated.
+        :return: Fuzzer task that was generated.
+ """
+ with self.lock:
+ build_variant = self.get_build_variant(fuzzer_params.variant)
+ return self.evg_config_gen_service.generate_fuzzer_task(fuzzer_params, build_variant)
+
+ def add_display_task(self, display_task_name: str, execution_task_names: Set[str],
+ build_variant: str) -> None:
+ """
+ Add configuration to generate the specified display task.
+
+ :param display_task_name: Name of display task to create.
+        :param execution_task_names: Names of execution tasks to include in the display task.
+ :param build_variant: Name of build variant to add to.
+ """
+ execution_tasks = {ExistingTask(task_name) for task_name in execution_task_names}
+ with self.lock:
+ build_variant = self.get_build_variant(build_variant)
+ build_variant.display_task(display_task_name, execution_existing_tasks=execution_tasks)
+
+ def build(self, config_file_name: str) -> GeneratedConfiguration:
+ """
+ Build the specified configuration and return the files needed to create it.
+
+ :param config_file_name: Filename to use for evergreen configuration.
+ :return: Dictionary of files and contents that are needed to create configuration.
+ """
+ for build_variant in self.build_variants.values():
+ self.shrub_config.add_build_variant(build_variant)
+ if not validate_task_generation_limit(self.shrub_config):
+ raise ValueError("Attempting to generate more than max tasks in single generator")
+
+ self.generated_files.append(GeneratedFile(config_file_name, self.shrub_config.json()))
+ return GeneratedConfiguration(self.generated_files)
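
A sketch of the builder lifecycle, assuming the inject bindings from selected_tests.py's main() are already configured; all parameter values here are hypothetical:

from buildscripts.task_generation.evg_config_builder import EvgConfigBuilder
from buildscripts.task_generation.suite_split import SuiteSplitParameters
from buildscripts.task_generation.task_types.resmoke_tasks import ResmokeGenTaskParams

builder = EvgConfigBuilder()  # pylint: disable=no-value-for-parameter

split_params = SuiteSplitParameters(
    build_variant="enterprise-rhel-80-64-bit", task_name="jsCore", suite_name="core",
    filename="core", test_file_filter=None, is_asan=False)
gen_params = ResmokeGenTaskParams(
    use_large_distro=False, large_distro_name=None, use_multiversion=None, repeat_suites=1,
    resmoke_args="--storageEngine=wiredTiger", resmoke_jobs_max=None,
    config_location="variant/revision/generate_tasks/task-build_id.tgz")

builder.generate_suite(split_params, gen_params)
builder.build("selected_tests_config.json").write_all_to_dir("generated_resmoke_config")

The internal Lock makes generate_suite safe to call from multiple threads, so callers can split many suites concurrently while sharing one shrub project.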
diff --git a/buildscripts/task_generation/gen_config.py b/buildscripts/task_generation/gen_config.py
new file mode 100644
index 00000000000..dba51a44ce1
--- /dev/null
+++ b/buildscripts/task_generation/gen_config.py
@@ -0,0 +1,22 @@
+"""Global configuration for generating tasks."""
+from typing import Set
+
+from pydantic import BaseModel
+
+from buildscripts.util.fileops import read_yaml_file
+
+
+class GenerationConfiguration(BaseModel):
+ """Configuration for generating sub-tasks."""
+
+ build_variant_large_distro_exceptions: Set[str]
+
+ @classmethod
+ def from_yaml_file(cls, path: str) -> "GenerationConfiguration":
+ """Read the generation configuration from the given file."""
+ return cls(**read_yaml_file(path))
+
+ @classmethod
+ def default_config(cls) -> "GenerationConfiguration":
+ """Create a default configuration."""
+ return cls(build_variant_large_distro_exceptions=set())
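
A sketch of the YAML shape this model parses; the variant name is hypothetical, and the path matches the file referenced by gen_task_service.py's error message:

from buildscripts.task_generation.gen_config import GenerationConfiguration

# etc/generate_subtasks_config.yml is expected to look roughly like:
#
#   build_variant_large_distro_exceptions:
#     - enterprise-rhel-80-64-bit
config = GenerationConfiguration.from_yaml_file("etc/generate_subtasks_config.yml")
print(config.build_variant_large_distro_exceptions)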
diff --git a/buildscripts/task_generation/gen_task_service.py b/buildscripts/task_generation/gen_task_service.py
new file mode 100644
index 00000000000..1a8eb463237
--- /dev/null
+++ b/buildscripts/task_generation/gen_task_service.py
@@ -0,0 +1,163 @@
+"""Tools for generating evergreen configuration."""
+import os
+import sys
+from typing import Optional, List, Set
+
+import inject
+import structlog
+from shrub.v2 import BuildVariant, Task
+from evergreen import EvergreenApi
+
+# Get relative imports to work when the package is not installed on the PYTHONPATH.
+if __name__ == "__main__" and __package__ is None:
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+# pylint: disable=wrong-import-position
+from buildscripts.task_generation.task_types.multiversion_tasks import MultiversionGenTaskParams, \
+ MultiversionGenTaskService
+from buildscripts.task_generation.task_types.fuzzer_tasks import FuzzerGenTaskParams, FuzzerTask, \
+ FuzzerGenTaskService
+from buildscripts.task_generation.task_types.gentask_options import GenTaskOptions
+from buildscripts.task_generation.task_types.resmoke_tasks import ResmokeGenTaskParams, \
+ ResmokeGenTaskService
+from buildscripts.task_generation.gen_config import GenerationConfiguration
+from buildscripts.task_generation.suite_split import GeneratedSuite
+# pylint: enable=wrong-import-position
+
+LOGGER = structlog.getLogger(__name__)
+
+NO_LARGE_DISTRO_ERR = """
+***************************************************************************************
+It appears we are trying to generate a task marked as requiring a large distro, but the
+build variant has not specified a large build variant. In order to resolve this error,
+you need to:
+
+(1) add a "large_distro_name" expansion to this build variant ("{build_variant}").
+
+ -- or --
+
+(2) add this build variant ("{build_variant}") to the "build_variant_large_distro_exceptions"
+list in the "etc/generate_subtasks_config.yml" file.
+***************************************************************************************
+"""
+
+
+class GenTaskService:
+ """A service for building evergreen task configurations."""
+
+ # pylint: disable=too-many-arguments
+ @inject.autoparams()
+ def __init__(self, evg_api: EvergreenApi, gen_task_options: GenTaskOptions,
+ gen_config: GenerationConfiguration,
+ resmoke_gen_task_service: ResmokeGenTaskService,
+ multiversion_gen_task_service: MultiversionGenTaskService,
+ fuzzer_gen_task_service: FuzzerGenTaskService) -> None:
+ """
+ Initialize the service.
+
+ :param evg_api: Evergreen API client.
+ :param gen_task_options: Options for how tasks should be generated.
+        :param gen_config: Global configuration for generating tasks.
+ :param resmoke_gen_task_service: Service for generating standard resmoke tasks.
+ :param multiversion_gen_task_service: Service for generating multiversion resmoke tasks.
+ :param fuzzer_gen_task_service: Service for generating fuzzer tasks.
+ """
+ self.evg_api = evg_api
+ self.gen_task_options = gen_task_options
+ self.gen_config = gen_config
+ self.resmoke_gen_task_service = resmoke_gen_task_service
+ self.multiversion_gen_task_service = multiversion_gen_task_service
+ self.fuzzer_gen_task_service = fuzzer_gen_task_service
+
+ def generate_fuzzer_task(self, params: FuzzerGenTaskParams,
+ build_variant: BuildVariant) -> FuzzerTask:
+ """
+ Generate evergreen configuration for the given fuzzer and add it to the build_variant.
+
+ :param params: Parameters for how fuzzer should be generated.
+        :param build_variant: Build variant to add generated configuration to.
+        :return: Fuzzer task that was generated.
+ """
+ fuzzer_task = self.fuzzer_gen_task_service.generate_tasks(params)
+ distros = self._get_distro(build_variant.name, params.use_large_distro,
+ params.large_distro_name)
+ if params.add_to_display_task:
+ build_variant.display_task(fuzzer_task.task_name, fuzzer_task.sub_tasks,
+ distros=distros)
+ else:
+ build_variant.add_tasks(fuzzer_task.sub_tasks, distros=distros)
+ return fuzzer_task
+
+ def generate_task(self, generated_suite: GeneratedSuite, build_variant: BuildVariant,
+ gen_params: ResmokeGenTaskParams) -> None:
+ """
+ Generate evergreen configuration for the given suite and add it to the build_variant.
+
+ :param generated_suite: Suite to add.
+ :param build_variant: Build variant to add generated configuration to.
+        :param gen_params: Parameters to configure how tasks are generated.
+ """
+ execution_tasks = self.resmoke_gen_task_service.generate_tasks(generated_suite, gen_params)
+ distros = self._get_distro(build_variant.name, gen_params.use_large_distro,
+ gen_params.large_distro_name)
+ build_variant.display_task(generated_suite.display_task_name(),
+ execution_tasks=execution_tasks, distros=distros)
+
+ def generate_multiversion_task(self, generated_suite: GeneratedSuite,
+ build_variant: BuildVariant,
+ gen_params: MultiversionGenTaskParams) -> None:
+ """
+ Generate evergreen configuration for the given suite and add it to the build_variant.
+
+ :param generated_suite: Suite to add.
+ :param build_variant: Build variant to add generated configuration to.
+        :param gen_params: Parameters to configure how tasks are generated.
+ """
+ execution_tasks = self.multiversion_gen_task_service.generate_tasks(
+ generated_suite, gen_params)
+ distros = self._get_distro(build_variant.name, gen_params.use_large_distro,
+ gen_params.large_distro_name)
+ build_variant.display_task(generated_suite.display_task_name(),
+ execution_tasks=execution_tasks, distros=distros)
+
+ def generate_multiversion_burnin_task(self, generated_suite: GeneratedSuite,
+ gen_params: MultiversionGenTaskParams,
+ build_variant: BuildVariant) -> Set[Task]:
+ """
+ Generate burn_in configuration for the given suite and add it to the build_variant.
+
+ :param generated_suite: Suite to add.
+ :param build_variant: Build variant to add generated configuration to.
+        :param gen_params: Parameters to configure how tasks are generated.
+ :return: Set of tasks that were generated.
+ """
+ tasks = self.multiversion_gen_task_service.generate_tasks(generated_suite, gen_params)
+ distros = self._get_distro(build_variant.name, gen_params.use_large_distro,
+ gen_params.large_distro_name)
+ if gen_params.add_to_display_task:
+ build_variant.display_task(generated_suite.task_name, tasks, distros=distros)
+ else:
+ build_variant.add_tasks(tasks, distros=distros)
+ return tasks
+
+ def _get_distro(self, build_variant: str, use_large_distro: bool,
+ large_distro_name: Optional[str]) -> Optional[List[str]]:
+ """
+ Get the distros that the tasks should be run on.
+
+ :param build_variant: Name of build variant being generated.
+        :param use_large_distro: Whether a large distro should be used.
+        :param large_distro_name: Name of the large distro to use, if any.
+ :return: List of distros to run on.
+ """
+ if use_large_distro:
+ if large_distro_name:
+ return [large_distro_name]
+
+ if build_variant not in self.gen_config.build_variant_large_distro_exceptions:
+ print(NO_LARGE_DISTRO_ERR.format(build_variant=build_variant))
+ raise ValueError("Invalid Evergreen Configuration")
+
+ return None
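
The distro lookup above encodes three outcomes; a standalone restatement of the rules (the distro name is hypothetical, and this mirrors rather than replaces _get_distro):

from typing import List, Optional, Set

def pick_distros(use_large_distro: bool, large_distro_name: Optional[str], build_variant: str,
                 large_distro_exceptions: Set[str]) -> Optional[List[str]]:
    if use_large_distro:
        if large_distro_name:
            return [large_distro_name]  # run the generated sub-tasks on the named distro
        if build_variant not in large_distro_exceptions:
            raise ValueError("Invalid Evergreen Configuration")
    return None  # None means the build variant's default distro is used

assert pick_distros(True, "rhel80-large", "variant-a", set()) == ["rhel80-large"]
assert pick_distros(True, None, "variant-a", {"variant-a"}) is None
assert pick_distros(False, None, "variant-a", set()) is None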
diff --git a/buildscripts/task_generation/gen_task_validation.py b/buildscripts/task_generation/gen_task_validation.py
new file mode 100644
index 00000000000..2103c840acb
--- /dev/null
+++ b/buildscripts/task_generation/gen_task_validation.py
@@ -0,0 +1,36 @@
+"""Validation checks for generating tasks."""
+import inject
+from evergreen import EvergreenApi
+
+
+class GenTaskValidationService:
+ """A service for validation around generating tasks."""
+
+ @inject.autoparams()
+ def __init__(self, evg_api: EvergreenApi) -> None:
+ """
+ Initialize the service.
+
+ :param evg_api: Evergreen API client.
+ """
+ self.evg_api = evg_api
+
+ def should_task_be_generated(self, task_id: str) -> bool:
+ """
+ Determine if we should attempt to generate tasks.
+
+ If an evergreen task that calls 'generate.tasks' is restarted, the 'generate.tasks' command
+ will no-op. So, if we are in that state, we should avoid generating new configuration files
+        that would just confuse the user (since they would not be used).
+
+        :param task_id: ID of the task being run.
+ :return: Boolean of whether to generate tasks.
+ """
+ task = self.evg_api.task_by_id(task_id, fetch_all_executions=True)
+ # If any previous execution was successful, do not generate more tasks.
+ for i in range(task.execution):
+ task_execution = task.get_execution(i)
+ if task_execution.is_success():
+ return False
+
+ return True
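
A sketch of how a generator script might use this guard before doing any work; the evergreen config path and task id are hypothetical:

import inject
from evergreen import EvergreenApi, RetryingEvergreenApi

from buildscripts.task_generation.gen_task_validation import GenTaskValidationService

inject.configure(lambda binder: binder.bind(
    EvergreenApi, RetryingEvergreenApi.get_api(config_file=".evergreen.yml")))

validation = GenTaskValidationService()  # pylint: disable=no-value-for-parameter
if not validation.should_task_be_generated("some_task_id"):
    print("A previous execution already generated tasks; skipping.")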
diff --git a/buildscripts/task_generation/generated_config.py b/buildscripts/task_generation/generated_config.py
new file mode 100644
index 00000000000..6ffae330149
--- /dev/null
+++ b/buildscripts/task_generation/generated_config.py
@@ -0,0 +1,43 @@
+"""Generated configuration."""
+from typing import NamedTuple, List
+
+from buildscripts.util.fileops import write_file_to_dir
+
+
+class GeneratedFile(NamedTuple):
+ """
+ Generated configuration file.
+
+ file_name: Name of generated configuration.
+ content: Content of generated configuration.
+ """
+
+ file_name: str
+ content: str
+
+ def write_to_dir(self, directory: str) -> None:
+ """
+ Write this file to the given directory.
+
+ :param directory: Directory to write file to.
+ """
+ write_file_to_dir(directory, self.file_name, self.content)
+
+
+class GeneratedConfiguration(NamedTuple):
+ """
+    Container for the configuration needed to generate a task.
+
+ file_list: List of filenames and file contents needed to generate a task.
+ """
+
+ file_list: List[GeneratedFile]
+
+ def write_all_to_dir(self, directory: str) -> None:
+ """
+ Write all the configuration files to the given directory.
+
+ :param directory: Directory to write to.
+ """
+ for item in self.file_list:
+ item.write_to_dir(directory)
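
A minimal sketch of assembling and writing generated files; the names and contents are hypothetical:

from buildscripts.task_generation.generated_config import GeneratedConfiguration, GeneratedFile

config = GeneratedConfiguration([
    GeneratedFile(file_name="my_suite_0.yml", content="# generated resmoke suite"),
    GeneratedFile(file_name="selected_tests_config.json", content="{}"),
])
config.write_all_to_dir("generated_resmoke_config")  # writes each file into the directory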
diff --git a/buildscripts/task_generation/multiversion_util.py b/buildscripts/task_generation/multiversion_util.py
new file mode 100644
index 00000000000..f690d7247d1
--- /dev/null
+++ b/buildscripts/task_generation/multiversion_util.py
@@ -0,0 +1,44 @@
+"""Utilities for generating with multiversion tests."""
+from typing import List
+
+import inject
+
+from buildscripts.task_generation.resmoke_proxy import ResmokeProxyService
+
+REPL_MIXED_VERSION_CONFIGS = ["new-old-new", "new-new-old", "old-new-new"]
+SHARDED_MIXED_VERSION_CONFIGS = ["new-old-old-new"]
+
+
+class MultiversionUtilService:
+ """Utilities to working with multiversion tests."""
+
+ @inject.autoparams()
+ def __init__(self, resmoke_proxy: ResmokeProxyService) -> None:
+ """
+ Initialize the service.
+
+ :param resmoke_proxy: Resmoke proxy service.
+ """
+ self.resmoke_proxy = resmoke_proxy
+
+ def is_suite_sharded(self, suite_name: str) -> bool:
+ """Return true if a suite uses ShardedClusterFixture."""
+ source_config = self.resmoke_proxy.read_suite_config(suite_name)
+ return source_config["executor"]["fixture"]["class"] == "ShardedClusterFixture"
+
+ def get_version_configs_for_suite(self, suite_name: str) -> List[str]:
+ """
+ Get the version configs that apply for the given suite.
+
+ :param suite_name: Suite to get version configs for.
+ :return: List of version configs.
+ """
+ is_sharded = self.is_suite_sharded(suite_name)
+ return self.get_version_configs(is_sharded)
+
+ @staticmethod
+ def get_version_configs(is_sharded: bool) -> List[str]:
+ """Get the version configurations to use."""
+ if is_sharded:
+ return SHARDED_MIXED_VERSION_CONFIGS
+ return REPL_MIXED_VERSION_CONFIGS
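+
+# A minimal sketch of the expected results (derived from the constants above):
+#
+#   MultiversionUtilService.get_version_configs(is_sharded=True)
+#   # -> ["new-old-old-new"]
+#   MultiversionUtilService.get_version_configs(is_sharded=False)
+#   # -> ["new-old-new", "new-new-old", "old-new-new"]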
diff --git a/buildscripts/task_generation/resmoke_proxy.py b/buildscripts/task_generation/resmoke_proxy.py
new file mode 100644
index 00000000000..156f5eebe0e
--- /dev/null
+++ b/buildscripts/task_generation/resmoke_proxy.py
@@ -0,0 +1,147 @@
+"""A service to proxy requests to resmoke."""
+import os
+from copy import deepcopy
+from typing import List, Dict, Any, NamedTuple
+
+import inject
+import yaml
+
+import buildscripts.resmokelib.parser as _parser
+import buildscripts.resmokelib.suitesconfig as suitesconfig
+from buildscripts.task_generation.generated_config import GeneratedFile
+from buildscripts.util.fileops import read_yaml_file
+
+HEADER_TEMPLATE = """# DO NOT EDIT THIS FILE. All manual edits will be lost.
+# This file was generated by {file} from
+# {suite_file}.
+"""
+
+
+class ResmokeProxyConfig(NamedTuple):
+ """
+ Configuration for resmoke proxy.
+
+ resmoke_suite_dir: Directory that contains resmoke suite configurations.
+ """
+
+ resmoke_suite_dir: str
+
+
+class ResmokeProxyService:
+ """A service to proxy requests to resmoke."""
+
+ @inject.autoparams()
+ def __init__(self, proxy_config: ResmokeProxyConfig) -> None:
+ """
+ Initialize the service.
+
+ :param proxy_config: Configuration for the proxy.
+ """
+ _parser.set_run_options()
+ self.suitesconfig = suitesconfig
+ self.resmoke_suite_dir = proxy_config.resmoke_suite_dir
+
+ def list_tests(self, suite_name: str) -> List[str]:
+ """
+ List the test files that are part of the suite being split.
+
+ :param suite_name: Name of suite to query.
+ :return: List of test names that belong to the suite.
+ """
+ suite_config = self.suitesconfig.get_suite(suite_name)
+ test_list = []
+ for tests in suite_config.tests:
+ # Each entry in `tests` may be an individual test or a list of tests; handle both.
+ if isinstance(tests, list):
+ test_list.extend(tests)
+ else:
+ test_list.append(tests)
+
+ return test_list
+
+ def read_suite_config(self, suite_name: str) -> Dict[str, Any]:
+ """
+ Read the given resmoke suite configuration.
+
+ :param suite_name: Name of suite to read.
+ :return: Configuration of specified suite.
+ """
+ return read_yaml_file(os.path.join(self.resmoke_suite_dir, f"{suite_name}.yml"))
+
+ def render_suite_files(self, suites: List, suite_name: str, generated_suite_filename: str,
+ test_list: List[str], create_misc_suite: bool,
+ build_variant: str) -> List[GeneratedFile]:
+ """
+ Render the given list of suites.
+
+ This will create a GeneratedFile for each resmoke config file to create, holding
+ the file name and the contents of each file.
+
+ :param suites: List of suites to render.
+ :param suite_name: Base name of suites.
+ :param generated_suite_filename: File name to use for the generated suite file.
+ :param test_list: List of tests used in suites.
+ :param create_misc_suite: Whether or not a _misc suite file should be created.
+ :param build_variant: Build variant the suites will run on.
+ :return: List of rendered resmoke config files.
+ """
+ # pylint: disable=too-many-arguments
+ source_config = self.read_suite_config(suite_name)
+ suite_configs = [
+ GeneratedFile(
+ file_name=f"{os.path.basename(suite.name(len(suites)))}_{build_variant}.yml",
+ content=suite.generate_resmoke_config(source_config)) for suite in suites
+ ]
+ if create_misc_suite:
+ suite_configs.append(
+ GeneratedFile(
+ file_name=f"{generated_suite_filename}_misc_{build_variant}.yml",
+ content=generate_resmoke_suite_config(source_config, generated_suite_filename,
+ excludes=test_list)))
+ return suite_configs
+
+
+def update_suite_config(suite_config, roots=None, excludes=None):
+ """
+ Update suite config based on the roots and excludes passed in.
+
+ :param suite_config: suite_config to update.
+ :param roots: new roots to run, or None if roots should not be updated.
+ :param excludes: excludes to add, or None if excludes should not be included.
+ :return: updated suite_config
+ """
+ if roots:
+ suite_config["selector"]["roots"] = roots
+
+ if excludes:
+ # This must be a misc file. If the exclude_files section exists, extend it;
+ # otherwise, create it.
+ if "exclude_files" in suite_config["selector"] and \
+ suite_config["selector"]["exclude_files"]:
+ suite_config["selector"]["exclude_files"] += excludes
+ else:
+ suite_config["selector"]["exclude_files"] = excludes
+ else:
+ # If excludes was not specified, this must not be a misc file, so don't exclude anything.
+ if "exclude_files" in suite_config["selector"]:
+ del suite_config["selector"]["exclude_files"]
+
+ return suite_config
+
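+# A worked example of the selector updates above (hypothetical config; not part
+# of this patch):
+#
+#   config = {"selector": {"roots": ["jstests/core/*.js"]}}
+#   update_suite_config(config, roots=["jstests/core/a.js"])
+#   # -> selector "roots" becomes ["jstests/core/a.js"]
+#   update_suite_config(config, excludes=["jstests/core/b.js"])
+#   # -> selector gains "exclude_files": ["jstests/core/b.js"]
+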
+
+def generate_resmoke_suite_config(source_config, source_file, roots=None, excludes=None):
+ """
+ Read and evaluate the yaml suite file.
+
+ Override selector.roots and selector.excludes with the provided values. Write the results to
+ target_suite_name.
+
+ :param source_config: Config of suite to base generated config on.
+ :param source_file: Filename of source suite.
+ :param roots: Roots used to select tests for split suite.
+ :param excludes: Tests that should be excluded from split suite.
+ """
+ suite_config = update_suite_config(deepcopy(source_config), roots, excludes)
+
+ contents = HEADER_TEMPLATE.format(file=__file__, suite_file=source_file)
+ contents += yaml.safe_dump(suite_config, default_flow_style=False)
+ return contents
diff --git a/buildscripts/task_generation/suite_split.py b/buildscripts/task_generation/suite_split.py
new file mode 100644
index 00000000000..4b929f7bd9c
--- /dev/null
+++ b/buildscripts/task_generation/suite_split.py
@@ -0,0 +1,451 @@
+"""Tools for splitting suites into parallelizable sub-suites."""
+from __future__ import annotations
+
+import os
+from copy import deepcopy
+from datetime import datetime
+from typing import NamedTuple, Callable, Optional, List, Dict, Any
+
+import inject
+import requests
+import structlog
+import yaml
+from evergreen import EvergreenApi
+
+from buildscripts.task_generation.resmoke_proxy import ResmokeProxyService
+from buildscripts.task_generation.suite_split_strategies import SplitStrategy, FallbackStrategy
+from buildscripts.task_generation.timeout import TimeoutEstimate
+from buildscripts.util import taskname
+from buildscripts.util.teststats import HistoricTaskData, TestRuntime, normalize_test_name
+
+LOGGER = structlog.getLogger(__name__)
+
+GEN_SUFFIX = "_gen"
+CLEAN_EVERY_N_HOOK = "CleanEveryN"
+ASAN_SIGNATURE = "detect_leaks=1"
+HEADER_TEMPLATE = """# DO NOT EDIT THIS FILE. All manual edits will be lost.
+# This file was generated by {file} from
+# {suite_file}.
+"""
+
+# pylint: disable=too-many-arguments
+
+
+def remove_gen_suffix(task_name: str) -> str:
+ """Remove '_gen' suffix from task_name."""
+ if task_name.endswith(GEN_SUFFIX):
+ return task_name[:-len(GEN_SUFFIX)]
+ return task_name
+
+
+def update_suite_config(suite_config, roots=None, excludes=None):
+ """
+ Update suite config based on the roots and excludes passed in.
+
+ :param suite_config: suite_config to update.
+ :param roots: new roots to run, or None if roots should not be updated.
+ :param excludes: excludes to add, or None if excludes should not be included.
+ :return: updated suite_config
+ """
+ if roots:
+ suite_config["selector"]["roots"] = roots
+
+ if excludes:
+ # This must be a misc file. If the exclude_files section exists, extend it;
+ # otherwise, create it.
+ if "exclude_files" in suite_config["selector"] and \
+ suite_config["selector"]["exclude_files"]:
+ suite_config["selector"]["exclude_files"] += excludes
+ else:
+ suite_config["selector"]["exclude_files"] = excludes
+ else:
+ # If excludes was not specified, this must not be a misc file, so don't exclude anything.
+ if "exclude_files" in suite_config["selector"]:
+ del suite_config["selector"]["exclude_files"]
+
+ return suite_config
+
+
+class SubSuite(object):
+ """A suite of tests that can be run by evergreen."""
+
+ def __init__(
+ self,
+ index: int,
+ suite_name: str,
+ test_list: List[str],
+ tests_with_runtime_info: int,
+ max_test_runtime: float,
+ historic_runtime: float,
+ task_overhead: float,
+ ) -> None:
+ """
+ Initialize the object.
+
+ :param index: Sub-suite index.
+ :param suite_name: Name of suite.
+ :param test_list: List of tests to include in this sub-suite.
+ :param tests_with_runtime_info: Number of tests that have historic runtime info.
+ :param max_test_runtime: Runtime of the longest running test.
+ :param historic_runtime: Sum of the average runtime of all tests.
+ :param task_overhead: Runtime overhead to expect from task level hooks.
+ """
+ self.index = index
+ self.suite_name = suite_name
+ self.test_list = test_list
+ self.tests_with_runtime_info = tests_with_runtime_info
+ self.max_test_runtime = max_test_runtime
+ self.historic_runtime = historic_runtime
+ self.task_overhead = task_overhead
+
+ @classmethod
+ def from_test_list(cls, index: int, suite_name: str, test_list: List[str],
+ task_overhead: Optional[float],
+ runtime_list: Optional[List[TestRuntime]] = None) -> SubSuite:
+ """
+ Create a sub-suite from the given test list.
+
+ :param index: Index of sub-suite being created.
+ :param suite_name: Name of suite.
+ :param test_list: List of tests to include.
+ :param task_overhead: Runtime overhead to expect from task level hooks.
+ :param runtime_list: List of historic runtimes for tests in test_list.
+ :return: Sub-suite for the given tests.
+ """
+ runtime_count = 0
+ total_runtime = 0.0
+ max_runtime = 0.0
+ if runtime_list:
+ runtime_map = {test.test_name: test.runtime for test in runtime_list}
+ for test in test_list:
+ if test in runtime_map:
+ runtime_count += 1
+ total_runtime += runtime_map[test]
+ max_runtime = max(max_runtime, runtime_map[test])
+
+ return cls(index, suite_name, test_list, runtime_count, max_runtime, total_runtime,
+ task_overhead or 0.0)
+
+ def should_overwrite_timeout(self) -> bool:
+ """
+ Whether the timeout for this suite should be overwritten.
+
+ We should only overwrite the timeout if we have runtime info for all tests.
+ """
+ return len(self) == self.tests_with_runtime_info
+
+ def get_timeout_estimate(self) -> TimeoutEstimate:
+ """Get the estimated runtime of this task to for timeouts."""
+ if self.should_overwrite_timeout():
+ return TimeoutEstimate(max_test_runtime=self.max_test_runtime,
+ expected_task_runtime=self.historic_runtime + self.task_overhead)
+ return TimeoutEstimate.no_timeouts()
+
+ def get_runtime(self):
+ """Get the current average runtime of all the tests currently in this suite."""
+ return self.historic_runtime
+
+ def get_test_count(self) -> int:
+ """Get the number of tests currently in this suite."""
+ return len(self)
+
+ def __len__(self) -> int:
+ return len(self.test_list)
+
+ def name(self, total_suites: int, suite_name: Optional[str] = None) -> str:
+ """Get the name of this suite."""
+ if suite_name is None:
+ suite_name = self.suite_name
+ return taskname.name_generated_task(suite_name, self.index, total_suites)
+
+ def generate_resmoke_config(self, source_config: Dict) -> str:
+ """
+ Generate the contents of resmoke config for this suite.
+
+ :param source_config: Resmoke config to base generate config on.
+ :return: Resmoke config to run this suite.
+ """
+ suite_config = update_suite_config(deepcopy(source_config), roots=self.test_list)
+ contents = HEADER_TEMPLATE.format(file=__file__, suite_file=self.suite_name)
+ contents += yaml.safe_dump(suite_config, default_flow_style=False)
+ return contents
+
+
+class GeneratedSuite(NamedTuple):
+ """
+ Collection of sub-suites generated from a parent suite.
+
+ sub_suites: List of sub-suites comprising the whole suite.
+ build_variant: Name of build variant suite will run on.
+ task_name: Name of task generating suite.
+ suite_name: Name of suite.
+ filename: File name containing suite config.
+ include_build_variant_in_name: Include the build variant as part of display task names.
+ """
+
+ sub_suites: List[SubSuite]
+ build_variant: str
+ task_name: str
+ suite_name: str
+ filename: str
+ include_build_variant_in_name: bool = False
+
+ def display_task_name(self) -> str:
+ """Get the display name to use for this task."""
+ base_name = remove_gen_suffix(self.task_name)
+ if self.include_build_variant_in_name:
+ return f"{base_name}_{self.build_variant}"
+ return base_name
+
+ def __len__(self) -> int:
+ """Get the number of sub-suites."""
+ return len(self.sub_suites)
+
+
+class SuiteSplitParameters(NamedTuple):
+ """
+ Parameters for splitting resmoke suites.
+
+ build_variant: Build variant generated for.
+ task_name: Name of task being split.
+ suite_name: Name of suite being split.
+ filename: Filename of suite configuration.
+ is_asan: Whether the build variant being generated on is ASAN.
+ test_file_filter: Optional filter describing which tests should be included.
+ """
+
+ build_variant: str
+ task_name: str
+ suite_name: str
+ filename: str
+ is_asan: bool = False
+ test_file_filter: Optional[Callable[[str], bool]] = None
+
+
+class SuiteSplitConfig(NamedTuple):
+ """
+ Global configuration for generating suites.
+
+ evg_project: Evergreen project.
+ target_resmoke_time: Target runtime for generated sub-suites.
+ max_sub_suites: Max number of sub-suites to generate.
+ max_tests_per_suite: Max number of tests to put in a single sub-suite.
+ start_date: Start date to query for test history.
+ end_date: End date to query for test history.
+ default_to_fallback: Use the fallback method for splitting tasks rather than dynamic splitting.
+ include_build_variant_in_name: Include the build variant as part of display task names.
+ """
+
+ evg_project: str
+ target_resmoke_time: int
+ max_sub_suites: int
+ max_tests_per_suite: int
+ start_date: datetime
+ end_date: datetime
+ default_to_fallback: bool = False
+ include_build_variant_in_name: bool = False
+
+
+class SuiteSplitService:
+ """A service for splitting resmoke suites into sub-suites that can be run in parallel."""
+
+ @inject.autoparams()
+ def __init__(
+ self,
+ evg_api: EvergreenApi,
+ resmoke_proxy: ResmokeProxyService,
+ config: SuiteSplitConfig,
+ split_strategy: SplitStrategy,
+ fallback_strategy: FallbackStrategy,
+ ) -> None:
+ """
+ Initialize the suite split service.
+
+ :param evg_api: Evergreen API client.
+ :param resmoke_proxy: Resmoke Proxy service.
+ :param config: Configuration options of how to split suites.
+ """
+ self.evg_api = evg_api
+ self.resmoke_proxy = resmoke_proxy
+ self.config = config
+ self.split_strategy = split_strategy
+ self.fallback_strategy = fallback_strategy
+
+ def split_suite(self, params: SuiteSplitParameters) -> GeneratedSuite:
+ """
+ Split the given resmoke suite into multiple sub-suites.
+
+ :param params: Description of suite to split.
+ :return: GeneratedSuite containing the sub-suites split from the given suite.
+ """
+ if self.config.default_to_fallback:
+ return self.calculate_fallback_suites(params)
+
+ try:
+ evg_stats = HistoricTaskData.from_evg(self.evg_api, self.config.evg_project,
+ self.config.start_date, self.config.end_date,
+ params.task_name, params.build_variant)
+ if not evg_stats:
+ LOGGER.debug("No test history, using fallback suites")
+ # This is probably a new suite; since there is no test history, just use
+ # the fallback values.
+ return self.calculate_fallback_suites(params)
+ return self.calculate_suites_from_evg_stats(evg_stats, params)
+ except requests.HTTPError as err:
+ if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
+ # Evergreen may return a 503 when the service is degraded.
+ # We fall back to splitting the tests into a fixed number of suites.
+ LOGGER.warning("Received 503 from Evergreen, "
+ "dividing the tests evenly among suites")
+ return self.calculate_fallback_suites(params)
+ else:
+ raise
+
+ def calculate_fallback_suites(self, params: SuiteSplitParameters) -> GeneratedSuite:
+ """Divide tests into a fixed number of suites."""
+ LOGGER.debug("Splitting tasks based on fallback", max_sub_suites=self.config.max_sub_suites)
+ test_list = self.resmoke_proxy.list_tests(params.suite_name)
+ if params.test_file_filter:
+ test_list = [test for test in test_list if params.test_file_filter(test)]
+
+ test_lists = self.fallback_strategy(test_list, self.config.max_sub_suites)
+ return self.test_lists_to_suite(test_lists, params, [])
+
+ def calculate_suites_from_evg_stats(self, test_stats: HistoricTaskData,
+ params: SuiteSplitParameters) -> GeneratedSuite:
+ """
+ Divide tests into suites that can be run in less than the specified execution time.
+
+ :param test_stats: Historical test results for task being split.
+ :param params: Description of how to split the suite.
+ :return: GeneratedSuite containing the calculated sub-suites.
+ """
+ execution_time_secs = self.config.target_resmoke_time * 60
+ tests_runtimes = self.filter_tests(test_stats.get_tests_runtimes(), params)
+ if not tests_runtimes:
+ LOGGER.debug("No test runtimes after filter, using fallback")
+ return self.calculate_fallback_suites(params)
+
+ test_lists = self.split_strategy(tests_runtimes, execution_time_secs,
+ self.config.max_sub_suites,
+ self.config.max_tests_per_suite)
+
+ return self.test_lists_to_suite(test_lists, params, tests_runtimes, test_stats)
+
+ def test_lists_to_suite(self, test_lists: List[List[str]], params: SuiteSplitParameters,
+ tests_runtimes: List[TestRuntime],
+ test_stats: Optional[HistoricTaskData] = None) -> GeneratedSuite:
+ """
+ Create sub-suites for the given test lists.
+
+ :param test_lists: List of test lists to create suites for.
+ :param params: Parameters for suite creation.
+ :param tests_runtimes: Historic runtimes of tests.
+ :param test_stats: Other historic task data.
+ :return: Generated suite for the sub-suites specified.
+ """
+ suites = [
+ SubSuite.from_test_list(
+ index,
+ params.suite_name,
+ test_list,
+ self.get_task_hook_overhead(params.suite_name, params.is_asan, len(test_list),
+ test_stats),
+ tests_runtimes,
+ ) for index, test_list in enumerate(test_lists)
+ ]
+
+ return GeneratedSuite(
+ sub_suites=suites,
+ build_variant=params.build_variant,
+ task_name=params.task_name,
+ suite_name=params.suite_name,
+ filename=params.filename,
+ include_build_variant_in_name=self.config.include_build_variant_in_name,
+ )
+
+ def filter_tests(self, tests_runtimes: List[TestRuntime],
+ params: SuiteSplitParameters) -> List[TestRuntime]:
+ """
+ Filter out tests that do not exist in the filesystem.
+
+ :param tests_runtimes: List of tests with runtimes to filter.
+ :param params: Suite split parameters.
+ :return: Test list with unneeded tests filtered out.
+ """
+ if params.test_file_filter:
+ tests_runtimes = [
+ test for test in tests_runtimes if params.test_file_filter(test.test_name)
+ ]
+ all_tests = [
+ normalize_test_name(test) for test in self.resmoke_proxy.list_tests(params.suite_name)
+ ]
+ return [
+ info for info in tests_runtimes
+ if os.path.exists(info.test_name) and info.test_name in all_tests
+ ]
+
+ def get_task_hook_overhead(self, suite_name: str, is_asan: bool, test_count: int,
+ historic_stats: Optional[HistoricTaskData]) -> float:
+ """
+ Calculate how much overhead from task-level hooks each suite should account for.
+
+ Certain test hooks need to be accounted for on the task level instead of the test level
+ in order to calculate accurate timeouts. So we will add details about those hooks to
+ each suite here.
+
+ :param suite_name: Name of suite being generated.
+ :param is_asan: Whether ASAN is being used.
+ :param test_count: Number of tests in sub-suite.
+ :param historic_stats: Historic runtime data of the suite.
+ :return: Runtime (in seconds) of task-level hook overhead to expect.
+ """
+ # The CleanEveryN hook is run every 'N' tests. The runtime of the
+ # hook will be associated with whichever test happens to be running, which could be
+ # different every run. So we need to take its runtime into account at the task level.
+ if historic_stats is None:
+ return 0.0
+
+ clean_every_n_cadence = self._get_clean_every_n_cadence(suite_name, is_asan)
+ avg_clean_every_n_runtime = historic_stats.get_avg_hook_runtime(CLEAN_EVERY_N_HOOK)
+ LOGGER.debug("task hook overhead", cadence=clean_every_n_cadence,
+ runtime=avg_clean_every_n_runtime)
+ if avg_clean_every_n_runtime != 0:
+ n_expected_runs = test_count / clean_every_n_cadence
+ return n_expected_runs * avg_clean_every_n_runtime
+ return 0.0
+
+ def _get_clean_every_n_cadence(self, suite_name: str, is_asan: bool) -> int:
+ """
+ Get the N value for the CleanEveryN hook.
+
+ :param suite_name: Name of suite being generated.
+ :param is_asan: Whether ASAN is being used.
+ :return: How frequently the CleanEveryN hook is run.
+ """
+ # Default to 1, which is the worst case meaning CleanEveryN would run for every test.
+ clean_every_n_cadence = 1
+ if is_asan:
+ # ASAN runs hard-code N to 1. See `resmokelib/testing/hooks/cleanup.py`.
+ return clean_every_n_cadence
+
+ clean_every_n_config = self._get_hook_config(suite_name, CLEAN_EVERY_N_HOOK)
+ if clean_every_n_config:
+ clean_every_n_cadence = clean_every_n_config.get("n", 1)
+
+ return clean_every_n_cadence
+
+ def _get_hook_config(self, suite_name: str, hook_name: str) -> Optional[Dict[str, Any]]:
+ """
+ Get the configuration for the given hook.
+
+ :param suite_name: Name of suite containing the hook.
+ :param hook_name: Name of hook to query.
+ :return: Configuration for hook, if it exists.
+ """
+ hooks_config = self.resmoke_proxy.read_suite_config(suite_name).get("executor",
+ {}).get("hooks")
+ if hooks_config:
+ for hook in hooks_config:
+ if hook.get("class") == hook_name:
+ return hook
+
+ return None
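+
+# A minimal usage sketch (hypothetical names; in production these services are
+# wired up via `inject`):
+#
+#   params = SuiteSplitParameters(build_variant="variant", task_name="my_task_gen",
+#                                 suite_name="my_suite", filename="my_suite")
+#   generated_suite = suite_split_service.split_suite(params)
+#   for sub_suite in generated_suite.sub_suites:
+#       print(sub_suite.name(len(generated_suite)))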
diff --git a/buildscripts/task_generation/suite_split_strategies.py b/buildscripts/task_generation/suite_split_strategies.py
new file mode 100644
index 00000000000..305282ba244
--- /dev/null
+++ b/buildscripts/task_generation/suite_split_strategies.py
@@ -0,0 +1,118 @@
+"""Strategies for splitting tests into multiple sub-suites."""
+from typing import List, Callable, Optional
+
+import structlog
+
+from buildscripts.util.teststats import TestRuntime
+
+LOGGER = structlog.getLogger(__name__)
+
+SplitStrategy = Callable[[List[TestRuntime], int, int, int], List[List[str]]]
+FallbackStrategy = Callable[[List[str], int], List[List[str]]]
+
+
+def divide_remaining_tests_among_suites(remaining_tests_runtimes: List[TestRuntime],
+ suites: List[List[TestRuntime]]) -> None:
+ """
+ Divide the given list of tests among the given suites in round-robin order.
+
+ :param remaining_tests_runtimes: Tests that still need to be added to a suite.
+ :param suites: Lists of tests in their test suites.
+ """
+ suite_idx = 0
+ for test_instance in remaining_tests_runtimes:
+ current_suite = suites[suite_idx]
+ current_suite.append(test_instance)
+ suite_idx += 1
+ if suite_idx >= len(suites):
+ suite_idx = 0
+
+
+def _new_suite_needed(current_suite: List[TestRuntime], test_runtime: float,
+ max_suite_runtime: float, max_tests_per_suite: Optional[int]) -> bool:
+ """
+ Check whether a new suite should be started instead of adding the given test to the current suite.
+
+ :param current_suite: Suite currently being added to.
+ :param test_runtime: Runtime of test being added.
+ :param max_suite_runtime: Max runtime of a single suite.
+ :param max_tests_per_suite: Max number of tests in a suite.
+ :return: True if a new test suite should be created.
+ """
+ current_runtime = sum(test.runtime for test in current_suite)
+ if current_runtime + test_runtime > max_suite_runtime:
+ # Will adding this test put us over the target runtime?
+ return True
+
+ if max_tests_per_suite and len(current_suite) + 1 > max_tests_per_suite:
+ # Will adding this test put us over the max number of tests?
+ return True
+
+ return False
+
+
+def greedy_division(tests_runtimes: List[TestRuntime], max_time_seconds: float,
+ max_suites: Optional[int] = None,
+ max_tests_per_suite: Optional[int] = None) -> List[List[str]]:
+ """
+ Divide the given tests into suites.
+
+ Each suite should be able to execute in less than the max time specified. If a single
+ test has a runtime greater than `max_time_seconds`, it will be run in a suite on its own.
+
+ If max_suites is reached before assigning all tests to a suite, the remaining tests will be
+ divided up among the created suites.
+
+ Note: If `max_suites` is hit, suites may have more tests than `max_tests_per_suite` and may have
+ runtimes longer than `max_time_seconds`.
+
+ :param tests_runtimes: List of tuples containing test names and test runtimes.
+ :param max_time_seconds: Maximum runtime to add to a single bucket.
+ :param max_suites: Maximum number of suites to create.
+ :param max_tests_per_suite: Maximum number of tests to add to a single suite.
+ :return: Lists of test names representing the grouping of tests into suites.
+ """
+ suites = []
+ last_test_processed = len(tests_runtimes)
+ LOGGER.debug("Determines suites for runtime", max_runtime_seconds=max_time_seconds,
+ max_suites=max_suites, max_tests_per_suite=max_tests_per_suite)
+ current_test_list = []
+ for idx, test_instance in enumerate(tests_runtimes):
+ LOGGER.debug("Adding test", test=test_instance)
+ if _new_suite_needed(current_test_list, test_instance.runtime, max_time_seconds,
+ max_tests_per_suite):
+ LOGGER.debug("Finished suite", test_runtime=test_instance.runtime,
+ max_time=max_time_seconds)
+ if current_test_list:
+ suites.append(current_test_list)
+ current_test_list = []
+ if max_suites and len(suites) >= max_suites:
+ last_test_processed = idx
+ break
+
+ current_test_list.append(test_instance)
+
+ if current_test_list:
+ suites.append(current_test_list)
+
+ if max_suites and last_test_processed < len(tests_runtimes):
+ # We must have hit the max suite limit; divide the remaining tests among the existing suites round-robin.
+ divide_remaining_tests_among_suites(tests_runtimes[last_test_processed:], suites)
+
+ return [[test.test_name for test in test_list] for test_list in suites]
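+
+# A worked example (hypothetical test names and runtimes; not part of this
+# patch): with max_time_seconds=10, tests with runtimes
+# [("t1", 6), ("t2", 5), ("t3", 4), ("t4", 3)] are split greedily into
+# [["t1"], ["t2", "t3"], ["t4"]], since adding "t2" after "t1" (11s) or "t4"
+# after "t2" and "t3" (12s) would exceed the target runtime.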
+
+
+def round_robin_fallback(test_list: List[str], max_suites: int) -> List[List[str]]:
+ """
+ Split the tests among a given number of suites picking them round robin.
+
+ :param test_list: List of tests to divide.
+ :param max_suites: Number of suites to create.
+ :return: List of which tests should be in each suite.
+ """
+ num_suites = min(len(test_list), max_suites)
+ test_lists = [[] for _ in range(num_suites)]
+ for idx, test_file in enumerate(test_list):
+ test_lists[idx % num_suites].append(test_file)
+
+ return test_lists
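+
+# A worked example (hypothetical test names; not part of this patch):
+#
+#   round_robin_fallback(["a", "b", "c", "d", "e"], max_suites=2)
+#   # -> [["a", "c", "e"], ["b", "d"]]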
diff --git a/buildscripts/task_generation/task_types/__init__.py b/buildscripts/task_generation/task_types/__init__.py
new file mode 100644
index 00000000000..4b7a2bb941b
--- /dev/null
+++ b/buildscripts/task_generation/task_types/__init__.py
@@ -0,0 +1 @@
+"""Empty."""
diff --git a/buildscripts/task_generation/task_types/fuzzer_tasks.py b/buildscripts/task_generation/task_types/fuzzer_tasks.py
new file mode 100644
index 00000000000..f7ece45c0b7
--- /dev/null
+++ b/buildscripts/task_generation/task_types/fuzzer_tasks.py
@@ -0,0 +1,116 @@
+"""Task generation for fuzzer tasks."""
+from typing import NamedTuple, Set, Optional, Dict
+
+from shrub.v2 import Task, FunctionCall, TaskDependency
+
+from buildscripts.util import taskname
+
+
+class FuzzerTask(NamedTuple):
+ """
+ Evergreen configuration for a generated fuzzer task.
+
+ task_name: Name of fuzzer task that was generated.
+ sub_tasks: Set of sub-tasks composing the fuzzer task.
+ """
+
+ task_name: str
+ sub_tasks: Set[Task]
+
+
+class FuzzerGenTaskParams(NamedTuple):
+ """
+ Parameters to generate a fuzzer task.
+
+ task_name: Name of task being generated.
+ variant: Name of build variant being generated on.
+ suite: Resmoke suite for generated tests.
+ num_files: Number of javascript files fuzzer should generate.
+ num_tasks: Number of sub-tasks fuzzer should generate.
+ resmoke_args: Arguments to pass to resmoke invocation.
+ npm_command: NPM command to perform fuzzer execution.
+ jstestfuzz_vars: Arguments to pass to fuzzer invocation.
+ continue_on_failure: Should generated tests continue running after hitting an error.
+ resmoke_jobs_max: Maximum number of jobs resmoke should execute in parallel.
+ should_shuffle: Should tests be executed out of order.
+ timeout_secs: Timeout before test execution is considered hung.
+ use_multiversion: Multiversion configuration if tests should run in multiversion mode.
+ use_large_distro: Should tests be run on a large distro.
+ large_distro_name: Name of the large distro to run on.
+ config_location: Location where generated task configuration will be stored.
+ add_to_display_task: Should generated tasks be grouped in a display task.
+ """
+
+ task_name: str
+ variant: str
+ suite: str
+ num_files: int
+ num_tasks: int
+ resmoke_args: str
+ npm_command: str
+ jstestfuzz_vars: Optional[str]
+ continue_on_failure: bool
+ resmoke_jobs_max: int
+ should_shuffle: bool
+ timeout_secs: int
+ use_multiversion: Optional[str]
+ use_large_distro: Optional[bool]
+ large_distro_name: Optional[str]
+ config_location: str
+ add_to_display_task: bool = True
+
+ def jstestfuzz_params(self) -> Dict[str, str]:
+ """Build a dictionary of parameters to pass to jstestfuzz."""
+ return {
+ "jstestfuzz_vars": f"--numGeneratedFiles {self.num_files} {self.jstestfuzz_vars or ''}",
+ "npm_command": self.npm_command,
+ }
+
+
+class FuzzerGenTaskService:
+ """A service for generating fuzzer tasks."""
+
+ def generate_tasks(self, params: FuzzerGenTaskParams) -> FuzzerTask:
+ """
+ Generate evergreen tasks for fuzzers based on the options given.
+
+ :param params: Parameters for how task should be generated.
+ :return: FuzzerTask containing the generated sub-tasks.
+ """
+ sub_tasks = {self.build_fuzzer_sub_task(index, params) for index in range(params.num_tasks)}
+ return FuzzerTask(task_name=params.task_name, sub_tasks=sub_tasks)
+
+ @staticmethod
+ def build_fuzzer_sub_task(task_index: int, params: FuzzerGenTaskParams) -> Task:
+ """
+ Build a shrub task to run the fuzzer.
+
+ :param task_index: Index of sub task being generated.
+ :param params: Parameters describing how tasks should be generated.
+ :return: Shrub task to run the fuzzer.
+ """
+ sub_task_name = taskname.name_generated_task(params.task_name, task_index, params.num_tasks,
+ params.variant)
+
+ suite_arg = f"--suites={params.suite}"
+ run_tests_vars = {
+ "continue_on_failure": params.continue_on_failure,
+ "resmoke_args": f"{suite_arg} {params.resmoke_args}",
+ "resmoke_jobs_max": params.resmoke_jobs_max,
+ "should_shuffle": params.should_shuffle,
+ "task_path_suffix": params.use_multiversion,
+ "timeout_secs": params.timeout_secs,
+ "task": params.task_name,
+ "gen_task_config_location": params.config_location,
+ } # yapf: disable
+
+ commands = [
+ FunctionCall("do setup"),
+ FunctionCall("configure evergreen api credentials")
+ if params.use_multiversion else None,
+ FunctionCall("do multiversion setup") if params.use_multiversion else None,
+ FunctionCall("setup jstestfuzz"),
+ FunctionCall("run jstestfuzz", params.jstestfuzz_params()),
+ FunctionCall("run generated tests", run_tests_vars)
+ ]
+ commands = [command for command in commands if command is not None]
+
+ return Task(sub_task_name, commands, {TaskDependency("archive_dist_test_debug")})
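+
+# A minimal sketch of the expected output (hypothetical parameter values; not
+# part of this patch): generate_tasks(params) with params.num_tasks == 5 yields
+# a FuzzerTask whose sub_tasks set holds 5 shrub Tasks, each running the
+# "setup jstestfuzz", "run jstestfuzz" and "run generated tests" functions.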
diff --git a/buildscripts/task_generation/task_types/gentask_options.py b/buildscripts/task_generation/task_types/gentask_options.py
new file mode 100644
index 00000000000..b78a7974979
--- /dev/null
+++ b/buildscripts/task_generation/task_types/gentask_options.py
@@ -0,0 +1,41 @@
+"""Options for generating evergreen tasks."""
+import os
+from typing import NamedTuple, Optional, List
+
+
+class GenTaskOptions(NamedTuple):
+ """
+ Options for how Evergreen tasks should be generated.
+
+ create_misc_suite: Should "misc" suites be generated.
+ is_patch: This generation is part of a patch build.
+ generated_config_dir: Path to directory that configuration files should be written.
+ use_default_timeouts: Don't overwrite task timeouts.
+ """
+
+ create_misc_suite: bool
+ is_patch: bool
+ generated_config_dir: str
+ use_default_timeouts: bool
+
+ def suite_location(self, suite_name: str) -> str:
+ """
+ Get the path to the given resmoke suite configuration file.
+
+ :param suite_name: Name of resmoke suite to query.
+ :return: Path to given resmoke suite.
+ """
+ return self.generated_file_location(os.path.basename(suite_name))
+
+ def generated_file_location(self, base_file: str) -> str:
+ """
+ Get the path to the given base file.
+
+ :param base_file: Base file to find.
+ :return: Path to the given file.
+ """
+ # Evergreen always uses a unix shell, even on Windows, so instead of using os.path.join
+ # here, just use the forward slash; otherwise the path separator will be treated as
+ # the escape character on Windows.
+ return "/".join([self.generated_config_dir, base_file])
diff --git a/buildscripts/task_generation/task_types/multiversion_tasks.py b/buildscripts/task_generation/task_types/multiversion_tasks.py
new file mode 100644
index 00000000000..3d396fadd02
--- /dev/null
+++ b/buildscripts/task_generation/task_types/multiversion_tasks.py
@@ -0,0 +1,140 @@
+"""Task generation for multiversion resmoke tasks."""
+from typing import NamedTuple, Set, List, Optional
+
+import inject
+from shrub.v2 import Task, FunctionCall, TaskDependency
+
+from buildscripts.resmokelib.multiversionconstants import REQUIRES_FCV_TAG
+from buildscripts.task_generation.suite_split import GeneratedSuite
+from buildscripts.task_generation.task_types.gentask_options import GenTaskOptions
+
+BACKPORT_REQUIRED_TAG = "backport_required_multiversion"
+EXCLUDE_TAGS = f"{REQUIRES_FCV_TAG},multiversion_incompatible,{BACKPORT_REQUIRED_TAG}"
+EXCLUDE_TAGS_FILE = "multiversion_exclude_tags.yml"
+
+
+class MultiversionGenTaskParams(NamedTuple):
+ """
+ Parameters for how multiversion tests should be generated.
+
+ mixed_version_configs: List of version configurations to generate.
+ is_sharded: Whether sharded tests are being generated.
+ resmoke_args: Arguments to pass to resmoke.
+ parent_task_name: Name of parent task containing all sub tasks.
+ origin_suite: Resmoke suite generated tests are based off.
+ use_large_distro: Whether generated tasks should be run on a "large" distro.
+ large_distro_name: Name of the large distro to run on.
+ config_location: Location where generated task configuration will be stored.
+ name_prefix: Prefix to add to generated sub-task names.
+ test_list: Explicit list of tests to run.
+ create_misc_suite: Whether a _misc suite should be generated.
+ add_to_display_task: Whether generated tasks should be grouped in a display task.
+ """
+
+ mixed_version_configs: List[str]
+ is_sharded: bool
+ resmoke_args: str
+ parent_task_name: str
+ origin_suite: str
+ use_large_distro: bool
+ large_distro_name: Optional[str]
+ config_location: str
+ name_prefix: Optional[str] = None
+ test_list: Optional[str] = None
+ create_misc_suite: bool = True
+ add_to_display_task: bool = True
+
+ def get_multiversion_resmoke_args(self) -> str:
+ """Return resmoke args used to configure a cluster for multiversion testing."""
+ if self.is_sharded:
+ return "--numShards=2 --numReplSetNodes=2 "
+ return "--numReplSetNodes=3 --linearChain=on "
+
+
+class MultiversionGenTaskService:
+ """A service for generating multiversion tests."""
+
+ @inject.autoparams()
+ def __init__(self, gen_task_options: GenTaskOptions) -> None:
+ """
+ Initialize the service.
+
+ :param gen_task_options: Options for how tasks should be generated.
+ """
+ self.gen_task_options = gen_task_options
+
+ def generate_tasks(self, suite: GeneratedSuite, params: MultiversionGenTaskParams) -> Set[Task]:
+ """
+ Generate multiversion tasks for the given suite.
+
+ :param suite: Suite to generate multiversion tasks for.
+ :param params: Parameters for how tasks should be generated.
+ :return: Evergreen configuration to generate the specified tasks.
+ """
+ sub_tasks = set()
+ for version_config in params.mixed_version_configs:
+ for sub_suite in suite.sub_suites:
+ # Generate the newly divided test suites
+ sub_suite_name = sub_suite.name(len(suite))
+ sub_task_name = f"{sub_suite_name}_{version_config}_{suite.build_variant}"
+ if params.name_prefix is not None:
+ sub_task_name = f"{params.name_prefix}:{sub_task_name}"
+
+ sub_tasks.add(
+ self._generate_task(sub_task_name, sub_suite_name, version_config, params,
+ suite.build_variant))
+
+ if params.create_misc_suite:
+ # Also generate the misc task.
+ misc_suite_name = f"{params.origin_suite}_misc"
+ misc_task_name = f"{misc_suite_name}_{version_config}_{suite.build_variant}"
+ sub_tasks.add(
+ self._generate_task(misc_task_name, misc_suite_name, version_config, params,
+ suite.build_variant))
+
+ return sub_tasks
+
+ # pylint: disable=too-many-arguments
+ def _generate_task(self, sub_task_name: str, sub_suite_name: str, mixed_version_config: str,
+ params: MultiversionGenTaskParams, build_variant: str) -> Task:
+ """
+ Generate a sub task to be run with the provided suite and mixed version config.
+
+ :param sub_task_name: Name of task being generated.
+ :param sub_suite_name: Name of suite to run.
+ :param mixed_version_config: Versions task is being generated for.
+ :param params: Parameters for how tasks should be generated.
+ :return: Shrub configuration for task specified.
+ """
+ suite_file = self.gen_task_options.suite_location(f"{sub_suite_name}_{build_variant}.yml")
+
+ run_tests_vars = {
+ "resmoke_args": self._build_resmoke_args(suite_file, mixed_version_config, params),
+ "task": params.parent_task_name,
+ "gen_task_config_location": params.config_location,
+ }
+
+ commands = [
+ FunctionCall("do setup"),
+ # Fetch and download the proper mongod binaries before running multiversion tests.
+ FunctionCall("configure evergreen api credentials"),
+ FunctionCall("do multiversion setup"),
+ FunctionCall("run generated tests", run_tests_vars),
+ ]
+
+ return Task(sub_task_name, commands, {TaskDependency("archive_dist_test_debug")})
+
+ def _build_resmoke_args(self, suite_file: str, mixed_version_config: str,
+ params: MultiversionGenTaskParams) -> str:
+ """
+ Get the resmoke args needed to run the specified task.
+
+ :param suite_file: Path to resmoke suite configuration to run.
+ :param mixed_version_config: Versions task is being generated for.
+ :param params: Parameters for how tasks should be generated.
+ :return: Arguments to pass to resmoke to run the generated task.
+ """
+ tag_file_location = self.gen_task_options.generated_file_location(EXCLUDE_TAGS_FILE)
+
+ return (
+ f"{params.resmoke_args} "
+ f" --suite={suite_file} "
+ f" --mixedBinVersions={mixed_version_config}"
+ f" --excludeWithAnyTags={EXCLUDE_TAGS},{params.parent_task_name}_{BACKPORT_REQUIRED_TAG} "
+ f" --tagFile={tag_file_location} "
+ f" --originSuite={params.origin_suite} "
+ f" {params.get_multiversion_resmoke_args()} "
+ f" {params.test_list if params.test_list else ''} ")
diff --git a/buildscripts/task_generation/task_types/resmoke_tasks.py b/buildscripts/task_generation/task_types/resmoke_tasks.py
new file mode 100644
index 00000000000..c583364426e
--- /dev/null
+++ b/buildscripts/task_generation/task_types/resmoke_tasks.py
@@ -0,0 +1,177 @@
+"""Task generation for split resmoke tasks."""
+import os
+import re
+from typing import Set, Any, Dict, NamedTuple, Optional, List, Match
+
+import inject
+import structlog
+from shrub.v2 import Task, TaskDependency
+
+from buildscripts.patch_builds.task_generation import resmoke_commands
+from buildscripts.task_generation.suite_split import GeneratedSuite, SubSuite
+from buildscripts.task_generation.task_types.gentask_options import GenTaskOptions
+from buildscripts.task_generation.timeout import TimeoutEstimate
+from buildscripts.util import taskname
+
+LOGGER = structlog.getLogger(__name__)
+
+
+def string_contains_any_of_args(string: str, args: List[str]) -> bool:
+ """
+ Return whether the string contains any of the given args.
+
+ :param string: String being checked.
+ :param args: Args being analyzed.
+ :return: True if any args are found in the string.
+ """
+ return any(arg in string for arg in args)
+
+
+class ResmokeGenTaskParams(NamedTuple):
+ """
+ Parameters describing how a specific resmoke suite should be generated.
+
+ use_large_distro: Whether generated tasks should be run on a "large" distro.
+ large_distro_name: Name of the large distro to run on.
+ use_multiversion: Multiversion configuration if generated tasks are multiversion.
+ repeat_suites: How many times generated suites should be repeated.
+ resmoke_args: Arguments to pass to resmoke in generated tasks.
+ resmoke_jobs_max: Max number of jobs that resmoke should execute in parallel.
+ config_location: Location where generated task configuration will be stored.
+ """
+
+ use_large_distro: bool
+ large_distro_name: Optional[str]
+ use_multiversion: Optional[str]
+ repeat_suites: int
+ resmoke_args: str
+ resmoke_jobs_max: Optional[int]
+ config_location: str
+
+ def generate_resmoke_args(self, suite_file: str, suite_name: str, build_variant: str) -> str:
+ """
+ Generate the resmoke args for the given suite.
+
+ :param suite_file: File containing configuration for test suite.
+ :param suite_name: Name of suite being generated.
+ :param build_variant: Build Variant being generated for.
+ :return: arguments to pass to resmoke.
+ """
+ resmoke_args = (f"--suite={suite_file}_{build_variant}.yml --originSuite={suite_name} "
+ f" {self.resmoke_args}")
+ if self.repeat_suites and not string_contains_any_of_args(resmoke_args,
+ ["repeatSuites", "repeat"]):
+ resmoke_args += f" --repeatSuites={self.repeat_suites} "
+
+ return resmoke_args
+
+
+class ResmokeGenTaskService:
+ """A service to generated split resmoke suites."""
+
+ @inject.autoparams()
+ def __init__(self, gen_task_options: GenTaskOptions) -> None:
+ """
+ Initialize the service.
+
+ :param gen_task_options: Global options for how tasks should be generated.
+ """
+ self.gen_task_options = gen_task_options
+
+ def generate_tasks(self, generated_suite: GeneratedSuite,
+ params: ResmokeGenTaskParams) -> Set[Task]:
+ """
+ Build a set of shrub tasks for all the sub-tasks.
+
+ :param generated_suite: Suite to generate tasks for.
+ :param params: Parameters describing how tasks should be generated.
+ :return: Set of shrub tasks to generate the given suite.
+ """
+ tasks = {
+ self._create_sub_task(suite, generated_suite, params)
+ for suite in generated_suite.sub_suites
+ }
+
+ if self.gen_task_options.create_misc_suite:
+ # Add the misc suite
+ misc_suite_name = f"{os.path.basename(generated_suite.suite_name)}_misc"
+ misc_task_name = f"{generated_suite.task_name}_misc_{generated_suite.build_variant}"
+ tasks.add(
+ self._generate_task(misc_suite_name, misc_task_name, TimeoutEstimate.no_timeouts(),
+ params, generated_suite))
+
+ return tasks
+
+ def _create_sub_task(self, sub_suite: SubSuite, suite: GeneratedSuite,
+ params: ResmokeGenTaskParams) -> Task:
+ """
+ Create the sub task for the given suite.
+
+ :param sub_suite: Sub-Suite to generate.
+ :param suite: Parent suite being created.
+ :param params: Parameters describing how tasks should be generated.
+ :return: Shrub configuration for the sub-suite.
+ """
+ sub_task_name = taskname.name_generated_task(suite.task_name, sub_suite.index, len(suite),
+ suite.build_variant)
+ return self._generate_task(
+ sub_suite.name(len(suite)), sub_task_name, sub_suite.get_timeout_estimate(), params,
+ suite)
+
+ def _generate_task(self, sub_suite_name: str, sub_task_name: str, timeout_est: TimeoutEstimate,
+ params: ResmokeGenTaskParams, suite: GeneratedSuite) -> Task:
+ """
+ Generate a shrub evergreen config for a resmoke task.
+
+ :param sub_suite_name: Name of suite being generated.
+ :param sub_task_name: Name of task to generate.
+ :param timeout_est: Estimated runtime to use for calculating timeouts.
+ :param params: Parameters describing how tasks should be generated.
+ :param suite: Parent suite being created.
+ :return: Shrub configuration for the described task.
+ """
+ # pylint: disable=too-many-arguments
+ LOGGER.debug("Generating task", sub_suite=sub_suite_name)
+
+ target_suite_file = self.gen_task_options.suite_location(sub_suite_name)
+ run_tests_vars = self._get_run_tests_vars(target_suite_file, suite.suite_name, params,
+ suite.build_variant)
+
+ use_multiversion = params.use_multiversion
+ timeout_cmd = timeout_est.generate_timeout_cmd(self.gen_task_options.is_patch,
+ params.repeat_suites,
+ self.gen_task_options.use_default_timeouts)
+ commands = resmoke_commands("run generated tests", run_tests_vars, timeout_cmd,
+ use_multiversion)
+
+ return Task(sub_task_name, commands, self._get_dependencies())
+
+ @staticmethod
+ def _get_run_tests_vars(suite_file: str, suite_name: str, params: ResmokeGenTaskParams,
+ build_variant: str) -> Dict[str, Any]:
+ """
+ Generate a dictionary of the variables to pass to the task.
+
+ :param suite_file: Suite being generated.
+ :param suite_name: Name of suite being generated.
+ :param params: Parameters describing how tasks should be generated.
+ :param build_variant: Build Variant being generated.
+ :return: Dictionary containing variables and value to pass to generated task.
+ """
+ variables = {
+ "resmoke_args": params.generate_resmoke_args(suite_file, suite_name, build_variant),
+ "gen_task_config_location": params.config_location,
+ }
+
+ if params.resmoke_jobs_max:
+ variables["resmoke_jobs_max"] = params.resmoke_jobs_max
+
+ if params.use_multiversion:
+ variables["task_path_suffix"] = params.use_multiversion
+
+ return variables
+
+ @staticmethod
+ def _get_dependencies() -> Set[TaskDependency]:
+ """Get the set of dependency tasks for these suites."""
+ dependencies = {TaskDependency("archive_dist_test_debug")}
+ return dependencies
diff --git a/buildscripts/task_generation/timeout.py b/buildscripts/task_generation/timeout.py
new file mode 100644
index 00000000000..67db6568d65
--- /dev/null
+++ b/buildscripts/task_generation/timeout.py
@@ -0,0 +1,105 @@
+"""Timeout information for generating tasks."""
+import math
+from datetime import timedelta
+from inspect import getframeinfo, currentframe
+from typing import NamedTuple, Optional
+
+import structlog
+
+from buildscripts.patch_builds.task_generation import TimeoutInfo
+
+LOGGER = structlog.getLogger(__name__)
+
+AVG_SETUP_TIME = int(timedelta(minutes=5).total_seconds())
+MIN_TIMEOUT_SECONDS = int(timedelta(minutes=5).total_seconds())
+MAX_EXPECTED_TIMEOUT = int(timedelta(hours=48).total_seconds())
+
+
+def calculate_timeout(avg_runtime: float, scaling_factor: int) -> int:
+ """
+ Determine how long a timeout to set based on average runtime and a scaling factor.
+
+ :param avg_runtime: Average runtime of previous runs.
+ :param scaling_factor: Scaling factor for timeout.
+ :return: Timeout to use (in seconds).
+ """
+
+ def round_to_minute(runtime):
+ """Round the given seconds up to the nearest minute."""
+ distance_to_min = 60 - (runtime % 60)
+ return int(math.ceil(runtime + distance_to_min))
+
+ return max(MIN_TIMEOUT_SECONDS, round_to_minute(avg_runtime)) * scaling_factor + AVG_SETUP_TIME
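+
+# A worked example: calculate_timeout(400, 3) rounds 400s up to 420s, takes the
+# max with the 300s minimum (420s), scales by 3 to 1260s, and adds the 300s
+# setup allowance, returning 1560.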
+
+
+class TimeoutEstimate(NamedTuple):
+ """Runtime estimates used to calculate timeouts."""
+
+ max_test_runtime: Optional[float]
+ expected_task_runtime: Optional[float]
+
+ @classmethod
+ def no_timeouts(cls) -> "TimeoutEstimate":
+ """Create an instance with no estimation data."""
+ return cls(max_test_runtime=None, expected_task_runtime=None)
+
+ def calculate_test_timeout(self, repeat_factor: int) -> Optional[int]:
+ """
+ Calculate the timeout to use for tests.
+
+ :param repeat_factor: How many times the suite will be repeated.
+ :return: Timeout value to use for tests.
+ """
+ if self.max_test_runtime is None:
+ return None
+
+ timeout = calculate_timeout(self.max_test_runtime, 3) * repeat_factor
+ LOGGER.debug("Setting timeout", timeout=timeout, max_runtime=self.max_test_runtime,
+ factor=repeat_factor)
+ return timeout
+
+ def calculate_task_timeout(self, repeat_factor: int) -> Optional[int]:
+ """
+ Calculate the timeout to use for tasks.
+
+ :param repeat_factor: How many times the suite will be repeated.
+ :return: Timeout value to use for tasks.
+ """
+ if self.expected_task_runtime is None:
+ return None
+
+ exec_timeout = calculate_timeout(self.expected_task_runtime, 3) * repeat_factor
+ LOGGER.debug("Setting exec_timeout", exec_timeout=exec_timeout,
+ suite_runtime=self.expected_task_runtime, factor=repeat_factor)
+ return exec_timeout
+
+ def generate_timeout_cmd(self, is_patch: bool, repeat_factor: int,
+ use_default: bool = False) -> TimeoutInfo:
+ """
+ Create the timeout info to use to create a timeout shrub command.
+
+ :param is_patch: Whether the command is being created in a patch build.
+ :param repeat_factor: How many times the suite will be repeated.
+ :param use_default: Should the default timeout be used.
+ :return: Timeout info for the task.
+ """
+
+ if (self.max_test_runtime is None and self.expected_task_runtime is None) or use_default:
+ return TimeoutInfo.default_timeout()
+
+ test_timeout = self.calculate_test_timeout(repeat_factor)
+ task_timeout = self.calculate_task_timeout(repeat_factor)
+
+ if is_patch and (test_timeout > MAX_EXPECTED_TIMEOUT
+ or task_timeout > MAX_EXPECTED_TIMEOUT):
+ frameinfo = getframeinfo(currentframe())
+ LOGGER.error(
+ "This task looks like it is expected to run far longer than normal. This is "
+ "likely due to setting the suite 'repeat' value very high. If you are sure "
+ "this is something you want to do, comment this check out in your patch build "
+ "and resubmit", repeat_value=repeat_factor, timeout=test_timeout,
+ exec_timeout=task_timeout, code_file=frameinfo.filename, code_line=frameinfo.lineno,
+ max_timeout=MAX_EXPECTED_TIMEOUT)
+ raise ValueError("Failing due to expected runtime.")
+
+ return TimeoutInfo.overridden(timeout=test_timeout, exec_timeout=task_timeout)
diff --git a/buildscripts/tests/patch_builds/selected_tests/__init__.py b/buildscripts/tests/patch_builds/selected_tests/__init__.py
new file mode 100644
index 00000000000..4b7a2bb941b
--- /dev/null
+++ b/buildscripts/tests/patch_builds/selected_tests/__init__.py
@@ -0,0 +1 @@
+"""Empty."""
diff --git a/buildscripts/tests/patch_builds/selected_tests/test_selected_tests_client.py b/buildscripts/tests/patch_builds/selected_tests/test_selected_tests_client.py
new file mode 100644
index 00000000000..974d4cd9e25
--- /dev/null
+++ b/buildscripts/tests/patch_builds/selected_tests/test_selected_tests_client.py
@@ -0,0 +1,87 @@
+"""Unit tests for the selected_tests service."""
+import os
+import unittest
+
+from tempfile import TemporaryDirectory
+import requests
+from mock import MagicMock, patch
+
+# pylint: disable=wrong-import-position
+import buildscripts.patch_builds.selected_tests.selected_tests_client as under_test
+
+# pylint: disable=missing-docstring
+
+
+def build_mock_test_mapping(source_file, test_file):
+ return under_test.TestMapping(
+ branch="branch", project="project", repo="repo", source_file=source_file,
+ source_file_seen_count=5, test_files=[
+ under_test.TestFileInstance(name=test_file, test_file_seen_count=3),
+ ])
+
+
+def build_mock_task_mapping(source_file, task):
+ return under_test.TaskMapping(
+ branch="branch", project="project", repo="repo", source_file=source_file,
+ source_file_seen_count=5, tasks=[
+ under_test.TaskMapInstance(name=task, variant="variant", flip_count=3),
+ ])
+
+
+class TestSelectedTestsClient(unittest.TestCase):
+ def test_from_file_with_valid_file(self):
+ with TemporaryDirectory() as tmpdir:
+ config_file = os.path.join(tmpdir, "selected_tests_test_config.yml")
+ with open(config_file, "w") as fh:
+ fh.write("url: url\n")
+ fh.write("project: project\n")
+ fh.write("auth_user: user\n")
+ fh.write("auth_token: token\n")
+
+ selected_tests_service = under_test.SelectedTestsClient.from_file(config_file)
+
+ self.assertEqual(selected_tests_service.url, "url")
+ self.assertEqual(selected_tests_service.project, "project")
+ self.assertEqual(selected_tests_service.session.cookies["auth_user"], "user")
+ self.assertEqual(selected_tests_service.session.cookies["auth_token"], "token")
+
+ def test_from_file_with_invalid_file(self):
+ with self.assertRaises(FileNotFoundError):
+ under_test.SelectedTestsClient.from_file("")
+
+ @patch("requests.Session")
+ def test_files_returned_from_selected_tests_service(self, requests_mock):
+ changed_files = {"src/file1.cpp", "src/file2.js"}
+ response_object = under_test.TestMappingsResponse(test_mappings=[
+ build_mock_test_mapping("src/file1.cpp", "jstests/file-1.js"),
+ build_mock_test_mapping("src/file2.cpp", "jstests/file-3.js"),
+ ])
+ requests_mock.return_value.get.return_value.json.return_value = response_object.dict()
+
+ related_test_files = under_test.SelectedTestsClient("my-url.com", "my-project", "auth_user",
+ "auth_token").get_test_mappings(
+ 0.1, changed_files)
+
+ self.assertEqual(related_test_files, response_object)
+
+ @patch("requests.Session")
+ def test_selected_tests_service_unavailable(self, requests_mock):
+ changed_files = {"src/file1.cpp", "src/file2.js"}
+ response = MagicMock(status_code=requests.codes.SERVICE_UNAVAILABLE)
+ requests_mock.return_value.get.side_effect = requests.HTTPError(response=response)
+
+ with self.assertRaises(requests.exceptions.HTTPError):
+ under_test.SelectedTestsClient("my-url.com", "my-project", "auth_user",
+ "auth_token").get_test_mappings(0.1, changed_files)
+
+ @patch("requests.Session")
+ def test_no_files_returned(self, requests_mock):
+ changed_files = {"src/file1.cpp", "src/file2.js"}
+ response_object = under_test.TestMappingsResponse(test_mappings=[])
+ requests_mock.return_value.get.return_value.json.return_value = response_object.dict()
+
+ related_test_files = under_test.SelectedTestsClient("my-url.com", "my-project", "auth_user",
+ "auth_token").get_test_mappings(
+ 0.1, changed_files)
+
+ self.assertEqual(related_test_files, response_object)
diff --git a/buildscripts/tests/patch_builds/selected_tests/test_selected_tests_service.py b/buildscripts/tests/patch_builds/selected_tests/test_selected_tests_service.py
new file mode 100644
index 00000000000..a04d2818588
--- /dev/null
+++ b/buildscripts/tests/patch_builds/selected_tests/test_selected_tests_service.py
@@ -0,0 +1,87 @@
+"""Unit tests for selected_tests_service.py."""
+import unittest
+from unittest.mock import MagicMock, patch
+
+import buildscripts.patch_builds.selected_tests.selected_tests_service as under_test
+from buildscripts.patch_builds.selected_tests.selected_tests_client import TestMappingsResponse, \
+ TestMapping, TestFileInstance, TaskMappingsResponse, TaskMapping, TaskMapInstance
+
+# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access,no-value-for-parameter
+
+
+def build_mock_test_mapping(source_file, test_file):
+ return TestMapping(branch="branch", project="project", repo="repo", source_file=source_file,
+ source_file_seen_count=5, test_files=[
+ TestFileInstance(name=test_file, test_file_seen_count=3),
+ ])
+
+
+def build_mock_task_mapping(source_file, task):
+ return TaskMapping(branch="branch", project="project", repo="repo", source_file=source_file,
+ source_file_seen_count=5, tasks=[
+ TaskMapInstance(name=task, variant="variant", flip_count=3),
+ ])
+
+
+class TestFindSelectedTestFiles(unittest.TestCase):
+ @patch("os.path.isfile")
+ def test_related_files_returned_from_selected_tests_service(self, mock_is_file):
+ mock_is_file.return_value = True
+ changed_files = {"src/file1.cpp", "src/file2.js"}
+ mock_selected_tests_client = MagicMock()
+ mock_selected_tests_client.get_test_mappings.return_value = TestMappingsResponse(
+ test_mappings=[
+ build_mock_test_mapping("src/file1.cpp", "jstests/file-1.js"),
+ build_mock_test_mapping("src/file2.cpp", "jstests/file-3.js"),
+ ])
+ selected_tests = under_test.SelectedTestsService(mock_selected_tests_client)
+
+ related_test_files = selected_tests.find_selected_test_files(changed_files)
+
+ self.assertEqual(related_test_files, {"jstests/file-1.js", "jstests/file-3.js"})
+
+ @patch("os.path.isfile")
+ def test_related_files_returned_are_not_valid_test_files(self, mock_is_file):
+ mock_is_file.return_value = False
+ changed_files = {"src/file1.cpp", "src/file2.js"}
+ mock_selected_tests_client = MagicMock()
+ mock_selected_tests_client.get_test_mappings.return_value = TestMappingsResponse(
+ test_mappings=[
+ build_mock_test_mapping("src/file1.cpp", "jstests/file-1.js"),
+ build_mock_test_mapping("src/file2.cpp", "jstests/file-3.js"),
+ ])
+ selected_tests = under_test.SelectedTestsService(mock_selected_tests_client)
+
+ related_test_files = selected_tests.find_selected_test_files(changed_files)
+
+ self.assertEqual(related_test_files, set())
+
+ def test_no_related_files_returned(self):
+ changed_files = {"src/file1.cpp", "src/file2.js"}
+ mock_selected_tests_client = MagicMock()
+ mock_selected_tests_client.get_test_mappings.return_value = TestMappingsResponse(
+ test_mappings=[
+ build_mock_test_mapping("src/file1.cpp", "jstests/file-1.js"),
+ build_mock_test_mapping("src/file2.cpp", "jstests/file-3.js"),
+ ])
+ selected_tests = under_test.SelectedTestsService(mock_selected_tests_client)
+
+ related_test_files = selected_tests.find_selected_test_files(changed_files)
+
+ self.assertEqual(related_test_files, set())
+
+
+class TestFindSelectedTasks(unittest.TestCase):
+ def test_related_tasks_returned_from_selected_tests_service(self):
+ changed_files = {"src/file1.cpp", "src/file2.js"}
+ mock_selected_tests_client = MagicMock()
+ mock_selected_tests_client.get_task_mappings.return_value = TaskMappingsResponse(
+ task_mappings=[
+ build_mock_task_mapping("src/file1.cpp", "my_task_1"),
+ build_mock_task_mapping("src/file2.cpp", "my_task_2"),
+ ])
+ selected_tests = under_test.SelectedTestsService(mock_selected_tests_client)
+
+ related_tasks = selected_tests.find_selected_tasks(changed_files)
+
+ self.assertEqual(related_tasks, {"my_task_1", "my_task_2"})
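Note: the assertions above fully constrain what the service must do with a mappings response: flatten the mapped test files, keep only files that exist on disk (the os.path.isfile patches), and return names as a set; task mappings are flattened without the file check. A sketch under those assumptions — DEFAULT_THRESHOLD is a hypothetical stand-in, since the real threshold value is not shown in this diff — could be:

    import os
    from typing import Set


    class SelectedTestsService:
        DEFAULT_THRESHOLD = 0.1  # hypothetical; the real default is not visible here

        def __init__(self, selected_tests_client) -> None:
            self.selected_tests_client = selected_tests_client

        def find_selected_test_files(self, changed_files: Set[str]) -> Set[str]:
            # Flatten the mapped test files, keeping only ones that exist on disk.
            response = self.selected_tests_client.get_test_mappings(
                self.DEFAULT_THRESHOLD, changed_files)
            return {
                test_file.name
                for mapping in response.test_mappings
                for test_file in mapping.test_files
                if os.path.isfile(test_file.name)
            }

        def find_selected_tasks(self, changed_files: Set[str]) -> Set[str]:
            response = self.selected_tests_client.get_task_mappings(
                self.DEFAULT_THRESHOLD, changed_files)
            return {task.name for mapping in response.task_mappings for task in mapping.tasks}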
diff --git a/buildscripts/tests/patch_builds/test_selected_tests_service.py b/buildscripts/tests/patch_builds/test_selected_tests_service.py
deleted file mode 100644
index c2452b6f65e..00000000000
--- a/buildscripts/tests/patch_builds/test_selected_tests_service.py
+++ /dev/null
@@ -1,95 +0,0 @@
-"""Unit tests for the selected_tests service."""
-import os
-import unittest
-
-from tempfile import TemporaryDirectory
-import requests
-from mock import MagicMock, patch
-
-# pylint: disable=wrong-import-position
-import buildscripts.patch_builds.selected_tests_service as under_test
-
-# pylint: disable=missing-docstring
-
-NS = "buildscripts.patch_builds.selected_tests_service"
-
-
-def ns(relative_name): # pylint: disable=invalid-name
- """Return a full name from a name relative to the test module"s name space."""
- return NS + "." + relative_name
-
-
-class TestSelectedTestsService(unittest.TestCase):
- def test_from_file_with_valid_file(self):
- with TemporaryDirectory() as tmpdir:
- config_file = os.path.join(tmpdir, "selected_tests_test_config.yml")
- with open(config_file, "w") as fh:
- fh.write("url: url\n")
- fh.write("project: project\n")
- fh.write("auth_user: user\n")
- fh.write("auth_token: token\n")
-
- selected_tests_service = under_test.SelectedTestsService.from_file(config_file)
-
- self.assertEqual(selected_tests_service.url, "url")
- self.assertEqual(selected_tests_service.project, "project")
- self.assertEqual(selected_tests_service.auth_user, "user")
- self.assertEqual(selected_tests_service.auth_token, "token")
-
- def test_from_file_with_invalid_file(self):
- with self.assertRaises(FileNotFoundError):
- under_test.SelectedTestsService.from_file("")
-
- @patch(ns("requests"))
- def test_files_returned_from_selected_tests_service(self, requests_mock):
- changed_files = {"src/file1.cpp", "src/file2.js"}
- response_object = {
- "test_mappings": [
- {
- "source_file": "src/file1.cpp",
- "test_files": [{"name": "jstests/file-1.js"}],
- },
- {
- "source_file": "src/file2.cpp",
- "test_files": [{"name": "jstests/file-3.js"}],
- },
- ]
- }
- requests_mock.get.return_value.json.return_value = response_object
-
- related_test_files = under_test.SelectedTestsService(
- "my-url.com", "my-project", "auth_user", "auth_token").get_test_mappings(
- 0.1, changed_files)
-
- requests_mock.get.assert_called_with(
- "my-url.com/projects/my-project/test-mappings",
- params={"threshold": 0.1, "changed_files": ",".join(changed_files)},
- headers={
- "Content-type": "application/json",
- "Accept": "application/json",
- },
- cookies={"auth_user": "auth_user", "auth_token": "auth_token"},
- )
- self.assertEqual(related_test_files, response_object["test_mappings"])
-
- @patch(ns("requests"))
- def test_selected_tests_service_unavailable(self, requests_mock):
- changed_files = {"src/file1.cpp", "src/file2.js"}
- response = MagicMock(status_code=requests.codes.SERVICE_UNAVAILABLE)
- requests_mock.get.side_effect = requests.HTTPError(response=response)
-
- with self.assertRaises(requests.exceptions.HTTPError):
- under_test.SelectedTestsService("my-url.com", "my-project", "auth_user",
- "auth_token").get_test_mappings(0.1, changed_files)
-
- @patch(ns("requests"))
- def test_no_files_returned(self, requests_mock):
- changed_files = {"src/file1.cpp", "src/file2.js"}
- response_object = {"test_mappings": []}
- requests_mock.get.return_value.json.return_value = response_object
-
- related_test_files = under_test.SelectedTestsService(
- "my-url.com", "my-project", "auth_user", "auth_token").get_test_mappings(
- 0.1, changed_files)
-
- self.assertEqual(related_test_files, [])
diff --git a/buildscripts/tests/task_generation/__init__.py b/buildscripts/tests/task_generation/__init__.py
new file mode 100644
index 00000000000..4b7a2bb941b
--- /dev/null
+++ b/buildscripts/tests/task_generation/__init__.py
@@ -0,0 +1 @@
+"""Empty."""
diff --git a/buildscripts/tests/task_generation/task_types/__init__.py b/buildscripts/tests/task_generation/task_types/__init__.py
new file mode 100644
index 00000000000..4b7a2bb941b
--- /dev/null
+++ b/buildscripts/tests/task_generation/task_types/__init__.py
@@ -0,0 +1 @@
+"""Empty."""
diff --git a/buildscripts/tests/task_generation/task_types/test_fuzzer_tasks.py b/buildscripts/tests/task_generation/task_types/test_fuzzer_tasks.py
new file mode 100644
index 00000000000..7af8157e2da
--- /dev/null
+++ b/buildscripts/tests/task_generation/task_types/test_fuzzer_tasks.py
@@ -0,0 +1,76 @@
+"""Unit tests for fuzzer_tasks.py"""
+
+import unittest
+
+import buildscripts.task_generation.task_types.fuzzer_tasks as under_test
+
+# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access
+
+
+def build_mock_fuzzer_params(multi_version=None, jstestfuzz_vars="vars for jstestfuzz"):
+ return under_test.FuzzerGenTaskParams(
+ task_name="task name",
+ variant="build variant",
+ suite="resmoke suite",
+ num_files=10,
+ num_tasks=5,
+ resmoke_args="args for resmoke",
+ npm_command="jstestfuzz",
+ jstestfuzz_vars=jstestfuzz_vars,
+ continue_on_failure=True,
+ resmoke_jobs_max=5,
+ should_shuffle=True,
+ timeout_secs=100,
+ use_multiversion=multi_version,
+ use_large_distro=None,
+ add_to_display_task=True,
+ large_distro_name="large distro",
+ config_location="config_location",
+ )
+
+
+class TestFuzzerGenTaskParams(unittest.TestCase):
+ def test_jstestfuzz_params_should_be_generated(self):
+ params = build_mock_fuzzer_params()
+
+ jstestfuzz_vars = params.jstestfuzz_params()
+
+ self.assertEqual(params.npm_command, jstestfuzz_vars["npm_command"])
+ self.assertIn(f"--numGeneratedFiles {params.num_files}", jstestfuzz_vars["jstestfuzz_vars"])
+ self.assertIn(params.jstestfuzz_vars, jstestfuzz_vars["jstestfuzz_vars"])
+
+ def test_jstestfuzz_params_should_handle_no_vars(self):
+ params = build_mock_fuzzer_params(jstestfuzz_vars=None)
+
+ self.assertNotIn("None", params.jstestfuzz_params()["jstestfuzz_vars"])
+
+
+class TestGenerateTasks(unittest.TestCase):
+ def test_fuzzer_tasks_are_generated(self):
+ mock_params = build_mock_fuzzer_params()
+ fuzzer_service = under_test.FuzzerGenTaskService()
+
+ fuzzer_task = fuzzer_service.generate_tasks(mock_params)
+
+ self.assertEqual(fuzzer_task.task_name, mock_params.task_name)
+ self.assertEqual(len(fuzzer_task.sub_tasks), mock_params.num_tasks)
+
+
+class TestBuildFuzzerSubTask(unittest.TestCase):
+ def test_sub_task_should_be_built_correctly(self):
+ mock_params = build_mock_fuzzer_params()
+ fuzzer_service = under_test.FuzzerGenTaskService()
+
+ sub_task = fuzzer_service.build_fuzzer_sub_task(3, mock_params)
+
+ self.assertEqual(sub_task.name, f"{mock_params.task_name}_3_{mock_params.variant}")
+ self.assertEqual(len(sub_task.commands), 4)
+
+ def test_sub_task_multi_version_tasks_should_be_built_correctly(self):
+ mock_params = build_mock_fuzzer_params(multi_version="multiversion value")
+ fuzzer_service = under_test.FuzzerGenTaskService()
+
+ sub_task = fuzzer_service.build_fuzzer_sub_task(3, mock_params)
+
+ self.assertEqual(sub_task.name, f"{mock_params.task_name}_3_{mock_params.variant}")
+ self.assertEqual(len(sub_task.commands), 6)
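Note: the two parameter tests pin down how jstestfuzz_params must behave: always emit --numGeneratedFiles, and never render a None jstestfuzz_vars as the string "None". A sketch of just that method, assuming a NamedTuple-style parameter object (the real class may be a dataclass or pydantic model), could be:

    from typing import Dict, NamedTuple, Optional


    class FuzzerGenTaskParams(NamedTuple):
        num_files: int
        npm_command: str
        jstestfuzz_vars: Optional[str]

        def jstestfuzz_params(self) -> Dict[str, str]:
            # Always pass the generated-file count; append the caller's vars only
            # when present so a None value never leaks into the command line.
            gen_vars = f"--numGeneratedFiles {self.num_files}"
            if self.jstestfuzz_vars:
                gen_vars = f"{gen_vars} {self.jstestfuzz_vars}"
            return {"npm_command": self.npm_command, "jstestfuzz_vars": gen_vars}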
diff --git a/buildscripts/tests/task_generation/task_types/test_gentask_options.py b/buildscripts/tests/task_generation/task_types/test_gentask_options.py
new file mode 100644
index 00000000000..6d811e32577
--- /dev/null
+++ b/buildscripts/tests/task_generation/task_types/test_gentask_options.py
@@ -0,0 +1,27 @@
+"""Unit tests for gentask_options.py."""
+
+import unittest
+
+import buildscripts.task_generation.task_types.gentask_options as under_test
+
+# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access
+
+
+def build_mock_gen_task_options(config_dir="."):
+ return under_test.GenTaskOptions(
+ create_misc_suite=True,
+ is_patch=True,
+ generated_config_dir=config_dir,
+ use_default_timeouts=False,
+ )
+
+
+class TestSuiteLocation(unittest.TestCase):
+ def test_should_return_suite_under_generated_config_dir(self):
+ config_dir = "path/to/config"
+ suite_name = "my_suite"
+ mock_gen_task_options = build_mock_gen_task_options(config_dir=config_dir)
+
+ suite_location = mock_gen_task_options.suite_location(suite_name)
+
+ self.assertEqual(suite_location, f"{config_dir}/{suite_name}")
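Note: the single assertion above fixes suite_location exactly. A sketch, assuming a dataclass (the real class may differ, and may use os.path.join, which yields the same string for these inputs on POSIX), could be:

    from dataclasses import dataclass


    @dataclass
    class GenTaskOptions:
        create_misc_suite: bool
        is_patch: bool
        generated_config_dir: str
        use_default_timeouts: bool

        def suite_location(self, suite_name: str) -> str:
            # Generated suite files are addressed relative to the generated config dir.
            return f"{self.generated_config_dir}/{suite_name}"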
diff --git a/buildscripts/tests/task_generation/task_types/test_resmoke_tasks.py b/buildscripts/tests/task_generation/task_types/test_resmoke_tasks.py
new file mode 100644
index 00000000000..a6f9b2670bd
--- /dev/null
+++ b/buildscripts/tests/task_generation/task_types/test_resmoke_tasks.py
@@ -0,0 +1,147 @@
+"""Unit tests for resmoke_tasks.py."""
+import unittest
+
+import buildscripts.task_generation.task_types.resmoke_tasks as under_test
+from buildscripts.task_generation.suite_split import GeneratedSuite, SubSuite
+from buildscripts.task_generation.task_types.gentask_options import GenTaskOptions
+from buildscripts.util.teststats import TestRuntime
+
+# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access
+
+
+class TestHelperMethods(unittest.TestCase):
+ def test_string_contains_any_of_args(self):
+ args = ["repeatSuites", "repeat"]
+ string = "--suite=suite 0.yml --originSuite=suite resmoke_args --repeat=5"
+ self.assertEqual(True, under_test.string_contains_any_of_args(string, args))
+
+ def test_string_contains_any_of_args_for_empty_args(self):
+ args = []
+ string = "--suite=suite 0.yml --originSuite=suite resmoke_args --repeat=5"
+ self.assertEqual(False, under_test.string_contains_any_of_args(string, args))
+
+ def test_string_contains_any_of_args_for_non_matching_args(self):
+ args = ["random_string_1", "random_string_2", "random_string_3"]
+ string = "--suite=suite 0.yml --originSuite=suite resmoke_args --repeat=5"
+ self.assertEqual(False, under_test.string_contains_any_of_args(string, args))
+
+
+def build_mock_gen_options(use_default_timeouts=False):
+ return GenTaskOptions(create_misc_suite=True, is_patch=True, generated_config_dir="tmpdir",
+ use_default_timeouts=use_default_timeouts)
+
+
+def build_mock_gen_params(repeat_suites=1, resmoke_args="resmoke args"):
+ return under_test.ResmokeGenTaskParams(
+ use_large_distro=False,
+ use_multiversion=None,
+ repeat_suites=repeat_suites,
+ resmoke_args=resmoke_args,
+ resmoke_jobs_max=None,
+ large_distro_name=None,
+ config_location="generated_config",
+ )
+
+
+def build_mock_suite(n_sub_suites, include_runtimes=True):
+ return GeneratedSuite(
+ sub_suites=[
+ SubSuite.from_test_list(
+ index=i, suite_name=f"suite_{i}", test_list=[f"test_{i*j}" for j in range(3)],
+ runtime_list=[TestRuntime(test_name=f"test_{i*j}", runtime=3.14)
+ for j in range(3)] if include_runtimes else None, task_overhead=None)
+ for i in range(n_sub_suites)
+ ],
+ build_variant="build variant",
+ task_name="task name",
+ suite_name="suite name",
+ filename="suite file",
+ )
+
+
+class TestGenerateTask(unittest.TestCase):
+ def test_evg_config_does_not_overwrite_repeatSuites_resmoke_arg_with_repeatSuites_default(self):
+ mock_gen_options = build_mock_gen_options()
+ params = build_mock_gen_params(resmoke_args="resmoke_args --repeatSuites=5")
+ suites = build_mock_suite(1, include_runtimes=False)
+
+ resmoke_service = under_test.ResmokeGenTaskService(mock_gen_options)
+ tasks = resmoke_service.generate_tasks(suites, params)
+
+ for task in tasks:
+ found_resmoke_cmd = False
+ for cmd in task.commands:
+ cmd_dict = cmd.as_dict()
+ if cmd_dict.get("func") == "run generated tests":
+ found_resmoke_cmd = True
+ args = cmd_dict.get("vars", {}).get("resmoke_args")
+ self.assertIn("--repeatSuites=5", args)
+ self.assertNotIn("--repeatSuites=1", args)
+
+ self.assertTrue(found_resmoke_cmd)
+
+ def test_evg_config_does_not_overwrite_repeat_resmoke_arg_with_repeatSuites_default(self):
+ mock_gen_options = build_mock_gen_options()
+ params = build_mock_gen_params(resmoke_args="resmoke_args --repeat=5")
+ suites = build_mock_suite(1, include_runtimes=False)
+
+ resmoke_service = under_test.ResmokeGenTaskService(mock_gen_options)
+ tasks = resmoke_service.generate_tasks(suites, params)
+
+ for task in tasks:
+ found_resmoke_cmd = False
+ for cmd in task.commands:
+ cmd_dict = cmd.as_dict()
+ if cmd_dict.get("func") == "run generated tests":
+ found_resmoke_cmd = True
+ args = cmd_dict.get("vars", {}).get("resmoke_args")
+ self.assertIn("--repeat=5", args)
+ self.assertNotIn("--repeatSuites=1", args)
+
+ self.assertTrue(found_resmoke_cmd)
+
+ def test_evg_config_has_timeouts_for_repeated_suites(self):
+ n_sub_suites = 3
+ mock_gen_options = build_mock_gen_options()
+ params = build_mock_gen_params(repeat_suites=5)
+ suites = build_mock_suite(n_sub_suites)
+
+ resmoke_service = under_test.ResmokeGenTaskService(mock_gen_options)
+ tasks = resmoke_service.generate_tasks(suites, params)
+
+ self.assertEqual(n_sub_suites + 1, len(tasks))
+ for task in tasks:
+ if "misc" in task.name:
+ # Misc tasks should use default timeouts.
+ continue
+ self.assertGreaterEqual(len(task.commands), 1)
+ timeout_cmd = task.commands[0]
+ self.assertEqual("timeout.update", timeout_cmd.command)
+
+ def test_suites_without_enough_info_should_not_include_timeouts(self):
+ mock_gen_options = build_mock_gen_options()
+ params = build_mock_gen_params()
+ suites = build_mock_suite(1, include_runtimes=False)
+
+ resmoke_service = under_test.ResmokeGenTaskService(mock_gen_options)
+ tasks = resmoke_service.generate_tasks(suites, params)
+
+ self.assertEqual(2, len(tasks))
+ for task in tasks:
+ for cmd in task.commands:
+ cmd_dict = cmd.as_dict()
+ self.assertNotEqual("timeout.update", cmd_dict.get("command"))
+
+ def test_timeout_info_not_included_if_use_default_timeouts_set(self):
+ mock_gen_options = build_mock_gen_options(use_default_timeouts=True)
+ params = build_mock_gen_params()
+ suites = build_mock_suite(1)
+
+ resmoke_service = under_test.ResmokeGenTaskService(mock_gen_options)
+ tasks = resmoke_service.generate_tasks(suites, params)
+
+ self.assertEqual(2, len(tasks))
+ for task in tasks:
+ for cmd in task.commands:
+ cmd_dict = cmd.as_dict()
+ self.assertNotEqual("timeout.update", cmd_dict.get("command"))
diff --git a/buildscripts/tests/task_generation/test_gen_task_service.py b/buildscripts/tests/task_generation/test_gen_task_service.py
new file mode 100644
index 00000000000..611d15b0029
--- /dev/null
+++ b/buildscripts/tests/task_generation/test_gen_task_service.py
@@ -0,0 +1,127 @@
+"""Unit tests for gen_task_service.py."""
+
+import unittest
+from unittest.mock import MagicMock
+
+from shrub.v2 import BuildVariant
+
+import buildscripts.task_generation.gen_task_service as under_test
+from buildscripts.task_generation.task_types.fuzzer_tasks import FuzzerGenTaskService
+
+# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access
+
+
+def build_mock_fuzzer_params(multi_version=None, use_large_distro=None, add_to_display=True,
+ large_distro_name=None):
+ return under_test.FuzzerGenTaskParams(
+ task_name="task name",
+ variant="build variant",
+ suite="resmoke suite",
+ num_files=10,
+ num_tasks=5,
+ resmoke_args="args for resmoke",
+ npm_command="jstestfuzz",
+ jstestfuzz_vars="vars for jstestfuzz",
+ continue_on_failure=True,
+ resmoke_jobs_max=5,
+ should_shuffle=True,
+ timeout_secs=100,
+ use_multiversion=multi_version,
+ use_large_distro=use_large_distro,
+ add_to_display_task=add_to_display,
+ large_distro_name=large_distro_name,
+ config_location="config location",
+ )
+
+
+def build_mocked_service():
+ return under_test.GenTaskService(
+ evg_api=MagicMock(),
+ gen_task_options=MagicMock(),
+ gen_config=MagicMock(),
+ resmoke_gen_task_service=MagicMock(),
+ multiversion_gen_task_service=MagicMock(),
+ fuzzer_gen_task_service=FuzzerGenTaskService(),
+ )
+
+
+class TestGenerateFuzzerTask(unittest.TestCase):
+ def test_fuzzer_tasks_should_be_generated(self):
+ mock_params = build_mock_fuzzer_params()
+ build_variant = BuildVariant("mock build variant")
+ service = build_mocked_service()
+
+ fuzzer_task = service.generate_fuzzer_task(mock_params, build_variant)
+
+ self.assertEqual(fuzzer_task.task_name, mock_params.task_name)
+ self.assertEqual(len(fuzzer_task.sub_tasks), mock_params.num_tasks)
+
+ self.assertEqual(len(build_variant.tasks), mock_params.num_tasks)
+
+ display_tasks = list(build_variant.display_tasks)
+ self.assertEqual(len(display_tasks), 1)
+ self.assertEqual(display_tasks[0].display_name, mock_params.task_name)
+ self.assertEqual(len(display_tasks[0].execution_tasks), mock_params.num_tasks)
+
+ def test_fuzzer_for_large_distro_tasks_should_be_generated_on_large(self):
+ mock_distro = "my large distro"
+ mock_params = build_mock_fuzzer_params(use_large_distro=True, large_distro_name=mock_distro)
+ build_variant = BuildVariant("mock build variant")
+ service = build_mocked_service()
+ service.gen_task_options.large_distro_name = mock_distro
+
+ service.generate_fuzzer_task(mock_params, build_variant)
+
+ fuzzer_config = build_variant.as_dict()
+ self.assertTrue(all(mock_distro in task["distros"] for task in fuzzer_config["tasks"]))
+
+ def test_fuzzer_tasks_should_not_be_added_to_display_group_when_specified(self):
+ mock_params = build_mock_fuzzer_params(add_to_display=False)
+ build_variant = BuildVariant("mock build variant")
+ service = build_mocked_service()
+
+ fuzzer_task = service.generate_fuzzer_task(mock_params, build_variant)
+
+ self.assertEqual(fuzzer_task.task_name, mock_params.task_name)
+ self.assertEqual(len(fuzzer_task.sub_tasks), mock_params.num_tasks)
+
+ self.assertEqual(len(build_variant.tasks), mock_params.num_tasks)
+
+ display_tasks = list(build_variant.display_tasks)
+ self.assertEqual(len(display_tasks), 0)
+
+
+class TestGetDistro(unittest.TestCase):
+ def test_default_distro_should_be_used_if_use_large_distro_not_set(self):
+ service = build_mocked_service()
+
+ distros = service._get_distro("build variant", use_large_distro=False,
+ large_distro_name=None)
+
+ self.assertIsNone(distros)
+
+ def test_large_distro_should_be_used_if_use_large_distro_is_set(self):
+ mock_distro = "my large distro"
+ service = build_mocked_service()
+
+ distros = service._get_distro("build variant", use_large_distro=True,
+ large_distro_name=mock_distro)
+
+ self.assertEqual(distros, [mock_distro])
+
+ def test_a_missing_large_distro_should_throw_error(self):
+ service = build_mocked_service()
+
+ with self.assertRaises(ValueError):
+ service._get_distro("build variant", use_large_distro=True, large_distro_name=None)
+
+ def test_a_missing_large_distro_can_be_ignored(self):
+ build_variant = "my build variant"
+ service = build_mocked_service()
+ service.gen_config.build_variant_large_distro_exceptions = {
+ "some other build", build_variant, "build 3"
+ }
+
+ distros = service._get_distro(build_variant, use_large_distro=True, large_distro_name=None)
+
+ self.assertIsNone(distros)
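Note: the TestGetDistro cases above pin down the distro-selection rules. A sketch as a free function (the real code is the _get_distro method, which reads the exception list from the injected gen_config) could be:

    from typing import List, Optional, Set


    def get_distro(build_variant: str, use_large_distro: bool,
                   large_distro_name: Optional[str],
                   large_distro_exceptions: Set[str]) -> Optional[List[str]]:
        if not use_large_distro:
            # None defers to the build variant's default distro.
            return None
        if large_distro_name:
            return [large_distro_name]
        if build_variant in large_distro_exceptions:
            # Variants on the exception list quietly fall back to the default.
            return None
        raise ValueError(f"No large distro configured for build variant '{build_variant}'")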
diff --git a/buildscripts/tests/task_generation/test_gen_task_validation.py b/buildscripts/tests/task_generation/test_gen_task_validation.py
new file mode 100644
index 00000000000..951e60da344
--- /dev/null
+++ b/buildscripts/tests/task_generation/test_gen_task_validation.py
@@ -0,0 +1,62 @@
+"""Unit tests for gen_task_validation.py"""
+import unittest
+from unittest.mock import MagicMock
+
+import buildscripts.task_generation.gen_task_validation as under_test
+
+# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access
+
+
+class TestShouldTasksBeGenerated(unittest.TestCase):
+ def test_during_first_execution(self):
+ task_id = "task_id"
+ mock_evg_api = MagicMock()
+ mock_evg_api.task_by_id.return_value.execution = 0
+ validate_service = under_test.GenTaskValidationService(mock_evg_api)
+
+ self.assertTrue(validate_service.should_task_be_generated(task_id))
+ mock_evg_api.task_by_id.assert_called_with(task_id, fetch_all_executions=True)
+
+ def test_after_successful_execution(self):
+ task_id = "task_id"
+ mock_evg_api = MagicMock()
+ task = mock_evg_api.task_by_id.return_value
+ task.execution = 1
+ task.get_execution.return_value.is_success.return_value = True
+ validate_service = under_test.GenTaskValidationService(mock_evg_api)
+
+ self.assertFalse(validate_service.should_task_be_generated(task_id))
+ mock_evg_api.task_by_id.assert_called_with(task_id, fetch_all_executions=True)
+
+ def test_after_multiple_successful_executions(self):
+ task_id = "task_id"
+ mock_evg_api = MagicMock()
+ task = mock_evg_api.task_by_id.return_value
+ task.execution = 5
+ task.get_execution.return_value.is_success.return_value = True
+ validate_service = under_test.GenTaskValidationService(mock_evg_api)
+
+ self.assertFalse(validate_service.should_task_be_generated(task_id))
+ mock_evg_api.task_by_id.assert_called_with(task_id, fetch_all_executions=True)
+
+ def test_after_failed_execution(self):
+ mock_evg_api = MagicMock()
+ task_id = "task_id"
+ task = mock_evg_api.task_by_id.return_value
+ task.execution = 1
+ task.get_execution.return_value.is_success.return_value = False
+ validate_service = under_test.GenTaskValidationService(mock_evg_api)
+
+ self.assertTrue(validate_service.should_task_be_generated(task_id))
+ mock_evg_api.task_by_id.assert_called_with(task_id, fetch_all_executions=True)
+
+ def test_after_multiple_failed_executions(self):
+ mock_evg_api = MagicMock()
+ task_id = "task_id"
+ task = mock_evg_api.task_by_id.return_value
+ task.execution = 5
+ task.get_execution.return_value.is_success.return_value = False
+ validate_service = under_test.GenTaskValidationService(mock_evg_api)
+
+ self.assertTrue(validate_service.should_task_be_generated(task_id))
+ mock_evg_api.task_by_id.assert_called_with(task_id, fetch_all_executions=True)
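Note: the five cases above pin down the regeneration rule: always generate on the first execution, and on restarts regenerate only if no earlier execution succeeded. A sketch consistent with the mocked evergreen API could be:

    class GenTaskValidationService:
        def __init__(self, evg_api) -> None:
            self.evg_api = evg_api

        def should_task_be_generated(self, task_id: str) -> bool:
            task = self.evg_api.task_by_id(task_id, fetch_all_executions=True)
            if task.execution == 0:
                # Always generate on the first execution.
                return True
            # On restarts, regenerate only if no earlier execution succeeded.
            return not any(
                task.get_execution(i).is_success() for i in range(task.execution))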
diff --git a/buildscripts/tests/task_generation/test_resmoke_proxy.py b/buildscripts/tests/task_generation/test_resmoke_proxy.py
new file mode 100644
index 00000000000..4752cf4659e
--- /dev/null
+++ b/buildscripts/tests/task_generation/test_resmoke_proxy.py
@@ -0,0 +1,49 @@
+"""Unit tests for resmoke_proxy.py"""
+import unittest
+from unittest.mock import MagicMock
+
+from buildscripts.task_generation import resmoke_proxy as under_test
+
+# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access
+
+
+class TestResmokeProxy(unittest.TestCase):
+ def test_list_tests_can_handle_strings_and_lists(self):
+ mock_suite = MagicMock(
+ tests=["test0", "test1", ["test2a", "tests2b", "test2c"], "test3", ["test4a"]])
+
+ resmoke_proxy = under_test.ResmokeProxyService(under_test.ResmokeProxyConfig("suites_dir"))
+ resmoke_proxy.suitesconfig = MagicMock()
+ resmoke_proxy.suitesconfig.get_suite.return_value = mock_suite
+
+ test_list = resmoke_proxy.list_tests("some suite")
+
+ self.assertEqual(len(test_list), 7)
+
+
+class UpdateSuiteConfigTest(unittest.TestCase):
+ def test_roots_are_updated(self):
+ config = {"selector": {}}
+
+ updated_config = under_test.update_suite_config(config, "root value")
+ self.assertEqual("root value", updated_config["selector"]["roots"])
+
+ def test_excluded_files_not_included_if_not_specified(self):
+ config = {"selector": {"excluded_files": "files to exclude"}}
+
+ updated_config = under_test.update_suite_config(config, excludes=None)
+ self.assertNotIn("exclude_files", updated_config["selector"])
+
+ def test_excluded_files_added_to_misc(self):
+ config = {"selector": {}}
+
+ updated_config = under_test.update_suite_config(config, excludes="files to exclude")
+ self.assertEqual("files to exclude", updated_config["selector"]["exclude_files"])
+
+ def test_excluded_files_extended_in_misc(self):
+ config = {"selector": {"exclude_files": ["file 0", "file 1"]}}
+
+ updated_config = under_test.update_suite_config(config, excludes=["file 2", "file 3"])
+ self.assertEqual(4, len(updated_config["selector"]["exclude_files"]))
+ for exclude in ["file 0", "file 1", "file 2", "file 3"]:
+ self.assertIn(exclude, updated_config["selector"]["exclude_files"])
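Note: the UpdateSuiteConfigTest cases determine update_suite_config's contract: roots overwrite the selector, excludes extend an existing exclude_files list or install the value, and an omitted excludes argument leaves the selector untouched. A sketch under those assumptions:

    def update_suite_config(suite_config: dict, roots=None, excludes=None) -> dict:
        if roots:
            suite_config["selector"]["roots"] = roots
        if excludes:
            # Extend an existing exclude list in place; otherwise install the
            # value as given.
            existing = suite_config["selector"].get("exclude_files")
            if isinstance(existing, list):
                existing.extend(excludes)
            else:
                suite_config["selector"]["exclude_files"] = excludes
        return suite_config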
diff --git a/buildscripts/tests/task_generation/test_suite_split.py b/buildscripts/tests/task_generation/test_suite_split.py
new file mode 100644
index 00000000000..5b3963a43d4
--- /dev/null
+++ b/buildscripts/tests/task_generation/test_suite_split.py
@@ -0,0 +1,314 @@
+"""Unit tests for suite_split.py."""
+import unittest
+from datetime import datetime
+from unittest.mock import MagicMock, patch
+
+import requests
+
+import buildscripts.task_generation.suite_split as under_test
+from buildscripts.task_generation.suite_split_strategies import greedy_division, \
+ round_robin_fallback
+from buildscripts.util.teststats import TestRuntime
+
+# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access
+
+
+def mock_evg_error(mock_evg_api, error_code=requests.codes.SERVICE_UNAVAILABLE):
+ response = MagicMock(status_code=error_code)
+ mock_evg_api.test_stats_by_project.side_effect = requests.HTTPError(response=response)
+ return mock_evg_api
+
+
+def build_mock_service(evg_api=None, split_config=None, resmoke_proxy=None):
+
+ return under_test.SuiteSplitService(
+ evg_api=evg_api if evg_api else MagicMock(),
+ resmoke_proxy=resmoke_proxy if resmoke_proxy else MagicMock(),
+ config=split_config if split_config else MagicMock(),
+ split_strategy=greedy_division,
+ fallback_strategy=round_robin_fallback,
+ )
+
+
+def tst_stat_mock(file, duration, pass_count):
+ return MagicMock(test_file=file, avg_duration_pass=duration, num_pass=pass_count)
+
+
+def build_mock_split_config(target_resmoke_time=None, max_sub_suites=None):
+ return under_test.SuiteSplitConfig(
+ evg_project="project",
+ target_resmoke_time=target_resmoke_time if target_resmoke_time else 60,
+ max_sub_suites=max_sub_suites if max_sub_suites else 1000,
+ max_tests_per_suite=100,
+ start_date=datetime.utcnow(),
+ end_date=datetime.utcnow(),
+ )
+
+
+def build_mock_split_params(test_filter=None):
+ return under_test.SuiteSplitParameters(
+ build_variant="build variant",
+ task_name="task name",
+ suite_name="suite name",
+ filename="targetfile",
+ test_file_filter=test_filter,
+ )
+
+
+class TestSplitSuite(unittest.TestCase):
+ def test_calculate_suites(self):
+ mock_test_stats = [tst_stat_mock(f"test{i}.js", 60, 1) for i in range(100)]
+ split_config = build_mock_split_config(target_resmoke_time=10)
+ split_params = build_mock_split_params()
+
+ suite_split_service = build_mock_service(split_config=split_config)
+ suite_split_service.evg_api.test_stats_by_project.return_value = mock_test_stats
+ suite_split_service.resmoke_proxy.list_tests.return_value = [
+ stat.test_file for stat in mock_test_stats
+ ]
+ suite_split_service.resmoke_proxy.read_suite_config.return_value = {}
+
+ with patch("os.path.exists") as exists_mock:
+ exists_mock.return_value = True
+
+ suite = suite_split_service.split_suite(split_params)
+
+ # There are 100 tests taking 1 minute each; with a 10-minute target we expect 10 suites.
+ self.assertEqual(10, len(suite))
+ for sub_suite in suite.sub_suites:
+ self.assertEqual(10, len(sub_suite.test_list))
+
+ def test_calculate_suites_fallback_on_error(self):
+ n_tests = 100
+ max_sub_suites = 4
+ split_config = build_mock_split_config(max_sub_suites=max_sub_suites)
+ split_params = build_mock_split_params()
+
+ suite_split_service = build_mock_service(split_config=split_config)
+ mock_evg_error(suite_split_service.evg_api)
+ suite_split_service.resmoke_proxy.list_tests.return_value = [
+ f"test_{i}.js" for i in range(n_tests)
+ ]
+
+ suite = suite_split_service.split_suite(split_params)
+
+ self.assertEqual(max_sub_suites, len(suite))
+ for sub_suite in suite.sub_suites:
+ self.assertEqual(n_tests / max_sub_suites, len(sub_suite.test_list))
+
+ def test_calculate_suites_uses_fallback_on_no_results(self):
+ n_tests = 100
+ max_sub_suites = 5
+ split_config = build_mock_split_config(max_sub_suites=max_sub_suites)
+ split_params = build_mock_split_params()
+
+ suite_split_service = build_mock_service(split_config=split_config)
+ suite_split_service.evg_api.test_stats_by_project.return_value = []
+ suite_split_service.resmoke_proxy.list_tests.return_value = [
+ f"test_{i}.js" for i in range(n_tests)
+ ]
+
+ suite = suite_split_service.split_suite(split_params)
+
+ self.assertEqual(max_sub_suites, len(suite))
+ for sub_suite in suite.sub_suites:
+ self.assertEqual(n_tests / max_sub_suites, len(sub_suite.test_list))
+
+ def test_calculate_suites_uses_fallback_if_only_results_are_filtered(self):
+ n_tests = 100
+ max_sub_suites = 10
+ mock_test_stats = [tst_stat_mock(f"test{i}.js", 60, 1) for i in range(100)]
+ split_config = build_mock_split_config(target_resmoke_time=10,
+ max_sub_suites=max_sub_suites)
+ split_params = build_mock_split_params()
+
+ suite_split_service = build_mock_service(split_config=split_config)
+ suite_split_service.evg_api.test_stats_by_project.return_value = mock_test_stats
+ suite_split_service.resmoke_proxy.list_tests.return_value = [
+ f"test_{i}.js" for i in range(n_tests)
+ ]
+ suite_split_service.resmoke_proxy.read_suite_config.return_value = {}
+
+ with patch("os.path.exists") as exists_mock:
+ exists_mock.return_value = False
+
+ suite = suite_split_service.split_suite(split_params)
+
+ # Every test stat refers to a file that does not exist, so the fallback
+ # should split the suite into max_sub_suites sub-suites.
+ self.assertEqual(max_sub_suites, len(suite))
+ for sub_suite in suite.sub_suites:
+ self.assertEqual(n_tests / max_sub_suites, len(sub_suite.test_list))
+
+ def test_calculate_suites_fail_on_unexpected_error(self):
+ n_tests = 100
+ max_sub_suites = 4
+ split_config = build_mock_split_config(max_sub_suites=max_sub_suites)
+ split_params = build_mock_split_params()
+
+ suite_split_service = build_mock_service(split_config=split_config)
+ mock_evg_error(suite_split_service.evg_api, error_code=requests.codes.INTERNAL_SERVER_ERROR)
+ suite_split_service.resmoke_proxy.list_tests.return_value = [
+ f"test_{i}.js" for i in range(n_tests)
+ ]
+
+ with self.assertRaises(requests.HTTPError):
+ suite_split_service.split_suite(split_params)
+
+ def test_calculate_suites_will_filter_specified_tests(self):
+ mock_test_stats = [tst_stat_mock(f"test_{i}.js", 60, 1) for i in range(100)]
+ split_config = build_mock_split_config(target_resmoke_time=10)
+ split_params = build_mock_split_params(
+ test_filter=lambda t: t in {"test_1.js", "test_2.js"})
+
+ suite_split_service = build_mock_service(split_config=split_config)
+ suite_split_service.evg_api.test_stats_by_project.return_value = mock_test_stats
+ suite_split_service.resmoke_proxy.list_tests.return_value = [
+ stat.test_file for stat in mock_test_stats
+ ]
+ suite_split_service.resmoke_proxy.read_suite_config.return_value = {}
+
+ with patch("os.path.exists") as exists_mock:
+ exists_mock.return_value = True
+
+ suite = suite_split_service.split_suite(split_params)
+
+ self.assertEqual(1, len(suite))
+ for sub_suite in suite.sub_suites:
+ self.assertEqual(2, len(sub_suite.test_list))
+ self.assertIn("test_1.js", sub_suite.test_list)
+ self.assertIn("test_2.js", sub_suite.test_list)
+
+
+class TestFilterTests(unittest.TestCase):
+ def test_filter_missing_files(self):
+ tests_runtimes = [
+ TestRuntime(test_name="dir1/file1.js", runtime=20.32),
+ TestRuntime(test_name="dir2/file2.js", runtime=24.32),
+ TestRuntime(test_name="dir1/file3.js", runtime=36.32),
+ ]
+ mock_params = MagicMock(test_file_filter=None)
+ mock_resmoke_proxy = MagicMock()
+ mock_resmoke_proxy.list_tests.return_value = [
+ runtime.test_name for runtime in tests_runtimes
+ ]
+ suite_split_service = build_mock_service(resmoke_proxy=mock_resmoke_proxy)
+
+ with patch("os.path.exists") as exists_mock:
+ exists_mock.side_effect = [False, True, True]
+ filtered_list = suite_split_service.filter_tests(tests_runtimes, mock_params)
+
+ self.assertEqual(2, len(filtered_list))
+ self.assertNotIn(tests_runtimes[0], filtered_list)
+ self.assertIn(tests_runtimes[2], filtered_list)
+ self.assertIn(tests_runtimes[1], filtered_list)
+
+ def test_filter_blacklist_files(self):
+ tests_runtimes = [
+ TestRuntime(test_name="dir1/file1.js", runtime=20.32),
+ TestRuntime(test_name="dir2/file2.js", runtime=24.32),
+ TestRuntime(test_name="dir1/file3.js", runtime=36.32),
+ ]
+ blacklisted_test = tests_runtimes[1][0]
+ mock_params = MagicMock(test_file_filter=None)
+ mock_resmoke_proxy = MagicMock()
+ mock_resmoke_proxy.list_tests.return_value = [
+ runtime.test_name for runtime in tests_runtimes if runtime.test_name != blacklisted_test
+ ]
+ suite_split_service = build_mock_service(resmoke_proxy=mock_resmoke_proxy)
+
+ with patch("os.path.exists") as exists_mock:
+ exists_mock.return_value = True
+
+ filtered_list = suite_split_service.filter_tests(tests_runtimes, mock_params)
+
+ self.assertEqual(2, len(filtered_list))
+ self.assertNotIn(blacklisted_test, filtered_list)
+ self.assertIn(tests_runtimes[2], filtered_list)
+ self.assertIn(tests_runtimes[0], filtered_list)
+
+ def test_filter_blacklist_files_for_windows(self):
+ tests_runtimes = [
+ TestRuntime(test_name="dir1/file1.js", runtime=20.32),
+ TestRuntime(test_name="dir2/file2.js", runtime=24.32),
+ TestRuntime(test_name="dir1/dir3/file3.js", runtime=36.32),
+ ]
+
+ blacklisted_test = tests_runtimes[1][0]
+
+ mock_params = MagicMock(test_file_filter=None)
+ mock_resmoke_proxy = MagicMock()
+ mock_resmoke_proxy.list_tests.return_value = [
+ runtime.test_name.replace("/", "\\") for runtime in tests_runtimes
+ if runtime.test_name != blacklisted_test
+ ]
+ suite_split_service = build_mock_service(resmoke_proxy=mock_resmoke_proxy)
+
+ with patch("os.path.exists") as exists_mock:
+ exists_mock.return_value = True
+
+ filtered_list = suite_split_service.filter_tests(tests_runtimes, mock_params)
+
+ self.assertNotIn(blacklisted_test, filtered_list)
+ self.assertIn(tests_runtimes[2], filtered_list)
+ self.assertIn(tests_runtimes[0], filtered_list)
+ self.assertEqual(2, len(filtered_list))
+
+
+class TestGetCleanEveryNCadence(unittest.TestCase):
+ def test_clean_every_n_cadence_on_asan(self):
+ split_config = MagicMock(
+ san_options="ASAN_OPTIONS=\"detect_leaks=1:check_initialization_order=true\"")
+ suite_split_service = build_mock_service(split_config=split_config)
+
+ cadence = suite_split_service._get_clean_every_n_cadence("suite", True)
+
+ self.assertEqual(1, cadence)
+
+ def test_clean_every_n_cadence_from_hook_config(self):
+ expected_n = 42
+ mock_resmoke_proxy = MagicMock()
+ mock_resmoke_proxy.read_suite_config.return_value = {
+ "executor": {
+ "hooks": [{
+ "class": "hook1",
+ }, {
+ "class": under_test.CLEAN_EVERY_N_HOOK,
+ "n": expected_n,
+ }]
+ }
+ }
+ suite_split_service = build_mock_service(resmoke_proxy=mock_resmoke_proxy)
+
+ cadence = suite_split_service._get_clean_every_n_cadence("suite", False)
+
+ self.assertEqual(expected_n, cadence)
+
+ def test_clean_every_n_cadence_no_n_in_hook_config(self):
+ mock_resmoke_proxy = MagicMock()
+ mock_resmoke_proxy.read_suite_config.return_value = {
+ "executor": {
+ "hooks": [{
+ "class": "hook1",
+ }, {
+ "class": under_test.CLEAN_EVERY_N_HOOK,
+ }]
+ }
+ }
+ suite_split_service = build_mock_service(resmoke_proxy=mock_resmoke_proxy)
+
+ cadence = suite_split_service._get_clean_every_n_cadence("suite", False)
+
+ self.assertEqual(1, cadence)
+
+ def test_clean_every_n_cadence_no_hook_config(self):
+ mock_resmoke_proxy = MagicMock()
+ mock_resmoke_proxy.read_suite_config.return_value = {
+ "executor": {"hooks": [{
+ "class": "hook1",
+ }]}
+ }
+ suite_split_service = build_mock_service(resmoke_proxy=mock_resmoke_proxy)
+
+ cadence = suite_split_service._get_clean_every_n_cadence("suite", False)
+
+ self.assertEqual(1, cadence)
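Note: the cadence tests above imply the rule: ASAN builds always clean after every test, otherwise an explicit "n" on the clean-every-N hook wins, and everything else defaults to 1. A sketch taking the already-read suite config dict (the real method takes a suite name and reads the config through the resmoke proxy, and the hook class name here is an assumption) could be:

    CLEAN_EVERY_N_HOOK = "CleanEveryN"  # assumed hook class name


    def get_clean_every_n_cadence(suite_config: dict, asan_enabled: bool) -> int:
        if asan_enabled:
            # ASAN builds always clean up after every test.
            return 1
        # Otherwise honor an explicit "n" on the clean-every-N hook, if present.
        hooks = suite_config.get("executor", {}).get("hooks", [])
        for hook in hooks:
            if hook.get("class") == CLEAN_EVERY_N_HOOK and "n" in hook:
                return hook["n"]
        return 1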
diff --git a/buildscripts/tests/task_generation/test_suite_split_strategies.py b/buildscripts/tests/task_generation/test_suite_split_strategies.py
new file mode 100644
index 00000000000..08f295d1249
--- /dev/null
+++ b/buildscripts/tests/task_generation/test_suite_split_strategies.py
@@ -0,0 +1,128 @@
+"""Unit tests for suite_split_strategies.py."""
+import unittest
+
+import buildscripts.task_generation.suite_split_strategies as under_test
+from buildscripts.util.teststats import TestRuntime
+
+# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access,no-value-for-parameter
+
+
+class TestDivideRemainingTestsAmongSuites(unittest.TestCase):
+ @staticmethod
+ def generate_tests_runtimes(n_tests):
+ tests_runtimes = []
+ # Iterating backwards so the list is sorted by descending runtimes
+ for idx in range(n_tests - 1, -1, -1):
+ name = "test_{0}".format(idx)
+ tests_runtimes.append(TestRuntime(name, 2 * idx))
+
+ return tests_runtimes
+
+ def test_each_suite_gets_one_test(self):
+ suites = [[] for _ in range(3)]
+ tests_runtimes = self.generate_tests_runtimes(3)
+
+ under_test.divide_remaining_tests_among_suites(tests_runtimes, suites)
+
+ for suite in suites:
+ self.assertEqual(len(suite), 1)
+
+ def test_each_suite_gets_at_least_one_test(self):
+ suites = [[] for _ in range(3)]
+ tests_runtimes = self.generate_tests_runtimes(5)
+
+ under_test.divide_remaining_tests_among_suites(tests_runtimes, suites)
+
+ for suite in suites:
+ self.assertGreaterEqual(len(suite), 1)
+
+ total_tests = sum(len(suite) for suite in suites)
+ self.assertEqual(total_tests, len(tests_runtimes))
+
+
+class TestGreedyDivision(unittest.TestCase):
+ def test_if_less_total_than_max_only_one_suite_created(self):
+ max_time = 20
+ tests_runtimes = [
+ TestRuntime("test1", 5),
+ TestRuntime("test2", 4),
+ TestRuntime("test3", 3),
+ ]
+
+ suites = under_test.greedy_division(tests_runtimes, max_time)
+
+ self.assertEqual(len(suites), 1)
+ for test in tests_runtimes:
+ self.assertIn(test.test_name, suites[0])
+
+ def test_if_each_test_should_be_own_suite(self):
+ max_time = 5
+ tests_runtimes = [
+ TestRuntime("test1", 5),
+ TestRuntime("test2", 4),
+ TestRuntime("test3", 3),
+ ]
+
+ suites = under_test.greedy_division(tests_runtimes, max_time)
+
+ self.assertEqual(len(suites), 3)
+
+ def test_if_test_is_greater_than_max_it_goes_alone(self):
+ max_time = 7
+ tests_runtimes = [
+ TestRuntime("test1", 15),
+ TestRuntime("test2", 4),
+ TestRuntime("test3", 3),
+ ]
+
+ suites = under_test.greedy_division(tests_runtimes, max_time)
+
+ self.assertEqual(len(suites), 2)
+ self.assertEqual(len(suites[0]), 1)
+ self.assertIn("test1", suites[0])
+
+ def test_max_sub_suites_options(self):
+ max_time = 5
+ max_suites = 2
+ tests_runtimes = [
+ TestRuntime("test1", 5),
+ TestRuntime("test2", 4),
+ TestRuntime("test3", 3),
+ TestRuntime("test4", 4),
+ TestRuntime("test5", 3),
+ ]
+
+ suites = under_test.greedy_division(tests_runtimes, max_time, max_suites=max_suites)
+
+ self.assertEqual(len(suites), max_suites)
+ total_tests = sum(len(suite) for suite in suites)
+ self.assertEqual(total_tests, len(tests_runtimes))
+
+ def test_max_tests_per_suites_is_one(self):
+ max_time = 5
+ num_tests = 10
+ tests_runtimes = [TestRuntime(f"tests_{i}", i) for i in range(num_tests)]
+
+ suites = under_test.greedy_division(tests_runtimes, max_time, max_tests_per_suite=1)
+
+ self.assertEqual(len(suites), num_tests)
+
+ def test_max_tests_per_suites_is_less_than_number_of_tests(self):
+ max_time = 100
+ num_tests = 10
+ tests_runtimes = [TestRuntime(f"tests_{i}", 1) for i in range(num_tests)]
+
+ suites = under_test.greedy_division(tests_runtimes, max_time, max_tests_per_suite=2)
+
+ self.assertEqual(len(suites), num_tests // 2)
+
+ def test_max_suites_overrides_max_tests_per_suite(self):
+ max_time = 100
+ num_tests = 10
+ max_suites = 2
+ tests_runtimes = [TestRuntime(f"tests_{i}", 1) for i in range(num_tests)]
+
+ suites = under_test.greedy_division(tests_runtimes, max_time, max_suites=max_suites,
+ max_tests_per_suite=2)
+
+ self.assertEqual(len(suites), max_suites)
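Note: the cases above constrain greedy_division tightly: pack tests in order until the time budget or per-suite count would be exceeded, let an oversized test sit alone, and when the suite cap is hit, spread the unprocessed tests over the existing suites. A simplified sketch that satisfies these assertions (TestRuntime is redefined here for self-containment; the real fallback may weight by runtime rather than round-robin) could be:

    from typing import List, NamedTuple, Optional


    class TestRuntime(NamedTuple):
        test_name: str
        runtime: float


    def divide_remaining_tests_among_suites(remaining: List[TestRuntime],
                                            suites: List[List[str]]) -> None:
        # Round-robin the leftovers so every suite keeps roughly the same size.
        for idx, test in enumerate(remaining):
            suites[idx % len(suites)].append(test.test_name)


    def greedy_division(tests_runtimes: List[TestRuntime], max_time: float,
                        max_suites: Optional[int] = None,
                        max_tests_per_suite: Optional[int] = None) -> List[List[str]]:
        suites: List[List[str]] = []
        current: List[str] = []
        running_time = 0.0
        last_idx = len(tests_runtimes)
        for idx, (test_name, runtime) in enumerate(tests_runtimes):
            over_time = current and running_time + runtime > max_time
            over_count = max_tests_per_suite and len(current) >= max_tests_per_suite
            if over_time or over_count:
                if max_suites and len(suites) + 1 >= max_suites:
                    last_idx = idx
                    break
                suites.append(current)
                current, running_time = [], 0.0
            current.append(test_name)
            running_time += runtime
        if current:
            suites.append(current)
        if last_idx < len(tests_runtimes):
            # Hit the suite cap; spread the unprocessed tests over existing suites.
            divide_remaining_tests_among_suites(tests_runtimes[last_idx:], suites)
        return suites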
diff --git a/buildscripts/tests/task_generation/test_timeout.py b/buildscripts/tests/task_generation/test_timeout.py
new file mode 100644
index 00000000000..4e785897706
--- /dev/null
+++ b/buildscripts/tests/task_generation/test_timeout.py
@@ -0,0 +1,51 @@
+"""Unit tests for timeout.py."""
+import unittest
+
+from buildscripts.task_generation import timeout as under_test
+
+# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access,no-value-for-parameter
+
+
+class CalculateTimeoutTest(unittest.TestCase):
+ def test_min_timeout(self):
+ self.assertEqual(under_test.MIN_TIMEOUT_SECONDS + under_test.AVG_SETUP_TIME,
+ under_test.calculate_timeout(15, 1))
+
+ def test_over_timeout_by_one_minute(self):
+ self.assertEqual(660, under_test.calculate_timeout(301, 1))
+
+ def test_float_runtimes(self):
+ self.assertEqual(660, under_test.calculate_timeout(300.14, 1))
+
+ def test_scaling_factor(self):
+ scaling_factor = 10
+ self.assertEqual(
+ under_test.MIN_TIMEOUT_SECONDS * scaling_factor + under_test.AVG_SETUP_TIME,
+ under_test.calculate_timeout(30, scaling_factor))
+
+
+class TimeoutEstimateTest(unittest.TestCase):
+ def test_too_high_a_timeout_raises_errors(self):
+ timeout_est = under_test.TimeoutEstimate(
+ max_test_runtime=5, expected_task_runtime=under_test.MAX_EXPECTED_TIMEOUT)
+
+ with self.assertRaises(ValueError):
+ timeout_est.generate_timeout_cmd(is_patch=True, repeat_factor=1)
+
+
+class TestGenerateTimeoutCmd(unittest.TestCase):
+ def test_evg_config_does_not_fail_if_test_timeout_too_high_on_mainline(self):
+ timeout = under_test.TimeoutEstimate(max_test_runtime=under_test.MAX_EXPECTED_TIMEOUT + 1,
+ expected_task_runtime=None)
+
+ time_cmd = timeout.generate_timeout_cmd(is_patch=False, repeat_factor=1)
+
+ self.assertGreater(time_cmd.timeout, under_test.MAX_EXPECTED_TIMEOUT)
+
+ def test_evg_config_does_not_fail_if_task_timeout_too_high_on_mainline(self):
+ timeout = under_test.TimeoutEstimate(
+ expected_task_runtime=under_test.MAX_EXPECTED_TIMEOUT + 1, max_test_runtime=None)
+
+ time_cmd = timeout.generate_timeout_cmd(is_patch=False, repeat_factor=1)
+
+ self.assertGreater(time_cmd.exec_timeout, under_test.MAX_EXPECTED_TIMEOUT)
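Note: the CalculateTimeoutTest assertions are consistent with "scale the runtime, floor it at the minimum, round up to a whole minute, then add setup time". The constant values below are assumptions chosen to satisfy the assertions (both read as five minutes), not confirmed against the real timeout.py:

    import math

    # Assumed constants; the real values live in task_generation/timeout.py.
    MIN_TIMEOUT_SECONDS = 5 * 60
    AVG_SETUP_TIME = 5 * 60


    def calculate_timeout(avg_runtime: float, scaling_factor: int) -> int:
        def round_to_minute(runtime: float) -> int:
            # Round up to the next whole minute.
            return int(math.ceil(runtime / 60.0)) * 60

        return max(MIN_TIMEOUT_SECONDS * scaling_factor,
                   round_to_minute(avg_runtime) * scaling_factor) + AVG_SETUP_TIME

For example, calculate_timeout(301, 1) rounds 301s up to 360s and adds 300s of setup, giving the 660 asserted above.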
diff --git a/buildscripts/tests/test_burn_in_tests_multiversion.py b/buildscripts/tests/test_burn_in_tests_multiversion.py
index ac9fda0974c..d5bcf373ef5 100644
--- a/buildscripts/tests/test_burn_in_tests_multiversion.py
+++ b/buildscripts/tests/test_burn_in_tests_multiversion.py
@@ -2,27 +2,36 @@
from __future__ import absolute_import
-import datetime
+import json
+from datetime import datetime
import os
import sys
import unittest
-
from mock import MagicMock, patch
+import inject
from shrub.v2 import BuildVariant, ShrubProject
+from evergreen import EvergreenApi
import buildscripts.burn_in_tests_multiversion as under_test
-from buildscripts.burn_in_tests import TaskInfo, RepeatConfig
-from buildscripts.ciconfig.evergreen import parse_evergreen_file
+from buildscripts.burn_in_tests import TaskInfo
+from buildscripts.ciconfig.evergreen import parse_evergreen_file, EvergreenProjectConfig
import buildscripts.resmokelib.parser as _parser
import buildscripts.evergreen_gen_multiversion_tests as gen_multiversion
-from buildscripts.evergreen_burn_in_tests import GenerateBurnInExecutor
+from buildscripts.evergreen_burn_in_tests import GenerateBurnInExecutor, EvergreenFileChangeDetector
+from buildscripts.evergreen_generate_resmoke_tasks import GENERATE_CONFIG_FILE
+from buildscripts.task_generation.gen_config import GenerationConfiguration
+from buildscripts.task_generation.resmoke_proxy import ResmokeProxyConfig
+from buildscripts.task_generation.suite_split import SuiteSplitConfig
+from buildscripts.task_generation.suite_split_strategies import greedy_division, SplitStrategy, \
+ FallbackStrategy, round_robin_fallback
+from buildscripts.task_generation.task_types.gentask_options import GenTaskOptions
_parser.set_run_options()
MONGO_4_2_HASH = "d94888c0d0a8065ca57d354ece33b3c2a1a5a6d6"
-# pylint: disable=missing-docstring,protected-access,too-many-lines,no-self-use
+# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access,no-value-for-parameter
def create_tests_by_task_mock(n_tasks, n_tests, multiversion_values=None):
@@ -41,15 +50,31 @@ def create_tests_by_task_mock(n_tasks, n_tests, multiversion_values=None):
MV_MOCK_SUITES = ["replica_sets_jscore_passthrough", "sharding_jscore_passthrough"]
+MV_MOCK_TESTS = {
+ "replica_sets_jscore_passthrough": [
+ "core/all.js",
+ "core/andor.js",
+ "core/apitest_db.js",
+ "core/auth1.js",
+ "core/auth2.js",
+ ], "sharding_jscore_passthrough": [
+ "core/basic8.js",
+ "core/batch_size.js",
+ "core/bson.js",
+ "core/bulk_insert.js",
+ "core/capped.js",
+ ]
+}
def create_multiversion_tests_by_task_mock(n_tasks, n_tests):
assert n_tasks <= len(MV_MOCK_SUITES)
+ assert n_tests <= len(MV_MOCK_TESTS[MV_MOCK_SUITES[0]])
return {
- f"{MV_MOCK_SUITES[i % len(MV_MOCK_SUITES)]}": TaskInfo(
+ f"{MV_MOCK_SUITES[i]}": TaskInfo(
display_task_name=f"task_{i}",
resmoke_args=f"--suites=suite_{i}",
- tests=[f"jstests/tests_{j}" for j in range(n_tests)],
+ tests=[f"jstests/{MV_MOCK_TESTS[MV_MOCK_SUITES[i]][j]}" for j in range(n_tests)],
use_multiversion=None,
distro="",
)
@@ -57,7 +82,7 @@ def create_multiversion_tests_by_task_mock(n_tasks, n_tests):
}
-_DATE = datetime.datetime(2018, 7, 15)
+_DATE = datetime(2018, 7, 15)
BURN_IN_TESTS = "buildscripts.burn_in_tests"
NUM_REPL_MIXED_VERSION_CONFIGS = len(gen_multiversion.REPL_MIXED_VERSION_CONFIGS)
NUM_SHARDED_MIXED_VERSION_CONFIGS = len(gen_multiversion.SHARDED_MIXED_VERSION_CONFIGS)
@@ -110,34 +135,92 @@ def create_variant_task_mock(task_name, suite_name, distro="distro"):
return variant_task
+def build_mock_gen_task_options():
+ return GenTaskOptions(
+ create_misc_suite=False,
+ is_patch=True,
+ generated_config_dir=under_test.DEFAULT_CONFIG_DIR,
+ use_default_timeouts=False,
+ )
+
+
+def build_mock_split_task_config():
+ return SuiteSplitConfig(
+ evg_project="my project",
+ target_resmoke_time=60,
+ max_sub_suites=100,
+ max_tests_per_suite=1,
+ start_date=datetime.utcnow(),
+ end_date=datetime.utcnow(),
+ default_to_fallback=True,
+ )
+
+
+def configure_dependencies(evg_api, split_config):
+ gen_task_options = build_mock_gen_task_options()
+
+ def dependencies(binder: inject.Binder) -> None:
+ binder.bind(SuiteSplitConfig, split_config)
+ binder.bind(SplitStrategy, greedy_division)
+ binder.bind(FallbackStrategy, round_robin_fallback)
+ binder.bind(GenTaskOptions, gen_task_options)
+ binder.bind(EvergreenApi, evg_api)
+ binder.bind(GenerationConfiguration,
+ GenerationConfiguration.from_yaml_file(GENERATE_CONFIG_FILE))
+ binder.bind(ResmokeProxyConfig,
+ ResmokeProxyConfig(resmoke_suite_dir=under_test.DEFAULT_TEST_SUITE_DIR))
+ binder.bind(EvergreenFileChangeDetector, None)
+ binder.bind(EvergreenProjectConfig, MagicMock())
+ binder.bind(
+ under_test.BurnInConfig,
+ under_test.BurnInConfig(build_id="build_id", build_variant="build variant",
+ revision="revision"))
+
+ inject.clear_and_configure(dependencies)
+
+
class TestCreateMultiversionGenerateTasksConfig(unittest.TestCase):
def tests_no_tasks_given(self):
- gen_config = MagicMock(run_build_variant="variant", fallback_num_sub_suites=1,
- project="project", build_variant="build_variant", task_id="task_id",
- target_resmoke_time=60)
- evg_api = MagicMock()
- build_variant = under_test.create_multiversion_generate_tasks_config({}, evg_api,
- gen_config)
- evg_config_dict = build_variant.as_dict()
+ target_file = "target_file.json"
+ mock_evg_api = MagicMock()
+ split_config = build_mock_split_task_config()
+ configure_dependencies(mock_evg_api, split_config)
+
+ orchestrator = under_test.MultiversionBurnInOrchestrator()
+ generated_config = orchestrator.generate_configuration({}, target_file, "build_variant")
+
+ evg_config = [
+ config for config in generated_config.file_list if config.file_name == target_file
+ ]
+ self.assertEqual(1, len(evg_config))
+ evg_config = evg_config[0]
+ evg_config_dict = json.loads(evg_config.content)
+
self.assertEqual(0, len(evg_config_dict["tasks"]))
def test_tasks_not_in_multiversion_suites(self):
n_tasks = 1
n_tests = 1
- gen_config = MagicMock(run_build_variant="variant", fallback_num_sub_suites=1,
- project="project", build_variant="build_variant", task_id="task_id",
- target_resmoke_time=60)
- evg_api = MagicMock()
-
- # Create a tests_by_tasks dict that doesn't contain any multiversion suites.
+ target_file = "target_file.json"
+ mock_evg_api = MagicMock()
+ split_config = build_mock_split_task_config()
+ configure_dependencies(mock_evg_api, split_config)
tests_by_task = create_tests_by_task_mock(n_tasks, n_tests)
- build_variant = under_test.create_multiversion_generate_tasks_config(
- tests_by_task, evg_api, gen_config)
- evg_config_dict = build_variant.as_dict()
- # We should not generate any tasks that are not part of the burn_in_multiversion suite.
+ orchestrator = under_test.MultiversionBurnInOrchestrator()
+ generated_config = orchestrator.generate_configuration(tests_by_task, target_file,
+ "build_variant")
+
+ evg_config = [
+ config for config in generated_config.file_list if config.file_name == target_file
+ ]
+ self.assertEqual(1, len(evg_config))
+ evg_config = evg_config[0]
+ evg_config_dict = json.loads(evg_config.content)
+
self.assertEqual(0, len(evg_config_dict["tasks"]))
+ @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
@patch(
"buildscripts.evergreen_gen_multiversion_tests.get_backports_required_hash_for_shell_version"
)
@@ -145,18 +228,26 @@ class TestCreateMultiversionGenerateTasksConfig(unittest.TestCase):
mock_hash.return_value = MONGO_4_2_HASH
n_tasks = 1
n_tests = 1
- gen_config = MagicMock(run_build_variant="variant", fallback_num_sub_suites=1,
- project="project", build_variant="build_variant", task_id="task_id",
- target_resmoke_time=60)
- evg_api = MagicMock()
-
+ target_file = "target_file.json"
+ mock_evg_api = MagicMock()
+ split_config = build_mock_split_task_config()
+ configure_dependencies(mock_evg_api, split_config)
tests_by_task = create_multiversion_tests_by_task_mock(n_tasks, n_tests)
- build_variant = under_test.create_multiversion_generate_tasks_config(
- tests_by_task, evg_api, gen_config)
- evg_config_dict = build_variant.as_dict()
+
+ orchestrator = under_test.MultiversionBurnInOrchestrator()
+ generated_config = orchestrator.generate_configuration(tests_by_task, target_file,
+ "build_variant")
+
+ evg_config = [
+ config for config in generated_config.file_list if config.file_name == target_file
+ ]
+ self.assertEqual(1, len(evg_config))
+ evg_config = evg_config[0]
+ evg_config_dict = json.loads(evg_config.content)
tasks = evg_config_dict["tasks"]
self.assertEqual(len(tasks), NUM_REPL_MIXED_VERSION_CONFIGS * n_tests)
+ @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
@patch(
"buildscripts.evergreen_gen_multiversion_tests.get_backports_required_hash_for_shell_version"
)
@@ -164,20 +255,28 @@ class TestCreateMultiversionGenerateTasksConfig(unittest.TestCase):
mock_hash.return_value = MONGO_4_2_HASH
n_tasks = 2
n_tests = 1
- gen_config = MagicMock(run_build_variant="variant", fallback_num_sub_suites=1,
- project="project", build_variant="build_variant", task_id="task_id",
- target_resmoke_time=60)
- evg_api = MagicMock()
-
+ target_file = "target_file.json"
+ mock_evg_api = MagicMock()
+ split_config = build_mock_split_task_config()
+ configure_dependencies(mock_evg_api, split_config)
tests_by_task = create_multiversion_tests_by_task_mock(n_tasks, n_tests)
- build_variant = under_test.create_multiversion_generate_tasks_config(
- tests_by_task, evg_api, gen_config)
- evg_config_dict = build_variant.as_dict()
+
+ orchestrator = under_test.MultiversionBurnInOrchestrator()
+ generated_config = orchestrator.generate_configuration(tests_by_task, target_file,
+ "build_variant")
+
+ evg_config = [
+ config for config in generated_config.file_list if config.file_name == target_file
+ ]
+ self.assertEqual(1, len(evg_config))
+ evg_config = evg_config[0]
+ evg_config_dict = json.loads(evg_config.content)
tasks = evg_config_dict["tasks"]
self.assertEqual(
len(tasks),
(NUM_REPL_MIXED_VERSION_CONFIGS + NUM_SHARDED_MIXED_VERSION_CONFIGS) * n_tests)
+ @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
@patch(
"buildscripts.evergreen_gen_multiversion_tests.get_backports_required_hash_for_shell_version"
)
@@ -185,18 +284,26 @@ class TestCreateMultiversionGenerateTasksConfig(unittest.TestCase):
mock_hash.return_value = MONGO_4_2_HASH
n_tasks = 1
n_tests = 2
- gen_config = MagicMock(run_build_variant="variant", fallback_num_sub_suites=1,
- project="project", build_variant="build_variant", task_id="task_id",
- target_resmoke_time=60)
- evg_api = MagicMock()
-
+ target_file = "target_file.json"
+ mock_evg_api = MagicMock()
+ split_config = build_mock_split_task_config()
+ configure_dependencies(mock_evg_api, split_config)
tests_by_task = create_multiversion_tests_by_task_mock(n_tasks, n_tests)
- build_variant = under_test.create_multiversion_generate_tasks_config(
- tests_by_task, evg_api, gen_config)
- evg_config_dict = build_variant.as_dict()
+
+ orchestrator = under_test.MultiversionBurnInOrchestrator()
+ generated_config = orchestrator.generate_configuration(tests_by_task, target_file,
+ "build_variant")
+
+ evg_config = [
+ config for config in generated_config.file_list if config.file_name == target_file
+ ]
+ self.assertEqual(1, len(evg_config))
+ evg_config = evg_config[0]
+ evg_config_dict = json.loads(evg_config.content)
tasks = evg_config_dict["tasks"]
self.assertEqual(len(tasks), NUM_REPL_MIXED_VERSION_CONFIGS * n_tests)
+ @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
@patch(
"buildscripts.evergreen_gen_multiversion_tests.get_backports_required_hash_for_shell_version"
)
@@ -204,15 +311,22 @@ class TestCreateMultiversionGenerateTasksConfig(unittest.TestCase):
mock_hash.return_value = MONGO_4_2_HASH
n_tasks = 2
n_tests = 3
- gen_config = MagicMock(run_build_variant="variant", fallback_num_sub_suites=1,
- project="project", build_variant="build_variant", task_id="task_id",
- target_resmoke_time=60)
- evg_api = MagicMock()
-
+ target_file = "target_file.json"
+ mock_evg_api = MagicMock()
+ split_config = build_mock_split_task_config()
+ configure_dependencies(mock_evg_api, split_config)
tests_by_task = create_multiversion_tests_by_task_mock(n_tasks, n_tests)
- build_variant = under_test.create_multiversion_generate_tasks_config(
- tests_by_task, evg_api, gen_config)
- evg_config_dict = build_variant.as_dict()
+
+ orchestrator = under_test.MultiversionBurnInOrchestrator()
+ generated_config = orchestrator.generate_configuration(tests_by_task, target_file,
+ "build_variant")
+
+ evg_config = [
+ config for config in generated_config.file_list if config.file_name == target_file
+ ]
+ self.assertEqual(1, len(evg_config))
+ evg_config = evg_config[0]
+ evg_config_dict = json.loads(evg_config.content)
tasks = evg_config_dict["tasks"]
self.assertEqual(
len(tasks),
diff --git a/buildscripts/tests/test_evergreen_gen_fuzzer_tests.py b/buildscripts/tests/test_evergreen_gen_fuzzer_tests.py
deleted file mode 100644
index 71b5129999b..00000000000
--- a/buildscripts/tests/test_evergreen_gen_fuzzer_tests.py
+++ /dev/null
@@ -1,85 +0,0 @@
-"""Unit tests for the evergreen_gen_fuzzer_tests.py script."""
-
-import unittest
-import mock
-
-from shrub.v2 import BuildVariant, ShrubProject
-
-from buildscripts import evergreen_gen_fuzzer_tests as under_test
-
-# pylint: disable=missing-docstring,protected-access
-
-
-class TestCreateFuzzerTask(unittest.TestCase):
- @staticmethod
- def _create_options_mock():
- options = mock.Mock(spec=under_test.ConfigOptions)
- options.num_tasks = 15
- options.name = "test_task"
- options.use_multiversion = False
- options.npm_command = "jstestfuzz"
- options.num_files = 314
- options.jstestfuzz_vars = "var 1 var 2"
- options.resmoke_args = "resmoke args"
- options.variant = "build variant"
- options.continue_on_failure = "false"
- options.resmoke_jobs_max = 0
- options.should_shuffle = "false"
- options.timeout_secs = "1800"
- options.suite = "test_suite"
-
- return options
-
- def test_evg_config_is_created_without_multiversion(self):
- build_variant = BuildVariant("build variant")
- options = self._create_options_mock()
-
- under_test.create_fuzzer_task(options, build_variant)
- shrub_project = ShrubProject.empty().add_build_variant(build_variant)
- config = shrub_project.as_dict()
-
- self.assertEqual(options.num_tasks, len(config["tasks"]))
-
- self.assertEqual("setup jstestfuzz", config["tasks"][0]["commands"][1]["func"])
-
- command1 = config["tasks"][0]["commands"][2]
- self.assertIn(str(options.num_files), command1["vars"]["jstestfuzz_vars"])
- self.assertIn(options.npm_command, command1["vars"]["npm_command"])
- self.assertEqual("run jstestfuzz", command1["func"])
-
- buildvariant = config["buildvariants"][0]
- self.assertEqual(options.variant, buildvariant["name"])
- self.assertEqual(options.num_tasks, len(buildvariant["tasks"]))
- self.assertEqual(1, len(buildvariant["display_tasks"][0]["execution_tasks"]))
- self.assertEqual(under_test.GEN_PARENT_TASK, buildvariant["display_tasks"][0]["name"])
- self.assertIn(options.name + "_gen", buildvariant["display_tasks"][0]["execution_tasks"])
- self.assertEqual(options.num_tasks,
- len(buildvariant["display_tasks"][1]["execution_tasks"]))
- self.assertEqual(options.name, buildvariant["display_tasks"][1]["name"])
-
- def test_evg_config_is_created_with_multiversion(self):
- build_variant = BuildVariant("build variant")
- options = self._create_options_mock()
- options.use_multiversion = "/data/multiversion"
-
- under_test.create_fuzzer_task(options, build_variant)
- shrub_project = ShrubProject.empty().add_build_variant(build_variant)
- config = shrub_project.as_dict()
-
- self.assertEqual("do multiversion setup", config["tasks"][0]["commands"][2]["func"])
- self.assertEqual("/data/multiversion",
- config["tasks"][0]["commands"][5]["vars"]["task_path_suffix"])
-
- def test_with_large_distro(self):
- build_variant = BuildVariant("build variant")
- options = self._create_options_mock()
- options.large_distro_name = "large build variant"
- options.use_large_distro = True
-
- under_test.create_fuzzer_task(options, build_variant)
- shrub_project = ShrubProject.empty().add_build_variant(build_variant)
- config = shrub_project.as_dict()
-
- for variant in config["buildvariants"]:
- for task in variant["tasks"]:
- self.assertEqual(task["distros"], [options.large_distro_name])
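For reference, every test in the deleted file followed the same shrub.v2 round trip: build a BuildVariant, let the code under test populate it, wrap it in a ShrubProject, and assert on the resulting dict. A minimal sketch of that round trip, where only BuildVariant, ShrubProject.empty().add_build_variant and as_dict are taken from the code above, and populate is a hypothetical stand-in for create_fuzzer_task:

    from shrub.v2 import BuildVariant, ShrubProject

    def config_dict_for(populate):
        """Run a config-populating function and return the shrub dict it produced."""
        build_variant = BuildVariant("build variant")
        populate(build_variant)  # e.g. under_test.create_fuzzer_task(options, build_variant)
        shrub_project = ShrubProject.empty().add_build_variant(build_variant)
        return shrub_project.as_dict()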
diff --git a/buildscripts/tests/test_evergreen_gen_multiversion_tests.py b/buildscripts/tests/test_evergreen_gen_multiversion_tests.py
index 1cb4930460c..e93b05aa4ca 100644
--- a/buildscripts/tests/test_evergreen_gen_multiversion_tests.py
+++ b/buildscripts/tests/test_evergreen_gen_multiversion_tests.py
@@ -14,59 +14,6 @@ from buildscripts.util.fileops import read_yaml_file
# pylint: disable=missing-docstring, no-self-use
-class TestRun(unittest.TestCase):
- def setUp(self):
- self._tmpdir = TemporaryDirectory()
-
- def tearDown(self):
- self._tmpdir.cleanup()
- under_test.CONFIG_DIR = generate_resmoke.DEFAULT_CONFIG_VALUES
-
- @patch.object(under_test.EvergreenMultiversionConfigGenerator, 'generate_evg_tasks')
- @patch('buildscripts.evergreen_generate_resmoke_tasks.should_tasks_be_generated')
- @patch('buildscripts.evergreen_gen_multiversion_tests.write_file_to_dir')
- def test_empty_result_config_fails(self, generate_evg_tasks, should_tasks_be_generated,
- write_file_to_dir):
- # pylint: disable=unused-argument
- ''' Hijacks the write_file_to_dir function to prevent the configuration
- from being written to disk, and ensure the command fails '''
- under_test.CONFIG_DIR = self._tmpdir.name
-
- # NamedTemporaryFile doesn't work too well on Windows. We need to
- # close the fd's so that run_generate_tasks can open the files,
- # so we override the delete-on-close behaviour on Windows, and manually
- # handle cleanup later
- is_windows = os.name == 'nt'
- with NamedTemporaryFile(mode='w',
- delete=not is_windows) as expansions_file, NamedTemporaryFile(
- mode='w', delete=not is_windows) as evg_conf:
- expansions_file.write(EXPANSIONS)
- expansions_file.flush()
- should_tasks_be_generated.return_value = True
- if is_windows:
- # on windows we need to close the fd's so that
- # run_generate_tasks can open the file handle
- expansions_file.close()
- evg_conf.close()
-
- runner = CliRunner()
- result = runner.invoke(
- under_test.run_generate_tasks,
- ['--expansion-file', expansions_file.name, '--evergreen-config', evg_conf.name])
- self.assertEqual(result.exit_code, 1, result)
- self.assertTrue(isinstance(result.exception, RuntimeError))
- self.assertEqual(
- str(result.exception),
- f"Multiversion suite generator unexpectedly yielded no configuration in '{self._tmpdir.name}'"
- )
- self.assertEqual(write_file_to_dir.call_count, 1)
- if is_windows:
- # on windows we need to manually delete these files, since
- # we've disabled the delete-on-close mechanics
- os.remove(expansions_file.name)
- os.remove(evg_conf.name)
-
-
class TestGenerateExcludeYaml(unittest.TestCase):
def setUp(self):
self._tmpdir = TemporaryDirectory()
@@ -257,14 +204,5 @@ class TestGenerateExcludeYaml(unittest.TestCase):
self.assert_contents(expected)
-EXPANSIONS = """task: t
-build_variant: bv
-fallback_num_sub_suites: 5
-project: p
-task_id: t0
-task_name: t
-use_multiversion: "true"
-"""
-
if __name__ == '__main__':
unittest.main()
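The deleted TestRun above documents a real portability wrinkle worth preserving: NamedTemporaryFile keeps an open handle, and Windows will not let a second open of that file succeed, so the test disabled delete-on-close there and removed the file by hand. A standalone sketch of that pattern, with consume as a hypothetical stand-in for run_generate_tasks:

    import os
    from tempfile import NamedTemporaryFile

    def consume(path):
        with open(path) as fh:  # a second open; this is what fails on Windows
            return fh.read()

    is_windows = os.name == "nt"
    expansions_file = NamedTemporaryFile(mode="w", delete=not is_windows)
    try:
        expansions_file.write("task: t\n")
        expansions_file.flush()
        if is_windows:
            expansions_file.close()  # release the handle so consume() can open it
        consume(expansions_file.name)
    finally:
        expansions_file.close()  # idempotent if already closed
        if is_windows:
            os.remove(expansions_file.name)  # manual cleanup; delete-on-close was off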
diff --git a/buildscripts/tests/test_evergreen_generate_resmoke_tasks.py b/buildscripts/tests/test_evergreen_generate_resmoke_tasks.py
index 4dbb2d1f4c8..5ff8e6ba1d2 100644
--- a/buildscripts/tests/test_evergreen_generate_resmoke_tasks.py
+++ b/buildscripts/tests/test_evergreen_generate_resmoke_tasks.py
@@ -1,33 +1,27 @@
"""Unit tests for the generate_resmoke_suite script."""
-
-import datetime
+from datetime import datetime, timedelta
import json
import os
from tempfile import TemporaryDirectory
import sys
import unittest
+import inject
import requests
import yaml
from mock import patch, MagicMock
-from shrub.v2 import BuildVariant, ShrubProject
-from shrub.variant import DisplayTaskDefinition
-
-from buildscripts.util.teststats import TestRuntime
+from evergreen import EvergreenApi
from buildscripts import evergreen_generate_resmoke_tasks as under_test
+from buildscripts.task_generation.gen_config import GenerationConfiguration
+from buildscripts.task_generation.resmoke_proxy import ResmokeProxyConfig
+from buildscripts.task_generation.suite_split import SuiteSplitConfig
+from buildscripts.task_generation.suite_split_strategies import SplitStrategy, FallbackStrategy, \
+ greedy_division, round_robin_fallback
+from buildscripts.task_generation.task_types.gentask_options import GenTaskOptions
# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access
-# pylint: disable=too-many-locals,too-many-lines,too-many-public-methods
-
-_DATE = datetime.datetime(2018, 7, 15)
-
-NS = "buildscripts.evergreen_generate_resmoke_tasks"
-
-
-def ns(relative_name): # pylint: disable=invalid-name
- """Return a full name from a name relative to the test module"s name space."""
- return NS + "." + relative_name
+# pylint: disable=too-many-locals,too-many-lines,too-many-public-methods,no-value-for-parameter
def tst_stat_mock(file, duration, pass_count):
@@ -64,28 +58,39 @@ def mock_resmoke_config_file(test_list, filename):
fileh.write(yaml.safe_dump(config))
-class TestAcceptance(unittest.TestCase):
- """A suite of Acceptance tests for evergreen_generate_resmoke_tasks."""
+def configure_dependencies(evg_api, evg_expansions, config_dir,
+ test_suites_dir=under_test.DEFAULT_TEST_SUITE_DIR):
+ end_date = datetime.utcnow()
+ start_date = end_date - timedelta(weeks=2)
- def setUp(self):
- under_test.Suite._current_index = 0
+ def dependencies(binder: inject.Binder) -> None:
+ binder.bind(SuiteSplitConfig, evg_expansions.get_suite_split_config(start_date, end_date))
+ binder.bind(SplitStrategy, greedy_division)
+ binder.bind(FallbackStrategy, round_robin_fallback)
+ binder.bind(GenTaskOptions, evg_expansions.get_evg_config_gen_options(config_dir))
+ binder.bind(EvergreenApi, evg_api)
+ binder.bind(GenerationConfiguration,
+ GenerationConfiguration.from_yaml_file(under_test.GENERATE_CONFIG_FILE))
+ binder.bind(ResmokeProxyConfig, ResmokeProxyConfig(resmoke_suite_dir=test_suites_dir))
- @staticmethod
- def _mock_config():
- return {
- "build_variant": "build_variant",
- "fallback_num_sub_suites": 14,
- "project": "mongodb-mongo-master",
- "task_id": "task314",
- "task_name": "some_task_gen",
- "max_sub_suites": 100,
- }
+ inject.clear_and_configure(dependencies)
- @staticmethod
- def _config_options(config_values):
- return under_test.ConfigOptions(config_values, under_test.REQUIRED_CONFIG_KEYS,
- under_test.DEFAULT_CONFIG_VALUES,
- under_test.CONFIG_FORMAT_FN)
+
+def build_mock_evg_expansions(target_resmoke_time=under_test.DEFAULT_TARGET_RESMOKE_TIME):
+ return under_test.EvgExpansions(
+ build_variant="build_variant",
+ max_sub_suites=100,
+ project="mongodb-mongo-master",
+ task_id="task314",
+ task_name="some_task_gen",
+ target_resmoke_time=target_resmoke_time,
+ build_id="build_id",
+ revision="abc123",
+ )
+
+
+class TestAcceptance(unittest.TestCase):
+ """A suite of Acceptance tests for evergreen_generate_resmoke_tasks."""
@staticmethod
def _mock_evg_api(successful_task=False):
@@ -99,12 +104,10 @@ class TestAcceptance(unittest.TestCase):
return evg_api_mock
@staticmethod
- def _prep_dirs(tmpdir, mock_config):
+ def _prep_dirs(tmpdir):
target_directory = os.path.join(tmpdir, "output")
source_directory = os.path.join(tmpdir, "input")
os.makedirs(source_directory)
- mock_config["generated_config_dir"] = target_directory
- mock_config["test_suites_dir"] = source_directory
return target_directory, source_directory
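The configure_dependencies helper above pushes test doubles into python-inject's global injector; the code under test then declares what it needs by type annotation instead of taking explicit constructor arguments. A hedged sketch of the consuming side (the class is illustrative and not part of this commit; the binder calls mirror the ones above). This is also why the header hunk adds no-value-for-parameter to the pylint disables: calls like NeedsEvgApi() intentionally leave the injected parameters unfilled.

    import inject
    from evergreen import EvergreenApi
    from mock import MagicMock

    class NeedsEvgApi:  # illustrative consumer, not from this commit
        @inject.autoparams()
        def __init__(self, evg_api: EvergreenApi) -> None:
            self.evg_api = evg_api

    def dependencies(binder: inject.Binder) -> None:
        binder.bind(EvergreenApi, MagicMock())  # tests bind a mock; production binds a real client

    inject.clear_and_configure(dependencies)
    service = NeedsEvgApi()  # evg_api is resolved from the configured bindings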
@@ -125,51 +128,56 @@ class TestAcceptance(unittest.TestCase):
When it attempts to run again,
It does not generate any files.
"""
- evg_api_mock = self._mock_evg_api(successful_task=True)
-
- mock_config = self._mock_config()
- config = self._config_options(mock_config)
+ mock_evg_api = self._mock_evg_api(successful_task=True)
+ mock_evg_expansions = build_mock_evg_expansions()
with TemporaryDirectory() as tmpdir:
- mock_config["generated_config_dir"] = tmpdir
- under_test.GenerateSubSuites(evg_api_mock, config).run()
+ configure_dependencies(mock_evg_api, mock_evg_expansions, tmpdir)
+
+ orchestrator = under_test.EvgGenResmokeTaskOrchestrator()
+ orchestrator.generate_task(mock_evg_expansions.task_id,
+ mock_evg_expansions.get_suite_split_params(),
+ mock_evg_expansions.get_gen_params())
self.assertEqual(0, len(os.listdir(tmpdir)))
- @patch(ns("suitesconfig.get_suite"))
+ @patch("buildscripts.resmokelib.suitesconfig.get_suite")
def test_when_evg_test_stats_is_down(self, suites_config_mock):
"""
Given Evergreen historic test stats endpoint is disabled,
When evergreen_generate_resmoke_tasks attempts to generate suites,
- It generates suites based on "fallback_num_sub_suites".
+ It generates suites based on "max_sub_suites".
"""
- evg_api_mock = mock_test_stats_unavailable(self._mock_evg_api())
-
- mock_config = self._mock_config()
- config = self._config_options(mock_config)
- task = mock_config["task_name"][:-4]
-
n_tests = 100
+ mock_evg_api = mock_test_stats_unavailable(self._mock_evg_api())
+ mock_evg_expansions = build_mock_evg_expansions()
+ task = mock_evg_expansions.task_name[:-4]
with TemporaryDirectory() as tmpdir:
- target_directory, source_directory = self._prep_dirs(tmpdir, mock_config)
+ target_directory, source_directory = self._prep_dirs(tmpdir)
+ configure_dependencies(mock_evg_api, mock_evg_expansions, target_directory,
+ source_directory)
+
suite_path = os.path.join(source_directory, task)
- test_list = self._mock_test_files(source_directory, n_tests, 5, evg_api_mock,
+ test_list = self._mock_test_files(source_directory, n_tests, 5, mock_evg_api,
suites_config_mock)
mock_resmoke_config_file(test_list, suite_path + ".yml")
- under_test.GenerateSubSuites(evg_api_mock, config).run()
+ orchestrator = under_test.EvgGenResmokeTaskOrchestrator()
+ orchestrator.generate_task(mock_evg_expansions.task_id,
+ mock_evg_expansions.get_suite_split_params(),
+ mock_evg_expansions.get_gen_params())
# Were all the config files created? There should be one for each suite as well as
# the evergreen json config.
generated_files = os.listdir(target_directory)
# The expected suite count is the number of fallback suites + the _misc suite.
- expected_suite_count = mock_config["fallback_num_sub_suites"] + 1
+ expected_suite_count = mock_evg_expansions.max_sub_suites + 1
# We expect files for all the suites + the evergreen json config.
self.assertEqual(expected_suite_count + 1, len(generated_files))
# Taking a closer look at the evergreen json config.
- expected_shrub_file = f"{config.task}.json"
+ expected_shrub_file = f"{task}.json"
self.assertIn(expected_shrub_file, generated_files)
with open(os.path.join(target_directory, expected_shrub_file)) as fileh:
shrub_config = json.load(fileh)
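A worked version of the file-count arithmetic the assertions above rely on, assuming the mocked expansions (max_sub_suites=100) drive the fallback split:

    expected_suite_count = 100 + 1                       # 100 sub-suites plus the trailing _misc suite
    expected_generated_files = expected_suite_count + 1  # plus the evergreen json config itself
    assert expected_generated_files == 102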
@@ -180,32 +188,31 @@ class TestAcceptance(unittest.TestCase):
@unittest.skipIf(
sys.platform.startswith("win"), "Since this test is messing with directories, "
"windows does not handle test generation correctly")
- @patch(ns("suitesconfig.get_suite"))
+ @patch("buildscripts.resmokelib.suitesconfig.get_suite")
def test_with_each_test_in_own_task(self, suites_config_mock):
"""
Given a task with all tests having a historic runtime over the target,
When evergreen_generate_resmoke_tasks attempts to generate suites,
It generates a suite for each test.
"""
- evg_api_mock = self._mock_evg_api()
-
- mock_config = self._mock_config()
- config = self._config_options(mock_config)
- task = mock_config["task_name"][:-4]
-
- mock_config['target_resmoke_time'] = 10 # 10 minute max test time.
n_tests = 4
+ mock_evg_api = self._mock_evg_api()
+ mock_evg_expansions = build_mock_evg_expansions(target_resmoke_time=10)
+ task = mock_evg_expansions.task_name[:-4]
with TemporaryDirectory() as tmpdir:
- target_directory, source_directory = self._prep_dirs(tmpdir, mock_config)
+ target_directory, source_directory = self._prep_dirs(tmpdir)
+ configure_dependencies(mock_evg_api, mock_evg_expansions, target_directory,
+ source_directory)
suite_path = os.path.join(source_directory, task)
- test_list = self._mock_test_files(source_directory, n_tests, 15 * 60, evg_api_mock,
+ test_list = self._mock_test_files(source_directory, n_tests, 15 * 60, mock_evg_api,
suites_config_mock)
mock_resmoke_config_file(test_list, suite_path + ".yml")
- under_test.enable_logging(True)
-
- under_test.GenerateSubSuites(evg_api_mock, config).run()
+ orchestrator = under_test.EvgGenResmokeTaskOrchestrator()
+ orchestrator.generate_task(mock_evg_expansions.task_id,
+ mock_evg_expansions.get_suite_split_params(),
+ mock_evg_expansions.get_gen_params())
# Were all the config files created? There should be one for each suite as well as
# the evergreen json config.
@@ -216,7 +223,7 @@ class TestAcceptance(unittest.TestCase):
self.assertEqual(expected_suite_count + 1, len(generated_files))
# Taking a closer look at the evergreen json config.
- expected_shrub_file = f"{config.task}.json"
+ expected_shrub_file = f"{task}.json"
self.assertIn(expected_shrub_file, generated_files)
with open(os.path.join(target_directory, expected_shrub_file)) as fileh:
shrub_config = json.load(fileh)
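The expected shrub file name is derived from the generator task's name by stripping the "_gen" suffix, exactly as the task_name[:-4] slices above do. With the mocked expansions:

    task_name = "some_task_gen"
    task = task_name[:-4]                 # -> "some_task"
    expected_shrub_file = f"{task}.json"  # -> "some_task.json"
    assert expected_shrub_file == "some_task.json"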
@@ -234,1019 +241,3 @@ class TestHelperMethods(unittest.TestCase):
input_task_name = "sharded_multi_stmt_txn_jscore_passthroug"
self.assertEqual("sharded_multi_stmt_txn_jscore_passthroug",
under_test.remove_gen_suffix(input_task_name))
-
- def test_string_contains_any_of_args(self):
- args = ["repeatSuites", "repeat"]
- string = "--suite=suite 0.yml --originSuite=suite resmoke_args --repeat=5"
- self.assertEqual(True, under_test.string_contains_any_of_args(string, args))
-
- def test_string_contains_any_of_args_for_empty_args(self):
- args = []
- string = "--suite=suite 0.yml --originSuite=suite resmoke_args --repeat=5"
- self.assertEqual(False, under_test.string_contains_any_of_args(string, args))
-
- def test_string_contains_any_of_args_for_non_matching_args(self):
- args = ["random_string_1", "random_string_2", "random_string_3"]
- string = "--suite=suite 0.yml --originSuite=suite resmoke_args --repeat=5"
- self.assertEqual(False, under_test.string_contains_any_of_args(string, args))
-
-
-class TestConfigOptions(unittest.TestCase):
- def test_lookup_missing_required_key_throws_exception(self):
- config = {}
- required_keys = {"key1"}
-
- config_options = under_test.ConfigOptions(config, required_keys=required_keys)
-
- with self.assertRaises(KeyError):
- config_options.key1 # pylint: disable=pointless-statement
-
- def test_default_options_use_default_value(self):
- config = {}
- defaults = {"key1": "value1"}
-
- config_options = under_test.ConfigOptions(config, defaults=defaults)
-
- self.assertEqual(defaults["key1"], config_options.key1)
-
- def test_unknown_values_return_none(self):
- config = {}
-
- config_options = under_test.ConfigOptions(config)
-
- self.assertIsNone(config_options.key1)
-
- def test_set_values_are_used(self):
- config = {"key1": "not_the_default_value"}
- defaults = {"key1": "value1"}
-
- config_options = under_test.ConfigOptions(config, defaults=defaults)
-
- self.assertEqual(config["key1"], config_options.key1)
-
- def test_depends_on_splits_values(self):
- config = {"depends_on": "value1,value2,value3"}
-
- config_options = under_test.ConfigOptions(config)
-
- self.assertEqual(3, len(config_options.depends_on))
-
- def test_suite_uses_task_value_if_no_suite(self):
- config = {"task": "task_value"}
-
- config_options = under_test.ConfigOptions(config)
-
- self.assertEqual(config["task"], config_options.suite)
-
- def test_suite_uses_suite_if_provided(self):
- config = {"task": "task_value", "suite": "suite_value"}
-
- config_options = under_test.ConfigOptions(config)
-
- self.assertEqual(config["suite"], config_options.suite)
-
- def test_task_uses_task_name(self):
- config = {"task_name": "task_value"}
-
- config_options = under_test.ConfigOptions(config)
-
- self.assertEqual(config["task_name"], config_options.task)
-
- def test_run_tests_task_uses_task_name(self):
- config = {"task_name": "task_value"}
-
- config_options = under_test.ConfigOptions(config)
-
- self.assertEqual(config["task_name"], config_options.run_tests_task)
-
- def test_run_tests_build_variant_uses_build_variant(self):
- config = {"build_variant": "my-build-variant"}
-
- config_options = under_test.ConfigOptions(config)
-
- self.assertEqual(config["build_variant"], config_options.run_tests_build_variant)
-
- def test_run_tests_build_id_uses_build_id(self):
- config = {"build_id": "my_build_id"}
-
- config_options = under_test.ConfigOptions(config)
-
- self.assertEqual(config["build_id"], config_options.run_tests_build_id)
-
- def test_create_misc_suite(self):
- config = {}
-
- config_options = under_test.ConfigOptions(config)
-
- self.assertTrue(config_options.create_misc_suite)
-
- def test_item_with_format_function_works(self):
- config = {"number": "1"}
- formats = {"number": int}
-
- config_options = under_test.ConfigOptions(config, formats=formats)
-
- self.assertEqual(1, config_options.number)
- self.assertIsInstance(config_options.number, int)
-
-
-class DivideRemainingTestsAmongSuitesTest(unittest.TestCase):
- @staticmethod
- def generate_tests_runtimes(n_tests):
- tests_runtimes = []
- # Iterating backwards so the list is sorted by descending runtimes
- for idx in range(n_tests - 1, -1, -1):
- name = "test_{0}".format(idx)
- tests_runtimes.append((name, 2 * idx))
-
- return tests_runtimes
-
- def test_each_suite_gets_one_test(self):
- suites = [under_test.Suite(f"suite_{i}") for i in range(3)]
- tests_runtimes = self.generate_tests_runtimes(3)
-
- under_test.divide_remaining_tests_among_suites(tests_runtimes, suites)
-
- for suite in suites:
- self.assertEqual(suite.get_test_count(), 1)
-
- def test_each_suite_gets_at_least_one_test(self):
- suites = [under_test.Suite(f"suite_{i}") for i in range(3)]
- tests_runtimes = self.generate_tests_runtimes(5)
-
- under_test.divide_remaining_tests_among_suites(tests_runtimes, suites)
-
- total_tests = 0
- for suite in suites:
- total_tests += suite.get_test_count()
- self.assertGreaterEqual(suite.get_test_count(), 1)
-
- self.assertEqual(total_tests, len(tests_runtimes))
-
-
-class DivideTestsIntoSuitesByMaxtimeTest(unittest.TestCase):
- def test_if_less_total_than_max_only_one_suite_created(self):
- max_time = 20
- tests_runtimes = [
- ("test1", 5),
- ("test2", 4),
- ("test3", 3),
- ]
-
- suites = under_test.divide_tests_into_suites("suite_name", tests_runtimes, max_time)
- self.assertEqual(len(suites), 1)
- self.assertEqual(suites[0].get_test_count(), 3)
- self.assertEqual(suites[0].get_runtime(), 12)
-
- def test_if_each_test_should_be_own_suite(self):
- max_time = 5
- tests_runtimes = [
- ("test1", 5),
- ("test2", 4),
- ("test3", 3),
- ]
-
- suites = under_test.divide_tests_into_suites("suite_name", tests_runtimes, max_time)
- self.assertEqual(len(suites), 3)
-
- def test_if_test_is_greater_than_max_it_goes_alone(self):
- max_time = 7
- tests_runtimes = [
- ("test1", 15),
- ("test2", 4),
- ("test3", 3),
- ]
-
- suites = under_test.divide_tests_into_suites("suite_name", tests_runtimes, max_time)
- self.assertEqual(len(suites), 2)
- self.assertEqual(suites[0].get_test_count(), 1)
- self.assertEqual(suites[0].get_runtime(), 15)
-
- def test_max_sub_suites_options(self):
- max_time = 5
- max_suites = 2
- tests_runtimes = [
- ("test1", 5),
- ("test2", 4),
- ("test3", 3),
- ("test4", 4),
- ("test5", 3),
- ]
-
- suites = under_test.divide_tests_into_suites("suite_name", tests_runtimes, max_time,
- max_suites=max_suites)
- self.assertEqual(len(suites), max_suites)
- total_tests = 0
- for suite in suites:
- total_tests += suite.get_test_count()
- self.assertEqual(total_tests, len(tests_runtimes))
-
- def test_max_tests_per_suites_is_one(self):
- max_time = 5
- num_tests = 10
- tests_runtimes = [(f"tests_{i}", i) for i in range(num_tests)]
-
- suites = under_test.divide_tests_into_suites("suite_name", tests_runtimes, max_time,
- max_tests_per_suite=1)
-
- self.assertEqual(len(suites), num_tests)
-
- def test_max_tests_per_suites_is_less_than_number_of_tests(self):
- max_time = 100
- num_tests = 10
- tests_runtimes = [(f"tests_{i}", 1) for i in range(num_tests)]
-
- suites = under_test.divide_tests_into_suites("suite_name", tests_runtimes, max_time,
- max_tests_per_suite=2)
-
- self.assertEqual(len(suites), num_tests // 2)
-
- def test_max_suites_overrides_max_tests_per_suite(self):
- max_time = 100
- num_tests = 10
- max_suites = 2
- tests_runtimes = [(f"tests_{i}", 1) for i in range(num_tests)]
-
- suites = under_test.divide_tests_into_suites("suite_name", tests_runtimes, max_time,
- max_suites=max_suites, max_tests_per_suite=2)
-
- self.assertEqual(len(suites), max_suites)
-
-
-class SuiteTest(unittest.TestCase):
- def test_adding_tests_increases_count_and_runtime(self):
- suite = under_test.Suite("suite name")
- suite.add_test("test1", 10)
- suite.add_test("test2", 12)
- suite.add_test("test3", 7)
-
- self.assertEqual(suite.get_test_count(), 3)
- self.assertEqual(suite.get_runtime(), 29)
- self.assertTrue(suite.should_overwrite_timeout())
-
- def test_suites_without_full_runtime_history_should_not_be_overridden(self):
- suite = under_test.Suite("suite name")
- suite.add_test("test1", 10)
- suite.add_test("test2", 0)
- suite.add_test("test3", 7)
-
- self.assertFalse(suite.should_overwrite_timeout())
-
- def test_suites_are_properly_indexed(self):
- under_test.Suite._current_index = 0
- n_suites = 5
- suites = [under_test.Suite(f"suite_{i}") for i in range(n_suites)]
-
- for i in range(n_suites):
- self.assertEqual(i, suites[i].index)
-
- def test_suite_name(self):
- suite = under_test.Suite("suite_name")
- suite.index = 3
- under_test.Suite._current_index = 314
-
- self.assertEqual("suite_name_003", suite.name)
-
-
-def create_suite(count=3, start=0):
- """ Create a suite with count tests."""
- suite = under_test.Suite("suite")
- for i in range(start, start + count):
- suite.add_test("test{}".format(i), 1)
- return suite
-
-
-class UpdateSuiteConfigTest(unittest.TestCase):
- def test_roots_are_updated(self):
- config = {"selector": {}}
-
- updated_config = under_test.update_suite_config(config, "root value")
- self.assertEqual("root value", updated_config["selector"]["roots"])
-
- def test_excluded_files_not_included_if_not_specified(self):
- config = {"selector": {"excluded_files": "files to exclude"}}
-
- updated_config = under_test.update_suite_config(config, excludes=None)
- self.assertNotIn("exclude_files", updated_config["selector"])
-
- def test_excluded_files_added_to_misc(self):
- config = {"selector": {}}
-
- updated_config = under_test.update_suite_config(config, excludes="files to exclude")
- self.assertEqual("files to exclude", updated_config["selector"]["exclude_files"])
-
- def test_excluded_files_extended_in_misc(self):
- config = {"selector": {"exclude_files": ["file 0", "file 1"]}}
-
- updated_config = under_test.update_suite_config(config, excludes=["file 2", "file 3"])
- self.assertEqual(4, len(updated_config["selector"]["exclude_files"]))
- for exclude in ["file 0", "file 1", "file 2", "file 3"]:
- self.assertIn(exclude, updated_config["selector"]["exclude_files"])
-
-
-class CalculateTimeoutTest(unittest.TestCase):
- def test_min_timeout(self):
- self.assertEqual(under_test.MIN_TIMEOUT_SECONDS + under_test.AVG_SETUP_TIME,
- under_test.calculate_timeout(15, 1))
-
- def test_over_timeout_by_one_minute(self):
- self.assertEqual(660, under_test.calculate_timeout(301, 1))
-
- def test_float_runtimes(self):
- self.assertEqual(660, under_test.calculate_timeout(300.14, 1))
-
- def test_scaling_factor(self):
- scaling_factor = 10
- self.assertEqual(
- under_test.MIN_TIMEOUT_SECONDS * scaling_factor + under_test.AVG_SETUP_TIME,
- under_test.calculate_timeout(30, scaling_factor))
-
-
-class TimeoutEstimateTest(unittest.TestCase):
- def test_too_high_a_timeout_raises_errors(self):
- timeout_est = under_test.TimeoutEstimate(
- max_test_runtime=5, expected_task_runtime=under_test.MAX_EXPECTED_TIMEOUT)
-
- with self.assertRaises(ValueError):
- timeout_est.generate_timeout_cmd(is_patch=True, repeat_factor=1)
-
-
-class EvergreenConfigGeneratorTest(unittest.TestCase):
- @staticmethod
- def generate_mock_suites(count):
- suites = []
- for idx in range(count):
- suite = MagicMock()
- suite.name = "suite {0}".format(idx)
- suite.max_runtime = 5.28
- suite.get_runtime = lambda: 100.874
- suite.get_timeout_estimate.return_value = under_test.TimeoutEstimate(
- max_test_runtime=5.28, expected_task_runtime=100.874)
- suites.append(suite)
-
- return suites
-
- @staticmethod
- def generate_mock_options():
- options = MagicMock()
- options.resmoke_args = "resmoke_args"
- options.run_multiple_jobs = "true"
- options.variant = "buildvariant"
- options.suite = "suite"
- options.task = "suite"
- options.use_default_timeouts = False
- options.use_large_distro = None
- options.use_multiversion = False
- options.is_patch = True
- options.repeat_suites = 1
- options.generated_config_dir = "config_dir"
- options.generate_display_task.return_value = DisplayTaskDefinition("task")
- options.create_misc_suite = True
- options.display_task_name = "task_name"
-
- return options
-
- def test_evg_config_is_created(self):
- options = self.generate_mock_options()
- suites = self.generate_mock_suites(3)
- build_variant = BuildVariant("variant")
-
- generator = under_test.EvergreenConfigGenerator(suites, options, MagicMock())
- generator.generate_config(build_variant)
-
- shrub_project = ShrubProject.empty().add_build_variant(build_variant)
- config = shrub_project.as_dict()
- self.assertEqual(len(config["tasks"]), len(suites) + 1)
- command1 = config["tasks"][0]["commands"][2]
- self.assertIn(options.resmoke_args, command1["vars"]["resmoke_args"])
- self.assertIn(" --originSuite=suite", command1["vars"]["resmoke_args"])
- self.assertIn(options.run_multiple_jobs, command1["vars"]["run_multiple_jobs"])
- self.assertEqual("run generated tests", command1["func"])
-
- def test_evg_config_is_created_with_diff_task_and_suite(self):
- options = self.generate_mock_options()
- options.task = "task"
- options.display_task_name = "display task"
- options.generate_display_task.return_value = DisplayTaskDefinition("task")
- suites = self.generate_mock_suites(3)
- build_variant = BuildVariant("variant")
-
- generator = under_test.EvergreenConfigGenerator(suites, options, MagicMock())
- generator.generate_config(build_variant)
-
- shrub_project = ShrubProject.empty().add_build_variant(build_variant)
- config = shrub_project.as_dict()
- self.assertEqual(len(config["tasks"]), len(suites) + 1)
- display_task = config["buildvariants"][0]["display_tasks"][0]
- self.assertEqual(options.display_task_name, display_task["name"])
-
- task = config["tasks"][0]
- self.assertIn(options.variant, task["name"])
- self.assertIn(options.suite, task["commands"][2]["vars"]["resmoke_args"])
-
- def test_evg_config_can_use_large_distro(self):
- options = self.generate_mock_options()
- options.use_large_distro = "true"
- options.large_distro_name = "large distro name"
- suites = self.generate_mock_suites(3)
- build_variant = BuildVariant("variant")
-
- generator = under_test.EvergreenConfigGenerator(suites, options, MagicMock())
- generator.generate_config(build_variant)
-
- shrub_project = ShrubProject.empty().add_build_variant(build_variant)
- config = shrub_project.as_dict()
-
- self.assertEqual(len(config["tasks"]), len(suites) + 1)
- self.assertEqual(options.large_distro_name,
- config["buildvariants"][0]["tasks"][0]["distros"][0])
-
- def test_build_variant_without_large_distro_defined_fails(self):
- options = self.generate_mock_options()
- options.use_large_distro = "true"
- options.large_distro_name = None
- suites = self.generate_mock_suites(3)
- build_variant = BuildVariant("variant")
-
- generator = under_test.EvergreenConfigGenerator(suites, options, MagicMock())
- with self.assertRaises(ValueError):
- generator.generate_config(build_variant)
-
- def test_build_variant_without_large_distro_defined_can_be_ignored(self):
- options = self.generate_mock_options()
- options.use_large_distro = "true"
- options.large_distro_name = None
- suites = self.generate_mock_suites(3)
- build_variant = BuildVariant("variant")
- generate_config = under_test.GenerationConfiguration(
- build_variant_large_distro_exceptions={"variant"})
-
- generator = under_test.EvergreenConfigGenerator(suites, options, MagicMock(),
- generate_config)
- generator.generate_config(build_variant)
-
- shrub_project = ShrubProject.empty().add_build_variant(build_variant)
- config = shrub_project.as_dict()
-
- self.assertEqual(len(config["tasks"]), len(suites) + 1)
- self.assertIsNone(config["buildvariants"][0]["tasks"][0].get("distros"))
-
- def test_selecting_tasks(self):
- is_task_dependency = under_test.EvergreenConfigGenerator._is_task_dependency
- self.assertFalse(is_task_dependency("sharding", "sharding"))
- self.assertFalse(is_task_dependency("sharding", "other_task"))
- self.assertFalse(is_task_dependency("sharding", "sharding_gen"))
-
- self.assertTrue(is_task_dependency("sharding", "sharding_0"))
- self.assertTrue(is_task_dependency("sharding", "sharding_314"))
- self.assertTrue(is_task_dependency("sharding", "sharding_misc"))
-
- def test_get_tasks_depends_on(self):
- options = self.generate_mock_options()
- suites = self.generate_mock_suites(3)
-
- cfg_generator = under_test.EvergreenConfigGenerator(suites, options, MagicMock())
- cfg_generator.build_tasks = [
- MagicMock(display_name="sharding_gen"),
- MagicMock(display_name="sharding_0"),
- MagicMock(display_name="other_task"),
- MagicMock(display_name="other_task_2"),
- MagicMock(display_name="sharding_1"),
- MagicMock(display_name="compile"),
- MagicMock(display_name="sharding_misc"),
- ]
-
- dependent_tasks = cfg_generator._get_tasks_for_depends_on("sharding")
- self.assertEqual(3, len(dependent_tasks))
- self.assertIn("sharding_0", dependent_tasks)
- self.assertIn("sharding_1", dependent_tasks)
- self.assertIn("sharding_misc", dependent_tasks)
-
- def test_specified_dependencies_are_added(self):
- options = self.generate_mock_options()
- options.depends_on = ["sharding"]
- options.is_patch = False
- suites = self.generate_mock_suites(3)
-
- cfg_generator = under_test.EvergreenConfigGenerator(suites, options, MagicMock())
- cfg_generator.build_tasks = [
- MagicMock(display_name="sharding_gen"),
- MagicMock(display_name="sharding_0"),
- MagicMock(display_name="other_task"),
- MagicMock(display_name="other_task_2"),
- MagicMock(display_name="sharding_1"),
- MagicMock(display_name="compile"),
- MagicMock(display_name="sharding_misc"),
- ]
-
- dependencies = cfg_generator._get_dependencies()
- self.assertEqual(4, len(dependencies))
-
- def test_evg_config_has_timeouts_for_repeated_suites(self):
- options = self.generate_mock_options()
- options.repeat_suites = 5
- suites = self.generate_mock_suites(3)
- build_variant = BuildVariant("variant")
-
- generator = under_test.EvergreenConfigGenerator(suites, options, MagicMock())
- generator.generate_config(build_variant)
-
- shrub_project = ShrubProject.empty().add_build_variant(build_variant)
- config = shrub_project.as_dict()
- self.assertEqual(len(config["tasks"]), len(suites) + 1)
- command1 = config["tasks"][0]["commands"][2]
- self.assertIn(" --repeatSuites=5 ", command1["vars"]["resmoke_args"])
- self.assertIn(options.resmoke_args, command1["vars"]["resmoke_args"])
- timeout_cmd = config["tasks"][0]["commands"][0]
- self.assertEqual("timeout.update", timeout_cmd["command"])
- expected_timeout = under_test.calculate_timeout(suites[0].max_runtime, 3) * 5
- self.assertEqual(expected_timeout, timeout_cmd["params"]["timeout_secs"])
- expected_exec_timeout = under_test.calculate_timeout(suites[0].get_runtime(), 3) * 5
- self.assertEqual(expected_exec_timeout, timeout_cmd["params"]["exec_timeout_secs"])
-
- def test_evg_config_does_not_fails_if_timeout_too_high_on_mainline(self):
- options = self.generate_mock_options()
- options.is_patch = False
- options.repeat_suites = under_test.MAX_EXPECTED_TIMEOUT
- suites = self.generate_mock_suites(3)
- build_variant = BuildVariant("variant")
-
- generator = under_test.EvergreenConfigGenerator(suites, options, MagicMock())
- generator.generate_config(build_variant)
-
- config = build_variant.as_dict()
- self.assertEqual(len(config["tasks"]), len(suites) + 1)
-
- def test_evg_config_does_not_overwrite_repeatSuites_resmoke_arg_with_repeatSuites_default(self):
- options = self.generate_mock_options()
- options.resmoke_args = "resmoke_args --repeatSuites=5"
- suites = self.generate_mock_suites(1)
-
- build_variant = BuildVariant("variant")
-
- generator = under_test.EvergreenConfigGenerator(suites, options, MagicMock())
- generator.generate_config(build_variant)
-
- shrub_project = ShrubProject.empty().add_build_variant(build_variant)
- config = shrub_project.as_dict()
- command1 = config["tasks"][0]["commands"][2]
- self.assertIn("--repeatSuites=5", command1["vars"]["resmoke_args"])
- self.assertNotIn("--repeatSuites=1", command1["vars"]["resmoke_args"])
-
- def test_evg_config_does_not_overwrite_repeat_resmoke_arg_with_repeatSuites_default(self):
- options = self.generate_mock_options()
- options.resmoke_args = "resmoke_args --repeat=5"
- suites = self.generate_mock_suites(1)
- build_variant = BuildVariant("variant")
-
- generator = under_test.EvergreenConfigGenerator(suites, options, MagicMock())
- generator.generate_config(build_variant)
-
- shrub_project = ShrubProject.empty().add_build_variant(build_variant)
- config = shrub_project.as_dict()
- command1 = config["tasks"][0]["commands"][2]
- self.assertIn("--repeat=5", command1["vars"]["resmoke_args"])
- self.assertNotIn("--repeatSuites=1", command1["vars"]["resmoke_args"])
-
- def test_suites_without_enough_info_should_not_include_timeouts(self):
- suite_without_timing_info = 1
- options = self.generate_mock_options()
- suites = self.generate_mock_suites(3)
- suites[
- suite_without_timing_info].get_timeout_estimate.return_value = under_test.TimeoutEstimate.no_timeouts(
- )
- build_variant = BuildVariant("variant")
-
- generator = under_test.EvergreenConfigGenerator(suites, options, MagicMock())
- generator.generate_config(build_variant)
-
- shrub_project = ShrubProject.empty().add_build_variant(build_variant)
- config = shrub_project.as_dict()
- timeout_cmd = config["tasks"][suite_without_timing_info]["commands"][0]
- self.assertNotIn("command", timeout_cmd)
- self.assertEqual("do setup", timeout_cmd["func"])
-
- def test_timeout_info_not_included_if_use_default_timeouts_set(self):
- suite_without_timing_info = 1
- options = self.generate_mock_options()
- suites = self.generate_mock_suites(3)
- options.use_default_timeouts = True
- build_variant = BuildVariant("variant")
-
- generator = under_test.EvergreenConfigGenerator(suites, options, MagicMock())
- generator.generate_config(build_variant)
-
- shrub_project = ShrubProject.empty().add_build_variant(build_variant)
- config = shrub_project.as_dict()
- timeout_cmd = config["tasks"][suite_without_timing_info]["commands"][0]
- self.assertNotIn("command", timeout_cmd)
- self.assertEqual("do setup", timeout_cmd["func"])
-
-
-class GenerateSubSuitesTest(unittest.TestCase):
- @staticmethod
- def get_mock_options(n_fallback=2, max_sub_suites=100):
- options = MagicMock()
- options.target_resmoke_time = 10
- options.fallback_num_sub_suites = n_fallback
- options.max_tests_per_suite = None
- options.max_sub_suites = max_sub_suites
- return options
-
- @staticmethod
- def get_test_list(n_tests):
- return [f"test{i}.js" for i in range(n_tests)]
-
- @patch(ns("read_suite_config"))
- def test_calculate_suites(self, mock_read_suite_config):
- mock_read_suite_config.return_value = {}
- evg = MagicMock()
- evg.test_stats_by_project.return_value = [
- tst_stat_mock(f"test{i}.js", 60, 1) for i in range(100)
- ]
- config_options = self.get_mock_options()
- config_options.max_sub_suites = 1000
- config_options.selected_tests_to_run = None
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
-
- with patch("os.path.exists") as exists_mock, patch(ns("suitesconfig")) as suitesconfig_mock:
- exists_mock.return_value = True
- suitesconfig_mock.get_suite.return_value.tests = \
- [stat.test_file for stat in evg.test_stats_by_project.return_value]
- suites = gen_sub_suites.calculate_suites(_DATE, _DATE)
-
- # There are 100 tests taking 1 minute, with a target of 10 min we expect 10 suites.
- self.assertEqual(10, len(suites))
- for suite in suites:
- self.assertEqual(10, len(suite.tests))
-
- def test_calculate_suites_fallback(self):
- n_tests = 100
- n_fallback = 2
- evg = mock_test_stats_unavailable(MagicMock())
- config_options = self.get_mock_options(n_fallback=n_fallback)
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
- gen_sub_suites.list_tests = MagicMock(return_value=self.get_test_list(n_tests))
-
- suites = gen_sub_suites.calculate_suites(_DATE, _DATE)
-
- self.assertEqual(n_fallback, len(suites))
- for suite in suites:
- self.assertEqual(n_tests / n_fallback, len(suite.tests))
-
- self.assertEqual(n_tests, len(gen_sub_suites.test_list))
-
- def test_max_sub_suites_overrides_fallback(self):
- n_tests = 100
- n_fallback = 5
- max_sub_suites = 1
- evg = mock_test_stats_unavailable(MagicMock())
- config_options = self.get_mock_options(n_fallback=n_fallback, max_sub_suites=max_sub_suites)
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
- gen_sub_suites.list_tests = MagicMock(return_value=self.get_test_list(n_tests))
-
- suites = gen_sub_suites.calculate_suites(_DATE, _DATE)
-
- self.assertEqual(max_sub_suites, len(suites))
- for suite in suites:
- self.assertEqual(n_tests, len(suite.tests))
-
- self.assertEqual(n_tests, len(gen_sub_suites.test_list))
-
- def test_calculate_suites_more_fallback_suites_than_tests(self):
- n_tests = 5
- n_fallback = 10
- evg = mock_test_stats_unavailable(MagicMock())
- config_options = self.get_mock_options(n_fallback=n_fallback)
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
- gen_sub_suites.list_tests = MagicMock(return_value=self.get_test_list(n_tests))
-
- suites = gen_sub_suites.calculate_suites(_DATE, _DATE)
-
- self.assertEqual(n_tests, len(suites))
- for suite in suites:
- self.assertEqual(1, len(suite.tests))
-
- self.assertEqual(n_tests, len(gen_sub_suites.test_list))
-
- def test_calculate_suites_uses_fallback_for_no_results(self):
- n_tests = 100
- evg = MagicMock()
- evg.test_stats_by_project.return_value = []
- config_options = self.get_mock_options()
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
- gen_sub_suites.list_tests = MagicMock(return_value=self.get_test_list(n_tests))
- suites = gen_sub_suites.calculate_suites(_DATE, _DATE)
-
- self.assertEqual(gen_sub_suites.config_options.fallback_num_sub_suites, len(suites))
- for suite in suites:
- self.assertEqual(50, len(suite.tests))
-
- self.assertEqual(n_tests, len(gen_sub_suites.test_list))
-
- def test_calculate_suites_uses_fallback_if_only_results_are_filtered(self):
- n_tests = 100
- evg = MagicMock()
- evg.test_stats_by_project.return_value = [
- tst_stat_mock(f"test{i}.js", 60, 1) for i in range(100)
- ]
- config_options = self.get_mock_options()
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
- gen_sub_suites.list_tests = MagicMock(return_value=self.get_test_list(n_tests))
- with patch("os.path.exists") as exists_mock:
- exists_mock.return_value = False
- suites = gen_sub_suites.calculate_suites(_DATE, _DATE)
-
- self.assertEqual(gen_sub_suites.config_options.fallback_num_sub_suites, len(suites))
- for suite in suites:
- self.assertEqual(50, len(suite.tests))
-
- self.assertEqual(n_tests, len(gen_sub_suites.test_list))
-
- def test_calculate_suites_error(self):
- response = MagicMock()
- response.status_code = requests.codes.INTERNAL_SERVER_ERROR
- evg = MagicMock()
- evg.test_stats_by_project.side_effect = requests.HTTPError(response=response)
- config_options = self.get_mock_options()
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
- gen_sub_suites.list_tests = MagicMock(return_value=self.get_test_list(100))
-
- with self.assertRaises(requests.HTTPError):
- gen_sub_suites.calculate_suites(_DATE, _DATE)
-
- @patch(ns("read_suite_config"))
- def test_calculate_suites_with_selected_tests_to_run(self, mock_read_suite_config):
- mock_read_suite_config.return_value = {}
- evg = MagicMock()
- evg.test_stats_by_project.return_value = [
- tst_stat_mock(f"test{i}.js", 60, 1) for i in range(100)
- ]
- config_options = self.get_mock_options()
- config_options.max_sub_suites = 1000
- config_options.selected_tests_to_run = ["test1.js", "test2.js"]
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
-
- with patch("os.path.exists") as exists_mock, patch(ns("suitesconfig")) as suitesconfig_mock:
- exists_mock.return_value = True
- suitesconfig_mock.get_suite.return_value.tests = \
- [stat.test_file for stat in evg.test_stats_by_project.return_value]
- suites = gen_sub_suites.calculate_suites(_DATE, _DATE)
-
- # There are 100 tests taking 1 minute, with a target of 10 min we expect 10 suites.
- # However, since we have selected only 2 tests to run, test1.js and
- # test2.js, only 1 suite should be created.
- self.assertEqual(1, len(suites))
- for suite in suites:
- self.assertEqual(2, len(suite.tests))
-
- def test_filter_missing_files(self):
- tests_runtimes = [
- TestRuntime(test_name="dir1/file1.js", runtime=20.32),
- TestRuntime(test_name="dir2/file2.js", runtime=24.32),
- TestRuntime(test_name="dir1/file3.js", runtime=36.32),
- ]
-
- with patch(ns("suitesconfig")) as suitesconfig_mock:
- evg = MagicMock()
- suitesconfig_mock.get_suite.return_value.tests = \
- [runtime[0] for runtime in tests_runtimes]
- config_options = MagicMock(suite="suite")
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
-
- with patch("os.path.exists") as exists_mock:
- exists_mock.side_effect = [False, True, True]
- filtered_list = gen_sub_suites.filter_existing_tests(tests_runtimes)
-
- self.assertEqual(2, len(filtered_list))
- self.assertNotIn(tests_runtimes[0], filtered_list)
- self.assertIn(tests_runtimes[2], filtered_list)
- self.assertIn(tests_runtimes[1], filtered_list)
-
- @patch(ns('_parser.set_run_options'))
- def test_filter_denylist_files(self, set_run_options_mock):
- tests_runtimes = [
- TestRuntime(test_name="dir1/file1.js", runtime=20.32),
- TestRuntime(test_name="dir2/file2.js", runtime=24.32),
- TestRuntime(test_name="dir1/file3.js", runtime=36.32),
- ]
-
- denylisted_test = tests_runtimes[1][0]
-
- with patch("os.path.exists") as exists_mock, patch(ns("suitesconfig")) as suitesconfig_mock:
- exists_mock.return_value = True
- evg = MagicMock()
- suitesconfig_mock.get_suite.return_value.tests = \
- [runtime[0] for runtime in tests_runtimes if runtime[0] != denylisted_test]
- config_options = MagicMock(suite="suite")
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
- filtered_list = gen_sub_suites.filter_existing_tests(tests_runtimes)
-
- self.assertEqual(2, len(filtered_list))
- self.assertNotIn(denylisted_test, filtered_list)
- self.assertIn(tests_runtimes[2], filtered_list)
- self.assertIn(tests_runtimes[0], filtered_list)
-
- @patch(ns('_parser.set_run_options'))
- def test_filter_denylist_files_for_windows(self, set_run_options_mock):
- tests_runtimes = [
- TestRuntime(test_name="dir1/file1.js", runtime=20.32),
- TestRuntime(test_name="dir2/file2.js", runtime=24.32),
- TestRuntime(test_name="dir1/dir3/file3.js", runtime=36.32),
- ]
-
- denylisted_test = tests_runtimes[1][0]
-
- with patch("os.path.exists") as exists_mock, patch(ns("suitesconfig")) as suitesconfig_mock:
- exists_mock.return_value = True
- evg = MagicMock()
- suitesconfig_mock.get_suite.return_value.tests = [
- runtime[0].replace("/", "\\") for runtime in tests_runtimes
- if runtime[0] != denylisted_test
- ]
- config_options = MagicMock(suite="suite")
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
- filtered_list = gen_sub_suites.filter_existing_tests(tests_runtimes)
-
- self.assertNotIn(denylisted_test, filtered_list)
- self.assertIn(tests_runtimes[2], filtered_list)
- self.assertIn(tests_runtimes[0], filtered_list)
- self.assertEqual(2, len(filtered_list))
-
- def test_is_asan_build_on_asan_builds(self):
- evg = MagicMock()
- config_options = MagicMock(
- suite="suite",
- san_options="ASAN_OPTIONS=\"detect_leaks=1:check_initialization_order=true\"")
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
-
- self.assertTrue(gen_sub_suites._is_asan_build())
-
- def test_is_asan_build_with_no_san_options(self):
- evg = MagicMock()
- config_options = MagicMock(suite="suite", san_options=None)
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
-
- self.assertFalse(gen_sub_suites._is_asan_build())
-
- def test_is_asan_build_with_san_options_non_asan(self):
- evg = MagicMock()
- config_options = MagicMock(suite="suite",
- san_options="SAN_OPTIONS=\"check_initialization_order=true\"")
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
-
- self.assertFalse(gen_sub_suites._is_asan_build())
-
- def test_clean_every_n_cadence_on_asan(self):
- evg = MagicMock()
- config_options = MagicMock(
- suite="suite",
- san_options="ASAN_OPTIONS=\"detect_leaks=1:check_initialization_order=true\"")
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
-
- cadence = gen_sub_suites._get_clean_every_n_cadence()
-
- self.assertEqual(1, cadence)
-
- @patch(ns("read_suite_config"))
- def test_clean_every_n_cadence_from_hook_config(self, mock_read_suite_config):
- evg = MagicMock()
- config_options = MagicMock(
- suite="suite",
- san_options=None,
- )
- expected_n = 42
- mock_read_suite_config.return_value = {
- "executor": {
- "hooks": [{
- "class": "hook1",
- }, {
- "class": under_test.CLEAN_EVERY_N_HOOK,
- "n": expected_n,
- }]
- }
- }
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
- cadence = gen_sub_suites._get_clean_every_n_cadence()
-
- self.assertEqual(expected_n, cadence)
-
- @patch(ns("read_suite_config"))
- def test_clean_every_n_cadence_no_n_in_hook_config(self, mock_read_suite_config):
- evg = MagicMock()
- config_options = MagicMock(
- suite="suite",
- san_options=None,
- )
- mock_read_suite_config.return_value = {
- "executor": {
- "hooks": [{
- "class": "hook1",
- }, {
- "class": under_test.CLEAN_EVERY_N_HOOK,
- }]
- }
- }
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
- cadence = gen_sub_suites._get_clean_every_n_cadence()
-
- self.assertEqual(1, cadence)
-
- @patch(ns("read_suite_config"))
- def test_clean_every_n_cadence_no_hook_config(self, mock_read_suite_config):
- evg = MagicMock()
- config_options = MagicMock(
- suite="suite",
- san_options=None,
- )
- mock_read_suite_config.return_value = {"executor": {"hooks": [{
- "class": "hook1",
- }, ]}}
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
- cadence = gen_sub_suites._get_clean_every_n_cadence()
-
- self.assertEqual(1, cadence)
-
- @patch(ns("suitesconfig.get_suite"))
- def test_list_tests_can_handle_strings_and_lists(self, mock_get_suite):
- evg = MagicMock()
- mock_suite = MagicMock(
- tests=["test0", "test1", ["test2a", "tests2b", "test2c"], "test3", ["test4a"]])
- config_options = MagicMock(
- suite="suite",
- san_options=None,
- )
- mock_get_suite.return_value = mock_suite
-
- gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
- test_list = gen_sub_suites.list_tests()
-
- self.assertEqual(len(test_list), 7)
-
-
-class TestShouldTasksBeGenerated(unittest.TestCase):
- def test_during_first_execution(self):
- evg_api = MagicMock()
- task_id = "task_id"
- evg_api.task_by_id.return_value.execution = 0
-
- self.assertTrue(under_test.should_tasks_be_generated(evg_api, task_id))
- evg_api.task_by_id.assert_called_with(task_id, fetch_all_executions=True)
-
- def test_after_successful_execution(self):
- evg_api = MagicMock()
- task_id = "task_id"
- task = evg_api.task_by_id.return_value
- task.execution = 1
- task.get_execution.return_value.is_success.return_value = True
-
- self.assertFalse(under_test.should_tasks_be_generated(evg_api, task_id))
- evg_api.task_by_id.assert_called_with(task_id, fetch_all_executions=True)
-
- def test_after_multiple_successful_execution(self):
- evg_api = MagicMock()
- task_id = "task_id"
- task = evg_api.task_by_id.return_value
- task.execution = 5
- task.get_execution.return_value.is_success.return_value = True
-
- self.assertFalse(under_test.should_tasks_be_generated(evg_api, task_id))
- evg_api.task_by_id.assert_called_with(task_id, fetch_all_executions=True)
-
- def test_after_failed_execution(self):
- evg_api = MagicMock()
- task_id = "task_id"
- task = evg_api.task_by_id.return_value
- task.execution = 1
- task.get_execution.return_value.is_success.return_value = False
-
- self.assertTrue(under_test.should_tasks_be_generated(evg_api, task_id))
- evg_api.task_by_id.assert_called_with(task_id, fetch_all_executions=True)
-
- def test_after_multiple_failed_execution(self):
- evg_api = MagicMock()
- task_id = "task_id"
- task = evg_api.task_by_id.return_value
- task.execution = 5
- task.get_execution.return_value.is_success.return_value = False
-
- self.assertTrue(under_test.should_tasks_be_generated(evg_api, task_id))
- evg_api.task_by_id.assert_called_with(task_id, fetch_all_executions=True)
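The deleted TestShouldTasksBeGenerated cases pin down a simple regeneration rule: generate on the first execution, skip after a success, regenerate after a failure. That logic now lives in the new buildscripts/task_generation/gen_task_validation.py added by this commit; the following is a hedged reconstruction from the tests above, not a verbatim copy of the new module (which prior execution gets inspected is an assumption here):

    def should_tasks_be_generated(evg_api, task_id):
        task = evg_api.task_by_id(task_id, fetch_all_executions=True)
        if task.execution == 0:
            return True  # first execution: always generate
        previous = task.get_execution(task.execution - 1)  # assumed: inspect the last run
        return not previous.is_success()  # regenerate only after a failure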
diff --git a/buildscripts/tests/test_selected_tests.py b/buildscripts/tests/test_selected_tests.py
index b1adad986f8..6f34cfff60d 100644
--- a/buildscripts/tests/test_selected_tests.py
+++ b/buildscripts/tests/test_selected_tests.py
@@ -2,19 +2,30 @@
import json
import sys
import unittest
+from datetime import datetime, timedelta
from typing import Dict, Any
+import inject
from mock import MagicMock, patch
-from shrub.v2 import BuildVariant, ShrubProject
+from evergreen import EvergreenApi
# pylint: disable=wrong-import-position
import buildscripts.ciconfig.evergreen as _evergreen
from buildscripts.burn_in_tests import TaskInfo
-from buildscripts.evergreen_generate_resmoke_tasks import Suite
+from buildscripts.patch_builds.selected_tests.selected_tests_client import SelectedTestsClient, \
+ TestMappingsResponse, TestMapping, TestFileInstance, TaskMappingsResponse, TaskMapInstance, \
+ TaskMapping
+from buildscripts.selected_tests import EvgExpansions
+from buildscripts.task_generation.gen_config import GenerationConfiguration
+from buildscripts.task_generation.resmoke_proxy import ResmokeProxyConfig
+from buildscripts.task_generation.suite_split import SuiteSplitConfig
+from buildscripts.task_generation.suite_split_strategies import SplitStrategy, greedy_division, \
+ FallbackStrategy, round_robin_fallback
+from buildscripts.task_generation.task_types.gentask_options import GenTaskOptions
from buildscripts.tests.test_burn_in_tests import get_evergreen_config, mock_changed_git_files
from buildscripts import selected_tests as under_test
-# pylint: disable=missing-docstring,invalid-name,unused-argument,protected-access
+# pylint: disable=missing-docstring,invalid-name,unused-argument,protected-access,no-value-for-parameter
NS = "buildscripts.selected_tests"
@@ -34,12 +45,30 @@ def empty_build_variant(variant_name: str) -> Dict[str, Any]:
}
+def configure_dependencies(evg_api, evg_expansions, evg_project_config, selected_test_client,
+ test_suites_dir=under_test.DEFAULT_TEST_SUITE_DIR):
+ end_date = datetime.utcnow()
+ start_date = end_date - timedelta(weeks=2)
+
+ def dependencies(binder: inject.Binder) -> None:
+ binder.bind(EvgExpansions, evg_expansions)
+ binder.bind(_evergreen.EvergreenProjectConfig, evg_project_config)
+ binder.bind(SuiteSplitConfig, evg_expansions.build_suite_split_config(start_date, end_date))
+ binder.bind(SplitStrategy, greedy_division)
+ binder.bind(FallbackStrategy, round_robin_fallback)
+ binder.bind(GenTaskOptions, evg_expansions.build_gen_task_options())
+ binder.bind(EvergreenApi, evg_api)
+ binder.bind(GenerationConfiguration,
+ GenerationConfiguration.from_yaml_file(under_test.GENERATE_CONFIG_FILE))
+ binder.bind(ResmokeProxyConfig, ResmokeProxyConfig(resmoke_suite_dir=test_suites_dir))
+ binder.bind(SelectedTestsClient, selected_test_client)
+
+ inject.clear_and_configure(dependencies)
+
+
class TestAcceptance(unittest.TestCase):
"""A suite of Acceptance tests for selected_tests."""
- def setUp(self):
- Suite._current_index = 0
-
@staticmethod
def _mock_evg_api():
evg_api_mock = MagicMock()
@@ -49,233 +78,124 @@ class TestAcceptance(unittest.TestCase):
@unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
def test_when_no_mappings_are_found_for_changed_files(self):
- evg_api_mock = self._mock_evg_api()
- evg_config = get_evergreen_config("etc/evergreen.yml")
- selected_tests_service_mock = MagicMock()
- selected_tests_service_mock.get_test_mappings.return_value = []
- selected_tests_variant_expansions = {
- "task_name": "selected_tests_gen", "build_variant": "selected-tests",
- "build_id": "my_build_id", "project": "mongodb-mongo-master",
- "version_id": "my_version", "task_id": "task_id"
- }
+ mock_evg_api = self._mock_evg_api()
+ mock_evg_config = get_evergreen_config("etc/evergreen.yml")
+ mock_evg_expansions = under_test.EvgExpansions(
+ task_id="task_id",
+ task_name="selected_tests_gen",
+ build_variant="selected-tests",
+ build_id="my_build_id",
+ project="mongodb-mongo-master",
+ revision="abc123",
+ version_id="my_version",
+ )
+ mock_selected_tests_client = MagicMock()
+ mock_selected_tests_client.get_test_mappings.return_value = TestMappingsResponse(
+ test_mappings=[])
+ configure_dependencies(mock_evg_api, mock_evg_expansions, mock_evg_config,
+ mock_selected_tests_client)
repos = [mock_changed_git_files([])]
- config_dict = under_test.run(evg_api_mock, evg_config, selected_tests_service_mock,
- selected_tests_variant_expansions, repos)
+ selected_tests = under_test.SelectedTestsOrchestrator()
+ changed_files = selected_tests.find_changed_files(repos, "task_id")
+ generated_config = selected_tests.generate_version(changed_files)
-        # assert that config_dict does not contain keys for any generated task configs
+        # assert that the only generated file is selected_tests_config.json itself,
+        # i.e. no suite or task configs were generated
- self.assertEqual(config_dict.keys(), {"selected_tests_config.json"})
+ self.assertEqual(len(generated_config.file_list), 1)
+ self.assertEqual(generated_config.file_list[0].file_name, "selected_tests_config.json")
@unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
def test_when_test_mappings_are_found_for_changed_files(self):
- evg_api_mock = self._mock_evg_api()
- evg_config = get_evergreen_config("etc/evergreen.yml")
- selected_tests_service_mock = MagicMock()
- selected_tests_service_mock.get_test_mappings.return_value = [
- {
- "source_file": "src/file1.cpp",
- "test_files": [{"name": "jstests/auth/auth1.js"}],
- },
- ]
- selected_tests_variant_expansions = {
- "task_name": "selected_tests_gen",
- "build_variant": "selected-tests",
- "build_id": "my_build_id",
- "project": "mongodb-mongo-master",
- "version_id": "my_version",
- "task_id": "task_id",
- "max_sub_suites": 3,
- }
+ mock_evg_api = self._mock_evg_api()
+ mock_evg_config = get_evergreen_config("etc/evergreen.yml")
+ mock_evg_expansions = under_test.EvgExpansions(
+ task_id="task_id",
+ task_name="selected_tests_gen",
+ build_variant="selected-tests",
+ build_id="my_build_id",
+ project="mongodb-mongo-master",
+ revision="abc123",
+ version_id="my_version",
+ )
+ mock_test_mapping = TestMapping(
+ branch="master", project="mongodb-mongo-master", repo="mongodb/mongo",
+ source_file="src/file1.cpp", source_file_seen_count=8,
+ test_files=[TestFileInstance(name="jstests/auth/auth1.js", test_file_seen_count=3)])
+ mock_selected_tests_client = MagicMock()
+ mock_selected_tests_client.get_test_mappings.return_value = TestMappingsResponse(
+ test_mappings=[mock_test_mapping])
+ configure_dependencies(mock_evg_api, mock_evg_expansions, mock_evg_config,
+ mock_selected_tests_client)
repos = [mock_changed_git_files(["src/file1.cpp"])]
- config_dict = under_test.run(evg_api_mock, evg_config, selected_tests_service_mock,
- selected_tests_variant_expansions, repos)
+ selected_tests = under_test.SelectedTestsOrchestrator()
+ changed_files = selected_tests.find_changed_files(repos, "task_id")
+ generated_config = selected_tests.generate_version(changed_files)
- self.assertIn("selected_tests_config.json", config_dict)
+ files_to_generate = {gen_file.file_name for gen_file in generated_config.file_list}
+ self.assertIn("selected_tests_config.json", files_to_generate)
# assert that generated suite files have the suite name and the variant name in the
# filename, to prevent tasks on different variants from using the same suite file
- self.assertIn("auth_enterprise-rhel-80-64-bit-dynamic-required_0.yml", config_dict)
+ self.assertIn("auth_0_enterprise-rhel-80-64-bit-dynamic-required.yml", files_to_generate)
- # assert that tasks are generated on all required build variants
- build_variants_with_generated_tasks = json.loads(
- config_dict["selected_tests_config.json"])["buildvariants"]
- self.assertEqual(
- len(build_variants_with_generated_tasks), len(evg_config.get_required_variants()))
+ generated_evg_config_raw = [
+ gen_file.content for gen_file in generated_config.file_list
+ if gen_file.file_name == "selected_tests_config.json"
+ ][0]
+ generated_evg_config = json.loads(generated_evg_config_raw)
+ build_variants_with_generated_tasks = generated_evg_config["buildvariants"]
# jstests/auth/auth1.js belongs to two suites, auth and auth_audit,
-            # max_sub_suites = 3, resulting in 3 subtasks being generated
-            # for each, hence 6 tasks total
+        # so one task is generated for each suite, hence the 2 tasks asserted below
rhel_80_with_generated_tasks = next(
(variant for variant in build_variants_with_generated_tasks
if variant["name"] == "enterprise-rhel-80-64-bit-dynamic-required"), None)
- self.assertEqual(len(rhel_80_with_generated_tasks["tasks"]), 6)
+ self.assertEqual(len(rhel_80_with_generated_tasks["tasks"]), 2)
@unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
def test_when_task_mappings_are_found_for_changed_files(self):
- evg_api_mock = self._mock_evg_api()
- evg_config = get_evergreen_config("etc/evergreen.yml")
- selected_tests_service_mock = MagicMock()
- selected_tests_service_mock.get_task_mappings.return_value = [
- {
- "source_file": "src/file1.cpp",
- "tasks": [{"name": "auth"}],
- },
- ]
- selected_tests_variant_expansions = {
- "task_name": "selected_tests_gen",
- "build_variant": "selected-tests",
- "build_id": "my_build_id",
- "project": "mongodb-mongo-master",
- "version_id": "my_version",
- "task_id": "task id",
- "max_sub_suites": 3,
- }
+ mock_evg_api = self._mock_evg_api()
+ mock_evg_config = get_evergreen_config("etc/evergreen.yml")
+ mock_evg_expansions = under_test.EvgExpansions(
+ task_id="task_id",
+ task_name="selected_tests_gen",
+ build_variant="selected-tests",
+ build_id="my_build_id",
+ project="mongodb-mongo-master",
+ revision="abc123",
+ version_id="my_version",
+ )
+ mock_task_mapping = TaskMapping(
+ branch="master", project="mongodb-mongo-master", repo="mongodb/mongo",
+ source_file="src/file1.cpp", source_file_seen_count=8,
+ tasks=[TaskMapInstance(name="auth", variant="enterprise-rhel-80", flip_count=5)])
+ mock_selected_tests_client = MagicMock()
+ mock_selected_tests_client.get_task_mappings.return_value = TaskMappingsResponse(
+ task_mappings=[mock_task_mapping])
+ configure_dependencies(mock_evg_api, mock_evg_expansions, mock_evg_config,
+ mock_selected_tests_client)
repos = [mock_changed_git_files(["src/file1.cpp"])]
- config_dict = under_test.run(evg_api_mock, evg_config, selected_tests_service_mock,
- selected_tests_variant_expansions, repos)
+ selected_tests = under_test.SelectedTestsOrchestrator()
+ changed_files = selected_tests.find_changed_files(repos, "task_id")
+ generated_config = selected_tests.generate_version(changed_files)
- self.assertIn("selected_tests_config.json", config_dict)
+ files_to_generate = {gen_file.file_name for gen_file in generated_config.file_list}
+ self.assertIn("selected_tests_config.json", files_to_generate)
+ generated_evg_config_raw = [
+ gen_file.content for gen_file in generated_config.file_list
+ if gen_file.file_name == "selected_tests_config.json"
+ ][0]
+ generated_evg_config = json.loads(generated_evg_config_raw)
-        # the auth task's generator task, max_sub_suites is 3,
-        # resulting in 3 subtasks being generated, plus a _misc task, hence 4
-        # tasks total
+        # the auth task mapping expands into generated sub-suite tasks plus a
+        # _misc task, hence the 5 tasks asserted below
- build_variants_with_generated_tasks = json.loads(
- config_dict["selected_tests_config.json"])["buildvariants"]
+ build_variants_with_generated_tasks = generated_evg_config["buildvariants"]
rhel_80_with_generated_tasks = next(
(variant for variant in build_variants_with_generated_tasks
if variant["name"] == "enterprise-rhel-80-64-bit-dynamic-required"), None)
- self.assertEqual(len(rhel_80_with_generated_tasks["tasks"]), 4)
-
-
-class TestSelectedTestsConfigOptions(unittest.TestCase):
- def test_overwrites_overwrite_filepath_config(self):
- origin_variant_expansions = {"key1": 0}
- selected_tests_variant_expansions = {"key1": 1}
- overwrites = {"key1": 2}
- required_keys = {"key1"}
- defaults = {}
- formats = {"key1": int}
-
- config_options = under_test.SelectedTestsConfigOptions.from_file(
- origin_variant_expansions, selected_tests_variant_expansions, overwrites, required_keys,
- defaults, formats)
-
- self.assertEqual(overwrites["key1"], config_options.key1)
-
- def test_overwrites_overwrite_defaults(self):
- origin_variant_expansions = {}
- selected_tests_variant_expansions = {"key1": 1}
- overwrites = {"key1": 2}
- required_keys = {"key1"}
- defaults = {"key1": 3}
- formats = {"key1": int}
-
- config_options = under_test.SelectedTestsConfigOptions.from_file(
- origin_variant_expansions, selected_tests_variant_expansions, overwrites, required_keys,
- defaults, formats)
-
- self.assertEqual(overwrites["key1"], config_options.key1)
-
- def test_selected_tests_config_overrides_origin_expansions(self):
- origin_variant_expansions = {"key1": 0}
- selected_tests_variant_expansions = {"key1": 1}
- overwrites = {}
- required_keys = {"key1"}
- defaults = {}
- formats = {"key1": int}
-
- config_options = under_test.SelectedTestsConfigOptions.from_file(
- origin_variant_expansions, selected_tests_variant_expansions, overwrites, required_keys,
- defaults, formats)
-
- self.assertEqual(selected_tests_variant_expansions["key1"], config_options.key1)
-
- def test_run_tests_task(self):
- config_options = under_test.SelectedTestsConfigOptions(
- {"name_of_generating_task": "my_task_gen"}, {}, {}, {})
-
- self.assertEqual(config_options.run_tests_task, "my_task")
-
- def test_run_tests_build_variant(self):
- config_options = under_test.SelectedTestsConfigOptions(
- {"name_of_generating_build_variant": "my-build-variant"}, {}, {}, {})
-
- self.assertEqual(config_options.run_tests_build_variant, "my-build-variant")
-
- def test_run_tests_build_id(self):
- config_options = under_test.SelectedTestsConfigOptions(
- {"name_of_generating_build_id": "my_build_id"}, {}, {}, {})
-
- self.assertEqual(config_options.run_tests_build_id, "my_build_id")
-
- def test_create_misc_suite_with_no_selected_tests_to_run(self):
- config_options = under_test.SelectedTestsConfigOptions({}, {}, {}, {})
-
- self.assertTrue(config_options.create_misc_suite)
-
- def test_create_misc_suite_with_selected_tests_to_run(self):
- config_options = under_test.SelectedTestsConfigOptions(
- {"selected_tests_to_run": {"my_test.js"}}, {}, {}, {})
-
- self.assertFalse(config_options.create_misc_suite)
-
-
-class TestFindSelectedTestFiles(unittest.TestCase):
- @patch(ns("is_file_a_test_file"))
- @patch(ns("SelectedTestsService"))
- def test_related_files_returned_from_selected_tests_service(self, selected_tests_service_mock,
- is_file_a_test_file_mock):
- is_file_a_test_file_mock.return_value = True
- changed_files = {"src/file1.cpp", "src/file2.js"}
- selected_tests_service_mock.get_test_mappings.return_value = [
- {
- "source_file": "src/file1.cpp",
- "test_files": [{"name": "jstests/file-1.js"}],
- },
- {
- "source_file": "src/file2.cpp",
- "test_files": [{"name": "jstests/file-3.js"}],
- },
- ]
-
- related_test_files = under_test._find_selected_test_files(selected_tests_service_mock,
- changed_files)
-
- self.assertEqual(related_test_files, {"jstests/file-1.js", "jstests/file-3.js"})
-
- @patch(ns("is_file_a_test_file"))
- @patch(ns("SelectedTestsService"))
- def test_related_files_returned_are_not_valid_test_files(self, selected_tests_service_mock,
- is_file_a_test_file_mock):
- is_file_a_test_file_mock.return_value = False
- changed_files = {"src/file1.cpp", "src/file2.js"}
- selected_tests_service_mock.get_test_mappings.return_value = [
- {
- "source_file": "src/file1.cpp",
- "test_files": [{"name": "jstests/file-1.js"}],
- },
- {
- "source_file": "src/file2.cpp",
- "test_files": [{"name": "jstests/file-3.js"}],
- },
- ]
-
- related_test_files = under_test._find_selected_test_files(selected_tests_service_mock,
- changed_files)
-
- self.assertEqual(related_test_files, set())
-
- @patch(ns("SelectedTestsService"))
- def test_no_related_files_returned(self, selected_tests_service_mock):
- selected_tests_service_mock.get_test_mappings.return_value = set()
- changed_files = {"src/file1.cpp", "src/file2.js"}
-
- related_test_files = under_test._find_selected_test_files(selected_tests_service_mock,
- changed_files)
-
- self.assertEqual(related_test_files, set())
+ self.assertEqual(len(rhel_80_with_generated_tasks["tasks"]), 5)
class TestExcludeTask(unittest.TestCase):
@@ -297,85 +217,42 @@ class TestExcludeTask(unittest.TestCase):
self.assertEqual(under_test._exclude_task(task), True)
-class TestFindSelectedTasks(unittest.TestCase):
- @patch(ns("SelectedTestsService"))
- def test_related_tasks_returned_from_selected_tests_service(self, selected_tests_service_mock):
- selected_tests_service_mock.get_task_mappings.return_value = [
- {
- "source_file": "src/file1.cpp",
- "tasks": [{"name": "my_task_1"}],
- },
- {
- "source_file": "src/file2.cpp",
- "tasks": [{"name": "my_task_2"}],
- },
- ]
- changed_files = {"src/file1.cpp", "src/file2.js"}
-
- related_tasks = under_test._find_selected_tasks(selected_tests_service_mock, changed_files)
-
- self.assertEqual(related_tasks, {"my_task_1", "my_task_2"})
-
-
-class TestGetSelectedTestsTaskConfiguration(unittest.TestCase):
- def test_gets_values(self):
- selected_tests_variant_expansions = {
- "task_name": "my_task", "build_variant": "my-build-variant", "build_id": "my_build_id"
- }
-
- selected_tests_task_config = under_test._get_selected_tests_task_config(
- selected_tests_variant_expansions)
-
- self.assertEqual(selected_tests_task_config["name_of_generating_task"], "my_task")
- self.assertEqual(selected_tests_task_config["name_of_generating_build_variant"],
- "my-build-variant")
- self.assertEqual(selected_tests_task_config["name_of_generating_build_id"], "my_build_id")
+def build_mock_evg_task(name, cmd_func="generate resmoke tasks",
+ resmoke_args="--storageEngine=wiredTiger"):
+ return _evergreen.Task({
+ "name": name,
+ "commands": [{
+ "func": cmd_func,
+ "vars": {"resmoke_args": resmoke_args, },
+ }],
+ })
class TestGetEvgTaskConfig(unittest.TestCase):
- @patch(ns("_get_selected_tests_task_config"))
- def test_task_is_a_generate_resmoke_task(self, selected_tests_config_mock):
- selected_tests_config_mock.return_value = {"selected_tests_key": "selected_tests_value"}
+ def test_task_is_a_generate_resmoke_task(self):
build_variant_conf = MagicMock()
build_variant_conf.name = "variant"
- task = _evergreen.Task({
- "name":
- "auth_gen",
- "commands": [{
- "func": "generate resmoke tasks",
- "vars": {
- "fallback_num_sub_suites": "4",
- "resmoke_args": "--storageEngine=wiredTiger",
- },
- }],
- })
-
- evg_task_config = under_test._get_evg_task_config({}, task, build_variant_conf)
-
- self.assertEqual(evg_task_config["task_name"], "auth_gen")
+ task = build_mock_evg_task("auth_gen")
+
+ task_config_service = under_test.TaskConfigService()
+ evg_task_config = task_config_service.get_evg_task_config(task, build_variant_conf)
+
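+        # the "_gen" suffix is stripped from the generator task's name, so the
+        # config is keyed by the underlying task name ("auth", not "auth_gen")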
+ self.assertEqual(evg_task_config["task_name"], "auth")
self.assertEqual(evg_task_config["build_variant"], "variant")
self.assertIsNone(evg_task_config.get("suite"))
self.assertEqual(
evg_task_config["resmoke_args"],
"--storageEngine=wiredTiger",
)
- self.assertEqual(evg_task_config["fallback_num_sub_suites"], "4")
- self.assertEqual(evg_task_config["selected_tests_key"], "selected_tests_value")
- @patch(ns("_get_selected_tests_task_config"))
- def test_task_is_not_a_generate_resmoke_task(self, selected_tests_config_mock):
+ def test_task_is_not_a_generate_resmoke_task(self):
build_variant_conf = MagicMock()
build_variant_conf.name = "variant"
- task = _evergreen.Task({
- "name":
- "jsCore_auth",
- "commands": [{
- "func": "run tests",
- "vars": {"resmoke_args": "--suites=core_auth --storageEngine=wiredTiger"}
- }],
- })
+ task = build_mock_evg_task("jsCore_auth", "run tests",
+ "--suites=core_auth --storageEngine=wiredTiger")
- evg_task_config = under_test._get_evg_task_config({}, task, build_variant_conf)
+ task_config_service = under_test.TaskConfigService()
+ evg_task_config = task_config_service.get_evg_task_config(task, build_variant_conf)
self.assertEqual(evg_task_config["task_name"], "jsCore_auth")
self.assertEqual(evg_task_config["build_variant"], "variant")
@@ -384,41 +261,16 @@ class TestGetEvgTaskConfig(unittest.TestCase):
evg_task_config["resmoke_args"],
"--storageEngine=wiredTiger",
)
- self.assertEqual(evg_task_config["fallback_num_sub_suites"], "1")
-
-
-class TestUpdateConfigDictWithTask(unittest.TestCase):
- @patch(ns("SelectedTestsConfigOptions"))
- @patch(ns("GenerateSubSuites"))
- def test_no_suites_or_tasks_are_generated(self, generate_subsuites_mock,
- selected_tests_config_options_mock):
- generate_subsuites_mock.return_value.generate_suites_config.return_value = {}
-
- def generate_task_config(shrub_config, suites):
- pass
-
- generate_subsuites_mock.return_value.generate_task_config.side_effect = generate_task_config
-
- build_variant = BuildVariant("variant")
- config_dict_of_suites_and_tasks = {}
- under_test._update_config_with_task(
- MagicMock(), build_variant, config_options=MagicMock(),
- config_dict_of_suites_and_tasks=config_dict_of_suites_and_tasks)
-
- shrub_project = ShrubProject.empty().add_build_variant(build_variant)
- self.assertEqual(config_dict_of_suites_and_tasks, {})
- self.assertEqual(shrub_project.as_dict(), empty_build_variant("variant"))
class TestGetTaskConfigsForTestMappings(unittest.TestCase):
- @patch(ns("_get_evg_task_config"))
@patch(ns("_exclude_task"))
@patch(ns("_find_task"))
- def test_get_config_for_test_mapping(self, find_task_mock, exclude_task_mock,
- get_evg_task_config_mock):
+ def test_get_config_for_test_mapping(self, find_task_mock, exclude_task_mock):
find_task_mock.side_effect = [
- _evergreen.Task({"name": "jsCore_auth"}),
- _evergreen.Task({"name": "auth_gen"})
+ build_mock_evg_task("jsCore_auth", "run tests"),
+ build_mock_evg_task("auth_gen", "run tests",
+ "--suites=core_auth --storageEngine=wiredTiger"),
]
exclude_task_mock.return_value = False
tests_by_task = {
@@ -442,175 +294,108 @@ class TestGetTaskConfigsForTestMappings(unittest.TestCase):
distro="",
),
}
- get_evg_task_config_mock.side_effect = [{"task_config_key": "task_config_value_1"},
- {"task_config_key": "task_config_value_2"}]
- task_configs = under_test._get_task_configs_for_test_mappings({}, tests_by_task,
- MagicMock())
+ task_config_service = under_test.TaskConfigService()
+ task_configs = task_config_service.get_task_configs_for_test_mappings(
+ tests_by_task, MagicMock())
- self.assertEqual(task_configs["jsCore_auth"]["task_config_key"], "task_config_value_1")
+ self.assertEqual(task_configs["jsCore_auth"]["resmoke_args"], "--storageEngine=wiredTiger")
self.assertEqual(
task_configs["jsCore_auth"]["selected_tests_to_run"],
{"jstests/core/currentop_waiting_for_latch.js", "jstests/core/latch_analyzer.js"})
- self.assertEqual(task_configs["auth_gen"]["task_config_key"], "task_config_value_2")
+ self.assertEqual(task_configs["auth_gen"]["suite"], "core_auth")
self.assertEqual(task_configs["auth_gen"]["selected_tests_to_run"],
{'jstests/auth/auth3.js'})
- @patch(ns("_get_evg_task_config"))
@patch(ns("_exclude_task"))
@patch(ns("_find_task"))
- def test_get_config_for_test_mapping_when_task_should_be_excluded(
- self, find_task_mock, exclude_task_mock, get_evg_task_config_mock):
- find_task_mock.return_value = _evergreen.Task({"name": "jsCore_auth"})
+ def test_get_config_for_test_mapping_when_task_should_be_excluded(self, find_task_mock,
+ exclude_task_mock):
+ find_task_mock.return_value = build_mock_evg_task(
+ "jsCore_auth", "run tests", "--suites=core_auth --storageEngine=wiredTiger")
exclude_task_mock.return_value = True
tests_by_task = {
- "jsCore_auth": {
- "tests": [
- "jstests/core/currentop_waiting_for_latch.js",
- "jstests/core/latch_analyzer.js",
- ],
- },
+ "jsCore_auth":
+ TaskInfo(
+ display_task_name="task 1",
+ tests=[
+ "jstests/core/currentop_waiting_for_latch.js",
+ "jstests/core/latch_analyzer.js",
+ ],
+ resmoke_args="",
+ use_multiversion=None,
+ distro="",
+ ),
}
- get_evg_task_config_mock.return_value = {"task_config_key": "task_config_value_1"}
- task_configs = under_test._get_task_configs_for_test_mappings({}, tests_by_task,
- MagicMock())
+ task_config_service = under_test.TaskConfigService()
+ task_configs = task_config_service.get_task_configs_for_test_mappings(
+ tests_by_task, MagicMock())
self.assertEqual(task_configs, {})
- @patch(ns("_get_evg_task_config"))
@patch(ns("_find_task"))
- def test_get_config_for_test_mapping_when_task_does_not_exist(self, find_task_mock,
- get_evg_task_config_mock):
+ def test_get_config_for_test_mapping_when_task_does_not_exist(self, find_task_mock):
find_task_mock.return_value = None
tests_by_task = {
- "jsCore_auth": {
- "tests": [
- "jstests/core/currentop_waiting_for_latch.js",
- "jstests/core/latch_analyzer.js",
- ],
- },
+ "jsCore_auth":
+ TaskInfo(
+ display_task_name="task 1",
+ tests=[
+ "jstests/core/currentop_waiting_for_latch.js",
+ "jstests/core/latch_analyzer.js",
+ ],
+ resmoke_args="",
+ use_multiversion=None,
+ distro="",
+ ),
}
- get_evg_task_config_mock.return_value = {"task_config_key": "task_config_value_1"}
- task_configs = under_test._get_task_configs_for_test_mappings({}, tests_by_task,
- MagicMock())
+ task_config_service = under_test.TaskConfigService()
+ task_configs = task_config_service.get_task_configs_for_test_mappings(
+ tests_by_task, MagicMock())
self.assertEqual(task_configs, {})
class TestGetTaskConfigsForTaskMappings(unittest.TestCase):
- @patch(ns("_get_evg_task_config"))
@patch(ns("_exclude_task"))
@patch(ns("_find_task"))
- def test_get_config_for_task_mapping(self, find_task_mock, exclude_task_mock,
- get_evg_task_config_mock):
- find_task_mock.side_effect = [
- _evergreen.Task({"name": "task_1"}),
- _evergreen.Task({"name": "task_2"})
- ]
+ def test_get_config_for_task_mapping(self, find_task_mock, exclude_task_mock):
+ find_task_mock.side_effect = [build_mock_evg_task("task_1"), build_mock_evg_task("task_2")]
exclude_task_mock.return_value = False
tasks = ["task_1", "task_2"]
- get_evg_task_config_mock.side_effect = [{"task_config_key": "task_config_value_1"},
- {"task_config_key": "task_config_value_2"}]
- task_configs = under_test._get_task_configs_for_task_mappings({}, tasks, MagicMock())
+ task_config_service = under_test.TaskConfigService()
+ task_configs = task_config_service.get_task_configs_for_task_mappings(tasks, MagicMock())
- self.assertEqual(task_configs["task_1"]["task_config_key"], "task_config_value_1")
- self.assertEqual(task_configs["task_2"]["task_config_key"], "task_config_value_2")
+ self.assertEqual(task_configs["task_1"]["resmoke_args"], "--storageEngine=wiredTiger")
+ self.assertEqual(task_configs["task_2"]["resmoke_args"], "--storageEngine=wiredTiger")
- @patch(ns("_get_evg_task_config"))
@patch(ns("_exclude_task"))
@patch(ns("_find_task"))
- def test_get_config_for_task_mapping_when_task_should_be_excluded(
- self, find_task_mock, exclude_task_mock, get_evg_task_config_mock):
- find_task_mock.return_value = _evergreen.Task({"name": "task_1"})
+ def test_get_config_for_task_mapping_when_task_should_be_excluded(self, find_task_mock,
+ exclude_task_mock):
+ find_task_mock.return_value = build_mock_evg_task("task_1")
exclude_task_mock.return_value = True
tasks = ["task_1"]
- get_evg_task_config_mock.return_value = {"task_config_key": "task_config_value_1"}
- task_configs = under_test._get_task_configs_for_task_mappings({}, tasks, MagicMock())
+ task_config_service = under_test.TaskConfigService()
+ task_configs = task_config_service.get_task_configs_for_task_mappings(tasks, MagicMock())
self.assertEqual(task_configs, {})
- @patch(ns("_get_evg_task_config"))
@patch(ns("_find_task"))
- def test_get_config_for_task_mapping_when_task_does_not_exist(self, find_task_mock,
- get_evg_task_config_mock):
+ def test_get_config_for_task_mapping_when_task_does_not_exist(self, find_task_mock):
find_task_mock.return_value = None
tasks = ["task_1"]
- get_evg_task_config_mock.return_value = {"task_config_key": "task_config_value_1"}
- task_configs = under_test._get_task_configs_for_task_mappings({}, tasks, MagicMock())
+ task_config_service = under_test.TaskConfigService()
+ task_configs = task_config_service.get_task_configs_for_task_mappings(tasks, MagicMock())
self.assertEqual(task_configs, {})
-class TestGetTaskConfigs(unittest.TestCase):
- @patch(ns("_find_selected_test_files"))
- @patch(ns("create_task_list_for_tests"))
- @patch(ns("_get_task_configs_for_test_mappings"))
- @patch(ns("_find_selected_tasks"))
- def test_with_related_tests_but_no_related_tasks(
- self, find_selected_tasks_mock, get_task_configs_for_test_mappings_mock,
- create_task_list_for_tests_mock, find_selected_test_files_mock):
- find_selected_test_files_mock.return_value = {"jstests/file-1.js", "jstests/file-3.js"}
- get_task_configs_for_test_mappings_mock.return_value = {
- "task_config_key": "task_config_value_1"
- }
- find_selected_tasks_mock.return_value = set()
- changed_files = {"src/file1.cpp", "src/file2.js"}
-
- task_configs = under_test._get_task_configs(MagicMock(), MagicMock(), {}, MagicMock(),
- changed_files)
-
- self.assertEqual(task_configs["task_config_key"], "task_config_value_1")
-
- @patch(ns("_find_selected_test_files"))
- @patch(ns("create_task_list_for_tests"))
- @patch(ns("_get_task_configs_for_task_mappings"))
- @patch(ns("_find_selected_tasks"))
- def test_with_no_related_tests_but_related_tasks(
- self, find_selected_tasks_mock, get_task_configs_for_task_mappings_mock,
- create_task_list_for_tests_mock, find_selected_test_files_mock):
- find_selected_test_files_mock.return_value = {}
- find_selected_tasks_mock.return_value = {"jsCore_auth", "auth_gen"}
- get_task_configs_for_task_mappings_mock.return_value = {
- "task_config_key": "task_config_value_2"
- }
- changed_files = {"src/file1.cpp", "src/file2.js"}
-
- task_configs = under_test._get_task_configs(MagicMock(), MagicMock(), {}, MagicMock(),
- changed_files)
-
- self.assertEqual(task_configs["task_config_key"], "task_config_value_2")
-
- @patch(ns("_find_selected_test_files"))
- @patch(ns("create_task_list_for_tests"))
- @patch(ns("_get_task_configs_for_test_mappings"))
- @patch(ns("_get_task_configs_for_task_mappings"))
- @patch(ns("_find_selected_tasks"))
- # pylint: disable=too-many-arguments
- def test_task_mapping_configs_will_overwrite_test_mapping_configs(
- self, find_selected_tasks_mock, get_task_configs_for_task_mappings_mock,
- get_task_configs_for_test_mappings_mock, create_task_list_for_tests_mock,
- find_selected_test_files_mock):
- find_selected_test_files_mock.return_value = {"jstests/file-1.js", "jstests/file-3.js"}
- get_task_configs_for_test_mappings_mock.return_value = {
- "task_config_key": "task_config_value_1"
- }
- find_selected_tasks_mock.return_value = {"jsCore_auth", "auth_gen"}
- get_task_configs_for_task_mappings_mock.return_value = {
- "task_config_key": "task_config_value_2"
- }
- changed_files = {"src/file1.cpp", "src/file2.js"}
-
- task_configs = under_test._get_task_configs(MagicMock(), MagicMock(), {}, MagicMock(),
- changed_files)
-
- self.assertEqual(task_configs["task_config_key"], "task_config_value_2")
-
-
class TestRemoveRepoPathPrefix(unittest.TestCase):
def test_file_is_in_enterprise_modules(self):
filepath = under_test._remove_repo_path_prefix(
@@ -622,45 +407,3 @@ class TestRemoveRepoPathPrefix(unittest.TestCase):
filepath = under_test._remove_repo_path_prefix("other_directory/src/file1.cpp")
self.assertEqual(filepath, "other_directory/src/file1.cpp")
-
-
-class TestRemoveTaskConfigsAlreadyInBuild(unittest.TestCase):
- def test_tasks_are_already_in_build(self):
- task_configs = {
- "aggregation": {"build_variant": "linux-64-debug"},
- "jsCore": {"build_variant": "linux-64-debug"}
- }
- evg_api = MagicMock()
- aggregation_task = MagicMock(display_name="aggregation")
- evg_api.version_by_id.return_value.build_by_variant.return_value.get_tasks.return_value = [
- aggregation_task
- ]
- build_variant_config = MagicMock()
- version_id = "version_id"
- under_test.remove_task_configs_already_in_build(task_configs, evg_api, build_variant_config,
- version_id)
-
- self.assertNotIn("aggregation", task_configs)
- self.assertIn("jsCore", task_configs)
-
- def test_no_build_exists(self):
- task_configs = {"aggregation": {"build_variant": "linux-64-debug"}}
- evg_api = MagicMock()
- evg_api.version_by_id.return_value.build_by_variant.side_effect = KeyError
- build_variant_config = MagicMock()
- version_id = "version_id"
- under_test.remove_task_configs_already_in_build(task_configs, evg_api, build_variant_config,
- version_id)
-
- self.assertIn("aggregation", task_configs)
-
- def test_no_tasks_already_in_build(self):
- task_configs = {"aggregation": {"build_variant": "linux-64-debug"}}
- evg_api = MagicMock()
- evg_api.version_by_id.return_value.build_by_variant.return_value.get_tasks.return_value = []
- build_variant_config = MagicMock()
- version_id = "version_id"
- under_test.remove_task_configs_already_in_build(task_configs, evg_api, build_variant_config,
- version_id)
-
- self.assertIn("aggregation", task_configs)
diff --git a/buildscripts/util/cmdutils.py b/buildscripts/util/cmdutils.py
new file mode 100644
index 00000000000..d5d8b167a8b
--- /dev/null
+++ b/buildscripts/util/cmdutils.py
@@ -0,0 +1,29 @@
+"""Utilities for build commandline applications."""
+import logging
+import sys
+
+import structlog
+
+EXTERNAL_LOGGERS = {
+ "evergreen",
+ "git",
+ "inject",
+ "urllib3",
+}
+
+
+def enable_logging(verbose: bool) -> None:
+ """
+ Enable logging for execution.
+
+    :param verbose: Whether verbose logging should be enabled.
+ """
+ level = logging.DEBUG if verbose else logging.INFO
+ logging.basicConfig(
+ format="[%(asctime)s - %(name)s - %(levelname)s] %(message)s",
+ level=level,
+ stream=sys.stdout,
+ )
+ structlog.configure(logger_factory=structlog.stdlib.LoggerFactory())
+ for log_name in EXTERNAL_LOGGERS:
+ logging.getLogger(log_name).setLevel(logging.WARNING)
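+# Example wiring (a sketch; the click-based entry point below is illustrative
+# and not part of this module):
+#
+#     import click
+#
+#     @click.command()
+#     @click.option("--verbose", is_flag=True, default=False, help="Enable debug logging.")
+#     def main(verbose: bool) -> None:
+#         enable_logging(verbose)
+#
+#     if __name__ == "__main__":
+#         main()  # pylint: disable=no-value-for-parameter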
diff --git a/etc/evergreen.yml b/etc/evergreen.yml
index 78ce0d99f62..a56a6bd6f0d 100644
--- a/etc/evergreen.yml
+++ b/etc/evergreen.yml
@@ -943,7 +943,7 @@ functions:
aws_key: ${aws_key}
aws_secret: ${aws_secret}
bucket: mciuploads
- remote_file: ${project}/${build_variant}/${revision}/generate_tasks/${task}_gen-${build_id}.tgz
+ remote_file: ${project}/${gen_task_config_location}
local_file: "generate_tasks_config.tgz"
"extract generated test configuration": &extract_generated_test_configuration
@@ -967,7 +967,7 @@ functions:
- command: archive.targz_pack
params:
target: generate_tasks_config.tgz
- source_dir: src/selected_tests_config
+ source_dir: src/generated_resmoke_config
include:
- "*"
- command: s3.put
@@ -985,7 +985,7 @@ functions:
params:
optional: true
files:
- - src/selected_tests_config/*.json
+ - src/generated_resmoke_config/*.json
"generate burn in tags":
- *f_expansions_write
@@ -1384,6 +1384,7 @@ functions:
- *set_up_venv
- *upload_pip_requirements
+ - *configure_evergreen_api_credentials
- *f_expansions_write
- command: subprocess.exec
params:
@@ -1417,6 +1418,7 @@ functions:
- command: generate.tasks
params:
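+        # generation may be skipped entirely (see evergreen/resmoke_tasks_generate.sh),
+        # in which case the generated config file will not exist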
+ optional: true
files:
- src/generated_resmoke_config/${name}.json
@@ -2895,13 +2897,13 @@ tasks:
- command: archive.targz_pack
params:
target: src/burn_in_tests_multiversion_gen.tgz
- source_dir: src
+ source_dir: src/generated_resmoke_config
include:
- burn_in_tests_multiversion_gen.json
- command: archive.targz_pack
params:
- target: generate_tasks_config.tgz
+ target: src/generate_tasks_config.tgz
source_dir: src/generated_resmoke_config
include:
- "*"
@@ -2910,7 +2912,7 @@ tasks:
params:
aws_key: ${aws_key}
aws_secret: ${aws_secret}
- local_file: generate_tasks_config.tgz
+ local_file: src/generate_tasks_config.tgz
remote_file: ${project}/${build_variant}/${revision}/generate_tasks/burn_in_tests_multiversion_gen-${build_id}.tgz
bucket: mciuploads
permissions: public-read
@@ -2932,7 +2934,7 @@ tasks:
- command: generate.tasks
params:
files:
- - src/burn_in_tests_multiversion_gen.json
+ - src/generated_resmoke_config/burn_in_tests_multiversion_gen.json
- <<: *benchmark_template
name: benchmarks_orphaned
@@ -3784,7 +3786,6 @@ tasks:
suite: replica_sets_jscore_passthrough
resmoke_args: --storageEngine=wiredTiger --includeWithAnyTags=multiversion_sanity_check
task_path_suffix: /data/multiversion
- fallback_num_sub_suites: 1
- name: replica_sets_jscore_multiversion_passthrough_gen
tags: ["multiversion_passthrough"]
@@ -3794,7 +3795,6 @@ tasks:
suite: replica_sets_jscore_passthrough
resmoke_args: --storageEngine=wiredTiger
task_path_suffix: /data/multiversion
- fallback_num_sub_suites: 4
# Check that the mutational fuzzer can parse JS files modified in a patch build.
- name: lint_fuzzer_sanity_patch
@@ -3929,11 +3929,9 @@ tasks:
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: aggregation
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- <<: *task_template
name: aggregation_sharded_collections_passthrough
@@ -3971,7 +3969,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 4
- name: burn_in_tags_gen
depends_on:
@@ -4010,7 +4007,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 4
- <<: *task_template
name: change_streams
@@ -4030,7 +4026,6 @@ tasks:
suite: change_streams
resmoke_args: --storageEngine=wiredTiger
task_path_suffix: /data/multiversion
- fallback_num_sub_suites: 4
- <<: *task_template
name: change_streams_update_v1_oplog
@@ -4096,7 +4091,6 @@ tasks:
suite: change_streams_sharded_collections_passthrough
resmoke_args: --storageEngine=wiredTiger
task_path_suffix: /data/multiversion
- fallback_num_sub_suites: 4
- <<: *task_template
name: change_streams_whole_db_passthrough
@@ -4641,7 +4635,6 @@ tasks:
vars:
dependsOn: jsCore
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 1
- name: sharded_causally_consistent_jscore_txns_passthrough_without_snapshot_gen
tags: ["sharding", "wo_snapshot", "causally_consistent", "jscore"]
@@ -4651,7 +4644,6 @@ tasks:
dependsOn: jsCore
suite: sharded_causally_consistent_jscore_txns_passthrough
resmoke_args: --storageEngine=wiredTiger --excludeWithAnyTags=uses_snapshot_read_concern
- fallback_num_sub_suites: 1
- name: causally_consistent_hedged_reads_jscore_passthrough_gen
tags: ["causally_consistent", "sharding", "jscore"]
@@ -4660,7 +4652,6 @@ tasks:
vars:
dependsOn: jsCore
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 40
- <<: *task_template
name: sharded_collections_causally_consistent_jscore_txns_passthrough
@@ -4684,17 +4675,14 @@ tasks:
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 5
- name: replica_sets_reconfig_jscore_stepdown_passthrough_gen
commands:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 5
- <<: *task_template
name: replica_sets_reconfig_kill_primary_jscore_passthrough
@@ -4715,10 +4703,8 @@ tasks:
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 1
- <<: *task_template
name: replica_sets_large_txns_format_jscore_passthrough
@@ -4743,10 +4729,8 @@ tasks:
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 15
- <<: *task_template
name: replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough
@@ -4772,7 +4756,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 5
- name: replica_sets_initsync_jscore_passthrough_gen
tags: ["replica_sets", "san", "large"]
@@ -4780,7 +4763,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 5
- <<: *task_template
name: replica_sets_initsync_static_jscore_passthrough
@@ -4834,7 +4816,6 @@ tasks:
vars:
resmoke_args: "--storageEngine=wiredTiger"
use_multiversion: /data/multiversion
- fallback_num_sub_suites: 1
- name: multiversion_gen
commands:
@@ -4842,7 +4823,6 @@ tasks:
vars:
resmoke_args: "--storageEngine=wiredTiger"
use_multiversion: /data/multiversion
- fallback_num_sub_suites: 1
# Tests the runFeatureFlagMultiversionTest helper.
# This requires the 'featureFlagToaster' and 'featureFlagSpoon' parameters to be set to true on
@@ -4853,7 +4833,6 @@ tasks:
vars:
resmoke_args: "--storageEngine=wiredTiger"
use_multiversion: /data/multiversion
- fallback_num_sub_suites: 1
- name: unittest_shell_hang_analyzer_gen
commands:
@@ -4861,7 +4840,6 @@ tasks:
vars:
suite: unittest_shell_hang_analyzer
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 1
- name: noPassthrough_gen
tags: ["misc_js"]
@@ -4871,7 +4849,6 @@ tasks:
suite: no_passthrough
resmoke_args: --storageEngine=wiredTiger
use_large_distro: "true"
- fallback_num_sub_suites: 12
# Only run hot_backups tests for hot_backups variant.
- name: noPassthroughHotBackups_gen
@@ -4881,7 +4858,6 @@ tasks:
suite: no_passthrough
resmoke_args: --storageEngine=wiredTiger src/mongo/db/modules/*/jstests/hot_backups/*.js
use_large_distro: "true"
- fallback_num_sub_suites: 12
- name: noPassthroughWithMongod_gen
tags: ["misc_js"]
@@ -4891,7 +4867,6 @@ tasks:
suite: no_passthrough_with_mongod
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 5
- <<: *task_template
name: bulk_gle_passthrough
@@ -4908,7 +4883,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 4
resmoke_jobs_max: 1
- <<: *task_template
@@ -4939,7 +4913,6 @@ tasks:
suite: sharded_collections_jscore_passthrough
resmoke_args: --storageEngine=wiredTiger
task_path_suffix: /data/multiversion
- fallback_num_sub_suites: 4
- <<: *task_template
name: sharding_jscore_passthrough
@@ -4959,7 +4932,6 @@ tasks:
suite: sharding_jscore_passthrough
resmoke_args: --storageEngine=wiredTiger
task_path_suffix: /data/multiversion
- fallback_num_sub_suites: 4
- <<: *task_template
name: sharding_jscore_op_query_passthrough
@@ -4985,9 +4957,7 @@ tasks:
- func: "generate resmoke tasks"
vars:
suite: sharding_jscore_passthrough
- depends_on: jsCore
resmoke_args: --storageEngine=wiredTiger --shellReadMode=legacy --shellWriteMode=compatibility --excludeWithAnyTags=requires_find_command,requires_timeseries
- fallback_num_sub_suites: 11
- <<: *task_template
name: sharded_multi_stmt_txn_jscore_passthrough
@@ -5003,10 +4973,8 @@ tasks:
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 28
resmoke_jobs_max: 0 # No cap on number of jobs.
- name: multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough_gen
@@ -5014,79 +4982,63 @@ tasks:
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 21
- name: multi_stmt_txn_jscore_passthrough_with_migration_gen
tags: ["multi_stmt"]
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 19
- name: multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough_gen
tags: ["multi_shard"]
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 48
- name: multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough_gen
tags: ["multi_shard"]
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 37
- name: tenant_migration_jscore_passthrough_gen
tags: ["tenant_migration"]
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 10
- name: tenant_migration_causally_consistent_jscore_passthrough_gen
tags: ["tenant_migration"]
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 10
- name: tenant_migration_multi_stmt_txn_jscore_passthrough_gen
tags: ["tenant_migration", "txn"]
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 10
- name: tenant_migration_stepdown_jscore_passthrough_gen
tags: ["tenant_migration"]
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 10
- name: tenant_migration_terminate_primary_jscore_passthrough_gen
tags: ["tenant_migration"]
@@ -5113,9 +5065,7 @@ tasks:
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 2
resmoke_jobs_max: 1
- <<: *task_template
@@ -5165,7 +5115,6 @@ tasks:
vars:
resmoke_args: "--storageEngine=wiredTiger"
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- <<: *task_template
name: concurrency_replication_multiversion_gen
@@ -5176,7 +5125,6 @@ tasks:
suite: concurrency_replication
resmoke_args: --storageEngine=wiredTiger
task_path_suffix: /data/multiversion
- fallback_num_sub_suites: 4
- name: concurrency_replication_causal_consistency_gen
tags: ["concurrency", "repl", "large", "non_live_record"]
@@ -5185,7 +5133,6 @@ tasks:
vars:
resmoke_args: "--storageEngine=wiredTiger"
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- <<: *task_template
name: concurrency_replication_multi_stmt_txn
@@ -5235,7 +5182,6 @@ tasks:
vars:
resmoke_args: "--storageEngine=wiredTiger"
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- name: concurrency_replication_wiredtiger_eviction_debug_gen
tags: ["concurrency", "repl", "debug_only"]
@@ -5244,7 +5190,6 @@ tasks:
vars:
resmoke_args: "--storageEngine=wiredTiger"
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- name: concurrency_sharded_replication_gen
tags: ["concurrency", "common", "read_concern_maj", "large", "sharded"]
@@ -5252,7 +5197,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 3
use_large_distro: "true"
resmoke_jobs_max: 1
@@ -5264,7 +5208,6 @@ tasks:
suite: concurrency_sharded_replication
resmoke_args: --storageEngine=wiredTiger
task_path_suffix: /data/multiversion
- fallback_num_sub_suites: 4
- name: concurrency_sharded_replication_with_balancer_gen
tags: ["concurrency", "common", "read_concern_maj", "large", "sharded"]
@@ -5272,7 +5215,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 3
use_large_distro: "true"
resmoke_jobs_max: 1
@@ -5283,7 +5225,6 @@ tasks:
vars:
suite: concurrency_sharded_replication
resmoke_args: "--excludeWithAnyTags=uses_transactions --storageEngine=wiredTiger"
- fallback_num_sub_suites: 3
use_large_distro: "true"
resmoke_jobs_max: 1
@@ -5294,7 +5235,6 @@ tasks:
vars:
suite: concurrency_sharded_replication_with_balancer
resmoke_args: "--excludeWithAnyTags=uses_transactions --storageEngine=wiredTiger"
- fallback_num_sub_suites: 3
use_large_distro: "true"
resmoke_jobs_max: 1
@@ -5304,7 +5244,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 3
use_large_distro: "true"
resmoke_jobs_max: 1
@@ -5314,7 +5253,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 3
use_large_distro: "true"
resmoke_jobs_max: 1
@@ -5324,7 +5262,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 3
use_large_distro: "true"
resmoke_jobs_max: 1
@@ -5334,7 +5271,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 3
use_large_distro: "true"
resmoke_jobs_max: 1
@@ -5344,7 +5280,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 3
use_large_distro: "true"
resmoke_jobs_max: 1
@@ -5356,7 +5291,6 @@ tasks:
use_large_distro: "true"
resmoke_args: "--storageEngine=wiredTiger"
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- name: concurrency_sharded_kill_primary_with_balancer_gen
tags: ["concurrency", "stepdowns", "kill_terminate", "sharded"]
@@ -5366,7 +5300,6 @@ tasks:
use_large_distro: "true"
resmoke_args: "--storageEngine=wiredTiger"
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- name: concurrency_sharded_multi_stmt_txn_gen
tags: ["concurrency", "large", "sharded"]
@@ -5376,7 +5309,6 @@ tasks:
use_large_distro: "true"
resmoke_args: "--storageEngine=wiredTiger"
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- name: concurrency_sharded_multi_stmt_txn_with_balancer_gen
tags: ["concurrency", "large", "sharded"]
@@ -5386,7 +5318,6 @@ tasks:
use_large_distro: "true"
resmoke_args: "--storageEngine=wiredTiger"
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- name: concurrency_sharded_local_read_write_multi_stmt_txn_gen
tags: ["concurrency", "large", "sharded"]
@@ -5396,7 +5327,6 @@ tasks:
use_large_distro: "true"
resmoke_args: "--storageEngine=wiredTiger"
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- name: concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer_gen
tags: ["concurrency", "large", "sharded"]
@@ -5406,7 +5336,6 @@ tasks:
use_large_distro: "true"
resmoke_args: "--storageEngine=wiredTiger"
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- name: concurrency_sharded_multi_stmt_txn_with_stepdowns_gen
tags: ["concurrency", "stepdowns", "large", "sharded"]
@@ -5416,7 +5345,6 @@ tasks:
use_large_distro: "true"
resmoke_args: "--storageEngine=wiredTiger"
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- <<: *task_template
name: concurrency_sharded_multi_stmt_txn_terminate_primary_gen
@@ -5427,7 +5355,6 @@ tasks:
use_large_distro: "true"
resmoke_args: "--storageEngine=wiredTiger"
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- <<: *task_template
name: concurrency_sharded_multi_stmt_txn_kill_primary_gen
@@ -5438,7 +5365,6 @@ tasks:
use_large_distro: "true"
resmoke_args: "--storageEngine=wiredTiger"
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- name: concurrency_simultaneous_gen
tags: ["concurrency", "common"]
@@ -5447,7 +5373,6 @@ tasks:
vars:
resmoke_args: --storageEngine=wiredTiger
resmoke_jobs_max: 1
- fallback_num_sub_suites: 3
- <<: *task_template
name: concurrency_simultaneous_replication
@@ -5493,10 +5418,8 @@ tasks:
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 10
- <<: *task_template
name: write_concern_majority_passthrough
@@ -5521,10 +5444,8 @@ tasks:
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 10
- <<: *task_template
name: cwrwc_wc_majority_passthrough
@@ -5539,10 +5460,8 @@ tasks:
commands:
- func: "generate resmoke tasks"
vars:
- depends_on: jsCore
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 12
- name: replica_sets_gen
tags: ["replica_sets", "san", "large"]
@@ -5550,7 +5469,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 5
- name: replica_sets_ese_gen
tags: ["replica_sets", "encrypt", "san"]
@@ -5559,7 +5477,6 @@ tasks:
vars:
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 15
- name: replica_sets_ese_gcm_gen
tags: ["replica_sets", "encrypt", "san", "gcm"]
@@ -5568,7 +5485,6 @@ tasks:
vars:
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 15
- name: replica_sets_auth_gen
tags: ["replica_sets", "common", "san", "auth"]
@@ -5577,7 +5493,6 @@ tasks:
vars:
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 14
- name: replica_sets_large_txns_format_gen
tags: ["replica_sets", "multi_oplog", "san"]
@@ -5585,7 +5500,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 15
- name: replica_sets_max_mirroring_gen
tags: ["replica_sets", "san"]
@@ -5593,7 +5507,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 15
- name: replica_sets_update_v1_oplog_gen
tags: ["replica_sets", "san"]
@@ -5601,7 +5514,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 15
- <<: *task_template
name: replica_sets_multiversion_gen
@@ -5610,7 +5522,6 @@ tasks:
- func: "generate randomized multiversion tasks"
vars:
resmoke_args: --storageEngine=wiredTiger --tagFile=generated_resmoke_config/multiversion_exclude_tags.yml
- fallback_num_sub_suites: 8
use_multiversion: /data/multiversion
suite: replica_sets_multiversion
@@ -5629,7 +5540,6 @@ tasks:
vars:
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 32
- name: sharding_multiversion_gen
tags: ["random_multiversion_ds"]
@@ -5638,7 +5548,6 @@ tasks:
vars:
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger --tagFile=generated_resmoke_config/multiversion_exclude_tags.yml
- fallback_num_sub_suites: 32
use_multiversion: /data/multiversion
suite: sharding_multiversion
@@ -5649,7 +5558,6 @@ tasks:
vars:
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 32
- name: sharding_csrs_continuous_config_stepdown_gen
tags: ["sharding", "common", "csrs", "non_live_record"]
@@ -5659,7 +5567,6 @@ tasks:
suite: sharding_continuous_config_stepdown
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 25
- name: sharding_ese_gen
tags: ["sharding", "encrypt"]
@@ -5668,7 +5575,6 @@ tasks:
vars:
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 32
- name: sharding_ese_gcm_gen
tags: ["sharding", "encrypt", "gcm"]
@@ -5677,7 +5583,6 @@ tasks:
vars:
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 32
- name: sharding_op_query_gen
tags: ["sharding", "common", "op_query"]
@@ -5687,7 +5592,6 @@ tasks:
suite: sharding
use_large_distro: "true"
resmoke_args: --shellReadMode=legacy --storageEngine=wiredTiger --excludeWithAnyTags=requires_find_command
- fallback_num_sub_suites: 31
- name: sharding_auth_gen
tags: ["sharding", "auth"]
@@ -5696,7 +5600,6 @@ tasks:
vars:
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 31
- name: sharding_auth_audit_gen
tags: ["auth", "audit", "non_live_record"]
@@ -5705,7 +5608,6 @@ tasks:
vars:
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 31
- name: sharding_last_lts_mongos_and_mixed_shards_gen
tags: ["sharding", "common", "multiversion"]
@@ -5715,7 +5617,6 @@ tasks:
use_large_distro: "true"
use_multiversion: /data/multiversion
resmoke_args: --tagFile=generated_resmoke_config/multiversion_exclude_tags.yml
- fallback_num_sub_suites: 24
- name: sharding_update_v1_oplog_gen
tags: ["sharding", "common"]
@@ -5724,7 +5625,6 @@ tasks:
vars:
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 31
- <<: *task_template
name: snmp
@@ -5743,7 +5643,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: "--storageEngine=wiredTiger --mongodSetParameters='{logComponentVerbosity: {network: 2, replication: {heartbeats: 2}}}'"
- fallback_num_sub_suites: 5
- name: sslSpecial_gen
tags: ["encrypt", "ssl"]
@@ -5752,7 +5651,6 @@ tasks:
vars:
suite: ssl_special
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 3
- name: ssl_x509_gen
tags: ["encrypt", "ssl"]
@@ -5761,7 +5659,6 @@ tasks:
vars:
suite: ssl_x509
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 3
- <<: *task_template
name: jsCore_decimal
@@ -5804,7 +5701,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 14
- name: causally_consistent_jscore_passthrough_auth_gen
tags: ["causally_consistent"]
@@ -5812,7 +5708,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 15
- name: causally_consistent_read_concern_snapshot_passthrough_gen
tags: ["causally_consistent", "read_write_concern", "durable_history"]
@@ -5820,7 +5715,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 15
- name: sharded_causally_consistent_read_concern_snapshot_passthrough_gen
tags: ["causally_consistent", "read_write_concern", "durable_history"]
@@ -5828,7 +5722,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 15
- name: sharded_causally_consistent_jscore_passthrough_gen
tags: ["causally_consistent"]
@@ -5836,7 +5729,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 15
- name: retryable_writes_jscore_passthrough_gen
tags: ["retry"]
@@ -5845,7 +5737,6 @@ tasks:
vars:
use_large_distro: "true"
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 9
- name: logical_session_cache_replication_default_refresh_jscore_passthrough_gen
tags: ["logical_session_cache", "repl"]
@@ -5853,7 +5744,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 17
- name: logical_session_cache_replication_100ms_refresh_jscore_passthrough_gen
tags: ["logical_session_cache", "repl"]
@@ -5861,7 +5751,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 17
- name: logical_session_cache_replication_1sec_refresh_jscore_passthrough_gen
tags: ["logical_session_cache", "one_sec", "repl"]
@@ -5869,7 +5758,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 16
- name: logical_session_cache_replication_10sec_refresh_jscore_passthrough_gen
tags: ["logical_session_cache", "repl"]
@@ -5877,7 +5765,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 16
- name: logical_session_cache_sharding_default_refresh_jscore_passthrough_gen
tags: ["logical_session_cache"]
@@ -5885,7 +5772,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 12
- name: logical_session_cache_sharding_100ms_refresh_jscore_passthrough_gen
tags: ["logical_session_cache"]
@@ -5893,7 +5779,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 48
- name: logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough_gen
tags: ["logical_session_cache"]
@@ -5901,7 +5786,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 10
- name: logical_session_cache_sharding_1sec_refresh_jscore_passthrough_gen
tags: ["logical_session_cache", "one_sec"]
@@ -5909,7 +5793,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 20
- name: logical_session_cache_sharding_10sec_refresh_jscore_passthrough_gen
tags: ["logical_session_cache"]
@@ -5917,7 +5800,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 14
- name: logical_session_cache_standalone_default_refresh_jscore_passthrough_gen
tags: ["logical_session_cache"]
@@ -5925,7 +5807,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 5
- name: logical_session_cache_standalone_100ms_refresh_jscore_passthrough_gen
tags: ["logical_session_cache"]
@@ -5933,7 +5814,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 5
- name: logical_session_cache_standalone_1sec_refresh_jscore_passthrough_gen
tags: ["logical_session_cache", "one_sec"]
@@ -5941,7 +5821,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 5
- name: logical_session_cache_standalone_10sec_refresh_jscore_passthrough_gen
tags: ["logical_session_cache"]
@@ -5949,7 +5828,6 @@ tasks:
- func: "generate resmoke tasks"
vars:
resmoke_args: --storageEngine=wiredTiger
- fallback_num_sub_suites: 5
- <<: *task_template
name: retryable_writes_jscore_stepdown_passthrough
diff --git a/etc/pip/components/evergreen.req b/etc/pip/components/evergreen.req
index 210be9a6499..e4dda2bd320 100644
--- a/etc/pip/components/evergreen.req
+++ b/etc/pip/components/evergreen.req
@@ -1,5 +1,6 @@
click ~= 7.0
dataclasses; python_version < "3.7"
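+# python-inject: dependency injection used by the task generation buildscripts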
+inject ~= 4.3.1
GitPython ~= 3.1.7
psutil
pydantic ~= 1.7.3
diff --git a/evergreen/burn_in_tests_multiversion.sh b/evergreen/burn_in_tests_multiversion.sh
index 90597f6114d..0aa24319b53 100755
--- a/evergreen/burn_in_tests_multiversion.sh
+++ b/evergreen/burn_in_tests_multiversion.sh
@@ -17,6 +17,6 @@ fi
burn_in_args="$burn_in_args"
# Evergreen executable is in $HOME.
-PATH="$PATH:$HOME" eval $python buildscripts/burn_in_tests_multiversion.py --task_id=${task_id} --project=${project} $build_variant_opts --distro=${distro_id} --generate-tasks-file=burn_in_tests_multiversion_gen.json $burn_in_args --verbose
+PATH="$PATH:$HOME" eval $python buildscripts/burn_in_tests_multiversion.py --task_id=${task_id} --project=${project} $build_variant_opts --distro=${distro_id} --generate-tasks-file=burn_in_tests_multiversion_gen.json $burn_in_args --verbose --revision=${revision} --build-id=${build_id}
PATH="$PATH:/data/multiversion"
$python buildscripts/evergreen_gen_multiversion_tests.py generate-exclude-tags
diff --git a/evergreen/resmoke_tasks_generate.sh b/evergreen/resmoke_tasks_generate.sh
index 44eba5e8685..7ca1245b4b7 100644
--- a/evergreen/resmoke_tasks_generate.sh
+++ b/evergreen/resmoke_tasks_generate.sh
@@ -1,6 +1,11 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
+if [ -n "$GENERATE_BUILD_VARIANTS" ]; then
+ echo "Skipping generation since 'generate_build_variants' is set."
+ exit 0
+fi
+
cd src
set -o errexit