-rw-r--r--  buildscripts/burn_in_tags.py | 22
-rw-r--r--  buildscripts/burn_in_tests.py | 135
-rwxr-xr-x  buildscripts/evergreen_generate_resmoke_tasks.py | 83
-rw-r--r--  buildscripts/tests/test_burn_in_tags.py | 17
-rw-r--r--  buildscripts/tests/test_burn_in_tests.py | 206
-rw-r--r--  buildscripts/tests/test_evergreen_generate_resmoke_tasks.py | 83
-rw-r--r--  buildscripts/tests/util/test_teststats.py | 78
-rw-r--r--  buildscripts/util/teststats.py | 83
-rw-r--r--  etc/evergreen.yml | 11
9 files changed, 533 insertions(+), 185 deletions(-)
diff --git a/buildscripts/burn_in_tags.py b/buildscripts/burn_in_tags.py
index fd861022fd1..15b00e5c1c6 100644
--- a/buildscripts/burn_in_tags.py
+++ b/buildscripts/burn_in_tags.py
@@ -11,6 +11,8 @@ from shrub.config import Configuration
from shrub.variant import TaskSpec
from shrub.variant import Variant
+from evergreen.api import RetryingEvergreenApi
+
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -18,13 +20,13 @@ if __name__ == "__main__" and __package__ is None:
# pylint: disable=wrong-import-position
import buildscripts.util.read_config as read_config
from buildscripts.ciconfig import evergreen
-from buildscripts.burn_in_tests import create_tests_by_task
-from buildscripts.burn_in_tests import create_generate_tasks_config
+from buildscripts.burn_in_tests import create_generate_tasks_config, create_tests_by_task
# pylint: enable=wrong-import-position
CONFIG_DIRECTORY = "generated_burn_in_tags_config"
CONFIG_FILE = "burn_in_tags_gen.json"
EVERGREEN_FILE = "etc/evergreen.yml"
+EVG_CONFIG_FILE = ".evergreen.yml"
ConfigOptions = namedtuple("ConfigOptions", [
"buildvariant",
@@ -37,6 +39,7 @@ ConfigOptions = namedtuple("ConfigOptions", [
"repeat_tests_secs",
"repeat_tests_min",
"repeat_tests_max",
+ "project",
])
@@ -59,10 +62,11 @@ def _get_config_options(expansions_file_data, buildvariant, run_buildvariant):
repeat_tests_min = int(expansions_file_data["repeat_tests_min"])
repeat_tests_max = int(expansions_file_data["repeat_tests_max"])
repeat_tests_secs = float(expansions_file_data["repeat_tests_secs"])
+ project = expansions_file_data["project"]
return ConfigOptions(buildvariant, run_buildvariant, base_commit, max_revisions, branch,
check_evergreen, distro, repeat_tests_secs, repeat_tests_min,
- repeat_tests_max)
+ repeat_tests_max, project)
def _create_evg_buildvariant_map(expansions_file_data):
@@ -110,10 +114,11 @@ def _generate_evg_buildvariant(shrub_config, buildvariant, run_buildvariant):
new_variant.modules(modules)
-def _generate_evg_tasks(shrub_config, expansions_file_data, buildvariant_map):
+def _generate_evg_tasks(evergreen_api, shrub_config, expansions_file_data, buildvariant_map):
"""
Generate burn in tests tasks for a given shrub config and group of buildvariants.
+    :param evergreen_api: Evergreen API client object.
:param shrub_config: Shrub config object that the build variants will be built upon.
:param expansions_file_data: Config data file to use.
:param buildvariant_map: Map of base buildvariants to their generated buildvariant.
@@ -123,7 +128,8 @@ def _generate_evg_tasks(shrub_config, expansions_file_data, buildvariant_map):
tests_by_task = create_tests_by_task(config_options)
if tests_by_task:
_generate_evg_buildvariant(shrub_config, buildvariant, run_buildvariant)
- create_generate_tasks_config(shrub_config, config_options, tests_by_task, False)
+ create_generate_tasks_config(evergreen_api, shrub_config, config_options, tests_by_task,
+ False)
def _write_to_file(shrub_config):
@@ -139,7 +145,7 @@ def _write_to_file(shrub_config):
file_handle.write(shrub_config.to_json())
-def main():
+def main(evergreen_api):
"""Execute Main program."""
parser = argparse.ArgumentParser(description=main.__doc__)
@@ -150,9 +156,9 @@ def main():
shrub_config = Configuration()
buildvariant_map = _create_evg_buildvariant_map(expansions_file_data)
- _generate_evg_tasks(shrub_config, expansions_file_data, buildvariant_map)
+ _generate_evg_tasks(evergreen_api, shrub_config, expansions_file_data, buildvariant_map)
_write_to_file(shrub_config)
if __name__ == '__main__':
- main()
+ main(RetryingEvergreenApi.get_api(config_file=EVG_CONFIG_FILE))
diff --git a/buildscripts/burn_in_tests.py b/buildscripts/burn_in_tests.py
index 12a78b307d9..9f098cf0ff2 100644
--- a/buildscripts/burn_in_tests.py
+++ b/buildscripts/burn_in_tests.py
@@ -11,15 +11,22 @@ import re
import shlex
import sys
import urllib.parse
+import datetime
+import logging
+
+from math import ceil
-import requests
import yaml
+import requests
from shrub.config import Configuration
from shrub.command import CommandDefinition
from shrub.task import TaskDependency
from shrub.variant import DisplayTaskDefinition
from shrub.variant import TaskSpec
+from shrub.operations import CmdTimeoutUpdate
+
+from evergreen.api import RetryingEvergreenApi
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
@@ -30,12 +37,20 @@ from buildscripts import git
from buildscripts import resmokelib
from buildscripts.ciconfig import evergreen
from buildscripts.client import evergreen as evergreen_client
+from buildscripts.util import teststats
# pylint: enable=wrong-import-position
+LOGGER = logging.getLogger(__name__)
+
API_REST_PREFIX = "/rest/v1/"
API_SERVER_DEFAULT = "https://evergreen.mongodb.com"
+AVG_TEST_RUNTIME_ANALYSIS_DAYS = 14
+AVG_TEST_TIME_MULTIPLIER = 3
+CONFIG_FILE = "../src/.evergreen.yml"
REPEAT_SUITES = 2
EVERGREEN_FILE = "etc/evergreen.yml"
+MIN_AVG_TEST_OVERFLOW_SEC = 60
+MIN_AVG_TEST_TIME_SEC = 5 * 60
# The executor_file and suite_files defaults are required to make the suite resolver work
# correctly.
SELECTOR_FILE = "etc/burn_in_tests.yml"
@@ -97,6 +112,9 @@ def parse_command_line():
parser.add_option("--reportFile", dest="report_file", default="report.json",
help="Write a JSON file with test results. Default is '%default'.")
+ parser.add_option("--project", dest="project", default="mongodb-mongo-master",
+ help="The project the test history will be requested for.")
+
parser.add_option("--testListFile", dest="test_list_file", default=None, metavar="TESTLIST",
help="Load a JSON file with tests to run.")
@@ -461,7 +479,101 @@ def _get_run_buildvariant(options):
return options.buildvariant
-def create_generate_tasks_config(evg_config, options, tests_by_task, include_gen_task):
+def _parse_avg_test_runtime(test, task_avg_test_runtime_stats):
+ """
+    Parse the list of test stats to find the runtime of a particular test.
+
+    :param test: Test name.
+    :param task_avg_test_runtime_stats: Teststat data (list of TestRuntime tuples).
+    :return: Historical average runtime of the test, or None if no data is found.
+ """
+ for test_stat in task_avg_test_runtime_stats:
+ if test_stat.test_name == test:
+ return test_stat.runtime
+ return None
+
+
+def _calculate_timeout(avg_test_runtime):
+ """
+ Calculate timeout_secs for the Evergreen task.
+
+ :param avg_test_runtime: How long a test has historically taken to run.
+ :return: The test runtime times AVG_TEST_TIME_MULTIPLIER, or MIN_AVG_TEST_TIME_SEC (whichever
+ is higher).
+ """
+ return max(MIN_AVG_TEST_TIME_SEC, ceil(avg_test_runtime * AVG_TEST_TIME_MULTIPLIER))
+
+
+def _calculate_exec_timeout(options, avg_test_runtime):
+ """
+ Calculate exec_timeout_secs for the Evergreen task.
+
+    :param options: Command line options (provides repeat_tests_secs).
+    :param avg_test_runtime: How long a test has historically taken to run.
+    :return: repeat_tests_secs plus enough padding time for the test to finish its final run.
+ """
+ test_execution_time_over_limit = avg_test_runtime - (
+ options.repeat_tests_secs % avg_test_runtime)
+ test_execution_time_over_limit = max(MIN_AVG_TEST_OVERFLOW_SEC, test_execution_time_over_limit)
+ return ceil(options.repeat_tests_secs +
+ (test_execution_time_over_limit * AVG_TEST_TIME_MULTIPLIER))
+
+
+def _generate_timeouts(options, commands, test, task_avg_test_runtime_stats):
+ """
+ Add timeout.update command to list of commands for a burn in execution task.
+
+ :param options: Command line options.
+ :param commands: List of commands for a burn in execution task.
+ :param test: Test name.
+ :param task_avg_test_runtime_stats: Teststat data.
+ """
+ if task_avg_test_runtime_stats:
+ avg_test_runtime = _parse_avg_test_runtime(test, task_avg_test_runtime_stats)
+ if avg_test_runtime:
+ cmd_timeout = CmdTimeoutUpdate()
+ LOGGER.debug("Avg test runtime for test %s is: %s", test, avg_test_runtime)
+
+ timeout = _calculate_timeout(avg_test_runtime)
+ cmd_timeout.timeout(timeout)
+
+ exec_timeout = _calculate_exec_timeout(options, avg_test_runtime)
+ cmd_timeout.exec_timeout(exec_timeout)
+
+ commands.append(cmd_timeout.validate().resolve())
+
+
+def _get_task_runtime_history(evergreen_api, project, task, variant):
+ """
+    Fetch the historical average runtime for all tests in a task from the Evergreen API.
+
+ :param evergreen_api: Evergreen API.
+ :param project: Project name.
+ :param task: Task name.
+ :param variant: Variant name.
+    :return: Historical test runtimes, parsed into TestRuntime tuples.
+ """
+ try:
+ end_date = datetime.datetime.utcnow().replace(microsecond=0)
+ start_date = end_date - datetime.timedelta(days=AVG_TEST_RUNTIME_ANALYSIS_DAYS)
+ data = evergreen_api.test_stats_by_project(
+ project, after_date=start_date.strftime("%Y-%m-%d"),
+ before_date=end_date.strftime("%Y-%m-%d"), tasks=[task], variants=[variant],
+ group_by="test", group_num_days=AVG_TEST_RUNTIME_ANALYSIS_DAYS)
+ test_runtimes = teststats.TestStats(data).get_tests_runtimes()
+ LOGGER.debug("Test_runtime data parsed from Evergreen history: %s", test_runtimes)
+ return test_runtimes
+ except requests.HTTPError as err:
+ if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
+ # Evergreen may return a 503 when the service is degraded.
+            # We fall back to returning no test history.
+ return []
+ else:
+ raise
+
+
+def create_generate_tasks_config(evergreen_api, evg_config, options, tests_by_task,
+ include_gen_task):
"""Create the config for the Evergreen generate.tasks file."""
# pylint: disable=too-many-locals
task_specs = []
@@ -470,6 +582,8 @@ def create_generate_tasks_config(evg_config, options, tests_by_task, include_gen
task_names.append(BURN_IN_TESTS_GEN_TASK)
for task in sorted(tests_by_task):
multiversion_path = tests_by_task[task].get("use_multiversion")
+ task_avg_test_runtime_stats = _get_task_runtime_history(evergreen_api, options.project,
+ task, options.buildvariant)
for test_num, test in enumerate(tests_by_task[task]["tests"]):
sub_task_name = _sub_task_name(options, task, test_num)
task_names.append(sub_task_name)
@@ -485,6 +599,7 @@ def create_generate_tasks_config(evg_config, options, tests_by_task, include_gen
get_resmoke_repeat_options(options), test),
}
commands = []
+ _generate_timeouts(options, commands, test, task_avg_test_runtime_stats)
commands.append(CommandDefinition().function("do setup"))
if multiversion_path:
run_tests_vars["task_path_suffix"] = multiversion_path
@@ -525,11 +640,11 @@ def create_tests_by_task(options):
return tests_by_task
-def create_generate_tasks_file(options, tests_by_task):
+def create_generate_tasks_file(evergreen_api, options, tests_by_task):
"""Create the Evergreen generate.tasks file."""
evg_config = Configuration()
- evg_config = create_generate_tasks_config(evg_config, options, tests_by_task,
+ evg_config = create_generate_tasks_config(evergreen_api, evg_config, options, tests_by_task,
include_gen_task=True)
_write_json_file(evg_config.to_map(), options.generate_tasks_file)
@@ -561,9 +676,15 @@ def run_tests(no_exec, tests_by_task, resmoke_cmd, report_file):
_write_json_file(test_results, report_file)
-def main():
+def main(evergreen_api):
"""Execute Main program."""
+ logging.basicConfig(
+ format="[%(asctime)s - %(name)s - %(levelname)s] %(message)s",
+ level=logging.DEBUG,
+ stream=sys.stdout,
+ )
+
options, args = parse_command_line()
resmoke_cmd = _set_resmoke_cmd(options, args)
@@ -585,10 +706,10 @@ def main():
_write_json_file(tests_by_task, options.test_list_outfile)
if options.generate_tasks_file:
- create_generate_tasks_file(options, tests_by_task)
+ create_generate_tasks_file(evergreen_api, options, tests_by_task)
else:
run_tests(options.no_exec, tests_by_task, resmoke_cmd, options.report_file)
if __name__ == "__main__":
- main()
+ main(RetryingEvergreenApi.get_api(config_file=CONFIG_FILE))
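
The timeout arithmetic added above can be restated as a minimal standalone sketch (constants copied from burn_in_tests.py; the options object is replaced here by a bare repeat_tests_secs argument). The sample inputs are the same values asserted in the burn_in_tests unit tests further down, so the expected outputs are 1366 and 1531:

    from math import ceil

    AVG_TEST_TIME_MULTIPLIER = 3
    MIN_AVG_TEST_TIME_SEC = 5 * 60
    MIN_AVG_TEST_OVERFLOW_SEC = 60

    def calculate_timeout(avg_test_runtime):
        """Scale the historical runtime, but never go below the minimum timeout."""
        return max(MIN_AVG_TEST_TIME_SEC, ceil(avg_test_runtime * AVG_TEST_TIME_MULTIPLIER))

    def calculate_exec_timeout(repeat_tests_secs, avg_test_runtime):
        """Pad repeat_tests_secs so the final repetition has time to finish."""
        overflow = max(MIN_AVG_TEST_OVERFLOW_SEC,
                       avg_test_runtime - (repeat_tests_secs % avg_test_runtime))
        return ceil(repeat_tests_secs + overflow * AVG_TEST_TIME_MULTIPLIER)

    print(calculate_timeout(455.1))            # 1366 -> timeout_secs
    print(calculate_exec_timeout(600, 455.1))  # 1531 -> exec_timeout_secs
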
diff --git a/buildscripts/evergreen_generate_resmoke_tasks.py b/buildscripts/evergreen_generate_resmoke_tasks.py
index 757c01697f2..80ef5cb8e2b 100755
--- a/buildscripts/evergreen_generate_resmoke_tasks.py
+++ b/buildscripts/evergreen_generate_resmoke_tasks.py
@@ -36,6 +36,7 @@ import buildscripts.resmokelib.suitesconfig as suitesconfig # pylint: disable=w
import buildscripts.util.read_config as read_config # pylint: disable=wrong-import-position
import buildscripts.util.taskname as taskname # pylint: disable=wrong-import-position
import buildscripts.util.testname as testname # pylint: disable=wrong-import-position
+import buildscripts.util.teststats as teststats # pylint: disable=wrong-import-position
LOGGER = logging.getLogger(__name__)
@@ -463,77 +464,6 @@ class EvergreenConfigGenerator(object):
return self.evg_config
-def normalize_test_name(test_name):
- """Normalize test names that may have been run on windows or unix."""
- return test_name.replace("\\", "/")
-
-
-class TestStats(object):
- """Represent the test statistics for the task that is being analyzed."""
-
- def __init__(self, evg_test_stats_results):
- """Initialize the TestStats with raw results from the Evergreen API."""
- # Mapping from test_file to {"num_run": X, "duration": Y} for tests
- self._runtime_by_test = defaultdict(dict)
- # Mapping from test_name to {"num_run": X, "duration": Y} for hooks
- self._hook_runtime_by_test = defaultdict(dict)
-
- for doc in evg_test_stats_results:
- self._add_stats(doc)
-
- def _add_stats(self, test_stats):
- """Add the statistics found in a document returned by the Evergreen test_stats/ endpoint."""
- test_file = testname.normalize_test_file(test_stats.test_file)
- duration = test_stats.avg_duration_pass
- num_run = test_stats.num_pass
- is_hook = testname.is_resmoke_hook(test_file)
- if is_hook:
- self._add_test_hook_stats(test_file, duration, num_run)
- else:
- self._add_test_stats(test_file, duration, num_run)
-
- def _add_test_stats(self, test_file, duration, num_run):
- """Add the statistics for a test."""
- self._add_runtime_info(self._runtime_by_test, test_file, duration, num_run)
-
- def _add_test_hook_stats(self, test_file, duration, num_run):
- """Add the statistics for a hook."""
- test_name = testname.split_test_hook_name(test_file)[0]
- self._add_runtime_info(self._hook_runtime_by_test, test_name, duration, num_run)
-
- @staticmethod
- def _add_runtime_info(runtime_dict, test_name, duration, num_run):
- runtime_info = runtime_dict[test_name]
- if not runtime_info:
- runtime_info["duration"] = duration
- runtime_info["num_run"] = num_run
- else:
- runtime_info["duration"] = TestStats._average(
- runtime_info["duration"], runtime_info["num_run"], duration, num_run)
- runtime_info["num_run"] += num_run
-
- @staticmethod
- def _average(value_a, num_a, value_b, num_b):
- """Compute a weighted average of 2 values with associated numbers."""
- divisor = num_a + num_b
- if divisor == 0:
- return 0
- else:
- return float(value_a * num_a + value_b * num_b) / divisor
-
- def get_tests_runtimes(self):
- """Return the list of (test_file, runtime_in_secs) tuples ordered by decreasing runtime."""
- tests = []
- for test_file, runtime_info in list(self._runtime_by_test.items()):
- duration = runtime_info["duration"]
- test_name = testname.get_short_name_from_test_file(test_file)
- hook_runtime_info = self._hook_runtime_by_test[test_name]
- if hook_runtime_info:
- duration += hook_runtime_info["duration"]
- tests.append((normalize_test_name(test_file), duration))
- return sorted(tests, key=lambda x: x[1], reverse=True)
-
-
class Suite(object):
"""A suite of tests that can be run by evergreen."""
@@ -667,18 +597,21 @@ class Main(object):
def calculate_suites_from_evg_stats(self, data, execution_time_secs):
"""Divide tests into suites that can be run in less than the specified execution time."""
- test_stats = TestStats(data)
+ test_stats = teststats.TestStats(data)
tests_runtimes = self.filter_existing_tests(test_stats.get_tests_runtimes())
if not tests_runtimes:
return self.calculate_fallback_suites()
- self.test_list = [info[0] for info in tests_runtimes]
+ self.test_list = [info.test_name for info in tests_runtimes]
return divide_tests_into_suites(tests_runtimes, execution_time_secs,
self.options.max_sub_suites)
def filter_existing_tests(self, tests_runtimes):
"""Filter out tests that do not exist in the filesystem."""
- all_tests = [normalize_test_name(test) for test in self.list_tests()]
- return [info for info in tests_runtimes if os.path.exists(info[0]) and info[0] in all_tests]
+ all_tests = [teststats.normalize_test_name(test) for test in self.list_tests()]
+ return [
+ info for info in tests_runtimes
+ if os.path.exists(info.test_name) and info.test_name in all_tests
+ ]
def calculate_fallback_suites(self):
"""Divide tests into a fixed number of suites."""
diff --git a/buildscripts/tests/test_burn_in_tags.py b/buildscripts/tests/test_burn_in_tags.py
index 4a687b3ecc0..b036370fdf9 100644
--- a/buildscripts/tests/test_burn_in_tags.py
+++ b/buildscripts/tests/test_burn_in_tags.py
@@ -1,9 +1,12 @@
"""Unit tests for the burn_in_tags.py script."""
+import datetime
import os
import unittest
import mock
+from mock import Mock
+
from shrub.config import Configuration
from buildscripts import burn_in_tags
@@ -33,6 +36,7 @@ def get_expansions_data():
"repeat_tests_min": 2,
"repeat_tests_secs": 600,
"revision": "fake_sha",
+ "project": "fake_project",
} # yapf: disable
def get_evergreen_config():
@@ -91,7 +95,10 @@ class TestGenerateEvgTasks(unittest.TestCase):
"enterprise-rhel-62-64-bit-majority-read-concern-off-required",
} # yapf: disable
shrub_config = Configuration()
- burn_in_tags._generate_evg_tasks(shrub_config, expansions_file_data, buildvariant_map)
+ evergreen_api = Mock()
+ burn_in_tags._generate_evg_tasks(evergreen_api, shrub_config, expansions_file_data,
+ buildvariant_map)
+
self.assertEqual(shrub_config.to_map(), {})
@mock.patch(ns("evergreen"))
@@ -113,7 +120,13 @@ class TestGenerateEvgTasks(unittest.TestCase):
"enterprise-rhel-62-64-bit-majority-read-concern-off-required",
} # yapf: disable
shrub_config = Configuration()
- burn_in_tags._generate_evg_tasks(shrub_config, expansions_file_data, buildvariant_map)
+ evergreen_api = Mock()
+ evergreen_api.test_stats_by_project.return_value = [
+ Mock(test_file="dir/test2.js", avg_duration_pass=10)
+ ]
+ burn_in_tags._generate_evg_tasks(evergreen_api, shrub_config, expansions_file_data,
+ buildvariant_map)
+
generated_config = shrub_config.to_map()
self.assertEqual(len(generated_config["buildvariants"]), 2)
first_generated_build_variant = generated_config["buildvariants"][0]
diff --git a/buildscripts/tests/test_burn_in_tests.py b/buildscripts/tests/test_burn_in_tests.py
index afbfac59737..ac0f8fe44da 100644
--- a/buildscripts/tests/test_burn_in_tests.py
+++ b/buildscripts/tests/test_burn_in_tests.py
@@ -3,14 +3,19 @@
from __future__ import absolute_import
import collections
+import datetime
import os
import sys
import subprocess
import unittest
+from math import ceil
from mock import Mock, mock_open, patch, MagicMock
+import requests
+
import buildscripts.burn_in_tests as burn_in
+import buildscripts.util.teststats as teststats_utils
import buildscripts.ciconfig.evergreen as evg
# pylint: disable=missing-docstring,protected-access,too-many-lines
@@ -18,6 +23,7 @@ import buildscripts.ciconfig.evergreen as evg
BURN_IN = "buildscripts.burn_in_tests"
EVG_CI = "buildscripts.ciconfig.evergreen"
EVG_CLIENT = "buildscripts.client.evergreen"
+_DATE = datetime.datetime(2018, 7, 15)
GIT = "buildscripts.git"
RESMOKELIB = "buildscripts.resmokelib"
@@ -128,6 +134,23 @@ def _mock_parser():
return parser
+def _mock_evergreen_api():
+ evergreen_api = Mock()
+ evergreen_api.test_stats_by_project.return_value = [
+ Mock(
+ test_file="jstests/test1.js",
+ task_name="task1",
+ variant="variant1",
+ distro="distro1",
+ date=_DATE,
+ num_pass=1,
+ num_fail=0,
+ avg_duration_pass=10,
+ )
+ ]
+ return evergreen_api
+
+
class TestValidateOptions(unittest.TestCase):
@staticmethod
def _mock_options():
@@ -371,6 +394,112 @@ class TestGetRunBuildvariant(unittest.TestCase):
self.assertEqual(buildvariant, burn_in._get_run_buildvariant(options))
+class TestParseAvgTestRuntime(unittest.TestCase):
+ def test__parse_avg_test_runtime(self):
+ task_avg_test_runtime_stats = [
+ teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=30.2),
+ teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=455.1)
+ ]
+ result = burn_in._parse_avg_test_runtime("dir/test2.js", task_avg_test_runtime_stats)
+ self.assertEqual(result, 455.1)
+
+
+class TestCalculateTimeout(unittest.TestCase):
+ def test__calculate_timeout(self):
+ avg_test_runtime = 455.1
+ expected_result = ceil(avg_test_runtime * burn_in.AVG_TEST_TIME_MULTIPLIER)
+ self.assertEqual(expected_result, burn_in._calculate_timeout(avg_test_runtime))
+
+ def test__calculate_timeout_avg_is_less_than_min(self):
+ avg_test_runtime = 10
+ self.assertEqual(burn_in.MIN_AVG_TEST_TIME_SEC,
+ burn_in._calculate_timeout(avg_test_runtime))
+
+
+class TestCalculateExecTimeout(unittest.TestCase):
+ def test__calculate_exec_timeout(self):
+ avg_test_runtime = 455.1
+ repeat_tests_secs = 600
+ options = Mock(repeat_tests_secs=repeat_tests_secs)
+ expected_result = repeat_tests_secs + (
+ (avg_test_runtime -
+ (repeat_tests_secs % avg_test_runtime)) * burn_in.AVG_TEST_TIME_MULTIPLIER)
+ self.assertEqual(
+ ceil(expected_result), burn_in._calculate_exec_timeout(options, avg_test_runtime))
+
+
+class TestGenerateTimeouts(unittest.TestCase):
+ def test__generate_timeouts(self):
+ shrub_commands = []
+ task_avg_test_runtime_stats = [
+ teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=455.1)
+ ]
+ options = Mock(repeat_tests_secs=600)
+ test_name = "dir/test2.js"
+ burn_in._generate_timeouts(options, shrub_commands, test_name, task_avg_test_runtime_stats)
+
+ self.assertEqual(len(shrub_commands), 1)
+ command_definition = shrub_commands[0]
+ self.assertEqual(command_definition.to_map()['params']['exec_timeout_secs'], 1531)
+ self.assertEqual(command_definition.to_map()['params']['timeout_secs'], 1366)
+
+ def test__generate_timeouts_no_results(self):
+ shrub_commands = []
+ task_avg_test_runtime_stats = []
+ options = Mock(repeat_tests_secs=600)
+ test_name = "dir/new_test.js"
+ burn_in._generate_timeouts(options, shrub_commands, test_name, task_avg_test_runtime_stats)
+
+ self.assertEqual(len(shrub_commands), 0)
+
+ def test__generate_timeouts_avg_runtime_is_zero(self):
+ shrub_commands = []
+ task_avg_test_runtime_stats = [
+ teststats_utils.TestRuntime(test_name="dir/test_with_zero_runtime.js", runtime=0)
+ ]
+ options = Mock(repeat_tests_secs=600)
+ test_name = "dir/test_with_zero_runtime.js"
+ burn_in._generate_timeouts(options, shrub_commands, test_name, task_avg_test_runtime_stats)
+
+ self.assertEqual(len(shrub_commands), 0)
+
+
+class TestGetTaskRuntimeHistory(unittest.TestCase):
+ def test__get_task_runtime_history(self):
+ evergreen_api = Mock()
+ evergreen_api.test_stats_by_project.return_value = [
+ Mock(
+ test_file="dir/test2.js",
+ task_name="task1",
+ variant="variant1",
+ distro="distro1",
+ date=_DATE,
+ num_pass=1,
+ num_fail=0,
+ avg_duration_pass=10.1,
+ )
+ ]
+ analysis_duration = burn_in.AVG_TEST_RUNTIME_ANALYSIS_DAYS
+ end_date = datetime.datetime.utcnow().replace(microsecond=0)
+ start_date = end_date - datetime.timedelta(days=analysis_duration)
+
+ result = burn_in._get_task_runtime_history(evergreen_api, "project1", "task1", "variant1")
+ self.assertEqual(result, [("dir/test2.js", 10.1)])
+ evergreen_api.test_stats_by_project.assert_called_with(
+ "project1", after_date=start_date.strftime("%Y-%m-%d"),
+ before_date=end_date.strftime("%Y-%m-%d"), group_by="test", group_num_days=14,
+ tasks=["task1"], variants=["variant1"])
+
+ def test__get_task_runtime_history_evg_degraded_mode_error(self): # pylint: disable=invalid-name
+ response = Mock()
+ response.status_code = requests.codes.SERVICE_UNAVAILABLE
+ evergreen_api = Mock()
+ evergreen_api.test_stats_by_project.side_effect = requests.HTTPError(response=response)
+
+ result = burn_in._get_task_runtime_history(evergreen_api, "project1", "task1", "variant1")
+ self.assertEqual(result, [])
+
+
class TestGetTaskName(unittest.TestCase):
def test__get_task_name(self):
name = "mytask"
@@ -474,7 +603,9 @@ class TestCreateGenerateTasksFile(unittest.TestCase):
options = Mock()
options.buildvariant = None
options.run_buildvariant = None
+ options.repeat_tests_secs = 600
options.distro = None
+ options.branch = "master"
return options
@staticmethod
@@ -485,12 +616,45 @@ class TestCreateGenerateTasksFile(unittest.TestCase):
return tests
def test_create_generate_tasks_file_tasks(self):
+ evergreen_api = Mock()
+ evergreen_api.test_stats_by_project.return_value = [
+ Mock(
+ test_file="jstests/test1.js",
+ task_name="task1",
+ variant="variant1",
+ distro="distro1",
+ date=_DATE,
+ num_pass=1,
+ num_fail=0,
+ avg_duration_pass=10,
+ ),
+ Mock(
+ test_file="jstests/test2.js",
+ task_name="task1",
+ variant="variant1",
+ distro="distro1",
+ date=_DATE,
+ num_pass=1,
+ num_fail=0,
+ avg_duration_pass=10,
+ ),
+ Mock(
+ test_file="jstests/multi1.js",
+ task_name="task1",
+ variant="variant1",
+ distro="distro1",
+ date=_DATE,
+ num_pass=1,
+ num_fail=0,
+ avg_duration_pass=10,
+ )
+ ]
options = self._options_mock()
options.buildvariant = "myvariant"
tests_by_task = TESTS_BY_TASK
test_tasks = self._get_tests(tests_by_task)
with patch(BURN_IN + "._write_json_file") as mock_write_json:
- burn_in.create_generate_tasks_file(options, tests_by_task)
+ burn_in.create_generate_tasks_file(evergreen_api, options, tests_by_task)
evg_config = mock_write_json.call_args_list[0][0][0]
evg_tasks = evg_config["tasks"]
self.assertEqual(len(evg_tasks), len(test_tasks))
@@ -499,10 +663,10 @@ class TestCreateGenerateTasksFile(unittest.TestCase):
self.assertEqual(task["name"], "burn_in:myvariant_task1_0")
self.assertEqual(len(task["depends_on"]), 1)
self.assertEqual(task["depends_on"][0]["name"], "compile")
- self.assertEqual(len(task["commands"]), 2)
- self.assertEqual(task["commands"][0]["func"], "do setup")
- self.assertEqual(task["commands"][1]["func"], "run tests")
- resmoke_args = task["commands"][1]["vars"]["resmoke_args"]
+ self.assertEqual(len(task["commands"]), 3)
+ self.assertEqual(task["commands"][1]["func"], "do setup")
+ self.assertEqual(task["commands"][2]["func"], "run tests")
+ resmoke_args = task["commands"][2]["vars"]["resmoke_args"]
self.assertIn("--suites=suite1", resmoke_args)
self.assertIn("jstests/test1.js", resmoke_args)
# Check task1 - test2.js
@@ -510,10 +674,10 @@ class TestCreateGenerateTasksFile(unittest.TestCase):
self.assertEqual(task["name"], "burn_in:myvariant_task1_1")
self.assertEqual(len(task["depends_on"]), 1)
self.assertEqual(task["depends_on"][0]["name"], "compile")
- self.assertEqual(len(task["commands"]), 2)
- self.assertEqual(task["commands"][0]["func"], "do setup")
- self.assertEqual(task["commands"][1]["func"], "run tests")
- resmoke_args = task["commands"][1]["vars"]["resmoke_args"]
+ self.assertEqual(len(task["commands"]), 3)
+ self.assertEqual(task["commands"][1]["func"], "do setup")
+ self.assertEqual(task["commands"][2]["func"], "run tests")
+ resmoke_args = task["commands"][2]["vars"]["resmoke_args"]
self.assertIn("--suites=suite1", resmoke_args)
self.assertIn("jstests/test2.js", resmoke_args)
# task[2] - task[5] are similar to task[0] & task[1]
@@ -522,21 +686,22 @@ class TestCreateGenerateTasksFile(unittest.TestCase):
self.assertEqual(taskmulti["name"], "burn_in:myvariant_taskmulti_0")
self.assertEqual(len(taskmulti["depends_on"]), 1)
self.assertEqual(taskmulti["depends_on"][0]["name"], "compile")
- self.assertEqual(len(taskmulti["commands"]), 3)
- self.assertEqual(taskmulti["commands"][0]["func"], "do setup")
- self.assertEqual(taskmulti["commands"][1]["func"], "do multiversion setup")
- self.assertEqual(taskmulti["commands"][2]["func"], "run tests")
- resmoke_args = taskmulti["commands"][2]["vars"]["resmoke_args"]
+ self.assertEqual(len(taskmulti["commands"]), 4)
+ self.assertEqual(taskmulti["commands"][1]["func"], "do setup")
+ self.assertEqual(taskmulti["commands"][2]["func"], "do multiversion setup")
+ self.assertEqual(taskmulti["commands"][3]["func"], "run tests")
+ resmoke_args = taskmulti["commands"][3]["vars"]["resmoke_args"]
self.assertIn("--suites=suite4", resmoke_args)
self.assertIn("jstests/multi1.js", resmoke_args)
- self.assertEqual(taskmulti["commands"][2]["vars"]["task_path_suffix"], "/data/multi")
+ self.assertEqual(taskmulti["commands"][3]["vars"]["task_path_suffix"], "/data/multi")
def test_create_generate_tasks_file_variants(self):
+ evergreen_api = _mock_evergreen_api()
options = self._options_mock()
options.buildvariant = "myvariant"
tests_by_task = TESTS_BY_TASK
with patch(BURN_IN + "._write_json_file") as mock_write_json:
- burn_in.create_generate_tasks_file(options, tests_by_task)
+ burn_in.create_generate_tasks_file(evergreen_api, options, tests_by_task)
evg_config = mock_write_json.call_args_list[0][0][0]
self.assertEqual(len(evg_config["buildvariants"]), 1)
self.assertEqual(evg_config["buildvariants"][0]["name"], "myvariant")
@@ -556,12 +721,13 @@ class TestCreateGenerateTasksFile(unittest.TestCase):
self.assertEqual(execution_tasks[7], "burn_in:myvariant_taskmulti_0")
def test_create_generate_tasks_file_run_variants(self):
+ evergreen_api = _mock_evergreen_api()
options = self._options_mock()
options.buildvariant = "myvariant"
options.run_buildvariant = "run_variant"
tests_by_task = TESTS_BY_TASK
with patch(BURN_IN + "._write_json_file") as mock_write_json:
- burn_in.create_generate_tasks_file(options, tests_by_task)
+ burn_in.create_generate_tasks_file(evergreen_api, options, tests_by_task)
evg_config = mock_write_json.call_args_list[0][0][0]
self.assertEqual(len(evg_config["buildvariants"]), 1)
self.assertEqual(evg_config["buildvariants"][0]["name"], "run_variant")
@@ -581,13 +747,14 @@ class TestCreateGenerateTasksFile(unittest.TestCase):
self.assertEqual(execution_tasks[7], "burn_in:run_variant_taskmulti_0")
def test_create_generate_tasks_file_distro(self):
+ evergreen_api = _mock_evergreen_api()
options = self._options_mock()
options.buildvariant = "myvariant"
options.distro = "mydistro"
tests_by_task = TESTS_BY_TASK
test_tasks = self._get_tests(tests_by_task)
with patch(BURN_IN + "._write_json_file") as mock_write_json:
- burn_in.create_generate_tasks_file(options, tests_by_task)
+ burn_in.create_generate_tasks_file(evergreen_api, options, tests_by_task)
evg_config = mock_write_json.call_args_list[0][0][0]
self.assertEqual(len(evg_config["tasks"]), len(test_tasks))
self.assertEqual(len(evg_config["buildvariants"]), 1)
@@ -597,12 +764,13 @@ class TestCreateGenerateTasksFile(unittest.TestCase):
self.assertEqual(task["distros"][0], options.distro)
def test_create_generate_tasks_file_no_tasks(self):
+ evergreen_api = _mock_evergreen_api()
variant = "myvariant"
options = self._options_mock()
options.buildvariant = variant
tests_by_task = {}
with patch(BURN_IN + "._write_json_file") as mock_write_json:
- burn_in.create_generate_tasks_file(options, tests_by_task)
+ burn_in.create_generate_tasks_file(evergreen_api, options, tests_by_task)
evg_config = mock_write_json.call_args_list[0][0][0]
self.assertEqual(len(evg_config), 1)
self.assertEqual(len(evg_config["buildvariants"]), 1)
diff --git a/buildscripts/tests/test_evergreen_generate_resmoke_tasks.py b/buildscripts/tests/test_evergreen_generate_resmoke_tasks.py
index 70eb53ea2d0..bc8c9d72984 100644
--- a/buildscripts/tests/test_evergreen_generate_resmoke_tasks.py
+++ b/buildscripts/tests/test_evergreen_generate_resmoke_tasks.py
@@ -13,6 +13,7 @@ from mock import patch, mock_open, call, Mock, MagicMock
from buildscripts import evergreen_generate_resmoke_tasks as grt
from buildscripts.evergreen_generate_resmoke_tasks import string_contains_any_of_args, \
prepare_directory_for_suite, remove_gen_suffix, render_suite, render_misc_suite
+import buildscripts.util.teststats as teststats_utils
# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access
@@ -52,62 +53,6 @@ class TestHelperMethods(unittest.TestCase):
self.assertEqual(False, string_contains_any_of_args(string, args))
-class TestTestStats(unittest.TestCase):
- def test_no_hooks(self):
- evg_results = [
- self._make_evg_result("dir/test1.js", 1, 10),
- self._make_evg_result("dir/test2.js", 1, 30),
- self._make_evg_result("dir/test1.js", 2, 25),
- ]
- test_stats = grt.TestStats(evg_results)
- expected_runtimes = [
- ("dir/test2.js", 30),
- ("dir/test1.js", 20),
- ]
- self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
-
- def test_hooks(self):
- evg_results = [
- self._make_evg_result("dir/test1.js", 1, 10),
- self._make_evg_result("dir/test2.js", 1, 30),
- self._make_evg_result("dir/test1.js", 2, 25),
- self._make_evg_result("dir/test3.js", 5, 10),
- self._make_evg_result("test3:CleanEveryN", 10, 30),
- self._make_evg_result("test3:CheckReplDBHash", 10, 35),
- ]
- test_stats = grt.TestStats(evg_results)
- expected_runtimes = [
- ("dir/test3.js", 42.5),
- ("dir/test2.js", 30),
- ("dir/test1.js", 20),
- ]
- self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
-
- def test_zero_runs(self):
- evg_results = [
- self._make_evg_result("dir/test1.js", 0, 0),
- self._make_evg_result("dir/test1.js", 0, 0),
- ]
- test_stats = grt.TestStats(evg_results)
- expected_runtimes = [
- ("dir/test1.js", 0),
- ]
- self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
-
- @staticmethod
- def _make_evg_result(test_file="dir/test1.js", num_pass=0, duration=0):
- return Mock(
- test_file=test_file,
- task_name="task1",
- variant="variant1",
- distro="distro1",
- date=_DATE,
- num_pass=num_pass,
- num_fail=0,
- avg_duration_pass=duration,
- )
-
-
class DivideRemainingTestsAmongSuitesTest(unittest.TestCase):
@staticmethod
def generate_tests_runtimes(n_tests):
@@ -547,14 +492,6 @@ class EvergreenConfigGeneratorTest(unittest.TestCase):
self.assertEqual("do setup", timeout_cmd["func"])
-class NormalizeTestNameTest(unittest.TestCase):
- def test_unix_names(self):
- self.assertEqual("/home/user/test.js", grt.normalize_test_name("/home/user/test.js"))
-
- def test_windows_names(self):
- self.assertEqual("/home/user/test.js", grt.normalize_test_name("\\home\\user\\test.js"))
-
-
class MainTest(unittest.TestCase):
@staticmethod
def get_mock_options():
@@ -662,9 +599,9 @@ class MainTest(unittest.TestCase):
def test_filter_missing_files(self):
tests_runtimes = [
- ("dir1/file1.js", 20.32),
- ("dir2/file2.js", 24.32),
- ("dir1/file3.js", 36.32),
+ teststats_utils.TestRuntime(test_name="dir1/file1.js", runtime=20.32),
+ teststats_utils.TestRuntime(test_name="dir2/file2.js", runtime=24.32),
+ teststats_utils.TestRuntime(test_name="dir1/file3.js", runtime=36.32),
]
with patch("os.path.exists") as exists_mock, patch(ns("suitesconfig")) as suitesconfig_mock:
@@ -684,9 +621,9 @@ class MainTest(unittest.TestCase):
def test_filter_blacklist_files(self):
tests_runtimes = [
- ("dir1/file1.js", 20.32),
- ("dir2/file2.js", 24.32),
- ("dir1/file3.js", 36.32),
+ teststats_utils.TestRuntime(test_name="dir1/file1.js", runtime=20.32),
+ teststats_utils.TestRuntime(test_name="dir2/file2.js", runtime=24.32),
+ teststats_utils.TestRuntime(test_name="dir1/file3.js", runtime=36.32),
]
blacklisted_test = tests_runtimes[1][0]
@@ -708,9 +645,9 @@ class MainTest(unittest.TestCase):
def test_filter_blacklist_files_for_windows(self):
tests_runtimes = [
- ("dir1/file1.js", 20.32),
- ("dir2/file2.js", 24.32),
- ("dir1/dir3/file3.js", 36.32),
+ teststats_utils.TestRuntime(test_name="dir1/file1.js", runtime=20.32),
+ teststats_utils.TestRuntime(test_name="dir2/file2.js", runtime=24.32),
+ teststats_utils.TestRuntime(test_name="dir1/dir3/file3.js", runtime=36.32),
]
blacklisted_test = tests_runtimes[1][0]
diff --git a/buildscripts/tests/util/test_teststats.py b/buildscripts/tests/util/test_teststats.py
new file mode 100644
index 00000000000..6b9b94970e5
--- /dev/null
+++ b/buildscripts/tests/util/test_teststats.py
@@ -0,0 +1,78 @@
+"""Unit tests for the util.teststats module."""
+
+import datetime
+import unittest
+
+from mock import Mock
+
+import buildscripts.util.teststats as teststats_utils
+
+# pylint: disable=missing-docstring
+
+_DATE = datetime.datetime(2018, 7, 15)
+
+
+class NormalizeTestNameTest(unittest.TestCase):
+ def test_unix_names(self):
+ self.assertEqual("/home/user/test.js",
+ teststats_utils.normalize_test_name("/home/user/test.js"))
+
+ def test_windows_names(self):
+ self.assertEqual("/home/user/test.js",
+ teststats_utils.normalize_test_name("\\home\\user\\test.js"))
+
+
+class TestTestStats(unittest.TestCase):
+ def test_no_hooks(self):
+ evg_results = [
+ self._make_evg_result("dir/test1.js", 1, 10),
+ self._make_evg_result("dir/test2.js", 1, 30),
+ self._make_evg_result("dir/test1.js", 2, 25),
+ ]
+ test_stats = teststats_utils.TestStats(evg_results)
+ expected_runtimes = [
+ teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=30),
+ teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=20),
+ ]
+ self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
+
+ def test_hooks(self):
+ evg_results = [
+ self._make_evg_result("dir/test1.js", 1, 10),
+ self._make_evg_result("dir/test2.js", 1, 30),
+ self._make_evg_result("dir/test1.js", 2, 25),
+ self._make_evg_result("dir/test3.js", 5, 10),
+ self._make_evg_result("test3:CleanEveryN", 10, 30),
+ self._make_evg_result("test3:CheckReplDBHash", 10, 35),
+ ]
+ test_stats = teststats_utils.TestStats(evg_results)
+ expected_runtimes = [
+ teststats_utils.TestRuntime(test_name="dir/test3.js", runtime=42.5),
+ teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=30),
+ teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=20),
+ ]
+ self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
+
+ def test_zero_runs(self):
+ evg_results = [
+ self._make_evg_result("dir/test1.js", 0, 0),
+ self._make_evg_result("dir/test1.js", 0, 0),
+ ]
+ test_stats = teststats_utils.TestStats(evg_results)
+ expected_runtimes = [
+ teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=0),
+ ]
+ self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
+
+ @staticmethod
+ def _make_evg_result(test_file="dir/test1.js", num_pass=0, duration=0):
+ return Mock(
+ test_file=test_file,
+ task_name="task1",
+ variant="variant1",
+ distro="distro1",
+ date=_DATE,
+ num_pass=num_pass,
+ num_fail=0,
+ avg_duration_pass=duration,
+ )
diff --git a/buildscripts/util/teststats.py b/buildscripts/util/teststats.py
new file mode 100644
index 00000000000..2c09019cd76
--- /dev/null
+++ b/buildscripts/util/teststats.py
@@ -0,0 +1,83 @@
+"""Utility to support parsing a TestStat."""
+
+import sys
+import os
+import logging
+
+from collections import defaultdict
+from collections import namedtuple
+import buildscripts.util.testname as testname # pylint: disable=wrong-import-position
+
+TestRuntime = namedtuple('TestRuntime', ['test_name', 'runtime'])
+
+
+def normalize_test_name(test_name):
+ """Normalize test names that may have been run on windows or unix."""
+ return test_name.replace("\\", "/")
+
+
+class TestStats(object):
+ """Represent the test statistics for the task that is being analyzed."""
+
+ def __init__(self, evg_test_stats_results):
+ """Initialize the TestStats with raw results from the Evergreen API."""
+ # Mapping from test_file to {"num_run": X, "duration": Y} for tests
+ self._runtime_by_test = defaultdict(dict)
+ # Mapping from test_name to {"num_run": X, "duration": Y} for hooks
+ self._hook_runtime_by_test = defaultdict(dict)
+
+ for doc in evg_test_stats_results:
+ self._add_stats(doc)
+
+ def _add_stats(self, test_stats):
+ """Add the statistics found in a document returned by the Evergreen test_stats/ endpoint."""
+ test_file = testname.normalize_test_file(test_stats.test_file)
+ duration = test_stats.avg_duration_pass
+ num_run = test_stats.num_pass
+ is_hook = testname.is_resmoke_hook(test_file)
+ if is_hook:
+ self._add_test_hook_stats(test_file, duration, num_run)
+ else:
+ self._add_test_stats(test_file, duration, num_run)
+
+ def _add_test_stats(self, test_file, duration, num_run):
+ """Add the statistics for a test."""
+ self._add_runtime_info(self._runtime_by_test, test_file, duration, num_run)
+
+ def _add_test_hook_stats(self, test_file, duration, num_run):
+ """Add the statistics for a hook."""
+ test_name = testname.split_test_hook_name(test_file)[0]
+ self._add_runtime_info(self._hook_runtime_by_test, test_name, duration, num_run)
+
+ @staticmethod
+ def _add_runtime_info(runtime_dict, test_name, duration, num_run):
+ runtime_info = runtime_dict[test_name]
+ if not runtime_info:
+ runtime_info["duration"] = duration
+ runtime_info["num_run"] = num_run
+ else:
+ runtime_info["duration"] = TestStats._average(
+ runtime_info["duration"], runtime_info["num_run"], duration, num_run)
+ runtime_info["num_run"] += num_run
+
+ @staticmethod
+ def _average(value_a, num_a, value_b, num_b):
+ """Compute a weighted average of 2 values with associated numbers."""
+ divisor = num_a + num_b
+ if divisor == 0:
+ return 0
+ else:
+ return float(value_a * num_a + value_b * num_b) / divisor
+
+ def get_tests_runtimes(self):
+ """Return the list of (test_file, runtime_in_secs) tuples ordered by decreasing runtime."""
+ tests = []
+ for test_file, runtime_info in list(self._runtime_by_test.items()):
+ duration = runtime_info["duration"]
+ test_name = testname.get_short_name_from_test_file(test_file)
+ hook_runtime_info = self._hook_runtime_by_test[test_name]
+ if hook_runtime_info:
+ duration += hook_runtime_info["duration"]
+ test = TestRuntime(test_name=normalize_test_name(test_file), runtime=duration)
+ tests.append(test)
+ return sorted(tests, key=lambda x: x.runtime, reverse=True)
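
A minimal usage sketch of the new buildscripts.util.teststats module (assumes the repository root is on PYTHONPATH; the mocked documents mirror the shape returned by evergreen_api.test_stats_by_project in the tests above):

    from unittest.mock import Mock

    import buildscripts.util.teststats as teststats

    evg_results = [
        Mock(test_file="dir/test1.js", num_pass=1, avg_duration_pass=10),
        Mock(test_file="dir/test1.js", num_pass=2, avg_duration_pass=25),
        Mock(test_file="dir/test2.js", num_pass=1, avg_duration_pass=30),
    ]
    stats = teststats.TestStats(evg_results)
    # dir/test1.js is weight-averaged: (10 * 1 + 25 * 2) / 3 == 20.0
    print(stats.get_tests_runtimes())
    # [TestRuntime(test_name='dir/test2.js', runtime=30),
    #  TestRuntime(test_name='dir/test1.js', runtime=20.0)]
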
diff --git a/etc/evergreen.yml b/etc/evergreen.yml
index 7aad0ddb07a..ab79d42cedc 100644
--- a/etc/evergreen.yml
+++ b/etc/evergreen.yml
@@ -1487,6 +1487,7 @@ functions:
- command: expansions.write
params:
file: expansions.yml
+ - *configure_evergreen_api_credentials
- command: shell.exec
params:
working_dir: src
@@ -4860,6 +4861,14 @@ tasks:
set -o errexit
set -o verbose
mkdir ../src
+ - func: "configure evergreen api credentials"
+ - command: shell.exec
+ params:
+ working_dir: burn_in_tests_clonedir
+ shell: bash
+ script: |
+ set -o errexit
+ set -o verbose
${activate_virtualenv}
# If this is a scheduled build, we check for changes against the last scheduled commit.
if [ "${is_patch}" != "true" ]; then
@@ -4875,7 +4884,7 @@ tasks:
# Increase the burn_in repetition from 2 to 1000 executions or 10 minutes
burn_in_args="$burn_in_args --repeatTestsMin=2 --repeatTestsMax=1000 --repeatTestsSecs=600"
# Evergreen executable is in $HOME.
- PATH=$PATH:$HOME $python buildscripts/burn_in_tests.py --branch=${branch_name} $build_variant_opts --distro=${distro_id} --generateTasksFile=../src/burn_in_tests_gen.json --noExec $burn_in_args
+ PATH=$PATH:$HOME $python buildscripts/burn_in_tests.py --branch=${branch_name} --project=${project} $build_variant_opts --distro=${distro_id} --generateTasksFile=../src/burn_in_tests_gen.json --noExec $burn_in_args
- command: archive.targz_pack
params:
target: src/burn_in_tests_gen.tgz