summaryrefslogtreecommitdiff
path: root/buildscripts
diff options
context:
space:
mode:
authorJeff Zambory <jeff.zambory@mongodb.com>2022-11-22 19:22:39 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-11-24 14:30:25 +0000
commit3568ba71bb3d57ce5176b44206ad66b05d29e407 (patch)
tree8619b7236981e2d67cf9d90360186c2a2c5225b8 /buildscripts
parent1557a827b0ea179cf64b3e4139169033e2181a59 (diff)
downloadmongo-3568ba71bb3d57ce5176b44206ad66b05d29e407.tar.gz
SERVER-71473: Begin using the new test stats location
Diffstat (limited to 'buildscripts')
-rw-r--r--buildscripts/burn_in_tags.py2
-rw-r--r--buildscripts/evergreen_burn_in_tests.py30
-rwxr-xr-xbuildscripts/evergreen_task_timeout.py8
-rw-r--r--buildscripts/resmoke_tests_runtime_validate.py25
-rw-r--r--buildscripts/task_generation/suite_split.py26
-rw-r--r--buildscripts/tests/task_generation/test_suite_split.py67
-rw-r--r--buildscripts/tests/test_burn_in_tags.py20
-rw-r--r--buildscripts/tests/test_evergreen_burn_in_tests.py55
-rw-r--r--buildscripts/tests/test_selected_tests.py10
-rw-r--r--buildscripts/tests/timeouts/test_timeout_service.py112
-rw-r--r--buildscripts/tests/util/test_teststats.py2
-rw-r--r--buildscripts/timeouts/timeout_service.py24
-rw-r--r--buildscripts/util/teststats.py83
13 files changed, 218 insertions, 246 deletions
diff --git a/buildscripts/burn_in_tags.py b/buildscripts/burn_in_tags.py
index 5acb2e75861..1785f29e052 100644
--- a/buildscripts/burn_in_tags.py
+++ b/buildscripts/burn_in_tags.py
@@ -160,7 +160,7 @@ def _generate_evg_tasks(evergreen_api: EvergreenApi, shrub_project: ShrubProject
repeat_tests_max=config_options.repeat_tests_max,
repeat_tests_secs=config_options.repeat_tests_secs)
- burn_in_generator = GenerateBurnInExecutor(gen_config, repeat_config, evergreen_api)
+ burn_in_generator = GenerateBurnInExecutor(gen_config, repeat_config)
burn_in_generator.generate_tasks_for_variant(tests_by_task, shrub_build_variant)
shrub_project.add_build_variant(shrub_build_variant)
diff --git a/buildscripts/evergreen_burn_in_tests.py b/buildscripts/evergreen_burn_in_tests.py
index a628a9a5d20..bc92a2fef33 100644
--- a/buildscripts/evergreen_burn_in_tests.py
+++ b/buildscripts/evergreen_burn_in_tests.py
@@ -34,7 +34,6 @@ DEFAULT_VARIANT = "enterprise-rhel-80-64-bit-dynamic-required"
BURN_IN_TESTS_GEN_TASK = "burn_in_tests_gen"
BURN_IN_TESTS_TASK = "burn_in_tests"
BURN_IN_ENV_VAR = "BURN_IN_TESTS"
-AVG_TEST_RUNTIME_ANALYSIS_DAYS = 14
AVG_TEST_SETUP_SEC = 4 * 60
AVG_TEST_TIME_MULTIPLIER = 3
MIN_AVG_TEST_OVERFLOW_SEC = float(60)
@@ -328,23 +327,17 @@ class GenerateBurnInExecutor(BurnInExecutor):
# pylint: disable=too-many-arguments
def __init__(self, generate_config: GenerateConfig, repeat_config: RepeatConfig,
- evg_api: EvergreenApi, generate_tasks_file: Optional[str] = None,
- history_end_date: Optional[datetime] = None) -> None:
+ generate_tasks_file: Optional[str] = None) -> None:
"""
Create a new generate burn-in executor.
:param generate_config: Configuration for how to generate tasks.
:param repeat_config: Configuration for how tests should be repeated.
- :param evg_api: Evergreen API client.
:param generate_tasks_file: File to write generated task configuration to.
- :param history_end_date: End date of range to query for historic test data.
"""
self.generate_config = generate_config
self.repeat_config = repeat_config
- self.evg_api = evg_api
self.generate_tasks_file = generate_tasks_file
- self.history_end_date = history_end_date if history_end_date else datetime.utcnow()\
- .replace(microsecond=0)
def get_task_runtime_history(self, task: str) -> List[TestRuntime]:
"""
@@ -353,21 +346,10 @@ class GenerateBurnInExecutor(BurnInExecutor):
:param task: Task to query.
:return: List of runtime histories for all tests in specified task.
"""
- try:
- project = self.generate_config.project
- variant = self.generate_config.build_variant
- end_date = self.history_end_date
- start_date = end_date - timedelta(days=AVG_TEST_RUNTIME_ANALYSIS_DAYS)
- test_stats = HistoricTaskData.from_evg(self.evg_api, project, start_date=start_date,
- end_date=end_date, task=task, variant=variant)
- return test_stats.get_tests_runtimes()
- except requests.HTTPError as err:
- if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
- # Evergreen may return a 503 when the service is degraded.
- # We fall back to returning no test history
- return []
- else:
- raise
+ project = self.generate_config.project
+ variant = self.generate_config.build_variant
+ test_stats = HistoricTaskData.from_s3(project, task, variant)
+ return test_stats.get_tests_runtimes()
def _get_existing_tasks(self) -> Optional[Set[ExistingTask]]:
"""Get any existing tasks that should be included in the generated display task."""
@@ -434,7 +416,7 @@ def burn_in(task_id: str, build_variant: str, generate_config: GenerateConfig,
:param install_dir: Path to bin directory of a testable installation
"""
change_detector = EvergreenFileChangeDetector(task_id, evg_api, os.environ)
- executor = GenerateBurnInExecutor(generate_config, repeat_config, evg_api, generate_tasks_file)
+ executor = GenerateBurnInExecutor(generate_config, repeat_config, generate_tasks_file)
burn_in_orchestrator = BurnInOrchestrator(change_detector, executor, evg_conf, install_dir)
burn_in_orchestrator.burn_in(repos, build_variant)
diff --git a/buildscripts/evergreen_task_timeout.py b/buildscripts/evergreen_task_timeout.py
index 5c0eabf7aef..f35a6c6c897 100755
--- a/buildscripts/evergreen_task_timeout.py
+++ b/buildscripts/evergreen_task_timeout.py
@@ -7,7 +7,7 @@ import math
import os
import shlex
import sys
-from datetime import datetime, timedelta
+from datetime import timedelta
from pathlib import Path
from typing import Dict, List, Optional
@@ -19,7 +19,7 @@ from evergreen import EvergreenApi, RetryingEvergreenApi
from buildscripts.ciconfig.evergreen import (EvergreenProjectConfig, parse_evergreen_file)
from buildscripts.task_generation.resmoke_proxy import ResmokeProxyService
-from buildscripts.timeouts.timeout_service import (TimeoutParams, TimeoutService, TimeoutSettings)
+from buildscripts.timeouts.timeout_service import (TimeoutParams, TimeoutService)
from buildscripts.util.cmdutils import enable_logging
from buildscripts.util.taskname import determine_task_base_name
@@ -369,9 +369,6 @@ def main():
options = parser.parse_args()
- end_date = datetime.now()
- start_date = end_date - HISTORY_LOOKBACK
-
timeout_override = timedelta(seconds=options.timeout) if options.timeout else None
exec_timeout_override = timedelta(
seconds=options.exec_timeout) if options.exec_timeout else None
@@ -386,7 +383,6 @@ def main():
binder.bind(
EvergreenApi,
RetryingEvergreenApi.get_api(config_file=os.path.expanduser(options.evg_api_config)))
- binder.bind(TimeoutSettings, TimeoutSettings(start_date=start_date, end_date=end_date))
binder.bind(TimeoutOverrides, timeout_overrides)
binder.bind(EvergreenProjectConfig,
parse_evergreen_file(os.path.expanduser(options.evg_project_config)))
diff --git a/buildscripts/resmoke_tests_runtime_validate.py b/buildscripts/resmoke_tests_runtime_validate.py
index cffcbf48e9d..5c17d1ee5f8 100644
--- a/buildscripts/resmoke_tests_runtime_validate.py
+++ b/buildscripts/resmoke_tests_runtime_validate.py
@@ -3,7 +3,6 @@
import json
import sys
from collections import namedtuple
-from datetime import datetime, timedelta
from statistics import mean
from typing import Dict, List
@@ -13,7 +12,8 @@ import structlog
from buildscripts.resmokelib.testing.report import TestInfo, TestReport
from buildscripts.resmokelib.utils import get_task_name_without_suffix
from buildscripts.util.cmdutils import enable_logging
-from evergreen import RetryingEvergreenApi, TestStats
+
+from buildscripts.util.teststats import HistoricTaskData, HistoricalTestInformation
LOGGER = structlog.get_logger("buildscripts.resmoke_tests_runtime_validate")
@@ -34,17 +34,12 @@ def parse_resmoke_report(report_file: str) -> List[TestInfo]:
return [test_info for test_info in test_report.test_infos if "jstests" in test_info.test_file]
-def get_historic_stats(evg_api_config: str, project_id: str, test_files: List[str], task_name: str,
- build_variant: str) -> List[TestStats]:
+def get_historic_stats(project_id: str, task_name: str,
+ build_variant: str) -> List[HistoricalTestInformation]:
"""Get historic test stats."""
- evg_api = RetryingEvergreenApi.get_api(config_file=evg_api_config)
- before_date = datetime.today()
- after_date = before_date - timedelta(days=LOOK_BACK_NUM_DAYS)
base_task_name = get_task_name_without_suffix(task_name, build_variant).replace(
BURN_IN_PREFIX, "")
- return evg_api.test_stats_by_project(project_id=project_id, after_date=after_date,
- before_date=before_date, tests=test_files,
- tasks=[base_task_name], variants=[build_variant])
+ return HistoricTaskData.get_stats_from_s3(project_id, base_task_name, build_variant)
def make_stats_map(stats: List[_TestData]) -> Dict[str, List[float]]:
@@ -63,13 +58,10 @@ def make_stats_map(stats: List[_TestData]) -> Dict[str, List[float]]:
@click.command()
@click.option("--resmoke-report-file", type=str, required=True,
help="Location of resmoke's report JSON file.")
-@click.option("--evg-api-config", type=str, required=True,
- help="Location of evergreen api configuration.")
@click.option("--project-id", type=str, required=True, help="Evergreen project id.")
@click.option("--build-variant", type=str, required=True, help="Evergreen build variant name.")
@click.option("--task-name", type=str, required=True, help="Evergreen task name.")
-def main(resmoke_report_file: str, evg_api_config: str, project_id: str, build_variant: str,
- task_name: str) -> None:
+def main(resmoke_report_file: str, project_id: str, build_variant: str, task_name: str) -> None:
"""Compare resmoke tests runtime with historic stats."""
enable_logging(verbose=False)
@@ -79,10 +71,9 @@ def main(resmoke_report_file: str, evg_api_config: str, project_id: str, build_v
for test_info in current_test_infos
])
- historic_stats = get_historic_stats(evg_api_config, project_id, list(current_stats_map.keys()),
- task_name, build_variant)
+ historic_stats = get_historic_stats(project_id, task_name, build_variant)
historic_stats_map = make_stats_map([
- _TestData(test_stats.test_file, test_stats.avg_duration_pass)
+ _TestData(test_stats.test_name, test_stats.avg_duration_pass)
for test_stats in historic_stats
])
diff --git a/buildscripts/task_generation/suite_split.py b/buildscripts/task_generation/suite_split.py
index 5e1e9d32115..a65fcae68f0 100644
--- a/buildscripts/task_generation/suite_split.py
+++ b/buildscripts/task_generation/suite_split.py
@@ -201,25 +201,15 @@ class SuiteSplitService:
if self.config.default_to_fallback:
return self.calculate_fallback_suites(params)
- try:
- evg_stats = HistoricTaskData.from_evg(self.evg_api, self.config.evg_project,
- self.config.start_date, self.config.end_date,
- params.task_name, params.build_variant)
- if not evg_stats:
- LOGGER.debug("No test history, using fallback suites")
- # This is probably a new suite, since there is no test history, just use the
- # fallback values.
- return self.calculate_fallback_suites(params)
+ evg_stats = HistoricTaskData.from_s3(self.config.evg_project, params.task_name,
+ params.build_variant)
+
+ if evg_stats:
return self.calculate_suites_from_evg_stats(evg_stats, params)
- except requests.HTTPError as err:
- if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
- # Evergreen may return a 503 when the service is degraded.
- # We fall back to splitting the tests into a fixed number of suites.
- LOGGER.warning("Received 503 from Evergreen, "
- "dividing the tests evenly among suites")
- return self.calculate_fallback_suites(params)
- else:
- raise
+
+ LOGGER.debug("No test history, using fallback suites")
+    # Since there is no test history, this is probably a new suite; just use the fallback values.
+ return self.calculate_fallback_suites(params)
def calculate_fallback_suites(self, params: SuiteSplitParameters) -> GeneratedSuite:
"""Divide tests into a fixed number of suites."""
diff --git a/buildscripts/tests/task_generation/test_suite_split.py b/buildscripts/tests/task_generation/test_suite_split.py
index 7c54ed489aa..b2b197ed7c5 100644
--- a/buildscripts/tests/task_generation/test_suite_split.py
+++ b/buildscripts/tests/task_generation/test_suite_split.py
@@ -8,7 +8,7 @@ import requests
import buildscripts.task_generation.suite_split as under_test
from buildscripts.task_generation.suite_split_strategies import greedy_division, \
round_robin_fallback
-from buildscripts.util.teststats import TestRuntime
+from buildscripts.util.teststats import TestRuntime, HistoricalTestInformation
# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access
@@ -31,7 +31,12 @@ def build_mock_service(evg_api=None, split_config=None, resmoke_proxy=None):
def tst_stat_mock(file, duration, pass_count):
- return MagicMock(test_file=file, avg_duration_pass=duration, num_pass=pass_count)
+ return HistoricalTestInformation(
+ test_name=file,
+ num_pass=pass_count,
+ num_fail=0,
+ avg_duration_pass=duration,
+ )
def build_mock_split_config(target_resmoke_time=None, max_sub_suites=None):
@@ -115,15 +120,16 @@ class TestGeneratedSuite(unittest.TestCase):
class TestSplitSuite(unittest.TestCase):
- def test_calculate_suites(self):
+ @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
+ def test_calculate_suites(self, get_stats_from_s3_mock):
mock_test_stats = [tst_stat_mock(f"test{i}.js", 60, 1) for i in range(100)]
split_config = build_mock_split_config(target_resmoke_time=10)
split_params = build_mock_split_params()
suite_split_service = build_mock_service(split_config=split_config)
- suite_split_service.evg_api.test_stats_by_project.return_value = mock_test_stats
+ get_stats_from_s3_mock.return_value = mock_test_stats
suite_split_service.resmoke_proxy.list_tests.return_value = [
- stat.test_file for stat in mock_test_stats
+ stat.test_name for stat in mock_test_stats
]
suite_split_service.resmoke_proxy.read_suite_config.return_value = {}
@@ -137,32 +143,15 @@ class TestSplitSuite(unittest.TestCase):
for sub_suite in suite.sub_suites:
self.assertEqual(10, len(sub_suite.test_list))
- def test_calculate_suites_fallback_on_error(self):
- n_tests = 100
- max_sub_suites = 4
- split_config = build_mock_split_config(max_sub_suites=max_sub_suites)
- split_params = build_mock_split_params()
-
- suite_split_service = build_mock_service(split_config=split_config)
- mock_evg_error(suite_split_service.evg_api)
- suite_split_service.resmoke_proxy.list_tests.return_value = [
- f"test_{i}.js" for i in range(n_tests)
- ]
-
- suite = suite_split_service.split_suite(split_params)
-
- self.assertEqual(max_sub_suites, len(suite))
- for sub_suite in suite.sub_suites:
- self.assertEqual(n_tests / max_sub_suites, len(sub_suite.test_list))
-
- def test_calculate_suites_uses_fallback_on_no_results(self):
+ @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
+ def test_calculate_suites_uses_fallback_on_no_results(self, get_stats_from_s3_mock):
n_tests = 100
max_sub_suites = 5
split_config = build_mock_split_config(max_sub_suites=max_sub_suites)
split_params = build_mock_split_params()
suite_split_service = build_mock_service(split_config=split_config)
- suite_split_service.evg_api.test_stats_by_project.return_value = []
+ get_stats_from_s3_mock.return_value = []
suite_split_service.resmoke_proxy.list_tests.return_value = [
f"test_{i}.js" for i in range(n_tests)
]
@@ -173,7 +162,9 @@ class TestSplitSuite(unittest.TestCase):
for sub_suite in suite.sub_suites:
self.assertEqual(n_tests / max_sub_suites, len(sub_suite.test_list))
- def test_calculate_suites_uses_fallback_if_only_results_are_filtered(self):
+ @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
+ def test_calculate_suites_uses_fallback_if_only_results_are_filtered(
+ self, get_stats_from_s3_mock):
n_tests = 100
max_sub_suites = 10
mock_test_stats = [tst_stat_mock(f"test{i}.js", 60, 1) for i in range(100)]
@@ -182,7 +173,7 @@ class TestSplitSuite(unittest.TestCase):
split_params = build_mock_split_params()
suite_split_service = build_mock_service(split_config=split_config)
- suite_split_service.evg_api.test_stats_by_project.return_value = mock_test_stats
+ get_stats_from_s3_mock.return_value = mock_test_stats
suite_split_service.resmoke_proxy.list_tests.return_value = [
f"test_{i}.js" for i in range(n_tests)
]
@@ -198,31 +189,17 @@ class TestSplitSuite(unittest.TestCase):
for sub_suite in suite.sub_suites:
self.assertEqual(n_tests / max_sub_suites, len(sub_suite.test_list))
- def test_calculate_suites_fail_on_unexpected_error(self):
- n_tests = 100
- max_sub_suites = 4
- split_config = build_mock_split_config(max_sub_suites=max_sub_suites)
- split_params = build_mock_split_params()
-
- suite_split_service = build_mock_service(split_config=split_config)
- mock_evg_error(suite_split_service.evg_api, error_code=requests.codes.INTERNAL_SERVER_ERROR)
- suite_split_service.resmoke_proxy.list_tests.return_value = [
- f"test_{i}.js" for i in range(n_tests)
- ]
-
- with self.assertRaises(requests.HTTPError):
- suite_split_service.split_suite(split_params)
-
- def test_calculate_suites_will_filter_specified_tests(self):
+ @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
+ def test_calculate_suites_will_filter_specified_tests(self, get_stats_from_s3_mock):
mock_test_stats = [tst_stat_mock(f"test_{i}.js", 60, 1) for i in range(100)]
split_config = build_mock_split_config(target_resmoke_time=10)
split_params = build_mock_split_params(
test_filter=lambda t: t in {"test_1.js", "test_2.js"})
suite_split_service = build_mock_service(split_config=split_config)
- suite_split_service.evg_api.test_stats_by_project.return_value = mock_test_stats
+ get_stats_from_s3_mock.return_value = mock_test_stats
suite_split_service.resmoke_proxy.list_tests.return_value = [
- stat.test_file for stat in mock_test_stats
+ stat.test_name for stat in mock_test_stats
]
suite_split_service.resmoke_proxy.read_suite_config.return_value = {}
diff --git a/buildscripts/tests/test_burn_in_tags.py b/buildscripts/tests/test_burn_in_tags.py
index ec53a02d161..c2c20756e72 100644
--- a/buildscripts/tests/test_burn_in_tags.py
+++ b/buildscripts/tests/test_burn_in_tags.py
@@ -14,8 +14,9 @@ from buildscripts.tests.test_burn_in_tests import ns as burn_in_tests_ns
from buildscripts.ciconfig.evergreen import EvergreenProjectConfig
import buildscripts.burn_in_tags as under_test
+from buildscripts.util.teststats import HistoricalTestInformation
-# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access
+# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access,too-many-arguments
EMPTY_PROJECT = {
"buildvariants": [],
@@ -105,7 +106,9 @@ class TestGenerateEvgTasks(unittest.TestCase):
self.assertEqual(shrub_config.as_dict(), EMPTY_PROJECT)
@patch(ns("create_tests_by_task"))
- def test_generate_evg_tasks_one_test_changed(self, create_tests_by_task_mock):
+ @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
+ def test_generate_evg_tasks_one_test_changed(self, get_stats_from_s3_mock,
+ create_tests_by_task_mock):
evg_conf_mock = get_evergreen_config()
create_tests_by_task_mock.return_value = {
"aggregation_mongos_passthrough": TaskInfo(
@@ -127,8 +130,13 @@ class TestGenerateEvgTasks(unittest.TestCase):
shrub_config = ShrubProject.empty()
evergreen_api = MagicMock()
repo = MagicMock(working_dir=os.getcwd())
- evergreen_api.test_stats_by_project.return_value = [
- MagicMock(test_file="dir/test2.js", avg_duration_pass=10)
+ get_stats_from_s3_mock.return_value = [
+ HistoricalTestInformation(
+ test_name="dir/test2.js",
+ num_pass=1,
+ num_fail=0,
+ avg_duration_pass=10,
+ )
]
under_test._generate_evg_tasks(evergreen_api, shrub_config, expansions_file_data,
buildvariant_map, [repo], evg_conf_mock, 'install-dir/bin')
@@ -219,8 +227,9 @@ class TestAcceptance(unittest.TestCase):
@patch(ns("_create_evg_build_variant_map"))
@patch(ns("EvergreenFileChangeDetector"))
@patch(burn_in_tests_ns("create_test_membership_map"))
+ @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
def test_tests_generated_if_a_file_changed(
- self, create_test_membership_map_mock, find_changed_tests_mock,
+ self, get_stats_from_s3_mock, create_test_membership_map_mock, find_changed_tests_mock,
create_evg_build_variant_map_mock, write_to_file_mock):
"""
Given a git repository with changes,
@@ -236,6 +245,7 @@ class TestAcceptance(unittest.TestCase):
'jstests/slow1/large_role_chain.js',
'jstests/aggregation/accumulators/accumulator_js.js'
}
+ get_stats_from_s3_mock.return_value = []
under_test.burn_in(EXPANSIONS_FILE_DATA, evg_conf, MagicMock(), repos, 'install_dir/bin')
diff --git a/buildscripts/tests/test_evergreen_burn_in_tests.py b/buildscripts/tests/test_evergreen_burn_in_tests.py
index ee77ced2579..3ea78039930 100644
--- a/buildscripts/tests/test_evergreen_burn_in_tests.py
+++ b/buildscripts/tests/test_evergreen_burn_in_tests.py
@@ -89,7 +89,8 @@ class TestAcceptance(unittest.TestCase):
@unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
@patch(ns("write_file"))
- def test_tests_generated_if_a_file_changed(self, write_json_mock):
+ @patch(ns("HistoricTaskData.get_stats_from_s3"))
+ def test_tests_generated_if_a_file_changed(self, get_stats_from_s3_mock, write_json_mock):
"""
Given a git repository with changes,
When burn_in_tests is run,
@@ -108,6 +109,7 @@ class TestAcceptance(unittest.TestCase):
) # yapf: disable
mock_evg_conf = get_evergreen_config("etc/evergreen.yml")
mock_evg_api = MagicMock()
+ get_stats_from_s3_mock.return_value = []
under_test.burn_in("task_id", variant, gen_config, repeat_config, mock_evg_api,
mock_evg_conf, repos, "testfile.json", 'install-dir/bin')
@@ -241,41 +243,30 @@ class TestGenerateTimeouts(unittest.TestCase):
class TestGetTaskRuntimeHistory(unittest.TestCase):
- def test_get_task_runtime_history(self):
- mock_evg_api = MagicMock()
- mock_evg_api.test_stats_by_project.return_value = [
- MagicMock(
- test_file="dir/test2.js",
- task_name="task1",
- variant="variant1",
- distro="distro1",
- date=datetime.utcnow().date(),
+ @patch(ns("HistoricTaskData.get_stats_from_s3"))
+ def test_get_task_runtime_history(self, get_stats_from_s3_mock):
+ test_stats = [
+ teststats_utils.HistoricalTestInformation(
+ test_name="dir/test2.js",
num_pass=1,
num_fail=0,
avg_duration_pass=10.1,
)
]
- analysis_duration = under_test.AVG_TEST_RUNTIME_ANALYSIS_DAYS
- end_date = datetime.utcnow().replace(microsecond=0)
- start_date = end_date - timedelta(days=analysis_duration)
+ get_stats_from_s3_mock.return_value = test_stats
mock_gen_config = MagicMock(project="project1", build_variant="variant1")
- executor = under_test.GenerateBurnInExecutor(mock_gen_config, MagicMock(), mock_evg_api,
- history_end_date=end_date)
+ executor = under_test.GenerateBurnInExecutor(mock_gen_config, MagicMock())
result = executor.get_task_runtime_history("task1")
self.assertEqual(result, [("dir/test2.js", 10.1)])
- mock_evg_api.test_stats_by_project.assert_called_with(
- "project1", after_date=start_date, before_date=end_date, group_by="test",
- group_num_days=14, tasks=["task1"], variants=["variant1"])
- def test_get_task_runtime_history_evg_degraded_mode_error(self):
- mock_response = MagicMock(status_code=requests.codes.SERVICE_UNAVAILABLE)
- mock_evg_api = MagicMock()
- mock_evg_api.test_stats_by_project.side_effect = requests.HTTPError(response=mock_response)
+ @patch(ns("HistoricTaskData.get_stats_from_s3"))
+ def test_get_task_runtime_history_when_s3_has_no_data(self, get_stats_from_s3_mock):
+ get_stats_from_s3_mock.return_value = []
mock_gen_config = MagicMock(project="project1", build_variant="variant1")
- executor = under_test.GenerateBurnInExecutor(mock_gen_config, MagicMock(), mock_evg_api)
+ executor = under_test.GenerateBurnInExecutor(mock_gen_config, MagicMock())
result = executor.get_task_runtime_history("task1")
self.assertEqual(result, [])
@@ -321,7 +312,8 @@ class TestCreateGenerateTasksConfig(unittest.TestCase):
self.assertEqual(0, len(evg_config_dict["tasks"]))
@unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
- def test_one_task_one_test(self):
+ @patch(ns("HistoricTaskData.get_stats_from_s3"))
+ def test_one_task_one_test(self, get_stats_from_s3_mock):
n_tasks = 1
n_tests = 1
resmoke_options = "options for resmoke"
@@ -329,10 +321,10 @@ class TestCreateGenerateTasksConfig(unittest.TestCase):
gen_config = MagicMock(run_build_variant="variant", distro=None)
repeat_config = MagicMock()
repeat_config.generate_resmoke_options.return_value = resmoke_options
- mock_evg_api = MagicMock()
tests_by_task = create_tests_by_task_mock(n_tasks, n_tests)
+ get_stats_from_s3_mock.return_value = []
- executor = under_test.GenerateBurnInExecutor(gen_config, repeat_config, mock_evg_api)
+ executor = under_test.GenerateBurnInExecutor(gen_config, repeat_config)
executor.generate_tasks_for_variant(tests_by_task, build_variant)
shrub_config = ShrubProject.empty().add_build_variant(build_variant)
@@ -345,16 +337,17 @@ class TestCreateGenerateTasksConfig(unittest.TestCase):
self.assertIn("tests_0", cmd[2]["vars"]["resmoke_args"])
@unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
- def test_n_task_m_test(self):
+ @patch(ns("HistoricTaskData.get_stats_from_s3"))
+ def test_n_task_m_test(self, get_stats_from_s3_mock):
n_tasks = 3
n_tests = 5
build_variant = BuildVariant("build variant")
gen_config = MagicMock(run_build_variant="variant", distro=None)
repeat_config = MagicMock()
tests_by_task = create_tests_by_task_mock(n_tasks, n_tests)
- mock_evg_api = MagicMock()
+ get_stats_from_s3_mock.return_value = []
- executor = under_test.GenerateBurnInExecutor(gen_config, repeat_config, mock_evg_api)
+ executor = under_test.GenerateBurnInExecutor(gen_config, repeat_config)
executor.generate_tasks_for_variant(tests_by_task, build_variant)
evg_config_dict = build_variant.as_dict()
@@ -369,14 +362,12 @@ class TestCreateGenerateTasksFile(unittest.TestCase):
gen_config = MagicMock(require_multiversion_setup=False)
repeat_config = MagicMock()
tests_by_task = MagicMock()
- mock_evg_api = MagicMock()
validate_mock.return_value = False
exit_mock.side_effect = ValueError("exiting")
with self.assertRaises(ValueError):
- executor = under_test.GenerateBurnInExecutor(gen_config, repeat_config, mock_evg_api,
- "gen_file.json")
+ executor = under_test.GenerateBurnInExecutor(gen_config, repeat_config, "gen_file.json")
executor.execute(tests_by_task)
exit_mock.assert_called_once()
diff --git a/buildscripts/tests/test_selected_tests.py b/buildscripts/tests/test_selected_tests.py
index 70689345b69..6a407983a9f 100644
--- a/buildscripts/tests/test_selected_tests.py
+++ b/buildscripts/tests/test_selected_tests.py
@@ -24,7 +24,7 @@ from buildscripts.task_generation.task_types.gentask_options import GenTaskOptio
from buildscripts.tests.test_burn_in_tests import get_evergreen_config, mock_changed_git_files
from buildscripts import selected_tests as under_test
-# pylint: disable=missing-docstring,invalid-name,unused-argument,protected-access,no-value-for-parameter
+# pylint: disable=missing-docstring,invalid-name,unused-argument,protected-access,no-value-for-parameter,too-many-locals
NS = "buildscripts.selected_tests"
@@ -101,7 +101,9 @@ class TestAcceptance(unittest.TestCase):
self.assertEqual(generated_config.file_list[0].file_name, "selected_tests_config.json")
@unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
- def test_when_test_mappings_are_found_for_changed_files(self):
+ @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
+ def test_when_test_mappings_are_found_for_changed_files(self, get_stats_from_s3_mock):
+ get_stats_from_s3_mock.return_value = []
mock_evg_api = self._mock_evg_api()
mock_evg_config = get_evergreen_config("etc/evergreen.yml")
mock_evg_expansions = under_test.EvgExpansions(
@@ -149,7 +151,9 @@ class TestAcceptance(unittest.TestCase):
self.assertEqual(len(rhel_80_with_generated_tasks["tasks"]), 2)
@unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
- def test_when_task_mappings_are_found_for_changed_files(self):
+ @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
+ def test_when_task_mappings_are_found_for_changed_files(self, get_stats_from_s3_mock):
+ get_stats_from_s3_mock.return_value = []
mock_evg_api = self._mock_evg_api()
mock_evg_config = get_evergreen_config("etc/evergreen.yml")
mock_evg_expansions = under_test.EvgExpansions(
diff --git a/buildscripts/tests/timeouts/test_timeout_service.py b/buildscripts/tests/timeouts/test_timeout_service.py
index bb0dd8a0c3e..bb0550659c6 100644
--- a/buildscripts/tests/timeouts/test_timeout_service.py
+++ b/buildscripts/tests/timeouts/test_timeout_service.py
@@ -1,41 +1,42 @@
"""Unit tests for timeout_service.py."""
import random
import unittest
-from datetime import datetime, timedelta
-from unittest.mock import MagicMock
+from unittest.mock import MagicMock, patch
from requests.exceptions import HTTPError
-from evergreen import EvergreenApi
import buildscripts.timeouts.timeout_service as under_test
from buildscripts.task_generation.resmoke_proxy import ResmokeProxyService
-from buildscripts.util.teststats import HistoricTaskData
+from buildscripts.util.teststats import HistoricTaskData, HistoricTestInfo
# pylint: disable=missing-docstring,no-self-use,invalid-name,protected-access
+NS = "buildscripts.timeouts.timeout_service"
-def build_mock_service(evg_api=None, resmoke_proxy=None):
- end_date = datetime.now()
- start_date = end_date - timedelta(weeks=2)
- timeout_settings = under_test.TimeoutSettings(
- end_date=end_date,
- start_date=start_date,
- )
+
+def ns(relative_name): # pylint: disable=invalid-name
+ """Return a full name from a name relative to the test module"s name space."""
+ return NS + "." + relative_name
+
+
+def build_mock_service(resmoke_proxy=None):
return under_test.TimeoutService(
- evg_api=evg_api if evg_api else MagicMock(spec_set=EvergreenApi),
- resmoke_proxy=resmoke_proxy if resmoke_proxy else MagicMock(spec_set=ResmokeProxyService),
- timeout_settings=timeout_settings)
+ resmoke_proxy=resmoke_proxy if resmoke_proxy else MagicMock(spec_set=ResmokeProxyService))
def tst_stat_mock(file, duration, pass_count):
- return MagicMock(test_file=file, avg_duration_pass=duration, num_pass=pass_count)
+ return MagicMock(test_name=file, avg_duration_pass=duration, num_pass=pass_count, hooks=[])
+
+
+def tst_runtime_mock(file, duration, pass_count):
+ return MagicMock(test_name=file, avg_duration_pass=duration, num_pass=pass_count)
class TestGetTimeoutEstimate(unittest.TestCase):
- def test_no_stats_should_return_default_timeout(self):
- mock_evg_api = MagicMock(spec_set=EvergreenApi)
- mock_evg_api.test_stats_by_project.return_value = []
- timeout_service = build_mock_service(evg_api=mock_evg_api)
+ @patch(ns("HistoricTaskData.from_s3"))
+ def test_no_stats_should_return_default_timeout(self, from_s3_mock: MagicMock):
+ timeout_service = build_mock_service()
+ from_s3_mock.return_value = []
timeout_params = under_test.TimeoutParams(
evg_project="my project",
build_variant="bv",
@@ -48,13 +49,17 @@ class TestGetTimeoutEstimate(unittest.TestCase):
self.assertFalse(timeout.is_specified())
- def test_a_test_with_missing_history_should_cause_a_default_timeout(self):
- mock_evg_api = MagicMock(spec_set=EvergreenApi)
- test_stats = [tst_stat_mock(f"test_{i}.js", 60, 1) for i in range(30)]
- mock_evg_api.test_stats_by_project.return_value = test_stats
+ @patch(ns("HistoricTaskData.from_s3"))
+ def test_a_test_with_missing_history_should_cause_a_default_timeout(
+ self, from_s3_mock: MagicMock):
+ test_stats = [
+ HistoricTestInfo(test_name=f"test_{i}.js", avg_duration=60, num_pass=1, hooks=[])
+ for i in range(30)
+ ]
+ from_s3_mock.return_value = HistoricTaskData(test_stats)
mock_resmoke_proxy = MagicMock(spec_set=ResmokeProxyService)
mock_resmoke_proxy.list_tests.return_value = ["test_with_no_stats.js"]
- timeout_service = build_mock_service(evg_api=mock_evg_api, resmoke_proxy=mock_resmoke_proxy)
+ timeout_service = build_mock_service(resmoke_proxy=mock_resmoke_proxy)
timeout_params = under_test.TimeoutParams(
evg_project="my project",
build_variant="bv",
@@ -67,14 +72,19 @@ class TestGetTimeoutEstimate(unittest.TestCase):
self.assertFalse(timeout.is_specified())
- def test_a_test_with_zero_runtime_history_should_cause_a_default_timeout(self):
- mock_evg_api = MagicMock(spec_set=EvergreenApi)
- test_stats = [tst_stat_mock(f"test_{i}.js", 60, 1) for i in range(30)]
- test_stats.append(tst_stat_mock("zero.js", 0.0, 1))
- mock_evg_api.test_stats_by_project.return_value = test_stats
+ @patch(ns("HistoricTaskData.from_s3"))
+ def test_a_test_with_zero_runtime_history_should_cause_a_default_timeout(
+ self, from_s3_mock: MagicMock):
+ test_stats = [
+ HistoricTestInfo(test_name=f"test_{i}.js", avg_duration=60, num_pass=1, hooks=[])
+ for i in range(30)
+ ]
+ test_stats.append(
+ HistoricTestInfo(test_name="zero.js", avg_duration=0.0, num_pass=1, hooks=[]))
+ from_s3_mock.return_value = HistoricTaskData(test_stats)
mock_resmoke_proxy = MagicMock(spec_set=ResmokeProxyService)
- mock_resmoke_proxy.list_tests.return_value = [ts.test_file for ts in test_stats]
- timeout_service = build_mock_service(evg_api=mock_evg_api, resmoke_proxy=mock_resmoke_proxy)
+ mock_resmoke_proxy.list_tests.return_value = [ts.test_name for ts in test_stats]
+ timeout_service = build_mock_service(resmoke_proxy=mock_resmoke_proxy)
timeout_params = under_test.TimeoutParams(
evg_project="my project",
build_variant="bv",
@@ -87,15 +97,19 @@ class TestGetTimeoutEstimate(unittest.TestCase):
self.assertFalse(timeout.is_specified())
- def test_all_tests_with_runtime_history_should_use_custom_timeout(self):
- mock_evg_api = MagicMock(spec_set=EvergreenApi)
+ @patch(ns("HistoricTaskData.from_s3"))
+ def test_all_tests_with_runtime_history_should_use_custom_timeout(self,
+ from_s3_mock: MagicMock):
n_tests = 30
test_runtime = 600
- test_stats = [tst_stat_mock(f"test_{i}.js", test_runtime, 1) for i in range(n_tests)]
- mock_evg_api.test_stats_by_project.return_value = test_stats
+ test_stats = [
+ HistoricTestInfo(test_name=f"test_{i}.js", avg_duration=test_runtime, num_pass=1,
+ hooks=[]) for i in range(n_tests)
+ ]
+ from_s3_mock.return_value = HistoricTaskData(test_stats)
mock_resmoke_proxy = MagicMock(spec_set=ResmokeProxyService)
- mock_resmoke_proxy.list_tests.return_value = [ts.test_file for ts in test_stats]
- timeout_service = build_mock_service(evg_api=mock_evg_api, resmoke_proxy=mock_resmoke_proxy)
+ mock_resmoke_proxy.list_tests.return_value = [ts.test_name for ts in test_stats]
+ timeout_service = build_mock_service(resmoke_proxy=mock_resmoke_proxy)
timeout_params = under_test.TimeoutParams(
evg_project="my project",
build_variant="bv",
@@ -149,10 +163,10 @@ class TestGetTaskHookOverhead(unittest.TestCase):
class TestLookupHistoricStats(unittest.TestCase):
- def test_no_stats_from_evergreen_should_return_none(self):
- mock_evg_api = MagicMock(spec_set=EvergreenApi)
- mock_evg_api.test_stats_by_project.return_value = []
- timeout_service = build_mock_service(evg_api=mock_evg_api)
+ @patch(ns("HistoricTaskData.from_s3"))
+ def test_no_stats_from_evergreen_should_return_none(self, from_s3_mock: MagicMock):
+ from_s3_mock.return_value = None
+ timeout_service = build_mock_service()
timeout_params = under_test.TimeoutParams(
evg_project="my project",
build_variant="bv",
@@ -165,10 +179,10 @@ class TestLookupHistoricStats(unittest.TestCase):
self.assertIsNone(stats)
- def test_errors_from_evergreen_should_return_none(self):
- mock_evg_api = MagicMock(spec_set=EvergreenApi)
- mock_evg_api.test_stats_by_project.side_effect = HTTPError("failed to connect")
- timeout_service = build_mock_service(evg_api=mock_evg_api)
+ @patch(ns("HistoricTaskData.from_s3"))
+ def test_errors_from_evergreen_should_return_none(self, from_s3_mock: MagicMock):
+ from_s3_mock.side_effect = HTTPError("failed to connect")
+ timeout_service = build_mock_service()
timeout_params = under_test.TimeoutParams(
evg_project="my project",
build_variant="bv",
@@ -181,11 +195,11 @@ class TestLookupHistoricStats(unittest.TestCase):
self.assertIsNone(stats)
- def test_stats_from_evergreen_should_return_the_stats(self):
- mock_evg_api = MagicMock(spec_set=EvergreenApi)
+ @patch(ns("HistoricTaskData.from_s3"))
+ def test_stats_from_evergreen_should_return_the_stats(self, from_s3_mock: MagicMock):
test_stats = [tst_stat_mock(f"test_{i}.js", 60, 1) for i in range(100)]
- mock_evg_api.test_stats_by_project.return_value = test_stats
- timeout_service = build_mock_service(evg_api=mock_evg_api)
+ from_s3_mock.return_value = HistoricTaskData(test_stats)
+ timeout_service = build_mock_service()
timeout_params = under_test.TimeoutParams(
evg_project="my project",
build_variant="bv",
diff --git a/buildscripts/tests/util/test_teststats.py b/buildscripts/tests/util/test_teststats.py
index ebba930d032..4c54952368c 100644
--- a/buildscripts/tests/util/test_teststats.py
+++ b/buildscripts/tests/util/test_teststats.py
@@ -80,7 +80,7 @@ class TestHistoricTaskData(unittest.TestCase):
@staticmethod
def _make_evg_result(test_file="dir/test1.js", num_pass=0, duration=0):
return Mock(
- test_file=test_file,
+ test_name=test_file,
task_name="task1",
variant="variant1",
distro="distro1",
diff --git a/buildscripts/timeouts/timeout_service.py b/buildscripts/timeouts/timeout_service.py
index 8c0d5ad58cd..68238010092 100644
--- a/buildscripts/timeouts/timeout_service.py
+++ b/buildscripts/timeouts/timeout_service.py
@@ -1,5 +1,4 @@
"""Service for determining task timeouts."""
-from datetime import datetime
from typing import Any, Dict, NamedTuple, Optional
import inject
@@ -31,29 +30,17 @@ class TimeoutParams(NamedTuple):
is_asan: bool
-class TimeoutSettings(NamedTuple):
- """Settings for determining timeouts."""
-
- start_date: datetime
- end_date: datetime
-
-
class TimeoutService:
"""A service for determining task timeouts."""
@inject.autoparams()
- def __init__(self, evg_api: EvergreenApi, resmoke_proxy: ResmokeProxyService,
- timeout_settings: TimeoutSettings) -> None:
+ def __init__(self, resmoke_proxy: ResmokeProxyService) -> None:
"""
Initialize the service.
- :param evg_api: Evergreen API client.
:param resmoke_proxy: Proxy to query resmoke.
- :param timeout_settings: Settings for how timeouts are calculated.
"""
- self.evg_api = evg_api
self.resmoke_proxy = resmoke_proxy
- self.timeout_settings = timeout_settings
def get_timeout_estimate(self, timeout_params: TimeoutParams) -> TimeoutEstimate:
"""
@@ -129,7 +116,8 @@ class TimeoutService:
return n_expected_runs * avg_clean_every_n_runtime
return 0.0
- def lookup_historic_stats(self, timeout_params: TimeoutParams) -> Optional[HistoricTaskData]:
+ @staticmethod
+ def lookup_historic_stats(timeout_params: TimeoutParams) -> Optional[HistoricTaskData]:
"""
Lookup historic test results stats for the given task.
@@ -137,10 +125,8 @@ class TimeoutService:
:return: Historic test results if they exist.
"""
try:
- evg_stats = HistoricTaskData.from_evg(
- self.evg_api, timeout_params.evg_project, self.timeout_settings.start_date,
- self.timeout_settings.end_date, timeout_params.task_name,
- timeout_params.build_variant)
+ evg_stats = HistoricTaskData.from_s3(
+ timeout_params.evg_project, timeout_params.task_name, timeout_params.build_variant)
if not evg_stats:
LOGGER.warning("No historic runtime information available")
return None
diff --git a/buildscripts/util/teststats.py b/buildscripts/util/teststats.py
index a52fa3c79a4..bc941917c2b 100644
--- a/buildscripts/util/teststats.py
+++ b/buildscripts/util/teststats.py
@@ -1,15 +1,33 @@
"""Utility to support parsing a TestStat."""
from collections import defaultdict
from dataclasses import dataclass
-from datetime import datetime
from itertools import chain
-from typing import NamedTuple, List, Callable, Optional
+from json import JSONDecodeError
-from evergreen import EvergreenApi, TestStats
+from typing import NamedTuple, List, Callable, Optional
+import requests
+from requests.adapters import HTTPAdapter, Retry
from buildscripts.util.testname import split_test_hook_name, is_resmoke_hook, get_short_name_from_test_file
TASK_LEVEL_HOOKS = {"CleanEveryN"}
+TESTS_STATS_S3_LOCATION = "https://mongo-test-stats.s3.amazonaws.com"
+
+
+class HistoricalTestInformation(NamedTuple):
+ """
+ Container for information about the historical runtime of a test.
+
+ test_name: Name of test.
+ avg_duration_pass: Average of runtime of test that passed.
+ num_pass: Number of times the test has passed.
+ num_fail: Number of times the test has failed.
+ """
+
+ test_name: str
+ num_pass: int
+ num_fail: int
+ avg_duration_pass: float
class TestRuntime(NamedTuple):
@@ -74,9 +92,9 @@ class HistoricHookInfo(NamedTuple):
avg_duration: float
@classmethod
- def from_test_stats(cls, test_stats: TestStats) -> "HistoricHookInfo":
+ def from_test_stats(cls, test_stats: HistoricalTestInformation) -> "HistoricHookInfo":
"""Create an instance from a test_stats object."""
- return cls(hook_id=test_stats.test_file, num_pass=test_stats.num_pass,
+ return cls(hook_id=test_stats.test_name, num_pass=test_stats.num_pass,
avg_duration=test_stats.avg_duration_pass)
def test_name(self) -> str:
@@ -101,10 +119,10 @@ class HistoricTestInfo(NamedTuple):
hooks: List[HistoricHookInfo]
@classmethod
- def from_test_stats(cls, test_stats: TestStats,
+ def from_test_stats(cls, test_stats: HistoricalTestInformation,
hooks: List[HistoricHookInfo]) -> "HistoricTestInfo":
"""Create an instance from a test_stats object."""
- return cls(test_name=test_stats.test_file, num_pass=test_stats.num_pass,
+ return cls(test_name=test_stats.test_name, num_pass=test_stats.num_pass,
avg_duration=test_stats.avg_duration_pass, hooks=hooks)
def normalized_test_name(self) -> str:
@@ -137,46 +155,59 @@ class HistoricTaskData(object):
"""Initialize the TestStats with raw results from the Evergreen API."""
self.historic_test_results = historic_test_results
- # pylint: disable=too-many-arguments
+ @staticmethod
+ def get_stats_from_s3(project: str, task: str, variant: str) -> List[HistoricalTestInformation]:
+ """
+ Retrieve test stats from s3 for a given task.
+
+ :param project: Project to query.
+ :param task: Task to query.
+ :param variant: Build variant to query.
+ :return: A list of the Test stats for the specified task.
+ """
+ session = requests.Session()
+ retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
+ session.mount('https://', HTTPAdapter(max_retries=retries))
+
+ response = session.get(f"{TESTS_STATS_S3_LOCATION}/{project}/{variant}/{task}")
+
+ try:
+ data = response.json()
+ return [HistoricalTestInformation(**item) for item in data]
+ except JSONDecodeError:
+ return []
+
@classmethod
- def from_evg(cls, evg_api: EvergreenApi, project: str, start_date: datetime, end_date: datetime,
- task: str, variant: str) -> "HistoricTaskData":
+ def from_s3(cls, project: str, task: str, variant: str) -> "HistoricTaskData":
"""
- Retrieve test stats from evergreen for a given task.
+ Retrieve test stats from s3 for a given task.
- :param evg_api: Evergreen API client.
:param project: Project to query.
- :param start_date: Start date to query.
- :param end_date: End date to query.
:param task: Task to query.
:param variant: Build variant to query.
:return: Test stats for the specified task.
"""
- days = (end_date - start_date).days
- historic_stats = evg_api.test_stats_by_project(
- project, after_date=start_date, before_date=end_date, tasks=[task], variants=[variant],
- group_by="test", group_num_days=days)
-
- return cls.from_stats_list(historic_stats)
+ historical_test_data = cls.get_stats_from_s3(project, task, variant)
+ return cls.from_stats_list(historical_test_data)
@classmethod
- def from_stats_list(cls, historic_stats: List[TestStats]) -> "HistoricTaskData":
+ def from_stats_list(
+ cls, historical_test_data: List[HistoricalTestInformation]) -> "HistoricTaskData":
"""
Build historic task data from a list of historic stats.
- :param historic_stats: List of historic stats to build from.
+ :param historical_test_data: A list of information about the runtime of a test.
:return: Historic task data from the list of stats.
"""
-
hooks = defaultdict(list)
- for hook in [stat for stat in historic_stats if is_resmoke_hook(stat.test_file)]:
+ for hook in [stat for stat in historical_test_data if is_resmoke_hook(stat.test_name)]:
historical_hook = HistoricHookInfo.from_test_stats(hook)
hooks[historical_hook.test_name()].append(historical_hook)
return cls([
HistoricTestInfo.from_test_stats(stat,
- hooks[get_short_name_from_test_file(stat.test_file)])
- for stat in historic_stats if not is_resmoke_hook(stat.test_file)
+ hooks[get_short_name_from_test_file(stat.test_name)])
+ for stat in historical_test_data if not is_resmoke_hook(stat.test_name)
])
def get_tests_runtimes(self) -> List[TestRuntime]: