author    | Mikhail Shchatko <mikhail.shchatko@mongodb.com> | 2022-11-23 19:30:30 +0200
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2022-11-24 14:26:39 +0000
commit    | 079f0696a4693b6a65275d5d307b9f598df9669c (patch)
tree      | 3393e98db95e8716d7921f6a808962f0c71e3e8f
parent    | b2fa75ac0b97688390df6ac7e2e8b0325e21e454 (diff)
download  | mongo-079f0696a4693b6a65275d5d307b9f598df9669c.tar.gz
SERVER-71533 Update legacy task generation to use new test stats location
-rw-r--r-- | buildscripts/burn_in_tags.py                            |  2
-rw-r--r-- | buildscripts/evergreen_burn_in_tests.py                 | 30
-rw-r--r-- | buildscripts/task_generation/suite_split.py             | 26
-rw-r--r-- | buildscripts/tests/task_generation/test_suite_split.py | 67
-rw-r--r-- | buildscripts/tests/test_burn_in_tags.py                 | 20
-rw-r--r-- | buildscripts/tests/test_evergreen_burn_in_tests.py      | 49
-rw-r--r-- | buildscripts/tests/test_selected_tests.py               | 10
-rw-r--r-- | buildscripts/tests/util/test_teststats.py               |  2
-rw-r--r-- | buildscripts/util/teststats.py                          | 82
9 files changed, 137 insertions, 151 deletions
diff --git a/buildscripts/burn_in_tags.py b/buildscripts/burn_in_tags.py
index 92fb5c2b890..450b82648a4 100644
--- a/buildscripts/burn_in_tags.py
+++ b/buildscripts/burn_in_tags.py
@@ -163,7 +163,7 @@ def _generate_evg_tasks(evergreen_api: EvergreenApi, shrub_project: ShrubProject
         repeat_tests_max=config_options.repeat_tests_max,
         repeat_tests_secs=config_options.repeat_tests_secs)
 
-    burn_in_generator = GenerateBurnInExecutor(gen_config, repeat_config, evergreen_api)
+    burn_in_generator = GenerateBurnInExecutor(gen_config, repeat_config)
     burn_in_generator.add_config_for_build_variant(shrub_build_variant, tests_by_task)
 
     shrub_project.add_build_variant(shrub_build_variant)
diff --git a/buildscripts/evergreen_burn_in_tests.py b/buildscripts/evergreen_burn_in_tests.py
index 387f11b3333..97671901b41 100644
--- a/buildscripts/evergreen_burn_in_tests.py
+++ b/buildscripts/evergreen_burn_in_tests.py
@@ -32,7 +32,6 @@ EVERGREEN_FILE = "etc/evergreen.yml"
 BURN_IN_TESTS_GEN_TASK = "burn_in_tests_gen"
 BURN_IN_TESTS_TASK = "burn_in_tests"
 TASK_WITH_ARTIFACTS = "archive_dist_test_debug"
-AVG_TEST_RUNTIME_ANALYSIS_DAYS = 14
 AVG_TEST_SETUP_SEC = 4 * 60
 AVG_TEST_TIME_MULTIPLIER = 3
 MIN_AVG_TEST_OVERFLOW_SEC = float(60)
@@ -274,23 +273,17 @@ class GenerateBurnInExecutor(BurnInExecutor):
 
     # pylint: disable=too-many-arguments
     def __init__(self, generate_config: GenerateConfig, repeat_config: RepeatConfig,
-                 evg_api: EvergreenApi, generate_tasks_file: Optional[str] = None,
-                 history_end_date: Optional[datetime] = None) -> None:
+                 generate_tasks_file: Optional[str] = None) -> None:
         """
         Create a new generate burn-in executor.
 
         :param generate_config: Configuration for how to generate tasks.
         :param repeat_config: Configuration for how tests should be repeated.
-        :param evg_api: Evergreen API client.
         :param generate_tasks_file: File to write generated task configuration to.
-        :param history_end_date: End date of range to query for historic test data.
         """
         self.generate_config = generate_config
         self.repeat_config = repeat_config
-        self.evg_api = evg_api
         self.generate_tasks_file = generate_tasks_file
-        self.history_end_date = history_end_date if history_end_date else datetime.utcnow()\
-            .replace(microsecond=0)
 
     def get_task_runtime_history(self, task: str) -> List[TestRuntime]:
         """
@@ -299,21 +292,10 @@ class GenerateBurnInExecutor(BurnInExecutor):
         :param task: Task to query.
         :return: List of runtime histories for all tests in specified task.
         """
-        try:
-            project = self.generate_config.project
-            variant = self.generate_config.build_variant
-            end_date = self.history_end_date
-            start_date = end_date - timedelta(days=AVG_TEST_RUNTIME_ANALYSIS_DAYS)
-            test_stats = HistoricTaskData.from_evg(self.evg_api, project, start_date=start_date,
-                                                   end_date=end_date, task=task, variant=variant)
-            return test_stats.get_tests_runtimes()
-        except requests.HTTPError as err:
-            if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
-                # Evergreen may return a 503 when the service is degraded.
-                # We fall back to returning no test history
-                return []
-            else:
-                raise
+        project = self.generate_config.project
+        variant = self.generate_config.build_variant
+        test_stats = HistoricTaskData.from_s3(project, task, variant)
+        return test_stats.get_tests_runtimes()
 
     def create_generated_tasks(self, tests_by_task: Dict[str, TaskInfo]) -> Set[Task]:
         """
@@ -400,7 +382,7 @@ def burn_in(task_id: str, build_variant: str, generate_config: GenerateConfig,
     :param install_dir: Path to bin directory of a testable installation
     """
     change_detector = EvergreenFileChangeDetector(task_id, evg_api)
-    executor = GenerateBurnInExecutor(generate_config, repeat_config, evg_api, generate_tasks_file)
+    executor = GenerateBurnInExecutor(generate_config, repeat_config, generate_tasks_file)
     burn_in_orchestrator = BurnInOrchestrator(change_detector, executor, evg_conf)
     burn_in_orchestrator.burn_in(repos, build_variant, install_dir)
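Note: with the two hunks above, GenerateBurnInExecutor no longer needs an Evergreen API client or a history date range. A minimal sketch of the new call pattern, mirroring the updated unit tests further down (the project/variant names, task name, and output path are hypothetical placeholders):

```python
from unittest.mock import MagicMock, patch

from buildscripts.evergreen_burn_in_tests import GenerateBurnInExecutor

# Stub the S3 fetch the same way the updated tests do.
with patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3",
           return_value=[]):
    gen_config = MagicMock(project="project1", build_variant="variant1")
    executor = GenerateBurnInExecutor(gen_config, MagicMock(),
                                      generate_tasks_file="burn_in.json")
    # No evg_api argument and no date window: history now comes from S3.
    print(executor.get_task_runtime_history("task1"))  # -> []
```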
diff --git a/buildscripts/task_generation/suite_split.py b/buildscripts/task_generation/suite_split.py
index e1bfbf1e6c2..47268b93e80 100644
--- a/buildscripts/task_generation/suite_split.py
+++ b/buildscripts/task_generation/suite_split.py
@@ -290,25 +290,15 @@ class SuiteSplitService:
         if self.config.default_to_fallback:
             return self.calculate_fallback_suites(params)
 
-        try:
-            evg_stats = HistoricTaskData.from_evg(self.evg_api, self.config.evg_project,
-                                                  self.config.start_date, self.config.end_date,
-                                                  params.task_name, params.build_variant)
-            if not evg_stats:
-                LOGGER.debug("No test history, using fallback suites")
-                # This is probably a new suite, since there is no test history, just use the
-                # fallback values.
-                return self.calculate_fallback_suites(params)
+        evg_stats = HistoricTaskData.from_s3(self.config.evg_project, params.task_name,
+                                             params.build_variant)
+
+        if evg_stats:
             return self.calculate_suites_from_evg_stats(evg_stats, params)
-        except requests.HTTPError as err:
-            if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
-                # Evergreen may return a 503 when the service is degraded.
-                # We fall back to splitting the tests into a fixed number of suites.
-                LOGGER.warning("Received 503 from Evergreen, "
-                               "dividing the tests evenly among suites")
-                return self.calculate_fallback_suites(params)
-            else:
-                raise
+
+        LOGGER.debug("No test history, using fallback suites")
+        # Since there is no test history this is probably a new suite, just use the fallback values.
+        return self.calculate_fallback_suites(params)
 
     def calculate_fallback_suites(self, params: SuiteSplitParameters) -> GeneratedSuite:
         """Divide tests into a fixed number of suites."""
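Note: the 503-handling branch disappears here because retrying now happens inside the stats fetch itself (see the buildscripts/util/teststats.py hunks below). The fallback path simply divides the test list evenly, which the updated tests below assert as n_tests / max_sub_suites per sub-suite. A standalone sketch of such an even split (the helper name is hypothetical, not the repo's implementation):

```python
from typing import List

def divide_evenly(tests: List[str], n_suites: int) -> List[List[str]]:
    """Round-robin tests into n_suites buckets of near-equal size."""
    buckets: List[List[str]] = [[] for _ in range(n_suites)]
    for i, test in enumerate(tests):
        buckets[i % n_suites].append(test)
    return buckets

# 100 tests over 5 suites -> 20 per suite, matching the test assertions below.
assert all(len(b) == 20
           for b in divide_evenly([f"test_{i}.js" for i in range(100)], 5))
```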
diff --git a/buildscripts/tests/task_generation/test_suite_split.py b/buildscripts/tests/task_generation/test_suite_split.py
index 95536d111a3..0ff5574632d 100644
--- a/buildscripts/tests/task_generation/test_suite_split.py
+++ b/buildscripts/tests/task_generation/test_suite_split.py
@@ -8,7 +8,7 @@ import requests
 import buildscripts.task_generation.suite_split as under_test
 from buildscripts.task_generation.suite_split_strategies import greedy_division, \
     round_robin_fallback
-from buildscripts.util.teststats import TestRuntime
+from buildscripts.util.teststats import TestRuntime, HistoricalTestInformation
 
 # pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access
 
@@ -31,7 +31,12 @@ def build_mock_service(evg_api=None, split_config=None, resmoke_proxy=None):
 
 
 def tst_stat_mock(file, duration, pass_count):
-    return MagicMock(test_file=file, avg_duration_pass=duration, num_pass=pass_count)
+    return HistoricalTestInformation(
+        test_name=file,
+        num_pass=pass_count,
+        num_fail=0,
+        avg_duration_pass=duration,
+    )
 
 
 def build_mock_split_config(target_resmoke_time=None, max_sub_suites=None):
@@ -120,15 +125,16 @@ class TestGeneratedSuite(unittest.TestCase):
 
 
 class TestSplitSuite(unittest.TestCase):
-    def test_calculate_suites(self):
+    @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
+    def test_calculate_suites(self, get_stats_from_s3_mock):
         mock_test_stats = [tst_stat_mock(f"test{i}.js", 60, 1) for i in range(100)]
         split_config = build_mock_split_config(target_resmoke_time=10)
         split_params = build_mock_split_params()
 
         suite_split_service = build_mock_service(split_config=split_config)
-        suite_split_service.evg_api.test_stats_by_project.return_value = mock_test_stats
+        get_stats_from_s3_mock.return_value = mock_test_stats
         suite_split_service.resmoke_proxy.list_tests.return_value = [
-            stat.test_file for stat in mock_test_stats
+            stat.test_name for stat in mock_test_stats
         ]
         suite_split_service.resmoke_proxy.read_suite_config.return_value = {}
 
@@ -142,32 +148,15 @@ class TestSplitSuite(unittest.TestCase):
         for sub_suite in suite.sub_suites:
             self.assertEqual(10, len(sub_suite.test_list))
 
-    def test_calculate_suites_fallback_on_error(self):
-        n_tests = 100
-        max_sub_suites = 4
-        split_config = build_mock_split_config(max_sub_suites=max_sub_suites)
-        split_params = build_mock_split_params()
-
-        suite_split_service = build_mock_service(split_config=split_config)
-        mock_evg_error(suite_split_service.evg_api)
-        suite_split_service.resmoke_proxy.list_tests.return_value = [
-            f"test_{i}.js" for i in range(n_tests)
-        ]
-
-        suite = suite_split_service.split_suite(split_params)
-
-        self.assertEqual(max_sub_suites, len(suite))
-        for sub_suite in suite.sub_suites:
-            self.assertEqual(n_tests / max_sub_suites, len(sub_suite.test_list))
-
-    def test_calculate_suites_uses_fallback_on_no_results(self):
+    @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
+    def test_calculate_suites_uses_fallback_on_no_results(self, get_stats_from_s3_mock):
         n_tests = 100
         max_sub_suites = 5
         split_config = build_mock_split_config(max_sub_suites=max_sub_suites)
         split_params = build_mock_split_params()
 
         suite_split_service = build_mock_service(split_config=split_config)
-        suite_split_service.evg_api.test_stats_by_project.return_value = []
+        get_stats_from_s3_mock.return_value = []
         suite_split_service.resmoke_proxy.list_tests.return_value = [
             f"test_{i}.js" for i in range(n_tests)
         ]
@@ -178,7 +167,9 @@ class TestSplitSuite(unittest.TestCase):
         for sub_suite in suite.sub_suites:
             self.assertEqual(n_tests / max_sub_suites, len(sub_suite.test_list))
 
-    def test_calculate_suites_uses_fallback_if_only_results_are_filtered(self):
+    @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
+    def test_calculate_suites_uses_fallback_if_only_results_are_filtered(
+            self, get_stats_from_s3_mock):
         n_tests = 100
         max_sub_suites = 10
         mock_test_stats = [tst_stat_mock(f"test{i}.js", 60, 1) for i in range(100)]
@@ -187,7 +178,7 @@ class TestSplitSuite(unittest.TestCase):
         split_params = build_mock_split_params()
 
         suite_split_service = build_mock_service(split_config=split_config)
-        suite_split_service.evg_api.test_stats_by_project.return_value = mock_test_stats
+        get_stats_from_s3_mock.return_value = mock_test_stats
         suite_split_service.resmoke_proxy.list_tests.return_value = [
             f"test_{i}.js" for i in range(n_tests)
         ]
@@ -203,31 +194,17 @@ class TestSplitSuite(unittest.TestCase):
         for sub_suite in suite.sub_suites:
             self.assertEqual(n_tests / max_sub_suites, len(sub_suite.test_list))
 
-    def test_calculate_suites_fail_on_unexpected_error(self):
-        n_tests = 100
-        max_sub_suites = 4
-        split_config = build_mock_split_config(max_sub_suites=max_sub_suites)
-        split_params = build_mock_split_params()
-
-        suite_split_service = build_mock_service(split_config=split_config)
-        mock_evg_error(suite_split_service.evg_api, error_code=requests.codes.INTERNAL_SERVER_ERROR)
-        suite_split_service.resmoke_proxy.list_tests.return_value = [
-            f"test_{i}.js" for i in range(n_tests)
-        ]
-
-        with self.assertRaises(requests.HTTPError):
-            suite_split_service.split_suite(split_params)
-
-    def test_calculate_suites_will_filter_specified_tests(self):
+    @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
+    def test_calculate_suites_will_filter_specified_tests(self, get_stats_from_s3_mock):
         mock_test_stats = [tst_stat_mock(f"test_{i}.js", 60, 1) for i in range(100)]
         split_config = build_mock_split_config(target_resmoke_time=10)
         split_params = build_mock_split_params(
             test_filter=lambda t: t in {"test_1.js", "test_2.js"})
 
         suite_split_service = build_mock_service(split_config=split_config)
-        suite_split_service.evg_api.test_stats_by_project.return_value = mock_test_stats
+        get_stats_from_s3_mock.return_value = mock_test_stats
         suite_split_service.resmoke_proxy.list_tests.return_value = [
-            stat.test_file for stat in mock_test_stats
+            stat.test_name for stat in mock_test_stats
         ]
         suite_split_service.resmoke_proxy.read_suite_config.return_value = {}
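Note: the pattern repeated throughout these test diffs is to patch HistoricTaskData.get_stats_from_s3 and return real HistoricalTestInformation tuples instead of the old MagicMock(test_file=...) stubs, since the NamedTuple fixes the field names (test_name, num_pass, num_fail, avg_duration_pass). A condensed, hypothetical example of that pattern:

```python
import unittest
from unittest.mock import patch

from buildscripts.util.teststats import HistoricalTestInformation, HistoricTaskData

class ExampleStatsTest(unittest.TestCase):  # hypothetical test, pattern only
    @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
    def test_from_s3_builds_task_data(self, get_stats_from_s3_mock):
        get_stats_from_s3_mock.return_value = [
            HistoricalTestInformation(test_name="dir/test1.js", num_pass=5,
                                      num_fail=0, avg_duration_pass=12.3),
        ]
        task_data = HistoricTaskData.from_s3("project1", "task1", "variant1")
        # TestRuntime is a NamedTuple, so it compares equal to a plain tuple.
        self.assertEqual(task_data.get_tests_runtimes(), [("dir/test1.js", 12.3)])
```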
diff --git a/buildscripts/tests/test_burn_in_tags.py b/buildscripts/tests/test_burn_in_tags.py
index 28c1ac07413..bbccff9694e 100644
--- a/buildscripts/tests/test_burn_in_tags.py
+++ b/buildscripts/tests/test_burn_in_tags.py
@@ -14,8 +14,9 @@ from buildscripts.tests.test_burn_in_tests import ns as burn_in_tests_ns
 from buildscripts.ciconfig.evergreen import EvergreenProjectConfig
 
 import buildscripts.burn_in_tags as under_test
+from buildscripts.util.teststats import HistoricalTestInformation
 
-# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access
+# pylint: disable=missing-docstring,invalid-name,unused-argument,no-self-use,protected-access,too-many-arguments
 
 EMPTY_PROJECT = {
     "buildvariants": [],
@@ -119,7 +120,9 @@ class TestGenerateEvgTasks(unittest.TestCase):
         self.assertEqual(shrub_config.as_dict(), EMPTY_PROJECT)
 
     @patch(ns("create_tests_by_task"))
-    def test_generate_evg_tasks_one_test_changed(self, create_tests_by_task_mock):
+    @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
+    def test_generate_evg_tasks_one_test_changed(self, get_stats_from_s3_mock,
+                                                 create_tests_by_task_mock):
         evg_conf_mock = get_evergreen_config()
         create_tests_by_task_mock.return_value = {
             "aggregation_mongos_passthrough": TaskInfo(
@@ -139,8 +142,13 @@ class TestGenerateEvgTasks(unittest.TestCase):
         shrub_config = ShrubProject.empty()
         evergreen_api = MagicMock()
         repo = MagicMock(working_dir=os.getcwd())
-        evergreen_api.test_stats_by_project.return_value = [
-            MagicMock(test_file="dir/test2.js", avg_duration_pass=10)
+        get_stats_from_s3_mock.return_value = [
+            HistoricalTestInformation(
+                test_name="dir/test2.js",
+                num_pass=1,
+                num_fail=0,
+                avg_duration_pass=10,
+            )
         ]
         under_test._generate_evg_tasks(evergreen_api, shrub_config, expansions_file_data,
                                        buildvariant_map, [repo], evg_conf_mock, 'install-dir/bin')
@@ -231,8 +239,9 @@ class TestAcceptance(unittest.TestCase):
     @patch(ns("_create_evg_build_variant_map"))
     @patch(ns("EvergreenFileChangeDetector"))
     @patch(burn_in_tests_ns("create_test_membership_map"))
+    @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
     def test_tests_generated_if_a_file_changed(
-            self, create_test_membership_map_mock, find_changed_tests_mock,
+            self, get_stats_from_s3_mock, create_test_membership_map_mock, find_changed_tests_mock,
             create_evg_build_variant_map_mock, write_to_file_mock):
         """
         Given a git repository with changes,
@@ -248,6 +257,7 @@ class TestAcceptance(unittest.TestCase):
             'jstests/slow1/large_role_chain.js',
             'jstests/aggregation/accumulators/accumulator_js.js'
         }
+        get_stats_from_s3_mock.return_value = []
 
         under_test.burn_in(EXPANSIONS_FILE_DATA, evg_conf, MagicMock(), repos, 'install_dir/bin')
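Note on the stacked @patch decorators in the hunks above: unittest.mock applies decorators bottom-up, so the decorator closest to the function supplies the left-most mock argument (after self). That is why adding the get_stats_from_s3 patch beneath @patch(ns("create_tests_by_task")) makes get_stats_from_s3_mock the first new parameter. A self-contained illustration using stdlib targets:

```python
import os
from unittest.mock import patch

@patch("os.getcwd")   # outer decorator, applied second -> second mock argument
@patch("os.listdir")  # inner decorator, applied first  -> first mock argument
def check_order(listdir_mock, getcwd_mock):
    # Each mock replaces its target while the function runs.
    assert listdir_mock is os.listdir
    assert getcwd_mock is os.getcwd

check_order()
```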
diff --git a/buildscripts/tests/test_evergreen_burn_in_tests.py b/buildscripts/tests/test_evergreen_burn_in_tests.py
index c64f07befcb..309b7007b1b 100644
--- a/buildscripts/tests/test_evergreen_burn_in_tests.py
+++ b/buildscripts/tests/test_evergreen_burn_in_tests.py
@@ -88,7 +88,8 @@ class TestAcceptance(unittest.TestCase):
 
     @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
     @patch(ns("write_file"))
-    def test_tests_generated_if_a_file_changed(self, write_json_mock):
+    @patch(ns("HistoricTaskData.get_stats_from_s3"))
+    def test_tests_generated_if_a_file_changed(self, get_stats_from_s3_mock, write_json_mock):
         """
         Given a git repository with changes,
         When burn_in_tests is run,
@@ -108,6 +109,7 @@ class TestAcceptance(unittest.TestCase):
         )  # yapf: disable
         mock_evg_conf = get_evergreen_config("etc/evergreen.yml")
         mock_evg_api = MagicMock()
+        get_stats_from_s3_mock.return_value = []
 
         under_test.burn_in("task_id", variant, gen_config, repeat_config, mock_evg_api,
                            mock_evg_conf, repos, "testfile.json", 'install-dir/bin')
@@ -244,41 +246,30 @@ class TestGenerateTimeouts(unittest.TestCase):
 
 
 class TestGetTaskRuntimeHistory(unittest.TestCase):
-    def test_get_task_runtime_history(self):
-        mock_evg_api = MagicMock()
-        mock_evg_api.test_stats_by_project.return_value = [
-            MagicMock(
-                test_file="dir/test2.js",
-                task_name="task1",
-                variant="variant1",
-                distro="distro1",
-                date=datetime.utcnow().date(),
+    @patch(ns("HistoricTaskData.get_stats_from_s3"))
+    def test_get_task_runtime_history(self, get_stats_from_s3_mock):
+        test_stats = [
+            teststats_utils.HistoricalTestInformation(
+                test_name="dir/test2.js",
                 num_pass=1,
                 num_fail=0,
                 avg_duration_pass=10.1,
             )
         ]
-        analysis_duration = under_test.AVG_TEST_RUNTIME_ANALYSIS_DAYS
-        end_date = datetime.utcnow().replace(microsecond=0)
-        start_date = end_date - timedelta(days=analysis_duration)
+        get_stats_from_s3_mock.return_value = test_stats
         mock_gen_config = MagicMock(project="project1", build_variant="variant1")
 
-        executor = under_test.GenerateBurnInExecutor(mock_gen_config, MagicMock(), mock_evg_api,
-                                                     history_end_date=end_date)
+        executor = under_test.GenerateBurnInExecutor(mock_gen_config, MagicMock())
         result = executor.get_task_runtime_history("task1")
 
         self.assertEqual(result, [("dir/test2.js", 10.1)])
-        mock_evg_api.test_stats_by_project.assert_called_with(
-            "project1", after_date=start_date, before_date=end_date, group_by="test",
-            group_num_days=14, tasks=["task1"], variants=["variant1"])
 
-    def test_get_task_runtime_history_evg_degraded_mode_error(self):
-        mock_response = MagicMock(status_code=requests.codes.SERVICE_UNAVAILABLE)
-        mock_evg_api = MagicMock()
-        mock_evg_api.test_stats_by_project.side_effect = requests.HTTPError(response=mock_response)
+    @patch(ns("HistoricTaskData.get_stats_from_s3"))
+    def test_get_task_runtime_history_when_s3_has_no_data(self, get_stats_from_s3_mock):
+        get_stats_from_s3_mock.return_value = []
         mock_gen_config = MagicMock(project="project1", build_variant="variant1")
 
-        executor = under_test.GenerateBurnInExecutor(mock_gen_config, MagicMock(), mock_evg_api)
+        executor = under_test.GenerateBurnInExecutor(mock_gen_config, MagicMock())
         result = executor.get_task_runtime_history("task1")
 
         self.assertEqual(result, [])
@@ -324,7 +315,8 @@ class TestCreateGenerateTasksConfig(unittest.TestCase):
         self.assertEqual(0, len(evg_config_dict["tasks"]))
 
     @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
-    def test_one_task_one_test(self):
+    @patch(ns("HistoricTaskData.get_stats_from_s3"))
+    def test_one_task_one_test(self, get_stats_from_s3_mock):
         n_tasks = 1
         n_tests = 1
         resmoke_options = "options for resmoke"
@@ -334,6 +326,7 @@ class TestCreateGenerateTasksConfig(unittest.TestCase):
         repeat_config.generate_resmoke_options.return_value = resmoke_options
         mock_evg_api = MagicMock()
         tests_by_task = create_tests_by_task_mock(n_tasks, n_tests)
+        get_stats_from_s3_mock.return_value = []
 
         executor = under_test.GenerateBurnInExecutor(gen_config, repeat_config, mock_evg_api)
         executor.add_config_for_build_variant(build_variant, tests_by_task)
@@ -348,7 +341,8 @@ class TestCreateGenerateTasksConfig(unittest.TestCase):
         self.assertIn("tests_0", cmd[1]["vars"]["resmoke_args"])
 
     @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
-    def test_n_task_m_test(self):
+    @patch(ns("HistoricTaskData.get_stats_from_s3"))
+    def test_n_task_m_test(self, get_stats_from_s3_mock):
         n_tasks = 3
         n_tests = 5
         build_variant = BuildVariant("build variant")
@@ -356,6 +350,7 @@ class TestCreateGenerateTasksConfig(unittest.TestCase):
         repeat_config = MagicMock()
         tests_by_task = create_tests_by_task_mock(n_tasks, n_tests)
         mock_evg_api = MagicMock()
+        get_stats_from_s3_mock.return_value = []
 
         executor = under_test.GenerateBurnInExecutor(gen_config, repeat_config, mock_evg_api)
         executor.add_config_for_build_variant(build_variant, tests_by_task)
@@ -372,14 +367,12 @@ class TestCreateGenerateTasksFile(unittest.TestCase):
         gen_config = MagicMock(require_multiversion=False)
         repeat_config = MagicMock()
         tests_by_task = MagicMock()
-        mock_evg_api = MagicMock()
         validate_mock.return_value = False
 
         exit_mock.side_effect = ValueError("exiting")
         with self.assertRaises(ValueError):
-            executor = under_test.GenerateBurnInExecutor(gen_config, repeat_config, mock_evg_api,
-                                                         "gen_file.json")
+            executor = under_test.GenerateBurnInExecutor(gen_config, repeat_config, "gen_file.json")
             executor.execute(tests_by_task)
 
         exit_mock.assert_called_once()
diff --git a/buildscripts/tests/test_selected_tests.py b/buildscripts/tests/test_selected_tests.py
index 377ee59bc6b..0d194368ea3 100644
--- a/buildscripts/tests/test_selected_tests.py
+++ b/buildscripts/tests/test_selected_tests.py
@@ -25,7 +25,7 @@ from buildscripts.task_generation.task_types.gentask_options import GenTaskOptio
 from buildscripts.tests.test_burn_in_tests import get_evergreen_config, mock_changed_git_files
 from buildscripts import selected_tests as under_test
 
-# pylint: disable=missing-docstring,invalid-name,unused-argument,protected-access,no-value-for-parameter
+# pylint: disable=missing-docstring,invalid-name,unused-argument,protected-access,no-value-for-parameter,too-many-locals
 
 NS = "buildscripts.selected_tests"
 
@@ -104,7 +104,9 @@ class TestAcceptance(unittest.TestCase):
         self.assertEqual(generated_config.file_list[0].file_name, "selected_tests_config.json")
 
     @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
-    def test_when_test_mappings_are_found_for_changed_files(self):
+    @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
+    def test_when_test_mappings_are_found_for_changed_files(self, get_stats_from_s3_mock):
+        get_stats_from_s3_mock.return_value = []
         mock_evg_api = self._mock_evg_api()
         mock_evg_config = get_evergreen_config("etc/evergreen.yml")
         mock_evg_expansions = under_test.EvgExpansions(
@@ -152,7 +154,9 @@ class TestAcceptance(unittest.TestCase):
         self.assertEqual(len(rhel_80_with_generated_tasks["tasks"]), 2)
 
     @unittest.skipIf(sys.platform.startswith("win"), "not supported on windows")
-    def test_when_task_mappings_are_found_for_changed_files(self):
+    @patch("buildscripts.util.teststats.HistoricTaskData.get_stats_from_s3")
+    def test_when_task_mappings_are_found_for_changed_files(self, get_stats_from_s3_mock):
+        get_stats_from_s3_mock.return_value = []
         mock_evg_api = self._mock_evg_api()
         mock_evg_config = get_evergreen_config("etc/evergreen.yml")
         mock_evg_expansions = under_test.EvgExpansions(
diff --git a/buildscripts/tests/util/test_teststats.py b/buildscripts/tests/util/test_teststats.py
index b3405ecc00d..5949758559e 100644
--- a/buildscripts/tests/util/test_teststats.py
+++ b/buildscripts/tests/util/test_teststats.py
@@ -79,7 +79,7 @@ class TestHistoricTaskData(unittest.TestCase):
     @staticmethod
     def _make_evg_result(test_file="dir/test1.js", num_pass=0, duration=0):
         return Mock(
-            test_file=test_file,
+            test_name=test_file,
             task_name="task1",
             variant="variant1",
             distro="distro1",
diff --git a/buildscripts/util/teststats.py b/buildscripts/util/teststats.py
index b8c0578c19e..5336aa8cd5c 100644
--- a/buildscripts/util/teststats.py
+++ b/buildscripts/util/teststats.py
@@ -1,15 +1,32 @@
 """Utility to support parsing a TestStat."""
 from collections import defaultdict
 from dataclasses import dataclass
-from datetime import datetime
 from itertools import chain
+from json import JSONDecodeError
 from typing import NamedTuple, List, Callable, Optional
-
-from evergreen import EvergreenApi, TestStats
+import requests
+from requests.adapters import HTTPAdapter, Retry
 
 from buildscripts.util.testname import split_test_hook_name, is_resmoke_hook, get_short_name_from_test_file
 
 TASK_LEVEL_HOOKS = {"CleanEveryN"}
+TESTS_STATS_S3_LOCATION = "https://mongo-test-stats.s3.amazonaws.com"
+
+
+class HistoricalTestInformation(NamedTuple):
+    """
+    Container for information about the historical runtime of a test.
+
+    test_name: Name of test.
+    avg_duration_pass: Average of runtime of test that passed.
+    num_pass: Number of times the test has passed.
+    num_fail: Number of times the test has failed.
+    """
+
+    test_name: str
+    num_pass: int
+    num_fail: int
+    avg_duration_pass: float
 
 
 class TestRuntime(NamedTuple):
@@ -74,9 +91,9 @@ class HistoricHookInfo(NamedTuple):
     avg_duration: float
 
     @classmethod
-    def from_test_stats(cls, test_stats: TestStats) -> "HistoricHookInfo":
+    def from_test_stats(cls, test_stats: HistoricalTestInformation) -> "HistoricHookInfo":
         """Create an instance from a test_stats object."""
-        return cls(hook_id=test_stats.test_file, num_pass=test_stats.num_pass,
+        return cls(hook_id=test_stats.test_name, num_pass=test_stats.num_pass,
                    avg_duration=test_stats.avg_duration_pass)
 
     def test_name(self) -> str:
@@ -101,10 +118,10 @@ class HistoricTestInfo(NamedTuple):
     hooks: List[HistoricHookInfo]
 
     @classmethod
-    def from_test_stats(cls, test_stats: TestStats,
+    def from_test_stats(cls, test_stats: HistoricalTestInformation,
                         hooks: List[HistoricHookInfo]) -> "HistoricTestInfo":
         """Create an instance from a test_stats object."""
-        return cls(test_name=test_stats.test_file, num_pass=test_stats.num_pass,
+        return cls(test_name=test_stats.test_name, num_pass=test_stats.num_pass,
                    avg_duration=test_stats.avg_duration_pass, hooks=hooks)
 
     def normalized_test_name(self) -> str:
@@ -134,46 +151,59 @@ class HistoricTaskData(object):
         """Initialize the TestStats with raw results from the Evergreen API."""
         self.historic_test_results = historic_test_results
 
-    # pylint: disable=too-many-arguments
+    @staticmethod
+    def get_stats_from_s3(project: str, task: str, variant: str) -> List[HistoricalTestInformation]:
+        """
+        Retrieve test stats from s3 for a given task.
+
+        :param project: Project to query.
+        :param task: Task to query.
+        :param variant: Build variant to query.
+        :return: A list of the Test stats for the specified task.
+        """
+        session = requests.Session()
+        retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
+        session.mount('https://', HTTPAdapter(max_retries=retries))
+
+        response = session.get(f"{TESTS_STATS_S3_LOCATION}/{project}/{variant}/{task}")
+
+        try:
+            data = response.json()
+            return [HistoricalTestInformation(**item) for item in data]
+        except JSONDecodeError:
+            return []
+
     @classmethod
-    def from_evg(cls, evg_api: EvergreenApi, project: str, start_date: datetime, end_date: datetime,
-                 task: str, variant: str) -> "HistoricTaskData":
+    def from_s3(cls, project: str, task: str, variant: str) -> "HistoricTaskData":
         """
-        Retrieve test stats from evergreen for a given task.
+        Retrieve test stats from s3 for a given task.
 
-        :param evg_api: Evergreen API client.
         :param project: Project to query.
-        :param start_date: Start date to query.
-        :param end_date: End date to query.
         :param task: Task to query.
         :param variant: Build variant to query.
         :return: Test stats for the specified task.
""" - days = (end_date - start_date).days - historic_stats = evg_api.test_stats_by_project( - project, after_date=start_date, before_date=end_date, tasks=[task], variants=[variant], - group_by="test", group_num_days=days) - - return cls.from_stats_list(historic_stats) + historical_test_data = cls.get_stats_from_s3(project, task, variant) + return cls.from_stats_list(historical_test_data) @classmethod - def from_stats_list(cls, historic_stats: List[TestStats]) -> "HistoricTaskData": + def from_stats_list( + cls, historical_test_data: List[HistoricalTestInformation]) -> "HistoricTaskData": """ Build historic task data from a list of historic stats. - :param historic_stats: List of historic stats to build from. + :param historical_test_data: A list of information about the runtime of a test. :return: Historic task data from the list of stats. """ - hooks = defaultdict(list) - for hook in [stat for stat in historic_stats if is_resmoke_hook(stat.test_file)]: + for hook in [stat for stat in historical_test_data if is_resmoke_hook(stat.test_name)]: historical_hook = HistoricHookInfo.from_test_stats(hook) hooks[historical_hook.test_name()].append(historical_hook) return cls([ HistoricTestInfo.from_test_stats(stat, - hooks[get_short_name_from_test_file(stat.test_file)]) - for stat in historic_stats if not is_resmoke_hook(stat.test_file) + hooks[get_short_name_from_test_file(stat.test_name)]) + for stat in historical_test_data if not is_resmoke_hook(stat.test_name) ]) def get_tests_runtimes(self) -> List[TestRuntime]: |