diff options
author | Jim OLeary <jim.oleary@gmail.com> | 2019-07-10 10:30:23 +0100 |
---|---|---|
committer | Jim OLeary <jim.oleary@gmail.com> | 2019-07-11 13:32:46 +0100 |
commit | 888f3013fa3fe2dafb248e3996b4667c8075bda3 (patch) | |
tree | 7bc3e7bbb6b52066930cd30b643132edd2bc144e | |
parent | 659c4946cb7e2f18937cbe085b03ec247dd52635 (diff) | |
download | mongo-888f3013fa3fe2dafb248e3996b4667c8075bda3.tar.gz |
SERVER-41990 Burn_in should not average hook times for unique hooks
-rw-r--r-- | buildscripts/tests/util/test_teststats.py | 19 | ||||
-rw-r--r-- | buildscripts/util/teststats.py | 23 |
2 files changed, 28 insertions(+), 14 deletions(-)
diff --git a/buildscripts/tests/util/test_teststats.py b/buildscripts/tests/util/test_teststats.py index 6b9b94970e5..4da8d6942d8 100644 --- a/buildscripts/tests/util/test_teststats.py +++ b/buildscripts/tests/util/test_teststats.py @@ -47,7 +47,24 @@ class TestTestStats(unittest.TestCase): ] test_stats = teststats_utils.TestStats(evg_results) expected_runtimes = [ - teststats_utils.TestRuntime(test_name="dir/test3.js", runtime=42.5), + teststats_utils.TestRuntime(test_name="dir/test3.js", runtime=75), + teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=30), + teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=20), + ] + self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes()) + + def test_hook_first(self): + evg_results = [ + self._make_evg_result("test3:CleanEveryN", 10, 35), + self._make_evg_result("dir/test1.js", 1, 10), + self._make_evg_result("dir/test2.js", 1, 30), + self._make_evg_result("dir/test1.js", 2, 25), + self._make_evg_result("dir/test3.js", 5, 10), + self._make_evg_result("test3:CheckReplDBHash", 10, 35), + ] + test_stats = teststats_utils.TestStats(evg_results) + expected_runtimes = [ + teststats_utils.TestRuntime(test_name="dir/test3.js", runtime=80), teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=30), teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=20), ] diff --git a/buildscripts/util/teststats.py b/buildscripts/util/teststats.py index 2c09019cd76..796428d5a03 100644 --- a/buildscripts/util/teststats.py +++ b/buildscripts/util/teststats.py @@ -1,9 +1,5 @@ """Utility to support parsing a TestStat.""" -import sys -import os -import logging - from collections import defaultdict from collections import namedtuple import buildscripts.util.testname as testname # pylint: disable=wrong-import-position @@ -23,8 +19,9 @@ class TestStats(object): """Initialize the TestStats with raw results from the Evergreen API.""" # Mapping from test_file to {"num_run": X, "duration": Y} for tests 
self._runtime_by_test = defaultdict(dict) - # Mapping from test_name to {"num_run": X, "duration": Y} for hooks - self._hook_runtime_by_test = defaultdict(dict) + # Mapping from 'test_name:hook_name' to + # {'test_name': {'hook_name': {"num_run": X, "duration": Y}}} + self._hook_runtime_by_test = defaultdict(lambda: defaultdict(dict)) for doc in evg_test_stats_results: self._add_stats(doc) @@ -42,16 +39,17 @@ class TestStats(object): def _add_test_stats(self, test_file, duration, num_run): """Add the statistics for a test.""" - self._add_runtime_info(self._runtime_by_test, test_file, duration, num_run) + runtime_info = self._runtime_by_test[test_file] + self._add_runtime_info(runtime_info, duration, num_run) def _add_test_hook_stats(self, test_file, duration, num_run): """Add the statistics for a hook.""" - test_name = testname.split_test_hook_name(test_file)[0] - self._add_runtime_info(self._hook_runtime_by_test, test_name, duration, num_run) + test_name, hook_name = testname.split_test_hook_name(test_file) + runtime_info = self._hook_runtime_by_test[test_name][hook_name] + self._add_runtime_info(runtime_info, duration, num_run) @staticmethod - def _add_runtime_info(runtime_dict, test_name, duration, num_run): - runtime_info = runtime_dict[test_name] + def _add_runtime_info(runtime_info, duration, num_run): if not runtime_info: runtime_info["duration"] = duration runtime_info["num_run"] = num_run @@ -75,8 +73,7 @@ class TestStats(object): for test_file, runtime_info in list(self._runtime_by_test.items()): duration = runtime_info["duration"] test_name = testname.get_short_name_from_test_file(test_file) - hook_runtime_info = self._hook_runtime_by_test[test_name] - if hook_runtime_info: + for _, hook_runtime_info in self._hook_runtime_by_test[test_name].items(): duration += hook_runtime_info["duration"] test = TestRuntime(test_name=normalize_test_name(test_file), runtime=duration) tests.append(test) |