Diffstat (limited to 'testsuite/driver')
-rw-r--r--   testsuite/driver/perf_notes.py    |  3
-rw-r--r--   testsuite/driver/testglobals.py   | 18
2 files changed, 12 insertions, 9 deletions
diff --git a/testsuite/driver/perf_notes.py b/testsuite/driver/perf_notes.py
index 931af03751..5c36bc2251 100644
--- a/testsuite/driver/perf_notes.py
+++ b/testsuite/driver/perf_notes.py
@@ -9,6 +9,7 @@
 # (which defaults to 'local' if not given by --test-env).
 #
 
+from enum import Enum
 import colorsys
 import tempfile
 import json
@@ -62,7 +63,7 @@
 PerfStat = namedtuple('PerfStat', ['test_env','test','way','metric','value'])
 # A baseline recovered form stored metrics.
 Baseline = namedtuple('Baseline', ['perfStat','commit','commitDepth'])
-class MetricChange:
+class MetricChange(Enum):
     NewMetric = 'NewMetric'
     NoChange = 'NoChange'
     Increase = 'Increase'
diff --git a/testsuite/driver/testglobals.py b/testsuite/driver/testglobals.py
index 3d273cb647..c89e225c72 100644
--- a/testsuite/driver/testglobals.py
+++ b/testsuite/driver/testglobals.py
@@ -225,20 +225,22 @@ class TestRun:
         self.n_expected_failures = 0
 
         self.missing_libs = [] # type: List[TestResult]
-        self.framework_failures = []
-        self.framework_warnings = []
+        self.framework_failures = [] # type: List[TestResult]
+        self.framework_warnings = [] # type: List[TestResult]
 
-        self.expected_passes = []
-        self.unexpected_passes = []
-        self.unexpected_failures = []
-        self.unexpected_stat_failures = []
-        self.fragile_results = []
+        self.expected_passes = [] # type: List[TestResult]
+        self.unexpected_passes = [] # type: List[TestResult]
+        self.unexpected_failures = [] # type: List[TestResult]
+        self.unexpected_stat_failures = [] # type: List[TestResult]
+
+        # Results from tests that have been marked as fragile
+        self.fragile_results = [] # type: List[TestResult]
 
         # List of all metrics measured in this test run.
         # [(change, PerfStat)] where change is one of the MetricChange
         # constants: NewMetric, NoChange, Increase, Decrease.
         # NewMetric happens when the previous git commit has no metric recorded.
-        self.metrics = []
+        self.metrics = [] # type: List[Tuple[MetricChange, PerfStat]]
 
 global t
 t = TestRun()
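
The following minimal sketch (not part of the patch) illustrates how the Enum-based MetricChange and the comment-style type annotations added above fit together. The test name 'T123' and the metric values are invented for illustration; the PerfStat fields, the MetricChange constants, and the annotation style mirror the definitions shown in the diff.

from collections import namedtuple
from enum import Enum
from typing import List, Tuple

PerfStat = namedtuple('PerfStat', ['test_env', 'test', 'way', 'metric', 'value'])

class MetricChange(Enum):
    NewMetric = 'NewMetric'
    NoChange = 'NoChange'
    Increase = 'Increase'
    Decrease = 'Decrease'

# Comment-style annotation, as used in testglobals.py; mypy understands this
# syntax without requiring Python 3 variable annotations.
metrics = []  # type: List[Tuple[MetricChange, PerfStat]]

# Hypothetical measurement: test 'T123' allocated more than its baseline.
metrics.append((MetricChange.Increase,
                PerfStat('local', 'T123', 'normal', 'bytes allocated', 1024)))

for change, stat in metrics:
    # Enum members are compared by identity; .value recovers the old string constant.
    if change is MetricChange.Increase:
        print('%s: %s increased to %d' % (stat.test, stat.metric, stat.value))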