summaryrefslogtreecommitdiff
path: root/Tools/Scripts/webkitpy/performance_tests/perftest.py
diff options
context:
space:
mode:
authorSimon Hausmann <simon.hausmann@digia.com>2012-09-20 16:13:48 +0200
committerSimon Hausmann <simon.hausmann@digia.com>2012-09-20 16:13:48 +0200
commit0b3dc81d9701aea106543b49bde511a5697cdd6e (patch)
tree1382a17542d8ca4d0e054b9b143021d8f471e33b /Tools/Scripts/webkitpy/performance_tests/perftest.py
parent6dbcd09121fe266c7704a524b5cbd7f2754659c0 (diff)
downloadqtwebkit-0b3dc81d9701aea106543b49bde511a5697cdd6e.tar.gz
Imported WebKit commit 6dbad7b03986b50773637200cddddeeeb92745cc (http://svn.webkit.org/repository/webkit/trunk@129129)
Another update that should fix the initial build in the CI system
Diffstat (limited to 'Tools/Scripts/webkitpy/performance_tests/perftest.py')
-rw-r--r--Tools/Scripts/webkitpy/performance_tests/perftest.py24
1 file changed, 10 insertions, 14 deletions
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest.py b/Tools/Scripts/webkitpy/performance_tests/perftest.py
index 79899f974..69d9363f4 100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest.py
@@ -114,8 +114,8 @@ class PerfTest(object):
_description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
_result_classes = ['Time', 'JS Heap', 'Malloc']
_result_class_regex = re.compile(r'^(?P<resultclass>' + r'|'.join(_result_classes) + '):')
- _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
- _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')
+ _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit']
+ _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')
def parse_output(self, output):
test_failed = False
@@ -138,10 +138,7 @@ class PerfTest(object):
score = self._score_regex.match(line)
if score:
key = score.group('key')
- if ', ' in score.group('value'):
- value = [float(number) for number in score.group('value').split(', ')]
- else:
- value = float(score.group('value'))
+ value = float(score.group('value'))
unit = score.group('unit')
name = test_name
if result_class != 'Time':
@@ -211,24 +208,23 @@ class PageLoadingPerfTest(PerfTest):
continue
test_times.append(output.test_time * 1000)
- sorted_test_times = sorted(test_times)
+ test_times = sorted(test_times)
# Compute the mean and variance using a numerically stable algorithm.
squareSum = 0
mean = 0
- valueSum = sum(sorted_test_times)
- for i, time in enumerate(sorted_test_times):
+ valueSum = sum(test_times)
+ for i, time in enumerate(test_times):
delta = time - mean
sweep = i + 1.0
mean += delta / sweep
squareSum += delta * delta * (i / sweep)
middle = int(len(test_times) / 2)
- results = {'values': test_times,
- 'avg': mean,
- 'min': sorted_test_times[0],
- 'max': sorted_test_times[-1],
- 'median': sorted_test_times[middle] if len(sorted_test_times) % 2 else (sorted_test_times[middle - 1] + sorted_test_times[middle]) / 2,
+ results = {'avg': mean,
+ 'min': min(test_times),
+ 'max': max(test_times),
+ 'median': test_times[middle] if len(test_times) % 2 else (test_times[middle - 1] + test_times[middle]) / 2,
'stdev': math.sqrt(squareSum),
'unit': 'ms'}
self.output_statistics(self.test_name(), results, '')