author    Simon Hausmann <simon.hausmann@digia.com>  2012-09-24 13:09:44 +0200
committer Simon Hausmann <simon.hausmann@digia.com>  2012-09-24 13:09:44 +0200
commit    dc6262b587c71c14e30d93e57ed812e36a79a33e (patch)
tree      03ff986e7aa38bba0c0ef374f44fda52aff93f01 /Tools/Scripts/webkitpy/performance_tests/perftest.py
parent    02e1fbbefd49229b102ef107bd70ce974a2d85fb (diff)
download  qtwebkit-dc6262b587c71c14e30d93e57ed812e36a79a33e.tar.gz
Imported WebKit commit 6339232fec7f5d9984a33388aecfd2cbc7832053 (http://svn.webkit.org/repository/webkit/trunk@129343)
New snapshot with build fixes for latest qtbase
Diffstat (limited to 'Tools/Scripts/webkitpy/performance_tests/perftest.py')
-rw-r--r--  Tools/Scripts/webkitpy/performance_tests/perftest.py | 31
1 file changed, 20 insertions(+), 11 deletions(-)
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest.py b/Tools/Scripts/webkitpy/performance_tests/perftest.py
index 69d9363f4..1dfbcd229 100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest.py
@@ -114,8 +114,8 @@ class PerfTest(object):
     _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
     _result_classes = ['Time', 'JS Heap', 'Malloc']
     _result_class_regex = re.compile(r'^(?P<resultclass>' + r'|'.join(_result_classes) + '):')
-    _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit']
-    _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')
+    _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
+    _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')
 
     def parse_output(self, output):
         test_failed = False
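
The widened pattern keeps single-number lines working while also accepting a comma-separated series for the new 'values' key. A quick standalone sketch of what it accepts (the class attributes inlined as plain variables, names local to the sketch):

import re

statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
score_regex = re.compile(r'^(?P<key>' + r'|'.join(statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')

# A single number parses exactly as before...
match = score_regex.match('avg 1504.6 ms')
assert (match.group('key'), match.group('value'), match.group('unit')) == ('avg', '1504.6', 'ms')

# ...and a comma-separated series now lands in the 'value' group as one string.
match = score_regex.match('values 1471.9, 1504.6, 1537.3 ms')
assert match.group('value') == '1471.9, 1504.6, 1537.3'
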
@@ -138,7 +138,10 @@
             score = self._score_regex.match(line)
             if score:
                 key = score.group('key')
-                value = float(score.group('value'))
+                if ', ' in score.group('value'):
+                    value = [float(number) for number in score.group('value').split(', ')]
+                else:
+                    value = float(score.group('value'))
                 unit = score.group('unit')
                 name = test_name
                 if result_class != 'Time':
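
In isolation, the new branch converts the capture like this (a minimal sketch of the logic above, with a hypothetical helper name):

def parse_score_value(raw):
    # A comma-separated capture (a 'values' line) becomes a list of floats;
    # anything else stays a single float, as before.
    if ', ' in raw:
        return [float(number) for number in raw.split(', ')]
    return float(raw)

assert parse_score_value('1504.6') == 1504.6
assert parse_score_value('1471.9, 1504.6, 1537.3') == [1471.9, 1504.6, 1537.3]
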
@@ -154,7 +157,12 @@
                 test_failed = True
                 _log.error(line)
 
-        if test_failed or set(self._statistics_keys) != set(results[test_name].keys()):
+        if test_failed:
+            return None
+
+        if set(self._statistics_keys) != set(results[test_name].keys() + ['values']):
+            # values is not provided by Dromaeo tests.
+            _log.error("The test didn't report all statistics.")
             return None
 
         for result_name in ordered_results_keys:
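
The split early-return first bails out on any failure, then separately verifies that every expected statistic was reported. Appending 'values' to the reported keys before comparing means its absence alone never fails a run, which matters because Dromaeo tests do not emit it. A sketch of that set comparison (assuming Python 2 semantics, where dict.keys() returns a list that supports + concatenation, as the code above relies on):

statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']

# A report without 'values' (e.g. from Dromaeo) still passes the check...
reported = {'avg': 1, 'median': 1, 'stdev': 1, 'min': 1, 'max': 1, 'unit': 'ms'}
assert set(statistics_keys) == set(reported.keys() + ['values'])

# ...but a report missing any other statistic does not.
del reported['stdev']
assert set(statistics_keys) != set(reported.keys() + ['values'])
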
@@ -208,23 +216,24 @@ class PageLoadingPerfTest(PerfTest):
                 continue
             test_times.append(output.test_time * 1000)
 
-        test_times = sorted(test_times)
+        sorted_test_times = sorted(test_times)
 
         # Compute the mean and variance using a numerically stable algorithm.
         squareSum = 0
         mean = 0
-        valueSum = sum(test_times)
-        for i, time in enumerate(test_times):
+        valueSum = sum(sorted_test_times)
+        for i, time in enumerate(sorted_test_times):
             delta = time - mean
             sweep = i + 1.0
             mean += delta / sweep
             squareSum += delta * delta * (i / sweep)
 
         middle = int(len(test_times) / 2)
-        results = {'avg': mean,
-                   'min': min(test_times),
-                   'max': max(test_times),
-                   'median': test_times[middle] if len(test_times) % 2 else (test_times[middle - 1] + test_times[middle]) / 2,
+        results = {'values': test_times,
+                   'avg': mean,
+                   'min': sorted_test_times[0],
+                   'max': sorted_test_times[-1],
+                   'median': sorted_test_times[middle] if len(sorted_test_times) % 2 else (sorted_test_times[middle - 1] + sorted_test_times[middle]) / 2,
                    'stdev': math.sqrt(squareSum),
                    'unit': 'ms'}
         self.output_statistics(self.test_name(), results, '')
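
For reference, the statistics block amounts to Welford's online update for the mean and the sum of squared deviations, plus an even/odd median split over the sorted times. A self-contained sketch that mirrors the code above, including its use of the raw square sum for 'stdev' (hypothetical helper name; assumes a non-empty list of times in milliseconds):

import math

def compute_statistics(test_times):
    sorted_test_times = sorted(test_times)
    square_sum = 0.0
    mean = 0.0
    for i, time in enumerate(sorted_test_times):
        delta = time - mean
        sweep = i + 1.0
        mean += delta / sweep                      # running mean
        square_sum += delta * delta * (i / sweep)  # running sum of squared deviations
    middle = int(len(sorted_test_times) / 2)
    median = (sorted_test_times[middle] if len(sorted_test_times) % 2
              else (sorted_test_times[middle - 1] + sorted_test_times[middle]) / 2.0)
    return {'values': test_times,
            'avg': mean,
            'min': sorted_test_times[0],
            'max': sorted_test_times[-1],
            'median': median,
            'stdev': math.sqrt(square_sum),  # as above: sqrt of the raw square sum
            'unit': 'ms'}

stats = compute_statistics([1537.3, 1471.9, 1504.6])
assert stats['median'] == 1504.6
assert stats['min'] == 1471.9 and stats['max'] == 1537.3
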