author     Simon Hausmann <simon.hausmann@digia.com>   2012-09-20 16:13:48 +0200
committer  Simon Hausmann <simon.hausmann@digia.com>   2012-09-20 16:13:48 +0200
commit     0b3dc81d9701aea106543b49bde511a5697cdd6e (patch)
tree       1382a17542d8ca4d0e054b9b143021d8f471e33b /Tools/Scripts/webkitpy/performance_tests
parent     6dbcd09121fe266c7704a524b5cbd7f2754659c0 (diff)
download   qtwebkit-0b3dc81d9701aea106543b49bde511a5697cdd6e.tar.gz
Imported WebKit commit 6dbad7b03986b50773637200cddddeeeb92745cc (http://svn.webkit.org/repository/webkit/trunk@129129)
Another update that should fix the initial build in the CI system
Diffstat (limited to 'Tools/Scripts/webkitpy/performance_tests')
-rw-r--r--  Tools/Scripts/webkitpy/performance_tests/perftest.py                  | 24
-rwxr-xr-x  Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py         | 10
-rwxr-xr-x  Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py           |  5
-rwxr-xr-x  Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py  | 26
4 files changed, 20 insertions(+), 45 deletions(-)
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest.py b/Tools/Scripts/webkitpy/performance_tests/perftest.py
index 79899f974..69d9363f4 100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest.py
@@ -114,8 +114,8 @@ class PerfTest(object):
_description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
_result_classes = ['Time', 'JS Heap', 'Malloc']
_result_class_regex = re.compile(r'^(?P<resultclass>' + r'|'.join(_result_classes) + '):')
- _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
- _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')
+ _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit']
+ _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')
def parse_output(self, output):
test_failed = False
@@ -138,10 +138,7 @@ class PerfTest(object):
score = self._score_regex.match(line)
if score:
key = score.group('key')
- if ', ' in score.group('value'):
- value = [float(number) for number in score.group('value').split(', ')]
- else:
- value = float(score.group('value'))
+ value = float(score.group('value'))
unit = score.group('unit')
name = test_name
if result_class != 'Time':
@@ -211,24 +208,23 @@ class PageLoadingPerfTest(PerfTest):
continue
test_times.append(output.test_time * 1000)
- sorted_test_times = sorted(test_times)
+ test_times = sorted(test_times)
# Compute the mean and variance using a numerically stable algorithm.
squareSum = 0
mean = 0
- valueSum = sum(sorted_test_times)
- for i, time in enumerate(sorted_test_times):
+ valueSum = sum(test_times)
+ for i, time in enumerate(test_times):
delta = time - mean
sweep = i + 1.0
mean += delta / sweep
squareSum += delta * delta * (i / sweep)
middle = int(len(test_times) / 2)
- results = {'values': test_times,
- 'avg': mean,
- 'min': sorted_test_times[0],
- 'max': sorted_test_times[-1],
- 'median': sorted_test_times[middle] if len(sorted_test_times) % 2 else (sorted_test_times[middle - 1] + sorted_test_times[middle]) / 2,
+ results = {'avg': mean,
+ 'min': min(test_times),
+ 'max': max(test_times),
+ 'median': test_times[middle] if len(test_times) % 2 else (test_times[middle - 1] + test_times[middle]) / 2,
'stdev': math.sqrt(squareSum),
'unit': 'ms'}
self.output_statistics(self.test_name(), results, '')
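
For reference, a minimal standalone sketch of the numerically stable running mean/variance update that the hunk above keeps (Welford-style accumulation); summarize and samples are illustrative names, not part of the patch:

import math

def summarize(samples):
    # Sort once so min, max, and median fall out of the ordering,
    # mirroring the patched PageLoadingPerfTest.run() above.
    samples = sorted(samples)
    mean = 0.0
    square_sum = 0.0
    for i, value in enumerate(samples):
        delta = value - mean
        sweep = i + 1.0
        mean += delta / sweep                      # running mean
        square_sum += delta * delta * (i / sweep)  # accumulated squared deviations
    middle = len(samples) // 2
    median = samples[middle] if len(samples) % 2 else (samples[middle - 1] + samples[middle]) / 2.0
    return {'avg': mean,
            'min': samples[0],
            'max': samples[-1],
            'median': median,
            # As in the code above: square root of the raw squared-deviation
            # sum, not a variance normalized by n or n - 1.
            'stdev': math.sqrt(square_sum),
            'unit': 'ms'}

# summarize([i * 1000 for i in range(2, 21)]) gives avg 11000.0, median 11000,
# and stdev math.sqrt(570 * 1000 * 1000), matching the unit test below.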
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
index 27a4bb385..2b35e71ee 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
@@ -50,7 +50,6 @@ class MainTest(unittest.TestCase):
'Ignoring warm-up run (1115)',
'',
'Time:',
- 'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
'avg 1100 ms',
'median 1101 ms',
'stdev 11 ms',
@@ -61,8 +60,7 @@ class MainTest(unittest.TestCase):
try:
test = PerfTest(None, 'some-test', '/path/some-dir/some-test')
self.assertEqual(test.parse_output(output),
- {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms',
- 'values': [i for i in range(1, 20)]}})
+ {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms'}})
finally:
pass
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
@@ -78,7 +76,6 @@ class MainTest(unittest.TestCase):
'some-unrecognizable-line',
'',
'Time:',
- 'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
'avg 1100 ms',
'median 1101 ms',
'stdev 11 ms',
@@ -112,13 +109,12 @@ class TestPageLoadingPerfTest(unittest.TestCase):
def test_run(self):
test = PageLoadingPerfTest(None, 'some-test', '/path/some-dir/some-test')
- driver = TestPageLoadingPerfTest.MockDriver(range(1, 21))
+ driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
output_capture = OutputCapture()
output_capture.capture_output()
try:
self.assertEqual(test.run(driver, None),
- {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': math.sqrt(570 * 1000 * 1000), 'min': 2000, 'unit': 'ms',
- 'values': [i * 1000 for i in range(2, 21)]}})
+ {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': math.sqrt(570 * 1000 * 1000), 'min': 2000, 'unit': 'ms'}})
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(actual_stdout, '')
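
As a quick illustration of what the tightened _score_regex from the first hunk now accepts and rejects (standalone snippet, not from the patch):

import re

_statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit']
_score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')

# A single-number statistics line still parses into key/value/unit groups.
match = _score_regex.match('avg 1100 ms')
assert match.group('key') == 'avg'
assert float(match.group('value')) == 1100.0
assert match.group('unit') == 'ms'

# A comma-separated 'values' line no longer matches at all, which is why the
# tests above drop those lines from their fixture output.
assert _score_regex.match('values 1, 2, 3 ms') is None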
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
index e01b2aedb..1ef3cf07e 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
@@ -191,11 +191,6 @@ class PerfTestsRunner(object):
if not output:
return self.EXIT_CODE_BAD_MERGE
results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'
- else:
- # FIXME: Remove this code once webkit-perf.appspot.com supported "values".
- for result in output['results'].values():
- if isinstance(result, dict) and 'values' in result:
- del result['values']
self._generate_output_files(output_json_path, results_page_path, output)
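
With the shim gone, the merged JSON carries only scalar statistics per test. A sketch of the resulting shape, with values taken from the updated unit-test expectations below (the "branch" and "webkit-revision" entries come from the port/repository setup in those tests):

expected_output_json = {
    "timestamp": 123456789,
    "results": {
        "Bindings/event-target-wrapper": {
            "max": 1510, "avg": 1489.05, "median": 1487,
            "min": 1471, "stdev": 14.46, "unit": "ms"},
        "inspector/pass.html:group_name:test_name": 42,
    },
    "webkit-revision": "5678",
    "branch": "webkit-trunk",
}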
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
index d46d7e73e..4ca9500ac 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
@@ -92,7 +92,6 @@ Ignoring warm-up run (1502)
1471
Time:
-values 1504, 1505, 1510, 1504, 1507, 1509, 1510, 1487, 1488, 1472, 1472, 1488, 1473, 1472, 1475, 1487, 1486, 1486, 1475, 1471 ms
avg 1489.05 ms
median 1487 ms
stdev 14.46 ms
@@ -104,7 +103,6 @@ max 1510 ms
Ignoring warm-up run (1115)
Time:
-values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms
avg 1100 ms
median 1101 ms
stdev 11 ms
@@ -116,7 +114,6 @@ max 1120 ms
Ignoring warm-up run (1115)
Time:
-values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms
avg 1100 ms
median 1101 ms
stdev 11 ms
@@ -124,7 +121,6 @@ min 1080 ms
max 1120 ms
JS Heap:
-values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 bytes
avg 832000 bytes
median 829000 bytes
stdev 15000 bytes
@@ -132,7 +128,6 @@ min 811000 bytes
max 848000 bytes
Malloc:
-values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 bytes
avg 532000 bytes
median 529000 bytes
stdev 13000 bytes
@@ -291,10 +286,9 @@ max 548000 bytes
'Finished: 0.1 s',
'', '']))
results = runner.load_output_json()[0]['results']
- values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
- self.assertEqual(results['Parser/memory-test'], {'min': 1080.0, 'max': 1120.0, 'median': 1101.0, 'stdev': 11.0, 'avg': 1100.0, 'unit': 'ms', 'values': values})
- self.assertEqual(results['Parser/memory-test:JSHeap'], {'min': 811000.0, 'max': 848000.0, 'median': 829000.0, 'stdev': 15000.0, 'avg': 832000.0, 'unit': 'bytes', 'values': values})
- self.assertEqual(results['Parser/memory-test:Malloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes', 'values': values})
+ self.assertEqual(results['Parser/memory-test'], {'min': 1080.0, 'max': 1120.0, 'median': 1101.0, 'stdev': 11.0, 'avg': 1100.0, 'unit': 'ms'})
+ self.assertEqual(results['Parser/memory-test:JSHeap'], {'min': 811000.0, 'max': 848000.0, 'median': 829000.0, 'stdev': 15000.0, 'avg': 832000.0, 'unit': 'bytes'})
+ self.assertEqual(results['Parser/memory-test:Malloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes'})
def _test_run_with_json_output(self, runner, filesystem, upload_suceeds=False, expected_exit_code=0):
filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
@@ -336,12 +330,6 @@ max 548000 bytes
return logs
_event_target_wrapper_and_inspector_results = {
- "Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms",
- "values": [1504, 1505, 1510, 1504, 1507, 1509, 1510, 1487, 1488, 1472, 1472, 1488, 1473, 1472, 1475, 1487, 1486, 1486, 1475, 1471]},
- "inspector/pass.html:group_name:test_name": 42}
-
- # FIXME: Remove this variance once perf-o-matic supported "values".
- _event_target_wrapper_and_inspector_results_without_values = {
"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
"inspector/pass.html:group_name:test_name": 42}
@@ -350,7 +338,7 @@ max 548000 bytes
'--test-results-server=some.host'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
self.assertEqual(runner.load_output_json(), {
- "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values,
+ "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
"webkit-revision": "5678", "branch": "webkit-trunk"})
def test_run_with_description(self):
@@ -359,7 +347,7 @@ max 548000 bytes
self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
self.assertEqual(runner.load_output_json(), {
"timestamp": 123456789, "description": "some description",
- "results": self._event_target_wrapper_and_inspector_results_without_values,
+ "results": self._event_target_wrapper_and_inspector_results,
"webkit-revision": "5678", "branch": "webkit-trunk"})
def create_runner_and_setup_results_template(self, args=[]):
@@ -449,7 +437,7 @@ max 548000 bytes
port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
self.assertEqual(runner.load_output_json(), {
- "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values,
+ "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
"webkit-revision": "5678", "branch": "webkit-trunk", "key": "value"})
def test_run_with_bad_slave_config_json(self):
@@ -468,7 +456,7 @@ max 548000 bytes
port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
self.assertEqual(runner.load_output_json(), {
- "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values,
+ "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
"webkit-revision": "5678", "some-revision": "5678", "branch": "webkit-trunk"})
def test_run_with_upload_json(self):