Diffstat (limited to 'Tools/Scripts/webkitpy/performance_tests')
4 files changed, 120 insertions, 58 deletions
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest.py b/Tools/Scripts/webkitpy/performance_tests/perftest.py
index fdac35b11..32b9d8bc6 100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 # Copyright (C) 2012 Google Inc. All rights reserved.
+# Copyright (C) 2012 Zoltan Horvath, Adobe Systems Incorporated. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
@@ -202,11 +203,40 @@ class ChromiumStylePerfTest(PerfTest):
 
 
 class PageLoadingPerfTest(PerfTest):
+    _FORCE_GC_FILE = 'resources/force-gc.html'
+
     def __init__(self, port, test_name, path_or_url):
         super(PageLoadingPerfTest, self).__init__(port, test_name, path_or_url)
+        self.force_gc_test = self._port.host.filesystem.join(self._port.perf_tests_dir(), self._FORCE_GC_FILE)
+
+    def run_single(self, driver, path_or_url, time_out_ms, should_run_pixel_test=False):
+        # Force GC to prevent pageload noise. See https://bugs.webkit.org/show_bug.cgi?id=98203
+        super(PageLoadingPerfTest, self).run_single(driver, self.force_gc_test, time_out_ms, False)
+        return super(PageLoadingPerfTest, self).run_single(driver, path_or_url, time_out_ms, should_run_pixel_test)
+
+    def calculate_statistics(self, values):
+        sorted_values = sorted(values)
+
+        # Compute the mean and variance using Knuth's online algorithm (has good numerical stability).
+        squareSum = 0
+        mean = 0
+        for i, time in enumerate(sorted_values):
+            delta = time - mean
+            sweep = i + 1.0
+            mean += delta / sweep
+            squareSum += delta * (time - mean)
+
+        middle = int(len(sorted_values) / 2)
+        result = {'avg': mean,
+                  'min': sorted_values[0],
+                  'max': sorted_values[-1],
+                  'median': sorted_values[middle] if len(sorted_values) % 2 else (sorted_values[middle - 1] + sorted_values[middle]) / 2,
+                  'stdev': math.sqrt(squareSum / (len(sorted_values) - 1))}
+        return result
 
     def run(self, driver, time_out_ms):
-        test_times = []
+        results = {}
+        results.setdefault(self.test_name(), {'unit': 'ms', 'values': []})
 
         for i in range(0, 20):
             output = self.run_single(driver, self.path_or_url(), time_out_ms)
@@ -214,30 +244,25 @@ class PageLoadingPerfTest(PerfTest):
                 return None
             if i == 0:
                 continue
-            test_times.append(output.test_time * 1000)
 
-        sorted_test_times = sorted(test_times)
+            results[self.test_name()]['values'].append(output.test_time * 1000)
 
-        # Compute the mean and variance using a numerically stable algorithm.
-        squareSum = 0
-        mean = 0
-        valueSum = sum(sorted_test_times)
-        for i, time in enumerate(sorted_test_times):
-            delta = time - mean
-            sweep = i + 1.0
-            mean += delta / sweep
-            squareSum += delta * delta * (i / sweep)
-
-        middle = int(len(test_times) / 2)
-        results = {'values': test_times,
-                   'avg': mean,
-                   'min': sorted_test_times[0],
-                   'max': sorted_test_times[-1],
-                   'median': sorted_test_times[middle] if len(sorted_test_times) % 2 else (sorted_test_times[middle - 1] + sorted_test_times[middle]) / 2,
-                   'stdev': math.sqrt(squareSum),
-                   'unit': 'ms'}
-        self.output_statistics(self.test_name(), results, '')
-        return {self.test_name(): results}
+            if not output.measurements:
+                continue
+
+            for result_class, result in output.measurements.items():
+                name = self.test_name() + ':' + result_class
+                if not name in results:
+                    results.setdefault(name, {'values': []})
+                results[name]['values'].append(result)
+                if result_class == 'Malloc' or result_class == 'JSHeap':
+                    results[name]['unit'] = 'bytes'
+
+        for result_class in results.keys():
+            results[result_class].update(self.calculate_statistics(results[result_class]['values']))
+            self.output_statistics(result_class, results[result_class], '')
+
+        return results
 
 
 class ReplayServer(object):
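The calculate_statistics() hunk above folds each sample into a running mean and a running sum of squared deviations (a Welford/Knuth-style single-pass update), then divides by (n - 1) for the sample standard deviation. A minimal standalone sketch of that update; the function name is illustrative and not part of the patch:

import math

def online_mean_and_stdev(values):
    # Same update shape as calculate_statistics() above: keep a running mean
    # and a running sum of squared deviations, then take the sample standard
    # deviation by dividing the accumulated squares by (n - 1).
    mean = 0.0
    square_sum = 0.0
    for i, value in enumerate(values):
        delta = value - mean
        mean += delta / (i + 1.0)
        square_sum += delta * (value - mean)
    return mean, math.sqrt(square_sum / (len(values) - 1))

Feeding it the timing values used by the unit tests below, [i * 1000 for i in range(2, 21)], gives (11000.0, 5627.314338711378), the avg/stdev pair those tests assert.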
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
index 27a4bb385..4410903e9 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
@@ -43,6 +43,10 @@ from webkitpy.performance_tests.perftest import PerfTestFactory
 from webkitpy.performance_tests.perftest import ReplayPerfTest
 
 
+class MockPort(TestPort):
+    def __init__(self, custom_run_test=None):
+        super(MockPort, self).__init__(host=MockHost(), custom_run_test=custom_run_test)
+
 class MainTest(unittest.TestCase):
     def test_parse_output(self):
         output = DriverOutput('\n'.join([
@@ -98,39 +102,69 @@ class MainTest(unittest.TestCase):
 
 class TestPageLoadingPerfTest(unittest.TestCase):
     class MockDriver(object):
-        def __init__(self, values):
+        def __init__(self, values, test, measurements=None):
             self._values = values
             self._index = 0
+            self._test = test
+            self._measurements = measurements
 
         def run_test(self, input, stop_when_done):
+            if input.test_name == self._test.force_gc_test:
+                return
             value = self._values[self._index]
             self._index += 1
             if isinstance(value, str):
                 return DriverOutput('some output', image=None, image_hash=None, audio=None, error=value)
             else:
-                return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=self._values[self._index - 1])
+                return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=self._values[self._index - 1], measurements=self._measurements)
 
     def test_run(self):
-        test = PageLoadingPerfTest(None, 'some-test', '/path/some-dir/some-test')
-        driver = TestPageLoadingPerfTest.MockDriver(range(1, 21))
+        port = MockPort()
+        test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
+        driver = TestPageLoadingPerfTest.MockDriver(range(1, 21), test)
         output_capture = OutputCapture()
        output_capture.capture_output()
         try:
             self.assertEqual(test.run(driver, None),
-                {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': math.sqrt(570 * 1000 * 1000), 'min': 2000, 'unit': 'ms',
+                {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
                     'values': [i * 1000 for i in range(2, 21)]}})
         finally:
             actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
         self.assertEqual(actual_stdout, '')
         self.assertEqual(actual_stderr, '')
-        self.assertEqual(actual_logs, 'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 23874.6727726 ms, min= 2000 ms, max= 20000 ms\n')
+        self.assertEqual(actual_logs, 'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 5627.31433871 ms, min= 2000 ms, max= 20000 ms\n')
+
+    def test_run_with_memory_output(self):
+        port = MockPort()
+        test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
+        memory_results = {'Malloc': 10, 'JSHeap': 5}
+        self.maxDiff = None
+        driver = TestPageLoadingPerfTest.MockDriver(range(1, 21), test, memory_results)
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            self.assertEqual(test.run(driver, None),
+                {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
+                    'values': [i * 1000 for i in range(2, 21)]},
+                'some-test:Malloc': {'max': 10, 'avg': 10.0, 'median': 10, 'min': 10, 'stdev': 0.0, 'unit': 'bytes',
+                    'values': [10] * 19},
+                'some-test:JSHeap': {'max': 5, 'avg': 5.0, 'median': 5, 'min': 5, 'stdev': 0.0, 'unit': 'bytes',
+                    'values': [5] * 19}})
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 5627.31433871 ms, min= 2000 ms, max= 20000 ms\n' +
+            'RESULT some-test: Malloc= 10.0 bytes\nmedian= 10 bytes, stdev= 0.0 bytes, min= 10 bytes, max= 10 bytes\n' +
+            'RESULT some-test: JSHeap= 5.0 bytes\nmedian= 5 bytes, stdev= 0.0 bytes, min= 5 bytes, max= 5 bytes\n')
 
     def test_run_with_bad_output(self):
         output_capture = OutputCapture()
         output_capture.capture_output()
         try:
-            test = PageLoadingPerfTest(None, 'some-test', '/path/some-dir/some-test')
-            driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 'some error', 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
+            port = MockPort()
+            test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
+            driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 'some error', 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], test)
             self.assertEqual(test.run(driver, None), None)
         finally:
             actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
@@ -141,7 +175,7 @@ class TestPageLoadingPerfTest(unittest.TestCase):
 
 class TestReplayPerfTest(unittest.TestCase):
 
-    class ReplayTestPort(TestPort):
+    class ReplayTestPort(MockPort):
         def __init__(self, custom_run_test=None):
 
             class ReplayTestDriver(TestDriver):
@@ -149,7 +183,7 @@ class TestReplayPerfTest(unittest.TestCase):
                     return custom_run_test(text_input, stop_when_done) if custom_run_test else None
 
             self._custom_driver_class = ReplayTestDriver
-            super(self.__class__, self).__init__(host=MockHost())
+            super(self.__class__, self).__init__()
 
         def _driver_class(self):
             return self._custom_driver_class
@@ -179,6 +213,9 @@ class TestReplayPerfTest(unittest.TestCase):
         loaded_pages = []
 
         def run_test(test_input, stop_when_done):
+            if test_input.test_name == test.force_gc_test:
+                loaded_pages.append(test_input)
+                return
             if test_input.test_name != "about:blank":
                 self.assertEqual(test_input.test_name, 'http://some-test/')
             loaded_pages.append(test_input)
@@ -196,8 +233,9 @@ class TestReplayPerfTest(unittest.TestCase):
         finally:
             actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
 
-        self.assertEqual(len(loaded_pages), 1)
-        self.assertEqual(loaded_pages[0].test_name, 'http://some-test/')
+        self.assertEqual(len(loaded_pages), 2)
+        self.assertEqual(loaded_pages[0].test_name, test.force_gc_test)
+        self.assertEqual(loaded_pages[1].test_name, 'http://some-test/')
         self.assertEqual(actual_stdout, '')
         self.assertEqual(actual_stderr, '')
         self.assertEqual(actual_logs, '')
@@ -262,8 +300,9 @@ class TestReplayPerfTest(unittest.TestCase):
         finally:
             actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
 
-        self.assertEqual(len(loaded_pages), 1)
-        self.assertEqual(loaded_pages[0].test_name, 'http://some-test/')
+        self.assertEqual(len(loaded_pages), 2)
+        self.assertEqual(loaded_pages[0].test_name, test.force_gc_test)
+        self.assertEqual(loaded_pages[1].test_name, 'http://some-test/')
         self.assertEqual(actual_stdout, '')
         self.assertEqual(actual_stderr, '')
         self.assertEqual(actual_logs, 'error: some-test.replay\nsome error\n')
@@ -316,15 +355,15 @@ class TestReplayPerfTest(unittest.TestCase):
 
 class TestPerfTestFactory(unittest.TestCase):
     def test_regular_test(self):
-        test = PerfTestFactory.create_perf_test(None, 'some-dir/some-test', '/path/some-dir/some-test')
+        test = PerfTestFactory.create_perf_test(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
         self.assertEqual(test.__class__, PerfTest)
 
     def test_inspector_test(self):
-        test = PerfTestFactory.create_perf_test(None, 'inspector/some-test', '/path/inspector/some-test')
+        test = PerfTestFactory.create_perf_test(MockPort(), 'inspector/some-test', '/path/inspector/some-test')
         self.assertEqual(test.__class__, ChromiumStylePerfTest)
 
     def test_page_loading_test(self):
-        test = PerfTestFactory.create_perf_test(None, 'PageLoad/some-test', '/path/PageLoad/some-test')
+        test = PerfTestFactory.create_perf_test(MockPort(), 'PageLoad/some-test', '/path/PageLoad/some-test')
         self.assertEqual(test.__class__, PageLoadingPerfTest)
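The reworked run() keys timing values under the plain test name and each DriverOutput measurement under 'test:Metric', which is what test_run_with_memory_output above checks. A rough standalone sketch of that grouping step; group_results() and the samples list are hypothetical stand-ins for the per-iteration driver outputs:

def group_results(test_name, samples):
    # samples: one (test_time_ms, measurements) pair per iteration, where
    # measurements is a dict such as {'Malloc': 10, 'JSHeap': 5} or None.
    results = {test_name: {'unit': 'ms', 'values': []}}
    for test_time_ms, measurements in samples:
        results[test_name]['values'].append(test_time_ms)
        for metric, value in (measurements or {}).items():
            entry = results.setdefault('%s:%s' % (test_name, metric), {'values': []})
            entry['values'].append(value)
            if metric in ('Malloc', 'JSHeap'):
                entry['unit'] = 'bytes'  # memory metrics are reported in bytes
    return results

For example, group_results('some-test', [(1000.0, {'Malloc': 10, 'JSHeap': 5})]) yields the keys 'some-test', 'some-test:Malloc' and 'some-test:JSHeap', mirroring the dictionary expected in test_run_with_memory_output.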
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
index c34d0b3e4..42e0d96e1 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
@@ -29,19 +29,17 @@
 """Run Inspector's perf tests in perf mode."""
 
+import os
 import json
 import logging
 import optparse
-import re
-import sys
 import time
 
 from webkitpy.common import find_files
+from webkitpy.common.checkout.scm.detection import SCMDetector
 from webkitpy.common.host import Host
 from webkitpy.common.net.file_uploader import FileUploader
-from webkitpy.layout_tests.views import printing
 from webkitpy.performance_tests.perftest import PerfTestFactory
-from webkitpy.performance_tests.perftest import ReplayPerfTest
 
 _log = logging.getLogger(__name__)
 
@@ -65,7 +63,7 @@ class PerfTestsRunner(object):
         else:
             self._host = Host()
             self._port = self._host.port_factory.get(self._options.platform, self._options)
-        self._host._initialize_scm()
+        self._host.initialize_scm()
         self._webkit_base_dir_len = len(self._port.webkit_base())
         self._base_path = self._port.perf_tests_dir()
         self._results = {}
@@ -73,6 +71,9 @@ class PerfTestsRunner(object):
 
     @staticmethod
     def _parse_args(args=None):
+        def _expand_path(option, opt_str, value, parser):
+            path = os.path.expandvars(os.path.expanduser(value))
+            setattr(parser.values, option.dest, path)
         perf_option_list = [
             optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                 help='Set the configuration to Debug'),
@@ -98,15 +99,12 @@ class PerfTestsRunner(object):
                 help="Pause before running the tests to let user attach a performance monitor."),
             optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                 help="Do no generate results JSON and results page."),
-            optparse.make_option("--output-json-path",
+            optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
                 help="Path to generate a JSON file at; may contain previous results if it already exists."),
             optparse.make_option("--reset-results", action="store_true",
                 help="Clears the content in the generated JSON file before adding the results."),
-            optparse.make_option("--slave-config-json-path",
+            optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
                 help="Only used on bots. Path to a slave configuration file."),
-            optparse.make_option("--source-json-path", dest="slave_config_json_path",
-                # FIXME: Remove this option once build.webkit.org is updated to use --slave-config-json-path.
-                help="Deprecated. Overrides --slave-config-json-path."),
             optparse.make_option("--description",
                 help="Add a description to the output JSON file if one is generated"),
             optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
@@ -181,11 +179,6 @@ class PerfTestsRunner(object):
     def _generate_and_show_results(self):
         options = self._options
-        if options.test_results_server:
-            # Remove this code once build.webkit.org started using --no-show-results and --reset-results
-            options.reset_results = True
-            options.show_results = False
-
         output_json_path = self._output_json_path()
         output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)
 
@@ -213,7 +206,8 @@ class PerfTestsRunner(object):
         if description:
             contents['description'] = description
         for (name, path) in self._port.repository_paths():
-            contents[name + '-revision'] = self._host.scm().svn_revision(path)
+            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
+            contents[name + '-revision'] = scm.svn_revision(path)
 
         # FIXME: Add --branch or auto-detect the branch we're in
         for key, value in {'timestamp': int(timestamp), 'branch': self._default_branch, 'platform': platform,
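The _expand_path callback added to _parse_args() above stores path options with '~' and environment variables expanded before they are used. A self-contained sketch of the same optparse callback pattern; the one-option parser built here is illustrative, not the runner's real option list:

import optparse
import os

def _expand_path(option, opt_str, value, parser):
    # Expand '~user' and '$VAR' before storing the value on parser.values.
    setattr(parser.values, option.dest, os.path.expandvars(os.path.expanduser(value)))

parser = optparse.OptionParser()
parser.add_option('--output-json-path', action='callback', callback=_expand_path,
                  type='str', dest='output_json_path')
options, args = parser.parse_args(['--output-json-path=~/perf-results/output.json'])
print(options.output_json_path)  # e.g. /home/<user>/perf-results/output.json

With action='callback', optparse converts the value but leaves storing it to the callback, which is why the setattr() call is needed.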
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
index d3de7b3df..6119c61d3 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
@@ -473,7 +473,7 @@ max 548000 bytes
 
     def test_run_with_slave_config_json(self):
         runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
-            '--source-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
+            '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
         port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
         self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
         self.assertEqual(runner.load_output_json(), [{
@@ -625,8 +625,10 @@ max 548000 bytes
             '--builder-name', 'webkit-mac-1',
             '--build-number=56',
             '--time-out-ms=42',
+            '--no-show-results',
+            '--reset-results',
             '--output-json-path=a/output.json',
-            '--source-json-path=a/source.json',
+            '--slave-config-json-path=a/source.json',
             '--test-results-server=somehost',
             '--debug'])
         self.assertEqual(options.build, True)
@@ -636,6 +638,8 @@ max 548000 bytes
         self.assertEqual(options.build_number, '56')
         self.assertEqual(options.time_out_ms, '42')
         self.assertEqual(options.configuration, 'Debug')
+        self.assertEqual(options.show_results, False)
+        self.assertEqual(options.reset_results, True)
         self.assertEqual(options.output_json_path, 'a/output.json')
         self.assertEqual(options.slave_config_json_path, 'a/source.json')
         self.assertEqual(options.test_results_server, 'somehost')
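The _generate_results_dict() change in perftestsrunner.py above records one revision per repository, preferring an SCM detected for that specific path and falling back to the host's default SCM. A sketch of that lookup, assuming it runs inside a WebKit checkout with webkitpy on the import path; the (name, path) pair is a hypothetical placeholder for what port.repository_paths() returns:

from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.host import Host

host = Host()
host.initialize_scm()
for name, path in [('WebKit', '/path/to/WebKit')]:  # placeholder for port.repository_paths()
    # Prefer the SCM that owns this particular path; otherwise reuse the
    # checkout-wide SCM object the host already initialized.
    scm = SCMDetector(host.filesystem, host.executive).detect_scm_system(path) or host.scm()
    print('%s-revision: %s' % (name, scm.svn_revision(path)))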