author     Simon Hausmann <simon.hausmann@nokia.com>    2012-08-21 10:57:44 +0200
committer  Simon Hausmann <simon.hausmann@nokia.com>    2012-08-21 10:57:44 +0200
commit     5ef7c8a6a70875d4430752d146bdcb069605d71d
tree       f6256640b6c46d7da221435803cae65326817ba2 /Tools/Scripts/webkitpy
parent     decad929f578d8db641febc8740649ca6c574638
download   qtwebkit-5ef7c8a6a70875d4430752d146bdcb069605d71d.tar.gz
Imported WebKit commit 356d83016b090995d08ad568f2d2c243aa55e831 (http://svn.webkit.org/repository/webkit/trunk@126147)
New snapshot including various build fixes for newer Qt 5
Diffstat (limited to 'Tools/Scripts/webkitpy')
28 files changed, 237 insertions, 161 deletions
diff --git a/Tools/Scripts/webkitpy/common/config/build.py b/Tools/Scripts/webkitpy/common/config/build.py
index fcb5e62c3..2ecacc7ad 100644
--- a/Tools/Scripts/webkitpy/common/config/build.py
+++ b/Tools/Scripts/webkitpy/common/config/build.py
@@ -37,7 +37,7 @@ def _should_file_trigger_build(target_platform, file):
     # and start using it for their bots. Someone familiar with each platform
     # will have to figure out what the right set of directories/patterns is for
     # that platform.
-    assert(target_platform in ("mac-leopard", "mac-lion", "mac-snowleopard", "win"))
+    assert(target_platform in ("mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"))
 
     directories = [
         # Directories that shouldn't trigger builds on any bots.
@@ -68,9 +68,9 @@ def _should_file_trigger_build(target_platform, file):
         ("gtk", ["gtk"]),
         ("mac", ["chromium-mac", "mac"]),
         ("mac-leopard", ["mac-leopard"]),
-        ("mac-lion", ["mac", "win"]),
+        ("mac-lion", ["mac-leopard", "mac-lion", "mac-snowleopard", "win"]),
         ("mac-snowleopard", ["mac-leopard", "mac-snowleopard"]),
-        ("mac-wk2", ["mac-lion", "mac-snowleopard", "win"]),
+        ("mac-wk2", ["mac-lion", "mac-snowleopard", "mac-mountainlion", "win"]),
         ("objc", ["mac"]),
         ("qt", ["qt"]),
         ("skia", ["chromium"]),

diff --git a/Tools/Scripts/webkitpy/common/config/build_unittest.py b/Tools/Scripts/webkitpy/common/config/build_unittest.py
index bdc340489..c496179e4 100644
--- a/Tools/Scripts/webkitpy/common/config/build_unittest.py
+++ b/Tools/Scripts/webkitpy/common/config/build_unittest.py
@@ -31,7 +31,7 @@ class ShouldBuildTest(unittest.TestCase):
         (["GNUmakefile.am", "Source/WebCore/GNUmakefile.am"], ["gtk"]),
         (["Websites/bugs.webkit.org/foo", "Source/WebCore/bar"], ["*"]),
         (["Websites/bugs.webkit.org/foo"], []),
-        (["Source/JavaScriptCore/JavaScriptCore.xcodeproj/foo"], ["mac-leopard", "mac-lion", "mac-snowleopard"]),
+        (["Source/JavaScriptCore/JavaScriptCore.xcodeproj/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
         (["Source/JavaScriptCore/JavaScriptCore.vcproj/foo", "Source/WebKit2/win/WebKit2.vcproj", "Source/WebKit/win/WebKit.sln", "Tools/WebKitTestRunner/Configurations/WebKitTestRunnerCommon.vsprops"], ["win"]),
         (["LayoutTests/platform/mac/foo", "Source/WebCore/bar"], ["*"]),
         (["LayoutTests/foo"], ["*"]),
@@ -41,17 +41,17 @@ class ShouldBuildTest(unittest.TestCase):
         (["LayoutTests/platform/mac-leopard/foo"], ["mac-leopard"]),
         (["LayoutTests/platform/mac-lion/foo"], ["mac-leopard", "mac-lion", "mac-snowleopard", "win"]),
         (["LayoutTests/platform/mac-snowleopard/foo"], ["mac-leopard", "mac-snowleopard"]),
-        (["LayoutTests/platform/mac-wk2/Skipped"], ["mac-lion", "mac-snowleopard", "win"]),
-        (["LayoutTests/platform/mac/foo"], ["mac-leopard", "mac-lion", "mac-snowleopard", "win"]),
+        (["LayoutTests/platform/mac-wk2/Skipped"], ["mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]),
+        (["LayoutTests/platform/mac/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]),
         (["LayoutTests/platform/win-xp/foo"], ["win"]),
         (["LayoutTests/platform/win-wk2/foo"], ["win"]),
         (["LayoutTests/platform/win/foo"], ["win"]),
-        (["Source/WebCore.exp.in", "Source/WebKit/mac/WebKit.exp"], ["mac-leopard", "mac-lion", "mac-snowleopard"]),
-        (["Source/WebCore/mac/foo"], ["chromium-mac", "mac-leopard", "mac-lion", "mac-snowleopard"]),
+        (["Source/WebCore.exp.in", "Source/WebKit/mac/WebKit.exp"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
+        (["Source/WebCore/mac/foo"], ["chromium-mac", "mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
         (["Source/WebCore/win/foo"], ["chromium-win", "win"]),
-        (["Source/WebCore/platform/graphics/gpu/foo"], ["mac-leopard", "mac-lion", "mac-snowleopard"]),
+        (["Source/WebCore/platform/graphics/gpu/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
         (["Source/WebCore/platform/wx/wxcode/win/foo"], []),
-        (["Source/WebCore/rendering/RenderThemeMac.mm", "Source/WebCore/rendering/RenderThemeMac.h"], ["mac-leopard", "mac-lion", "mac-snowleopard"]),
+        (["Source/WebCore/rendering/RenderThemeMac.mm", "Source/WebCore/rendering/RenderThemeMac.h"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
         (["Source/WebCore/rendering/RenderThemeChromiumLinux.h"], ["chromium-linux"]),
         (["Source/WebCore/rendering/RenderThemeWinCE.h"], []),
         (["Tools/BuildSlaveSupport/build.webkit.org-config/public_html/LeaksViewer/LeaksViewer.js"], []),
@@ -61,7 +61,7 @@ class ShouldBuildTest(unittest.TestCase):
         for files, platforms in self._should_build_tests:
             # FIXME: We should test more platforms here once
             # build._should_file_trigger_build is implemented for them.
-            for platform in ["mac-leopard", "mac-lion", "mac-snowleopard", "win"]:
+            for platform in ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]:
                 should_build = platform in platforms or "*" in platforms
                 self.assertEqual(build.should_build(platform, files), should_build, "%s should%s have built but did%s (files: %s)" % (platform, "" if should_build else "n't", "n't" if should_build else "", str(files)))
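The build.py hunks above extend a path-fragment-to-platforms map so that mac changes also trigger the new mac-mountainlion bots, and the unit test gains the same platform everywhere the map is consulted. A minimal, self-contained sketch of the triggering idea (the map contents below are illustrative, not webkitpy's real data):

    # Sketch of the directory-to-platform triggering scheme behind
    # build._should_file_trigger_build; TRIGGER_MAP is made up for illustration.
    TRIGGER_MAP = [
        # (path fragment, platforms whose bots should rebuild; "*" means all)
        ("LayoutTests/platform/mac-wk2/", ["mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]),
        ("LayoutTests/platform/win/", ["win"]),
        ("Source/WebCore/", ["*"]),
    ]

    def platforms_affected_by(changed_file):
        """Return the set of platforms whose bots a changed file should trigger."""
        affected = set()
        for fragment, platforms in TRIGGER_MAP:
            if fragment in changed_file:
                affected.update(platforms)
        return affected

    assert "win" in platforms_affected_by("LayoutTests/platform/win/foo-expected.txt")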
"mac-mountainlion", "mac-snowleopard"]), (["Source/WebCore/win/foo"], ["chromium-win", "win"]), - (["Source/WebCore/platform/graphics/gpu/foo"], ["mac-leopard", "mac-lion", "mac-snowleopard"]), + (["Source/WebCore/platform/graphics/gpu/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]), (["Source/WebCore/platform/wx/wxcode/win/foo"], []), - (["Source/WebCore/rendering/RenderThemeMac.mm", "Source/WebCore/rendering/RenderThemeMac.h"], ["mac-leopard", "mac-lion", "mac-snowleopard"]), + (["Source/WebCore/rendering/RenderThemeMac.mm", "Source/WebCore/rendering/RenderThemeMac.h"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]), (["Source/WebCore/rendering/RenderThemeChromiumLinux.h"], ["chromium-linux"]), (["Source/WebCore/rendering/RenderThemeWinCE.h"], []), (["Tools/BuildSlaveSupport/build.webkit.org-config/public_html/LeaksViewer/LeaksViewer.js"], []), @@ -61,7 +61,7 @@ class ShouldBuildTest(unittest.TestCase): for files, platforms in self._should_build_tests: # FIXME: We should test more platforms here once # build._should_file_trigger_build is implemented for them. - for platform in ["mac-leopard", "mac-lion", "mac-snowleopard", "win"]: + for platform in ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]: should_build = platform in platforms or "*" in platforms self.assertEqual(build.should_build(platform, files), should_build, "%s should%s have built but did%s (files: %s)" % (platform, "" if should_build else "n't", "n't" if should_build else "", str(files))) diff --git a/Tools/Scripts/webkitpy/common/config/committers.py b/Tools/Scripts/webkitpy/common/config/committers.py index ddfddf9a9..4c2216289 100644 --- a/Tools/Scripts/webkitpy/common/config/committers.py +++ b/Tools/Scripts/webkitpy/common/config/committers.py @@ -131,6 +131,7 @@ contributors_who_are_not_committers = [ Contributor("Eric Penner", "epenner@chromium.org", "epenner"), Contributor("Felician Marton", ["felician@inf.u-szeged.hu", "marton.felician.zoltan@stud.u-szeged.hu"], "Felician"), Contributor("Finnur Thorarinsson", ["finnur@chromium.org", "finnur.webkit@gmail.com"], "finnur"), + Contributor("Forms Bugs", "forms-bugs@chromium.org"), Contributor("Grace Kloba", "klobag@chromium.org", "klobag"), Contributor("Greg Simon", "gregsimon@chromium.org", "gregsimon"), Contributor("Gregg Tavares", ["gman@google.com", "gman@chromium.org"], "gman"), @@ -143,6 +144,7 @@ contributors_who_are_not_committers = [ Contributor("John Bauman", ["jbauman@chromium.org", "jbauman@google.com"], "jbauman"), Contributor("John Mellor", "johnme@chromium.org", "johnme"), Contributor("Kulanthaivel Palanichamy", "kulanthaivel@codeaurora.org", "kvel"), + Contributor("Kiran Muppala", "cmuppala@apple.com", "kiranm"), Contributor(u"Michael Br\u00fcning", "michael.bruning@nokia.com", "mibrunin"), Contributor("Mihai Balan", "mibalan@adobe.com", "miChou"), Contributor("Min Qin", "qinmin@chromium.org"), @@ -153,6 +155,7 @@ contributors_who_are_not_committers = [ Contributor("Pravin D", "pravind.2k4@gmail.com", 'pravind'), Contributor("Radar WebKit Bug Importer", "webkit-bug-importer@group.apple.com"), Contributor("Raul Hudea", "rhudea@adobe.com", "rhudea"), + Contributor("Roger Fong", "roger_fong@apple.com", "rfong"), Contributor("Roland Takacs", "rtakacs@inf.u-szeged.hu", "rtakacs"), Contributor(u"Sami Ky\u00f6stil\u00e4", "skyostil@chromium.org", "skyostil"), Contributor("Szilard Ledan-Muntean", "szledan@inf.u-szeged.hu", "szledan"), @@ -161,6 +164,7 @@ contributors_who_are_not_committers 
= [ Contributor("Terry Anderson", "tdanderson@chromium.org", "tdanderson"), Contributor("Tien-Ren Chen", "trchen@chromium.org", "trchen"), Contributor("WebKit Review Bot", "webkit.review.bot@gmail.com", "sheriff-bot"), + Contributor("Web Components Team", "webcomponents-bugzilla@chromium.org"), Contributor("Wyatt Carss", ["wcarss@chromium.org", "wcarss@google.com"], "wcarss"), Contributor("Zeev Lieber", "zlieber@chromium.org"), Contributor("Zoltan Arvai", "zarvai@inf.u-szeged.hu", "azbest_hu"), diff --git a/Tools/Scripts/webkitpy/common/config/ports.py b/Tools/Scripts/webkitpy/common/config/ports.py index 1d76b4218..884380e65 100644 --- a/Tools/Scripts/webkitpy/common/config/ports.py +++ b/Tools/Scripts/webkitpy/common/config/ports.py @@ -160,6 +160,7 @@ class EflPort(DeprecatedPort): def build_webkit_command(self, build_style=None): command = super(EflPort, self).build_webkit_command(build_style=build_style) command.append("--efl") + command.append("--update-efl") command.append(super(EflPort, self).makeArgs()) return command diff --git a/Tools/Scripts/webkitpy/common/config/ports_unittest.py b/Tools/Scripts/webkitpy/common/config/ports_unittest.py index df5bf7352..272052339 100644 --- a/Tools/Scripts/webkitpy/common/config/ports_unittest.py +++ b/Tools/Scripts/webkitpy/common/config/ports_unittest.py @@ -46,6 +46,11 @@ class DeprecatedPortTest(unittest.TestCase): self.assertEquals(GtkPort().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--gtk", "--update-gtk", DeprecatedPort().makeArgs()]) self.assertEquals(GtkPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--gtk", "--update-gtk", DeprecatedPort().makeArgs()]) + def test_efl_port(self): + self.assertEquals(EflPort().flag(), "--port=efl") + self.assertEquals(EflPort().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--efl", "--update-efl", DeprecatedPort().makeArgs()]) + self.assertEquals(EflPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--efl", "--update-efl", DeprecatedPort().makeArgs()]) + def test_qt_port(self): self.assertEquals(QtPort().flag(), "--port=qt") self.assertEquals(QtPort().run_webkit_tests_command(), DeprecatedPort().script_shell_command("run-webkit-tests")) diff --git a/Tools/Scripts/webkitpy/common/config/watchlist b/Tools/Scripts/webkitpy/common/config/watchlist index e5c9c2776..ce7b7fe9a 100755 --- a/Tools/Scripts/webkitpy/common/config/watchlist +++ b/Tools/Scripts/webkitpy/common/config/watchlist @@ -212,7 +212,9 @@ "Battery": { "filename": r"Source/WebCore/Modules/battery", }, - + "WTF": { + "filename": r"Source/WTF/wtf", + }, }, "CC_RULES": { # Note: All email addresses listed must be registered with bugzilla. 
diff --git a/Tools/Scripts/webkitpy/common/config/watchlist b/Tools/Scripts/webkitpy/common/config/watchlist
index e5c9c2776..ce7b7fe9a 100755
--- a/Tools/Scripts/webkitpy/common/config/watchlist
+++ b/Tools/Scripts/webkitpy/common/config/watchlist
@@ -212,7 +212,9 @@
         "Battery": {
             "filename": r"Source/WebCore/Modules/battery",
         },
-
+        "WTF": {
+            "filename": r"Source/WTF/wtf",
+        },
     },
     "CC_RULES": {
         # Note: All email addresses listed must be registered with bugzilla.
@@ -257,6 +259,7 @@
         "WebIDL": [ "abarth@webkit.org", "ojan@chromium.org" ],
         "WebKitGTKTranslations": [ "gns@gnome.org", "mrobinson@webkit.org" ],
         "webkitpy": [ "abarth@webkit.org", "ojan@chromium.org", "dpranke@chromium.org" ],
+        "WTF": [ "benjamin@webkit.org",],
     },
     "MESSAGE_RULES": {
         "ChromiumPublicApi": [ "Please wait for approval from abarth@webkit.org, dglazkov@chromium.org, "

diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
index ae3422561..a9df942da 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
@@ -315,12 +315,19 @@ class Worker(object):
         test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input)
 
     def _run_test(self, test_input):
+        self._batch_count += 1
+
+        stop_when_done = False
+        if self._batch_size > 0 and self._batch_count >= self._batch_size:
+            self._batch_count = 0
+            stop_when_done = True
+
         self._update_test_input(test_input)
         test_timeout_sec = self._timeout(test_input)
         start = time.time()
         self._caller.post('started_test', test_input, test_timeout_sec)
 
-        result = self._run_test_with_timeout(test_input, test_timeout_sec)
+        result = self._run_test_with_timeout(test_input, test_timeout_sec, stop_when_done)
 
         elapsed_time = time.time() - start
         self._caller.post('finished_test', result, elapsed_time)
@@ -359,13 +366,12 @@ class Worker(object):
             _log.debug("%s killing driver" % self._name)
             driver.stop()
 
-    def _run_test_with_timeout(self, test_input, timeout):
+    def _run_test_with_timeout(self, test_input, timeout, stop_when_done):
         if self._options.run_singly:
-            return self._run_test_in_another_thread(test_input, timeout)
-        return self._run_test_in_this_thread(test_input)
+            return self._run_test_in_another_thread(test_input, timeout, stop_when_done)
+        return self._run_test_in_this_thread(test_input, stop_when_done)
 
     def _clean_up_after_test(self, test_input, result):
-        self._batch_count += 1
         test_name = test_input.test_name
         self._tests_run_file.write(test_name + "\n")
@@ -385,11 +391,7 @@ class Worker(object):
         else:
             _log.debug("%s %s passed" % (self._name, test_name))
 
-        if self._batch_size > 0 and self._batch_count >= self._batch_size:
-            self._kill_driver()
-            self._batch_count = 0
-
-    def _run_test_in_another_thread(self, test_input, thread_timeout_sec):
+    def _run_test_in_another_thread(self, test_input, thread_timeout_sec, stop_when_done):
         """Run a test in a separate thread, enforcing a hard time limit.
 
         Since we can only detect the termination of a thread, not any internal
@@ -412,7 +414,7 @@ class Worker(object):
                 self.result = None
 
             def run(self):
-                self.result = worker._run_single_test(driver, test_input)
+                self.result = worker._run_single_test(driver, test_input, stop_when_done)
 
         thread = SingleTestThread()
         thread.start()
@@ -435,7 +437,7 @@ class Worker(object):
             result = test_results.TestResult(test_input.test_name, failures=[], test_run_time=0)
         return result
 
-    def _run_test_in_this_thread(self, test_input):
+    def _run_test_in_this_thread(self, test_input, stop_when_done):
         """Run a single test file using a shared DumpRenderTree process.
 
         Args:
@@ -447,11 +449,11 @@ class Worker(object):
             self._kill_driver()
         if not self._driver:
             self._driver = self._port.create_driver(self._worker_number)
-        return self._run_single_test(self._driver, test_input)
+        return self._run_single_test(self._driver, test_input, stop_when_done)
 
-    def _run_single_test(self, driver, test_input):
+    def _run_single_test(self, driver, test_input, stop_when_done):
         return single_test_runner.run_single_test(self._port, self._options,
-            test_input, driver, self._name)
+            test_input, driver, self._name, stop_when_done)
 
 
 class TestShard(object):
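The Worker changes move driver recycling out of _clean_up_after_test(), where the driver used to be killed after the fact, and into _run_test(): the worker now decides up front whether a test is the last of its batch and threads stop_when_done down to the driver so it can shut down cleanly as part of the run. A minimal sketch of that control flow (the Driver here is a stand-in, not the real port driver):

    class Worker(object):
        def __init__(self, driver, batch_size):
            self._driver = driver          # stand-in driver object
            self._batch_size = batch_size  # 0 means "never restart the driver"
            self._batch_count = 0

        def run_test(self, test_input):
            # Decide *before* running whether this test closes out the batch.
            self._batch_count += 1
            stop_when_done = False
            if self._batch_size > 0 and self._batch_count >= self._batch_size:
                self._batch_count = 0
                stop_when_done = True
            # The driver can now stop itself cleanly and hand back any
            # remaining output, instead of being killed afterwards.
            return self._driver.run_test(test_input, stop_when_done)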
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
index b36130ded..7379d97c3 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
@@ -41,15 +41,15 @@ from webkitpy.layout_tests.models.test_results import TestResult
 _log = logging.getLogger(__name__)
 
 
-def run_single_test(port, options, test_input, driver, worker_name):
-    runner = SingleTestRunner(options, port, driver, test_input, worker_name)
+def run_single_test(port, options, test_input, driver, worker_name, stop_when_done):
+    runner = SingleTestRunner(options, port, driver, test_input, worker_name, stop_when_done)
     return runner.run()
 
 
 class SingleTestRunner(object):
     (ALONGSIDE_TEST, PLATFORM_DIR, VERSION_DIR, UPDATE) = ('alongside', 'platform', 'version', 'update')
 
-    def __init__(self, options, port, driver, test_input, worker_name):
+    def __init__(self, options, port, driver, test_input, worker_name, stop_when_done):
         self._options = options
         self._port = port
         self._filesystem = port.host.filesystem
@@ -59,6 +59,7 @@ class SingleTestRunner(object):
         self._test_name = test_input.test_name
         self._should_run_pixel_test = test_input.should_run_pixel_test
         self._reference_files = test_input.reference_files
+        self._stop_when_done = stop_when_done
 
         if self._reference_files:
             # Detect and report a test which has a wrong combination of expectation files.
@@ -102,7 +103,7 @@ class SingleTestRunner(object):
         return self._run_compare_test()
 
     def _run_compare_test(self):
-        driver_output = self._driver.run_test(self._driver_input())
+        driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
         expected_driver_output = self._expected_driver_output()
 
         if self._options.ignore_metrics:
@@ -116,7 +117,7 @@ class SingleTestRunner(object):
         return test_result
 
     def _run_rebaseline(self):
-        driver_output = self._driver.run_test(self._driver_input())
+        driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
         failures = self._handle_error(driver_output)
         test_result_writer.write_test_result(self._filesystem, self._port, self._test_name, driver_output, None, failures)
         # FIXME: It the test crashed or timed out, it might be better to avoid
@@ -265,7 +266,10 @@ class SingleTestRunner(object):
         elif driver_output.image_hash != expected_driver_output.image_hash:
             diff_result = self._port.diff_image(driver_output.image, expected_driver_output.image)
             err_str = diff_result[2]
-            if err_str:
+            # FIXME: see https://bugs.webkit.org/show_bug.cgi?id=94277 and
+            # https://bugs.webkit.org/show_bug.cgi?id=81962; ImageDiff doesn't
+            # seem to be working with WTR properly and tons of tests are failing.
+            if err_str and not self._options.webkit_test_runner:
                 _log.warning(' %s : %s' % (self._test_name, err_str))
                 failures.append(test_failures.FailureImageHashMismatch())
                 driver_output.error = (driver_output.error or '') + err_str
@@ -279,7 +283,7 @@ class SingleTestRunner(object):
         return failures
 
     def _run_reftest(self):
-        test_output = self._driver.run_test(self._driver_input())
+        test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
         total_test_time = 0
         reference_output = None
         test_result = None
@@ -293,7 +297,7 @@ class SingleTestRunner(object):
         putAllMismatchBeforeMatch = sorted
         for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
             reference_test_name = self._port.relative_test_filename(reference_filename)
-            reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, test_output.image_hash, should_run_pixel_test=True))
+            reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, test_output.image_hash, should_run_pixel_test=True), self._stop_when_done)
             test_result = self._compare_output_with_reference(test_output, reference_output, reference_filename, expectation == '!=')
 
             if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
@@ -316,6 +320,10 @@ class SingleTestRunner(object):
         if failures:
             return TestResult(self._test_name, failures, total_test_time, has_stderr)
 
+        if self._options.webkit_test_runner and not self._options.pixel_tests:
+            # don't check pixel results for WTR/WK2; they're broken.
+            return TestResult(self._test_name, failures, total_test_time, has_stderr)
+
         if not driver_output1.image_hash and not driver_output2.image_hash:
             failures.append(test_failures.FailureReftestNoImagesGenerated(reference_filename))
         elif mismatch:

diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
index bab741839..d3015dbdd 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
@@ -206,10 +206,14 @@ class TestExpectationParser(object):
         self._allow_rebaseline_modifier = allow_rebaseline_modifier
 
     def parse(self, filename, expectations_string):
-        expectations = TestExpectationParser._tokenize_list(filename, expectations_string)
-        for expectation_line in expectations:
-            self._parse_line(expectation_line)
-        return expectations
+        expectation_lines = []
+        line_number = 0
+        for line in expectations_string.split("\n"):
+            line_number += 1
+            test_expectation = self._tokenize_line(filename, line, line_number)
+            self._parse_line(test_expectation)
+            expectation_lines.append(test_expectation)
+        return expectation_lines
 
     def expectation_for_skipped_test(self, test_name):
         expectation_line = TestExpectationLine()
@@ -231,8 +235,6 @@ class TestExpectationParser(object):
         if not expectation_line.name:
             return
 
-        self._check_modifiers_against_expectations(expectation_line)
-
         expectation_line.is_file = self._port.test_isfile(expectation_line.name)
         if not expectation_line.is_file and self._check_path_does_not_exist(expectation_line):
             return
@@ -251,7 +253,14 @@ class TestExpectationParser(object):
         has_wontfix = False
         has_bugid = False
         parsed_specifiers = set()
-        for modifier in expectation_line.modifiers:
+
+        modifiers = [modifier.lower() for modifier in expectation_line.modifiers]
+        expectations = [expectation.lower() for expectation in expectation_line.expectations]
+
+        if self.SLOW_MODIFIER in modifiers and self.TIMEOUT_EXPECTATION in expectations:
+            expectation_line.warnings.append('A test can not be both SLOW and TIMEOUT. If it times out indefinitely, then it should be just TIMEOUT.')
+
+        for modifier in modifiers:
             if modifier in TestExpectations.MODIFIERS:
                 expectation_line.parsed_modifiers.append(modifier)
                 if modifier == self.WONTFIX_MODIFIER:
@@ -268,7 +277,7 @@ class TestExpectationParser(object):
         if not expectation_line.parsed_bug_modifiers and not has_wontfix and not has_bugid:
             expectation_line.warnings.append('Test lacks BUG modifier.')
 
-        if self._allow_rebaseline_modifier and self.REBASELINE_MODIFIER in expectation_line.modifiers:
+        if self._allow_rebaseline_modifier and self.REBASELINE_MODIFIER in modifiers:
             expectation_line.warnings.append('REBASELINE should only be used for running rebaseline.py. Cannot be checked in.')
 
         expectation_line.matching_configurations = self._test_configuration_converter.to_config_set(parsed_specifiers, expectation_line.warnings)
@@ -283,10 +292,6 @@ class TestExpectationParser(object):
             result.add(expectation)
         expectation_line.parsed_expectations = result
 
-    def _check_modifiers_against_expectations(self, expectation_line):
-        if self.SLOW_MODIFIER in expectation_line.modifiers and self.TIMEOUT_EXPECTATION in expectation_line.expectations:
-            expectation_line.warnings.append('A test can not be both SLOW and TIMEOUT. If it times out indefinitely, then it should be just TIMEOUT.')
-
     def _check_path_does_not_exist(self, expectation_line):
         # WebKit's way of skipping tests is to add a -disabled suffix.
         # So we should consider the path existing if the path or the
@@ -324,7 +329,7 @@ class TestExpectationParser(object):
         expectation_line.matching_tests.append(expectation_line.path)
 
     @classmethod
-    def _tokenize(cls, filename, expectation_string, line_number):
+    def _tokenize_line(cls, filename, expectation_string, line_number):
         """Tokenizes a line from TestExpectations and returns an unparsed TestExpectationLine instance.
 
         The format of a test expectation line is:
@@ -364,20 +369,9 @@
         return expectation_line
 
     @classmethod
-    def _tokenize_list(cls, filename, expectations_string):
-        """Returns a list of TestExpectationLines, one for each line in expectations_string."""
-        expectation_lines = []
-        line_number = 0
-        for line in expectations_string.split("\n"):
-            line_number += 1
-            expectation_lines.append(cls._tokenize(filename, line, line_number))
-        return expectation_lines
-
-    @classmethod
     def _split_space_separated(cls, space_separated_string):
         """Splits a space-separated string into an array."""
-        # FIXME: Lower-casing is necessary to support legacy code. Need to eliminate.
-        return [part.strip().lower() for part in space_separated_string.strip().split(' ')]
+        return [part.strip() for part in space_separated_string.strip().split(' ')]
 
 
 class TestExpectationLine(object):
@@ -857,7 +851,7 @@ class TestExpectations(object):
         def without_rebaseline_modifier(expectation):
             return not (not expectation.is_invalid() and
                         expectation.name in except_these_tests and
-                        "rebaseline" in expectation.modifiers and
+                        'rebaseline' in expectation.parsed_modifiers and
                         filename == expectation.filename)
 
         return TestExpectationSerializer.list_to_string(filter(without_rebaseline_modifier, self._expectations))

diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
index b65151d72..9fc118360 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
@@ -488,7 +488,7 @@ class RebaseliningTest(Base):
 
 class TestExpectationParserTests(unittest.TestCase):
     def _tokenize(self, line):
-        return TestExpectationParser._tokenize('path', line, 0)
+        return TestExpectationParser._tokenize_line('path', line, 0)
 
     def test_tokenize_blank(self):
         expectation = self._tokenize('')
@@ -529,15 +529,15 @@ class TestExpectationParserTests(unittest.TestCase):
     def test_tokenize_valid_with_comment(self):
         expectation = self._tokenize('FOO : bar = BAZ //Qux.')
         self.assertEqual(expectation.comment, 'Qux.')
-        self.assertEqual(str(expectation.modifiers), '[\'foo\']')
-        self.assertEqual(str(expectation.expectations), '[\'baz\']')
+        self.assertEqual(str(expectation.modifiers), "['FOO']")
+        self.assertEqual(str(expectation.expectations), "['BAZ']")
         self.assertEqual(len(expectation.warnings), 0)
 
     def test_tokenize_valid_with_multiple_modifiers(self):
         expectation = self._tokenize('FOO1 FOO2 : bar = BAZ //Qux.')
         self.assertEqual(expectation.comment, 'Qux.')
-        self.assertEqual(str(expectation.modifiers), '[\'foo1\', \'foo2\']')
-        self.assertEqual(str(expectation.expectations), '[\'baz\']')
+        self.assertEqual(str(expectation.modifiers), "['FOO1', 'FOO2']")
+        self.assertEqual(str(expectation.expectations), "['BAZ']")
         self.assertEqual(len(expectation.warnings), 0)
 
     def test_parse_empty_string(self):
@@ -560,7 +560,7 @@ class TestExpectationSerializerTests(unittest.TestCase):
         unittest.TestCase.__init__(self, testFunc)
 
     def _tokenize(self, line):
-        return TestExpectationParser._tokenize('path', line, 0)
+        return TestExpectationParser._tokenize_line('path', line, 0)
 
     def assert_round_trip(self, in_string, expected_string=None):
         expectation = self._tokenize(in_string)
@@ -569,7 +569,9 @@ class TestExpectationSerializerTests(unittest.TestCase):
         self.assertEqual(expected_string, self._serializer.to_string(expectation))
 
     def assert_list_round_trip(self, in_string, expected_string=None):
-        expectations = TestExpectationParser._tokenize_list('path', in_string)
+        host = MockHost()
+        parser = TestExpectationParser(host.port_factory.get('test-win-xp', None), [], allow_rebaseline_modifier=False)
+        expectations = parser.parse('path', in_string)
         if expected_string is None:
             expected_string = in_string
         self.assertEqual(expected_string, TestExpectationSerializer.list_to_string(expectations, self._converter))
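The parser rewrite folds _tokenize_list() into parse() and stops lower-casing tokens in _split_space_separated(); case normalization now happens only when modifiers are interpreted, which is why the unit tests above change their expected tokens from 'foo' to 'FOO'. A simplified model of the case-preserving split (this is not the real TestExpectationParser):

    def tokenize(line):
        # 'SLOW : foo.html = TIMEOUT' -> (['SLOW'], 'foo.html', ['TIMEOUT']);
        # tokens keep the author's capitalization.
        modifiers, rest = line.split(':', 1)
        name, expectations = rest.split('=', 1)
        return ([m.strip() for m in modifiers.split()],
                name.strip(),
                [e.strip() for e in expectations.split()])

    def parse(line):
        modifiers, name, expectations = tokenize(line)
        warnings = []
        # compare case-insensitively, as the new _parse_modifiers() does
        if 'slow' in [m.lower() for m in modifiers] and 'timeout' in [e.lower() for e in expectations]:
            warnings.append('A test can not be both SLOW and TIMEOUT. '
                            'If it times out indefinitely, then it should be just TIMEOUT.')
        return modifiers, name, expectations, warnings

    print(parse('SLOW : foo.html = TIMEOUT')[3])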
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/layout_tests/port/base.py
index cf7104c28..c6b5c6802 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base.py
@@ -60,6 +60,7 @@ from webkitpy.layout_tests.port import driver
 from webkitpy.layout_tests.port import http_lock
 from webkitpy.layout_tests.port import image_diff
 from webkitpy.layout_tests.port import server_process
+from webkitpy.layout_tests.port.factory import PortFactory
 from webkitpy.layout_tests.servers import apache_http_server
 from webkitpy.layout_tests.servers import http_server
 from webkitpy.layout_tests.servers import websocket_server
@@ -158,6 +159,12 @@ class Port(object):
             return 50 * 1000
         return 35 * 1000
 
+    def driver_stop_timeout(self):
+        """ Returns the amount of time in seconds to wait before killing the process in driver.stop()."""
+        # We want to wait for at least 3 seconds, but if we are really slow, we want to be slow on cleanup as
+        # well (for things like ASAN, Valgrind, etc.)
+        return 3.0 * float(self.get_option('time_out_ms', '0')) / self.default_timeout_ms()
+
     def wdiff_available(self):
         if self._wdiff_available is None:
             self._wdiff_available = self.check_wdiff(logging=False)
@@ -198,7 +205,7 @@ class Port(object):
 
     def baseline_search_path(self):
-        return self.get_option('additional_platform_directory', []) + self.default_baseline_search_path()
+        return self.get_option('additional_platform_directory', []) + self._compare_baseline() + self.default_baseline_search_path()
 
     def default_baseline_search_path(self):
         """Return a list of absolute paths to directories to search under for
@@ -211,6 +218,14 @@ class Port(object):
             search_paths.append(self.port_name)
         return map(self._webkit_baseline_path, search_paths)
 
+    @memoized
+    def _compare_baseline(self):
+        factory = PortFactory(self.host)
+        target_port = self.get_option('compare_port')
+        if target_port:
+            return factory.get(target_port).default_baseline_search_path()
+        return []
+
     def check_build(self, needs_http):
         """This routine is used to ensure that the build is up to date
            and all the needed binaries are present."""
@@ -1099,15 +1114,6 @@ class Port(object):
     def default_configuration(self):
         return self._config.default_configuration()
 
-    def process_kill_time(self):
-        """ Returns the amount of time in seconds to wait before killing the process.
-
-        Within server_process.stop there is a time delta before the test is explictly
-        killed. By changing this the time can be extended in case the process needs
-        more time to cleanly exit on its own.
-        """
-        return 3.0
-
     #
     # PROTECTED ROUTINES
     #
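driver_stop_timeout() replaces the fixed 3.0-second process_kill_time(): the grace period now scales with how far the user stretched the test timeout, so instrumented runs (ASan, Valgrind) also get proportionally longer to shut down. The arithmetic in isolation:

    def driver_stop_timeout(time_out_ms, default_timeout_ms):
        # 3 seconds at the default timeout, proportionally longer when the
        # run was configured with a larger --time-out-ms.
        return 3.0 * float(time_out_ms) / default_timeout_ms

    assert driver_stop_timeout(35 * 1000, 35 * 1000) == 3.0   # default run
    assert driver_stop_timeout(105 * 1000, 35 * 1000) == 9.0  # 3x slower run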
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
index 7106a20f5..64ba6003b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
@@ -173,6 +173,10 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
         # marked as slow tests on desktop platforms.
         return 10 * 1000
 
+    def driver_stop_timeout(self):
+        # DRT doesn't respond to closing stdin, so we might as well stop the driver immediately.
+        return 0.0
+
     def default_child_processes(self):
         return len(self._get_devices())
 
@@ -262,7 +266,9 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
     def create_driver(self, worker_number, no_timeout=False):
         # We don't want the default DriverProxy which is not compatible with our driver.
         # See comments in ChromiumAndroidDriver.start().
-        return ChromiumAndroidDriver(self, worker_number, pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)
+        return ChromiumAndroidDriver(self, worker_number, pixel_tests=self.get_option('pixel_tests'),
+                                     # Force no timeout to avoid DumpRenderTree timeouts before NRWT.
+                                     no_timeout=True)
 
     def driver_cmd_line(self):
         # Override to return the actual DumpRenderTree command line.
@@ -545,13 +551,13 @@ class ChromiumAndroidDriver(driver.Driver):
                 not self._file_exists_on_device(self._out_fifo_path) and
                 not self._file_exists_on_device(self._err_fifo_path))
 
-    def run_test(self, driver_input):
+    def run_test(self, driver_input, stop_when_done):
         base = self._port.lookup_virtual_test_base(driver_input.test_name)
         if base:
             driver_input = copy.copy(driver_input)
             driver_input.args = self._port.lookup_virtual_test_args(driver_input.test_name)
             driver_input.test_name = base
-        return super(ChromiumAndroidDriver, self).run_test(driver_input)
+        return super(ChromiumAndroidDriver, self).run_test(driver_input, stop_when_done)
 
     def start(self, pixel_tests, per_test_args):
         # Only one driver instance is allowed because of the nature of Android activity.
@@ -657,15 +663,10 @@ class ChromiumAndroidDriver(driver.Driver):
             self._read_stderr_process.kill()
             self._read_stderr_process = None
 
-        # Stop and kill server_process because our pipe reading/writing processes won't quit
-        # by itself on close of the pipes.
-        if self._server_process:
-            self._server_process.stop(kill_directly=True)
-            self._server_process = None
         super(ChromiumAndroidDriver, self).stop()
 
         if self._forwarder_process:
-            self._forwarder_process.stop(kill_directly=True)
+            self._forwarder_process.kill()
             self._forwarder_process = None
 
         if not ChromiumAndroidDriver._loop_with_timeout(self._remove_all_pipes, DRT_START_STOP_TIMEOUT_SECS):

diff --git a/Tools/Scripts/webkitpy/layout_tests/port/driver.py b/Tools/Scripts/webkitpy/layout_tests/port/driver.py
index 85049970b..c343c570d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/driver.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/driver.py
@@ -136,7 +136,7 @@ class Driver(object):
     def __del__(self):
         self.stop()
 
-    def run_test(self, driver_input):
+    def run_test(self, driver_input, stop_when_done):
         """Run a single test and return the results.
 
         Note that it is okay if a test times out or crashes and leaves
@@ -158,14 +158,27 @@
         text, audio = self._read_first_block(deadline)  # First block is either text or audio
         image, actual_image_hash = self._read_optional_image_block(deadline)  # The second (optional) block is image data.
 
-        # We may not have read all of the output if an error (crash) occured.
-        # Since some platforms output the stacktrace over error, we should
-        # dump any buffered error into self.error_from_test.
-        # FIXME: We may need to also read stderr until the process dies?
-        self.error_from_test += self._server_process.pop_all_buffered_stderr()
+        crashed = self.has_crashed()
+        timed_out = self._server_process.timed_out
+
+        if text and ('Timed out waiting for final message from web process' in text):
+            # FIXME: This is a hack to work around the issues in https://bugs.webkit.org/show_bug.cgi?id=94505.
+            # We need to either fix the underlying problem in WTR or return a more canonical error.
+            if not timed_out:
+                _log.warning("webprocess timed out but WTR didn't, killing WTR")
+                timed_out = True
+            else:
+                _log.warning("webprocess timed out and so did WTR")
+
+        if stop_when_done or crashed or timed_out:
+            # We call stop() even if we crashed or timed out in order to get any remaining stdout/stderr output.
+            # In the timeout case, we kill the hung process as well.
+            out, err = self._server_process.stop(self._port.driver_stop_timeout() if stop_when_done else 0.0)
+            text += out
+            self.error_from_test += err
+            self._server_process = None
 
         crash_log = None
-        if self.has_crashed():
+        if crashed:
             self.error_from_test, crash_log = self._get_crash_log(text, self.error_from_test, newer_than=start_time)
 
             # If we don't find a crash log use a placeholder error message instead.
@@ -175,15 +188,9 @@
                 if self._subprocess_was_unresponsive:
                     crash_log += ' Process failed to become responsive before timing out.'
 
-        timeout = self._server_process.timed_out
-        if timeout:
-            # DRT doesn't have a built in timer to abort the test, so we might as well
-            # kill the process directly and not wait for it to shut down cleanly (since it may not).
-            self._server_process.kill()
-
         return DriverOutput(text, image, actual_image_hash, audio,
-                            crash=self.has_crashed(), test_time=time.time() - test_begin_time,
-                            timeout=timeout, error=self.error_from_test,
+                            crash=crashed, test_time=time.time() - test_begin_time,
+                            timeout=timed_out, error=self.error_from_test,
                             crashed_process_name=self._crashed_process_name,
                             crashed_pid=self._crashed_pid, crash_log=crash_log)
 
@@ -273,7 +280,7 @@ class Driver(object):
 
     def stop(self):
         if self._server_process:
-            self._server_process.stop()
+            self._server_process.stop(self._port.driver_stop_timeout())
             self._server_process = None
 
         if self._driver_tempdir:
@@ -476,20 +483,20 @@ class DriverProxy(object):
     def uri_to_test(self, uri):
         return self._driver.uri_to_test(uri)
 
-    def run_test(self, driver_input):
+    def run_test(self, driver_input, stop_when_done):
         base = self._port.lookup_virtual_test_base(driver_input.test_name)
         if base:
             virtual_driver_input = copy.copy(driver_input)
             virtual_driver_input.test_name = base
             virtual_driver_input.args = self._port.lookup_virtual_test_args(driver_input.test_name)
-            return self.run_test(virtual_driver_input)
+            return self.run_test(virtual_driver_input, stop_when_done)
 
         pixel_tests_needed = driver_input.should_run_pixel_test
         cmd_line_key = self._cmd_line_as_key(pixel_tests_needed, driver_input.args)
         if not cmd_line_key in self._running_drivers:
             self._running_drivers[cmd_line_key] = self._make_driver(pixel_tests_needed)
 
-        return self._running_drivers[cmd_line_key].run_test(driver_input)
+        return self._running_drivers[cmd_line_key].run_test(driver_input, stop_when_done)
 
     def start(self):
         # FIXME: Callers shouldn't normally call this, since this routine

diff --git a/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
index 5e2019b1b..2457c2ca0 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
@@ -182,7 +182,7 @@ class DriverTest(unittest.TestCase):
             def has_crashed(self):
                 return self.crashed
 
-            def stop(self):
+            def stop(self, timeout):
                 pass
 
         def assert_crash(driver, error_line, crashed, name, pid, unresponsive=False):
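The run_test() rewrite above funnels three exit paths (end of batch, crash, timeout) through a single ServerProcess.stop() call, so any output still sitting in the pipes ends up attached to the test that produced it. A condensed model of that decision (the server_process argument is a stand-in for webkitpy's ServerProcess):

    def finish_test(server_process, stop_when_done, crashed, timed_out, stop_timeout):
        text, error = "", ""
        if stop_when_done or crashed or timed_out:
            # A crash or timeout gets a zero-second grace period; a clean
            # end-of-batch stop waits up to stop_timeout for a tidy exit.
            out, err = server_process.stop(stop_timeout if stop_when_done else 0.0)
            text += out or ""
            error += err or ""
        return text, error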
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/efl.py b/Tools/Scripts/webkitpy/layout_tests/port/efl.py
index e5635744d..9301c4334 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/efl.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/efl.py
@@ -94,6 +94,16 @@ class EflPort(Port, PulseAudioSanitizer):
         dyn_path = self._build_path('lib', 'libwebcore_efl.so')
         return static_path if self._filesystem.exists(static_path) else dyn_path
 
+    def _search_paths(self):
+        search_paths = []
+        if self.get_option('webkit_test_runner'):
+            search_paths.append(self.port_name + '-wk2')
+        search_paths.append(self.port_name)
+        return search_paths
+
+    def expectations_files(self):
+        return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in self._search_paths()]))
+
     def show_results_html_file(self, results_filename):
         # FIXME: We should find a way to share this implmentation with Gtk,
         # or teach run-launcher how to call run-safari and move this down to WebKitPort.

diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
index aa1e3024f..b772323b5 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
@@ -140,7 +140,7 @@ class ServerProcess(object):
         try:
             self._proc.stdin.write(bytes)
         except IOError, e:
-            self.stop()
+            self.stop(0.0)
             # stop() calls _reset(), so we have to set crashed to True after calling stop().
             self._crashed = True
 
@@ -213,12 +213,12 @@ class ServerProcess(object):
             output, self._error = self._split_string_after_index(self._error, bytes_count)
         return output
 
-    def _wait_for_data_and_update_buffers_using_select(self, deadline):
+    def _wait_for_data_and_update_buffers_using_select(self, deadline, stopping=False):
         out_fd = self._proc.stdout.fileno()
         err_fd = self._proc.stderr.fileno()
         select_fds = (out_fd, err_fd)
         try:
-            read_fds, _, _ = select.select(select_fds, [], select_fds, deadline - time.time())
+            read_fds, _, _ = select.select(select_fds, [], select_fds, max(deadline - time.time(), 0))
         except select.error, e:
             # We can ignore EINVAL since it's likely the process just crashed and we'll
             # figure that out the next time through the loop in _read().
@@ -229,16 +229,22 @@ class ServerProcess(object):
         try:
             if out_fd in read_fds:
                 data = self._proc.stdout.read()
-                if not data:
-                    _log.warning('unexpected EOF of stdout')
-                    self._crashed = True
+                if not data and not stopping:
+                    if self._proc.poll() is not None:
+                        _log.warning('unexpected EOF of stdout, %s crashed' % self._name)
+                        self._crashed = True
+                    else:
+                        _log.warning('unexpected EOF of stdout, %s still alive' % self._name)
                 self._output += data
 
             if err_fd in read_fds:
                 data = self._proc.stderr.read()
-                if not data:
-                    _log.warning('unexpected EOF of stderr')
-                    self._crashed = True
+                if not data and not stopping:
+                    if self._proc.poll() is not None:
+                        _log.warning('unexpected EOF on stderr, %s crashed' % self._name)
+                        self._crashed = True
+                    else:
+                        _log.warning('unexpected EOF on stderr, %s is still alive' % self._name)
                 self._error += data
         except IOError, e:
             # We can ignore the IOErrors because we will detect if the subporcess crashed
@@ -307,41 +313,44 @@ class ServerProcess(object):
         if not self._proc:
             self._start()
 
-    def stop(self, kill_directly=False):
+    def stop(self, timeout_secs=3.0):
         if not self._proc:
-            return
+            return (None, None)
 
-        # Only bother to check for leaks if the process is still running.
+        # Only bother to check for leaks or stderr if the process is still running.
         if self.poll() is None:
             self._port.check_for_leaks(self.name(), self.pid())
 
+        now = time.time()
         self._proc.stdin.close()
-        self._proc.stdout.close()
-        if self._proc.stderr:
-            self._proc.stderr.close()
-
-        if kill_directly:
-            self.kill()
+        if not timeout_secs:
+            self._kill()
         elif not self._host.platform.is_win():
-            # Closing stdin/stdout/stderr hangs sometimes on OS X,
-            # and anyway we don't want to hang the harness if DumpRenderTree
-            # is buggy, so we wait a couple seconds to give DumpRenderTree a
-            # chance to clean up, but then force-kill the process if necessary.
-            timeout = time.time() + self._port.process_kill_time()
-            while self._proc.poll() is None and time.time() < timeout:
+            # FIXME: Why aren't we calling this on win?
+            deadline = now + timeout_secs
+            while self._proc.poll() is None and time.time() < deadline:
                 time.sleep(0.01)
             if self._proc.poll() is None:
                 _log.warning('stopping %s timed out, killing it' % self._name)
-                self.kill()
+                self._kill()
                 _log.warning('killed')
+
+        # read any remaining data on the pipes and return it.
+        if self._use_win32_apis:
+            self._wait_for_data_and_update_buffers_using_win32_apis(now)
+        else:
+            self._wait_for_data_and_update_buffers_using_select(now, stopping=True)
+        out, err = self._output, self._error
         self._reset()
+        return (out, err)
 
     def kill(self):
-        if self._proc:
-            self._host.executive.kill_process(self._proc.pid)
-            if self._proc.poll() is not None:
-                self._proc.wait()
-            self._reset()
+        self.stop(0.0)
+
+    def _kill(self):
+        self._host.executive.kill_process(self._proc.pid)
+        if self._proc.poll() is not None:
+            self._proc.wait()
 
     def replace_outputs(self, stdout, stderr):
         assert self._proc

diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py
index db38615e0..48c41e6f2 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py
@@ -41,6 +41,8 @@ from webkitpy.common.system.outputcapture import OutputCapture
 class TrivialMockPort(object):
     def __init__(self):
         self.host = MockSystemHost()
+        self.host.executive.kill_process = lambda x: None
+        self.host.executive.kill_process = lambda x: None
 
     def results_directory(self):
         return "/mock-results"
@@ -77,6 +79,9 @@ class MockProc(object):
     def poll(self):
         return 1
 
+    def wait(self):
+        return 0
+
 
 class FakeServerProcess(server_process.ServerProcess):
     def _start(self):
@@ -114,7 +119,7 @@ class TestServerProcess(unittest.TestCase):
             if line:
                 self.assertEquals(line.strip(), "stderr")
 
-        proc.stop()
+        proc.stop(0)
 
     def test_broken_pipe(self):
         port_obj = TrivialMockPort()
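ServerProcess.stop() now takes a timeout instead of a kill_directly flag and returns whatever (stdout, stderr) was still buffered, which is what Driver.run_test() splices into the test output above. A rough standalone equivalent using plain subprocess (it assumes proc was opened with stdin/stdout/stderr=subprocess.PIPE; the real class is more careful about platforms and select()):

    import time

    def stop(proc, timeout_secs=3.0):
        """Close stdin, give the child timeout_secs to exit, kill it otherwise,
        then drain and return any remaining (stdout, stderr)."""
        if proc.poll() is None:
            proc.stdin.close()
            deadline = time.time() + timeout_secs
            while proc.poll() is None and time.time() < deadline:
                time.sleep(0.01)
            if proc.poll() is None:
                proc.kill()  # stopping timed out
                proc.wait()
        return (proc.stdout.read(), proc.stderr.read())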
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/layout_tests/port/test.py
index 6302120d2..cccb9ac1d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -525,7 +525,7 @@ class TestDriver(Driver):
         pixel_tests_flag = '-p' if pixel_tests else ''
         return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_drt_flag', []) + per_test_args
 
-    def run_test(self, test_input):
+    def run_test(self, test_input, stop_when_done):
         start_time = time.time()
         test_name = test_input.test_name
         test_args = test_input.args or []
@@ -563,6 +563,9 @@ class TestDriver(Driver):
             crash_logs = CrashLogs(self._port.host)
             crash_log = crash_logs.find_newest_log(crashed_process_name, None) or ''
 
+        if stop_when_done:
+            self.stop()
+
         return DriverOutput(actual_text, test.actual_image, test.actual_checksum, audio,
             crash=test.crash or test.web_process_crash, crashed_process_name=crashed_process_name,
             crashed_pid=crashed_pid, crash_log=crash_log,

diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index 06b3032ff..ddb0105c2 100755
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -331,6 +331,8 @@ def parse_args(args=None):
         optparse.make_option("--additional-expectations", action="append", default=[],
             help="Path to a test_expectations file that will override previous expectations. "
                  "Specify multiple times for multiple sets of overrides."),
+        optparse.make_option("--compare-port", action="store", default=None,
+            help="Use the specified port's baselines first"),
         optparse.make_option("--no-show-results", action="store_false",
             default=True, dest="show_results",
             help="Don't launch a browser with results after the tests "

diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
index ecb58b89d..1f35857c4 100755
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
@@ -147,7 +147,7 @@ def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False,
         def stop(self):
             self._current_test_batch = None
 
-        def run_test(self, test_input):
+        def run_test(self, test_input, stop_when_done):
             if self._current_test_batch is None:
                 self._current_test_batch = []
                 test_batches.append(self._current_test_batch)
@@ -159,7 +159,7 @@ def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False,
             dirname, filename = filesystem.split(test_name)
             if include_reference_html or not Port.is_reference_html_file(filesystem, dirname, filename):
                 self._current_test_batch.append(test_name)
-            return TestDriver.run_test(self, test_input)
+            return TestDriver.run_test(self, test_input, stop_when_done)
 
     class RecordingTestPort(TestPort):
         def create_driver(self, worker_number):

diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest.py b/Tools/Scripts/webkitpy/performance_tests/perftest.py
index b111c9b4b..8ef945893 100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest.py
@@ -74,7 +74,7 @@ class PerfTest(object):
         return self.parse_output(output)
 
     def run_single(self, driver, path_or_url, time_out_ms, should_run_pixel_test=False):
-        return driver.run_test(DriverInput(path_or_url, time_out_ms, image_hash=None, should_run_pixel_test=should_run_pixel_test))
+        return driver.run_test(DriverInput(path_or_url, time_out_ms, image_hash=None, should_run_pixel_test=should_run_pixel_test), stop_when_done=False)
 
     def run_failed(self, output):
         if output.text == None or output.error:
@@ -112,7 +112,7 @@ class PerfTest(object):
         return False
 
     _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
-    _result_classes = ['Time', 'JS Heap', 'FastMalloc']
+    _result_classes = ['Time', 'JS Heap', 'Malloc']
     _result_class_regex = re.compile(r'^(?P<resultclass>' + r'|'.join(_result_classes) + '):')
     _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit']
     _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')
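The new --compare-port option feeds Port._compare_baseline() shown earlier: the named port's baseline directories are searched after any --additional-platform-directory entries but before the current port's own fallback path. In miniature (the directory names below are made up for illustration):

    def baseline_search_path(additional_dirs, compare_port_dirs, default_dirs):
        # order matters: explicit extra dirs, then the comparison port's
        # baselines, then this port's normal fallback chain
        return additional_dirs + compare_port_dirs + default_dirs

    print(baseline_search_path(
        [],
        ["LayoutTests/platform/chromium-mac"],  # from --compare-port=chromium-mac
        ["LayoutTests/platform/mac-lion", "LayoutTests/platform/mac"]))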
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
index 47fe6231c..3b7c609ce 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
@@ -99,7 +99,7 @@ class TestPageLoadingPerfTest(unittest.TestCase):
             self._values = values
             self._index = 0
 
-        def run_test(self, input):
+        def run_test(self, input, stop_when_done):
             value = self._values[self._index]
             self._index += 1
             if isinstance(value, str):
@@ -141,8 +141,8 @@ class TestReplayPerfTest(unittest.TestCase):
     def __init__(self, custom_run_test=None):
 
         class ReplayTestDriver(TestDriver):
-            def run_test(self, text_input):
-                return custom_run_test(text_input) if custom_run_test else None
+            def run_test(self, text_input, stop_when_done):
+                return custom_run_test(text_input, stop_when_done) if custom_run_test else None
 
         self._custom_driver_class = ReplayTestDriver
         super(self.__class__, self).__init__(host=MockHost())
@@ -174,7 +174,7 @@ class TestReplayPerfTest(unittest.TestCase):
 
         loaded_pages = []
 
-        def run_test(test_input):
+        def run_test(test_input, stop_when_done):
             if test_input.test_name != "about:blank":
                 self.assertEqual(test_input.test_name, 'http://some-test/')
             loaded_pages.append(test_input)
@@ -243,7 +243,7 @@ class TestReplayPerfTest(unittest.TestCase):
 
         loaded_pages = []
 
-        def run_test(test_input):
+        def run_test(test_input, stop_when_done):
             loaded_pages.append(test_input)
             self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
             return DriverOutput('actual text', 'actual image', 'actual checksum',
@@ -270,7 +270,7 @@ class TestReplayPerfTest(unittest.TestCase):
         output_capture = OutputCapture()
         output_capture.capture_output()
 
-        def run_test(test_input):
+        def run_test(test_input, stop_when_done):
             self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
             return DriverOutput('actual text', 'actual image', 'actual checksum',
                 audio=None, crash=False, timeout=False, error=False)

diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
index ef459cd69..7e2f05ecd 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
@@ -49,7 +49,7 @@ class MainTest(unittest.TestCase):
         self.assertEquals(stream.buflist, contents)
 
     class TestDriver:
-        def run_test(self, driver_input):
+        def run_test(self, driver_input, stop_when_done):
             text = ''
             timeout = False
             crash = False
@@ -123,7 +123,7 @@ stdev 15000 bytes
min 811000 bytes
max 848000 bytes

-FastMalloc:
+Malloc:
avg 532000 bytes
median 529000 bytes
stdev 13000 bytes
@@ -274,13 +274,13 @@ max 548000 bytes
             'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
             'RESULT Parser: memory-test: JSHeap= 832000.0 bytes',
             'median= 829000.0 bytes, stdev= 15000.0 bytes, min= 811000.0 bytes, max= 848000.0 bytes',
-            'RESULT Parser: memory-test: FastMalloc= 532000.0 bytes',
+            'RESULT Parser: memory-test: Malloc= 532000.0 bytes',
             'median= 529000.0 bytes, stdev= 13000.0 bytes, min= 511000.0 bytes, max= 548000.0 bytes',
             '', '']))
         results = runner.load_output_json()[0]['results']
         self.assertEqual(results['Parser/memory-test'], {'min': 1080.0, 'max': 1120.0, 'median': 1101.0, 'stdev': 11.0, 'avg': 1100.0, 'unit': 'ms'})
         self.assertEqual(results['Parser/memory-test:JSHeap'], {'min': 811000.0, 'max': 848000.0, 'median': 829000.0, 'stdev': 15000.0, 'avg': 832000.0, 'unit': 'bytes'})
-        self.assertEqual(results['Parser/memory-test:FastMalloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes'})
+        self.assertEqual(results['Parser/memory-test:Malloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes'})
 
     def _test_run_with_json_output(self, runner, filesystem, upload_suceeds=True, expected_exit_code=0):
         filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')

diff --git a/Tools/Scripts/webkitpy/style/checkers/cpp.py b/Tools/Scripts/webkitpy/style/checkers/cpp.py
index 78cd88250..45c49aab4 100644
--- a/Tools/Scripts/webkitpy/style/checkers/cpp.py
+++ b/Tools/Scripts/webkitpy/style/checkers/cpp.py
@@ -2747,6 +2747,10 @@ def check_include_line(filename, file_extension, clean_lines, line_number, inclu
         error(line_number, 'build/include', 4,
               'wtf includes should be <wtf/file.h> instead of "wtf/file.h".')
 
+    if filename.find('/chromium/') != -1 and include.startswith('cc/CC'):
+        error(line_number, 'build/include', 4,
+              'cc includes should be "CCFoo.h" instead of "cc/CCFoo.h".')
+
     duplicate_header = include in include_state
     if duplicate_header:
         error(line_number, 'build/include', 4,

diff --git a/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py b/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
index 339897fe8..a5a94b793 100644
--- a/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
@@ -2736,6 +2736,15 @@ class OrderOfIncludesTest(CppStyleTestBase):
                                          'wtf includes should be <wtf/file.h> instead of "wtf/file.h".'
                                          ' [build/include] [4]')
 
+    def test_check_cc_includes(self):
+        self.assert_language_rules_check('bar/chromium/foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "foo.h"\n'
+                                         '\n'
+                                         '#include "cc/CCProxy.h"\n',
+                                         'cc includes should be "CCFoo.h" instead of "cc/CCFoo.h".'
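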
+                                         ' [build/include] [4]')
+
     def test_classify_include(self):
         classify_include = cpp_style._classify_include
         include_state = cpp_style._IncludeState()

The new cpp.py rule only fires for files under a /chromium/ directory and only for includes spelled with the cc/ prefix. The check extracted into a standalone function (the real checker reports through its error() callback instead of returning a string):

    def check_cc_include(filename, include):
        """Return a style-error string, or None if the include is fine."""
        if filename.find('/chromium/') != -1 and include.startswith('cc/CC'):
            return 'cc includes should be "CCFoo.h" instead of "cc/CCFoo.h".'
        return None

    assert check_cc_include('bar/chromium/foo.cpp', 'cc/CCProxy.h') is not None
    assert check_cc_include('bar/chromium/foo.cpp', 'CCProxy.h') is None
    assert check_cc_include('bar/mac/foo.cpp', 'cc/CCProxy.h') is None

diff --git a/Tools/Scripts/webkitpy/tool/commands/perfalizer.py b/Tools/Scripts/webkitpy/tool/commands/perfalizer.py
index b9fc6fe5b..ae9f63a65 100644
--- a/Tools/Scripts/webkitpy/tool/commands/perfalizer.py
+++ b/Tools/Scripts/webkitpy/tool/commands/perfalizer.py
@@ -72,7 +72,7 @@ class PerfalizerTask(PatchAnalysisTask):
         head_revision = self._tool.scm().head_svn_revision()
         self._logger('Building WebKit at r%s without the patch' % head_revision)
 
-        if not self._build():
+        if not self._build_without_patch():
             return False
 
         if not self._port.check_build(needs_http=False):
@@ -96,11 +96,11 @@ class PerfalizerTask(PatchAnalysisTask):
             filesystem.remove(self._json_path())
 
         self._logger("Running performance tests...")
-        if self._run_perf_test(self._build_directory_without_patch) < 0:
+        if self._run_perf_test(self._build_directory_without_patch, 'without %d' % self._patch.id()) < 0:
             self._logger('Failed to run performance tests without the patch.')
             return False
 
-        if self._run_perf_test(self._build_directory) < 0:
+        if self._run_perf_test(self._build_directory, 'with %d' % self._patch.id()) < 0:
             self._logger('Failed to run performance tests with the patch.')
             return False
 
@@ -129,11 +129,11 @@ class PerfalizerTask(PatchAnalysisTask):
     def _results_page_path(self):
         return self._tool.filesystem.join(self._build_directory, 'PerformanceTestResults.html')
 
-    def _run_perf_test(self, build_path):
+    def _run_perf_test(self, build_path, description):
         filesystem = self._tool.filesystem
         script_path = filesystem.join(filesystem.dirname(self._tool.path()), 'run-perf-tests')
         perf_test_runner_args = [script_path, '--no-build', '--no-show-results', '--build-directory', build_path,
-            '--output-json-path', self._json_path()]
+            '--output-json-path', self._json_path(), '--description', description]
         return self._tool.executive.run_and_throw_if_fail(perf_test_runner_args, cwd=self._tool.scm().checkout_root)
 
     def run_command(self, command):

diff --git a/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py b/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py
index f519e3f78..feb7b05b3 100644
--- a/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py
@@ -50,7 +50,8 @@ class PerfalizerTaskTest(unittest.TestCase):
             if args[0] in commands_to_fail:
                 raise ScriptError
 
-        def run_perf_test(build_path):
+        def run_perf_test(build_path, description):
+            self.assertTrue(description == 'without 10000' or description == 'with 10000')
             if 'run-perf-tests' in commands_to_fail:
                 return -1
             if 'results-page' not in commands_to_fail:

diff --git a/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
index 09b45ba55..115249916 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
@@ -217,8 +217,8 @@ class PrintExpectationsTest(unittest.TestCase):
 
     def test_csv(self):
         self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
-                      ('test-win-xp,failures/expected/image.html,wontfix,image\n'
-                       'test-win-xp,failures/expected/text.html,wontfix,text\n'),
+                      ('test-win-xp,failures/expected/image.html,WONTFIX,IMAGE\n'
+                       'test-win-xp,failures/expected/text.html,WONTFIX,TEXT\n'),
                       csv=True)