author    Simon Hausmann <simon.hausmann@nokia.com>    2012-07-30 11:37:48 +0200
committer Simon Hausmann <simon.hausmann@nokia.com>    2012-07-30 11:38:52 +0200
commit    89e2486a48b739f8d771d69ede5a6a1b244a10fc (patch)
tree      503b1a7812cf97d93704c32437eb5f62dc1a1ff9 /Tools/Scripts/webkitpy
parent    625f028249cb37c55bbbd153f3902afd0b0756d9 (diff)
download  qtwebkit-89e2486a48b739f8d771d69ede5a6a1b244a10fc.tar.gz
Imported WebKit commit 0282df8ca7c11d8c8a66ea18543695c69f545a27 (http://svn.webkit.org/repository/webkit/trunk@124002)
New snapshot with prospective Mountain Lion build fix
Diffstat (limited to 'Tools/Scripts/webkitpy')
-rw-r--r--  Tools/Scripts/webkitpy/common/checkout/scm/git.py  2
-rw-r--r--  Tools/Scripts/webkitpy/common/config/committers.py  4
-rwxr-xr-x  Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py  70
-rw-r--r--  Tools/Scripts/webkitpy/common/net/layouttestresults.py  4
-rw-r--r--  Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py  4
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/manager.py  35
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py  12
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py  13
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/worker.py  7
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/apple.py  6
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/base.py  258
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py  2
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/chromium.py  30
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py  127
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py  15
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py  8
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py  8
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py  9
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py  8
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py  4
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py  8
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py  8
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/driver.py  21
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py  48
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/efl.py  21
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/gtk.py  18
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py  6
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/image_diff.py  117
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py  52
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/mac.py  6
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py  3
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py  222
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/qt.py  27
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/server_process.py  13
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py  77
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/test.py  13
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/webkit.py  271
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py  233
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/win.py  2
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py  57
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py  23
-rw-r--r--  Tools/Scripts/webkitpy/performance_tests/perftest.py  9
-rwxr-xr-x  Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py  6
-rwxr-xr-x  Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py  31
-rwxr-xr-x  Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py  130
-rw-r--r--  Tools/Scripts/webkitpy/test/main.py  28
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/expectedfailures.py  46
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py  19
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py  12
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/queues.py  20
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/queues_unittest.py  12
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/rebaseline.py  3
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/upload.py  8
-rw-r--r--  Tools/Scripts/webkitpy/tool/servers/gardeningserver.py  8
-rw-r--r--  Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py  6
-rw-r--r--  Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py  4
56 files changed, 1259 insertions, 955 deletions
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/git.py b/Tools/Scripts/webkitpy/common/checkout/scm/git.py
index 802d81db0..41e1e1e04 100644
--- a/Tools/Scripts/webkitpy/common/checkout/scm/git.py
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/git.py
@@ -274,7 +274,7 @@ class Git(SCM, SVNRepository):
if self._filesystem.exists(order_file):
order = "-O%s" % order_file
- command = [self.executable_name, 'diff', '--binary', "--no-ext-diff", "--full-index", "--no-renames", order, self.merge_base(git_commit), "--"]
+ command = [self.executable_name, 'diff', '--binary', '--no-color', "--no-ext-diff", "--full-index", "--no-renames", order, self.merge_base(git_commit), "--"]
if changed_files:
command += changed_files
return self.prepend_svn_revision(self.run(command, decode_output=False, cwd=self.checkout_root))
diff --git a/Tools/Scripts/webkitpy/common/config/committers.py b/Tools/Scripts/webkitpy/common/config/committers.py
index c1878849d..b1b8a3fac 100644
--- a/Tools/Scripts/webkitpy/common/config/committers.py
+++ b/Tools/Scripts/webkitpy/common/config/committers.py
@@ -158,7 +158,6 @@ contributors_who_are_not_committers = [
Contributor("Tamas Czene", ["tczene@inf.u-szeged.hu", "Czene.Tamas@stud.u-szeged.hu"], "tczene"),
Contributor("Terry Anderson", "tdanderson@chromium.org", "tdanderson"),
Contributor("Tien-Ren Chen", "trchen@chromium.org", "trchen"),
- Contributor("Tom Hudson", "tomhudson@google.com"),
Contributor("WebKit Review Bot", "webkit.review.bot@gmail.com", "sheriff-bot"),
Contributor("Wyatt Carss", ["wcarss@chromium.org", "wcarss@google.com"], "wcarss"),
Contributor("Zeev Lieber", "zlieber@chromium.org"),
@@ -283,7 +282,6 @@ committers_unable_to_review = [
Committer("Jeremy Moskovich", ["playmobil@google.com", "jeremy@chromium.org"], "jeremymos"),
Committer("Jesus Sanchez-Palencia", ["jesus@webkit.org", "jesus.palencia@openbossa.org"], "jeez_"),
Committer("Jia Pu", "jpu@apple.com"),
- Committer("Jochen Eisinger", "jochen@chromium.org", "jochen__"),
Committer("Joe Thomas", "joethomas@motorola.com", "joethomas"),
Committer("John Abd-El-Malek", "jam@chromium.org", "jam"),
Committer("John Gregg", ["johnnyg@google.com", "johnnyg@chromium.org"], "johnnyg"),
@@ -372,6 +370,7 @@ committers_unable_to_review = [
Committer("Steve Lacey", "sjl@chromium.org", "stevela"),
Committer("Takashi Toyoshima", "toyoshim@chromium.org", "toyoshim"),
Committer("Thomas Sepez", "tsepez@chromium.org", "tsepez"),
+ Committer("Tom Hudson", ["tomhudson@google.com", "tomhudson@chromium.org"], "tomhudson"),
Committer("Tom Zakrajsek", "tomz@codeaurora.org", "tomz"),
Committer("Tommy Widenflycht", "tommyw@google.com", "tommyw"),
Committer("Trey Matteson", "trey@usa.net", "trey"),
@@ -471,6 +470,7 @@ reviewers_list = [
Reviewer("Jessie Berlin", ["jberlin@webkit.org", "jberlin@apple.com"], "jessieberlin"),
Reviewer("Jian Li", "jianli@chromium.org", "jianli"),
Reviewer("Jocelyn Turcotte", "jocelyn.turcotte@nokia.com", "jturcotte"),
+ Reviewer("Jochen Eisinger", "jochen@chromium.org", "jochen__"),
Reviewer("John Sullivan", "sullivan@apple.com", "sullivan"),
Reviewer("Jon Honeycutt", "jhoneycutt@apple.com", "jhoneycutt"),
Reviewer("Joseph Pecoraro", ["joepeck@webkit.org", "pecoraro@apple.com"], "JoePeck"),
diff --git a/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py b/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py
new file mode 100755
index 000000000..11897764e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""In order for the multiprocessing module to spawn children correctly on
+Windows, we need to be running a Python module that can be imported
+(which means a file in sys.path that ends in .py). In addition, we need to
+ensure that sys.path / PYTHONPATH is set and propagating correctly.
+
+This module enforces that."""
+
+import os
+import subprocess
+import sys
+
+from webkitpy.common import version_check # 'unused import' pylint: disable=W0611
+
+
+def run(*parts):
+ up = os.path.dirname
+ script_dir = up(up(up(os.path.abspath(__file__))))
+ env = os.environ
+ if 'PYTHONPATH' in env:
+ if script_dir not in env['PYTHONPATH']:
+ env['PYTHONPATH'] = env['PYTHONPATH'] + os.pathsep + script_dir
+ else:
+ env['PYTHONPATH'] = script_dir
+ module_path = os.path.join(script_dir, *parts)
+ cmd = [sys.executable, module_path] + sys.argv[1:]
+
+ # Wrap processes in the jhbuild environment so DRT or WKTR
+ # doesn't need to do it and their process id as reported by
+ # subprocess.Popen is not jhbuild's.
+ if '--gtk' in sys.argv[1:] and os.path.exists(os.path.join(script_dir, '..', '..', 'WebKitBuild', 'Dependencies')):
+ cmd.insert(1, os.path.join(script_dir, '..', 'gtk', 'run-with-jhbuild'))
+
+ proc = subprocess.Popen(cmd, env=env)
+ try:
+ proc.wait()
+ except KeyboardInterrupt:
+ # We need a second wait in order to make sure the subprocess exits fully.
+ # FIXME: It would be nice if we could put a timeout on this.
+ proc.wait()
+ sys.exit(proc.returncode)
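
The docstring above explains why the layout-test entry points must re-execute a real .py module so that multiprocessing can spawn workers correctly on Windows. A minimal usage sketch of a wrapper script that could call run() follows; the wrapper itself and the module path passed to run() are illustrative assumptions, not part of this commit.

    #!/usr/bin/env python
    # Hypothetical wrapper script, for illustration only.
    from webkitpy.common import multiprocessing_bootstrap

    # run() puts the webkitpy checkout on PYTHONPATH, re-executes the named
    # module with sys.executable, forwards sys.argv[1:], waits for the child
    # (waiting again on KeyboardInterrupt), and exits with its return code.
    multiprocessing_bootstrap.run('webkitpy', 'layout_tests', 'run_webkit_tests.py')
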
diff --git a/Tools/Scripts/webkitpy/common/net/layouttestresults.py b/Tools/Scripts/webkitpy/common/net/layouttestresults.py
index bd7211cca..f0d807edc 100644
--- a/Tools/Scripts/webkitpy/common/net/layouttestresults.py
+++ b/Tools/Scripts/webkitpy/common/net/layouttestresults.py
@@ -168,9 +168,7 @@ class LayoutTestResults(object):
return [result.test_name for result in self.results_matching_failure_types(failure_types)]
def failing_test_results(self):
- # These should match the "fail", "crash", and "timeout" keys.
- failure_types = [test_failures.FailureTextMismatch, test_failures.FailureImageHashMismatch, test_failures.FailureCrash, test_failures.FailureTimeout]
- return self.results_matching_failure_types(failure_types)
+ return self.results_matching_failure_types(test_failures.ALL_FAILURE_CLASSES)
def failing_tests(self):
return [result.test_name for result in self.failing_test_results()] + self._unit_test_failures
diff --git a/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py b/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py
index 09352a275..939a56a7d 100644
--- a/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py
@@ -138,9 +138,9 @@ class LayoutTestResultsTest(unittest.TestCase):
self.assertEqual(LayoutTestResults.results_from_string(None), None)
self.assertEqual(LayoutTestResults.results_from_string(""), None)
results = LayoutTestResults.results_from_string(ORWTResultsHTMLParserTest._example_results_html)
- self.assertEqual(len(results.failing_tests()), 0)
+ self.assertEqual(len(results.failing_tests()), 1)
def test_tests_matching_failure_types(self):
results = LayoutTestResults.results_from_string(ORWTResultsHTMLParserTest._example_results_html_with_failing_tests)
failing_tests = results.tests_matching_failure_types([test_failures.FailureTextMismatch])
- self.assertEqual(len(results.failing_tests()), 1)
+ self.assertEqual(len(results.failing_tests()), 2)
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
index 0d07d3a68..0544918f4 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -329,7 +329,7 @@ class Manager(object):
self._worker_stats = {}
self._current_result_summary = None
- def collect_tests(self, args):
+ def _collect_tests(self, args):
"""Find all the files to test.
Args:
@@ -366,7 +366,7 @@ class Manager(object):
def _is_perf_test(self, test):
return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test
- def parse_expectations(self):
+ def _parse_expectations(self):
self._expectations = test_expectations.TestExpectations(self._port, self._test_files)
def _split_into_chunks_if_necessary(self, skipped):
@@ -439,7 +439,7 @@ class Manager(object):
self._test_files_list = files + skip_chunk_list
self._test_files = set(self._test_files_list)
- self.parse_expectations()
+ self._parse_expectations()
self._test_files = set(files)
self._test_files_list = files
@@ -833,21 +833,22 @@ class Manager(object):
return result_summary
- def run(self):
- """Run all our tests on all our test files.
-
- For each test file, we run each test type. If there are any failures,
- we collect them for reporting.
+ def run(self, args):
+ """Run all our tests on all our test files and return the number of unexpected results (0 == success)."""
+ self._printer.write_update("Collecting tests ...")
+ try:
+ self._collect_tests(args)
+ except IOError as e:
+ # This is raised when the --test-list doesn't exist.
+ return -1
- Args:
- result_summary: a summary object tracking the test results.
+ self._printer.write_update("Checking build ...")
+ if not self._port.check_build(self.needs_servers()):
+ _log.error("Build check failed")
+ return -1
- Return:
- The number of unexpected results (0 == success)
- """
- # collect_tests() must have been called first to initialize us.
- # If we didn't find any files to test, we've errored out already in
- # prepare_lists_and_print_output().
+ self._printer.write_update("Parsing expectations ...")
+ self._parse_expectations()
result_summary = self._set_up_run()
if not result_summary:
@@ -1169,7 +1170,7 @@ def read_test_files(fs, filenames, test_path_separator):
except IOError, e:
if e.errno == errno.ENOENT:
_log.critical('')
- _log.critical('--test-list file "%s" not found' % file)
+ _log.critical('--test-list file "%s" not found' % filename)
raise
return tests
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
index ae20a8a50..576d423af 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
@@ -263,9 +263,7 @@ class ManagerTest(unittest.TestCase):
run_webkit_tests._set_up_derived_options(port, options)
printer = printing.Printer(port, options, StringIO.StringIO(), StringIO.StringIO())
manager = LockCheckingManager(port, options, printer, tester, True)
- manager.collect_tests(args)
- manager.parse_expectations()
- num_unexpected_results = manager.run()
+ num_unexpected_results = manager.run(args)
printer.cleanup()
tester.assertEquals(num_unexpected_results, 0)
@@ -276,9 +274,7 @@ class ManagerTest(unittest.TestCase):
run_webkit_tests._set_up_derived_options(port, options)
printer = printing.Printer(port, options, StringIO.StringIO(), StringIO.StringIO())
manager = LockCheckingManager(port, options, printer, tester, False)
- manager.collect_tests(args)
- manager.parse_expectations()
- num_unexpected_results = manager.run()
+ num_unexpected_results = manager.run(args)
printer.cleanup()
tester.assertEquals(num_unexpected_results, 0)
@@ -350,7 +346,7 @@ class ManagerTest(unittest.TestCase):
host = MockHost()
port = host.port_factory.get()
manager = Manager(port, options=MockOptions(test_list=None, http=True), printer=Mock())
- manager.collect_tests(test_names)
+ manager._collect_tests(test_names)
return manager
manager = get_manager_with_tests(['fast/html'])
@@ -371,7 +367,7 @@ class ManagerTest(unittest.TestCase):
host = MockHost()
port = host.port_factory.get('test-mac-leopard')
manager = Manager(port, options=MockOptions(test_list=None, http=True), printer=Mock())
- manager.collect_tests(test_names)
+ manager._collect_tests(test_names)
return manager
host = MockHost()
port = host.port_factory.get('test-mac-leopard')
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
index fa07e88ff..88cbabf23 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
@@ -78,10 +78,7 @@ class SingleTestRunner(object):
self._port.expected_audio(self._test_name))
def _should_fetch_expected_checksum(self):
- if not self._should_run_pixel_test:
- return False
- return (self._options.pixel_tests and
- not (self._options.new_baseline or self._options.reset_results))
+ return self._should_run_pixel_test and not (self._options.new_baseline or self._options.reset_results)
def _driver_input(self):
# The image hash is used to avoid doing an image dump if the
@@ -153,7 +150,7 @@ class SingleTestRunner(object):
location = self.VERSION_DIR if self._options.add_platform_exceptions else self.UPDATE
self._save_baseline_data(driver_output.text, '.txt', location)
self._save_baseline_data(driver_output.audio, '.wav', location)
- if self._options.pixel_tests:
+ if self._should_run_pixel_test:
self._save_baseline_data(driver_output.image, '.png', location)
def _save_baseline_data(self, data, extension, location):
@@ -202,9 +199,9 @@ class SingleTestRunner(object):
driver_output.crashed_process_name,
driver_output.crashed_pid))
if driver_output.error:
- _log.debug("%s %s crashed, stack trace:" % (self._worker_name, testname))
+ _log.debug("%s %s crashed, (stderr lines):" % (self._worker_name, testname))
else:
- _log.debug("%s %s crashed, no stack trace" % (self._worker_name, testname))
+ _log.debug("%s %s crashed, (no stderr)" % (self._worker_name, testname))
elif driver_output.error:
_log.debug("%s %s output stderr lines:" % (self._worker_name, testname))
for line in driver_output.error.splitlines():
@@ -222,7 +219,7 @@ class SingleTestRunner(object):
failures.extend(self._compare_text(driver_output.text, expected_driver_output.text))
failures.extend(self._compare_audio(driver_output.audio, expected_driver_output.audio))
- if self._options.pixel_tests:
+ if self._should_run_pixel_test:
failures.extend(self._compare_image(driver_output, expected_driver_output))
return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr())
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py b/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
index 378826c51..b1583fff3 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
@@ -88,13 +88,8 @@ class Worker(object):
test_input.reference_files = self._port.reference_files(test_input.test_name)
if test_input.reference_files:
test_input.should_run_pixel_test = True
- elif self._options.pixel_tests:
- if self._options.skip_pixel_test_if_no_baseline:
- test_input.should_run_pixel_test = bool(self._port.expected_image(test_input.test_name))
- else:
- test_input.should_run_pixel_test = True
else:
- test_input.should_run_pixel_test = False
+ test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input)
def _run_test(self, test_input):
self._update_test_input(test_input)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/apple.py b/Tools/Scripts/webkitpy/layout_tests/port/apple.py
index 5e8b8b829..28ecc6380 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/apple.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/apple.py
@@ -28,14 +28,14 @@
import logging
-from webkitpy.layout_tests.port.webkit import WebKitPort
+from webkitpy.layout_tests.port.base import Port
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
_log = logging.getLogger(__name__)
-class ApplePort(WebKitPort):
+class ApplePort(Port):
"""Shared logic between all of Apple's ports."""
# This is used to represent the version of an operating system
@@ -72,7 +72,7 @@ class ApplePort(WebKitPort):
return port_name[len(self.port_name + '-'):]
def __init__(self, host, port_name, **kwargs):
- WebKitPort.__init__(self, host, port_name, **kwargs)
+ super(ApplePort, self).__init__(host, port_name, **kwargs)
allowed_port_names = self.VERSION_FALLBACK_ORDER + [self.operating_system() + "-future"]
port_name = port_name.replace('-wk2', '')
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/layout_tests/port/base.py
index d79143189..ad70f042d 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base.py
@@ -33,7 +33,10 @@ test infrastructure (the Port and Driver classes)."""
import cgi
import difflib
import errno
+import itertools
+import logging
import os
+import operator
import optparse
import re
import sys
@@ -49,18 +52,19 @@ from webkitpy.common import find_files
from webkitpy.common import read_checksum_from_png
from webkitpy.common.memoized import memoized
from webkitpy.common.system import path
-from webkitpy.common.system import logutils
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.port import config as port_config
from webkitpy.layout_tests.port import driver
from webkitpy.layout_tests.port import http_lock
+from webkitpy.layout_tests.port import image_diff
+from webkitpy.layout_tests.port import server_process
from webkitpy.layout_tests.servers import apache_http_server
from webkitpy.layout_tests.servers import http_server
from webkitpy.layout_tests.servers import websocket_server
-_log = logutils.get_logger(__file__)
+_log = logging.getLogger(__name__)
# FIXME: This class should merge with WebKitPort now that Chromium behaves mostly like other webkit ports.
@@ -110,6 +114,8 @@ class Port(object):
self._helper = None
self._http_server = None
self._websocket_server = None
+ self._image_differ = None
+ self._server_process_constructor = server_process.ServerProcess # overridable for testing
self._http_lock = None # FIXME: Why does this live on the port object?
# Python's Popen has a bug that causes any pipes opened to a
@@ -179,10 +185,14 @@ class Port(object):
def baseline_version_dir(self):
"""Return the absolute path to the platform-and-version-specific results."""
- baseline_search_paths = self.get_option('additional_platform_directory', []) + self.baseline_search_path()
+ baseline_search_paths = self.baseline_search_path()
return baseline_search_paths[0]
+
def baseline_search_path(self):
+ return self.get_option('additional_platform_directory', []) + self.default_baseline_search_path()
+
+ def default_baseline_search_path(self):
"""Return a list of absolute paths to directories to search under for
baselines. The directories are searched in order."""
search_paths = []
@@ -305,8 +315,18 @@ class Port(object):
|tolerance| should be a percentage value (0.0 - 100.0).
If it is omitted, the port default tolerance value is used.
+
+ If an error occurs (like ImageDiff isn't found, or crashes), we log an error and return True (for a diff).
"""
- raise NotImplementedError('Port.diff_image')
+ if not actual_contents and not expected_contents:
+ return (None, 0)
+ if not actual_contents or not expected_contents:
+ return (True, 0)
+ if not self._image_differ:
+ self._image_differ = image_diff.ImageDiffer(self)
+ self.set_option_default('tolerance', 0.1)
+ tolerance = tolerance or self.get_option('tolerance')
+ return self._image_differ.diff_image(expected_contents, actual_contents, tolerance)
def diff_text(self, expected_text, actual_text, expected_filename, actual_filename):
"""Returns a string containing the diff of the two text strings
@@ -395,7 +415,7 @@ class Port(object):
platform specific.
"""
baseline_filename = self._filesystem.splitext(test_name)[0] + '-expected' + suffix
- baseline_search_path = self.get_option('additional_platform_directory', []) + self.baseline_search_path()
+ baseline_search_path = self.baseline_search_path()
baselines = []
for platform_dir in baseline_search_path:
@@ -650,7 +670,10 @@ class Port(object):
def skipped_layout_tests(self, test_list):
"""Returns the set of tests found in Skipped files. Does *not* include tests marked as SKIP in expectations files."""
- return set([])
+ tests_to_skip = set(self._expectations_from_skipped_files(self._skipped_file_search_paths()))
+ tests_to_skip.update(self._tests_for_other_platforms())
+ tests_to_skip.update(self._skipped_tests_for_unsupported_features(test_list))
+ return tests_to_skip
def _tests_from_skipped_file_contents(self, skipped_file_contents):
tests_to_skip = []
@@ -776,7 +799,9 @@ class Port(object):
def clean_up_test_run(self):
"""Perform port-specific work at the end of a test run."""
- pass
+ if self._image_differ:
+ self._image_differ.stop()
+ self._image_differ = None
# FIXME: os.environ access should be moved to onto a common/system class to be more easily mockable.
def _value_or_default_from_environ(self, name, default=None):
@@ -1122,14 +1147,17 @@ class Port(object):
return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', config_file_name)
def _build_path(self, *comps):
- # --root is used for running with a pre-built root (like from a nightly zip).
- build_directory = self.get_option('root') or self.get_option('build_directory')
- if not build_directory:
- build_directory = self._config.build_directory(self.get_option('configuration'))
- # Set --build-directory here Since this modifies the options object used by the worker subprocesses,
- # it avoids the slow call out to build_directory in each subprocess.
- self.set_option_default('build_directory', build_directory)
- return self._filesystem.join(self._filesystem.abspath(build_directory), *comps)
+ root_directory = self.get_option('root')
+ if not root_directory:
+ build_directory = self.get_option('build_directory')
+ if build_directory:
+ root_directory = self._filesystem.join(self.get_option('configuration'))
+ else:
+ root_directory = self._config.build_directory(self.get_option('configuration'))
+ # Set --root so that we can pass this to subprocesses and avoid making the
+ # slow call to config.build_directory() N times in each worker.
+ self.set_option_default('root', root_directory)
+ return self._filesystem.join(self._filesystem.abspath(root_directory), *comps)
def _path_to_driver(self, configuration=None):
"""Returns the full path to the test driver (DumpRenderTree)."""
@@ -1197,9 +1225,9 @@ class Port(object):
pid_str = str(pid or '<unknown>')
stdout_lines = (stdout or '<empty>').decode('utf8', 'replace').splitlines()
stderr_lines = (stderr or '<empty>').decode('utf8', 'replace').splitlines()
- return 'crash log for %s (pid %s):\n%s\n%s\n' % (name_str, pid_str,
+ return (stderr, 'crash log for %s (pid %s):\n%s\n%s\n' % (name_str, pid_str,
'\n'.join(('STDOUT: ' + l) for l in stdout_lines),
- '\n'.join(('STDERR: ' + l) for l in stderr_lines))
+ '\n'.join(('STDERR: ' + l) for l in stderr_lines)))
def look_for_new_crash_logs(self, crashed_processes, start_time):
pass
@@ -1249,6 +1277,202 @@ class Port(object):
return suite.args
return []
+ def supports_switching_pixel_tests_per_test(self):
+ if self.get_option('webkit_test_runner'):
+ return True
+ return self._supports_switching_pixel_tests_per_test()
+
+ def _supports_switching_pixel_tests_per_test(self):
+ # FIXME: all ports should support it.
+ return False
+
+ def should_run_as_pixel_test(self, test_input):
+ if not self._options.pixel_tests:
+ return False
+ if not self.supports_switching_pixel_tests_per_test():
+ # Cannot do more filtering without this.
+ return True
+ if self._options.pixel_test_directories:
+ return any(test_input.test_name.startswith(directory) for directory in self._options.pixel_test_directories)
+ return self._should_run_as_pixel_test(test_input)
+
+ def _should_run_as_pixel_test(self, test_input):
+ # Default behavior is to allow all tests to run as pixel tests if --pixel-tests is on and
+ # --pixel-test-directory is not specified.
+ return True
+
+ # FIXME: Eventually we should standardize port naming, and make this method smart enough
+ # to use for all port configurations (including architectures, graphics types, etc).
+ def _port_flag_for_scripts(self):
+ # This is overridden by ports which need a flag passed to scripts to distinguish the use of that port.
+ # For example --qt on linux, since a user might have both Gtk and Qt libraries installed.
+ # FIXME: Chromium should override this once ChromiumPort is a WebKitPort.
+ return None
+
+ # This is modeled after webkitdirs.pm argumentsForConfiguration() from old-run-webkit-tests
+ def _arguments_for_configuration(self):
+ config_args = []
+ config_args.append(self._config.flag_for_configuration(self.get_option('configuration')))
+ # FIXME: We may need to add support for passing --32-bit like old-run-webkit-tests had.
+ port_flag = self._port_flag_for_scripts()
+ if port_flag:
+ config_args.append(port_flag)
+ return config_args
+
+ def _run_script(self, script_name, args=None, include_configuration_arguments=True, decode_output=True, env=None):
+ run_script_command = [self._config.script_path(script_name)]
+ if include_configuration_arguments:
+ run_script_command.extend(self._arguments_for_configuration())
+ if args:
+ run_script_command.extend(args)
+ output = self._executive.run_command(run_script_command, cwd=self._config.webkit_base_dir(), decode_output=decode_output, env=env)
+ _log.debug('Output of %s:\n%s' % (run_script_command, output))
+ return output
+
+ def _build_driver(self):
+ environment = self.host.copy_current_environment()
+ environment.disable_gcc_smartquotes()
+ env = environment.to_dictionary()
+
+ # FIXME: We build both DumpRenderTree and WebKitTestRunner for
+ # WebKitTestRunner runs because DumpRenderTree still includes
+ # the DumpRenderTreeSupport module and the TestNetscapePlugin.
+ # These two projects should be factored out into their own
+ # projects.
+ try:
+ self._run_script("build-dumprendertree", args=self._build_driver_flags(), env=env)
+ if self.get_option('webkit_test_runner'):
+ self._run_script("build-webkittestrunner", args=self._build_driver_flags(), env=env)
+ except ScriptError, e:
+ _log.error(e.message_with_output(output_limit=None))
+ return False
+ return True
+
+ def _build_driver_flags(self):
+ return []
+
+ def _tests_for_other_platforms(self):
+ # By default we will skip any directory under LayoutTests/platform
+ # that isn't in our baseline search path (this mirrors what
+ # old-run-webkit-tests does in findTestsToRun()).
+ # Note this returns LayoutTests/platform/*, not platform/*/*.
+ entries = self._filesystem.glob(self._webkit_baseline_path('*'))
+ dirs_to_skip = []
+ for entry in entries:
+ if self._filesystem.isdir(entry) and entry not in self.baseline_search_path():
+ basename = self._filesystem.basename(entry)
+ dirs_to_skip.append('platform/%s' % basename)
+ return dirs_to_skip
+
+ def _runtime_feature_list(self):
+ """If a port makes certain features available only through runtime flags, it can override this routine to indicate which ones are available."""
+ return None
+
+ def nm_command(self):
+ return 'nm'
+
+ def _modules_to_search_for_symbols(self):
+ path = self._path_to_webcore_library()
+ if path:
+ return [path]
+ return []
+
+ def _symbols_string(self):
+ symbols = ''
+ for path_to_module in self._modules_to_search_for_symbols():
+ try:
+ symbols += self._executive.run_command([self.nm_command(), path_to_module], error_handler=self._executive.ignore_error)
+ except OSError, e:
+ _log.warn("Failed to run nm: %s. Can't determine supported features correctly." % e)
+ return symbols
+
+ # Ports which use run-time feature detection should define this method and return
+ # a dictionary mapping from Feature Names to skipped directories. NRWT will
+ # run DumpRenderTree --print-supported-features and parse the output.
+ # If the Feature Names are not found in the output, the corresponding directories
+ # will be skipped.
+ def _missing_feature_to_skipped_tests(self):
+ """Return the supported feature dictionary. Keys are feature names and values
+ are the lists of directories to skip if the feature name is not matched."""
+ # FIXME: This list matches WebKitWin and should be moved onto the Win port.
+ return {
+ "Accelerated Compositing": ["compositing"],
+ "3D Rendering": ["animations/3d", "transforms/3d"],
+ }
+
+ # Ports which use compile-time feature detection should define this method and return
+ # a dictionary mapping from symbol substrings to possibly disabled test directories.
+ # When the symbol substrings are not matched, the directories will be skipped.
+ # If ports don't ever enable certain features, then those directories can just be
+ # in the Skipped list instead of compile-time-checked here.
+ def _missing_symbol_to_skipped_tests(self):
+ """Return the supported feature dictionary. The keys are symbol-substrings
+ and the values are the lists of directories to skip if that symbol is missing."""
+ return {
+ "MathMLElement": ["mathml"],
+ "GraphicsLayer": ["compositing"],
+ "WebCoreHas3DRendering": ["animations/3d", "transforms/3d"],
+ "WebGLShader": ["fast/canvas/webgl", "compositing/webgl", "http/tests/canvas/webgl"],
+ "MHTMLArchive": ["mhtml"],
+ "CSSVariableValue": ["fast/css/variables", "inspector/styles/variables"],
+ }
+
+ def _has_test_in_directories(self, directory_lists, test_list):
+ if not test_list:
+ return False
+
+ directories = itertools.chain.from_iterable(directory_lists)
+ for directory, test in itertools.product(directories, test_list):
+ if test.startswith(directory):
+ return True
+ return False
+
+ def _skipped_tests_for_unsupported_features(self, test_list):
+ # Only check the runtime feature list if there are tests in the test_list that might get skipped.
+ # This is a performance optimization to avoid the subprocess call to DRT.
+ # If the port supports runtime feature detection, disable any tests
+ # for features missing from the runtime feature list.
+ # If _runtime_feature_list returns a non-None value, then prefer
+ # runtime feature detection over static feature detection.
+ if self._has_test_in_directories(self._missing_feature_to_skipped_tests().values(), test_list):
+ supported_feature_list = self._runtime_feature_list()
+ if supported_feature_list is not None:
+ return reduce(operator.add, [directories for feature, directories in self._missing_feature_to_skipped_tests().items() if feature not in supported_feature_list])
+
+ # Only check the symbols if there are tests in the test_list that might get skipped.
+ # This is a performance optimization to avoid calling nm.
+ # Runtime feature detection not supported, fall back to static detection:
+ # Disable any tests for symbols missing from the executable or libraries.
+ if self._has_test_in_directories(self._missing_symbol_to_skipped_tests().values(), test_list):
+ symbols_string = self._symbols_string()
+ if symbols_string is not None:
+ return reduce(operator.add, [directories for symbol_substring, directories in self._missing_symbol_to_skipped_tests().items() if symbol_substring not in symbols_string], [])
+
+ return []
+
+ def _wk2_port_name(self):
+ # By current convention, the WebKit2 name is always mac-wk2, win-wk2, not mac-leopard-wk2, etc,
+ # except for Qt because WebKit2 is only supported by Qt 5.0 (therefore: qt-5.0-wk2).
+ return "%s-wk2" % self.port_name
+
+ def _skipped_file_search_paths(self):
+ # Unlike baseline_search_path, we only want to search [WK2-PORT, PORT-VERSION, PORT] and any directories
+ # included via --additional-platform-directory, not the full cascade.
+ # Note order doesn't matter since the Skipped file contents are all combined.
+
+ search_paths = set([self.port_name])
+ if 'future' not in self.name():
+ search_paths.add(self.name())
+
+ if self.get_option('webkit_test_runner'):
+ # Because nearly all of the skipped tests for WebKit 2 are due to cross-platform
+ # issues, all wk2 ports share a skipped list under platform/wk2.
+ search_paths.update([self._wk2_port_name(), "wk2"])
+
+ search_paths.update(self.get_option("additional_platform_directory", []))
+
+ return search_paths
+
class VirtualTestSuite(object):
def __init__(self, name, base, args, tests=None):
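
The per-test pixel-test hooks and the feature-detection hooks added to Port above are meant to be overridden by individual ports. The sketch below shows how a hypothetical subclass might wire them up; the class, port name, directory, and feature string are invented for illustration and do not appear in this patch.

    # Hypothetical port subclass, for illustration only.
    from webkitpy.layout_tests.port.base import Port

    class ExamplePort(Port):
        port_name = 'example'

        def _supports_switching_pixel_tests_per_test(self):
            # Advertise that the driver can toggle pixel dumps per test, so
            # should_run_as_pixel_test() applies the finer-grained checks.
            return True

        def _should_run_as_pixel_test(self, test_input):
            # Consulted only when --pixel-tests is on and no
            # --pixel-test-directory filter was given on the command line.
            return test_input.test_name.startswith('compositing/')

        def _runtime_feature_list(self):
            # Returning a list makes _skipped_tests_for_unsupported_features()
            # prefer runtime detection: directories for features missing from
            # this list are skipped.
            return ['3D Rendering']
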
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
index 7e9259bd3..019873567 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
@@ -239,7 +239,7 @@ class PortTest(unittest.TestCase):
def test_additional_platform_directory(self):
port = self.make_port(port_name='foo')
- port.baseline_search_path = lambda: ['LayoutTests/platform/foo']
+ port.default_baseline_search_path = lambda: ['LayoutTests/platform/foo']
layout_test_dir = port.layout_tests_dir()
test_file = 'fast/test.html'
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
index 9729f94c0..b72783c5d 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
@@ -42,13 +42,12 @@ from webkitpy.common.system import executive
from webkitpy.common.system.path import cygpath
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.port.base import Port, VirtualTestSuite
-from webkitpy.layout_tests.port.webkit import WebKitPort
_log = logging.getLogger(__name__)
-class ChromiumPort(WebKitPort):
+class ChromiumPort(Port):
"""Abstract base class for Chromium implementations of the Port class."""
ALL_SYSTEMS = (
@@ -78,17 +77,17 @@ class ChromiumPort(WebKitPort):
DEFAULT_BUILD_DIRECTORIES = ('out',)
@classmethod
- def _static_build_path(cls, filesystem, build_directory, chromium_base, webkit_base, *comps):
+ def _static_build_path(cls, filesystem, build_directory, chromium_base, webkit_base, configuration, comps):
if build_directory:
- return filesystem.join(build_directory, *comps)
+ return filesystem.join(build_directory, configuration, *comps)
for directory in cls.DEFAULT_BUILD_DIRECTORIES:
- base_dir = filesystem.join(chromium_base, directory)
+ base_dir = filesystem.join(chromium_base, directory, configuration)
if filesystem.exists(base_dir):
return filesystem.join(base_dir, *comps)
for directory in cls.DEFAULT_BUILD_DIRECTORIES:
- base_dir = filesystem.join(webkit_base, directory)
+ base_dir = filesystem.join(webkit_base, directory, configuration)
if filesystem.exists(base_dir):
return filesystem.join(base_dir, *comps)
@@ -263,7 +262,7 @@ class ChromiumPort(WebKitPort):
try:
return self.path_from_chromium_base('webkit', self.get_option('configuration'), 'layout-test-results')
except AssertionError:
- return self._build_path(self.get_option('configuration'), 'layout-test-results')
+ return self._build_path('layout-test-results')
def _missing_symbol_to_skipped_tests(self):
# FIXME: Should WebKitPort have these definitions also?
@@ -356,14 +355,13 @@ class ChromiumPort(WebKitPort):
return repos
def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
- new_stderr = stderr
if stderr and 'AddressSanitizer' in stderr:
asan_filter_path = self.path_from_chromium_base('tools', 'valgrind', 'asan', 'asan_symbolize.py')
if self._filesystem.exists(asan_filter_path):
output = self._executive.run_command([asan_filter_path], input=stderr)
- new_stderr = self._executive.run_command(['c++filt'], input=output)
+ stderr = self._executive.run_command(['c++filt'], input=output)
- return super(ChromiumPort, self)._get_crash_log(name, pid, stdout, new_stderr, newer_than)
+ return super(ChromiumPort, self)._get_crash_log(name, pid, stdout, stderr, newer_than)
def virtual_test_suites(self):
return [
@@ -386,11 +384,19 @@ class ChromiumPort(WebKitPort):
#
def _build_path(self, *comps):
- return self._static_build_path(self._filesystem, self.get_option('build_directory'), self.path_from_chromium_base(), self.path_from_webkit_base(), *comps)
+ return self._build_path_with_configuration(None, *comps)
+
+ def _build_path_with_configuration(self, configuration, *comps):
+ # Note that we don't implement --root or do the option caching that the
+ # base class does, because chromium doesn't use 'webkit-build-directory' and
+ # hence finding the right directory is relatively fast.
+ configuration = configuration or self.get_option('configuration')
+ return self._static_build_path(self._filesystem, self.get_option('build_directory'),
+ self.path_from_chromium_base(), self.path_from_webkit_base(), configuration, comps)
def _path_to_image_diff(self):
binary_name = 'ImageDiff'
- return self._build_path(self.get_option('configuration'), binary_name)
+ return self._build_path(binary_name)
def _check_driver_build_up_to_date(self, configuration):
if configuration in ('Debug', 'Release'):
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
index eb4dcec95..fa85f10d5 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
@@ -37,7 +37,6 @@ from webkitpy.layout_tests.port import chromium
from webkitpy.layout_tests.port import driver
from webkitpy.layout_tests.port import factory
from webkitpy.layout_tests.port import server_process
-from webkitpy.layout_tests.port import webkit
_log = logging.getLogger(__name__)
@@ -161,7 +160,7 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
self._options.additional_drt_flag.append('--encode-binary')
# The Chromium port for Android always uses the hardware GPU path.
- self._options.enable_hardware_gpu = True
+ self._options.additional_drt_flag.append('--enable-hardware-gpu')
# Shard ref tests so that they run together to avoid repeatedly driver restarts.
self._options.shard_ref_tests = True
@@ -177,7 +176,6 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
adb_args = self.get_option('adb_args')
if adb_args:
self._adb_command += shlex.split(adb_args)
- self._drt_retry_after_killed = 0
def default_timeout_ms(self):
# Android platform has less computing power than desktop platforms.
@@ -282,6 +280,9 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
def _build_path(self, *comps):
return self._host_port._build_path(*comps)
+ def _build_path_with_configuration(self, configuration, *comps):
+ return self._host_port._build_path_with_configuration(configuration, *comps)
+
def _path_to_apache(self):
return self._host_port._path_to_apache()
@@ -289,15 +290,13 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
return self._host_port._path_to_apache_config_file()
def _path_to_driver(self, configuration=None):
- if not configuration:
- configuration = self.get_option('configuration')
- return self._build_path(configuration, 'DumpRenderTree_apk/DumpRenderTree-debug.apk')
+ return self._build_path_with_configuration(configuration, 'DumpRenderTree_apk/DumpRenderTree-debug.apk')
def _path_to_helper(self):
return None
def _path_to_forwarder(self):
- return self._build_path(self.get_option('configuration'), 'forwarder')
+ return self._build_path('forwarder')
def _path_to_image_diff(self):
return self._host_port._path_to_image_diff()
@@ -345,14 +344,10 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
install_result = self._run_adb_command(['install', drt_host_path])
if install_result.find('Success') == -1:
raise AssertionError('Failed to install %s onto device: %s' % (drt_host_path, install_result))
- self._push_to_device(self._build_path(self.get_option('configuration'), 'DumpRenderTree.pak'),
- DEVICE_DRT_DIR + 'DumpRenderTree.pak')
- self._push_to_device(self._build_path(self.get_option('configuration'), 'DumpRenderTree_resources'),
- DEVICE_DRT_DIR + 'DumpRenderTree_resources')
- self._push_to_device(self._build_path(self.get_option('configuration'), 'android_main_fonts.xml'),
- DEVICE_DRT_DIR + 'android_main_fonts.xml')
- self._push_to_device(self._build_path(self.get_option('configuration'), 'android_fallback_fonts.xml'),
- DEVICE_DRT_DIR + 'android_fallback_fonts.xml')
+ self._push_to_device(self._build_path('DumpRenderTree.pak'), DEVICE_DRT_DIR + 'DumpRenderTree.pak')
+ self._push_to_device(self._build_path('DumpRenderTree_resources'), DEVICE_DRT_DIR + 'DumpRenderTree_resources')
+ self._push_to_device(self._build_path('android_main_fonts.xml'), DEVICE_DRT_DIR + 'android_main_fonts.xml')
+ self._push_to_device(self._build_path('android_fallback_fonts.xml'), DEVICE_DRT_DIR + 'android_fallback_fonts.xml')
# Version control of test resources is dependent on executables,
# because we will always rebuild executables when resources are
# updated.
@@ -362,7 +357,7 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
def _push_fonts(self):
if not self._check_version(DEVICE_FONTS_DIR, FONT_FILES_VERSION):
_log.debug('Pushing fonts')
- path_to_ahem_font = self._build_path(self.get_option('configuration'), 'AHEM____.TTF')
+ path_to_ahem_font = self._build_path('AHEM____.TTF')
self._push_to_device(path_to_ahem_font, DEVICE_FONTS_DIR + 'AHEM____.TTF')
for (host_dir, font_file) in HOST_FONT_FILES:
self._push_to_device(host_dir + font_file, DEVICE_FONTS_DIR + font_file)
@@ -403,7 +398,9 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
else:
error_handler = None
result = self._executive.run_command(self._adb_command + cmd, error_handler=error_handler)
- _log.debug('Run adb result:\n' + result)
+ # Limit the length to avoid too verbose output of commands like 'adb logcat' and 'cat /data/tombstones/tombstone01'
+ # whose outputs are normally printed in later logs.
+ _log.debug('Run adb result: ' + result[:80])
return result
def _link_device_file(self, from_file, to_file, ignore_error=False):
@@ -464,7 +461,6 @@ class ChromiumAndroidDriver(driver.Driver):
self._in_fifo_path = DRT_APP_FILES_DIR + 'DumpRenderTree.in'
self._out_fifo_path = DRT_APP_FILES_DIR + 'DumpRenderTree.out'
self._err_fifo_path = DRT_APP_FILES_DIR + 'DumpRenderTree.err'
- self._restart_after_killed = False
self._read_stdout_process = None
self._read_stderr_process = None
@@ -479,17 +475,6 @@ class ChromiumAndroidDriver(driver.Driver):
assert full_file_path.startswith('/')
return self._port._run_adb_command(['shell', 'ls', full_file_path]).strip() == full_file_path
- def _deadlock_detector(self, processes, normal_startup_event):
- time.sleep(DRT_START_STOP_TIMEOUT_SECS)
- if not normal_startup_event.is_set():
- # If normal_startup_event is not set in time, the main thread must be blocked at
- # reading/writing the fifo. Kill the fifo reading/writing processes to let the
- # main thread escape from the deadlocked state. After that, the main thread will
- # treat this as a crash.
- for i in processes:
- i.kill()
- # Otherwise the main thread has been proceeded normally. This thread just exits silently.
-
def _drt_cmd_line(self, pixel_tests, per_test_args):
return driver.Driver.cmd_line(self, pixel_tests, per_test_args) + [
'--in-fifo=' + self._in_fifo_path,
@@ -497,22 +482,41 @@ class ChromiumAndroidDriver(driver.Driver):
'--err-fifo=' + self._err_fifo_path,
]
+ @staticmethod
+ def _loop_with_timeout(condition, timeout_secs):
+ deadline = time.time() + timeout_secs
+ while time.time() < deadline:
+ if condition():
+ return True
+ return False
+
+ def _all_pipes_created(self):
+ return (self._file_exists_on_device(self._in_fifo_path) and
+ self._file_exists_on_device(self._out_fifo_path) and
+ self._file_exists_on_device(self._err_fifo_path))
+
+ def _remove_all_pipes(self):
+ self._port._run_adb_command(['shell', 'rm', self._in_fifo_path, self._out_fifo_path, self._err_fifo_path])
+ return (not self._file_exists_on_device(self._in_fifo_path) and
+ not self._file_exists_on_device(self._out_fifo_path) and
+ not self._file_exists_on_device(self._err_fifo_path))
+
def start(self, pixel_tests, per_test_args):
# Only one driver instance is allowed because of the nature of Android activity.
# The single driver needs to switch between pixel test and no pixel test mode by itself.
if pixel_tests != self._pixel_tests:
self.stop()
+ self._pixel_tests = pixel_tests
super(ChromiumAndroidDriver, self).start(pixel_tests, per_test_args)
def _start(self, pixel_tests, per_test_args):
- retries = 0
- while not self._start_once(pixel_tests, per_test_args):
+ for retries in range(3):
+ if self._start_once(pixel_tests, per_test_args):
+ return
_log.error('Failed to start DumpRenderTree application. Retries=%d. Log:%s' % (retries, self._port._get_logcat()))
- retries += 1
- if retries >= 3:
- raise AssertionError('Failed to start DumpRenderTree application multiple times. Give up.')
self.stop()
time.sleep(2)
+ raise AssertionError('Failed to start DumpRenderTree application multiple times. Give up.')
def _start_once(self, pixel_tests, per_test_args):
super(ChromiumAndroidDriver, self)._start(pixel_tests, per_test_args)
@@ -524,14 +528,8 @@ class ChromiumAndroidDriver(driver.Driver):
_log.error('Failed to start DumpRenderTree application. Exception:\n' + start_result)
return False
- seconds = 0
- while (not self._file_exists_on_device(self._in_fifo_path) or
- not self._file_exists_on_device(self._out_fifo_path) or
- not self._file_exists_on_device(self._err_fifo_path)):
- time.sleep(1)
- seconds += 1
- if seconds >= DRT_START_STOP_TIMEOUT_SECS:
- return False
+ if not ChromiumAndroidDriver._loop_with_timeout(self._all_pipes_created, DRT_START_STOP_TIMEOUT_SECS):
+ return False
# Read back the shell prompt to ensure adb shell ready.
deadline = time.time() + DRT_START_STOP_TIMEOUT_SECS
@@ -557,9 +555,19 @@ class ChromiumAndroidDriver(driver.Driver):
# Combine the stdout and stderr pipes into self._server_process.
self._server_process.replace_outputs(self._read_stdout_process._proc.stdout, self._read_stderr_process._proc.stdout)
+ def deadlock_detector(processes, normal_startup_event):
+ if not ChromiumAndroidDriver._loop_with_timeout(lambda: normal_startup_event.is_set(), DRT_START_STOP_TIMEOUT_SECS):
+ # If normal_startup_event is not set in time, the main thread must be blocked at
+ # reading/writing the fifo. Kill the fifo reading/writing processes to let the
+ # main thread escape from the deadlocked state. After that, the main thread will
+ # treat this as a crash.
+ _log.warn('Deadlock detected. Processes killed.')
+ for i in processes:
+ i.kill()
+
# Start a thread to kill the pipe reading/writing processes on deadlock of the fifos during startup.
normal_startup_event = threading.Event()
- threading.Thread(target=self._deadlock_detector,
+ threading.Thread(name='DeadlockDetector', target=deadlock_detector,
args=([self._server_process, self._read_stdout_process, self._read_stderr_process], normal_startup_event)).start()
output = ''
@@ -578,28 +586,6 @@ class ChromiumAndroidDriver(driver.Driver):
normal_startup_event.set()
return True
- def run_test(self, driver_input):
- driver_output = super(ChromiumAndroidDriver, self).run_test(driver_input)
- if driver_output.crash:
- # When Android is OOM, DRT process may be killed by ActivityManager or system OOM.
- # It looks like a crash but there is no fatal signal logged. Re-run the test for
- # such crash.
- # To test: adb shell am force-stop org.chromium.native_test,
- # or kill -11 pid twice or three times to simulate a fatal crash.
- if self._port._get_logcat().find('Fatal signal') == -1:
- self._restart_after_killed = True
- self._port._drt_retry_after_killed += 1
- if self._port._drt_retry_after_killed > 10:
- raise AssertionError('DumpRenderTree is killed by Android for too many times!')
- _log.error('DumpRenderTree is killed by system (%d).' % self._port._drt_retry_after_killed)
- self.stop()
- # Sleep 10 seconds to let system recover.
- time.sleep(10)
- return self.run_test(driver_input)
-
- self._restart_after_killed = False
- return driver_output
-
def stop(self):
self._port._run_adb_command(['shell', 'am', 'force-stop', DRT_APP_PACKAGE])
@@ -618,15 +604,8 @@ class ChromiumAndroidDriver(driver.Driver):
self._server_process = None
super(ChromiumAndroidDriver, self).stop()
- seconds = 0
- while (self._file_exists_on_device(self._in_fifo_path) or
- self._file_exists_on_device(self._out_fifo_path) or
- self._file_exists_on_device(self._err_fifo_path)):
- time.sleep(1)
- self._port._run_adb_command(['shell', 'rm', self._in_fifo_path, self._out_fifo_path, self._err_fifo_path])
- seconds += 1
- if seconds >= DRT_START_STOP_TIMEOUT_SECS:
- raise AssertionError('Failed to remove fifo files. May be locked.')
+ if not ChromiumAndroidDriver._loop_with_timeout(self._remove_all_pipes, DRT_START_STOP_TIMEOUT_SECS):
+ raise AssertionError('Failed to remove fifo files. May be locked.')
def _command_from_driver_input(self, driver_input):
command = super(ChromiumAndroidDriver, self)._command_from_driver_input(driver_input)
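
The start/stop changes above replace the hand-rolled wait loops with the _loop_with_timeout polling helper. A small self-contained sketch of the same pattern follows; the standalone function and the Event-based example are illustrative, not taken from the patch.

    import threading
    import time

    def loop_with_timeout(condition, timeout_secs):
        # Busy-poll |condition| until it returns True or the deadline passes,
        # mirroring ChromiumAndroidDriver._loop_with_timeout above.
        deadline = time.time() + timeout_secs
        while time.time() < deadline:
            if condition():
                return True
        return False

    # Example: wait up to 5 seconds for another thread to signal readiness.
    started = threading.Event()
    threading.Timer(1.0, started.set).start()  # simulate startup finishing after ~1s
    print loop_with_timeout(started.is_set, 5)  # prints True
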
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
index 0e2801a57..65b6a2d7b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
@@ -48,7 +48,6 @@ class ChromiumAndroidPortTest(chromium_port_testcase.ChromiumPortTestCase):
def test_attributes(self):
port = self.make_port()
- self.assertTrue(port.get_option('enable_hardware_gpu'))
self.assertEquals(port.baseline_path(), port._webkit_baseline_path('chromium-android'))
def test_default_timeout_ms(self):
@@ -105,7 +104,13 @@ class ChromiumAndroidPortTest(chromium_port_testcase.ChromiumPortTestCase):
port._executive = MockExecutive2(run_command_fn=ChromiumAndroidPortTest.mock_run_command_fn)
ChromiumAndroidPortTest.mock_logcat = 'logcat contents\n'
self.assertEquals(port._get_crash_log('foo', 1234, 'out bar\nout baz\n', 'err bar\nerr baz\n', newer_than=None),
- (u'crash log for foo (pid 1234):\n'
+ ('err bar\n'
+ 'err baz\n'
+ '********* Tombstone file:\n'
+ '-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
+ '/data/tombstones/tombstone_03\n'
+ 'mock_contents\n',
+ u'crash log for foo (pid 1234):\n'
u'STDOUT: out bar\n'
u'STDOUT: out baz\n'
u'STDOUT: ********* Logcat:\n'
@@ -117,7 +122,11 @@ class ChromiumAndroidPortTest(chromium_port_testcase.ChromiumPortTestCase):
u'STDERR: /data/tombstones/tombstone_03\n'
u'STDERR: mock_contents\n'))
self.assertEquals(port._get_crash_log(None, None, None, None, newer_than=None),
- (u'crash log for <unknown process name> (pid <unknown>):\n'
+ ('********* Tombstone file:\n'
+ '-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
+ '/data/tombstones/tombstone_03\n'
+ 'mock_contents\n',
+ u'crash log for <unknown process name> (pid <unknown>):\n'
u'STDOUT: ********* Logcat:\n'
u'STDOUT: logcat contents\n'
u'STDERR: ********* Tombstone file:\n'
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
index e54078d2b..d5c2235bf 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
@@ -69,7 +69,7 @@ class ChromiumLinuxPort(chromium.ChromiumPort):
configuration = options.configuration
else:
configuration = config_object.default_configuration()
- return cls._static_build_path(host.filesystem, build_directory, chromium_base, webkit_base, configuration, 'DumpRenderTree')
+ return cls._static_build_path(host.filesystem, build_directory, chromium_base, webkit_base, configuration, ['DumpRenderTree'])
@staticmethod
def _determine_architecture(filesystem, executive, driver_path):
@@ -111,7 +111,7 @@ class ChromiumLinuxPort(chromium.ChromiumPort):
return map(self._webkit_baseline_path, port_names)
def _modules_to_search_for_symbols(self):
- return [self._build_path(self.get_option('configuration'), 'libffmpegsumo.so')]
+ return [self._build_path('libffmpegsumo.so')]
def check_build(self, needs_http):
result = chromium.ChromiumPort.check_build(self, needs_http)
@@ -173,10 +173,8 @@ class ChromiumLinuxPort(chromium.ChromiumPort):
return "/usr/bin/php-cgi"
def _path_to_driver(self, configuration=None):
- if not configuration:
- configuration = self.get_option('configuration')
binary_name = self.driver_name()
- return self._build_path(configuration, binary_name)
+ return self._build_path_with_configuration(configuration, binary_name)
def _path_to_helper(self):
return None
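chromium_linux.py now leans on two helpers that live on the Chromium base port rather than in this file: _build_path, which appends the port's configured configuration automatically, and _build_path_with_configuration, which accepts an explicit override. A minimal sketch of that split, assuming the override simply falls back to the configured value when None is passed:

    import os

    class BuildPaths(object):
        # Illustrative stand-in for the helpers on the Chromium port class.
        def __init__(self, build_root, configuration):
            self._build_root = build_root
            self._configuration = configuration

        def _build_path(self, *comps):
            # Use the port's configured configuration (e.g. 'Release').
            return self._build_path_with_configuration(None, *comps)

        def _build_path_with_configuration(self, configuration, *comps):
            configuration = configuration or self._configuration
            return os.path.join(self._build_root, configuration, *comps)

    paths = BuildPaths('/mock-checkout/out', 'Release')
    print(paths._build_path('DumpRenderTree'))           # .../out/Release/DumpRenderTree
    print(paths._build_path_with_configuration('Debug', 'DumpRenderTree'))

This is consistent with the unit-test expectations below, where every default build directory now carries the configuration suffix.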
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py
index 9094d7458..169c2f4f4 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py
@@ -93,19 +93,19 @@ class ChromiumLinuxPortTest(chromium_port_testcase.ChromiumPortTestCase):
def test_build_path(self):
# Test that optional paths are used regardless of whether they exist.
options = MockOptions(configuration='Release', build_directory='/foo')
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out'], '/foo')
+ self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], '/foo/Release')
# Test that optional relative paths are returned unmodified.
options = MockOptions(configuration='Release', build_directory='foo')
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out'], 'foo')
+ self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], 'foo/Release')
# Test that we look in a chromium directory before the webkit directory.
options = MockOptions(configuration='Release', build_directory=None)
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out', '/mock-checkout/out'], '/mock-checkout/Source/WebKit/chromium/out')
+ self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release', '/mock-checkout/out/Release'], '/mock-checkout/Source/WebKit/chromium/out/Release')
# Test that we prefer the legacy dir over the new dir.
options = MockOptions(configuration='Release', build_directory=None)
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/sconsbuild', '/mock-checkout/Source/WebKit/chromium/out'], '/mock-checkout/Source/WebKit/chromium/sconsbuild')
+ self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/sconsbuild/Release', '/mock-checkout/Source/WebKit/chromium/out/Release'], '/mock-checkout/Source/WebKit/chromium/sconsbuild/Release')
def test_driver_name_option(self):
self.assertTrue(self.make_port()._path_to_driver().endswith('DumpRenderTree'))
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
index df1ac7b58..baea0639b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
@@ -79,7 +79,7 @@ class ChromiumMacPort(chromium.ChromiumPort):
return map(self._webkit_baseline_path, fallback_paths[self._version])
def _modules_to_search_for_symbols(self):
- return [self._build_path(self.get_option('configuration'), 'ffmpegsumo.so')]
+ return [self._build_path('ffmpegsumo.so')]
def check_build(self, needs_http):
result = chromium.ChromiumPort.check_build(self, needs_http)
@@ -120,14 +120,11 @@ class ChromiumMacPort(chromium.ChromiumPort):
def _path_to_driver(self, configuration=None):
# FIXME: make |configuration| happy with case-sensitive file systems.
- if not configuration:
- configuration = self.get_option('configuration')
- return self._build_path(configuration, self.driver_name() + '.app',
- 'Contents', 'MacOS', self.driver_name())
+ return self._build_path_with_configuration(configuration, self.driver_name() + '.app', 'Contents', 'MacOS', self.driver_name())
def _path_to_helper(self):
binary_name = 'LayoutTestHelper'
- return self._build_path(self.get_option('configuration'), binary_name)
+ return self._build_path(binary_name)
def _path_to_wdiff(self):
return 'wdiff'
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
index 87904a804..edf92ea20 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
@@ -74,19 +74,19 @@ class ChromiumMacPortTest(chromium_port_testcase.ChromiumPortTestCase):
def test_build_path(self):
# Test that optional paths are used regardless of whether they exist.
options = MockOptions(configuration='Release', build_directory='/foo')
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out'], '/foo')
+ self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], '/foo/Release')
# Test that optional relative paths are returned unmodified.
options = MockOptions(configuration='Release', build_directory='foo')
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out'], 'foo')
+ self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], 'foo/Release')
# Test that we look in a chromium directory before the webkit directory.
options = MockOptions(configuration='Release', build_directory=None)
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out', '/mock-checkout/out'], '/mock-checkout/Source/WebKit/chromium/out')
+ self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release', '/mock-checkout/out/Release'], '/mock-checkout/Source/WebKit/chromium/out/Release')
# Test that we prefer the legacy dir over the new dir.
options = MockOptions(configuration='Release', build_directory=None)
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/xcodebuild', '/mock-checkout/Source/WebKit/chromium/out'], '/mock-checkout/Source/WebKit/chromium/xcodebuild')
+ self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/xcodebuild/Release', '/mock-checkout/Source/WebKit/chromium/out/Release'], '/mock-checkout/Source/WebKit/chromium/xcodebuild/Release')
def test_driver_name_option(self):
self.assertTrue(self.make_port()._path_to_driver().endswith('DumpRenderTree'))
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py
index 697c27242..7d4c235f3 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py
@@ -44,6 +44,10 @@ from webkitpy.layout_tests.port import port_testcase
class ChromiumPortTestCase(port_testcase.PortTestCase):
+ def test_check_build(self):
+ port = self.make_port()
+ port.check_build(needs_http=True)
+
def test_default_timeout_ms(self):
self.assertEquals(self.make_port(options=MockOptions(configuration='Release')).default_timeout_ms(), 6000)
self.assertEquals(self.make_port(options=MockOptions(configuration='Debug')).default_timeout_ms(), 12000)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py
index 51611241c..7d0337acc 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py
@@ -149,18 +149,16 @@ class ChromiumWinPort(chromium.ChromiumPort):
return self._lighttpd_path('php5', 'php-cgi.exe')
def _path_to_driver(self, configuration=None):
- if not configuration:
- configuration = self.get_option('configuration')
binary_name = '%s.exe' % self.driver_name()
- return self._build_path(configuration, binary_name)
+ return self._build_path_with_configuration(configuration, binary_name)
def _path_to_helper(self):
binary_name = 'LayoutTestHelper.exe'
- return self._build_path(self.get_option('configuration'), binary_name)
+ return self._build_path(binary_name)
def _path_to_image_diff(self):
binary_name = 'ImageDiff.exe'
- return self._build_path(self.get_option('configuration'), binary_name)
+ return self._build_path(binary_name)
def _path_to_wdiff(self):
return self.path_from_chromium_base('third_party', 'cygwin', 'bin', 'wdiff.exe')
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py
index 607719241..dc184fc14 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py
@@ -100,19 +100,19 @@ class ChromiumWinTest(chromium_port_testcase.ChromiumPortTestCase):
def test_build_path(self):
# Test that optional paths are used regardless of whether they exist.
options = MockOptions(configuration='Release', build_directory='/foo')
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out'], '/foo')
+ self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], '/foo/Release')
# Test that optional relative paths are returned unmodified.
options = MockOptions(configuration='Release', build_directory='foo')
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out'], 'foo')
+ self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], 'foo/Release')
# Test that we look in a chromium directory before the webkit directory.
options = MockOptions(configuration='Release', build_directory=None)
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out', '/mock-checkout/out'], '/mock-checkout/Source/WebKit/chromium/out')
+ self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release', '/mock-checkout/out/Release'], '/mock-checkout/Source/WebKit/chromium/out/Release')
# Test that we prefer the legacy dir over the new dir.
options = MockOptions(configuration='Release', build_directory=None)
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/build', '/mock-checkout/Source/WebKit/chromium/out'], '/mock-checkout/Source/WebKit/chromium/build')
+ self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/build/Release', '/mock-checkout/Source/WebKit/chromium/out'], '/mock-checkout/Source/WebKit/chromium/build/Release')
def test_operating_system(self):
self.assertEqual('win', self.make_port().operating_system())
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/driver.py b/Tools/Scripts/webkitpy/layout_tests/port/driver.py
index 5e7061bab..781823b8d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/driver.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/driver.py
@@ -36,8 +36,6 @@ import time
from webkitpy.common.system import path
-from webkitpy.layout_tests.port import server_process
-
_log = logging.getLogger(__name__)
@@ -115,9 +113,6 @@ class Driver(object):
self._worker_number = worker_number
self._no_timeout = no_timeout
- # overridable for testing.
- self._server_process_constructor = server_process.ServerProcess
-
self._driver_tempdir = None
# WebKitTestRunner can report back subprocess crashes by printing
# "#CRASHED - PROCESSNAME". Since those can happen at any time
@@ -171,8 +166,8 @@ class Driver(object):
crash_log = None
if self.has_crashed():
- crash_log = self._port._get_crash_log(self._crashed_process_name, self._crashed_pid, text, self.error_from_test,
- newer_than=start_time)
+ self.error_from_test, crash_log = self._port._get_crash_log(self._crashed_process_name,
+ self._crashed_pid, text, self.error_from_test, newer_than=start_time)
# If we don't find a crash log use a placeholder error message instead.
if not crash_log:
@@ -271,7 +266,7 @@ class Driver(object):
environment['LOCAL_RESOURCE_ROOT'] = self._port.layout_tests_dir()
self._crashed_process_name = None
self._crashed_pid = None
- self._server_process = self._server_process_constructor(self._port, server_name, self.cmd_line(pixel_tests, per_test_args), environment)
+ self._server_process = self._port._server_process_constructor(self._port, server_name, self.cmd_line(pixel_tests, per_test_args), environment)
self._server_process.start()
def stop(self):
@@ -298,7 +293,7 @@ class Driver(object):
cmd.extend(self._port.get_option('additional_drt_flag', []))
- if pixel_tests:
+ if pixel_tests and not self._port.supports_switching_pixel_tests_per_test():
cmd.append('--pixel-tests')
cmd.extend(per_test_args)
@@ -338,8 +333,14 @@ class Driver(object):
if sys.platform == 'cygwin':
command = path.cygpath(command)
+ assert not driver_input.image_hash or driver_input.should_run_pixel_test
+
+ if driver_input.should_run_pixel_test:
+ if self._port.supports_switching_pixel_tests_per_test():
+                # We did not start the driver with --pixel-tests; instead we specify it per test.
+ # "'" is the separator of command fields.
+ command += "'" + '--pixel-test'
if driver_input.image_hash:
- # "'" is the separator of command fields.
command += "'" + driver_input.image_hash
return command + "\n"
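The per-test pixel switch changes what the driver command line contains: a single line whose fields are separated by the ' character, holding the test URI, optionally --pixel-test, and optionally the expected image hash. A standalone sketch of that composition (function name and parameters are illustrative; the separator and field order mirror the hunk above):

    def build_driver_command(command, should_run_pixel_test, image_hash,
                             switch_pixel_tests_per_test=True):
        # An image hash only makes sense for a pixel test.
        assert not image_hash or should_run_pixel_test
        if should_run_pixel_test and switch_pixel_tests_per_test:
            # "'" is the separator of command fields.
            command += "'" + '--pixel-test'
        if image_hash:
            command += "'" + image_hash
        return command + "\n"

    print(build_driver_command('LayoutTests/compositing/example.html', True, 'abc123'))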
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
index d712023e3..705c1bb7b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
@@ -31,9 +31,10 @@ import unittest
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests.port import Port, Driver, DriverOutput
+from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
# FIXME: remove the dependency on TestWebKitPort
-from webkitpy.layout_tests.port.webkit_unittest import TestWebKitPort
+from webkitpy.layout_tests.port.port_testcase import TestWebKitPort
class DriverOutputTest(unittest.TestCase):
@@ -232,8 +233,8 @@ class DriverTest(unittest.TestCase):
def test_stop_cleans_up_properly(self):
port = TestWebKitPort()
+ port._server_process_constructor = MockServerProcess
driver = Driver(port, 0, pixel_tests=True)
- driver._server_process_constructor = MockServerProcess
driver.start(True, [])
last_tmpdir = port._filesystem.last_tmpdir
self.assertNotEquals(last_tmpdir, None)
@@ -242,8 +243,8 @@ class DriverTest(unittest.TestCase):
def test_two_starts_cleans_up_properly(self):
port = TestWebKitPort()
+ port._server_process_constructor = MockServerProcess
driver = Driver(port, 0, pixel_tests=True)
- driver._server_process_constructor = MockServerProcess
driver.start(True, [])
last_tmpdir = port._filesystem.last_tmpdir
driver._start(True, [])
@@ -251,50 +252,11 @@ class DriverTest(unittest.TestCase):
def test_start_actually_starts(self):
port = TestWebKitPort()
+ port._server_process_constructor = MockServerProcess
driver = Driver(port, 0, pixel_tests=True)
- driver._server_process_constructor = MockServerProcess
driver.start(True, [])
self.assertTrue(driver._server_process.started)
-class MockServerProcess(object):
- def __init__(self, port_obj=None, name=None, cmd=None, env=None, universal_newlines=False, lines=None):
- self.timed_out = False
- self.lines = lines or []
- self.crashed = False
- self.started = False
-
- def has_crashed(self):
- return self.crashed
-
- def read_stdout_line(self, deadline):
- return self.lines.pop(0) + "\n"
-
- def read_stdout(self, deadline, size):
- first_line = self.lines[0]
- if size > len(first_line):
- self.lines.pop(0)
- remaining_size = size - len(first_line) - 1
- if not remaining_size:
- return first_line + "\n"
- return first_line + "\n" + self.read_stdout(deadline, remaining_size)
- result = self.lines[0][:size]
- self.lines[0] = self.lines[0][size:]
- return result
-
- def read_either_stdout_or_stderr_line(self, deadline):
- # FIXME: We should have tests which intermix stderr and stdout lines.
- return self.read_stdout_line(deadline), None
-
- def start(self):
- self.started = True
-
- def stop(self, kill_directly=False):
- return
-
- def kill(self):
- return
-
-
if __name__ == '__main__':
unittest.main()
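These tests no longer patch _server_process_constructor on each Driver; the hook now lives on the Port, so a single assignment covers every server process the port creates, including the ImageDiff one. The injection point, sketched with throwaway stand-ins (FakePort and FakeServerProcess are not webkitpy classes):

    class FakeServerProcess(object):
        def __init__(self, port_obj, name, cmd, env):
            self.name = name
            self.started = False

        def start(self):
            self.started = True

    class FakePort(object):
        # Production ports would default this to server_process.ServerProcess;
        # tests simply overwrite the attribute before any driver is created.
        _server_process_constructor = FakeServerProcess

    port = FakePort()
    process = port._server_process_constructor(port, 'DumpRenderTree', ['drt'], {})
    process.start()
    assert process.started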
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/efl.py b/Tools/Scripts/webkitpy/layout_tests/port/efl.py
index 4e43f8b6e..25a81d2da 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/efl.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/efl.py
@@ -27,20 +27,16 @@
"""WebKit Efl implementation of the Port interface."""
-import logging
-import signal
-import subprocess
-
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
-from webkitpy.layout_tests.port.webkit import WebKitPort
+from webkitpy.layout_tests.port.base import Port
from webkitpy.layout_tests.port.pulseaudio_sanitizer import PulseAudioSanitizer
-class EflPort(WebKitPort, PulseAudioSanitizer):
+class EflPort(Port, PulseAudioSanitizer):
port_name = 'efl'
def __init__(self, *args, **kwargs):
- WebKitPort.__init__(self, *args, **kwargs)
+ super(EflPort, self).__init__(*args, **kwargs)
self._jhbuild_wrapper_path = self.path_from_webkit_base('Tools', 'efl', 'run-with-jhbuild')
@@ -59,9 +55,20 @@ class EflPort(WebKitPort, PulseAudioSanitizer):
env['TEST_RUNNER_PLUGIN_PATH'] = self._build_path('lib')
if self.webprocess_cmd_prefix:
env['WEB_PROCESS_CMD_PREFIX'] = self.webprocess_cmd_prefix
+
+ env['XDG_CACHE_HOME'] = str(self._filesystem.mkdtemp(prefix='%s-Efl-CacheDir-' % self.driver_name()))
+ env['XDG_DATA_HOME'] = str(self._filesystem.mkdtemp(prefix='%s-Efl-DataDir-' % self.driver_name()))
return env
+ def default_timeout_ms(self):
+        # Tests run considerably slower under gdb or valgrind.
+ if self.get_option('webprocess_cmd_prefix'):
+ return 350 * 1000
+ return super(EflPort, self).default_timeout_ms()
+
def clean_up_test_run(self):
+ super(EflPort, self).clean_up_test_run()
self._restore_pulseaudio_module()
def _generate_all_test_configurations(self):
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/gtk.py b/Tools/Scripts/webkitpy/layout_tests/port/gtk.py
index ebb761bbd..29ab861ce 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/gtk.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/gtk.py
@@ -26,19 +26,16 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import logging
import os
-import signal
import subprocess
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
-from webkitpy.layout_tests.port.server_process import ServerProcess
-from webkitpy.layout_tests.port.webkit import WebKitPort
+from webkitpy.layout_tests.port.base import Port
from webkitpy.layout_tests.port.pulseaudio_sanitizer import PulseAudioSanitizer
from webkitpy.layout_tests.port.xvfbdriver import XvfbDriver
-from webkitpy.common.system.executive import Executive
-class GtkPort(WebKitPort, PulseAudioSanitizer):
+
+class GtkPort(Port, PulseAudioSanitizer):
port_name = "gtk"
def _port_flag_for_scripts(self):
@@ -51,10 +48,11 @@ class GtkPort(WebKitPort, PulseAudioSanitizer):
self._unload_pulseaudio_module()
def clean_up_test_run(self):
+ super(GtkPort, self).clean_up_test_run()
self._restore_pulseaudio_module()
def setup_environ_for_server(self, server_name=None):
- environment = WebKitPort.setup_environ_for_server(self, server_name)
+ environment = super(GtkPort, self).setup_environ_for_server(server_name)
environment['GTK_MODULES'] = 'gail'
environment['GSETTINGS_BACKEND'] = 'memory'
environment['LIBOVERLAY_SCROLLBAR'] = '0'
@@ -113,7 +111,7 @@ class GtkPort(WebKitPort, PulseAudioSanitizer):
return None
     # FIXME: We should find a way to share this implementation with Gtk,
- # or teach run-launcher how to call run-safari and move this down to WebKitPort.
+ # or teach run-launcher how to call run-safari and move this down to Port.
def show_results_html_file(self, results_filename):
run_launcher_args = ["file://%s" % results_filename]
if self.get_option('webkit_test_runner'):
@@ -165,8 +163,8 @@ Coredump %(expected_crash_dump_filename)s not found. To enable crash logs:
""" % locals()
- return """\
+ return (stderr, """\
Crash log for %(name)s (pid %(pid_representation)s):
%(crash_log)s
-%(errors_str)s""" % locals()
+%(errors_str)s""" % locals())
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py
index 2b5c1a464..5140305aa 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py
@@ -75,8 +75,10 @@ STDERR: <empty>""" % locals()
port = self.make_port()
port._get_gdb_output = mock_empty_crash_log
- log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=None)
+ stderr, log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=None)
+ self.assertEqual(stderr, "")
self.assertLinesEqual(log, mock_empty_crash_log)
- log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=0.0)
+ stderr, log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=0.0)
+ self.assertEqual(stderr, "")
self.assertLinesEqual(log, mock_empty_crash_log)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/image_diff.py b/Tools/Scripts/webkitpy/layout_tests/port/image_diff.py
new file mode 100644
index 000000000..2cccc1f5e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/image_diff.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi <rgabor@inf.u-szeged.hu>, University of Szeged
+# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""WebKit implementations of the Port interface."""
+
+import logging
+import re
+import time
+
+from webkitpy.layout_tests.port import server_process
+
+
+_log = logging.getLogger(__name__)
+
+
+class ImageDiffer(object):
+ def __init__(self, port):
+ self._port = port
+ self._tolerance = None
+ self._process = None
+
+ def diff_image(self, expected_contents, actual_contents, tolerance):
+ if tolerance != self._tolerance:
+ self.stop()
+ try:
+ assert(expected_contents)
+ assert(actual_contents)
+ assert(tolerance is not None)
+
+ if not self._process:
+ self._start(tolerance)
+ self._process.write('Content-Length: %d\n%sContent-Length: %d\n%s' % (
+ len(actual_contents), actual_contents,
+ len(expected_contents), expected_contents))
+ return self._read()
+ except IOError as exception:
+ _log.error("Failed to compute an image diff: %s" % str(exception))
+ return (True, 0)
+
+ def _start(self, tolerance):
+ command = [self._port._path_to_image_diff(), '--tolerance', str(tolerance)]
+ environment = self._port.setup_environ_for_server('ImageDiff')
+ self._process = self._port._server_process_constructor(self._port, 'ImageDiff', command, environment)
+ self._process.start()
+ self._tolerance = tolerance
+
+ def _read(self):
+ deadline = time.time() + 2.0
+ output = None
+ output_image = ""
+
+ while True:
+ output = self._process.read_stdout_line(deadline)
+ if self._process.timed_out or self._process.has_crashed() or not output:
+ break
+
+ if output.startswith('diff'): # This is the last line ImageDiff prints.
+ break
+
+ if output.startswith('Content-Length'):
+ m = re.match('Content-Length: (\d+)', output)
+ content_length = int(m.group(1))
+ output_image = self._process.read_stdout(deadline, content_length)
+ output = self._process.read_stdout_line(deadline)
+ break
+
+ stderr = self._process.pop_all_buffered_stderr()
+ if stderr:
+ _log.warn("ImageDiff produced stderr output:\n" + stderr)
+ if self._process.timed_out:
+ _log.error("ImageDiff timed out")
+ if self._process.has_crashed():
+ _log.error("ImageDiff crashed")
+ # FIXME: There is no need to shut down the ImageDiff server after every diff.
+ self._process.stop()
+
+ diff_percent = 0
+ if output and output.startswith('diff'):
+ m = re.match('diff: (.+)% (passed|failed)', output)
+ if m.group(2) == 'passed':
+ return [None, 0]
+ diff_percent = float(m.group(1))
+
+ return (output_image, diff_percent)
+
+ def stop(self):
+ if self._process:
+ self._process.stop()
+ self._process = None
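ImageDiffer speaks a small line-oriented protocol to the ImageDiff binary: each request is two Content-Length framed image blobs (actual first, then expected), and the reply ends with a 'diff: N% passed|failed' line, optionally preceded by a Content-Length framed diff image. Two helper functions, written here only to make the framing and the result parsing explicit:

    import re

    def frame_image_diff_request(actual_contents, expected_contents):
        # Matches the write() call in ImageDiffer.diff_image() above.
        return 'Content-Length: %d\n%sContent-Length: %d\n%s' % (
            len(actual_contents), actual_contents,
            len(expected_contents), expected_contents)

    def parse_diff_result_line(line):
        # Returns the difference percentage; 0 means the images matched.
        m = re.match(r'diff: (.+)% (passed|failed)', line)
        return 0.0 if m.group(2) == 'passed' else float(m.group(1))

    assert parse_diff_result_line('diff: 100% failed\n') == 100.0
    assert parse_diff_result_line('diff: 0% passed\n') == 0.0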
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py
new file mode 100755
index 000000000..b06756c35
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit testing base class for Port implementations."""
+
+import unittest
+
+from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
+from webkitpy.layout_tests.port.image_diff import ImageDiffer
+
+
+class FakePort(object):
+ def __init__(self, server_process_output):
+ self._server_process_constructor = lambda port, nm, cmd, env: MockServerProcess(lines=server_process_output)
+
+ def _path_to_image_diff(self):
+ return ''
+
+ def setup_environ_for_server(self, nm):
+ return None
+
+
+class TestImageDiffer(unittest.TestCase):
+ def test_diff_image(self):
+ port = FakePort(['diff: 100% failed\n'])
+ image_differ = ImageDiffer(port)
+ self.assertEquals(image_differ.diff_image('foo', 'bar', 0.1), ('', 100.0))
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mac.py b/Tools/Scripts/webkitpy/layout_tests/port/mac.py
index 372ffb807..756bd2abe 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/mac.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mac.py
@@ -89,7 +89,7 @@ class MacPort(ApplePort):
return self._webkit_baseline_path('mac')
return ApplePort.baseline_path(self)
- def baseline_search_path(self):
+ def default_baseline_search_path(self):
fallback_index = self.VERSION_FALLBACK_ORDER.index(self._port_name_with_version())
fallback_names = list(self.VERSION_FALLBACK_ORDER[fallback_index:])
if self.get_option('webkit_test_runner'):
@@ -220,8 +220,8 @@ class MacPort(ApplePort):
now = time_fn()
if not crash_log:
- return None
- return crash_log
+ return (stderr, None)
+ return (stderr, crash_log)
def look_for_new_crash_logs(self, crashed_processes, start_time):
"""Since crash logs can take a long time to be written out if the system is
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
index 1654e1c48..964ef07ef 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
@@ -57,6 +57,9 @@ class MockDRTPortTest(port_testcase.PortTestCase):
def test_check_sys_deps(self):
pass
+ def test_diff_image(self):
+ pass
+
def test_uses_apache(self):
pass
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
index 9f77832aa..a366e8951 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
@@ -29,21 +29,48 @@
"""Unit testing base class for Port implementations."""
import errno
+import logging
import socket
import sys
import time
import unittest
-from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
-from webkitpy.layout_tests.servers import http_server_base
-from webkitpy.layout_tests.servers import http_server_base
-from webkitpy.layout_tests.port import factory
+from webkitpy.layout_tests.port.base import Port
from webkitpy.layout_tests.port.config_mock import MockConfig
+from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
+from webkitpy.layout_tests.servers import http_server_base
from webkitpy.tool.mocktool import MockOptions
+# FIXME: get rid of this fixture
+class TestWebKitPort(Port):
+ port_name = "testwebkitport"
+
+ def __init__(self, symbols_string=None,
+ expectations_file=None, skips_file=None, host=None, config=None,
+ **kwargs):
+        self.symbols_string = symbols_string # Passing "" disables all statically-detectable features.
+ host = host or MockSystemHost()
+ config = config or MockConfig()
+ super(TestWebKitPort, self).__init__(host=host, config=config, **kwargs)
+
+ def all_test_configurations(self):
+ return [self.test_configuration()]
+
+ def _symbols_string(self):
+ return self.symbols_string
+
+ def _tests_for_other_platforms(self):
+ return ["media", ]
+
+ def _tests_for_disabled_features(self):
+ return ["accessibility", ]
+
+
class PortTestCase(unittest.TestCase):
"""Tests that all Port implementations must pass."""
HTTP_PORTS = (8000, 8080, 8443)
@@ -52,7 +79,7 @@ class PortTestCase(unittest.TestCase):
# Subclasses override this to point to their Port subclass.
os_name = None
os_version = None
- port_maker = None
+ port_maker = TestWebKitPort
def make_port(self, host=None, port_name=None, options=None, os_name=None, os_version=None, config=None, **kwargs):
host = host or MockSystemHost(os_name=(os_name or self.os_name), os_version=(os_version or self.os_version))
@@ -209,6 +236,7 @@ class PortTestCase(unittest.TestCase):
self.assertFalse(port.diff_image(None, None)[0])
self.assertFalse(port.diff_image(None, '')[0])
self.assertFalse(port.diff_image('', None)[0])
+
self.assertFalse(port.diff_image('', '')[0])
def test_diff_image__missing_actual(self):
@@ -221,9 +249,21 @@ class PortTestCase(unittest.TestCase):
self.assertTrue(port.diff_image('foo', None)[0])
self.assertTrue(port.diff_image('foo', '')[0])
- def test_check_build(self):
+ def test_diff_image(self):
port = self.make_port()
- port.check_build(needs_http=True)
+ self.proc = None
+
+ def make_proc(port, nm, cmd, env):
+ self.proc = MockServerProcess(port, nm, cmd, env, lines=['diff: 100% failed\n'])
+ return self.proc
+
+ port._server_process_constructor = make_proc
+ port.setup_test_run()
+ self.assertEquals(port.diff_image('foo', 'bar'), ('', 100.0))
+ self.assertEquals(self.proc.cmd[1:3], ["--tolerance", "0.1"])
+ port.clean_up_test_run()
+ self.assertTrue(self.proc.stopped)
+ self.assertEquals(port._image_differ, None)
def test_check_wdiff(self):
port = self.make_port()
@@ -319,24 +359,28 @@ class PortTestCase(unittest.TestCase):
def test_get_crash_log(self):
port = self.make_port()
self.assertEquals(port._get_crash_log(None, None, None, None, newer_than=None),
- ('crash log for <unknown process name> (pid <unknown>):\n'
+ (None,
+ 'crash log for <unknown process name> (pid <unknown>):\n'
'STDOUT: <empty>\n'
'STDERR: <empty>\n'))
self.assertEquals(port._get_crash_log('foo', 1234, 'out bar\nout baz', 'err bar\nerr baz\n', newer_than=None),
- ('crash log for foo (pid 1234):\n'
+ ('err bar\nerr baz\n',
+ 'crash log for foo (pid 1234):\n'
'STDOUT: out bar\n'
'STDOUT: out baz\n'
'STDERR: err bar\n'
'STDERR: err baz\n'))
self.assertEquals(port._get_crash_log('foo', 1234, 'foo\xa6bar', 'foo\xa6bar', newer_than=None),
- (u'crash log for foo (pid 1234):\n'
+ ('foo\xa6bar',
+ u'crash log for foo (pid 1234):\n'
u'STDOUT: foo\ufffdbar\n'
u'STDERR: foo\ufffdbar\n'))
self.assertEquals(port._get_crash_log('foo', 1234, 'foo\xa6bar', 'foo\xa6bar', newer_than=1.0),
- (u'crash log for foo (pid 1234):\n'
+ ('foo\xa6bar',
+ u'crash log for foo (pid 1234):\n'
u'STDOUT: foo\ufffdbar\n'
u'STDERR: foo\ufffdbar\n'))
@@ -363,6 +407,162 @@ class PortTestCase(unittest.TestCase):
self.assertEquals(ordered_dict.keys()[-2:], options.additional_expectations)
self.assertEquals(ordered_dict.values()[-2:], ['foo', 'bar'])
+ def test_path_to_test_expectations_file(self):
+ port = TestWebKitPort()
+ port._options = MockOptions(webkit_test_runner=False)
+ self.assertEqual(port.path_to_test_expectations_file(), '/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations')
+
+ port = TestWebKitPort()
+ port._options = MockOptions(webkit_test_runner=True)
+ self.assertEqual(port.path_to_test_expectations_file(), '/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations')
+
+ port = TestWebKitPort()
+ port.host.filesystem.files['/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations'] = 'some content'
+ port._options = MockOptions(webkit_test_runner=False)
+ self.assertEqual(port.path_to_test_expectations_file(), '/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations')
+
+ def test_skipped_directories_for_symbols(self):
+ # This first test confirms that the commonly found symbols result in the expected skipped directories.
+ symbols_string = " ".join(["GraphicsLayer", "WebCoreHas3DRendering", "isXHTMLMPDocument", "fooSymbol"])
+ expected_directories = set([
+ "mathml", # Requires MathMLElement
+ "fast/canvas/webgl", # Requires WebGLShader
+ "compositing/webgl", # Requires WebGLShader
+ "http/tests/canvas/webgl", # Requires WebGLShader
+ "mhtml", # Requires MHTMLArchive
+ "fast/css/variables", # Requires CSS Variables
+ "inspector/styles/variables", # Requires CSS Variables
+ ])
+
+ result_directories = set(TestWebKitPort(symbols_string, None)._skipped_tests_for_unsupported_features(test_list=['mathml/foo.html']))
+ self.assertEqual(result_directories, expected_directories)
+
+ # Test that the nm string parsing actually works:
+ symbols_string = """
+000000000124f498 s __ZZN7WebCore13GraphicsLayer12replaceChildEPS0_S1_E19__PRETTY_FUNCTION__
+000000000124f500 s __ZZN7WebCore13GraphicsLayer13addChildAboveEPS0_S1_E19__PRETTY_FUNCTION__
+000000000124f670 s __ZZN7WebCore13GraphicsLayer13addChildBelowEPS0_S1_E19__PRETTY_FUNCTION__
+"""
+ # Note 'compositing' is not in the list of skipped directories (hence the parsing of GraphicsLayer worked):
+ expected_directories = set(['mathml', 'transforms/3d', 'compositing/webgl', 'fast/canvas/webgl', 'animations/3d', 'mhtml', 'http/tests/canvas/webgl', 'fast/css/variables', 'inspector/styles/variables'])
+ result_directories = set(TestWebKitPort(symbols_string, None)._skipped_tests_for_unsupported_features(test_list=['mathml/foo.html']))
+ self.assertEqual(result_directories, expected_directories)
+
+ def test_skipped_directories_for_features(self):
+ supported_features = ["Accelerated Compositing", "Foo Feature"]
+ expected_directories = set(["animations/3d", "transforms/3d"])
+ port = TestWebKitPort(None, supported_features)
+ port._runtime_feature_list = lambda: supported_features
+ result_directories = set(port._skipped_tests_for_unsupported_features(test_list=["animations/3d/foo.html"]))
+ self.assertEqual(result_directories, expected_directories)
+
+ def test_skipped_directories_for_features_no_matching_tests_in_test_list(self):
+ supported_features = ["Accelerated Compositing", "Foo Feature"]
+ expected_directories = set([])
+ result_directories = set(TestWebKitPort(None, supported_features)._skipped_tests_for_unsupported_features(test_list=['foo.html']))
+ self.assertEqual(result_directories, expected_directories)
+
+ def test_skipped_tests_for_unsupported_features_empty_test_list(self):
+ supported_features = ["Accelerated Compositing", "Foo Feature"]
+ expected_directories = set([])
+ result_directories = set(TestWebKitPort(None, supported_features)._skipped_tests_for_unsupported_features(test_list=None))
+ self.assertEqual(result_directories, expected_directories)
+
+ def test_skipped_layout_tests(self):
+ self.assertEqual(TestWebKitPort(None, None).skipped_layout_tests(test_list=[]), set(['media']))
+
+ def test_skipped_file_search_paths(self):
+ port = TestWebKitPort()
+ self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport']))
+ port._name = "testwebkitport-version"
+ self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport', 'testwebkitport-version']))
+ port._options = MockOptions(webkit_test_runner=True)
+ self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport', 'testwebkitport-version', 'testwebkitport-wk2', 'wk2']))
+ port._options = MockOptions(additional_platform_directory=["internal-testwebkitport"])
+ self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport', 'testwebkitport-version', 'internal-testwebkitport']))
+
+ def test_root_option(self):
+ port = TestWebKitPort()
+ port._options = MockOptions(root='/foo')
+ self.assertEqual(port._path_to_driver(), "/foo/DumpRenderTree")
+
+ def test_test_expectations(self):
+ # Check that we read the expectations file
+ host = MockSystemHost()
+ host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations',
+ 'BUG_TESTEXPECTATIONS SKIP : fast/html/article-element.html = TEXT\n')
+ port = TestWebKitPort(host=host)
+ self.assertEqual(''.join(port.expectations_dict().values()), 'BUG_TESTEXPECTATIONS SKIP : fast/html/article-element.html = TEXT\n')
+
+ def test_build_driver(self):
+ output = OutputCapture()
+ port = TestWebKitPort()
+ # Delay setting _executive to avoid logging during construction
+ port._executive = MockExecutive(should_log=True)
+ port._options = MockOptions(configuration="Release") # This should not be necessary, but I think TestWebKitPort is actually reading from disk (and thus detects the current configuration).
+ expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
+ self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=''))
+
+ # Make sure when passed --webkit-test-runner we build the right tool.
+ port._options = MockOptions(webkit_test_runner=True, configuration="Release")
+ expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\nMOCK run_command: ['Tools/Scripts/build-webkittestrunner', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
+ self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=''))
+
+ # Make sure we show the build log when --verbose is passed, which we simulate by setting the logging level to DEBUG.
+ output.set_log_level(logging.DEBUG)
+ port._options = MockOptions(configuration="Release")
+ expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
+ expected_logs = "Output of ['Tools/Scripts/build-dumprendertree', '--release']:\nMOCK output of child process\n"
+ self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=expected_logs))
+ output.set_log_level(logging.INFO)
+
+ # Make sure that failure to build returns False.
+ port._executive = MockExecutive(should_log=True, should_throw=True)
+ # Because WK2 currently has to build both webkittestrunner and DRT, if DRT fails, that's the only one it tries.
+ expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
+ expected_logs = "MOCK ScriptError\n\nMOCK output of child process\n"
+ self.assertFalse(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=expected_logs))
+
+ def _assert_config_file_for_platform(self, port, platform, config_file):
+ self.assertEquals(port._apache_config_file_name_for_platform(platform), config_file)
+
+ def test_linux_distro_detection(self):
+ port = TestWebKitPort()
+ self.assertFalse(port._is_redhat_based())
+ self.assertFalse(port._is_debian_based())
+
+ port._filesystem = MockFileSystem({'/etc/redhat-release': ''})
+ self.assertTrue(port._is_redhat_based())
+ self.assertFalse(port._is_debian_based())
+
+ port._filesystem = MockFileSystem({'/etc/debian_version': ''})
+ self.assertFalse(port._is_redhat_based())
+ self.assertTrue(port._is_debian_based())
+
+ def test_apache_config_file_name_for_platform(self):
+ port = TestWebKitPort()
+ self._assert_config_file_for_platform(port, 'cygwin', 'cygwin-httpd.conf')
+
+ self._assert_config_file_for_platform(port, 'linux2', 'apache2-httpd.conf')
+ self._assert_config_file_for_platform(port, 'linux3', 'apache2-httpd.conf')
+
+ port._is_redhat_based = lambda: True
+ self._assert_config_file_for_platform(port, 'linux2', 'fedora-httpd.conf')
+
+ port = TestWebKitPort()
+ port._is_debian_based = lambda: True
+ self._assert_config_file_for_platform(port, 'linux2', 'apache2-debian-httpd.conf')
+
+ self._assert_config_file_for_platform(port, 'mac', 'apache2-httpd.conf')
+ self._assert_config_file_for_platform(port, 'win32', 'apache2-httpd.conf') # win32 isn't a supported sys.platform. AppleWin/WinCairo/WinCE ports all use cygwin.
+ self._assert_config_file_for_platform(port, 'barf', 'apache2-httpd.conf')
+
+ def test_path_to_apache_config_file(self):
+ port = TestWebKitPort()
+ # Mock out _apache_config_file_name_for_platform to ignore the passed sys.platform value.
+ port._apache_config_file_name_for_platform = lambda platform: 'httpd.conf'
+ self.assertEquals(port._path_to_apache_config_file(), '/mock-checkout/LayoutTests/http/conf/httpd.conf')
+
# FIXME: This class and main() should be merged into test-webkitpy.
class EnhancedTestLoader(unittest.TestLoader):
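With port_maker defaulting to the consolidated TestWebKitPort fixture, port-specific suites only need to point that class attribute at their own Port subclass to re-run the shared assertions. The inheritance pattern, reduced to toy classes for illustration:

    import unittest

    class BasePort(object):
        def operating_system(self):
            return 'base'

    class WinPort(BasePort):
        def operating_system(self):
            return 'win'

    class SharedPortTests(unittest.TestCase):
        port_maker = BasePort  # subclasses point this at their Port class

        def make_port(self):
            return self.port_maker()

        def test_reports_an_operating_system(self):
            self.assertTrue(self.make_port().operating_system())

    class WinPortTests(SharedPortTests):
        port_maker = WinPort   # inherits and re-runs the shared tests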
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/qt.py b/Tools/Scripts/webkitpy/layout_tests/port/qt.py
index aaaafac5d..392ab028f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/qt.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/qt.py
@@ -34,17 +34,15 @@ import re
import sys
import os
-import webkit
-
from webkitpy.common.memoized import memoized
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
-from webkitpy.layout_tests.port.webkit import WebKitPort
+from webkitpy.layout_tests.port.base import Port
from webkitpy.layout_tests.port.xvfbdriver import XvfbDriver
_log = logging.getLogger(__name__)
-class QtPort(WebKitPort):
+class QtPort(Port):
ALL_VERSIONS = ['linux', 'win', 'mac']
port_name = "qt"
@@ -62,9 +60,9 @@ class QtPort(WebKitPort):
# sys_platform exists only for unit testing.
def __init__(self, host, port_name, **kwargs):
- WebKitPort.__init__(self, host, port_name, **kwargs)
+ super(QtPort, self).__init__(host, port_name, **kwargs)
- # FIXME: This will allow WebKitPort.baseline_search_path and WebKitPort._skipped_file_search_paths
+ # FIXME: This will allow Port.baseline_search_path and Port._skipped_file_search_paths
# to do the right thing, but doesn't include support for qt-4.8 or qt-arm (seen in LayoutTests/platform) yet.
self._operating_system = port_name.replace('qt-', '')
@@ -141,7 +139,7 @@ class QtPort(WebKitPort):
search_paths.append(self.port_name)
return search_paths
- def baseline_search_path(self):
+ def default_baseline_search_path(self):
return map(self._webkit_baseline_path, self._search_paths())
def _skipped_file_search_paths(self):
@@ -156,7 +154,7 @@ class QtPort(WebKitPort):
return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in self._search_paths()]))
def setup_environ_for_server(self, server_name=None):
- clean_env = WebKitPort.setup_environ_for_server(self, server_name)
+ clean_env = super(QtPort, self).setup_environ_for_server(server_name)
clean_env['QTWEBKIT_PLUGIN_PATH'] = self._build_path('lib/plugins')
self._copy_value_from_environ_if_set(clean_env, 'QT_DRT_WEBVIEW_MODE')
self._copy_value_from_environ_if_set(clean_env, 'DYLD_IMAGE_SUFFIX')
@@ -166,7 +164,7 @@ class QtPort(WebKitPort):
return clean_env
     # FIXME: We should find a way to share this implementation with Gtk,
- # or teach run-launcher how to call run-safari and move this down to WebKitPort.
+ # or teach run-launcher how to call run-safari and move this down to Port.
def show_results_html_file(self, results_filename):
run_launcher_args = []
if self.get_option('webkit_test_runner'):
@@ -185,3 +183,14 @@ class QtPort(WebKitPort):
_log.error('Use git to grab the actual fonts from http://gitorious.org/qtwebkit/testfonts')
return False
return result
+
+ def _supports_switching_pixel_tests_per_test(self):
+ return True
+
+ def _should_run_as_pixel_test(self, test_input):
+ return any(test_input.test_name.startswith(directory)
+ for directory in QtPort._default_pixel_test_directories())
+
+ @staticmethod
+ def _default_pixel_test_directories():
+ return ['compositing']
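QtPort opts into switching pixel tests per test and, by default, only treats tests under compositing as pixel tests; the check is a plain prefix match over a list of directories. A tiny usage sketch (the directory list is hard-coded here, whereas the port reads it from _default_pixel_test_directories):

    def should_run_as_pixel_test(test_name, pixel_test_directories=('compositing',)):
        # Mirrors the prefix match in QtPort._should_run_as_pixel_test.
        return any(test_name.startswith(directory) for directory in pixel_test_directories)

    assert should_run_as_pixel_test('compositing/layers/simple.html')
    assert not should_run_as_pixel_test('fast/css/example.html')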
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
index e07a804b0..aa1e3024f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
@@ -32,7 +32,6 @@
import errno
import logging
import signal
-import subprocess
import sys
import time
@@ -99,12 +98,12 @@ class ServerProcess(object):
self._reset()
# close_fds is a workaround for http://bugs.python.org/issue2320
close_fds = not self._host.platform.is_win()
- self._proc = subprocess.Popen(self._cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- close_fds=close_fds,
- env=self._env,
- universal_newlines=self._universal_newlines)
+ self._proc = self._host.executive.popen(self._cmd, stdin=self._host.executive.PIPE,
+ stdout=self._host.executive.PIPE,
+ stderr=self._host.executive.PIPE,
+ close_fds=close_fds,
+ env=self._env,
+ universal_newlines=self._universal_newlines)
self._pid = self._proc.pid
fd = self._proc.stdout.fileno()
if not self._use_win32_apis:
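start() now obtains the child process from self._host.executive.popen rather than calling subprocess.Popen directly, which lets unit tests swap in an executive that records or fakes the command without monkey-patching the subprocess module. A sketch of the two ends of that seam, assuming the production executive is a thin forwarder:

    import subprocess

    class ForwardingExecutive(object):
        # Production-style executive: forwards to subprocess.
        PIPE = subprocess.PIPE

        def popen(self, *args, **kwargs):
            return subprocess.Popen(*args, **kwargs)

    class RecordingExecutive(object):
        # Test double: captures the command instead of spawning anything.
        PIPE = object()

        def __init__(self):
            self.commands = []

        def popen(self, cmd, **kwargs):
            self.commands.append(cmd)
            return None  # a real test would return a fake process object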
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py
new file mode 100644
index 000000000..ae48523eb
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class MockServerProcess(object):
+ def __init__(self, port_obj=None, name=None, cmd=None, env=None, universal_newlines=False, lines=None):
+ self.timed_out = False
+ self.lines = lines or []
+ self.crashed = False
+ self.writes = []
+ self.cmd = cmd
+ self.env = env
+ self.started = False
+ self.stopped = False
+
+ def write(self, bytes):
+ self.writes.append(bytes)
+
+ def has_crashed(self):
+ return self.crashed
+
+ def read_stdout_line(self, deadline):
+ return self.lines.pop(0) + "\n"
+
+ def read_stdout(self, deadline, size):
+ first_line = self.lines[0]
+ if size > len(first_line):
+ self.lines.pop(0)
+ remaining_size = size - len(first_line) - 1
+ if not remaining_size:
+ return first_line + "\n"
+ return first_line + "\n" + self.read_stdout(deadline, remaining_size)
+ result = self.lines[0][:size]
+ self.lines[0] = self.lines[0][size:]
+ return result
+
+ def pop_all_buffered_stderr(self):
+ return ''
+
+ def read_either_stdout_or_stderr_line(self, deadline):
+ # FIXME: We should have tests which intermix stderr and stdout lines.
+ return self.read_stdout_line(deadline), None
+
+ def start(self):
+ self.started = True
+
+ def stop(self, kill_directly=False):
+ self.stopped = True
+ return
+
+ def kill(self):
+ return
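Now that MockServerProcess lives in its own module, any test can import it and feed it scripted output; read_stdout_line() pops one entry from lines per call (appending a newline), and write() records everything sent to the process. For example, wiring it into a throwaway port stub:

    from webkitpy.layout_tests.port.server_process_mock import MockServerProcess

    class StubPort(object):
        _server_process_constructor = MockServerProcess

    port = StubPort()
    process = port._server_process_constructor(port, 'ImageDiff', cmd=['ImageDiff'],
                                               env=None, lines=['diff: 10% failed'])
    process.start()
    process.write('Content-Length: 3\nfoo')
    assert process.read_stdout_line(deadline=None) == 'diff: 10% failed\n'
    assert process.writes == ['Content-Length: 3\nfoo']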
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/layout_tests/port/test.py
index 9cf98de74..5714661fd 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -227,6 +227,14 @@ layer at (0,0) size 800x34
tests.add('perf/foo/test.html')
tests.add('perf/foo/test-ref.html')
+ # For testing --pixel-test-directories.
+ tests.add('failures/unexpected/pixeldir/image_in_pixeldir.html',
+ actual_image='image_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
+ expected_image='image_in_pixeldir-pngtEXtchecksum\x00checksum-png')
+ tests.add('failures/unexpected/image_not_in_pixeldir.html',
+ actual_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
+ expected_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum-png')
+
return tests
@@ -505,6 +513,11 @@ class TestPort(Port):
VirtualTestSuite('virtual/skipped', 'failures/expected', ['--virtual-arg2']),
]
+ def supports_switching_pixel_tests_per_test(self):
+        # Return True so we can test the --pixel-test-directory option.
+ return True
+
+
class TestDriver(Driver):
"""Test/Dummy implementation of the DumpRenderTree interface."""
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/webkit.py b/Tools/Scripts/webkitpy/layout_tests/port/webkit.py
index d5b7c0d7f..058787c71 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/webkit.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/webkit.py
@@ -29,270 +29,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""WebKit implementations of the Port interface."""
-
-import itertools
-import logging
-import operator
-import re
-import time
-
-from webkitpy.common.system.executive import Executive, ScriptError
-from webkitpy.layout_tests.port import server_process, Port
-
-
-_log = logging.getLogger(__name__)
-
-
-class WebKitPort(Port):
-
- # FIXME: Eventually we should standarize port naming, and make this method smart enough
- # to use for all port configurations (including architectures, graphics types, etc).
- def _port_flag_for_scripts(self):
- # This is overrriden by ports which need a flag passed to scripts to distinguish the use of that port.
- # For example --qt on linux, since a user might have both Gtk and Qt libraries installed.
- # FIXME: Chromium should override this once ChromiumPort is a WebKitPort.
- return None
-
- # This is modeled after webkitdirs.pm argumentsForConfiguration() from old-run-webkit-tests
- def _arguments_for_configuration(self):
- config_args = []
- config_args.append(self._config.flag_for_configuration(self.get_option('configuration')))
- # FIXME: We may need to add support for passing --32-bit like old-run-webkit-tests had.
- port_flag = self._port_flag_for_scripts()
- if port_flag:
- config_args.append(port_flag)
- return config_args
-
- def _run_script(self, script_name, args=None, include_configuration_arguments=True, decode_output=True, env=None):
- run_script_command = [self._config.script_path(script_name)]
- if include_configuration_arguments:
- run_script_command.extend(self._arguments_for_configuration())
- if args:
- run_script_command.extend(args)
- output = self._executive.run_command(run_script_command, cwd=self._config.webkit_base_dir(), decode_output=decode_output, env=env)
- _log.debug('Output of %s:\n%s' % (run_script_command, output))
- return output
-
- def _build_driver(self):
- environment = self.host.copy_current_environment()
- environment.disable_gcc_smartquotes()
- env = environment.to_dictionary()
-
- # FIXME: We build both DumpRenderTree and WebKitTestRunner for
- # WebKitTestRunner runs because DumpRenderTree still includes
- # the DumpRenderTreeSupport module and the TestNetscapePlugin.
- # These two projects should be factored out into their own
- # projects.
- try:
- self._run_script("build-dumprendertree", args=self._build_driver_flags(), env=env)
- if self.get_option('webkit_test_runner'):
- self._run_script("build-webkittestrunner", args=self._build_driver_flags(), env=env)
- except ScriptError, e:
- _log.error(e.message_with_output(output_limit=None))
- return False
- return True
-
- def _build_driver_flags(self):
- return []
-
- def diff_image(self, expected_contents, actual_contents, tolerance=None):
- # Handle the case where the test didn't actually generate an image.
- # FIXME: need unit tests for this.
- if not actual_contents and not expected_contents:
- return (None, 0)
- if not actual_contents or not expected_contents:
- # FIXME: It's not clear what we should return in this case.
- # Maybe we should throw an exception?
- return (True, 0)
-
- process = self._start_image_diff_process(expected_contents, actual_contents, tolerance=tolerance)
- return self._read_image_diff(process)
-
- def _image_diff_command(self, tolerance=None):
- # FIXME: There needs to be a more sane way of handling default
- # values for options so that you can distinguish between a default
- # value of None and a default value that wasn't set.
- if tolerance is None:
- if self.get_option('tolerance') is not None:
- tolerance = self.get_option('tolerance')
- else:
- tolerance = 0.1
-
- command = [self._path_to_image_diff(), '--tolerance', str(tolerance)]
- return command
-
- def _start_image_diff_process(self, expected_contents, actual_contents, tolerance=None):
- command = self._image_diff_command(tolerance)
- environment = self.setup_environ_for_server('ImageDiff')
- process = server_process.ServerProcess(self, 'ImageDiff', command, environment)
-
- process.write('Content-Length: %d\n%sContent-Length: %d\n%s' % (
- len(actual_contents), actual_contents,
- len(expected_contents), expected_contents))
- return process
-
- def _read_image_diff(self, sp):
- deadline = time.time() + 2.0
- output = None
- output_image = ""
-
- while True:
- output = sp.read_stdout_line(deadline)
- if sp.timed_out or sp.has_crashed() or not output:
- break
-
- if output.startswith('diff'): # This is the last line ImageDiff prints.
- break
-
- if output.startswith('Content-Length'):
- m = re.match('Content-Length: (\d+)', output)
- content_length = int(m.group(1))
- output_image = sp.read_stdout(deadline, content_length)
- output = sp.read_stdout_line(deadline)
- break
-
- stderr = sp.pop_all_buffered_stderr()
- if stderr:
- _log.warn("ImageDiff produced stderr output:\n" + stderr)
- if sp.timed_out:
- _log.error("ImageDiff timed out")
- if sp.has_crashed():
- _log.error("ImageDiff crashed")
- # FIXME: There is no need to shut down the ImageDiff server after every diff.
- sp.stop()
-
- diff_percent = 0
- if output and output.startswith('diff'):
- m = re.match('diff: (.+)% (passed|failed)', output)
- if m.group(2) == 'passed':
- return [None, 0]
- diff_percent = float(m.group(1))
-
- return (output_image, diff_percent)
-
- def _tests_for_other_platforms(self):
- # By default we will skip any directory under LayoutTests/platform
- # that isn't in our baseline search path (this mirrors what
- # old-run-webkit-tests does in findTestsToRun()).
- # Note this returns LayoutTests/platform/*, not platform/*/*.
- entries = self._filesystem.glob(self._webkit_baseline_path('*'))
- dirs_to_skip = []
- for entry in entries:
- if self._filesystem.isdir(entry) and entry not in self.baseline_search_path():
- basename = self._filesystem.basename(entry)
- dirs_to_skip.append('platform/%s' % basename)
- return dirs_to_skip
-
- def _runtime_feature_list(self):
- """If a port makes certain features available only through runtime flags, it can override this routine to indicate which ones are available."""
- return None
-
- def nm_command(self):
- return 'nm'
-
- def _modules_to_search_for_symbols(self):
- path = self._path_to_webcore_library()
- if path:
- return [path]
- return []
-
- def _symbols_string(self):
- symbols = ''
- for path_to_module in self._modules_to_search_for_symbols():
- try:
- symbols += self._executive.run_command([self.nm_command(), path_to_module], error_handler=Executive.ignore_error)
- except OSError, e:
- _log.warn("Failed to run nm: %s. Can't determine supported features correctly." % e)
- return symbols
-
- # Ports which use run-time feature detection should define this method and return
- # a dictionary mapping from Feature Names to skipped directoires. NRWT will
- # run DumpRenderTree --print-supported-features and parse the output.
- # If the Feature Names are not found in the output, the corresponding directories
- # will be skipped.
- def _missing_feature_to_skipped_tests(self):
- """Return the supported feature dictionary. Keys are feature names and values
- are the lists of directories to skip if the feature name is not matched."""
- # FIXME: This list matches WebKitWin and should be moved onto the Win port.
- return {
- "Accelerated Compositing": ["compositing"],
- "3D Rendering": ["animations/3d", "transforms/3d"],
- }
-
- # Ports which use compile-time feature detection should define this method and return
- # a dictionary mapping from symbol substrings to possibly disabled test directories.
- # When the symbol substrings are not matched, the directories will be skipped.
- # If ports don't ever enable certain features, then those directories can just be
- # in the Skipped list instead of compile-time-checked here.
- def _missing_symbol_to_skipped_tests(self):
- """Return the supported feature dictionary. The keys are symbol-substrings
- and the values are the lists of directories to skip if that symbol is missing."""
- return {
- "MathMLElement": ["mathml"],
- "GraphicsLayer": ["compositing"],
- "WebCoreHas3DRendering": ["animations/3d", "transforms/3d"],
- "WebGLShader": ["fast/canvas/webgl", "compositing/webgl", "http/tests/canvas/webgl"],
- "MHTMLArchive": ["mhtml"],
- "CSSVariableValue": ["fast/css/variables", "inspector/styles/variables"],
- }
-
- def _has_test_in_directories(self, directory_lists, test_list):
- if not test_list:
- return False
-
- directories = itertools.chain.from_iterable(directory_lists)
- for directory, test in itertools.product(directories, test_list):
- if test.startswith(directory):
- return True
- return False
-
- def _skipped_tests_for_unsupported_features(self, test_list):
- # Only check the runtime feature list of there are tests in the test_list that might get skipped.
- # This is a performance optimization to avoid the subprocess call to DRT.
- if self._has_test_in_directories(self._missing_feature_to_skipped_tests().values(), test_list):
- # If the port supports runtime feature detection, disable any tests
- # for features missing from the runtime feature list.
- supported_feature_list = self._runtime_feature_list()
- # If _runtime_feature_list returns a non-None value, then prefer
- # runtime feature detection over static feature detection.
- if supported_feature_list is not None:
- return reduce(operator.add, [directories for feature, directories in self._missing_feature_to_skipped_tests().items() if feature not in supported_feature_list])
-
- # Only check the symbols of there are tests in the test_list that might get skipped.
- # This is a performance optimization to avoid the calling nm.
- if self._has_test_in_directories(self._missing_symbol_to_skipped_tests().values(), test_list):
- # Runtime feature detection not supported, fallback to static dectection:
- # Disable any tests for symbols missing from the executable or libraries.
- symbols_string = self._symbols_string()
- if symbols_string is not None:
- return reduce(operator.add, [directories for symbol_substring, directories in self._missing_symbol_to_skipped_tests().items() if symbol_substring not in symbols_string], [])
-
- # Failed to get any runtime or symbol information, don't skip any tests.
- return []
-
- def _wk2_port_name(self):
- # By current convention, the WebKit2 name is always mac-wk2, win-wk2, not mac-leopard-wk2, etc,
- # except for Qt because WebKit2 is only supported by Qt 5.0 (therefore: qt-5.0-wk2).
- return "%s-wk2" % self.port_name
-
- def _skipped_file_search_paths(self):
- # Unlike baseline_search_path, we only want to search [WK2-PORT, PORT-VERSION, PORT] and any directories
- # included via --additional-platform-directory, not the full casade.
- # Note order doesn't matter since the Skipped file contents are all combined.
- search_paths = set([self.port_name])
- if 'future' not in self.name():
- search_paths.add(self.name())
- if self.get_option('webkit_test_runner'):
- # Because nearly all of the skipped tests for WebKit 2 are due to cross-platform
- # issues, all wk2 ports share a skipped list under platform/wk2.
- search_paths.update([self._wk2_port_name(), "wk2"])
- search_paths.update(self.get_option("additional_platform_directory", []))
-
- return search_paths
-
- def skipped_layout_tests(self, test_list):
- tests_to_skip = set(self._expectations_from_skipped_files(self._skipped_file_search_paths()))
- tests_to_skip.update(self._tests_for_other_platforms())
- tests_to_skip.update(self._skipped_tests_for_unsupported_features(test_list))
- return tests_to_skip
+# FIXME: this is a stub file needed to ensure that chrome still compiles
+# until we can remove this from the browser_tests.isolate file downstream.
+# See https://bugs.webkit.org/show_bug.cgi?id=92549
+pass
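A minimal sketch of the ImageDiff request/response framing that the deleted diff_image()/_read_image_diff() code above implemented; the Content-Length framing, the "diff: N% passed|failed" trailer and the regexes come from the removed lines, while the helper names here are illustrative only:

import re

def encode_image_diff_request(actual_contents, expected_contents):
    # Each image is framed as "Content-Length: <n>\n<bytes>", actual image first, then expected.
    return 'Content-Length: %d\n%sContent-Length: %d\n%s' % (
        len(actual_contents), actual_contents,
        len(expected_contents), expected_contents)

def parse_image_diff_result_line(line):
    # The last line ImageDiff prints looks like "diff: 12.34% failed" (or "... passed").
    match = re.match(r'diff: (.+)% (passed|failed)', line)
    if not match:
        return None
    return 0.0 if match.group(2) == 'passed' else float(match.group(1))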
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py
deleted file mode 100755
index 078182541..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py
+++ /dev/null
@@ -1,233 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2010 Gabor Rapcsanyi <rgabor@inf.u-szeged.hu>, University of Szeged
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import logging
-import unittest
-
-from webkitpy.common.system.executive_mock import MockExecutive
-from webkitpy.common.system.filesystem_mock import MockFileSystem
-from webkitpy.common.system.outputcapture import OutputCapture
-from webkitpy.common.system.systemhost_mock import MockSystemHost
-from webkitpy.layout_tests.models.test_configuration import TestConfiguration
-from webkitpy.layout_tests.port import port_testcase
-from webkitpy.layout_tests.port.webkit import WebKitPort
-from webkitpy.layout_tests.port.config_mock import MockConfig
-from webkitpy.tool.mocktool import MockOptions
-
-
-class TestWebKitPort(WebKitPort):
- port_name = "testwebkitport"
-
- def __init__(self, symbols_string=None,
- expectations_file=None, skips_file=None, host=None, config=None,
- **kwargs):
- self.symbols_string = symbols_string # Passing "" disables all staticly-detectable features.
- host = host or MockSystemHost()
- config = config or MockConfig()
- WebKitPort.__init__(self, host=host, config=config, **kwargs)
-
- def all_test_configurations(self):
- return [self.test_configuration()]
-
- def _symbols_string(self):
- return self.symbols_string
-
- def _tests_for_other_platforms(self):
- return ["media", ]
-
- def _tests_for_disabled_features(self):
- return ["accessibility", ]
-
-
-class WebKitPortTest(port_testcase.PortTestCase):
- port_name = 'webkit'
- port_maker = TestWebKitPort
-
- def test_check_build(self):
- pass
-
- def test_driver_cmd_line(self):
- pass
-
- def test_baseline_search_path(self):
- pass
-
- def test_path_to_test_expectations_file(self):
- port = TestWebKitPort()
- port._options = MockOptions(webkit_test_runner=False)
- self.assertEqual(port.path_to_test_expectations_file(), '/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations')
-
- port = TestWebKitPort()
- port._options = MockOptions(webkit_test_runner=True)
- self.assertEqual(port.path_to_test_expectations_file(), '/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations')
-
- port = TestWebKitPort()
- port.host.filesystem.files['/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations'] = 'some content'
- port._options = MockOptions(webkit_test_runner=False)
- self.assertEqual(port.path_to_test_expectations_file(), '/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations')
-
- def test_skipped_directories_for_symbols(self):
- # This first test confirms that the commonly found symbols result in the expected skipped directories.
- symbols_string = " ".join(["GraphicsLayer", "WebCoreHas3DRendering", "isXHTMLMPDocument", "fooSymbol"])
- expected_directories = set([
- "mathml", # Requires MathMLElement
- "fast/canvas/webgl", # Requires WebGLShader
- "compositing/webgl", # Requires WebGLShader
- "http/tests/canvas/webgl", # Requires WebGLShader
- "mhtml", # Requires MHTMLArchive
- "fast/css/variables", # Requires CSS Variables
- "inspector/styles/variables", # Requires CSS Variables
- ])
-
- result_directories = set(TestWebKitPort(symbols_string, None)._skipped_tests_for_unsupported_features(test_list=['mathml/foo.html']))
- self.assertEqual(result_directories, expected_directories)
-
- # Test that the nm string parsing actually works:
- symbols_string = """
-000000000124f498 s __ZZN7WebCore13GraphicsLayer12replaceChildEPS0_S1_E19__PRETTY_FUNCTION__
-000000000124f500 s __ZZN7WebCore13GraphicsLayer13addChildAboveEPS0_S1_E19__PRETTY_FUNCTION__
-000000000124f670 s __ZZN7WebCore13GraphicsLayer13addChildBelowEPS0_S1_E19__PRETTY_FUNCTION__
-"""
- # Note 'compositing' is not in the list of skipped directories (hence the parsing of GraphicsLayer worked):
- expected_directories = set(['mathml', 'transforms/3d', 'compositing/webgl', 'fast/canvas/webgl', 'animations/3d', 'mhtml', 'http/tests/canvas/webgl', 'fast/css/variables', 'inspector/styles/variables'])
- result_directories = set(TestWebKitPort(symbols_string, None)._skipped_tests_for_unsupported_features(test_list=['mathml/foo.html']))
- self.assertEqual(result_directories, expected_directories)
-
- def test_skipped_directories_for_features(self):
- supported_features = ["Accelerated Compositing", "Foo Feature"]
- expected_directories = set(["animations/3d", "transforms/3d"])
- port = TestWebKitPort(None, supported_features)
- port._runtime_feature_list = lambda: supported_features
- result_directories = set(port._skipped_tests_for_unsupported_features(test_list=["animations/3d/foo.html"]))
- self.assertEqual(result_directories, expected_directories)
-
- def test_skipped_directories_for_features_no_matching_tests_in_test_list(self):
- supported_features = ["Accelerated Compositing", "Foo Feature"]
- expected_directories = set([])
- result_directories = set(TestWebKitPort(None, supported_features)._skipped_tests_for_unsupported_features(test_list=['foo.html']))
- self.assertEqual(result_directories, expected_directories)
-
- def test_skipped_tests_for_unsupported_features_empty_test_list(self):
- supported_features = ["Accelerated Compositing", "Foo Feature"]
- expected_directories = set([])
- result_directories = set(TestWebKitPort(None, supported_features)._skipped_tests_for_unsupported_features(test_list=None))
- self.assertEqual(result_directories, expected_directories)
-
- def test_skipped_layout_tests(self):
- self.assertEqual(TestWebKitPort(None, None).skipped_layout_tests(test_list=[]), set(['media']))
-
- def test_skipped_file_search_paths(self):
- port = TestWebKitPort()
- self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport']))
- port._name = "testwebkitport-version"
- self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport', 'testwebkitport-version']))
- port._options = MockOptions(webkit_test_runner=True)
- self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport', 'testwebkitport-version', 'testwebkitport-wk2', 'wk2']))
- port._options = MockOptions(additional_platform_directory=["internal-testwebkitport"])
- self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport', 'testwebkitport-version', 'internal-testwebkitport']))
-
- def test_root_option(self):
- port = TestWebKitPort()
- port._options = MockOptions(root='/foo')
- self.assertEqual(port._path_to_driver(), "/foo/DumpRenderTree")
-
- def test_test_expectations(self):
- # Check that we read the expectations file
- host = MockSystemHost()
- host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations',
- 'BUG_TESTEXPECTATIONS SKIP : fast/html/article-element.html = TEXT\n')
- port = TestWebKitPort(host=host)
- self.assertEqual(''.join(port.expectations_dict().values()), 'BUG_TESTEXPECTATIONS SKIP : fast/html/article-element.html = TEXT\n')
-
- def test_build_driver(self):
- output = OutputCapture()
- port = TestWebKitPort()
- # Delay setting _executive to avoid logging during construction
- port._executive = MockExecutive(should_log=True)
- port._options = MockOptions(configuration="Release") # This should not be necessary, but I think TestWebKitPort is actually reading from disk (and thus detects the current configuration).
- expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
- self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=''))
-
- # Make sure when passed --webkit-test-runner we build the right tool.
- port._options = MockOptions(webkit_test_runner=True, configuration="Release")
- expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\nMOCK run_command: ['Tools/Scripts/build-webkittestrunner', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
- self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=''))
-
- # Make sure we show the build log when --verbose is passed, which we simulate by setting the logging level to DEBUG.
- output.set_log_level(logging.DEBUG)
- port._options = MockOptions(configuration="Release")
- expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
- expected_logs = "Output of ['Tools/Scripts/build-dumprendertree', '--release']:\nMOCK output of child process\n"
- self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=expected_logs))
- output.set_log_level(logging.INFO)
-
- # Make sure that failure to build returns False.
- port._executive = MockExecutive(should_log=True, should_throw=True)
- # Because WK2 currently has to build both webkittestrunner and DRT, if DRT fails, that's the only one it tries.
- expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
- expected_logs = "MOCK ScriptError\n\nMOCK output of child process\n"
- self.assertFalse(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=expected_logs))
-
- def _assert_config_file_for_platform(self, port, platform, config_file):
- self.assertEquals(port._apache_config_file_name_for_platform(platform), config_file)
-
- def test_linux_distro_detection(self):
- port = TestWebKitPort()
- self.assertFalse(port._is_redhat_based())
- self.assertFalse(port._is_debian_based())
-
- port._filesystem = MockFileSystem({'/etc/redhat-release': ''})
- self.assertTrue(port._is_redhat_based())
- self.assertFalse(port._is_debian_based())
-
- port._filesystem = MockFileSystem({'/etc/debian_version': ''})
- self.assertFalse(port._is_redhat_based())
- self.assertTrue(port._is_debian_based())
-
- def test_apache_config_file_name_for_platform(self):
- port = TestWebKitPort()
- self._assert_config_file_for_platform(port, 'cygwin', 'cygwin-httpd.conf')
-
- self._assert_config_file_for_platform(port, 'linux2', 'apache2-httpd.conf')
- self._assert_config_file_for_platform(port, 'linux3', 'apache2-httpd.conf')
-
- port._is_redhat_based = lambda: True
- self._assert_config_file_for_platform(port, 'linux2', 'fedora-httpd.conf')
-
- port = TestWebKitPort()
- port._is_debian_based = lambda: True
- self._assert_config_file_for_platform(port, 'linux2', 'apache2-debian-httpd.conf')
-
- self._assert_config_file_for_platform(port, 'mac', 'apache2-httpd.conf')
- self._assert_config_file_for_platform(port, 'win32', 'apache2-httpd.conf') # win32 isn't a supported sys.platform. AppleWin/WinCairo/WinCE ports all use cygwin.
- self._assert_config_file_for_platform(port, 'barf', 'apache2-httpd.conf')
-
- def test_path_to_apache_config_file(self):
- port = TestWebKitPort()
- # Mock out _apache_config_file_name_for_platform to ignore the passed sys.platform value.
- port._apache_config_file_name_for_platform = lambda platform: 'httpd.conf'
- self.assertEquals(port._path_to_apache_config_file(), '/mock-checkout/LayoutTests/http/conf/httpd.conf')
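For readers following the deleted unit tests above, a self-contained sketch of the symbol-substring skipping that _skipped_tests_for_unsupported_features() performed; the mapping entries are copied from the removed webkit.py, the helper name is made up:

import operator
from functools import reduce  # reduce() is a builtin in the Python 2 code this patch targets

# Two entries copied from the removed _missing_symbol_to_skipped_tests() mapping.
MISSING_SYMBOL_TO_SKIPPED_TESTS = {
    "MathMLElement": ["mathml"],
    "WebCoreHas3DRendering": ["animations/3d", "transforms/3d"],
}

def skipped_directories(symbols_string):
    # Skip the directories whose required symbol substring is absent from the nm output.
    return reduce(operator.add,
                  [dirs for symbol, dirs in MISSING_SYMBOL_TO_SKIPPED_TESTS.items()
                   if symbol not in symbols_string],
                  [])

# skipped_directories("WebCoreHas3DRendering fooSymbol") == ["mathml"]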
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/win.py b/Tools/Scripts/webkitpy/layout_tests/port/win.py
index 420f4db04..28465bff3 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/win.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/win.py
@@ -62,7 +62,7 @@ class WinPort(ApplePort):
actual_text = delegate_regexp.sub("", actual_text)
return expected_text != actual_text
- def baseline_search_path(self):
+ def default_baseline_search_path(self):
fallback_index = self.VERSION_FALLBACK_ORDER.index(self._port_name_with_version())
fallback_names = list(self.VERSION_FALLBACK_ORDER[fallback_index:])
# FIXME: The AppleWin port falls back to AppleMac for some results. Eventually we'll have a shared 'apple' port.
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index 455a8c2dc..95a07f59e 100755
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -112,23 +112,7 @@ def run(port, options, args, regular_output=sys.stderr, buildbot_output=sys.stdo
manager = Manager(port, options, printer)
printer.print_config()
- printer.write_update("Collecting tests ...")
- try:
- manager.collect_tests(args)
- except IOError, e:
- if e.errno == errno.ENOENT:
- return -1
- raise
-
- printer.write_update("Checking build ...")
- if not port.check_build(manager.needs_servers()):
- _log.error("Build check failed")
- return -1
-
- printer.write_update("Parsing expectations ...")
- manager.parse_expectations()
-
- unexpected_result_count = manager.run()
+ unexpected_result_count = manager.run(args)
_log.debug("Testing completed, Exit status: %d" % unexpected_result_count)
except Exception:
exception_type, exception_value, exception_traceback = sys.exc_info()
@@ -175,9 +159,6 @@ def _set_up_derived_options(port, options):
warnings.append("--force/--skipped=%s overrides --no-http." % (options.skipped))
options.http = True
- if options.skip_pixel_test_if_no_baseline and not options.pixel_tests:
- warnings.append("--skip-pixel-test-if-no-baseline is only supported with -p (--pixel-tests)")
-
if options.ignore_metrics and (options.new_baseline or options.reset_results):
warnings.append("--ignore-metrics has no effect with --new-baselines or with --reset-results")
@@ -185,6 +166,22 @@ def _set_up_derived_options(port, options):
options.reset_results = True
options.add_platform_exceptions = True
+ if options.pixel_test_directories:
+ options.pixel_tests = True
+ verified_dirs = set()
+ pixel_test_directories = options.pixel_test_directories
+ for directory in pixel_test_directories:
+ # FIXME: we should support specifying the directories all the ways we support it for additional
+ # arguments specifying which tests and directories to run. We should also move the logic for that
+ # to Port.
+ filesystem = port.host.filesystem
+ if not filesystem.isdir(filesystem.join(port.layout_tests_dir(), directory)):
+ warnings.append("'%s' was passed to --pixel-test-directories, which doesn't seem to be a directory" % str(directory))
+ else:
+ verified_dirs.add(directory)
+
+ options.pixel_test_directories = list(verified_dirs)
+
return warnings
@@ -246,10 +243,6 @@ def parse_args(args=None):
action="store_true",
default=False,
help="Use hardware accelerated painting of composited pages"),
- optparse.make_option("--enable-hardware-gpu",
- action="store_true",
- default=False,
- help="Run graphics tests on real GPU hardware vs software"),
optparse.make_option("--per-tile-painting",
action="store_true",
help="Use per-tile painting of composited pages"),
@@ -275,8 +268,9 @@ def parse_args(args=None):
help="Run a concurrent JavaScript thread with each test"),
optparse.make_option("--webkit-test-runner", "-2", action="store_true",
help="Use WebKitTestRunner rather than DumpRenderTree."),
+ # FIXME: We should merge this w/ --build-directory and only have one flag.
optparse.make_option("--root", action="store",
- help="Path to a pre-built root of WebKit (for running tests using a nightly build of WebKit)"),
+ help="Path to a directory containing the executables needed to run tests."),
]))
option_group_definitions.append(("ORWT Compatibility Options", [
@@ -312,9 +306,16 @@ def parse_args(args=None):
optparse.make_option("--no-new-test-results", action="store_false",
dest="new_test_results", default=True,
help="Don't create new baselines when no expected results exist"),
- optparse.make_option("--skip-pixel-test-if-no-baseline", action="store_true",
- dest="skip_pixel_test_if_no_baseline", help="Do not generate and check pixel result in the case when "
- "no image baseline is available for the test."),
+
+ # FIXME: We should support a comma-separated list with --pixel-test-directory as well.
+ optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
+ help="A directory where it is allowed to execute tests as pixel tests. "
+ "Specify multiple times to add multiple directories. "
+ "This option implies --pixel-tests. If specified, only those tests "
+ "will be executed as pixel tests that are located in one of the "
+ "directories enumerated with the option. Some ports may ignore this "
+ "option while others can have a default value that can be overridden here."),
+
optparse.make_option("--skip-failing-tests", action="store_true",
default=False, help="Skip tests that are expected to fail. "
"Note: When using this option, you might miss new crashes "
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
index ad14bf4ef..6e85977b2 100755
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
@@ -179,7 +179,7 @@ def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False,
# Update this magic number if you add an unexpected test to webkitpy.layout_tests.port.test
# FIXME: It's nice to have a routine in port/test.py that returns this number.
-unexpected_tests_count = 12
+unexpected_tests_count = 14
class StreamTestingMixin(object):
@@ -404,11 +404,6 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
self.assertEquals(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])
- def test_skip_pixel_test_if_no_baseline_option(self):
- tests_to_run = ['passes/image.html', 'passes/text.html']
- tests_run = get_tests_run(['--skip-pixel-test-if-no-baseline'] + tests_to_run, tests_included=True, flatten_batches=True)
- self.assertEquals(tests_run, ['passes/image.html', 'passes/text.html'])
-
def test_ignore_flag(self):
# Note that passes/image.html is expected to be run since we specified it directly.
tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'], flatten_batches=True, tests_included=True)
@@ -501,7 +496,7 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
def test_run_singly_actually_runs_tests(self):
res, _, _, _ = logging_run(['--run-singly', 'failures/unexpected'])
- self.assertEquals(res, 8)
+ self.assertEquals(res, 10)
def test_single_file(self):
# FIXME: We should consider replacing more of the get_tests_run()-style tests
@@ -573,6 +568,20 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
self.assertTrue(json_string.find('"num_flaky":0') != -1)
self.assertTrue(json_string.find('"num_missing":1') != -1)
+ def test_pixel_test_directories(self):
+ host = MockHost()
+
+ """Both tests have faling checksum. We include only the first in pixel tests so only that should fail."""
+ args = ['--pixel-tests', '--pixel-test-directory', 'failures/unexpected/pixeldir',
+ 'failures/unexpected/pixeldir/image_in_pixeldir.html',
+ 'failures/unexpected/image_not_in_pixeldir.html']
+ res, out, err, _ = logging_run(extra_args=args, host=host, record_results=True, tests_included=True)
+
+ self.assertEquals(res, 1)
+ expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE"'
+ json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+ self.assertTrue(json_string.find(expected_token) != -1)
+
def test_missing_and_unexpected_results_with_custom_exit_code(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest.py b/Tools/Scripts/webkitpy/performance_tests/perftest.py
index 8623c0aee..8aac78f3d 100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest.py
@@ -232,7 +232,7 @@ class ReplayServer(object):
self._process = subprocess.Popen(args)
def wait_until_ready(self):
- for i in range(0, 10):
+ for i in range(0, 3):
try:
connection = socket.create_connection(('localhost', '8080'), timeout=1)
connection.close()
@@ -281,7 +281,7 @@ class ReplayPerfTest(PageLoadingPerfTest):
driver = self._port.create_driver(worker_number=1, no_timeout=True)
try:
- output = self.run_single(driver, self._url, time_out_ms, record=True)
+ output = self.run_single(driver, self._archive_path, time_out_ms, record=True)
finally:
driver.stop()
@@ -300,6 +300,7 @@ class ReplayPerfTest(PageLoadingPerfTest):
return None
try:
+ _log.debug("Waiting for Web page replay to start.")
if not server.wait_until_ready():
_log.error("Web page replay didn't start.")
return None
@@ -317,8 +318,8 @@ class ReplayPerfTest(PageLoadingPerfTest):
return None
filesystem = self._port.host.filesystem
- dirname = filesystem.dirname(url)
- filename = filesystem.split(url)[1]
+ dirname = filesystem.dirname(self._archive_path)
+ filename = filesystem.split(self._archive_path)[1]
writer = TestResultWriter(filesystem, self._port, dirname, filename)
if record:
writer.write_image_files(actual_image=None, expected_image=output.image)
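One behavioural note on the ReplayServer hunk above: wait_until_ready() now gives web-page-replay only three connection attempts instead of ten. A minimal sketch of that polling loop, with the exception handling and back-off (not visible in this hunk) assumed:

import socket
import time

def wait_until_ready(host='localhost', port=8080, attempts=3):
    # Poll until the replay server accepts connections; the patch lowers the attempt count from 10 to 3.
    for _ in range(attempts):
        try:
            connection = socket.create_connection((host, port), timeout=1)
            connection.close()
            return True
        except socket.error:
            time.sleep(1)  # assumed back-off; the original retry body is outside this hunk
    return False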
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
index 078f08a46..4fca894da 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
@@ -156,7 +156,7 @@ class TestReplayPerfTest(unittest.TestCase):
def _add_file(self, port, dirname, filename, content=True):
port.host.filesystem.maybe_make_directory(dirname)
- port.host.filesystem.files[port.host.filesystem.join(dirname, filename)] = content
+ port.host.filesystem.write_binary_file(port.host.filesystem.join(dirname, filename), content)
def _setup_test(self, run_test=None):
test_port = self.ReplayTestPort(run_test)
@@ -195,6 +195,7 @@ class TestReplayPerfTest(unittest.TestCase):
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, '')
+ self.assertEqual(port.host.filesystem.read_binary_file('/path/some-dir/some-test-actual.png'), 'actual image')
def test_run_single_fails_without_webpagereplay(self):
output_capture = OutputCapture()
@@ -281,6 +282,7 @@ class TestReplayPerfTest(unittest.TestCase):
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, 'Preparing replay for some-test.replay\nPrepared replay for some-test.replay\n')
+ self.assertEqual(port.host.filesystem.read_binary_file('/path/some-dir/some-test-expected.png'), 'actual image')
def test_prepare_calls_run_single(self):
output_capture = OutputCapture()
@@ -289,7 +291,7 @@ class TestReplayPerfTest(unittest.TestCase):
def run_single(driver, url, time_out_ms, record):
self.assertTrue(record)
- self.assertEqual(url, 'http://some-test/')
+ self.assertEqual(url, '/path/some-dir/some-test.wpr')
called[0] = True
return False
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
index ab4386443..cda3a6b59 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
@@ -159,15 +159,20 @@ class PerfTestsRunner(object):
test_results_server = options.test_results_server
branch = self._default_branch if test_results_server else None
build_number = int(options.build_number) if options.build_number else None
+
if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
+ not test_results_server,
branch, options.platform, options.builder_name, build_number) and not unexpected:
return self._EXIT_CODE_BAD_JSON
+
if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
return self._EXIT_CODE_FAILED_UPLOADING
return unexpected
- def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
+ def _generate_json(self, timestamp, output_json_path, source_json_path, should_generate_results_page,
+ branch, platform, builder_name, build_number):
+
contents = {'timestamp': int(timestamp), 'results': self._results}
for (name, path) in self._port.repository_paths():
contents[name + '-revision'] = self._host.scm().svn_revision(path)
@@ -193,7 +198,29 @@ class PerfTestsRunner(object):
if not succeeded:
return False
- filesystem.write_text_file(output_json_path, json.dumps(contents))
+ if should_generate_results_page:
+ if filesystem.isfile(output_json_path):
+ existing_contents = json.loads(filesystem.read_text_file(output_json_path))
+ existing_contents.append(contents)
+ contents = existing_contents
+ else:
+ contents = [contents]
+
+ serialized_contents = json.dumps(contents)
+ filesystem.write_text_file(output_json_path, serialized_contents)
+
+ if should_generate_results_page:
+ jquery_path = filesystem.join(self._port.perf_tests_dir(), 'Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js')
+ jquery = filesystem.read_text_file(jquery_path)
+
+ template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
+ template = filesystem.read_text_file(template_path)
+
+ results_page = template.replace('<?WebKitPerfTestRunnerInsertionPoint?>',
+ '<script>%s</script><script id="json">%s</script>' % (jquery, serialized_contents))
+
+ filesystem.write_text_file(filesystem.splitext(output_json_path)[0] + '.html', results_page)
+
return True
def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
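To illustrate the results-page path added above: when no results server is given, each run's JSON blob is appended to a list in output.json, and the serialized list plus jQuery is spliced into resources/results-template.html at the insertion-point marker, producing an .html file next to the JSON. A minimal sketch (the function name is made up; the marker and script layout come from the code above):

import json

def build_results_page(template, jquery_source, all_runs):
    serialized = json.dumps(all_runs)
    return template.replace(
        '<?WebKitPerfTestRunnerInsertionPoint?>',
        '<script>%s</script><script id="json">%s</script>' % (jquery_source, serialized))

# build_results_page('BEGIN<?WebKitPerfTestRunnerInsertionPoint?>END', 'jquery content',
#                    [{'timestamp': 123456789}])
# -> 'BEGIN<script>jquery content</script><script id="json">[{"timestamp": 123456789}]</script>END'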
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
index de3528cb1..389201521 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
@@ -225,101 +225,111 @@ max 1120
'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
'', '']))
- def test_run_test_set_with_json_output(self):
- runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json'])
- port.host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
- port.host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
- runner._timestamp = 123456789
- output_capture = OutputCapture()
- output_capture.capture_output()
- try:
- self.assertEqual(runner.run(), 0)
- finally:
- stdout, stderr, logs = output_capture.restore_output()
+ def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=True, expected_exit_code=0):
+ filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
+ filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')
- self.assertEqual(logs,
- '\n'.join(['Running 2 tests',
- 'Running Bindings/event-target-wrapper.html (1 of 2)',
- 'RESULT Bindings: event-target-wrapper= 1489.05 ms',
- 'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
- '',
- 'Running inspector/pass.html (2 of 2)',
- 'RESULT group_name: test_name= 42 ms',
- '', '']))
+ uploaded = [False]
- self.assertEqual(json.loads(port.host.filesystem.files['/mock-checkout/output.json']), {
- "timestamp": 123456789, "results":
- {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
- "inspector/pass.html:group_name:test_name": 42},
- "webkit-revision": 5678})
+ def mock_upload_json(hostname, json_path):
+ self.assertEqual(hostname, 'some.host')
+ self.assertEqual(json_path, '/mock-checkout/output.json')
+ uploaded[0] = True
+ return upload_succeeds
- def test_run_test_set_with_json_source(self):
- runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json', '--source-json-path=/mock-checkout/source.json'])
- port.host.filesystem.files['/mock-checkout/source.json'] = '{"key": "value"}'
- port.host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
- port.host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
+ runner._upload_json = mock_upload_json
runner._timestamp = 123456789
output_capture = OutputCapture()
output_capture.capture_output()
try:
- self.assertEqual(runner.run(), 0)
+ self.assertEqual(runner.run(), expected_exit_code)
finally:
stdout, stderr, logs = output_capture.restore_output()
- self.assertEqual(logs, '\n'.join(['Running 2 tests',
+ self.assertEqual(logs, '\n'.join([
+ 'Running 2 tests',
'Running Bindings/event-target-wrapper.html (1 of 2)',
'RESULT Bindings: event-target-wrapper= 1489.05 ms',
'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
'',
'Running inspector/pass.html (2 of 2)',
'RESULT group_name: test_name= 42 ms',
- '', '']))
+ '',
+ '']))
+
+ return uploaded[0]
+
+ def test_run_with_json_output(self):
+ runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server=some.host'])
+ self._test_run_with_json_output(runner, port.host.filesystem)
+ self.assertEqual(json.loads(port.host.filesystem.read_text_file('/mock-checkout/output.json')), {
+ "timestamp": 123456789, "results":
+ {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
+ "inspector/pass.html:group_name:test_name": 42},
+ "webkit-revision": 5678, "branch": "webkit-trunk"})
+ def test_run_generates_results_page(self):
+ runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json'])
+ filesystem = port.host.filesystem
+ # Stub out the results template and the jQuery library that the generated page embeds.
+ filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
+ 'BEGIN<?WebKitPerfTestRunnerInsertionPoint?>END')
+ filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js',
+ 'jquery content')
+
+ self._test_run_with_json_output(runner, filesystem)
+
+ expected_entry = {"timestamp": 123456789, "results": {"Bindings/event-target-wrapper":
+ {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
+ "inspector/pass.html:group_name:test_name": 42}, "webkit-revision": 5678}
+
+ self.maxDiff = None
+ json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
+ self.assertEqual(json.loads(json_output), [expected_entry])
+ self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
+ 'BEGIN<script>jquery content</script><script id="json">' + json_output + '</script>END')
+
+ self._test_run_with_json_output(runner, filesystem)
+ json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
+ self.assertEqual(json.loads(json_output), [expected_entry, expected_entry])
+ self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
+ 'BEGIN<script>jquery content</script><script id="json">' + json_output + '</script>END')
+
+ def test_run_with_json_source(self):
+ runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+ '--source-json-path=/mock-checkout/source.json', '--test-results-server=some.host'])
+ port.host.filesystem.write_text_file('/mock-checkout/source.json', '{"key": "value"}')
+ self._test_run_with_json_output(runner, port.host.filesystem)
self.assertEqual(json.loads(port.host.filesystem.files['/mock-checkout/output.json']), {
"timestamp": 123456789, "results":
{"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
"inspector/pass.html:group_name:test_name": 42},
- "webkit-revision": 5678,
+ "webkit-revision": 5678, "branch": "webkit-trunk",
"key": "value"})
- def test_run_test_set_with_multiple_repositories(self):
- runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json'])
- port.host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
- runner._timestamp = 123456789
+ def test_run_with_multiple_repositories(self):
+ runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server=some.host'])
port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
- self.assertEqual(runner.run(), 0)
+ self._test_run_with_json_output(runner, port.host.filesystem)
self.assertEqual(json.loads(port.host.filesystem.files['/mock-checkout/output.json']), {
- "timestamp": 123456789, "results": {"inspector/pass.html:group_name:test_name": 42.0}, "webkit-revision": 5678, "some-revision": 5678})
+ "timestamp": 123456789, "results":
+ {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
+ "inspector/pass.html:group_name:test_name": 42.0},
+ "webkit-revision": 5678, "some-revision": 5678, "branch": "webkit-trunk"})
def test_run_with_upload_json(self):
runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
- upload_json_is_called = [False]
- upload_json_returns_true = True
- def mock_upload_json(hostname, json_path):
- self.assertEqual(hostname, 'some.host')
- self.assertEqual(json_path, '/mock-checkout/output.json')
- upload_json_is_called[0] = True
- return upload_json_returns_true
-
- runner._upload_json = mock_upload_json
- port.host.filesystem.files['/mock-checkout/source.json'] = '{"key": "value"}'
- port.host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
- port.host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
- runner._timestamp = 123456789
- self.assertEqual(runner.run(), 0)
- self.assertEqual(upload_json_is_called[0], True)
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
self.assertEqual(generated_json['platform'], 'platform1')
self.assertEqual(generated_json['builder-name'], 'builder1')
self.assertEqual(generated_json['build-number'], 123)
- upload_json_returns_true = False
- runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
- '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
- runner._upload_json = mock_upload_json
- self.assertEqual(runner.run(), -3)
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, expected_exit_code=-3)
def test_upload_json(self):
runner, port = self.create_runner()
diff --git a/Tools/Scripts/webkitpy/test/main.py b/Tools/Scripts/webkitpy/test/main.py
index 2968e7daa..986af56b8 100644
--- a/Tools/Scripts/webkitpy/test/main.py
+++ b/Tools/Scripts/webkitpy/test/main.py
@@ -26,6 +26,7 @@
import logging
import multiprocessing
import optparse
+import os
import StringIO
import sys
import traceback
@@ -39,6 +40,30 @@ from webkitpy.test.runner import Runner
_log = logging.getLogger(__name__)
+def main():
+ up = os.path.dirname
+ webkit_root = up(up(up(up(up(os.path.abspath(__file__))))))
+
+ tester = Tester()
+ tester.add_tree(os.path.join(webkit_root, 'Tools', 'Scripts'), 'webkitpy')
+ tester.add_tree(os.path.join(webkit_root, 'Source', 'WebKit2', 'Scripts'), 'webkit2')
+
+ # FIXME: Do we need to be able to test QueueStatusServer on Windows as well?
+ appengine_sdk_path = '/usr/local/google_appengine'
+ if os.path.exists(appengine_sdk_path):
+ if appengine_sdk_path not in sys.path:
+ sys.path.append(appengine_sdk_path)
+ import dev_appserver
+ from google.appengine.dist import use_library
+ use_library('django', '1.2')
+ dev_appserver.fix_sys_path()
+ tester.add_tree(os.path.join(webkit_root, 'Tools', 'QueueStatusServer'))
+ else:
+ _log.info('Skipping QueueStatusServer tests; the Google AppEngine Python SDK is not installed.')
+
+ return not tester.run()
+
+
class Tester(object):
def __init__(self, filesystem=None):
self.finder = Finder(filesystem or FileSystem())
@@ -132,3 +157,6 @@ class Tester(object):
traceback.print_exc(file=s)
for l in s.buflist:
_log.error(' ' + l.rstrip())
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py b/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py
index adbd79515..c0cfe21e3 100644
--- a/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py
+++ b/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py
@@ -30,45 +30,31 @@
class ExpectedFailures(object):
def __init__(self):
self._failures = set()
- # If the set of failures is unbounded, self._failures isn't very
- # meaningful because we can't store an unbounded set in memory.
- self._failures_are_bounded = True
+ self._is_trustworthy = True
- def _has_failures(self, results):
- return bool(results and len(results.failing_tests()) != 0)
+ @classmethod
+ def _has_failures(cls, results):
+ return bool(results and results.failing_tests())
- def has_bounded_failures(self, results):
- assert(results) # You probably want to call _has_failures first!
- return bool(results.failure_limit_count() and len(results.failing_tests()) < results.failure_limit_count())
-
- def _can_trust_results(self, results):
- return self._has_failures(results) and self.has_bounded_failures(results)
+ @classmethod
+ def _should_trust(cls, results):
+ return bool(cls._has_failures(results) and results.failure_limit_count() and len(results.failing_tests()) < results.failure_limit_count())
def failures_were_expected(self, results):
- if not self._can_trust_results(results):
+ if not self._is_trustworthy:
+ return False
+ if not self._should_trust(results):
return False
return set(results.failing_tests()) <= self._failures
def unexpected_failures_observed(self, results):
- if not self._has_failures(results):
+ if not self._is_trustworthy:
return None
- if not self._failures_are_bounded:
+ if not self._has_failures(results):
return None
return set(results.failing_tests()) - self._failures
- def shrink_expected_failures(self, results, run_success):
- if run_success:
- self._failures = set()
- self._failures_are_bounded = True
- elif self._can_trust_results(results):
- # Remove all expected failures which are not in the new failing results.
- self._failures.intersection_update(set(results.failing_tests()))
- self._failures_are_bounded = True
-
- def grow_expected_failures(self, results):
- if not self._can_trust_results(results):
- self._failures_are_bounded = False
- return
- self._failures.update(results.failing_tests())
- self._failures_are_bounded = True
- # FIXME: Should we assert() here that expected_failures never crosses a certain size?
+ def update(self, results):
+ if results:
+ self._failures = set(results.failing_tests())
+ self._is_trustworthy = self._should_trust(results)
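A quick usage sketch of the simplified ExpectedFailures above; FakeResults is a hypothetical stand-in that mirrors the MockResults helper used by the unit test below:

class FakeResults(object):
    # Hypothetical results object exposing the two methods ExpectedFailures relies on.
    def __init__(self, failing_tests, failure_limit=10):
        self._failing_tests = failing_tests
        self._failure_limit = failure_limit

    def failing_tests(self):
        return self._failing_tests

    def failure_limit_count(self):
        return self._failure_limit

failures = ExpectedFailures()
failures.update(FakeResults(['foo.html']))                   # a clean-tree run defines the expected set
failures.failures_were_expected(FakeResults(['foo.html']))   # True
failures.unexpected_failures_observed(FakeResults(['foo.html', 'bar.html']))  # set(['bar.html'])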
diff --git a/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py b/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py
index 0668746a2..4c1c3d929 100644
--- a/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py
@@ -45,7 +45,7 @@ class MockResults(object):
class ExpectedFailuresTest(unittest.TestCase):
def _assert_can_trust(self, results, can_trust):
- self.assertEquals(ExpectedFailures()._can_trust_results(results), can_trust)
+ self.assertEquals(ExpectedFailures._should_trust(results), can_trust)
def test_can_trust_results(self):
self._assert_can_trust(None, False)
@@ -61,21 +61,22 @@ class ExpectedFailuresTest(unittest.TestCase):
def test_failures_were_expected(self):
failures = ExpectedFailures()
- failures.grow_expected_failures(MockResults(['foo.html']))
+ failures.update(MockResults(['foo.html']))
self._assert_expected(failures, ['foo.html'], True)
self._assert_expected(failures, ['bar.html'], False)
- failures.shrink_expected_failures(MockResults(['baz.html']), False)
- self._assert_expected(failures, ['foo.html'], False)
- self._assert_expected(failures, ['baz.html'], False)
+ self._assert_expected(failures, ['bar.html', 'foo.html'], False)
- failures.grow_expected_failures(MockResults(['baz.html']))
+ failures.update(MockResults(['baz.html']))
self._assert_expected(failures, ['baz.html'], True)
- failures.shrink_expected_failures(MockResults(), True)
+ self._assert_expected(failures, ['foo.html'], False)
+
+ failures.update(MockResults([]))
self._assert_expected(failures, ['baz.html'], False)
+ self._assert_expected(failures, ['foo.html'], False)
def test_unexpected_failures_observed(self):
failures = ExpectedFailures()
- failures.grow_expected_failures(MockResults(['foo.html']))
+ failures.update(MockResults(['foo.html']))
self.assertEquals(failures.unexpected_failures_observed(MockResults(['foo.html', 'bar.html'])), set(['bar.html']))
self.assertEquals(failures.unexpected_failures_observed(MockResults(['baz.html'])), set(['baz.html']))
unbounded_results = MockResults(['baz.html', 'qux.html', 'taco.html'], failure_limit=3)
@@ -85,7 +86,7 @@ class ExpectedFailuresTest(unittest.TestCase):
def test_unexpected_failures_observed_when_tree_is_hosed(self):
failures = ExpectedFailures()
- failures.grow_expected_failures(MockResults(['foo.html', 'banana.html'], failure_limit=2))
+ failures.update(MockResults(['foo.html', 'banana.html'], failure_limit=2))
self.assertEquals(failures.unexpected_failures_observed(MockResults(['foo.html', 'bar.html'])), None)
self.assertEquals(failures.unexpected_failures_observed(MockResults(['baz.html'])), None)
unbounded_results = MockResults(['baz.html', 'qux.html', 'taco.html'], failure_limit=3)
diff --git a/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py b/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
index 96518c69e..05ba73798 100644
--- a/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
+++ b/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
@@ -134,7 +134,7 @@ class PatchAnalysisTask(object):
"Unable to build without patch")
def _test(self):
- success = self._run_command([
+ return self._run_command([
"build-and-test",
"--no-clean",
"--no-update",
@@ -145,11 +145,8 @@ class PatchAnalysisTask(object):
"Passed tests",
"Patch does not pass tests")
- self._expected_failures.shrink_expected_failures(self._delegate.test_results(), success)
- return success
-
def _build_and_test_without_patch(self):
- success = self._run_command([
+ return self._run_command([
"build-and-test",
"--force-clean",
"--no-update",
@@ -160,9 +157,6 @@ class PatchAnalysisTask(object):
"Able to pass tests without patch",
"Unable to pass tests without patch (tree is red?)")
- self._expected_failures.shrink_expected_failures(self._delegate.test_results(), success)
- return success
-
def _land(self):
# Unclear if this should pass --quiet or not. If --parent-command always does the reporting, then it should.
return self._run_command([
@@ -220,7 +214,7 @@ class PatchAnalysisTask(object):
return self.report_failure(first_results_archive, first_results, first_script_error)
clean_tree_results = self._delegate.test_results()
- self._expected_failures.grow_expected_failures(clean_tree_results)
+ self._expected_failures.update(clean_tree_results)
# Re-check if the original results are now to be expected to avoid a full re-try.
if self._expected_failures.failures_were_expected(first_results):
diff --git a/Tools/Scripts/webkitpy/tool/commands/queues.py b/Tools/Scripts/webkitpy/tool/commands/queues.py
index e8db17c7b..b251c0fb6 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queues.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queues.py
@@ -379,9 +379,6 @@ class AbstractReviewQueue(AbstractPatchQueue, StepSequenceErrorHandler):
# AbstractPatchQueue methods
- def begin_work_queue(self):
- AbstractPatchQueue.begin_work_queue(self)
-
def next_work_item(self):
return self._next_patch()
@@ -416,6 +413,23 @@ class StyleQueue(AbstractReviewQueue, StyleQueueTaskDelegate):
def __init__(self):
AbstractReviewQueue.__init__(self)
+ def begin_work_queue(self):
+ AbstractReviewQueue.begin_work_queue(self)
+ self.clean_bugzilla()
+
+ def clean_bugzilla(self):
+ try:
+ self._update_status("Cleaning review queue")
+ self.run_webkit_patch(["clean-review-queue"])
+ except ScriptError, e:
+ self._update_status(e)
+
+ try:
+ self._update_status("Cleaning pending commit")
+ self.run_webkit_patch(["clean-pending-commit"])
+ except ScriptError, e:
+ self._update_status(e)
+
def review_patch(self, patch):
task = StyleQueueTask(self, patch)
if not task.validate():
diff --git a/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
index 1914ccd4b..2e6b1f07b 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
@@ -449,7 +449,11 @@ The commit-queue is continuing to process your patch.
class StyleQueueTest(QueuesTest):
def test_style_queue_with_style_exception(self):
expected_stderr = {
- "begin_work_queue": self._default_begin_work_queue_stderr("style-queue"),
+ "begin_work_queue": self._default_begin_work_queue_stderr("style-queue") + """MOCK: update_status: style-queue Cleaning review queue
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean-review-queue'], cwd=/mock-checkout
+MOCK: update_status: style-queue Cleaning pending commit
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean-pending-commit'], cwd=/mock-checkout
+""",
"next_work_item": "",
"process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean'], cwd=/mock-checkout
MOCK: update_status: style-queue Cleaned working directory
@@ -472,7 +476,11 @@ MOCK: release_work_item: style-queue 10000
def test_style_queue_with_watch_list_exception(self):
expected_stderr = {
- "begin_work_queue": self._default_begin_work_queue_stderr("style-queue"),
+ "begin_work_queue": self._default_begin_work_queue_stderr("style-queue") + """MOCK: update_status: style-queue Cleaning review queue
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean-review-queue'], cwd=/mock-checkout
+MOCK: update_status: style-queue Cleaning pending commit
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean-pending-commit'], cwd=/mock-checkout
+""",
"next_work_item": "",
"process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean'], cwd=/mock-checkout
MOCK: update_status: style-queue Cleaned working directory
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
index c214a339c..ed27ab553 100644
--- a/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
@@ -302,7 +302,8 @@ class AbstractParallelRebaselineCommand(AbstractDeclarativeCommand):
command_results = self._tool.executive.run_in_parallel(commands)
files_to_add = self._files_to_add(command_results)
- self._tool.scm().add_list(list(files_to_add))
+ if files_to_add:
+ self._tool.scm().add_list(list(files_to_add))
if options.optimize:
self._optimize_baselines(test_list)
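The guard added in rebaseline.py keeps an empty result set from ever reaching scm().add_list(); the SCM is only touched when the parallel rebaseline commands actually produced files. The same check in isolation (function name is illustrative):

    def add_new_baselines(scm, files_to_add):
        # Skip the SCM entirely when the rebaseline produced no new files.
        if files_to_add:
            scm.add_list(list(files_to_add))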
diff --git a/Tools/Scripts/webkitpy/tool/commands/upload.py b/Tools/Scripts/webkitpy/tool/commands/upload.py
index d587e0c97..6b52e6c83 100644
--- a/Tools/Scripts/webkitpy/tool/commands/upload.py
+++ b/Tools/Scripts/webkitpy/tool/commands/upload.py
@@ -74,7 +74,7 @@ class CleanPendingCommit(AbstractDeclarativeCommand):
what_was_cleared = []
if patch.review() == "+":
if patch.reviewer():
- what_was_cleared.append("%s's review+" % patch.reviewer().full_name)
+ what_was_cleared.append(u"%s's review+" % patch.reviewer().full_name)
else:
what_was_cleared.append("review+")
return join_with_separators(what_was_cleared)
@@ -88,7 +88,7 @@ class CleanPendingCommit(AbstractDeclarativeCommand):
flags_to_clear = self._flags_to_clear_on_patch(patch)
if not flags_to_clear:
continue
- message = "Cleared %s from obsolete attachment %s so that this bug does not appear in http://webkit.org/pending-commit." % (flags_to_clear, patch.id())
+ message = u"Cleared %s from obsolete attachment %s so that this bug does not appear in http://webkit.org/pending-commit." % (flags_to_clear, patch.id())
self._tool.bugs.obsolete_attachment(patch.id(), message)
@@ -133,7 +133,7 @@ class AssignToCommitter(AbstractDeclarativeCommand):
bug = self._tool.bugs.fetch_bug(bug_id)
if not bug.is_unassigned():
assigned_to_email = bug.assigned_to_email()
- log("Bug %s is already assigned to %s (%s)." % (bug_id, assigned_to_email, committers.committer_by_email(assigned_to_email)))
+ log(u"Bug %s is already assigned to %s (%s)." % (bug_id, assigned_to_email, committers.committer_by_email(assigned_to_email)))
return
reviewed_patches = bug.reviewed_patches()
@@ -153,7 +153,7 @@ class AssignToCommitter(AbstractDeclarativeCommand):
log("Attacher %s is not a committer. Bug %s likely needs commit-queue+." % (attacher_email, bug_id))
return
- reassign_message = "Attachment %s was posted by a committer and has review+, assigning to %s for commit." % (latest_patch.id(), committer.full_name)
+ reassign_message = u"Attachment %s was posted by a committer and has review+, assigning to %s for commit." % (latest_patch.id(), committer.full_name)
self._tool.bugs.reassign_bug(bug_id, committer.bugzilla_email(), reassign_message)
def execute(self, options, args, tool):
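The u"" prefixes in upload.py keep these messages as unicode objects under Python 2, so a reviewer's or committer's full_name containing non-ASCII characters never has to round-trip through an implicit ASCII encode/decode on its way to Bugzilla or the log. Roughly (Python 2 semantics; the name and attachment id are made up):

    # -*- coding: utf-8 -*-
    full_name = u"Ren\u00e9e Exampl\u00e9"     # illustrative non-ASCII committer name
    message = u"Attachment %s was posted by a committer and has review+, assigning to %s for commit." % (12345, full_name)
    assert isinstance(message, unicode)        # stays unicode end to end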
diff --git a/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py b/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
index 947bf1d88..46d9751db 100644
--- a/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
+++ b/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
@@ -142,5 +142,11 @@ class GardeningHTTPRequestHandler(ReflectionHandler):
def rebaselineall(self):
command = ['rebaseline-json']
- self.server.tool.executive.run_command([self.server.tool.path()] + command, input=self.read_entity_body(), cwd=self.server.tool.scm().checkout_root)
+ json_input = self.read_entity_body()
+ _log.debug("rebaselining using '%s'" % json_input)
+
+ def error_handler(script_error):
+ _log.error("error from rebaseline-json: %s, input='%s', output='%s'" % (str(script_error), json_input, script_error.output))
+
+ self.server.tool.executive.run_command([self.server.tool.path()] + command, input=json_input, cwd=self.server.tool.scm().checkout_root, return_stderr=True, error_handler=error_handler)
self._serve_text('success')
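The gardeningserver.py change leans on the error_handler hook of Executive.run_command(): on failure the handler is called with the ScriptError, whose .output (stderr included, given return_stderr=True) is logged next to the JSON input that triggered it, so the request handler can still answer instead of surfacing a traceback. The call pulled out of the handler for clarity (function name illustrative; whether the error also propagates is up to the handler, which here only logs):

    import logging
    _log = logging.getLogger(__name__)

    def run_rebaseline_json(executive, tool_path, json_input, checkout_root):
        def error_handler(script_error):
            # Log the failing command's output next to the input that provoked it.
            _log.error("error from rebaseline-json: %s, input='%s', output='%s'"
                       % (script_error, json_input, script_error.output))
        return executive.run_command([tool_path, 'rebaseline-json'], input=json_input,
                                     cwd=checkout_root, return_stderr=True,
                                     error_handler=error_handler)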
diff --git a/Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py b/Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py
index 15bcd6f55..0e7458727 100644
--- a/Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py
+++ b/Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py
@@ -32,7 +32,7 @@ import os.path
import BaseHTTPServer
from webkitpy.common.host import Host # FIXME: This should not be needed!
-from webkitpy.layout_tests.port.webkit import WebKitPort
+from webkitpy.layout_tests.port.base import Port
from webkitpy.tool.servers.reflectionhandler import ReflectionHandler
@@ -163,9 +163,9 @@ def _move_test_baselines(test_file, extensions_to_move, source_platform, destina
def get_test_baselines(test_file, test_config):
# FIXME: This seems like a hack. This only seems used to access the Port.expected_baselines logic.
- class AllPlatformsPort(WebKitPort):
+ class AllPlatformsPort(Port):
def __init__(self, host):
- WebKitPort.__init__(self, host, 'mac')
+ super(AllPlatformsPort, self).__init__(host, 'mac')
self._platforms_by_directory = dict([(self._webkit_baseline_path(p), p) for p in test_config.platforms])
def baseline_search_path(self):
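With WebKitPort folded away, one-off helper ports such as the rebaseline server's AllPlatformsPort derive from the base Port class and chain through super(), as the hunk above does. The bare pattern (class name is illustrative; Port is the real webkitpy class, and 'mac' matches the port name used above):

    from webkitpy.layout_tests.port.base import Port

    class SinglePlatformPortSketch(Port):
        def __init__(self, host):
            # Same construction the diff uses: Port(host, port_name).
            super(SinglePlatformPortSketch, self).__init__(host, 'mac')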
diff --git a/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py b/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py
index c8ea6c02c..f5c1cbf5e 100644
--- a/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py
@@ -32,7 +32,7 @@ import unittest
from webkitpy.common.net import resultsjsonparser_unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests.layout_package.json_results_generator import strip_json_wrapper
-from webkitpy.layout_tests.port.webkit import WebKitPort
+from webkitpy.layout_tests.port.base import Port
from webkitpy.tool.commands.rebaselineserver import TestConfig, RebaselineServer
from webkitpy.tool.servers import rebaselineserver
@@ -299,7 +299,7 @@ def get_test_config(test_files=[], result_files=[]):
for file in result_files:
host.filesystem.write_binary_file(host.filesystem.join(results_directory, file), '')
- class TestMacPort(WebKitPort):
+ class TestMacPort(Port):
port_name = "mac"
return TestConfig(