author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-12 14:27:29 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-13 09:35:20 +0000
commit     c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree       e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/third_party/blink/tools
parent     7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
download   qtwebengine-chromium-85-based.tar.gz

BASELINE: Update Chromium to 85.0.4183.140

Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/third_party/blink/tools')
-rw-r--r--  chromium/third_party/blink/tools/BUILD.gn | 1
-rw-r--r--  chromium/third_party/blink/tools/OWNERS | 1
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/common/checkout/baseline_optimizer_unittest.py | 6
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/common/config/builders.json | 26
-rwxr-xr-x  chromium/third_party/blink/tools/blinkpy/presubmit/audit_non_blink_usage.py | 29
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/style/checkers/python.py | 1
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/README.chromium | 4
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/PRESUBMIT.py | 43
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/README.chromium | 10
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/WPTIncludeList (renamed from chromium/third_party/blink/tools/blinkpy/third_party/wpt/WPTWhiteList) | 0
-rwxr-xr-x  chromium/third_party/blink/tools/blinkpy/third_party/wpt/checkout.sh | 8
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/lint.py | 40
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/rules.py | 9
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/item.py | 9
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/manifest.py | 23
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/sourcefile.py | 141
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/vcs.py | 6
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/quic/requirements.txt | 2
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/serve/serve.py | 57
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/browser.py | 78
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/commands.json | 1
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/pipes.py | 2
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/request.py | 142
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/response.py | 68
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/sslutils/openssl.py | 2
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/stash.py | 13
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/utils.py | 41
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/w3c/android_wpt_expectations_updater.py | 136
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/w3c/android_wpt_expectations_updater_unittest.py | 158
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/w3c/export_notifier.py | 56
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/w3c/export_notifier_unittest.py | 196
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/w3c/test_copier.py | 12
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/w3c/test_copier_unittest.py | 16
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/w3c/test_exporter.py | 17
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/w3c/test_importer.py | 144
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/w3c/test_importer_unittest.py | 195
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/w3c/wpt_expectations_updater.py | 208
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/w3c/wpt_expectations_updater_unittest.py | 224
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/builder_list.py | 13
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/controllers/manager.py | 5
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/controllers/test_result_sink.py | 172
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/controllers/test_result_sink_unittest.py | 135
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder.py | 22
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder_unittest.py | 60
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_runner.py | 17
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_runner_unittest.py | 13
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/layout_package/bot_test_expectations.py | 106
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/layout_package/bot_test_expectations_unittest.py | 59
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/lint_test_expectations.py | 72
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/lint_test_expectations_unittest.py | 38
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/models/test_expectations.py | 72
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/models/test_expectations_unittest.py | 165
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/models/test_results_unittest.py | 15
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/port/android.py | 33
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/port/android_unittest.py | 20
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/port/base.py | 63
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/port/base_unittest.py | 27
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/port/browser_test.py | 24
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/port/browser_test_unittest.py | 17
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/port/fuchsia.py | 7
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/port/mac.py | 2
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/port/port_testcase.py | 38
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/run_web_tests.py | 37
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/run_web_tests_unittest.py | 15
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/servers/cli_wrapper.py | 11
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/servers/cli_wrapper_unittest.py | 20
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/servers/server_base.py | 32
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/servers/wptserve.py | 3
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/servers/wptserve_unittest.py | 2
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/update_expectations.py | 61
-rw-r--r--  chromium/third_party/blink/tools/blinkpy/web_tests/update_expectations_unittest.py | 48
-rwxr-xr-x  chromium/third_party/blink/tools/build_wpt_metadata.py | 18
-rwxr-xr-x  chromium/third_party/blink/tools/run_blink_wptserve.py | 4
-rwxr-xr-x  chromium/third_party/blink/tools/run_web_tests.bat | 2
74 files changed, 2557 insertions, 1016 deletions
diff --git a/chromium/third_party/blink/tools/BUILD.gn b/chromium/third_party/blink/tools/BUILD.gn
index b76b17e7004..cfea9509167 100644
--- a/chromium/third_party/blink/tools/BUILD.gn
+++ b/chromium/third_party/blink/tools/BUILD.gn
@@ -7,6 +7,7 @@ group("wpt_tests_isolate") {
testonly = true
data = [
"//testing/scripts/common.py",
+ "//testing/scripts/wpt_common.py",
"//testing/xvfb.py",
# Include blinkpy tools for setting up expectations.
diff --git a/chromium/third_party/blink/tools/OWNERS b/chromium/third_party/blink/tools/OWNERS
index 1b4442b503a..151ed365c32 100644
--- a/chromium/third_party/blink/tools/OWNERS
+++ b/chromium/third_party/blink/tools/OWNERS
@@ -1,4 +1,5 @@
dpranke@chromium.org
+dpranke@google.com
jeffcarp@chromium.org
qyearsley@chromium.org
robertma@chromium.org
diff --git a/chromium/third_party/blink/tools/blinkpy/common/checkout/baseline_optimizer_unittest.py b/chromium/third_party/blink/tools/blinkpy/common/checkout/baseline_optimizer_unittest.py
index bf0a5e1efbe..b121b1b79a8 100644
--- a/chromium/third_party/blink/tools/blinkpy/common/checkout/baseline_optimizer_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/common/checkout/baseline_optimizer_unittest.py
@@ -67,6 +67,10 @@ class BaselineOptimizerTest(unittest.TestCase):
'port_name': 'linux-trusty',
'specifiers': ['Trusty', 'Release']
},
+ 'Fake Test Mac10.15': {
+ 'port_name': 'mac-mac10.15',
+ 'specifiers': ['Mac10.15', 'Release']
+ },
'Fake Test Mac10.14': {
'port_name': 'mac-mac10.14',
'specifiers': ['Mac10.14', 'Release']
@@ -94,7 +98,7 @@ class BaselineOptimizerTest(unittest.TestCase):
self.assertEqual(
sorted(self.host.port_factory.all_port_names()), [
'linux-trusty', 'mac-mac10.10', 'mac-mac10.11', 'mac-mac10.12',
- 'mac-mac10.13', 'mac-mac10.14', 'win-win10'
+ 'mac-mac10.13', 'mac-mac10.14', 'mac-mac10.15', 'win-win10'
])
def _assert_optimization(self,
diff --git a/chromium/third_party/blink/tools/blinkpy/common/config/builders.json b/chromium/third_party/blink/tools/blinkpy/common/config/builders.json
index 6eb01c77288..55775288854 100644
--- a/chromium/third_party/blink/tools/blinkpy/common/config/builders.json
+++ b/chromium/third_party/blink/tools/blinkpy/common/config/builders.json
@@ -44,6 +44,11 @@
"port_name": "mac-mac10.13",
"specifiers": ["Mac10.13", "Debug"]
},
+ "Mac10.14 Tests": {
+ "master": "chromium.mac",
+ "port_name": "mac-mac10.14",
+ "specifiers": ["Mac10.14", "Release"]
+ },
"Win7 Tests (1)": {
"master": "chromium.win",
"port_name": "win-win7",
@@ -59,58 +64,69 @@
"port_name": "win-win7",
"specifiers": ["Win7", "Debug"]
},
- "Win10 Tests x64 (dbg)": {
- "master": "chromium.win",
- "port_name": "win-win10",
- "specifiers": ["Win10", "Debug"]
- },
"fuchsia_x64": {
+ "master": "tryserver.chromium.linux",
"port_name": "fuchsia",
"specifiers": ["Fuchsia", "Release"],
"is_try_builder": true
},
"linux-blink-rel": {
+ "master": "tryserver.blink",
"port_name": "linux-trusty",
"specifiers": ["Trusty", "Release"],
"has_webdriver_tests": true,
"is_try_builder": true
},
"mac10.10-blink-rel": {
+ "master": "tryserver.blink",
"port_name": "mac-mac10.10",
"specifiers": ["Mac10.10", "Release"],
"is_try_builder": true
},
"mac10.11-blink-rel": {
+ "master": "tryserver.blink",
"port_name": "mac-mac10.11",
"specifiers": ["Mac10.11", "Release"],
"is_try_builder": true
},
"mac10.12-blink-rel": {
+ "master": "tryserver.blink",
"port_name": "mac-mac10.12",
"specifiers": ["Mac10.12", "Release"],
"is_try_builder": true
},
"mac10.13-blink-rel": {
+ "master": "tryserver.blink",
"port_name": "mac-mac10.13",
"specifiers": ["Mac10.13", "Release"],
"is_try_builder": true
},
"mac10.13_retina-blink-rel": {
+ "master": "tryserver.blink",
"port_name": "mac-retina",
"specifiers": ["Retina", "Release"],
"is_try_builder": true
},
"mac10.14-blink-rel": {
+ "master": "tryserver.blink",
"port_name": "mac-mac10.14",
"specifiers": ["Mac10.14", "Release"],
"is_try_builder": true
},
+ "mac10.15-blink-rel": {
+ "master": "tryserver.blink",
+ "port_name": "mac-mac10.15",
+ "specifiers": ["Mac10.15", "Release"],
+ "is_try_builder": true
+ },
"win7-blink-rel": {
+ "master": "tryserver.blink",
"port_name": "win-win7",
"specifiers": ["Win7", "Release"],
"is_try_builder": true
},
"win10-blink-rel": {
+ "master": "tryserver.blink",
"port_name": "win-win10",
"specifiers": ["Win10", "Release"],
"is_try_builder": true
diff --git a/chromium/third_party/blink/tools/blinkpy/presubmit/audit_non_blink_usage.py b/chromium/third_party/blink/tools/blinkpy/presubmit/audit_non_blink_usage.py
index 2bb7f07a616..e9f401bfdf7 100755
--- a/chromium/third_party/blink/tools/blinkpy/presubmit/audit_non_blink_usage.py
+++ b/chromium/third_party/blink/tools/blinkpy/presubmit/audit_non_blink_usage.py
@@ -56,6 +56,7 @@ _CONFIG = [
'base::RepeatingTimer',
'base::SequencedTaskRunner',
'base::SingleThreadTaskRunner',
+ 'base::ScopedAllowBlocking',
'base::ScopedFD',
'base::ScopedClosureRunner',
'base::SupportsWeakPtr',
@@ -315,12 +316,16 @@ _CONFIG = [
'cc::kManipulationInfoHasScrolledByPrecisionTouchPad',
'cc::kManipulationInfoHasScrolledByTouch',
'cc::kManipulationInfoHasScrolledByWheel',
+ 'cc::kPixelsPerLineStep',
+ 'cc::kMinFractionToStepWhenPaging',
+ 'cc::kPercentDeltaForDirectionalScroll',
'cc::MainThreadScrollingReason',
'cc::ManipulationInfo',
'cc::ScrollSnapAlign',
'cc::ScrollSnapType',
'cc::ScrollOffsetAnimationCurve',
'cc::ScrollStateData',
+ 'cc::ScrollUtils',
'cc::SnapAlignment',
'cc::SnapAreaData',
'cc::SnapAxis',
@@ -408,6 +413,9 @@ _CONFIG = [
# HTTP status codes
'net::HTTP_.+',
+ # For ConnectionInfo enumeration
+ 'net::HttpResponseInfo',
+
# Network service.
'network::.+',
@@ -449,6 +457,7 @@ _CONFIG = [
'ui::AXEvent',
'ui::AXEventIntent',
'ui::AXNodeData',
+ 'ui::IsDialog',
'ax::mojom::BoolAttribute',
'ax::mojom::HasPopup',
'ax::mojom::State',
@@ -543,7 +552,9 @@ _CONFIG = [
],
},
{
- 'paths': ['third_party/blink/renderer/core/html/canvas/canvas_rendering_context_host.cc'],
+ 'paths': [
+ 'third_party/blink/renderer/core/html/canvas/canvas_rendering_context_host.cc'
+ ],
'allowed': [
'gpu::SHARED_IMAGE_USAGE_DISPLAY',
'gpu::SHARED_IMAGE_USAGE_SCANOUT',
@@ -560,6 +571,7 @@ _CONFIG = [
'cc::ApplyViewportChangesArgs',
'cc::LayerTreeSettings',
'cc::TaskGraphRunner',
+ 'ui::ImeTextSpan',
],
},
{
@@ -583,6 +595,7 @@ _CONFIG = [
{
'paths': ['third_party/blink/renderer/core/editing/ime'],
'allowed': [
+ 'ui::ImeTextSpan',
'ui::TextInputAction',
],
},
@@ -617,6 +630,12 @@ _CONFIG = [
],
},
{
+ 'paths': ['third_party/blink/renderer/core/html/forms'],
+ 'allowed': [
+ 'ui::TextInputType',
+ ],
+ },
+ {
'paths': [
'third_party/blink/renderer/core/loader/alternate_signed_exchange_resource_info.cc'
],
@@ -704,13 +723,6 @@ _CONFIG = [
],
},
{
- 'paths':
- ['third_party/blink/renderer/core/scroll/scrollbar_theme_mac.mm'],
- 'allowed': [
- 'gfx::CocoaScrollbarPainter',
- ],
- },
- {
'paths': ['third_party/blink/renderer/core/workers/worker_thread.cc'],
'allowed': [
'base::ScopedAllowBaseSyncPrimitives',
@@ -1037,6 +1049,7 @@ _CONFIG = [
'base::MD5.*',
'base::MessageLoopCurrent',
'base::Passed',
+ 'base::PowerObserver',
'base::RetainedRef',
'base::StringPrintf',
'base::Value',
diff --git a/chromium/third_party/blink/tools/blinkpy/style/checkers/python.py b/chromium/third_party/blink/tools/blinkpy/style/checkers/python.py
index c85f143535c..92d6b6278ab 100644
--- a/chromium/third_party/blink/tools/blinkpy/style/checkers/python.py
+++ b/chromium/third_party/blink/tools/blinkpy/style/checkers/python.py
@@ -83,7 +83,6 @@ class PythonChecker(object):
finder.path_from_chromium_base('build', 'android'),
finder.path_from_chromium_base('third_party'), # for jinja2
finder.path_from_chromium_base('third_party', 'catapult', 'devil'),
- finder.path_from_chromium_base('third_party', 'pymock'),
finder.path_from_chromium_base('tools'),
])
return executive.run_command([
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/README.chromium b/chromium/third_party/blink/tools/blinkpy/third_party/README.chromium
index 84e3ccb7aeb..c797121def6 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/README.chromium
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/README.chromium
@@ -22,7 +22,7 @@ Local Modifications: None
Name: web-platform-tests - Test Suites for Web Platform specifications
Short Name: wpt
URL: https://github.com/web-platform-tests/wpt/
-Version: 622c9625dddfdef0c6dfafa8fa00d5119db50201
+Version: 66af89be218a1c81e89ad786104bf2866d006422
License: LICENSES FOR W3C TEST SUITES (https://www.w3.org/Consortium/Legal/2008/03-bsd-license.html)
License File: wpt/wpt/LICENSE.md
Security Critical: no
@@ -32,4 +32,4 @@ Description: This includes code for the manifest tool, lint tool, and wptserve.
web_tests/external/wpt contains the tests. Also see wpt/README.chromium
for more details on maintenance.
Local Modifications:
-- Removed all files except for those listed in wpt/WPTWhiteList.
+- Removed all files except for those listed in wpt/WPTIncludeList.
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/PRESUBMIT.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/PRESUBMIT.py
index 9e559fe61d9..302b3fb3e33 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/PRESUBMIT.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/PRESUBMIT.py
@@ -4,11 +4,14 @@
"""Check the basic functionalities of WPT tools.
-After rolling a new version of WPT tools, we should do some sanity checks. For
-now, we only test `wpt lint` via web_tests/external/PRESUBMIT_test.py.
+This PRESUBMIT guards against rolling a broken version of WPT tooling. It does
+some smoke checks of WPT functionality.
"""
+
def _TestWPTLint(input_api, output_api):
+ # We test 'wpt lint' by deferring to the web_tests/external presubmit test,
+ # which runs 'wpt lint' against web_tests/external/wpt.
abspath_to_test = input_api.os_path.join(
input_api.change.RepositoryRoot(),
'third_party', 'blink', 'web_tests', 'external', 'PRESUBMIT_test.py'
@@ -24,9 +27,41 @@ def _TestWPTLint(input_api, output_api):
return input_api.RunTests([command])
+def _TestWPTManifest(input_api, output_api):
+ # We test 'wpt manifest' by making a copy of the base manifest and updating
+ # it. A copy is used so that this PRESUBMIT doesn't change files in the tree.
+ blink_path = input_api.os_path.join(
+ input_api.change.RepositoryRoot(), 'third_party', 'blink')
+
+ base_manifest = input_api.os_path.join(
+ blink_path, 'web_tests', 'external', 'WPT_BASE_MANIFEST_8.json')
+ with input_api.CreateTemporaryFile() as f:
+ f.write(input_api.ReadFile(base_manifest))
+ f.close()
+
+ wpt_exec_path = input_api.os_path.join(
+ blink_path, 'tools', 'blinkpy', 'third_party', 'wpt', 'wpt', 'wpt')
+ external_wpt = input_api.os_path.join(
+ blink_path, 'web_tests', 'external', 'wpt')
+ try:
+ input_api.subprocess.check_output(
+ ['python', wpt_exec_path, 'manifest', '--no-download',
+ '--path', f.name, '--tests-root', external_wpt])
+ except input_api.subprocess.CalledProcessError as exc:
+ return [output_api.PresubmitError('wpt manifest failed:', long_text=exc.output)]
+
+ return []
+
+
def CheckChangeOnUpload(input_api, output_api):
- return _TestWPTLint(input_api, output_api)
+ results = []
+ results += _TestWPTLint(input_api, output_api)
+ results += _TestWPTManifest(input_api, output_api)
+ return results
def CheckChangeOnCommit(input_api, output_api):
- return _TestWPTLint(input_api, output_api)
+ results = []
+ results += _TestWPTLint(input_api, output_api)
+ results += _TestWPTManifest(input_api, output_api)
+ return results
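The new _TestWPTManifest check boils down to copying the checked-in base manifest to a temporary file and letting `wpt manifest` update it, so a failure flags a broken manifest tool rather than a stale manifest. A rough standalone equivalent, outside the presubmit framework, might look like the sketch below; the repository-relative paths are taken from the hunk above and assume the command is run from a Chromium checkout root:

```python
# Sketch: run the same `wpt manifest` smoke test by hand.
import os
import shutil
import subprocess
import tempfile

blink = os.path.join('third_party', 'blink')
base_manifest = os.path.join(blink, 'web_tests', 'external',
                             'WPT_BASE_MANIFEST_8.json')
wpt_exec = os.path.join(blink, 'tools', 'blinkpy', 'third_party',
                        'wpt', 'wpt', 'wpt')
external_wpt = os.path.join(blink, 'web_tests', 'external', 'wpt')

# Work on a copy so the checked-in base manifest is left untouched.
fd, tmp_manifest = tempfile.mkstemp(suffix='.json')
os.close(fd)
shutil.copyfile(base_manifest, tmp_manifest)

subprocess.check_output(
    ['python', wpt_exec, 'manifest', '--no-download',
     '--path', tmp_manifest, '--tests-root', external_wpt])
```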
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/README.chromium b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/README.chromium
index be20e1fcf8a..84017a82f05 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/README.chromium
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/README.chromium
@@ -35,7 +35,7 @@ checkout.sh
Running this script without arguments will remove the existing checkout
(third_party/wpt/wpt) and perform a fresh one. See "Rolling in WPT" for more.
-WPTWhiteList
+WPTIncludeList
============
The explicit list of files being kept, everything else not on this list is
deleted when running "./checkout.sh reduce". Use this file to control what gets
@@ -72,8 +72,8 @@ re-generate them:
Rolling in WPT
If there are new files that need to be rolled in, add the intended files to
-the WPTWhiteList. Ensure these files are in the correct order by running
-"LC_ALL=C sort WPTWhiteList".
+the WPTIncludeList. Ensure these files are in the correct order by running
+"LC_ALL=C sort WPTIncludeList".
When rolling in new versions of WPT support, modify WPT_HEAD in checkout.sh to
the desired HEAD position. You can then call "./checkout.sh clone" which will
@@ -85,11 +85,11 @@ the "Local Modifications" section which lists ways in which Chromium has
diverged from WPT. Make sure these modifications are persisted when reviewing
the changes being made.
-You can examine what's pulled in and update WPTWhiteList if some new files are
+You can examine what's pulled in and update WPTIncludeList if some new files are
required to run the updated version.
Once you've cloned the repositories you can call "./checkout.sh reduce" to
-remove everything that is not listed in WPTWhiteList.
+remove everything that is not listed in WPTIncludeList.
Note that calling "./checkout.sh" without arguments is equivalent of calling
"./checkout.sh clone reduce".
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/WPTWhiteList b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/WPTIncludeList
index b3d5da34dba..b3d5da34dba 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/WPTWhiteList
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/WPTIncludeList
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/checkout.sh b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/checkout.sh
index f2c0f0c8d1b..fb848e087d5 100755
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/checkout.sh
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/checkout.sh
@@ -1,15 +1,15 @@
#!/bin/bash
#
# Removes ./wpt/ directory containing the reduced web-platform-tests tree and
-# starts a new checkout. Only files in WPTWhiteList are retained. The revisions
-# getting checked out are defined in WPTHeads.
+# starts a new checkout. Only files in WPTIncludeList are retained. The
+# revisions getting checked out are defined in WPTHeads.
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
cd $DIR
TARGET_DIR=$DIR/wpt
REMOTE_REPO="https://github.com/web-platform-tests/wpt.git"
-WPT_HEAD=622c9625dddfdef0c6dfafa8fa00d5119db50201
+WPT_HEAD=66af89be218a1c81e89ad786104bf2866d006422
function clone {
# Remove existing repo if already exists.
@@ -27,7 +27,7 @@ function reduce {
# xargs on some platforms, so we remove those directories first.
rm -fr html css
# Remove all except white-listed.
- comm -23 <(find . -type f | sort) <(cat ../WPTWhiteList | sort) | xargs -d '\n' -n 1 rm
+ comm -23 <(find . -type f | sort) <(cat ../WPTIncludeList | sort) | xargs -d '\n' -n 1 rm
find . -empty -type d -delete
}
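The `reduce` step keeps only the files named in WPTIncludeList: `comm -23` emits the paths present in the `find` output but absent from the list, and those paths are deleted. A minimal Python sketch of the same idea, assuming the list stores paths in the same "./"-prefixed relative form that `find .` prints (which the `comm` comparison requires):

```python
# Sketch of checkout.sh's "reduce": remove every file under the wpt/ checkout
# that is not listed in WPTIncludeList, then prune empty directories.
import os

wpt_dir = 'wpt'
with open('WPTIncludeList') as f:
    keep = {line.strip() for line in f if line.strip()}

for root, _dirs, files in os.walk(wpt_dir):
    for name in files:
        path = os.path.join(root, name)
        rel = './' + os.path.relpath(path, wpt_dir).replace(os.sep, '/')
        if rel not in keep:
            os.remove(path)

# Equivalent of `find . -empty -type d -delete`: bottom-up removal of
# directories left empty after the purge.
for root, dirs, _files in os.walk(wpt_dir, topdown=False):
    for d in dirs:
        full = os.path.join(root, d)
        if not os.listdir(full):
            os.rmdir(full)
```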
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/lint.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/lint.py
index 74b3c8cf930..fc1a816f8ab 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/lint.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/lint.py
@@ -323,6 +323,41 @@ def check_css_globally_unique(repo_root, paths):
return errors
+def check_unique_testharness_basenames(repo_root, paths):
+ # type: (str, List[str]) -> List[rules.Error]
+ """
+ Checks that all testharness files have unique basename paths.
+
+ The 'basename path' refers to the entire path excluding the extension. For
+ example, 'foo/bar/baz.html' and 'foo/bar/baz.xhtml' have the same basename
+ path, but 'foo/bar/baz.html' and 'foo/qux/baz.html' do not.
+
+ Testharness files with identical basenames have caused issues in downstream
+ infrastructure (see https://github.com/web-platform-tests/wpt/issues/7570),
+ and may cause confusion in general.
+
+ :param repo_root: the repository root
+ :param paths: list of all paths
+ :returns: a list of errors found in ``paths``
+ """
+
+ errors = []
+ file_dict = defaultdict(list)
+ for path in paths:
+ source_file = SourceFile(repo_root, path, "/")
+ if source_file.type != "testharness":
+ continue
+ file_name, file_extension = os.path.splitext(path)
+ file_dict[file_name].append(file_extension)
+ for k, v in file_dict.items():
+ if len(v) == 1:
+ continue
+ context = (', '.join(v),)
+ for extension in v:
+ errors.append(rules.DuplicateBasenamePath.error(k + extension, context))
+ return errors
+
+
def parse_ignorelist(f):
# type: (IO[bytes]) -> Tuple[Ignorelist, Set[Text]]
"""
@@ -432,6 +467,7 @@ def check_parsed(repo_root, path, f):
if (source_file.type != "support" and
not source_file.name_is_reference and
+ not source_file.name_is_tentative and
not source_file.spec_links):
return [rules.MissingLink.error(path)]
@@ -617,7 +653,7 @@ broken_python_metadata = re.compile(br"#\s*META:")
def check_global_metadata(value):
# type: (str) -> Iterable[Tuple[Type[rules.Rule], Tuple[Any, ...]]]
- global_values = {item.strip() for item in value.split(b",") if item.strip()}
+ global_values = {item.strip().decode("utf8") for item in value.split(b",") if item.strip()}
# TODO: this could check for duplicates and such
for global_value in global_values:
@@ -930,7 +966,7 @@ def lint(repo_root, paths, output_format, ignore_glob=str()):
path_lints = [check_file_type, check_path_length, check_worker_collision, check_ahem_copy,
check_gitignore_file]
-all_paths_lints = [check_css_globally_unique]
+all_paths_lints = [check_css_globally_unique, check_unique_testharness_basenames]
file_lints = [check_regexp_line, check_parsed, check_python_ast, check_script_metadata,
check_ahem_system_font]
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/rules.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/rules.py
index 6fbdc1c360d..695f6cd4e53 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/rules.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/rules.py
@@ -322,6 +322,15 @@ class TestharnessInOtherType(Rule):
description = "testharness.js included in a %s test"
+class DuplicateBasenamePath(Rule):
+ name = "DUPLICATE-BASENAME-PATH"
+ description = collapse("""
+ File has identical basename path (path excluding extension) as
+ other file(s) (found extensions: %s)
+ """)
+ to_fix = "rename files so they have unique basename paths"
+
+
class Regexp(six.with_metaclass(abc.ABCMeta)):
@abc.abstractproperty
def pattern(self):
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/item.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/item.py
index 72802758941..ee07f0d1d79 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/item.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/item.py
@@ -191,6 +191,11 @@ class TestharnessTest(URLManifestItem):
return self._extras.get("jsshell")
@property
+ def quic(self):
+ # type: () -> Optional[bool]
+ return self._extras.get("quic")
+
+ @property
def script_metadata(self):
# type: () -> Optional[Text]
return self._extras.get("script_metadata")
@@ -204,8 +209,10 @@ class TestharnessTest(URLManifestItem):
rv[-1]["testdriver"] = self.testdriver
if self.jsshell:
rv[-1]["jsshell"] = True
+ if self.quic is not None:
+ rv[-1]["quic"] = self.quic
if self.script_metadata:
- rv[-1]["script_metadata"] = [(k.decode('utf8'), v.decode('utf8')) for (k,v) in self.script_metadata]
+ rv[-1]["script_metadata"] = [(k, v) for (k,v) in self.script_metadata]
return rv
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/manifest.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/manifest.py
index f79edc84352..345eeb6d2b0 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/manifest.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/manifest.py
@@ -1,9 +1,18 @@
+import io
import itertools
import json
import os
from copy import deepcopy
from multiprocessing import Pool, cpu_count
-from six import PY3, iteritems, itervalues, string_types, binary_type, text_type
+from six import (
+ PY3,
+ binary_type,
+ ensure_text,
+ iteritems,
+ itervalues,
+ string_types,
+ text_type,
+)
from . import vcs
from .item import (ConformanceCheckerTest, ManifestItem, ManualTest, RefTest, SupportFile,
@@ -167,12 +176,14 @@ class Manifest(object):
to_update = []
- for source_file, update in tree:
+ for source_file_or_path, update in tree:
if not update:
- assert isinstance(source_file, (binary_type, text_type))
- deleted.remove(tuple(source_file.split(os.path.sep)))
+ assert isinstance(source_file_or_path, (binary_type, text_type))
+ path = ensure_text(source_file_or_path)
+ deleted.remove(tuple(path.split(os.path.sep)))
else:
- assert not isinstance(source_file, bytes)
+ assert not isinstance(source_file_or_path, (binary_type, text_type))
+ source_file = source_file_or_path
rel_path_parts = source_file.rel_path_parts
assert isinstance(rel_path_parts, tuple)
@@ -318,7 +329,7 @@ def _load(logger, # type: Logger
else:
logger.debug("Creating new manifest at %s" % manifest)
try:
- with open(manifest, "rb") as f:
+ with io.open(manifest, "r", encoding="utf-8") as f:
rv = Manifest.from_json(tests_root,
fast_json.load(f),
types=types,
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/sourcefile.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/sourcefile.py
index d0794a2851e..299ad212f76 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/sourcefile.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/sourcefile.py
@@ -2,7 +2,7 @@ import hashlib
import re
import os
from collections import deque
-from six import binary_type, iteritems, text_type
+from six import binary_type, ensure_text, iteritems, text_type
from six.moves.urllib.parse import urljoin
from fnmatch import fnmatch
@@ -56,9 +56,9 @@ def replace_end(s, old, new):
def read_script_metadata(f, regexp):
- # type: (BinaryIO, Pattern[bytes]) -> Iterable[Tuple[bytes, bytes]]
+ # type: (BinaryIO, Pattern[bytes]) -> Iterable[Tuple[Text, Text]]
"""
- Yields any metadata (pairs of bytestrings) from the file-like object `f`,
+ Yields any metadata (pairs of strings) from the file-like object `f`,
as specified according to a supplied regexp.
`regexp` - Regexp containing two groups containing the metadata name and
@@ -70,25 +70,25 @@ def read_script_metadata(f, regexp):
if not m:
break
- yield (m.groups()[0], m.groups()[1])
+ yield (m.groups()[0].decode("utf8"), m.groups()[1].decode("utf8"))
_any_variants = {
- b"window": {"suffix": ".any.html"},
- b"serviceworker": {"force_https": True},
- b"sharedworker": {},
- b"dedicatedworker": {"suffix": ".any.worker.html"},
- b"worker": {"longhand": {b"dedicatedworker", b"sharedworker", b"serviceworker"}},
- b"jsshell": {"suffix": ".any.js"},
-} # type: Dict[bytes, Dict[str, Any]]
+ "window": {"suffix": ".any.html"},
+ "serviceworker": {"force_https": True},
+ "sharedworker": {},
+ "dedicatedworker": {"suffix": ".any.worker.html"},
+ "worker": {"longhand": {"dedicatedworker", "sharedworker", "serviceworker"}},
+ "jsshell": {"suffix": ".any.js"},
+} # type: Dict[Text, Dict[Text, Any]]
def get_any_variants(item):
- # type: (bytes) -> Set[bytes]
+ # type: (Text) -> Set[Text]
"""
- Returns a set of variants (bytestrings) defined by the given keyword.
+ Returns a set of variants (strings) defined by the given keyword.
"""
- assert isinstance(item, binary_type), item
+ assert isinstance(item, text_type), item
variant = _any_variants.get(item, None)
if variant is None:
@@ -98,46 +98,46 @@ def get_any_variants(item):
def get_default_any_variants():
- # type: () -> Set[bytes]
+ # type: () -> Set[Text]
"""
- Returns a set of variants (bytestrings) that will be used by default.
+ Returns a set of variants (strings) that will be used by default.
"""
- return set({b"window", b"dedicatedworker"})
+ return set({"window", "dedicatedworker"})
def parse_variants(value):
- # type: (bytes) -> Set[bytes]
+ # type: (Text) -> Set[Text]
"""
- Returns a set of variants (bytestrings) defined by a comma-separated value.
+ Returns a set of variants (strings) defined by a comma-separated value.
"""
- assert isinstance(value, binary_type), value
+ assert isinstance(value, text_type), value
- if value == b"":
+ if value == "":
return get_default_any_variants()
globals = set()
- for item in value.split(b","):
+ for item in value.split(","):
item = item.strip()
globals |= get_any_variants(item)
return globals
def global_suffixes(value):
- # type: (bytes) -> Set[Tuple[bytes, bool]]
+ # type: (Text) -> Set[Tuple[Text, bool]]
"""
Yields tuples of the relevant filename suffix (a string) and whether the
variant is intended to run in a JS shell, for the variants defined by the
given comma-separated value.
"""
- assert isinstance(value, binary_type), value
+ assert isinstance(value, text_type), value
rv = set()
global_types = parse_variants(value)
for global_type in global_types:
variant = _any_variants[global_type]
- suffix = variant.get("suffix", ".any.%s.html" % global_type.decode("utf-8"))
- rv.add((suffix, global_type == b"jsshell"))
+ suffix = variant.get("suffix", ".any.%s.html" % global_type)
+ rv.add((suffix, global_type == "jsshell"))
return rv
@@ -190,24 +190,21 @@ class SourceFile(object):
("css", "CSS2", "archive"),
("css", "common")} # type: Set[Tuple[bytes, ...]]
- def __init__(self, tests_root, rel_path, url_base, hash=None, contents=None):
+ def __init__(self, tests_root, rel_path_str, url_base, hash=None, contents=None):
# type: (AnyStr, AnyStr, Text, Optional[Text], Optional[bytes]) -> None
"""Object representing a file in a source tree.
:param tests_root: Path to the root of the source tree
- :param rel_path: File path relative to tests_root
+ :param rel_path_str: File path relative to tests_root
:param url_base: Base URL used when converting file paths to urls
:param contents: Byte array of the contents of the file or ``None``.
"""
+ rel_path = ensure_text(rel_path_str)
assert not os.path.isabs(rel_path), rel_path
-
if os.name == "nt":
# do slash normalization on Windows
- if isinstance(rel_path, binary_type):
- rel_path = rel_path.replace(b"/", b"\\")
- else:
- rel_path = rel_path.replace(u"/", u"\\")
+ rel_path = rel_path.replace(u"/", u"\\")
dir_path, filename = os.path.split(rel_path)
name, ext = os.path.splitext(filename)
@@ -218,13 +215,13 @@ class SourceFile(object):
meta_flags = name.split(".")[1:]
- self.tests_root = tests_root # type: Union[bytes, Text]
- self.rel_path = rel_path # type: Union[bytes, Text]
- self.dir_path = dir_path # type: Union[bytes, Text]
- self.filename = filename # type: Union[bytes, Text]
- self.name = name # type: Union[bytes, Text]
- self.ext = ext # type: Union[bytes, Text]
- self.type_flag = type_flag # type: Optional[Union[bytes, Text]]
+ self.tests_root = ensure_text(tests_root) # type: Text
+ self.rel_path = rel_path # type: Text
+ self.dir_path = dir_path # type: Text
+ self.filename = filename # type: Text
+ self.name = name # type: Text
+ self.ext = ext # type: Text
+ self.type_flag = type_flag # type: Optional[Text]
self.meta_flags = meta_flags # type: Union[List[bytes], List[Text]]
self.url_base = url_base
self.contents = contents
@@ -282,7 +279,7 @@ class SourceFile(object):
@cached_property
def path(self):
- # type: () -> Union[bytes, Text]
+ # type: () -> Text
return os.path.join(self.tests_root, self.rel_path)
@cached_property
@@ -411,6 +408,15 @@ class SourceFile(object):
return self.type_flag == "crash" or "crashtests" in self.dir_path.split(os.path.sep)
@property
+ def name_is_tentative(self):
+ # type: () -> bool
+ """Check if the file name matches the conditions for the file to be a
+ tentative file.
+
+ See https://web-platform-tests.org/writing-tests/file-names.html#test-features"""
+ return "tentative" in self.meta_flags
+
+ @property
def markup_type(self):
# type: () -> Optional[Text]
"""Return the type of markup contained in a file, based on its extension,
@@ -462,7 +468,7 @@ class SourceFile(object):
@cached_property
def script_metadata(self):
- # type: () -> Optional[List[Tuple[bytes, bytes]]]
+ # type: () -> Optional[List[Tuple[Text, Text]]]
if self.name_is_worker or self.name_is_multi_global or self.name_is_window:
regexp = js_meta_re
elif self.name_is_webdriver:
@@ -479,7 +485,7 @@ class SourceFile(object):
"""The timeout of a test or reference file. "long" if the file has an extended timeout
or None otherwise"""
if self.script_metadata:
- if any(m == (b"timeout", b"long") for m in self.script_metadata):
+ if any(m == ("timeout", "long") for m in self.script_metadata):
return "long"
if self.root is None:
@@ -641,8 +647,8 @@ class SourceFile(object):
script_metadata = self.script_metadata
assert script_metadata is not None
for (key, value) in script_metadata:
- if key == b"variant":
- rv.append(value.decode("utf-8"))
+ if key == "variant":
+ rv.append(value)
else:
for element in self.variant_nodes:
if "content" in element.attrib:
@@ -675,6 +681,36 @@ class SourceFile(object):
return bool(self.testdriver_nodes)
@cached_property
+ def quic_nodes(self):
+ # type: () -> List[ElementTree.Element]
+ """List of ElementTree Elements corresponding to nodes in a test that
+ specify whether it needs QUIC server."""
+ assert self.root is not None
+ return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='quic']")
+
+ @cached_property
+ def quic(self):
+ # type: () -> Optional[bool]
+ """Boolean indicating whether a test requires QUIC server
+
+ Determined by <meta> elements (`quic_nodes()`) and "// META" comments
+ (`script_metadata()`).
+ """
+ if self.script_metadata:
+ if any(m == ("quic", "true") for m in self.script_metadata):
+ return True
+
+ if self.root is None:
+ return None
+
+ if self.quic_nodes:
+ quic_str = self.quic_nodes[0].attrib.get("content", "false") # type: Text
+ if quic_str.lower() == "true":
+ return True
+
+ return None
+
+ @cached_property
def reftest_nodes(self):
# type: () -> List[ElementTree.Element]
"""List of ElementTree Elements corresponding to nodes representing a
@@ -834,11 +870,11 @@ class SourceFile(object):
)]
elif self.name_is_multi_global:
- globals = b""
+ globals = u""
script_metadata = self.script_metadata
assert script_metadata is not None
for (key, value) in script_metadata:
- if key == b"global":
+ if key == "global":
globals = value
break
@@ -850,6 +886,7 @@ class SourceFile(object):
global_variant_url(self.rel_url, suffix) + variant,
timeout=self.timeout,
jsshell=jsshell,
+ quic=self.quic,
script_metadata=self.script_metadata
)
for (suffix, jsshell) in sorted(global_suffixes(globals))
@@ -866,6 +903,7 @@ class SourceFile(object):
self.url_base,
test_url + variant,
timeout=self.timeout,
+ quic=self.quic,
script_metadata=self.script_metadata
)
for variant in self.test_variants
@@ -881,6 +919,7 @@ class SourceFile(object):
self.url_base,
test_url + variant,
timeout=self.timeout,
+ quic=self.quic,
script_metadata=self.script_metadata
)
for variant in self.test_variants
@@ -917,6 +956,7 @@ class SourceFile(object):
self.url_base,
url,
timeout=self.timeout,
+ quic=self.quic,
testdriver=testdriver,
script_metadata=self.script_metadata
))
@@ -930,6 +970,7 @@ class SourceFile(object):
self.rel_url,
references=self.references,
timeout=self.timeout,
+ quic=self.quic,
viewport_size=self.viewport_size,
dpi=self.dpi,
fuzzy=self.fuzzy
@@ -957,9 +998,9 @@ class SourceFile(object):
if drop_cached and "__cached_properties__" in self.__dict__:
cached_properties = self.__dict__["__cached_properties__"]
- for key in cached_properties:
- if key in self.__dict__:
- del self.__dict__[key]
+ for prop in cached_properties:
+ if prop in self.__dict__:
+ del self.__dict__[prop]
del self.__dict__["__cached_properties__"]
return rv
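A recurring theme in the sourcefile.py hunks is that script metadata ("// META:" comments) and the "global" variant table now carry text rather than bytes; the expansion logic itself is unchanged. The sketch below restates that expansion in standalone form, using the text-keyed _any_variants table shown above, to illustrate how a `global=` value turns into generated wrapper suffixes. The helper is a simplification that folds parse_variants(), get_any_variants() and global_suffixes() into one function:

```python
# Standalone restatement of the variant expansion from sourcefile.py.
_any_variants = {
    "window": {"suffix": ".any.html"},
    "serviceworker": {"force_https": True},
    "sharedworker": {},
    "dedicatedworker": {"suffix": ".any.worker.html"},
    "worker": {"longhand": {"dedicatedworker", "sharedworker", "serviceworker"}},
    "jsshell": {"suffix": ".any.js"},
}


def expand_globals(value):
    """Map a comma-separated "global=" value to (suffix, is_jsshell) pairs."""
    keywords = {item.strip() for item in value.split(",") if item.strip()}
    if not keywords:
        keywords = {"window", "dedicatedworker"}  # default variants
    variants = set()
    for keyword in keywords:
        entry = _any_variants.get(keyword)
        if entry is None:
            continue  # unknown keywords are ignored, as in get_any_variants()
        variants |= entry.get("longhand", {keyword})
    return {(_any_variants[v].get("suffix", ".any.%s.html" % v), v == "jsshell")
            for v in variants}


# "// META: global=window,worker" yields .any.html, .any.worker.html,
# .any.sharedworker.html and .any.serviceworker.html wrappers.
print(sorted(expand_globals("window,worker")))
```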
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/vcs.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/vcs.py
index 05afdf39b22..7c0feeb8164 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/vcs.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/vcs.py
@@ -226,12 +226,12 @@ class GitIgnoreCache(CacheFile, MutableMapping): # type: ignore
def check_valid(self, data):
# type: (Dict[Any, Any]) -> Dict[Any, Any]
- ignore_path = os.path.join(self.tests_root, b".gitignore")
+ ignore_path = os.path.join(self.tests_root, ".gitignore")
mtime = os.path.getmtime(ignore_path)
- if data.get(b"/gitignore_file") != [ignore_path, mtime]:
+ if data.get("/gitignore_file") != [ignore_path, mtime]:
self.modified = True
data = {}
- data[b"/gitignore_file"] = [ignore_path, mtime]
+ data["/gitignore_file"] = [ignore_path, mtime]
return data
def __contains__(self, key):
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/quic/requirements.txt b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/quic/requirements.txt
index 165260c78f7..c66414cb751 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/quic/requirements.txt
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/quic/requirements.txt
@@ -1 +1 @@
-aioquic==0.8.7
+aioquic==0.8.8
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/serve/serve.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/serve/serve.py
index 45bfd4766eb..43ff7bd0dfb 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/serve/serve.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/serve/serve.py
@@ -178,9 +178,9 @@ class HtmlWrapperHandler(WrapperHandler):
def check_exposure(self, request):
if self.global_type:
- globals = b""
+ globals = u""
for (key, value) in self._get_metadata(request):
- if key == b"global":
+ if key == "global":
globals = value
break
@@ -189,23 +189,23 @@ class HtmlWrapperHandler(WrapperHandler):
self.global_type)
def _meta_replacement(self, key, value):
- if key == b"timeout":
- if value == b"long":
+ if key == "timeout":
+ if value == "long":
return '<meta name="timeout" content="long">'
- if key == b"title":
- value = value.decode('utf-8').replace("&", "&amp;").replace("<", "&lt;")
+ if key == "title":
+ value = value.replace("&", "&amp;").replace("<", "&lt;")
return '<title>%s</title>' % value
return None
def _script_replacement(self, key, value):
- if key == b"script":
- attribute = value.decode('utf-8').replace("&", "&amp;").replace('"', "&quot;")
+ if key == "script":
+ attribute = value.replace("&", "&amp;").replace('"', "&quot;")
return '<script src="%s"></script>' % attribute
return None
class WorkersHandler(HtmlWrapperHandler):
- global_type = b"dedicatedworker"
+ global_type = "dedicatedworker"
path_replace = [(".any.worker.html", ".any.js", ".any.worker.js"),
(".worker.html", ".worker.js")]
wrapper = """<!doctype html>
@@ -234,7 +234,7 @@ class WindowHandler(HtmlWrapperHandler):
class AnyHtmlHandler(HtmlWrapperHandler):
- global_type = b"window"
+ global_type = "window"
path_replace = [(".any.html", ".any.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
@@ -254,7 +254,7 @@ self.GLOBAL = {
class SharedWorkersHandler(HtmlWrapperHandler):
- global_type = b"sharedworker"
+ global_type = "sharedworker"
path_replace = [(".any.sharedworker.html", ".any.js", ".any.worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
@@ -269,7 +269,7 @@ fetch_tests_from_worker(new SharedWorker("%(path)s%(query)s"));
class ServiceWorkersHandler(HtmlWrapperHandler):
- global_type = b"serviceworker"
+ global_type = "serviceworker"
path_replace = [(".any.serviceworker.html", ".any.js", ".any.worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
@@ -307,11 +307,11 @@ done();
return None
def _script_replacement(self, key, value):
- if key == b"script":
- attribute = value.decode('utf-8').replace("\\", "\\\\").replace('"', '\\"')
+ if key == "script":
+ attribute = value.replace("\\", "\\\\").replace('"', '\\"')
return 'importScripts("%s")' % attribute
- if key == b"title":
- value = value.decode('utf-8').replace("\\", "\\\\").replace('"', '\\"')
+ if key == "title":
+ value = value.replace("\\", "\\\\").replace('"', '\\"')
return 'self.META_TITLE = "%s";' % value
return None
@@ -922,17 +922,22 @@ def run(**kwargs):
signal.signal(signal.SIGTERM, handle_signal)
signal.signal(signal.SIGINT, handle_signal)
- while (all(item.is_alive() for item in iter_procs(servers)) and
+ while (all(subproc.is_alive() for subproc in iter_procs(servers)) and
not received_signal.is_set()):
- for item in iter_procs(servers):
- item.join(1)
- exited = [item for item in iter_procs(servers) if not item.is_alive()]
- subject = "subprocess" if len(exited) == 1 else "subprocesses"
-
- logger.info("%s %s exited:" % (len(exited), subject))
-
- for item in iter_procs(servers):
- logger.info("Status of %s:\t%s" % (item.name, "running" if item.is_alive() else "not running"))
+ for subproc in iter_procs(servers):
+ subproc.join(1)
+
+ failed_subproc = 0
+ for subproc in iter_procs(servers):
+ if subproc.is_alive():
+ logger.info('Status of subprocess "%s": running' % subproc.name)
+ else:
+ if subproc.exitcode == 0:
+ logger.info('Status of subprocess "%s": exited correctly' % subproc.name)
+ else:
+ logger.warning('Status of subprocess "%s": failed. Exit with non-zero status: %d' % (subproc.name, subproc.exitcode))
+ failed_subproc += 1
+ return failed_subproc
def main():
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/browser.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/browser.py
index 8b1b55f607f..366dc781d90 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/browser.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/browser.py
@@ -18,8 +18,7 @@ from .utils import call, get, untar, unzip
uname = platform.uname()
# the rootUrl for the firefox-ci deployment of Taskcluster
-# (after November 9, https://firefox-ci-tc.services.mozilla.com/)
-FIREFOX_CI_ROOT_URL = 'https://taskcluster.net'
+FIREFOX_CI_ROOT_URL = 'https://firefox-ci-tc.services.mozilla.com'
def _get_fileversion(binary, logger=None):
@@ -49,6 +48,15 @@ def get_ext(filename):
return ext
+def get_taskcluster_artifact(index, path):
+ TC_INDEX_BASE = FIREFOX_CI_ROOT_URL + "/api/index/v1/"
+
+ resp = get(TC_INDEX_BASE + "task/%s/artifacts/%s" % (index, path))
+ resp.raise_for_status()
+
+ return resp
+
+
class Browser(object):
__metaclass__ = ABCMeta
@@ -174,7 +182,7 @@ class Firefox(Browser):
url = "https://download.mozilla.org/?product=%s&os=%s&lang=en-US" % (product[channel],
os_builds[os_key])
self.logger.info("Downloading Firefox from %s" % url)
- resp = requests.get(url)
+ resp = get(url)
filename = None
@@ -413,32 +421,32 @@ class Firefox(Browser):
return find_executable(os.path.join(dest, "geckodriver"))
def install_geckodriver_nightly(self, dest):
- import tarfile
- import mozdownload
self.logger.info("Attempting to install webdriver from nightly")
+
+ platform_bits = ("64" if uname[4] == "x86_64" else
+ ("32" if self.platform == "win" else ""))
+ tc_platform = "%s%s" % (self.platform, platform_bits)
+
+ archive_ext = ".zip" if uname[0] == "Windows" else ".tar.gz"
+ archive_name = "public/geckodriver%s" % archive_ext
+
try:
- s = mozdownload.DailyScraper(branch="mozilla-central",
- extension="common.tests.tar.gz",
- destination=dest)
- package_path = s.download()
- except mozdownload.errors.NotFoundError:
+ resp = get_taskcluster_artifact(
+ "gecko.v2.mozilla-central.latest.geckodriver.%s" % tc_platform,
+ archive_name)
+ except Exception:
+ self.logger.info("Geckodriver download failed")
return
- try:
- exe_suffix = ".exe" if uname[0] == "Windows" else ""
- with tarfile.open(package_path, "r") as f:
- try:
- member = f.getmember("bin%sgeckodriver%s" % (os.path.sep,
- exe_suffix))
- except KeyError:
- return
- # Remove bin/ from the path.
- member.name = os.path.basename(member.name)
- f.extractall(members=[member], path=dest)
- path = os.path.join(dest, member.name)
- self.logger.info("Extracted geckodriver to %s" % path)
- finally:
- os.unlink(package_path)
+ if archive_ext == ".zip":
+ unzip(resp.raw, dest)
+ else:
+ untar(resp.raw, dest)
+
+ exe_ext = ".exe" if uname[0] == "Windows" else ""
+ path = os.path.join(dest, "geckodriver%s" % exe_ext)
+
+ self.logger.info("Extracted geckodriver to %s" % path)
return path
@@ -461,24 +469,10 @@ class FirefoxAndroid(Browser):
if dest is None:
dest = os.pwd
- if FIREFOX_CI_ROOT_URL == 'https://taskcluster.net':
- # NOTE: this condition can be removed after November 9, 2019
- TC_QUEUE_BASE = "https://queue.taskcluster.net/v1/"
- TC_INDEX_BASE = "https://index.taskcluster.net/v1/"
- else:
- TC_QUEUE_BASE = FIREFOX_CI_ROOT_URL + "/api/queue/v1/"
- TC_INDEX_BASE = FIREFOX_CI_ROOT_URL + "/api/index/v1/"
-
-
- resp = requests.get(TC_INDEX_BASE +
- "task/gecko.v2.mozilla-central.latest.mobile.android-x86_64-opt")
- resp.raise_for_status()
- index = resp.json()
- task_id = index["taskId"]
- resp = requests.get(TC_QUEUE_BASE + "task/%s/artifacts/%s" %
- (task_id, "public/build/geckoview-androidTest.apk"))
- resp.raise_for_status()
+ resp = get_taskcluster_artifact(
+ "gecko.v2.mozilla-central.latest.mobile.android-x86_64-opt",
+ "public/build/geckoview-androidTest.apk")
filename = "geckoview-androidTest.apk"
if rename:
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/commands.json b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/commands.json
index 60fe1621af0..a47ab40d728 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/commands.json
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/commands.json
@@ -52,7 +52,6 @@
"parser": "get_parser",
"help": "Install browser components",
"install": [
- "mozdownload",
"mozinstall"
]
},
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/pipes.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/pipes.py
index 569875bf972..bbf25e6fe11 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/pipes.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/pipes.py
@@ -320,6 +320,8 @@ class FirstWrapper(object):
def __getitem__(self, key):
try:
+ if isinstance(key, text_type):
+ key = key.encode('iso-8859-1')
return self.params.first(key)
except KeyError:
return ""
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/request.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/request.py
index a80bc0c8300..7c4d02d6de6 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/request.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/request.py
@@ -1,13 +1,13 @@
import base64
import cgi
-from six.moves.http_cookies import BaseCookie
-from six import BytesIO, binary_type, text_type, iteritems, PY3
import tempfile
+from six import BytesIO, binary_type, iteritems, PY3
+from six.moves.http_cookies import BaseCookie
from six.moves.urllib.parse import parse_qsl, urlsplit
from . import stash
-from .utils import HTTPException
+from .utils import HTTPException, isomorphic_encode, isomorphic_decode
missing = object()
@@ -219,22 +219,23 @@ class Request(object):
MultiDict representing the parameters supplied with the request.
Note that these may be present on non-GET requests; the name is
chosen to be familiar to users of other systems such as PHP.
+ Both keys and values are binary strings.
.. attribute:: POST
MultiDict representing the request body parameters. Most parameters
are present as string values, but file uploads have file-like
- values.
+ values. All string values (including keys) have binary type.
.. attribute:: cookies
- Cookies object representing cookies sent with the request with a
+ A Cookies object representing cookies sent with the request with a
dictionary-like interface.
.. attribute:: auth
- Object with username and password properties representing any
- credentials supplied using HTTP authentication.
+ An instance of Authentication with username and password properties
+ representing any credentials supplied using HTTP authentication.
.. attribute:: server
@@ -248,8 +249,12 @@ class Request(object):
self.protocol_version = request_handler.protocol_version
self.method = request_handler.command
+ # Keys and values in raw headers are native strings.
+ self._headers = None
+ self.raw_headers = request_handler.headers
+
scheme = request_handler.server.scheme
- host = request_handler.headers.get("Host")
+ host = self.raw_headers.get("Host")
port = request_handler.server.server_address[1]
if host is None:
@@ -262,22 +267,17 @@ class Request(object):
self.url_base = "/"
if self.request_path.startswith(scheme + "://"):
- self.url = request_handler.path
+ self.url = self.request_path
else:
- self.url = "%s://%s:%s%s" % (scheme,
- host,
- port,
- self.request_path)
+ # TODO(#23362): Stop using native strings for URLs.
+ self.url = "%s://%s:%s%s" % (
+ scheme, host, port, self.request_path)
self.url_parts = urlsplit(self.url)
- self.raw_headers = request_handler.headers
-
self.request_line = request_handler.raw_requestline
- self._headers = None
-
self.raw_input = InputFile(request_handler.rfile,
- int(self.headers.get("Content-Length", 0)))
+ int(self.raw_headers.get("Content-Length", 0)))
self._body = None
@@ -297,19 +297,24 @@ class Request(object):
params = parse_qsl(self.url_parts.query, keep_blank_values=True)
self._GET = MultiDict()
for key, value in params:
- self._GET.add(key, value)
+ self._GET.add(isomorphic_encode(key), isomorphic_encode(value))
return self._GET
@property
def POST(self):
if self._POST is None:
- #Work out the post parameters
+ # Work out the post parameters
pos = self.raw_input.tell()
self.raw_input.seek(0)
- fs = cgi.FieldStorage(fp=self.raw_input,
- environ={"REQUEST_METHOD": self.method},
- headers=self.raw_headers,
- keep_blank_values=True)
+ kwargs = {
+ "fp": self.raw_input,
+ "environ": {"REQUEST_METHOD": self.method},
+ "headers": self.raw_headers,
+ "keep_blank_values": True,
+ }
+ if PY3:
+ kwargs["encoding"] = "iso-8859-1"
+ fs = cgi.FieldStorage(**kwargs)
self._POST = MultiDict.from_field_storage(fs)
self.raw_input.seek(pos)
return self._POST
@@ -317,14 +322,12 @@ class Request(object):
@property
def cookies(self):
if self._cookies is None:
- parser = BaseCookie()
+ parser = BinaryCookieParser()
cookie_headers = self.headers.get("cookie", b"")
- if PY3:
- cookie_headers = cookie_headers.decode("iso-8859-1")
parser.load(cookie_headers)
cookies = Cookies()
for key, value in iteritems(parser):
- cookies[key] = CookieValue(value)
+ cookies[isomorphic_encode(key)] = CookieValue(value)
self._cookies = cookies
return self._cookies
@@ -357,24 +360,6 @@ class H2Request(Request):
super(H2Request, self).__init__(request_handler)
-def _maybe_encode(s):
- """Encodes a text-type string into binary data using iso-8859-1.
-
- Returns `str` in Python 2 and `bytes` in Python 3. The function is a no-op
- if the argument already has a binary type.
- """
- if isinstance(s, binary_type):
- return s
-
- # Python 3 assumes iso-8859-1 when parsing headers, which will garble text
- # with non ASCII characters. We try to encode the text back to binary.
- # https://github.com/python/cpython/blob/273fc220b25933e443c82af6888eb1871d032fb8/Lib/http/client.py#L213
- if isinstance(s, text_type):
- return s.encode("iso-8859-1")
-
- raise TypeError("Unexpected value in RequestHeaders: %r" % s)
-
-
class RequestHeaders(dict):
"""Read-only dictionary-like API for accessing request headers.
@@ -384,7 +369,7 @@ class RequestHeaders(dict):
"""
def __init__(self, items):
for header in items.keys():
- key = _maybe_encode(header).lower()
+ key = isomorphic_encode(header).lower()
# get all headers with the same name
values = items.getallmatchingheaders(header)
if len(values) > 1:
@@ -394,22 +379,21 @@ class RequestHeaders(dict):
for value in values:
# getallmatchingheaders returns raw header lines, so
# split to get name, value
- multiples.append(_maybe_encode(value).split(b':', 1)[1].strip())
+ multiples.append(isomorphic_encode(value).split(b':', 1)[1].strip())
headers = multiples
else:
- headers = [_maybe_encode(items[header])]
+ headers = [isomorphic_encode(items[header])]
dict.__setitem__(self, key, headers)
-
def __getitem__(self, key):
"""Get all headers of a certain (case-insensitive) name. If there is
more than one, the values are returned comma separated"""
- key = _maybe_encode(key)
+ key = isomorphic_encode(key)
values = dict.__getitem__(self, key.lower())
if len(values) == 1:
return values[0]
else:
- return ", ".join(values)
+ return b", ".join(values)
def __setitem__(self, name, value):
raise Exception
@@ -430,7 +414,7 @@ class RequestHeaders(dict):
def get_list(self, key, default=missing):
"""Get all the header values for a particular field name as
a list"""
- key = _maybe_encode(key)
+ key = isomorphic_encode(key)
try:
return dict.__getitem__(self, key.lower())
except KeyError:
@@ -440,7 +424,7 @@ class RequestHeaders(dict):
raise
def __contains__(self, key):
- key = _maybe_encode(key)
+ key = isomorphic_encode(key)
return dict.__contains__(self, key.lower())
def iteritems(self):
@@ -451,6 +435,7 @@ class RequestHeaders(dict):
for item in self:
yield self[item]
+
class CookieValue(object):
"""Representation of cookies.
@@ -524,9 +509,8 @@ class CookieValue(object):
class MultiDict(dict):
- """Dictionary type that holds multiple values for each
- key"""
- #TODO: this should perhaps also order the keys
+ """Dictionary type that holds multiple values for each key"""
+ # TODO: this should perhaps also order the keys
def __init__(self):
pass
@@ -541,7 +525,6 @@ class MultiDict(dict):
def __getitem__(self, key):
"""Get the first value with a given key"""
- #TODO: should this instead be the last value?
return self.first(key)
def first(self, key, default=missing):
@@ -584,6 +567,10 @@ class MultiDict(dict):
@classmethod
def from_field_storage(cls, fs):
+ """Construct a MultiDict from a cgi.FieldStorage
+
+ Note that all keys and values are binary strings.
+ """
self = cls()
if fs.list is None:
return self
@@ -594,13 +581,46 @@ class MultiDict(dict):
for value in values:
if not value.filename:
- value = value.value
- self.add(key, value)
+ value = isomorphic_encode(value.value)
+ else:
+ assert isinstance(value, cgi.FieldStorage)
+ self.add(isomorphic_encode(key), value)
return self
+class BinaryCookieParser(BaseCookie):
+ """A subclass of BaseCookie that returns values in binary strings
+
+ This is not intended to store the cookies; use Cookies instead.
+ """
+ def value_decode(self, val):
+ """Decode value from network to (real_value, coded_value).
+
+ Override BaseCookie.value_decode.
+ """
+ return isomorphic_encode(val), val
+
+ def value_encode(self, val):
+ raise NotImplementedError('BinaryCookieParser is not for setting cookies')
+
+ def load(self, rawdata):
+ """Load cookies from a binary string.
+
+ This overrides and calls BaseCookie.load. Unlike BaseCookie.load, it
+ does not accept dictionaries.
+ """
+ assert isinstance(rawdata, binary_type)
+ if PY3:
+ # BaseCookie.load expects a native string, which in Python 3 is text.
+ rawdata = isomorphic_decode(rawdata)
+ super(BinaryCookieParser, self).load(rawdata)
+
+
class Cookies(MultiDict):
- """MultiDict specialised for Cookie values"""
+ """MultiDict specialised for Cookie values
+
+ Keys and values are binary strings.
+ """
def __init__(self):
pass
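
BinaryCookieParser above overrides only value_decode, so every parsed cookie value surfaces as a binary string, and the cookies property then isomorphic-encodes the names as well. A rough standalone equivalent of that idea (illustrative only; DemoBinaryCookieParser and its cookie data are made up, not the wptserve class):

    from http.cookies import BaseCookie  # six.moves.http_cookies in this tree

    class DemoBinaryCookieParser(BaseCookie):
        """Parse-only variant: expose each cookie value as bytes."""
        def value_decode(self, val):
            # Return (real_value, coded_value); the real value becomes bytes.
            return val.encode("iso-8859-1"), val

    parser = DemoBinaryCookieParser()
    parser.load("flavour=chocolate; quantity=2")   # load() takes a native string
    cookies = {name.encode("iso-8859-1"): morsel.value
               for name, morsel in parser.items()}
    assert cookies == {b"flavour": b"chocolate", b"quantity": b"2"}
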
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/response.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/response.py
index 3a60c2babb3..b6f27447451 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/response.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/response.py
@@ -1,19 +1,22 @@
from collections import OrderedDict
from datetime import datetime, timedelta
-from six.moves.http_cookies import BaseCookie, Morsel
+from io import BytesIO
import json
-import uuid
import socket
-from .constants import response_codes, h2_headers
-from .logger import get_logger
-from io import BytesIO
+import uuid
-from six import binary_type, text_type, integer_types, itervalues, PY3
-from hyperframe.frame import HeadersFrame, DataFrame, ContinuationFrame
from hpack.struct import HeaderTuple
+from hyperframe.frame import HeadersFrame, DataFrame, ContinuationFrame
+from six import binary_type, text_type, integer_types, itervalues, PY3
+from six.moves.http_cookies import BaseCookie, Morsel
+
+from .constants import response_codes, h2_headers
+from .logger import get_logger
+from .utils import isomorphic_decode, isomorphic_encode
missing = object()
+
class Response(object):
"""Object representing the response to a HTTP request
@@ -79,7 +82,6 @@ class Response(object):
self.headers = ResponseHeaders()
self.content = []
-
@property
def status(self):
return self._status
@@ -99,8 +101,8 @@ class Response(object):
"""Set a cookie to be sent with a Set-Cookie header in the
response
- :param name: String name of the cookie
- :param value: String value of the cookie
+ :param name: name of the cookie (a binary string)
+ :param value: value of the cookie (a binary string, or None)
:param max_age: datetime.timedelta int representing the time (in seconds)
until the cookie expires
:param path: String path to which the cookie applies
@@ -113,14 +115,20 @@ class Response(object):
time or interval from now when the cookie expires
"""
+ # TODO(Python 3): Convert other parameters (e.g. path) to bytes, too.
+ if value is None:
+ value = b''
+ max_age = 0
+ expires = timedelta(days=-1)
+
+ if PY3:
+ name = isomorphic_decode(name)
+ value = isomorphic_decode(value)
+
days = {i+1: name for i, name in enumerate(["jan", "feb", "mar",
"apr", "may", "jun",
"jul", "aug", "sep",
"oct", "nov", "dec"])}
- if value is None:
- value = ''
- max_age = 0
- expires = timedelta(days=-1)
if isinstance(expires, timedelta):
expires = datetime.utcnow() + expires
@@ -154,11 +162,14 @@ class Response(object):
def unset_cookie(self, name):
"""Remove a cookie from those that are being sent with the response"""
+ if PY3:
+ name = isomorphic_decode(name)
cookies = self.headers.get("Set-Cookie")
parser = BaseCookie()
for cookie in cookies:
if PY3:
- cookie = cookie.decode("iso-8859-1")
+ # BaseCookie.load expects a text string.
+ cookie = isomorphic_decode(cookie)
parser.load(cookie)
if name in parser.keys():
@@ -223,9 +234,12 @@ class Response(object):
self.write_status_headers()
self.write_content()
- def set_error(self, code, message=""):
- """Set the response status headers and body to indicate an
- error"""
+ def set_error(self, code, message=u""):
+ """Set the response status headers and return a JSON error object:
+
+ {"error": {"code": code, "message": message}}
+ code is an int (HTTP status code), and message is a text string.
+ """
err = {"code": code,
"message": message}
data = json.dumps({"error": err})
@@ -297,24 +311,10 @@ class MultipartPart(object):
def _maybe_encode(s):
- """Encodes a text-type string into binary data using iso-8859-1.
-
- Returns `str` in Python 2 and `bytes` in Python 3. The function is a no-op
- if the argument already has a binary type.
- """
- if isinstance(s, binary_type):
- return s
-
- # Python 3 assumes iso-8859-1 when parsing headers, which will garble text
- # with non ASCII characters. We try to encode the text back to binary.
- # https://github.com/python/cpython/blob/273fc220b25933e443c82af6888eb1871d032fb8/Lib/http/client.py#L213
- if isinstance(s, text_type):
- return s.encode("iso-8859-1")
-
+ """Encode a string or an int into binary data using isomorphic_encode()."""
if isinstance(s, integer_types):
return b"%i" % (s,)
-
- raise TypeError("Unexpected value in ResponseHeaders: %r" % s)
+ return isomorphic_encode(s)
class ResponseHeaders(object):
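
With the shared helpers factored out, the response-side _maybe_encode reduces to "integers get their decimal byte form, everything else goes through isomorphic_encode". A self-contained sketch of that coercion (the local isomorphic_encode repeats the wptserve.utils logic only so the snippet runs on its own):

    def isomorphic_encode(s):
        return s if isinstance(s, bytes) else s.encode("iso-8859-1")

    def maybe_encode(value):
        """Coerce a header value to bytes; ints become their decimal form."""
        if isinstance(value, int):
            return b"%i" % (value,)
        return isomorphic_encode(value)

    assert maybe_encode(404) == b"404"
    assert maybe_encode(u"text/html") == b"text/html"
    assert maybe_encode(b"keep-alive") == b"keep-alive"
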
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/sslutils/openssl.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/sslutils/openssl.py
index aea1c7380b1..64f6d5fb2db 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/sslutils/openssl.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/sslutils/openssl.py
@@ -402,6 +402,8 @@ class OpenSSLEnvironment(object):
def _generate_host_cert(self, hosts):
host = hosts[0]
+ if not self.force_regenerate:
+ self._load_ca_cert()
if self._ca_key_path is None:
self._generate_ca(hosts)
ca_key_path = self._ca_key_path
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/stash.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/stash.py
index 541aced6010..6b351847491 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/stash.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/stash.py
@@ -4,7 +4,9 @@ import os
import uuid
import threading
from multiprocessing.managers import AcquirerProxy, BaseManager, DictProxy
-from six import text_type
+from six import text_type, binary_type
+
+from .utils import isomorphic_encode
class ServerDictManager(BaseManager):
@@ -145,9 +147,12 @@ class Stash(object):
if path is None:
path = self.default_path
# This key format is required to support using the path. Since the data
- # passed into the stash can be a DictProxy which wouldn't detect changes
- # when writing to a subdict.
- return (str(path), str(uuid.UUID(key)))
+ # passed into the stash can be a DictProxy which wouldn't detect
+ # changes when writing to a subdict.
+ if isinstance(key, binary_type):
+ # UUIDs are within the ASCII charset.
+ key = key.decode('ascii')
+ return (isomorphic_encode(path), uuid.UUID(key).bytes)
def put(self, key, value, path=None):
"""Place a value in the shared stash.
diff --git a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/utils.py b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/utils.py
index 64b08a27aa8..b005b417d74 100644
--- a/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/utils.py
+++ b/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/utils.py
@@ -1,6 +1,45 @@
import socket
import sys
+from six import binary_type, text_type
+
+
+def isomorphic_decode(s):
+ """Decodes a binary string into a text string using iso-8859-1.
+
+ Returns `unicode` in Python 2 and `str` in Python 3. The function is a
+ no-op if the argument already has a text type. iso-8859-1 is chosen because
+ it is an 8-bit encoding whose code points range from 0x0 to 0xFF and the
+ values are the same as the binary representations, so any binary string can
+ be decoded into and encoded from iso-8859-1 without any errors or data
+ loss. Python 3 also uses iso-8859-1 (or latin-1) extensively in http:
+ https://github.com/python/cpython/blob/273fc220b25933e443c82af6888eb1871d032fb8/Lib/http/client.py#L213
+ """
+ if isinstance(s, text_type):
+ return s
+
+ if isinstance(s, binary_type):
+ return s.decode("iso-8859-1")
+
+ raise TypeError("Unexpected value (expecting string-like): %r" % s)
+
+
+def isomorphic_encode(s):
+ """Encodes a text-type string into binary data using iso-8859-1.
+
+ Returns `str` in Python 2 and `bytes` in Python 3. The function is a no-op
+ if the argument already has a binary type. This is the counterpart of
+ isomorphic_decode.
+ """
+ if isinstance(s, binary_type):
+ return s
+
+ if isinstance(s, text_type):
+ return s.encode("iso-8859-1")
+
+ raise TypeError("Unexpected value (expecting string-like): %r" % s)
+
+
def invert_dict(dict):
rv = {}
for key, values in dict.items():
@@ -25,6 +64,7 @@ def _open_socket(host, port):
sock.listen(5)
return sock
+
def is_bad_port(port):
"""
Bad port as per https://fetch.spec.whatwg.org/#port-blocking
@@ -99,6 +139,7 @@ def is_bad_port(port):
6697, # irc+tls
]
+
def get_port(host=''):
host = host or '127.0.0.1'
port = 0
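
The two helpers are exact inverses over the whole byte range, which is the property the rest of the patch leans on. A quick standalone check (Python 3 shown; the function bodies repeat the wptserve.utils logic so the snippet runs on its own):

    def isomorphic_decode(b):
        return b.decode("iso-8859-1")

    def isomorphic_encode(s):
        return s.encode("iso-8859-1")

    raw = bytes(range(256))                       # every possible byte value
    assert isomorphic_encode(isomorphic_decode(raw)) == raw   # lossless round trip
    assert isomorphic_decode(raw) == u"".join(chr(i) for i in range(256))
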
diff --git a/chromium/third_party/blink/tools/blinkpy/w3c/android_wpt_expectations_updater.py b/chromium/third_party/blink/tools/blinkpy/w3c/android_wpt_expectations_updater.py
index fb4ce203bd4..752346c8a6e 100644
--- a/chromium/third_party/blink/tools/blinkpy/w3c/android_wpt_expectations_updater.py
+++ b/chromium/third_party/blink/tools/blinkpy/w3c/android_wpt_expectations_updater.py
@@ -24,10 +24,10 @@ from blinkpy.common.memoized import memoized
from blinkpy.common.system.executive import ScriptError
from blinkpy.w3c.wpt_expectations_updater import WPTExpectationsUpdater
from blinkpy.web_tests.models.test_expectations import TestExpectations
-from blinkpy.web_tests.models.typ_types import Expectation
+from blinkpy.web_tests.models.typ_types import Expectation, ResultType
from blinkpy.web_tests.port.android import (
PRODUCTS, PRODUCTS_TO_STEPNAMES, PRODUCTS_TO_BROWSER_TAGS,
- PRODUCTS_TO_EXPECTATION_FILE_PATHS)
+ PRODUCTS_TO_EXPECTATION_FILE_PATHS, ANDROID_DISABLED_TESTS)
_log = logging.getLogger(__name__)
@@ -36,18 +36,22 @@ AndroidConfig = namedtuple('AndroidConfig', ['port_name', 'browser'])
class AndroidWPTExpectationsUpdater(WPTExpectationsUpdater):
MARKER_COMMENT = '# Add untriaged failures in this block'
+ NEVER_FIX_MARKER_COMMENT = '# Add untriaged disabled tests in this block'
UMBRELLA_BUG = 'crbug.com/1050754'
def __init__(self, host, args=None):
super(AndroidWPTExpectationsUpdater, self).__init__(host, args)
- expectations_dict = {}
- for product in self.options.android_product:
- path = PRODUCTS_TO_EXPECTATION_FILE_PATHS[product]
- expectations_dict.update(
- {path: self.host.filesystem.read_text_file(path)})
+ self._never_fix_expectations = TestExpectations(
+ self.port, {
+ ANDROID_DISABLED_TESTS:
+ host.filesystem.read_text_file(ANDROID_DISABLED_TESTS)})
- self._test_expectations = TestExpectations(
- self.port, expectations_dict=expectations_dict)
+ def expectations_files(self):
+ # We need to put all the Android expectation files in
+ # the _test_expectations member variable so that the
+ # files get cleaned in cleanup_test_expectations_files()
+ return (PRODUCTS_TO_EXPECTATION_FILE_PATHS.values() +
+ [ANDROID_DISABLED_TESTS])
def _get_web_test_results(self, build):
"""Gets web tests results for Android builders. We need to
@@ -114,13 +118,42 @@ class AndroidWPTExpectationsUpdater(WPTExpectationsUpdater):
Android at the moment."""
return False
+ @staticmethod
@memoized
- def _get_marker_line_number(self, path):
- for line in self._test_expectations.get_updated_lines(path):
- if line.to_string() == self.MARKER_COMMENT:
+ def _get_marker_line_number(test_expectations, path, marker_comment):
+ for line in test_expectations.get_updated_lines(path):
+ if line.to_string() == marker_comment:
return line.lineno
raise ScriptError('Marker comment does not exist in %s' % path)
+ def _get_untriaged_test_expectations(
+ self, test_expectations, paths, marker_comment):
+ untriaged_exps = defaultdict(dict)
+ for path in paths:
+ marker_lineno = self._get_marker_line_number(
+ test_expectations, path, marker_comment)
+ exp_lines = test_expectations.get_updated_lines(path)
+ for i in range(marker_lineno, len(exp_lines)):
+ if (not exp_lines[i].to_string().strip() or
+ exp_lines[i].to_string().startswith('#')):
+ break
+ untriaged_exps[path].setdefault(
+ exp_lines[i].test, []).append(exp_lines[i])
+ return untriaged_exps
+
+ def _maybe_create_never_fix_expectation(
+ self, path, test, test_skipped, tags):
+ if test_skipped:
+ exps = self._test_expectations.get_expectations_from_file(
+ path, test)
+ wontfix = self._never_fix_expectations.matches_an_expected_result(
+ test, ResultType.Skip)
+ temporary_skip = any(ResultType.Skip in exp.results for exp in exps)
+ if not (wontfix or temporary_skip):
+ return Expectation(
+ test=test, reason=self.UMBRELLA_BUG,
+ results={ResultType.Skip}, tags=tags, raw_tags=tags)
+
def write_to_test_expectations(self, test_to_results):
"""Each expectations file is browser specific, and currently only
runs on pie. Therefore we do not need any configuration specifiers
@@ -136,48 +169,77 @@ class AndroidWPTExpectationsUpdater(WPTExpectationsUpdater):
browser_to_exp_path = {
browser: PRODUCTS_TO_EXPECTATION_FILE_PATHS[product]
for product, browser in PRODUCTS_TO_BROWSER_TAGS.items()}
- untriaged_exps = defaultdict(dict)
-
- for path in self._test_expectations.expectations_dict:
- marker_lineno = self._get_marker_line_number(path)
- exp_lines = self._test_expectations.get_updated_lines(path)
- for i in range(marker_lineno, len(exp_lines)):
- if (not exp_lines[i].to_string().strip() or
- exp_lines[i].to_string().startswith('#')):
- break
- untriaged_exps[path][exp_lines[i].test] = exp_lines[i]
+ product_exp_paths = {PRODUCTS_TO_EXPECTATION_FILE_PATHS[prod]
+ for prod in self.options.android_product}
+ untriaged_exps = self._get_untriaged_test_expectations(
+ self._test_expectations, product_exp_paths, self.MARKER_COMMENT)
+ neverfix_tests = self._get_untriaged_test_expectations(
+ self._never_fix_expectations, [ANDROID_DISABLED_TESTS],
+ self.NEVER_FIX_MARKER_COMMENT)[ANDROID_DISABLED_TESTS]
for path, test_exps in untriaged_exps.items():
self._test_expectations.remove_expectations(
- path, test_exps.values())
+ path, reduce(lambda x, y: x + y, test_exps.values()))
+
+ if neverfix_tests:
+ self._never_fix_expectations.remove_expectations(
+ ANDROID_DISABLED_TESTS,
+ reduce(lambda x, y: x + y, neverfix_tests.values()))
for results_test_name, platform_results in test_to_results.items():
exps_test_name = 'external/wpt/%s' % results_test_name
for configs, test_results in platform_results.items():
for config in configs:
path = browser_to_exp_path[config.browser]
- # no system specifiers are necessary because we are
- # writing to browser specific expectations files for
- # only one Android version.
- unexpected_results = {r for r in test_results.actual.split()
- if r not in test_results.expected.split()}
-
- if exps_test_name not in untriaged_exps[path]:
- untriaged_exps[path][exps_test_name] = Expectation(
- test=exps_test_name, reason=self.UMBRELLA_BUG,
- results=unexpected_results)
+ neverfix_exp = self._maybe_create_never_fix_expectation(
+ path, exps_test_name,
+ ResultType.Skip in test_results.actual,
+ {config.browser.lower()})
+ if neverfix_exp:
+ neverfix_tests.setdefault(exps_test_name, []).append(
+ neverfix_exp)
else:
- untriaged_exps[path][exps_test_name].add_expectations(
- unexpected_results, reason=self.UMBRELLA_BUG)
+ # no system specifiers are necessary because we are
+ # writing to browser specific expectations files for
+ # only one Android version.
+ unexpected_results = {
+ r for r in test_results.actual.split()
+ if r not in test_results.expected.split()}
+
+ if exps_test_name not in untriaged_exps[path]:
+ untriaged_exps[path].setdefault(
+ exps_test_name, []).append(Expectation(
+ test=exps_test_name, reason=self.UMBRELLA_BUG,
+ results=unexpected_results))
+ else:
+ exp = untriaged_exps[path][exps_test_name][0]
+ exp.add_expectations(
+ unexpected_results, reason=self.UMBRELLA_BUG)
for path in untriaged_exps:
- marker_lineno = self._get_marker_line_number(path)
+ marker_lineno = self._get_marker_line_number(
+ self._test_expectations, path, self.MARKER_COMMENT)
self._test_expectations.add_expectations(
path,
- sorted(untriaged_exps[path].values(), key=lambda e: e.test),
+ sorted([exps[0] for exps in untriaged_exps[path].values()],
+ key=lambda e: e.test),
marker_lineno)
+ disabled_tests_marker_lineno = self._get_marker_line_number(
+ self._never_fix_expectations,
+ ANDROID_DISABLED_TESTS,
+ self.NEVER_FIX_MARKER_COMMENT)
+
+ if neverfix_tests:
+ self._never_fix_expectations.add_expectations(
+ ANDROID_DISABLED_TESTS,
+ sorted(reduce(lambda x, y: x + y, neverfix_tests.values()),
+ key=lambda e: e.test),
+ disabled_tests_marker_lineno)
+
self._test_expectations.commit_changes()
+ self._never_fix_expectations.commit_changes()
+
# TODO(rmhasan): Return dictionary mapping test names to lists of
# test expectation strings.
return {}
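
_get_untriaged_test_expectations walks each expectations file from its marker comment and stops at the first blank or comment line. A simplified text-level sketch of that delimiting rule (illustrative only; the real updater operates on TestExpectations line objects, not raw strings, and the sample content is invented):

    MARKER = '# Add untriaged failures in this block'

    def untriaged_lines(text):
        lines = text.splitlines()
        block = []
        for line in lines[lines.index(MARKER) + 1:]:
            if not line.strip() or line.startswith('#'):
                break              # a blank or comment line closes the block
            block.append(line)
        return block

    sample = ('# results: [ Failure Crash Timeout ]\n'
              '\n'
              '# Add untriaged failures in this block\n'
              'crbug.com/1050754 external/wpt/abc.html [ Failure ]\n'
              'crbug.com/1050754 external/wpt/def.html [ Crash ]\n'
              '\n'
              '# This comment will not be deleted\n')

    assert untriaged_lines(sample) == [
        'crbug.com/1050754 external/wpt/abc.html [ Failure ]',
        'crbug.com/1050754 external/wpt/def.html [ Crash ]']
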
diff --git a/chromium/third_party/blink/tools/blinkpy/w3c/android_wpt_expectations_updater_unittest.py b/chromium/third_party/blink/tools/blinkpy/w3c/android_wpt_expectations_updater_unittest.py
index e3c70e486b1..789db9a8026 100644
--- a/chromium/third_party/blink/tools/blinkpy/w3c/android_wpt_expectations_updater_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/w3c/android_wpt_expectations_updater_unittest.py
@@ -12,10 +12,16 @@ from blinkpy.common.net.web_test_results import WebTestResults
from blinkpy.common.system.log_testing import LoggingTestCase
from blinkpy.web_tests.builder_list import BuilderList
from blinkpy.web_tests.port.factory_mock import MockPortFactory
-from blinkpy.web_tests.port.android import PRODUCTS_TO_EXPECTATION_FILE_PATHS
+from blinkpy.web_tests.port.android import (
+ PRODUCTS_TO_EXPECTATION_FILE_PATHS, ANDROID_DISABLED_TESTS,
+ ANDROID_WEBLAYER, ANDROID_WEBVIEW, CHROME_ANDROID,
+ PRODUCTS_TO_STEPNAMES)
from blinkpy.w3c.android_wpt_expectations_updater import (
AndroidWPTExpectationsUpdater)
+WEBLAYER_WPT_STEP = PRODUCTS_TO_STEPNAMES[ANDROID_WEBLAYER]
+WEBVIEW_WPT_STEP = PRODUCTS_TO_STEPNAMES[ANDROID_WEBVIEW]
+CHROME_ANDROID_WPT_STEP = PRODUCTS_TO_STEPNAMES[CHROME_ANDROID]
class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
@@ -35,11 +41,19 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
'# This comment will not be deleted\n'
'crbug.com/111111 external/wpt/hello_world.html [ Crash ]\n')
+ _raw_android_never_fix_tests = (
+ '# tags: [ android-weblayer android-webview chrome-android ]\n'
+ '# results: [ Skip ]\n'
+ '\n'
+ '# Add untriaged disabled tests in this block\n'
+ 'crbug.com/1050754 [ android-webview ] external/wpt/disabled.html [ Skip ]\n')
+
def _setup_host(self):
"""Returns a mock host with fake values set up for testing."""
self.set_logging_level(logging.DEBUG)
host = MockHost()
host.port_factory = MockPortFactory(host)
+ host.executive._output = ''
# Set up a fake list of try builders.
host.builders = BuilderList({
@@ -65,6 +79,9 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
for path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values():
host.filesystem.write_text_file(
path, self._raw_android_expectations)
+
+ host.filesystem.write_text_file(
+ ANDROID_DISABLED_TESTS, self._raw_android_never_fix_tests)
return host
def testUpdateTestExpectationsForWebview(self):
@@ -88,11 +105,17 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
'actual': 'CRASH CRASH TIMEOUT',
'is_unexpected': True,
},
+ 'dog.html': {
+ 'expected': 'SKIP',
+ 'actual': 'SKIP',
+ 'is_unexpected': True,
+ },
},
- }, step_name='system_webview_wpt (with patch)'),
- step_name='system_webview_wpt (with patch)')
+ }, step_name=WEBVIEW_WPT_STEP + ' (with patch)'),
+ step_name=WEBVIEW_WPT_STEP + ' (with patch)')
updater = AndroidWPTExpectationsUpdater(
- host, ['-vvv', '--android-product', 'android_webview'])
+ host, ['-vvv', '--android-product', ANDROID_WEBVIEW,
+ '--clean-up-affected-tests-only'])
updater.git_cl = MockGitCL(host, {
Build('MOCK Android Pie', 123):
TryJobStatus('COMPLETED', 'FAILURE')})
@@ -100,7 +123,7 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
updater.run()
# Get new expectations
content = host.filesystem.read_text_file(
- PRODUCTS_TO_EXPECTATION_FILE_PATHS['android_webview'])
+ PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBVIEW])
self.assertEqual(
content,
('# results: [ Failure Crash Timeout]\n'
@@ -118,17 +141,36 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
'\n'
'# This comment will not be deleted\n'
'crbug.com/111111 external/wpt/hello_world.html [ Crash ]\n'))
+ neverfix_content = host.filesystem.read_text_file(
+ ANDROID_DISABLED_TESTS)
+ self.assertEqual(
+ neverfix_content,
+ ('# tags: [ android-weblayer android-webview chrome-android ]\n'
+ '# results: [ Skip ]\n'
+ '\n'
+ '# Add untriaged disabled tests in this block\n'
+ 'crbug.com/1050754 [ android-webview ] external/wpt/disabled.html [ Skip ]\n'
+ 'crbug.com/1050754 [ android-webview ] external/wpt/dog.html [ Skip ]\n'))
# check that chrome android's expectation file was not modified
# since the same bot is used to update chrome android & webview
# expectations
self.assertEqual(
host.filesystem.read_text_file(
- PRODUCTS_TO_EXPECTATION_FILE_PATHS['chrome_android']),
+ PRODUCTS_TO_EXPECTATION_FILE_PATHS[CHROME_ANDROID]),
self._raw_android_expectations)
# Check logs
logs = ''.join(self.logMessages()).lower()
- self.assertNotIn('weblayer', logs)
- self.assertNotIn('chrome', logs)
+ self.assertNotIn(WEBLAYER_WPT_STEP, logs)
+ self.assertNotIn(CHROME_ANDROID_WPT_STEP, logs)
+ # Check that weblayer and chrome expectation files were not changed
+ self.assertEqual(
+ self._raw_android_expectations,
+ host.filesystem.read_text_file(
+ PRODUCTS_TO_EXPECTATION_FILE_PATHS[CHROME_ANDROID]))
+ self.assertEqual(
+ self._raw_android_expectations,
+ host.filesystem.read_text_file(
+ PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBLAYER]))
def testUpdateTestExpectationsForWeblayer(self):
host = self._setup_host()
@@ -157,10 +199,11 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
'is_unexpected': True,
},
},
- }, step_name='weblayer_shell_wpt (with patch)'),
- step_name='weblayer_shell_wpt (with patch)')
+ }, step_name=WEBLAYER_WPT_STEP + ' (with patch)'),
+ step_name=WEBLAYER_WPT_STEP + ' (with patch)')
updater = AndroidWPTExpectationsUpdater(
- host, ['-vvv', '--android-product', 'android_weblayer'])
+ host, ['-vvv', '--android-product', ANDROID_WEBLAYER,
+ '--clean-up-affected-tests-only'])
updater.git_cl = MockGitCL(host, {
Build('MOCK Android Weblayer - Pie', 123):
TryJobStatus('COMPLETED', 'FAILURE')})
@@ -168,7 +211,7 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
updater.run()
# Get new expectations
content = host.filesystem.read_text_file(
- PRODUCTS_TO_EXPECTATION_FILE_PATHS['android_weblayer'])
+ PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBLAYER])
self.assertEqual(
content,
('# results: [ Failure Crash Timeout]\n'
@@ -189,10 +232,24 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
'crbug.com/111111 external/wpt/hello_world.html [ Crash ]\n'))
# Check logs
logs = ''.join(self.logMessages()).lower()
- self.assertNotIn('webview', logs)
- self.assertNotIn('chrome', logs)
+ self.assertNotIn(WEBVIEW_WPT_STEP, logs)
+ self.assertNotIn(CHROME_ANDROID_WPT_STEP, logs)
+ # Check that webview and chrome expectation files were not changed
+ self.assertEqual(
+ self._raw_android_expectations,
+ host.filesystem.read_text_file(
+ PRODUCTS_TO_EXPECTATION_FILE_PATHS[CHROME_ANDROID]))
+ self.assertEqual(
+ self._raw_android_expectations,
+ host.filesystem.read_text_file(
+ PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBVIEW]))
+ self.assertEqual(
+ self._raw_android_never_fix_tests,
+ host.filesystem.read_text_file(ANDROID_DISABLED_TESTS))
- def testUpdateTestExpectationsForAll(self):
+ def testCleanupAndUpdateTestExpectationsForAll(self):
+ # Full integration test for expectations cleanup and update
+ # using builder results.
host = self._setup_host()
# Add results for Weblayer
host.results_fetcher.set_results(
@@ -209,9 +266,14 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
'actual': 'CRASH CRASH FAIL',
'is_unexpected': True,
},
+ 'disabled_weblayer_only.html': {
+ 'expected': 'SKIP',
+ 'actual': 'SKIP',
+ 'is_unexpected': True,
+ },
},
- }, step_name='weblayer_shell_wpt (with patch)'),
- step_name='weblayer_shell_wpt (with patch)')
+ }, step_name=WEBLAYER_WPT_STEP + ' (with patch)'),
+ step_name=WEBLAYER_WPT_STEP + ' (with patch)')
# Add Results for Webview
host.results_fetcher.set_results(
Build('MOCK Android Pie', 101),
@@ -227,9 +289,13 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
'actual': 'TIMEOUT',
'is_unexpected': True,
},
+ 'disabled.html': {
+ 'expected': 'SKIP',
+ 'actual': 'SKIP',
+ },
},
- }, step_name='system_webview_wpt (with patch)'),
- step_name='system_webview_wpt (with patch)')
+ }, step_name=WEBVIEW_WPT_STEP + ' (with patch)'),
+ step_name=WEBVIEW_WPT_STEP + ' (with patch)')
# Add Results for Chrome
host.results_fetcher.set_results(
Build('MOCK Android Pie', 101),
@@ -245,24 +311,42 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
'actual': 'CRASH CRASH TIMEOUT',
'is_unexpected': True,
},
+ 'disabled.html': {
+ 'expected': 'SKIP',
+ 'actual': 'SKIP',
+ 'is_unexpected': True,
+ },
},
- }, step_name='chrome_public_wpt (with patch)'),
- step_name='chrome_public_wpt (with patch)')
+ }, step_name=CHROME_ANDROID_WPT_STEP + ' (with patch)'),
+ step_name=CHROME_ANDROID_WPT_STEP + ' (with patch)')
updater = AndroidWPTExpectationsUpdater(
host, ['-vvv',
- '--android-product', 'android_weblayer',
- '--android-product', 'chrome_android',
- '--android-product', 'android_webview'])
+ '--clean-up-affected-tests-only',
+ '--android-product', ANDROID_WEBLAYER,
+ '--android-product', CHROME_ANDROID,
+ '--android-product', ANDROID_WEBVIEW])
+
+ def _git_command_return_val(cmd):
+ if '--diff-filter=D' in cmd:
+ return 'external/wpt/ghi.html'
+ if '--diff-filter=R' in cmd:
+ return 'C external/wpt/van.html external/wpt/wagon.html'
+ return ''
+
updater.git_cl = MockGitCL(host, {
Build('MOCK Android Weblayer - Pie', 123):
TryJobStatus('COMPLETED', 'FAILURE'),
Build('MOCK Android Pie', 101):
TryJobStatus('COMPLETED', 'FAILURE')})
+
+ updater.git.run = _git_command_return_val
+ updater._relative_to_web_test_dir = lambda test_path: test_path
+
# Run command
updater.run()
# Check expectations for weblayer
content = host.filesystem.read_text_file(
- PRODUCTS_TO_EXPECTATION_FILE_PATHS['android_weblayer'])
+ PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBLAYER])
self.assertEqual(
content,
('# results: [ Failure Crash Timeout]\n'
@@ -272,9 +356,8 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
'crbug.com/1050754 external/wpt/cat.html [ Failure ]\n'
'crbug.com/1050754 external/wpt/def.html [ Crash ]\n'
'external/wpt/dog.html [ Crash Timeout ]\n'
- 'crbug.com/1050754 external/wpt/ghi.html [ Timeout ]\n'
'crbug.com/1111111 external/wpt/jkl.html [ Failure ]\n'
- 'crbug.com/6789043 external/wpt/van.html [ Failure ]\n'
+ 'crbug.com/6789043 external/wpt/wagon.html [ Failure ]\n'
'crbug.com/1050754 external/wpt/weblayer_only.html [ Failure Crash ]\n'
'external/wpt/www.html [ Crash Failure ]\n'
'\n'
@@ -282,7 +365,7 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
'crbug.com/111111 external/wpt/hello_world.html [ Crash ]\n'))
# Check expectations for webview
content = host.filesystem.read_text_file(
- PRODUCTS_TO_EXPECTATION_FILE_PATHS['android_webview'])
+ PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBVIEW])
self.assertEqual(
content,
('# results: [ Failure Crash Timeout]\n'
@@ -292,9 +375,8 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
'crbug.com/1050754 external/wpt/cat.html [ Crash Failure Timeout ]\n'
'crbug.com/1050754 external/wpt/def.html [ Crash ]\n'
'external/wpt/dog.html [ Crash Timeout ]\n'
- 'crbug.com/1050754 external/wpt/ghi.html [ Timeout ]\n'
'crbug.com/1111111 external/wpt/jkl.html [ Failure ]\n'
- 'crbug.com/6789043 external/wpt/van.html [ Failure ]\n'
+ 'crbug.com/6789043 external/wpt/wagon.html [ Failure ]\n'
'crbug.com/1050754 external/wpt/webview_only.html [ Timeout ]\n'
'external/wpt/www.html [ Crash Failure ]\n'
'\n'
@@ -302,7 +384,7 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
'crbug.com/111111 external/wpt/hello_world.html [ Crash ]\n'))
# Check expectations chrome
content = host.filesystem.read_text_file(
- PRODUCTS_TO_EXPECTATION_FILE_PATHS['chrome_android'])
+ PRODUCTS_TO_EXPECTATION_FILE_PATHS[CHROME_ANDROID])
self.assertEqual(
content,
('# results: [ Failure Crash Timeout]\n'
@@ -313,11 +395,21 @@ class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
'crbug.com/1050754 external/wpt/chrome_only.html [ Crash Timeout ]\n'
'crbug.com/1050754 external/wpt/def.html [ Crash ]\n'
'external/wpt/dog.html [ Crash Timeout ]\n'
- 'crbug.com/1050754 external/wpt/ghi.html [ Timeout ]\n'
'crbug.com/1111111 crbug.com/1050754'
' external/wpt/jkl.html [ Failure ]\n'
- 'crbug.com/6789043 external/wpt/van.html [ Failure ]\n'
+ 'crbug.com/6789043 external/wpt/wagon.html [ Failure ]\n'
'external/wpt/www.html [ Crash Failure ]\n'
'\n'
'# This comment will not be deleted\n'
'crbug.com/111111 external/wpt/hello_world.html [ Crash ]\n'))
+ # Check disabled test file
+ neverfix_content = host.filesystem.read_text_file(ANDROID_DISABLED_TESTS)
+ self.assertEqual(
+ neverfix_content,
+ ('# tags: [ android-weblayer android-webview chrome-android ]\n'
+ '# results: [ Skip ]\n'
+ '\n'
+ '# Add untriaged disabled tests in this block\n'
+ 'crbug.com/1050754 [ android-webview ] external/wpt/disabled.html [ Skip ]\n'
+ 'crbug.com/1050754 [ chrome-android ] external/wpt/disabled.html [ Skip ]\n'
+ 'crbug.com/1050754 [ android-weblayer ] external/wpt/disabled_weblayer_only.html [ Skip ]\n'))
diff --git a/chromium/third_party/blink/tools/blinkpy/w3c/export_notifier.py b/chromium/third_party/blink/tools/blinkpy/w3c/export_notifier.py
index 5f04d9ac63f..28ae1593c68 100644
--- a/chromium/third_party/blink/tools/blinkpy/w3c/export_notifier.py
+++ b/chromium/third_party/blink/tools/blinkpy/w3c/export_notifier.py
@@ -13,7 +13,7 @@ Design doc: https://docs.google.com/document/d/1MtdbUcWBDZyvmV0FOdsTWw_Jv16YtE6K
import logging
-from blinkpy.w3c.common import WPT_REVISION_FOOTER
+from blinkpy.w3c.common import WPT_REVISION_FOOTER, WPT_GH_URL
from blinkpy.w3c.gerrit import GerritError
from blinkpy.w3c.wpt_github import GitHubError
@@ -65,8 +65,7 @@ class ExportNotifier(object):
gerrit_sha = self.wpt_github.extract_metadata(
WPT_REVISION_FOOTER, pr.body)
gerrit_dict[gerrit_id] = PRStatusInfo(
- taskcluster_status['node_id'],
- taskcluster_status['target_url'], gerrit_sha)
+ taskcluster_status['target_url'], pr.number, gerrit_sha)
self.process_failing_prs(gerrit_dict)
return False
@@ -126,10 +125,10 @@ class ExportNotifier(object):
pr_status_info: PRStatusInfo object.
"""
for message in reversed(messages):
- existing_status = PRStatusInfo.from_gerrit_comment(
+ cl_gerrit_sha = PRStatusInfo.get_gerrit_sha_from_comment(
message['message'])
- if existing_status:
- return existing_status.node_id == pr_status_info.node_id
+ if cl_gerrit_sha:
+ return cl_gerrit_sha == pr_status_info.gerrit_sha
return False
@@ -176,24 +175,19 @@ class ExportNotifier(object):
class PRStatusInfo(object):
- NODE_ID_TAG = 'Taskcluster Node ID: '
LINK_TAG = 'Taskcluster Link: '
CL_SHA_TAG = 'Gerrit CL SHA: '
PATCHSET_TAG = 'Patchset Number: '
- def __init__(self, node_id, link, gerrit_sha=None):
- self._node_id = node_id
+ def __init__(self, link, pr_number, gerrit_sha=None):
self._link = link
+ self.pr_number = pr_number
if gerrit_sha:
self._gerrit_sha = gerrit_sha
else:
self._gerrit_sha = 'Latest'
@property
- def node_id(self):
- return self._node_id
-
- @property
def link(self):
return self._link
@@ -202,37 +196,27 @@ class PRStatusInfo(object):
return self._gerrit_sha
@staticmethod
- def from_gerrit_comment(comment):
- tags = [
- PRStatusInfo.NODE_ID_TAG, PRStatusInfo.LINK_TAG,
- PRStatusInfo.CL_SHA_TAG
- ]
- values = ['', '', '']
-
+ def get_gerrit_sha_from_comment(comment):
for line in comment.splitlines():
- for index, tag in enumerate(tags):
- if line.startswith(tag):
- values[index] = line[len(tag):]
+ if line.startswith(PRStatusInfo.CL_SHA_TAG):
+ return line[len(PRStatusInfo.CL_SHA_TAG):]
- for val in values:
- if not val:
- return None
-
- return PRStatusInfo(*values)
+ return None
def to_gerrit_comment(self, patchset=None):
status_line = (
- 'The exported PR for the current patch failed Taskcluster check(s) '
- 'on GitHub, which could indict cross-broswer failures on the '
- 'exportable changes. Please contact ecosystem-infra@ team for '
- 'more information.')
- node_id_line = ('\n\n{}{}').format(PRStatusInfo.NODE_ID_TAG,
- self.node_id)
- link_line = ('\n{}{}').format(PRStatusInfo.LINK_TAG, self.link)
+ 'The exported PR, {pr_url}, has failed Taskcluster check(s) '
+ 'on GitHub, which could indicate cross-browser failures on the '
+ 'exported changes. Please contact ecosystem-infra@chromium.org for '
+ 'more information.').format(
+ pr_url='%spull/%d' % (WPT_GH_URL, self.pr_number)
+ )
+ link_line = ('\n\n{}{}').format(PRStatusInfo.LINK_TAG, self.link)
sha_line = ('\n{}{}').format(PRStatusInfo.CL_SHA_TAG, self.gerrit_sha)
- comment = status_line + node_id_line + link_line + sha_line
+ comment = status_line + link_line + sha_line
if patchset is not None:
comment += ('\n{}{}').format(PRStatusInfo.PATCHSET_TAG, patchset)
+ comment += '\n\nAny suggestions to improve this service are welcome; crbug.com/1027618.'
return comment
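
With the Taskcluster node ID gone, "have we already commented?" is decided purely by the Gerrit CL SHA line, so the comment only has to round-trip that one tag. A standalone sketch of that round trip (the tag string mirrors CL_SHA_TAG; the URLs and SHA are made up and nothing here imports blinkpy):

    CL_SHA_TAG = 'Gerrit CL SHA: '

    def build_comment(pr_url, link, sha):
        return ('The exported PR, %s, has failed Taskcluster check(s) on GitHub.'
                '\n\nTaskcluster Link: %s'
                '\n%s%s' % (pr_url, link, CL_SHA_TAG, sha))

    def sha_from_comment(comment):
        for line in comment.splitlines():
            if line.startswith(CL_SHA_TAG):
                return line[len(CL_SHA_TAG):]
        return None

    comment = build_comment('https://github.com/web-platform-tests/wpt/pull/123',
                            'https://example.test/task/1', 'deadbeef')
    assert sha_from_comment(comment) == 'deadbeef'
    assert sha_from_comment('Uploaded patch set 1.') is None
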
diff --git a/chromium/third_party/blink/tools/blinkpy/w3c/export_notifier_unittest.py b/chromium/third_party/blink/tools/blinkpy/w3c/export_notifier_unittest.py
index cdaaaf73689..204f2f2b278 100644
--- a/chromium/third_party/blink/tools/blinkpy/w3c/export_notifier_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/w3c/export_notifier_unittest.py
@@ -18,84 +18,40 @@ class ExportNotifierTest(LoggingTestCase):
self.gerrit = MockGerritAPI()
self.notifier = ExportNotifier(self.host, self.git, self.gerrit)
- def test_from_gerrit_comment_success(self):
- gerrit_comment = (
- 'The exported PR for the current patch failed Taskcluster check(s) '
- 'on GitHub, which could indict cross-broswer failures on the '
- 'exportable changes. Please contact ecosystem-infra@ team for '
- 'more information.\n\n'
- 'Taskcluster Node ID: foo\n'
- 'Taskcluster Link: bar\n'
- 'Gerrit CL SHA: num')
-
- actual = PRStatusInfo.from_gerrit_comment(gerrit_comment)
-
- self.assertEqual(actual.node_id, 'foo')
- self.assertEqual(actual.link, 'bar')
- self.assertEqual(actual.gerrit_sha, 'num')
-
- def test_from_gerrit_comment_missing_info(self):
- gerrit_comment = (
- 'The exported PR for the current patch failed Taskcluster check(s) '
- 'on GitHub, which could indict cross-broswer failures on the '
- 'exportable changes. Please contact ecosystem-infra@ team for '
- 'more information.\n\n'
- 'Taskcluster Node ID: \n'
- 'Taskcluster Link: bar\n'
- 'Gerrit CL SHA: num')
-
- actual = PRStatusInfo.from_gerrit_comment(gerrit_comment)
+ def test_get_gerrit_sha_from_comment_success(self):
+ gerrit_comment = self.generate_notifier_comment(
+ 123, 'bar', 'num', None)
- self.assertIsNone(actual)
+ actual = PRStatusInfo.get_gerrit_sha_from_comment(gerrit_comment)
+
+ self.assertEqual(actual, 'num')
- def test_from_gerrit_comment_fail(self):
+ def test_get_gerrit_sha_from_comment_fail(self):
gerrit_comment = 'ABC'
- actual = PRStatusInfo.from_gerrit_comment(gerrit_comment)
+ actual = PRStatusInfo.get_gerrit_sha_from_comment(gerrit_comment)
self.assertIsNone(actual)
def test_to_gerrit_comment(self):
- pr_status_info = PRStatusInfo('foo', 'bar', 'num')
- expected = (
- 'The exported PR for the current patch failed Taskcluster check(s) '
- 'on GitHub, which could indict cross-broswer failures on the '
- 'exportable changes. Please contact ecosystem-infra@ team for '
- 'more information.\n\n'
- 'Taskcluster Node ID: foo\n'
- 'Taskcluster Link: bar\n'
- 'Gerrit CL SHA: num')
+ pr_status_info = PRStatusInfo('bar', 123, 'num')
+ expected = self.generate_notifier_comment(123, 'bar', 'num', None)
actual = pr_status_info.to_gerrit_comment()
self.assertEqual(expected, actual)
def test_to_gerrit_comment_latest(self):
- pr_status_info = PRStatusInfo('foo', 'bar', None)
- expected = (
- 'The exported PR for the current patch failed Taskcluster check(s) '
- 'on GitHub, which could indict cross-broswer failures on the '
- 'exportable changes. Please contact ecosystem-infra@ team for '
- 'more information.\n\n'
- 'Taskcluster Node ID: foo\n'
- 'Taskcluster Link: bar\n'
- 'Gerrit CL SHA: Latest')
+ pr_status_info = PRStatusInfo('bar', 123, None)
+ expected = self.generate_notifier_comment(123, 'bar', 'Latest', None)
actual = pr_status_info.to_gerrit_comment()
self.assertEqual(expected, actual)
def test_to_gerrit_comment_with_patchset(self):
- pr_status_info = PRStatusInfo('foo', 'bar', 'num')
- expected = (
- 'The exported PR for the current patch failed Taskcluster check(s) '
- 'on GitHub, which could indict cross-broswer failures on the '
- 'exportable changes. Please contact ecosystem-infra@ team for '
- 'more information.\n\n'
- 'Taskcluster Node ID: foo\n'
- 'Taskcluster Link: bar\n'
- 'Gerrit CL SHA: num\n'
- 'Patchset Number: 3')
+ pr_status_info = PRStatusInfo('bar', 123, 'num')
+ expected = self.generate_notifier_comment(123, 'bar', 'num', 3)
actual = pr_status_info.to_gerrit_comment(3)
@@ -115,7 +71,7 @@ class ExportNotifierTest(LoggingTestCase):
taskcluster_status, 123), {
"state": "failure",
"context": "Community-TC (pull_request)",
- })
+ })
def test_get_failure_taskcluster_status_fail(self):
taskcluster_status = [
@@ -130,7 +86,7 @@ class ExportNotifierTest(LoggingTestCase):
taskcluster_status, 123), None)
def test_has_latest_taskcluster_status_commented_false(self):
- pr_status_info = PRStatusInfo('foo', 'bar', 'num')
+ pr_status_info = PRStatusInfo('bar', 123, 'num')
messages = [{
"date": "2019-08-20 17:42:05.000000000",
"message": "Uploaded patch set 1.\nInitial upload",
@@ -143,7 +99,7 @@ class ExportNotifierTest(LoggingTestCase):
self.assertFalse(actual)
def test_has_latest_taskcluster_status_commented_true(self):
- pr_status_info = PRStatusInfo('foo', 'bar', 'num')
+ pr_status_info = PRStatusInfo('bar', 123, 'num')
messages = [
{
"date": "2019-08-20 17:42:05.000000000",
@@ -153,15 +109,7 @@ class ExportNotifierTest(LoggingTestCase):
{
"date":
"2019-08-21 17:41:05.000000000",
- "message":
- ('The exported PR for the current patch failed Taskcluster check(s) '
- 'on GitHub, which could indict cross-broswer failures on the '
- 'exportable changes. Please contact ecosystem-infra@ team for '
- 'more information.\n\n'
- 'Taskcluster Node ID: foo\n'
- 'Taskcluster Link: bar\n'
- 'Gerrit CL SHA: num\n'
- 'Patchset Number: 3'),
+ "message": self.generate_notifier_comment(123, 'bar', 'num', 3),
"_revision_number":
2
},
@@ -209,15 +157,7 @@ class ExportNotifierTest(LoggingTestCase):
{
"date":
"2019-08-21 17:41:05.000000000",
- "message":
- ('The exported PR for the current patch failed Taskcluster check(s) '
- 'on GitHub, which could indict cross-broswer failures on the '
- 'exportable changes. Please contact ecosystem-infra@ team for '
- 'more information.\n\n'
- 'Taskcluster Node ID: notfoo\n'
- 'Taskcluster Link: bar\n'
- 'Gerrit CL SHA: notnum\n'
- 'Patchset Number: 3'),
+ "message": self.generate_notifier_comment(123, 'notbar', 'notnum', 3),
"_revision_number":
2
},
@@ -229,16 +169,8 @@ class ExportNotifierTest(LoggingTestCase):
}
},
api=self.notifier.gerrit)
- gerrit_dict = {'abc': PRStatusInfo('foo', 'bar', 'num')}
- expected = (
- 'The exported PR for the current patch failed Taskcluster check(s) '
- 'on GitHub, which could indict cross-broswer failures on the '
- 'exportable changes. Please contact ecosystem-infra@ team for '
- 'more information.\n\n'
- 'Taskcluster Node ID: foo\n'
- 'Taskcluster Link: bar\n'
- 'Gerrit CL SHA: num\n'
- 'Patchset Number: 1')
+ gerrit_dict = {'abc': PRStatusInfo('bar', 123, 'num')}
+ expected = self.generate_notifier_comment(123, 'bar', 'num', 1)
self.notifier.process_failing_prs(gerrit_dict)
@@ -264,15 +196,7 @@ class ExportNotifierTest(LoggingTestCase):
{
"date":
"2019-08-21 17:41:05.000000000",
- "message":
- ('The exported PR for the current patch failed Taskcluster check(s) '
- 'on GitHub, which could indict cross-broswer failures on the '
- 'exportable changes. Please contact ecosystem-infra@ team for '
- 'more information.\n\n'
- 'Taskcluster Node ID: foo\n'
- 'Taskcluster Link: bar\n'
- 'Gerrit CL SHA: notnum\n'
- 'Patchset Number: 3'),
+ "message": self.generate_notifier_comment(123, 'bar', 'num', 3),
"_revision_number":
2
},
@@ -284,7 +208,7 @@ class ExportNotifierTest(LoggingTestCase):
}
},
api=self.notifier.gerrit)
- gerrit_dict = {'abc': PRStatusInfo('foo', 'bar', 'num')}
+ gerrit_dict = {'abc': PRStatusInfo('bar', 123, 'num')}
self.notifier.process_failing_prs(gerrit_dict)
@@ -307,15 +231,7 @@ class ExportNotifierTest(LoggingTestCase):
{
"date":
"2019-08-21 17:41:05.000000000",
- "message":
- ('The exported PR for the current patch failed Taskcluster check(s) '
- 'on GitHub, which could indict cross-broswer failures on the '
- 'exportable changes. Please contact ecosystem-infra@ team for '
- 'more information.\n\n'
- 'Taskcluster Node ID: not foo\n'
- 'Taskcluster Link: bar\n'
- 'Gerrit CL SHA: notnum\n'
- 'Patchset Number: 3'),
+ "message": self.generate_notifier_comment(123, 'notbar', 'notnum', 3),
"_revision_number":
2
},
@@ -327,15 +243,8 @@ class ExportNotifierTest(LoggingTestCase):
}
},
api=self.notifier.gerrit)
- expected = (
- 'The exported PR for the current patch failed Taskcluster check(s) '
- 'on GitHub, which could indict cross-broswer failures on the '
- 'exportable changes. Please contact ecosystem-infra@ team for '
- 'more information.\n\n'
- 'Taskcluster Node ID: foo\n'
- 'Taskcluster Link: bar\n'
- 'Gerrit CL SHA: Latest')
- gerrit_dict = {'abc': PRStatusInfo('foo', 'bar', None)}
+ expected = self.generate_notifier_comment(123, 'bar', 'Latest')
+ gerrit_dict = {'abc': PRStatusInfo('bar', 123, None)}
self.notifier.process_failing_prs(gerrit_dict)
@@ -348,7 +257,7 @@ class ExportNotifierTest(LoggingTestCase):
def test_process_failing_prs_raise_gerrit_error(self):
self.notifier.dry_run = False
self.notifier.gerrit = MockGerritAPI(raise_error=True)
- gerrit_dict = {'abc': PRStatusInfo('foo', 'bar', 'num')}
+ gerrit_dict = {'abc': PRStatusInfo('bar', 'num')}
self.notifier.process_failing_prs(gerrit_dict)
@@ -365,8 +274,7 @@ class ExportNotifierTest(LoggingTestCase):
PullRequest(
title='title1',
number=1234,
- body=
- 'description\nWPT-Export-Revision: hash\nChange-Id: decafbad',
+ body='description\nWPT-Export-Revision: hash\nChange-Id: decafbad',
state='open',
labels=[''])
]
@@ -393,15 +301,7 @@ class ExportNotifierTest(LoggingTestCase):
{
"date":
"2019-08-21 17:41:05.000000000",
- "message":
- ('The exported PR for the current patch failed Taskcluster check(s) '
- 'on GitHub, which could indict cross-broswer failures on the '
- 'exportable changes. Please contact ecosystem-infra@ team for '
- 'more information.\n\n'
- 'Taskcluster Node ID: notfoo\n'
- 'Taskcluster Link: bar\n'
- 'Gerrit CL SHA: notnum\n'
- 'Patchset Number: 3'),
+ "message": self.generate_notifier_comment(1234, 'notbar', 'notnum', 3),
"_revision_number":
2
},
@@ -413,15 +313,7 @@ class ExportNotifierTest(LoggingTestCase):
}
},
api=self.notifier.gerrit)
- expected = (
- 'The exported PR for the current patch failed Taskcluster check(s) '
- 'on GitHub, which could indict cross-broswer failures on the '
- 'exportable changes. Please contact ecosystem-infra@ team for '
- 'more information.\n\n'
- 'Taskcluster Node ID: foo\n'
- 'Taskcluster Link: bar\n'
- 'Gerrit CL SHA: hash\n'
- 'Patchset Number: 2')
+ expected = self.generate_notifier_comment(1234, 'bar', 'hash', 2)
exit_code = self.notifier.main()
@@ -436,3 +328,31 @@ class ExportNotifierTest(LoggingTestCase):
[('/a/changes/decafbad/revisions/current/review', {
'message': expected
})])
+
+ def generate_notifier_comment(self, pr_number, link, sha, patchset=None):
+ if patchset:
+ comment = (
+ 'The exported PR, https://github.com/web-platform-tests/wpt/pull/{}, '
+ 'has failed Taskcluster check(s) on GitHub, which could indicate '
+ 'cross-browser failures on the exported changes. Please contact '
+ 'ecosystem-infra@chromium.org for more information.\n\n'
+ 'Taskcluster Link: {}\n'
+ 'Gerrit CL SHA: {}\n'
+ 'Patchset Number: {}'
+ '\n\nAny suggestions to improve this service are welcome; '
+ 'crbug.com/1027618.').format(
+ pr_number, link, sha, patchset
+ )
+ else:
+ comment = (
+ 'The exported PR, https://github.com/web-platform-tests/wpt/pull/{}, '
+ 'has failed Taskcluster check(s) on GitHub, which could indicate '
+ 'cross-browser failures on the exported changes. Please contact '
+ 'ecosystem-infra@chromium.org for more information.\n\n'
+ 'Taskcluster Link: {}\n'
+ 'Gerrit CL SHA: {}'
+ '\n\nAny suggestions to improve this service are welcome; '
+ 'crbug.com/1027618.').format(
+ pr_number, link, sha
+ )
+ return comment
diff --git a/chromium/third_party/blink/tools/blinkpy/w3c/test_copier.py b/chromium/third_party/blink/tools/blinkpy/w3c/test_copier.py
index 00e157aa22f..03b1857b96e 100644
--- a/chromium/third_party/blink/tools/blinkpy/w3c/test_copier.py
+++ b/chromium/third_party/blink/tools/blinkpy/w3c/test_copier.py
@@ -155,14 +155,14 @@ class TestCopier(object):
w3c_import_expectations_path):
if line.is_glob:
_log.warning(
- 'W3CImportExpectations:%d Globs are not allowed in this file.'
- % line.lineno)
+ 'W3CImportExpectations:%d Globs are not allowed in this file.',
+ line.lineno)
continue
if ResultType.Skip in line.results:
if line.tags:
_log.warning(
- 'W3CImportExpectations:%d should not have any specifiers'
- % line.lineno)
+ 'W3CImportExpectations:%d should not have any specifiers',
+ line.lineno)
paths_to_skip.add(line.test)
return paths_to_skip
@@ -235,5 +235,7 @@ class TestCopier(object):
if not self.import_in_place:
self.filesystem.copyfile(source_path, dest_path)
- if self.filesystem.read_binary_file(source_path)[:2] == '#!':
+ # Fix perms: https://github.com/web-platform-tests/wpt/issues/23997
+ if self.filesystem.read_binary_file(source_path)[:2] == '#!' or \
+ self.filesystem.splitext(source_path)[1].lower() == '.bat':
self.filesystem.make_executable(dest_path)
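
The copier now grants the executable bit to .bat files as well as shebang scripts. A minimal standalone version of that test (needs_executable_bit is a made-up helper; the real code goes through the blinkpy filesystem wrapper rather than open/os):

    import os

    def needs_executable_bit(path):
        """True for files starting with '#!' and for Windows .bat files."""
        with open(path, 'rb') as f:
            has_shebang = f.read(2) == b'#!'
        return has_shebang or os.path.splitext(path)[1].lower() == '.bat'

    # e.g. needs_executable_bit('dir/run.bat') and needs_executable_bit('wpt')
    # would both report True, matching the two branches in copy_file above.
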
diff --git a/chromium/third_party/blink/tools/blinkpy/w3c/test_copier_unittest.py b/chromium/third_party/blink/tools/blinkpy/w3c/test_copier_unittest.py
index 60f526518d0..3072ea73666 100644
--- a/chromium/third_party/blink/tools/blinkpy/w3c/test_copier_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/w3c/test_copier_unittest.py
@@ -38,6 +38,7 @@ FAKE_SOURCE_REPO_DIR = '/blink'
FAKE_FILES = {
MOCK_WEB_TESTS + 'external/OWNERS': '',
+ '/blink/w3c/dir/run.bat': '',
'/blink/w3c/dir/has_shebang.txt': '#!',
'/blink/w3c/dir/README.txt': '',
'/blink/w3c/dir/OWNERS': '',
@@ -62,6 +63,9 @@ class TestCopierTest(LoggingTestCase):
copier.find_importable_tests()
self.assertEqual(copier.import_list, [{
'copy_list': [{
+ 'dest': 'run.bat',
+ 'src': '/blink/w3c/dir/run.bat'
+ }, {
'dest': 'has_shebang.txt',
'src': '/blink/w3c/dir/has_shebang.txt'
}, {
@@ -79,6 +83,9 @@ class TestCopierTest(LoggingTestCase):
copier.find_importable_tests()
self.assertEqual(copier.import_list, [{
'copy_list': [{
+ 'dest': 'run.bat',
+ 'src': '/blink/w3c/dir/run.bat'
+ }, {
'dest': 'has_shebang.txt',
'src': '/blink/w3c/dir/has_shebang.txt'
}, {
@@ -89,14 +96,17 @@ class TestCopierTest(LoggingTestCase):
'/blink/w3c/dir',
}])
- def test_files_with_shebang_are_made_executable(self):
+ def test_executable_files(self):
+ # Files with shebangs or .bat files need to be made executable.
host = MockHost()
host.filesystem = MockFileSystem(files=FAKE_FILES)
copier = TestCopier(host, FAKE_SOURCE_REPO_DIR)
copier.do_import()
self.assertEqual(
- host.filesystem.executable_files,
- set([MOCK_WEB_TESTS + 'external/blink/w3c/dir/has_shebang.txt']))
+ host.filesystem.executable_files, {
+ MOCK_WEB_TESTS + 'external/blink/w3c/dir/run.bat',
+ MOCK_WEB_TESTS + 'external/blink/w3c/dir/has_shebang.txt'
+ })
def test_ref_test_with_ref_is_copied(self):
host = MockHost()
diff --git a/chromium/third_party/blink/tools/blinkpy/w3c/test_exporter.py b/chromium/third_party/blink/tools/blinkpy/w3c/test_exporter.py
index 89afaf417ef..fb49050784f 100644
--- a/chromium/third_party/blink/tools/blinkpy/w3c/test_exporter.py
+++ b/chromium/third_party/blink/tools/blinkpy/w3c/test_exporter.py
@@ -31,6 +31,7 @@ class TestExporter(object):
self.gerrit = None
self.dry_run = False
self.local_wpt = None
+ self.surface_failures_to_gerrit = False
def main(self, argv=None):
"""Creates PRs for in-flight CLs and merges changes that land on master.
@@ -41,11 +42,13 @@ class TestExporter(object):
options = self.parse_args(argv)
self.dry_run = options.dry_run
+ self.surface_failures_to_gerrit = options.surface_failures_to_gerrit
log_level = logging.DEBUG if options.verbose else logging.INFO
configure_logging(logging_level=log_level, include_time=True)
- if options.verbose:
- # Print out the full output when executive.run_command fails.
- self.host.executive.error_output_limit = None
+
+ # Having the full output when executive.run_command fails is useful when
+ # investigating a failed export, as all we have are logs.
+ self.host.executive.error_output_limit = None
credentials = read_credentials(self.host, options.credentials_json)
if not (credentials.get('GH_USER') and credentials.get('GH_TOKEN')):
@@ -96,7 +99,7 @@ class TestExporter(object):
_log.info('Automatic export process has finished successfully.')
export_notifier_failure = False
- if options.surface_failures_to_gerrit:
+ if self.surface_failures_to_gerrit:
_log.info('Starting surfacing cross-browser failures to Gerrit.')
export_notifier_failure = ExportNotifier(
self.host, self.wpt_github, self.gerrit, self.dry_run).main()
@@ -299,6 +302,12 @@ class TestExporter(object):
provisional=True,
pr_number=pull_request.number,
pr_footer=footer)
+
+ # When surface_failures_to_gerrit is enabled, the pull request update comment below
+ # is ignored.
+ if self.surface_failures_to_gerrit:
+ return
+
if pr_number is None:
return
diff --git a/chromium/third_party/blink/tools/blinkpy/w3c/test_importer.py b/chromium/third_party/blink/tools/blinkpy/w3c/test_importer.py
index 034b6780b0a..115c7d2ffe8 100644
--- a/chromium/third_party/blink/tools/blinkpy/w3c/test_importer.py
+++ b/chromium/third_party/blink/tools/blinkpy/w3c/test_importer.py
@@ -38,8 +38,8 @@ POLL_DELAY_SECONDS = 2 * 60
TIMEOUT_SECONDS = 210 * 60
# Sheriff calendar URL, used for getting the ecosystem infra sheriff to TBR.
-ROTATIONS_URL = 'https://rota-ng.appspot.com/legacy/all_rotations.js'
-TBR_FALLBACK = 'robertma'
+ROTATIONS_URL = 'https://chrome-ops-rotation-proxy.appspot.com/current/grotation:chrome-ecosystem-infra'
+TBR_FALLBACK = 'robertma@google.com'
_log = logging.getLogger(__file__)
@@ -70,6 +70,9 @@ class TestImporter(object):
self.new_test_expectations = {}
self.verbose = False
+ args = ['--clean-up-affected-tests-only']
+ self._expectations_updater = WPTExpectationsUpdater(self.host, args)
+
def main(self, argv=None):
# TODO(robertma): Test this method! Split it to make it easier to test
# if necessary.
@@ -79,9 +82,10 @@ class TestImporter(object):
self.verbose = options.verbose
log_level = logging.DEBUG if self.verbose else logging.INFO
configure_logging(logging_level=log_level, include_time=True)
- if options.verbose:
- # Print out the full output when executive.run_command fails.
- self.host.executive.error_output_limit = None
+
+ # Having the full output when executive.run_command fails is useful when
+ # investigating a failed import, as all we have are logs.
+ self.executive.error_output_limit = None
if options.auto_update and options.auto_upload:
_log.error(
@@ -155,10 +159,9 @@ class TestImporter(object):
# TODO(crbug.com/800570 robertma): Re-enable it once we fix the bug.
# self._delete_orphaned_baselines()
- _log.info(
- 'Updating TestExpectations for any removed or renamed tests.')
- self.update_all_test_expectations_files(self._list_deleted_tests(),
- self._list_renamed_tests())
+ # Remove expectations for tests that were deleted and rename tests
+ # in expectations for renamed tests.
+ self._expectations_updater.cleanup_test_expectations_files()
if not self.chromium_git.has_working_directory_changes():
_log.info('Done: no changes to import.')
@@ -561,42 +564,34 @@ class TestImporter(object):
return '\n'.join(message_lines)
def tbr_reviewer(self):
- """Returns the user name or email address to use as the reviewer.
+ """Returns the email address to use as the reviewer.
This tries to fetch the current ecosystem infra sheriff, but falls back
in case of error.
-
- Either a user name (which is assumed to have a chromium.org email
- address) or a full email address (for other cases) is returned.
"""
- username = ''
+ email = ''
try:
- username = self._fetch_ecosystem_infra_sheriff_username()
+ email = self._fetch_ecosystem_infra_sheriff_email()
except (IOError, KeyError, ValueError) as error:
_log.error('Exception while fetching current sheriff: %s', error)
- if username in ['kyleju']:
- _log.warning('Cannot TBR by %s: not a committer', username)
- username = ''
- return username or TBR_FALLBACK
+ if email in ['kyleju@google.com']:
+ _log.warning('Cannot TBR by %s: not a committer', email)
+ email = ''
+ return email or TBR_FALLBACK
- def _fetch_ecosystem_infra_sheriff_username(self):
+ def _fetch_ecosystem_infra_sheriff_email(self):
try:
content = self.host.web.get_binary(ROTATIONS_URL)
except NetworkTimeout:
_log.error('Cannot fetch %s', ROTATIONS_URL)
return ''
data = json.loads(content)
- today = datetime.date.fromtimestamp(self.host.time()).isoformat()
- index = data['rotations'].index('ecosystem_infra')
- calendar = data['calendar']
- for entry in calendar:
- if entry['date'] == today:
- if not entry['participants'][index]:
- _log.info('No sheriff today.')
- return ''
- return entry['participants'][index][0]
- _log.error('No entry found for date %s in rotations table.', today)
- return ''
+ if not data.get('emails'):
+ _log.error(
+ 'No email found for current sheriff. Retrieved content: %s',
+ content)
+ return ''
+ return data['emails'][0]
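
For reference, a minimal sketch of the payload the new rotation-proxy endpoint is expected to return and how the email is pulled out of it; the field values mirror the unit tests further below and are illustrative, not the live service's full response.

    import json

    # Illustrative response body; the real proxy may include more fields.
    content = json.dumps({
        'emails': ['current-sheriff@chromium.org'],
        'updated_unix_timestamp': '1591108191',
    })

    data = json.loads(content)
    email = data['emails'][0] if data.get('emails') else ''
    # An empty result falls back to TBR_FALLBACK ('robertma@google.com').
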
def fetch_new_expectations_and_baselines(self):
"""Modifies expectation lines and baselines based on try job results.
@@ -608,91 +603,8 @@ class TestImporter(object):
This is the same as invoking the `wpt-update-expectations` script.
"""
_log.info('Adding test expectations lines to TestExpectations.')
- expectation_updater = WPTExpectationsUpdater(self.host)
- self.rebaselined_tests, self.new_test_expectations = expectation_updater.update_expectations(
- )
-
- def update_all_test_expectations_files(self, deleted_tests, renamed_tests):
- """Updates all test expectations files for tests that have been deleted or renamed.
-
- This is only for deleted or renamed tests in the initial import,
- not for tests that have failures in try jobs.
- """
- port = self.host.port_factory.get()
- for path, file_contents in port.all_expectations_dict().iteritems():
- self._update_single_test_expectations_file(
- port, path, file_contents, deleted_tests, renamed_tests)
-
- def _update_single_test_expectations_file(self, port, path, file_contents,
- deleted_tests, renamed_tests):
- """Updates a single test expectations file."""
- test_expectations = TestExpectations(
- port, expectations_dict={path: file_contents})
-
- new_lines = []
- for line in test_expectations.get_updated_lines(path):
- # if a test is a glob type expectation or empty line or comment then add it to the updated
- # expectations file without modifications
- if not line.test or line.is_glob:
- new_lines.append(line.to_string())
- continue
- test_name = line.test
- if self.finder.is_webdriver_test_path(test_name):
- root_test_file, subtest_suffix = port.split_webdriver_test_name(
- test_name)
- else:
- root_test_file = test_name
- if root_test_file in deleted_tests:
- continue
- if root_test_file in renamed_tests:
- if self.finder.is_webdriver_test_path(root_test_file):
- renamed_test = renamed_tests[root_test_file]
- test_name = port.add_webdriver_subtest_suffix(
- renamed_test, subtest_suffix)
- else:
- test_name = renamed_tests[root_test_file]
- line.test = test_name
- new_lines.append(line.to_string())
- self.host.filesystem.write_text_file(path, '\n'.join(new_lines) + '\n')
-
- def _list_deleted_tests(self):
- """List of web tests that have been deleted."""
- # TODO(robertma): Improve Git.changed_files so that we can use it here.
- out = self.chromium_git.run([
- 'diff', 'origin/master', '-M100%', '--diff-filter=D', '--name-only'
- ])
- deleted_tests = []
- for path in out.splitlines():
- test = self._relative_to_web_test_dir(path)
- if test:
- deleted_tests.append(test)
- return deleted_tests
-
- def _list_renamed_tests(self):
- """Lists tests that have been renamed.
-
- Returns a dict mapping source name to destination name.
- """
- out = self.chromium_git.run([
- 'diff', 'origin/master', '-M100%', '--diff-filter=R',
- '--name-status'
- ])
- renamed_tests = {}
- for line in out.splitlines():
- _, source_path, dest_path = line.split()
- source_test = self._relative_to_web_test_dir(source_path)
- dest_test = self._relative_to_web_test_dir(dest_path)
- if source_test and dest_test:
- renamed_tests[source_test] = dest_test
- return renamed_tests
-
- def _relative_to_web_test_dir(self, path_relative_to_repo_root):
- """Returns a path that's relative to the web tests directory."""
- abs_path = self.finder.path_from_chromium_base(
- path_relative_to_repo_root)
- if not abs_path.startswith(self.finder.web_tests_dir()):
- return None
- return self.fs.relpath(abs_path, self.finder.web_tests_dir())
+ self.rebaselined_tests, self.new_test_expectations = (
+ self._expectations_updater.update_expectations())
def _get_last_imported_wpt_revision(self):
"""Finds the last imported WPT revision."""
diff --git a/chromium/third_party/blink/tools/blinkpy/w3c/test_importer_unittest.py b/chromium/third_party/blink/tools/blinkpy/w3c/test_importer_unittest.py
index f4f4c7f4538..0153d8ef046 100644
--- a/chromium/third_party/blink/tools/blinkpy/w3c/test_importer_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/w3c/test_importer_unittest.py
@@ -24,13 +24,21 @@ from blinkpy.w3c.test_importer import TestImporter, ROTATIONS_URL, TBR_FALLBACK
from blinkpy.w3c.wpt_github_mock import MockWPTGitHub
from blinkpy.w3c.wpt_manifest import BASE_MANIFEST_NAME
from blinkpy.web_tests.builder_list import BuilderList
+from blinkpy.web_tests.port.android import PRODUCTS_TO_EXPECTATION_FILE_PATHS
MOCK_WEB_TESTS = '/mock-checkout/' + RELATIVE_WEB_TESTS
class TestImporterTest(LoggingTestCase):
- def test_update_expectations_for_cl_no_results(self):
+
+ def mock_host(self):
host = MockHost()
+ for path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values():
+ host.filesystem.write_text_file(path, '')
+ return host
+
+ def test_update_expectations_for_cl_no_results(self):
+ host = self.mock_host()
host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'W3CImportExpectations', '')
importer = TestImporter(host)
@@ -44,7 +52,7 @@ class TestImporterTest(LoggingTestCase):
self.assertEqual(importer.git_cl.calls[-1], ['git', 'cl', 'set-close'])
def test_update_expectations_for_cl_closed_cl(self):
- host = MockHost()
+ host = self.mock_host()
host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'W3CImportExpectations', '')
importer = TestImporter(host)
@@ -62,7 +70,7 @@ class TestImporterTest(LoggingTestCase):
])
def test_update_expectations_for_cl_all_jobs_pass(self):
- host = MockHost()
+ host = self.mock_host()
host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'W3CImportExpectations', '')
importer = TestImporter(host)
@@ -80,7 +88,7 @@ class TestImporterTest(LoggingTestCase):
self.assertTrue(success)
def test_update_expectations_for_cl_fail_but_no_changes(self):
- host = MockHost()
+ host = self.mock_host()
host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'W3CImportExpectations', '')
importer = TestImporter(host)
@@ -99,7 +107,7 @@ class TestImporterTest(LoggingTestCase):
])
def test_run_commit_queue_for_cl_pass(self):
- host = MockHost()
+ host = self.mock_host()
host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'W3CImportExpectations', '')
importer = TestImporter(host)
@@ -128,7 +136,7 @@ class TestImporterTest(LoggingTestCase):
])
def test_run_commit_queue_for_cl_fail_cq(self):
- host = MockHost()
+ host = self.mock_host()
host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'W3CImportExpectations', '')
importer = TestImporter(host)
@@ -157,7 +165,7 @@ class TestImporterTest(LoggingTestCase):
])
def test_run_commit_queue_for_cl_fail_to_land(self):
- host = MockHost()
+ host = self.mock_host()
host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'W3CImportExpectations', '')
importer = TestImporter(host)
@@ -188,7 +196,7 @@ class TestImporterTest(LoggingTestCase):
])
def test_run_commit_queue_for_cl_closed_cl(self):
- host = MockHost()
+ host = self.mock_host()
host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'W3CImportExpectations', '')
importer = TestImporter(host)
@@ -213,7 +221,7 @@ class TestImporterTest(LoggingTestCase):
def test_run_commit_queue_for_cl_timeout(self):
# This simulates the case where we time out while waiting for try jobs.
- host = MockHost()
+ host = self.mock_host()
importer = TestImporter(host)
importer.git_cl = MockGitCL(host, time_out=True)
success = importer.run_commit_queue_for_cl()
@@ -229,7 +237,7 @@ class TestImporterTest(LoggingTestCase):
# Here we simulate a case where we timeout waiting for the CQ to submit a
# CL because we miss the notification that it was merged. We then get an
# error when trying to close the CL because it's already been merged.
- host = MockHost()
+ host = self.mock_host()
host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'W3CImportExpectations', '')
importer = TestImporter(host)
@@ -271,7 +279,7 @@ class TestImporterTest(LoggingTestCase):
def test_apply_exportable_commits_locally(self):
# TODO(robertma): Consider using MockLocalWPT.
- host = MockHost()
+ host = self.mock_host()
importer = TestImporter(
host, wpt_github=MockWPTGitHub(pull_requests=[]))
importer.wpt_git = MockGit(cwd='/tmp/wpt', executive=host.executive)
@@ -315,7 +323,7 @@ class TestImporterTest(LoggingTestCase):
[['Applying patch 14fd77e88e42147c57935c49d9e3b2412b8491b7']])
def test_apply_exportable_commits_locally_returns_none_on_failure(self):
- host = MockHost()
+ host = self.mock_host()
wpt_github = MockWPTGitHub(pull_requests=[])
importer = TestImporter(host, wpt_github=wpt_github)
commit = MockChromiumCommit(host, subject='My fake commit')
@@ -325,58 +333,8 @@ class TestImporterTest(LoggingTestCase):
applied = importer.apply_exportable_commits_locally(local_wpt)
self.assertIsNone(applied)
- def test_update_all_test_expectations_files(self):
- host = MockHost()
- host.filesystem.files[MOCK_WEB_TESTS + 'TestExpectations'] = (
- '# results: [ Failure ]\n'
- 'some/test/a.html [ Failure ]\n'
- 'some/test/b.html [ Failure ]\n'
- 'ignore/globs/* [ Failure ]\n'
- 'some/test/c\*.html [ Failure ]\n'
- # default test case, line below should exist in new file
- 'some/test/d.html [ Failure ]\n')
- host.filesystem.files[MOCK_WEB_TESTS + 'WebDriverExpectations'] = (
- '# results: [ Failure ]\n'
- 'external/wpt/webdriver/some/test/a\*.html>>foo\* [ Failure ]\n'
- 'external/wpt/webdriver/some/test/a\*.html>>bar [ Failure ]\n'
- 'external/wpt/webdriver/some/test/b.html>>foo [ Failure ]\n'
- 'external/wpt/webdriver/some/test/c.html>>a [ Failure ]\n'
- # default test case, line below should exist in new file
- 'external/wpt/webdriver/some/test/d.html>>foo [ Failure ]\n')
- host.filesystem.files[MOCK_WEB_TESTS + 'VirtualTestSuites'] = '[]'
- host.filesystem.files[MOCK_WEB_TESTS + 'new/a.html'] = ''
- host.filesystem.files[MOCK_WEB_TESTS + 'new/b.html'] = ''
- importer = TestImporter(host)
- deleted_tests = [
- 'some/test/b.html', 'external/wpt/webdriver/some/test/b.html'
- ]
- renamed_test_pairs = {
- 'some/test/a.html': 'new/a.html',
- 'some/test/c*.html': 'new/c*.html',
- 'external/wpt/webdriver/some/test/a*.html': 'old/a*.html',
- 'external/wpt/webdriver/some/test/c.html': 'old/c.html',
- }
- importer.update_all_test_expectations_files(deleted_tests,
- renamed_test_pairs)
- self.assertMultiLineEqual(
- host.filesystem.read_text_file(MOCK_WEB_TESTS +
- 'TestExpectations'),
- ('# results: [ Failure ]\n'
- 'new/a.html [ Failure ]\n'
- 'ignore/globs/* [ Failure ]\n'
- 'new/c\*.html [ Failure ]\n'
- 'some/test/d.html [ Failure ]\n'))
- self.assertMultiLineEqual(
- host.filesystem.read_text_file(MOCK_WEB_TESTS +
- 'WebDriverExpectations'),
- ('# results: [ Failure ]\n'
- 'old/a\*.html>>foo\* [ Failure ]\n'
- 'old/a\*.html>>bar [ Failure ]\n'
- 'old/c.html>>a [ Failure ]\n'
- 'external/wpt/webdriver/some/test/d.html>>foo [ Failure ]\n'))
-
def test_get_directory_owners(self):
- host = MockHost()
+ host = self.mock_host()
host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'W3CImportExpectations', '')
host.filesystem.write_text_file(
@@ -388,7 +346,7 @@ class TestImporterTest(LoggingTestCase):
{('someone@chromium.org', ): ['external/wpt/foo']})
def test_get_directory_owners_no_changed_files(self):
- host = MockHost()
+ host = self.mock_host()
host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'W3CImportExpectations', '')
host.filesystem.write_text_file(
@@ -400,21 +358,21 @@ class TestImporterTest(LoggingTestCase):
# Tests for protected methods - pylint: disable=protected-access
def test_commit_changes(self):
- host = MockHost()
+ host = self.mock_host()
importer = TestImporter(host)
importer._commit_changes('dummy message')
self.assertEqual(importer.chromium_git.local_commits(),
[['dummy message']])
def test_commit_message(self):
- importer = TestImporter(MockHost())
+ importer = TestImporter(self.mock_host())
self.assertEqual(
importer._commit_message('aaaa', '1111'), 'Import 1111\n\n'
'Using wpt-import in Chromium aaaa.\n\n'
'No-Export: true')
def test_cl_description_with_empty_environ(self):
- host = MockHost()
+ host = self.mock_host()
host.executive = MockExecutive(output='Last commit message\n\n')
importer = TestImporter(host)
description = importer._cl_description(directory_owners={})
@@ -432,14 +390,14 @@ class TestImporterTest(LoggingTestCase):
[['git', 'log', '-1', '--format=%B']])
def test_cl_description_moves_noexport_tag(self):
- host = MockHost()
+ host = self.mock_host()
host.executive = MockExecutive(output='Summary\n\nNo-Export: true\n\n')
importer = TestImporter(host)
description = importer._cl_description(directory_owners={})
self.assertIn('No-Export: true', description)
def test_cl_description_with_directory_owners(self):
- host = MockHost()
+ host = self.mock_host()
host.executive = MockExecutive(output='Last commit message\n\n')
importer = TestImporter(host)
description = importer._cl_description(
@@ -457,7 +415,7 @@ class TestImporterTest(LoggingTestCase):
' external/wpt/baz\n\n', description)
def test_tbr_reviewer_no_response_uses_backup(self):
- host = MockHost()
+ host = self.mock_host()
importer = TestImporter(host)
self.assertEqual(TBR_FALLBACK, importer.tbr_reviewer())
self.assertLog([
@@ -465,102 +423,65 @@ class TestImporterTest(LoggingTestCase):
'No JSON object could be decoded\n'
])
- def test_tbr_reviewer_date_not_found(self):
- host = MockHost()
- yesterday = (datetime.date.fromtimestamp(host.time()) -
- datetime.timedelta(days=1)).isoformat()
- host.web.urls[ROTATIONS_URL] = json.dumps({
- 'calendar': [
- {
- 'date': yesterday,
- 'participants': [['some-sheriff'], ['other-sheriff']],
- },
- ],
- 'rotations': ['ecosystem_infra', 'other_rotation']
- })
+ def test_tbr_reviewer_no_emails_field(self):
+ host = self.mock_host()
+ host.web.urls[ROTATIONS_URL] = json.dumps(
+ {'updated_unix_timestamp': '1591108191'})
importer = TestImporter(host)
self.assertEqual(TBR_FALLBACK, importer.tbr_reviewer())
- # Use a variable here, otherwise we get different values depending on
- # the machine's time zone settings (e.g. "1969-12-31" vs "1970-01-01").
- today = datetime.date.fromtimestamp(host.time()).isoformat()
self.assertLog([
- 'ERROR: No entry found for date %s in rotations table.\n' % today
+ 'ERROR: No email found for current sheriff. Retrieved content: %s\n'
+ % host.web.urls[ROTATIONS_URL]
])
def test_tbr_reviewer_nobody_on_rotation(self):
- host = MockHost()
- today = datetime.date.fromtimestamp(host.time()).isoformat()
+ host = self.mock_host()
host.web.urls[ROTATIONS_URL] = json.dumps({
- 'calendar': [
- {
- 'date': today,
- 'participants': [[], ['some-sheriff']],
- },
- ],
- 'rotations': ['ecosystem_infra', 'other-rotation']
+ 'emails': [],
+ 'updated_unix_timestamp':
+ '1591108191'
})
importer = TestImporter(host)
self.assertEqual(TBR_FALLBACK, importer.tbr_reviewer())
- self.assertLog(['INFO: No sheriff today.\n'])
+ self.assertLog([
+ 'ERROR: No email found for current sheriff. Retrieved content: %s\n'
+ % host.web.urls[ROTATIONS_URL]
+ ])
def test_tbr_reviewer_rotations_url_unavailable(self):
def raise_exception(*_):
raise NetworkTimeout
- host = MockHost()
+ host = self.mock_host()
host.web.get_binary = raise_exception
importer = TestImporter(host)
self.assertEqual(TBR_FALLBACK, importer.tbr_reviewer())
self.assertLog(['ERROR: Cannot fetch %s\n' % ROTATIONS_URL])
def test_tbr_reviewer(self):
- host = MockHost()
- today = datetime.date.fromtimestamp(host.time())
- yesterday = today - datetime.timedelta(days=1)
+ host = self.mock_host()
host.web.urls[ROTATIONS_URL] = json.dumps({
- 'calendar': [
- {
- 'date': yesterday.isoformat(),
- 'participants': [['other-sheriff'], ['last-sheriff']],
- },
- {
- 'date': today.isoformat(),
- 'participants': [['other-sheriff'], ['current-sheriff']],
- },
- ],
- 'rotations': ['other-rotation', 'ecosystem_infra']
+ 'emails': ['current-sheriff@chromium.org'],
+ 'updated_unix_timestamp':
+ '1591108191',
})
importer = TestImporter(host)
- self.assertEqual('current-sheriff', importer.tbr_reviewer())
- self.assertLog([])
-
- def test_tbr_reviewer_with_full_email_address(self):
- host = MockHost()
- today = datetime.date.fromtimestamp(host.time()).isoformat()
- host.web.urls[ROTATIONS_URL] = json.dumps({
- 'calendar': [
- {
- 'date': today,
- 'participants': [['external@example.com']],
- },
- ],
- 'rotations': ['ecosystem_infra']
- })
- importer = TestImporter(host)
- self.assertEqual('external@example.com', importer.tbr_reviewer())
+ self.assertEqual('current-sheriff@chromium.org',
+ importer.tbr_reviewer())
self.assertLog([])
def test_tbr_reviewer_skips_non_committer(self):
- host = MockHost()
+ host = self.mock_host()
importer = TestImporter(host)
- importer._fetch_ecosystem_infra_sheriff_username = lambda: 'kyleju'
+ importer._fetch_ecosystem_infra_sheriff_email = lambda: 'kyleju@google.com'
self.assertEqual(TBR_FALLBACK, importer.tbr_reviewer())
- self.assertLog(['WARNING: Cannot TBR by kyleju: not a committer\n'])
+ self.assertLog(
+ ['WARNING: Cannot TBR by kyleju@google.com: not a committer\n'])
def test_generate_manifest_successful_run(self):
# This test doesn't test any aspect of the real manifest script, it just
# asserts that TestImporter._generate_manifest would invoke the script.
- host = MockHost()
+ host = self.mock_host()
importer = TestImporter(host)
host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'external/wpt/MANIFEST.json', '{}')
@@ -577,7 +498,7 @@ class TestImporterTest(LoggingTestCase):
{MOCK_WEB_TESTS + 'external/' + BASE_MANIFEST_NAME})
def test_only_wpt_manifest_changed(self):
- host = MockHost()
+ host = self.mock_host()
importer = TestImporter(host)
importer.chromium_git.changed_files = lambda: [
RELATIVE_WEB_TESTS + 'external/' + BASE_MANIFEST_NAME,
@@ -592,7 +513,7 @@ class TestImporterTest(LoggingTestCase):
# variant tests.
@unittest.skip('Finding orphaned baselines is broken')
def test_delete_orphaned_baselines_basic(self):
- host = MockHost()
+ host = self.mock_host()
importer = TestImporter(host)
dest_path = importer.dest_path
host.filesystem.write_text_file(
@@ -621,7 +542,7 @@ class TestImporterTest(LoggingTestCase):
def test_delete_orphaned_baselines_worker_js_tests(self):
# This test checks that baselines for existing tests shouldn't be
# deleted, even if the test name isn't the same as the file name.
- host = MockHost()
+ host = self.mock_host()
importer = TestImporter(host)
dest_path = importer.dest_path
host.filesystem.write_text_file(
@@ -664,7 +585,7 @@ class TestImporterTest(LoggingTestCase):
self.assertTrue(host.filesystem.exists(dest_path + '/c-expected.txt'))
def test_clear_out_dest_path(self):
- host = MockHost()
+ host = self.mock_host()
importer = TestImporter(host)
dest_path = importer.dest_path
host.filesystem.write_text_file(dest_path + '/foo-test.html', '')
diff --git a/chromium/third_party/blink/tools/blinkpy/w3c/wpt_expectations_updater.py b/chromium/third_party/blink/tools/blinkpy/w3c/wpt_expectations_updater.py
index b018418dffd..21d74db77e6 100644
--- a/chromium/third_party/blink/tools/blinkpy/w3c/wpt_expectations_updater.py
+++ b/chromium/third_party/blink/tools/blinkpy/w3c/wpt_expectations_updater.py
@@ -18,7 +18,9 @@ from blinkpy.common.net.git_cl import GitCL
from blinkpy.common.path_finder import PathFinder
from blinkpy.common.system.executive import ScriptError
from blinkpy.common.system.log_utils import configure_logging
-from blinkpy.web_tests.port.android import PRODUCTS
+from blinkpy.web_tests.models.test_expectations import TestExpectations
+from blinkpy.web_tests.port.android import (
+ PRODUCTS, PRODUCTS_TO_EXPECTATION_FILE_PATHS)
_log = logging.getLogger(__name__)
@@ -38,8 +40,9 @@ class WPTExpectationsUpdater(object):
def __init__(self, host, args=None):
self.host = host
self.port = self.host.port_factory.get()
- self.git_cl = GitCL(host)
self.finder = PathFinder(self.host.filesystem)
+ self.git_cl = GitCL(host)
+ self.git = self.host.git(self.finder.chromium_base())
self.configs_with_no_results = []
self.configs_with_all_pass = []
self.patchset = None
@@ -49,15 +52,41 @@ class WPTExpectationsUpdater(object):
self.add_arguments(parser)
self.options = parser.parse_args(args or [])
+ # Set up TestExpectations instance which contains all
+ # expectations files associated with the platform.
+ expectations_dict = {p: self.host.filesystem.read_text_file(p)
+ for p in self.expectations_files()}
+ self._test_expectations = TestExpectations(
+ self.port, expectations_dict=expectations_dict)
+
+ def expectations_files(self):
+ """Returns list of expectations files.
+
+ Each expectation file in the list will be cleaned of expectations
+ for tests that were removed and will also have test names renamed
+ for tests that were renamed. Also the files may have their expectations
+ updated using builder results.
+ """
+ return (self.port.all_expectations_dict().keys() +
+ PRODUCTS_TO_EXPECTATION_FILE_PATHS.values())
+
def run(self):
"""Does required setup before calling update_expectations().
+
Do not override this function!
"""
log_level = logging.DEBUG if self.options.verbose else logging.INFO
configure_logging(logging_level=log_level, include_time=True)
self.patchset = self.options.patchset
- self.update_expectations()
+
+        # Remove expectations for deleted tests and update the names of
+        # renamed tests in the expectations files.
+ self.cleanup_test_expectations_files()
+
+ if not self.options.cleanup_test_expectations_only:
+ # Use try job results to update expectations and baselines
+ self.update_expectations()
return 0
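
A rough sketch of how the cleanup-only path can be driven, mirroring the unit tests below; MockHost and the flag name come from this patch, while the mock filesystem setup is only the minimum needed for the updater to construct its TestExpectations.

    from blinkpy.common.host_mock import MockHost
    from blinkpy.w3c.wpt_expectations_updater import WPTExpectationsUpdater
    from blinkpy.web_tests.port.android import PRODUCTS_TO_EXPECTATION_FILE_PATHS

    host = MockHost()
    # The Android expectation files must exist, as in the unit tests below.
    for path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values():
        host.filesystem.write_text_file(path, '')

    updater = WPTExpectationsUpdater(host, ['--cleanup-test-expectations-only'])
    updater.run()  # cleans up expectations files, then returns without rebaselining
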
@@ -71,6 +100,16 @@ class WPTExpectationsUpdater(object):
'--verbose',
action='store_true',
help='More verbose logging.')
+ parser.add_argument(
+ '--clean-up-affected-tests-only',
+ action='store_true',
+        help='Only clean up expectations for tests deleted or renamed in '
+        'the current CL. If this flag is not used, expectations for all '
+        'deleted or renamed test files are cleaned up.')
+ parser.add_argument(
+ '--cleanup-test-expectations-only',
+ action='store_true',
+        help='Clean up test expectations files and then exit the script.')
# TODO(rmhasan): Move this argument to the
# AndroidWPTExpectationsUpdater add_arguments implementation.
# Also look into using sub parsers to separate android and
@@ -264,7 +303,9 @@ class WPTExpectationsUpdater(object):
return test_dict
def _is_wpt_test(self, test_name):
- """In blink web tests results, each test name is relative to
+ """Check if a web test is a WPT tests.
+
+ In blink web tests results, each test name is relative to
the web_tests directory instead of the wpt directory. We
need to use the port.is_wpt_test() function to find out if a test
is from the WPT suite.
@@ -586,7 +627,7 @@ class WPTExpectationsUpdater(object):
'No lines to write to TestExpectations,'
' WebdriverExpectations or NeverFixTests.'
)
- return
+ return {}
line_list = []
wont_fix_list = []
@@ -643,6 +684,163 @@ class WPTExpectationsUpdater(object):
wont_fix_file_content)
return line_dict
+ def cleanup_test_expectations_files(self):
+ """Removes deleted tests from expectations files.
+
+        Removes expectations for deleted tests and renames test names in
+        expectations files for tests that were renamed. If the
+        --clean-up-affected-tests-only command line argument is used, only
+        tests deleted in the current CL have their expectations removed by
+        this script. Otherwise, expectations for any test file that no
+        longer exists are removed.
+ """
+ deleted_test_files = self._list_deleted_test_files()
+ renamed_test_files = self._list_renamed_test_files()
+
+ for path in self._test_expectations.expectations_dict:
+ _log.info(
+ 'Updating %s for any removed or renamed tests.' %
+ self.host.filesystem.basename(path))
+ self._clean_single_test_expectations_file(
+ path, deleted_test_files, renamed_test_files)
+ self._test_expectations.commit_changes()
+
+ def _clean_single_test_expectations_file(
+ self, path, deleted_files, renamed_files):
+ """Cleans up a single test expectations file.
+
+ Args:
+ path: Path of expectations file that is being cleaned up.
+ deleted_files: List of test file paths relative to the web tests
+ directory which were deleted.
+ renamed_files: Dictionary mapping test file paths to their new file
+ name after renaming.
+ """
+ for line in self._test_expectations.get_updated_lines(path):
+            # Leave glob expectations, empty lines and comments unchanged.
+ if not line.test or line.is_glob:
+ continue
+ root_test_file = self._get_root_file(line.test)
+
+ if root_test_file in renamed_files:
+ self._test_expectations.remove_expectations(path, [line])
+ new_file_name = renamed_files[root_test_file]
+ if self.finder.is_webdriver_test_path(root_test_file):
+ _, subtest_suffix = self.port.split_webdriver_test_name(
+ line.test)
+ line.test = self.port.add_webdriver_subtest_suffix(
+ new_file_name, subtest_suffix)
+ elif '?' in line.test:
+ line.test = (
+ new_file_name + line.test[line.test.find('?'):])
+ else:
+ line.test = new_file_name
+ self._test_expectations.add_expectations(
+ path, [line], lineno=line.lineno)
+ elif root_test_file in deleted_files:
+ self._test_expectations.remove_expectations(
+ path, [line])
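
To illustrate the query-string branch above, a small standalone sketch with hypothetical test names (matching the behaviour exercised in the unit tests below):

    old_test = 'external/wpt/fake/some_test.html?HelloWorld'
    new_file_name = 'external/wpt/fake/new.html'

    # Query arguments are carried over onto the renamed file.
    renamed = new_file_name + old_test[old_test.find('?'):]
    assert renamed == 'external/wpt/fake/new.html?HelloWorld'
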
+
+ @memoized
+ def _get_root_file(self, test_name):
+ """Strips arguments from a web test name in order to get the file name.
+
+        It also strips the subtest argument for webdriver tests. For
+        instance, for the test test1/example.html?Hello this function
+        returns test1/example.html. A webdriver test name includes its
+        argument in the format {test file}>>{argument}.
+
+ Args:
+ test_name: Test name which may include test arguments.
+
+ Returns:
+ Returns the test file which is the root of a test.
+ """
+ if self.finder.is_webdriver_test_path(test_name):
+ root_test_file, _ = (
+ self.port.split_webdriver_test_name(test_name))
+ elif '?' in test_name:
+ root_test_file = test_name[:test_name.find('?')]
+ else:
+ root_test_file = test_name
+ return root_test_file
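
The non-webdriver branch of the logic above amounts to dropping everything after the first '?'; a tiny standalone sketch (the helper name is illustrative):

    def strip_query_args(test_name):
        # Mirrors the '?' branch of _get_root_file for non-webdriver tests.
        return test_name.split('?', 1)[0]

    assert strip_query_args('test1/example.html?Hello') == 'test1/example.html'
    assert strip_query_args('dir/plain.html') == 'dir/plain.html'
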
+
+ def _list_deleted_test_files(self):
+ """Returns a list of web tests that have been deleted.
+
+ If --clean-up-affected-tests-only is true then only test files deleted
+ in the current CL may be removed from expectations. Otherwise, any test
+ file may be removed from expectations if it has been deleted.
+
+ Returns: A list of web test files that have been deleted.
+ """
+ if self.options.clean_up_affected_tests_only:
+ # TODO(robertma): Improve Git.changed_files so that we can use
+ # it here.
+ paths = set(self.git.run(
+ ['diff', 'origin/master', '-M100%', '--diff-filter=D',
+ '--name-only']).splitlines())
+ deleted_tests = set()
+ for path in paths:
+ test = self._relative_to_web_test_dir(path)
+ if test:
+ deleted_tests.add(test)
+ else:
+            # Remove expectations for all tests whose files were deleted.
+            # Paths are already relative to the web_tests directory.
+ deleted_tests = self._deleted_test_files_in_expectations()
+ return deleted_tests
+
+ def _list_renamed_test_files(self):
+ """Returns a dictionary mapping tests to their new name.
+
+        Regardless of the command line arguments used, this function only
+        returns a dictionary for tests affected in the current CL.
+
+ Returns a dictionary mapping source name to destination name.
+ """
+ out = self.git.run(
+ ['diff', 'origin/master', '-M100%', '--diff-filter=R',
+ '--name-status'])
+ renamed_tests = {}
+ for line in out.splitlines():
+ _, source_path, dest_path = line.split()
+ source_test = self._relative_to_web_test_dir(source_path)
+ dest_test = self._relative_to_web_test_dir(dest_path)
+ if source_test and dest_test:
+ renamed_tests[source_test] = dest_test
+ return renamed_tests
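
The parsing above assumes `git diff --name-status -M100%` output lines of the form `<status> <source> <dest>`; a minimal illustration with hypothetical paths:

    line = ('R100\tthird_party/blink/web_tests/external/wpt/old.html\t'
            'third_party/blink/web_tests/external/wpt/new.html')
    _, source_path, dest_path = line.split()
    # source_path and dest_path are then made relative to the web tests directory.
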
+
+ def _relative_to_web_test_dir(self, path_relative_to_repo_root):
+ """Returns a path that's relative to the web tests directory."""
+ abs_path = self.finder.path_from_chromium_base(
+ path_relative_to_repo_root)
+ if not abs_path.startswith(self.finder.web_tests_dir()):
+ return None
+ return self.host.filesystem.relpath(
+ abs_path, self.finder.web_tests_dir())
+
+ def _deleted_test_files_in_expectations(self):
+ """Returns a list of test files that were deleted.
+
+ Returns a list of test file names that are still in the expectations
+ files but no longer exists in the web tests directory.
+ """
+ deleted_files = set()
+ existing_files = {
+ self._get_root_file(p)
+ for p in self.port.tests()}
+ for path in self._test_expectations.expectations_dict:
+ for line in self._test_expectations.get_updated_lines(path):
+ if not line.test or line.is_glob:
+ continue
+ root_test_file = self._get_root_file(line.test)
+ if root_test_file not in existing_files:
+ deleted_files.add(root_test_file)
+ return deleted_files
+
# TODO(robertma): Unit test this method.
def download_text_baselines(self, test_results):
"""Fetches new baseline files for tests that should be rebaselined.
diff --git a/chromium/third_party/blink/tools/blinkpy/w3c/wpt_expectations_updater_unittest.py b/chromium/third_party/blink/tools/blinkpy/w3c/wpt_expectations_updater_unittest.py
index efd64b86949..737cc34fbe7 100644
--- a/chromium/third_party/blink/tools/blinkpy/w3c/wpt_expectations_updater_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/w3c/wpt_expectations_updater_unittest.py
@@ -12,6 +12,7 @@ from blinkpy.common.net.results_fetcher import Build
from blinkpy.common.net.results_fetcher_mock import (
MockTestResultsFetcher, BuilderStep)
from blinkpy.common.net.web_test_results import WebTestResult, WebTestResults
+from blinkpy.common.path_finder import RELATIVE_WEB_TESTS
from blinkpy.common.system.executive import ScriptError
from blinkpy.common.system.log_testing import LoggingTestCase
@@ -20,14 +21,17 @@ from blinkpy.w3c.wpt_expectations_updater import (
from blinkpy.w3c.wpt_manifest import BASE_MANIFEST_NAME
from blinkpy.web_tests.builder_list import BuilderList
+from blinkpy.web_tests.port.android import PRODUCTS_TO_EXPECTATION_FILE_PATHS
from blinkpy.web_tests.port.factory_mock import MockPortFactory
+MOCK_WEB_TESTS = '/mock-checkout/' + RELATIVE_WEB_TESTS
class WPTExpectationsUpdaterTest(LoggingTestCase):
def mock_host(self):
"""Returns a mock host with fake values set up for testing."""
host = MockHost()
host.port_factory = MockPortFactory(host)
+ host.executive._output = ''
# Set up a fake list of try builders.
host.builders = BuilderList({
@@ -87,6 +91,8 @@ class WPTExpectationsUpdaterTest(LoggingTestCase):
},
}))
+ for path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values():
+ host.filesystem.write_text_file(path, '')
return host
def test_run_single_platform_failure(self):
@@ -737,6 +743,95 @@ class WPTExpectationsUpdaterTest(LoggingTestCase):
}
})
+ def test_no_expectations_to_write(self):
+ host = self.mock_host()
+ updater = WPTExpectationsUpdater(host)
+ test_expectations = {'external/wpt/test/test1.html': {}}
+ exp_dict = updater.write_to_test_expectations(test_expectations)
+ self.assertEqual(exp_dict, {})
+ logs = ''.join(self.logMessages()).lower()
+ self.assertIn(
+ ('no lines to write to testexpectations,'
+ ' webdriverexpectations or neverfixtests.'),
+ logs)
+
+ def test_cleanup_outside_affected_expectations_in_cl(self):
+ host = self.mock_host()
+ expectations_path = \
+ host.port_factory.get().path_to_generic_test_expectations_file()
+ host.filesystem.write_text_file(
+ expectations_path,
+ '# tags: [ Linux ]\n' +
+ '# results: [ Pass Failure ]\n' +
+ WPTExpectationsUpdater.MARKER_COMMENT + '\n' +
+ '[ linux ] external/wpt/fake/some_test.html?HelloWorld [ Failure ]\n' +
+ 'external/wpt/fake/file/non_existent_file.html [ Pass ]\n' +
+ 'external/wpt/fake/file/deleted_path.html [ Pass ]\n')
+ updater = WPTExpectationsUpdater(
+ host, ['--cleanup-test-expectations-only'])
+ updater.port.tests = lambda: {
+ 'external/wpt/fake/new.html?HelloWorld'}
+
+ def _git_command_return_val(cmd):
+ if '--diff-filter=D' in cmd:
+ return 'external/wpt/fake/file/deleted_path.html'
+ if '--diff-filter=R' in cmd:
+ return 'C external/wpt/fake/some_test.html external/wpt/fake/new.html'
+ return ''
+
+ updater.git.run = _git_command_return_val
+ updater._relative_to_web_test_dir = lambda test_path: test_path
+ updater.run()
+
+ value = host.filesystem.read_text_file(expectations_path)
+ self.assertMultiLineEqual(
+ value, ('# tags: [ Linux ]\n' +
+ '# results: [ Pass Failure ]\n' +
+ WPTExpectationsUpdater.MARKER_COMMENT + '\n' +
+ '[ linux ] external/wpt/fake/new.html?HelloWorld [ Failure ]\n'))
+
+ def test_write_to_test_expectations_and_cleanup_expectations(self):
+ host = self.mock_host()
+ expectations_path = \
+ host.port_factory.get().path_to_generic_test_expectations_file()
+ host.filesystem.write_text_file(
+ expectations_path,
+ '# tags: [ Linux ]\n' +
+ '# results: [ Pass Failure ]\n' +
+ WPTExpectationsUpdater.MARKER_COMMENT + '\n' +
+ '[ linux ] external/wpt/fake/some_test.html?HelloWorld [ Failure ]\n' +
+ 'external/wpt/fake/file/deleted_path.html [ Pass ]\n')
+ updater = WPTExpectationsUpdater(
+ host, ['--clean-up-affected-tests-only'])
+
+ def _git_command_return_val(cmd):
+ if '--diff-filter=D' in cmd:
+ return 'external/wpt/fake/file/deleted_path.html'
+ if '--diff-filter=R' in cmd:
+ return 'C external/wpt/fake/some_test.html external/wpt/fake/new.html'
+ return ''
+
+ updater.git.run = _git_command_return_val
+ updater._relative_to_web_test_dir = lambda test_path: test_path
+ updater.cleanup_test_expectations_files()
+
+ test_expectations = {'external/wpt/fake/file/path.html': {
+ tuple([DesktopConfig(port_name='test-linux-trusty')]):
+ SimpleTestResult(actual='PASS', expected='', bug='crbug.com/123')}}
+ skip_path = host.port_factory.get().path_to_never_fix_tests_file()
+ skip_value_origin = host.filesystem.read_text_file(skip_path)
+
+ updater.write_to_test_expectations(test_expectations)
+ value = host.filesystem.read_text_file(expectations_path)
+ self.assertMultiLineEqual(
+ value, ('# tags: [ Linux ]\n' +
+ '# results: [ Pass Failure ]\n' +
+ WPTExpectationsUpdater.MARKER_COMMENT + '\n' +
+ 'crbug.com/123 [ Trusty ] external/wpt/fake/file/path.html [ Pass ]\n' +
+ '[ linux ] external/wpt/fake/new.html?HelloWorld [ Failure ]\n'))
+ skip_value = host.filesystem.read_text_file(skip_path)
+ self.assertMultiLineEqual(skip_value, skip_value_origin)
+
def test_write_to_test_expectations_with_marker_comment(self):
host = self.mock_host()
expectations_path = \
@@ -799,8 +894,10 @@ class WPTExpectationsUpdaterTest(LoggingTestCase):
host = self.mock_host()
expectations_path = \
host.port_factory.get().path_to_generic_test_expectations_file()
+ raw_exps = '# tags: [ Trusty ]\n# results: [ Pass Failure ]\n'
host.filesystem.write_text_file(
expectations_path,
+ raw_exps + '\n' +
'crbug.com/111 [ Trusty ] foo/bar.html [ Failure ]\n')
updater = WPTExpectationsUpdater(host)
test_expectations = {'external/wpt/fake/file/path.html': {
@@ -813,7 +910,8 @@ class WPTExpectationsUpdaterTest(LoggingTestCase):
value = host.filesystem.read_text_file(expectations_path)
self.assertMultiLineEqual(
- value, ('crbug.com/111 [ Trusty ] foo/bar.html [ Failure ]\n'
+ value, (raw_exps + '\n' +
+ 'crbug.com/111 [ Trusty ] foo/bar.html [ Failure ]\n'
'\n' + WPTExpectationsUpdater.MARKER_COMMENT + '\n'
'crbug.com/123 [ Trusty ] external/wpt/fake/file/path.html [ Pass ]'))
skip_value = host.filesystem.read_text_file(skip_path)
@@ -823,8 +921,11 @@ class WPTExpectationsUpdaterTest(LoggingTestCase):
host = self.mock_host()
expectations_path = \
host.port_factory.get().path_to_generic_test_expectations_file()
+ raw_exps = '# tags: [ Trusty ]\n# results: [ Pass ]\n'
host.filesystem.write_text_file(
- expectations_path, WPTExpectationsUpdater.MARKER_COMMENT + '\n' +
+ expectations_path,
+ raw_exps + '\n' +
+ WPTExpectationsUpdater.MARKER_COMMENT + '\n' +
'crbug.com/123 [ Trusty ] fake/file/path.html [ Pass ]\n')
skip_path = host.port_factory.get().path_to_never_fix_tests_file()
skip_value_origin = host.filesystem.read_text_file(skip_path)
@@ -834,7 +935,9 @@ class WPTExpectationsUpdaterTest(LoggingTestCase):
value = host.filesystem.read_text_file(expectations_path)
self.assertMultiLineEqual(
- value, WPTExpectationsUpdater.MARKER_COMMENT + '\n' +
+ value,
+ raw_exps + '\n' +
+ WPTExpectationsUpdater.MARKER_COMMENT + '\n' +
'crbug.com/123 [ Trusty ] fake/file/path.html [ Pass ]\n')
skip_value = host.filesystem.read_text_file(skip_path)
self.assertMultiLineEqual(skip_value, skip_value_origin)
@@ -844,14 +947,16 @@ class WPTExpectationsUpdaterTest(LoggingTestCase):
expectations_path = \
host.port_factory.get().path_to_generic_test_expectations_file()
skip_path = host.port_factory.get().path_to_never_fix_tests_file()
-
+ raw_exps = '# tags: [ Trusty ]\n# results: [ Skip ]\n'
test_expectations = {'external/wpt/fake/file/path-manual.html': {
tuple([DesktopConfig(port_name='test-linux-trusty')]):
SimpleTestResult(actual='TIMEOUT', expected={}, bug='')}}
host.filesystem.write_text_file(expectations_path,
WPTExpectationsUpdater.MARKER_COMMENT + '\n')
host.filesystem.write_text_file(
- skip_path, '[ Trusty ] external/wpt/fake/file/path-manual.html [ Skip ]\n')
+ skip_path,
+ raw_exps +
+ '\n[ Trusty ] external/wpt/fake/file/path-manual.html [ Skip ]\n')
updater = WPTExpectationsUpdater(host)
updater.write_to_test_expectations(test_expectations)
@@ -860,7 +965,9 @@ class WPTExpectationsUpdaterTest(LoggingTestCase):
skip_value = host.filesystem.read_text_file(skip_path)
self.assertMultiLineEqual(expectations_value, WPTExpectationsUpdater.MARKER_COMMENT + '\n')
self.assertMultiLineEqual(
- skip_value, '[ Trusty ] external/wpt/fake/file/path-manual.html [ Skip ]\n'
+ skip_value,
+ raw_exps + '\n'
+ '[ Trusty ] external/wpt/fake/file/path-manual.html [ Skip ]\n'
'[ Trusty ] external/wpt/fake/file/path-manual.html [ Skip ]\n')
def test_write_to_test_expectations_without_newline(self):
@@ -871,10 +978,13 @@ class WPTExpectationsUpdaterTest(LoggingTestCase):
test_expectations = {'external/wpt/fake/file/path-manual.html': {
tuple([DesktopConfig(port_name='test-linux-trusty')]):
SimpleTestResult(actual='TIMEOUT', expected={}, bug='')}}
- host.filesystem.write_text_file(expectations_path,
- WPTExpectationsUpdater.MARKER_COMMENT + '\n')
+ raw_exps = '# tags: [ Trusty ]\n# results: [ Skip ]\n'
host.filesystem.write_text_file(
- skip_path, '[ Trusty ] external/wpt/fake/file/path-manual.html [ Skip ]')
+ expectations_path,
+ WPTExpectationsUpdater.MARKER_COMMENT + '\n')
+ host.filesystem.write_text_file(
+ skip_path,
+ raw_exps + '\n[ Trusty ] external/wpt/fake/file/path-manual.html [ Skip ]')
updater = WPTExpectationsUpdater(host)
updater.write_to_test_expectations(test_expectations)
@@ -883,7 +993,8 @@ class WPTExpectationsUpdaterTest(LoggingTestCase):
skip_value = host.filesystem.read_text_file(skip_path)
self.assertMultiLineEqual(expectations_value, WPTExpectationsUpdater.MARKER_COMMENT + '\n')
self.assertMultiLineEqual(
- skip_value, '[ Trusty ] external/wpt/fake/file/path-manual.html [ Skip ]\n'
+ skip_value,
+ raw_exps + '\n[ Trusty ] external/wpt/fake/file/path-manual.html [ Skip ]\n'
'[ Trusty ] external/wpt/fake/file/path-manual.html [ Skip ]\n')
def test_is_reference_test_given_testharness_test(self):
@@ -1087,6 +1198,99 @@ class WPTExpectationsUpdaterTest(LoggingTestCase):
]
})
+ def test_cleanup_all_deleted_tests_in_expectations_files(self):
+ host = MockHost()
+
+ host.filesystem.files[MOCK_WEB_TESTS + 'TestExpectations'] = (
+ '# results: [ Failure ]\n'
+ 'some/test/a.html?hello%20world [ Failure ]\n'
+ 'some/test/b.html [ Failure ]\n'
+ '# line below should exist in new file\n'
+ 'some/test/d.html [ Failure ]\n')
+ host.filesystem.files[MOCK_WEB_TESTS + 'VirtualTestSuites'] = '[]'
+ host.filesystem.files[MOCK_WEB_TESTS + 'new/a.html'] = ''
+ host.filesystem.files[MOCK_WEB_TESTS + 'new/b.html'] = ''
+ # TODO(rmhasan): Remove creation of Android files within
+ # tests.
+ for path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values():
+ host.filesystem.write_text_file(path, '')
+
+ updater = WPTExpectationsUpdater(host)
+
+ def _git_command_return_val(cmd):
+ if '--diff-filter=D' in cmd:
+ return 'some/test/b.html'
+ return ''
+
+ updater.git.run = _git_command_return_val
+ updater.port.tests = lambda: ['some/test/d.html']
+ updater._relative_to_web_test_dir = lambda test_path: test_path
+ updater.cleanup_test_expectations_files()
+ self.assertMultiLineEqual(
+ host.filesystem.read_text_file(MOCK_WEB_TESTS +
+ 'TestExpectations'),
+ ('# results: [ Failure ]\n'
+ '# line below should exist in new file\n'
+ 'some/test/d.html [ Failure ]\n'))
+
+ def test_cleanup_all_test_expectations_files(self):
+ host = MockHost()
+ host.filesystem.files[MOCK_WEB_TESTS + 'TestExpectations'] = (
+ '# results: [ Failure ]\n'
+ 'some/test/a.html [ Failure ]\n'
+ 'some/test/b.html [ Failure ]\n'
+ 'ignore/globs/* [ Failure ]\n'
+ 'some/test/c\*.html [ Failure ]\n'
+ # default test case, line below should exist in new file
+ 'some/test/d.html [ Failure ]\n')
+ host.filesystem.files[MOCK_WEB_TESTS + 'WebDriverExpectations'] = (
+ '# results: [ Failure ]\n'
+ 'external/wpt/webdriver/some/test/a\*.html>>foo\* [ Failure ]\n'
+ 'external/wpt/webdriver/some/test/a\*.html>>bar [ Failure ]\n'
+ 'external/wpt/webdriver/some/test/b.html>>foo [ Failure ]\n'
+ 'external/wpt/webdriver/some/test/c.html>>a [ Failure ]\n'
+ # default test case, line below should exist in new file
+ 'external/wpt/webdriver/some/test/d.html>>foo [ Failure ]\n')
+ host.filesystem.files[MOCK_WEB_TESTS + 'VirtualTestSuites'] = '[]'
+ host.filesystem.files[MOCK_WEB_TESTS + 'new/a.html'] = ''
+ host.filesystem.files[MOCK_WEB_TESTS + 'new/b.html'] = ''
+
+ # TODO(rmhasan): Remove creation of Android files within
+ # tests.
+ for path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values():
+ host.filesystem.write_text_file(path, '')
+
+ updater = WPTExpectationsUpdater(
+ host, ['--clean-up-affected-tests-only'])
+ deleted_files = [
+ 'some/test/b.html', 'external/wpt/webdriver/some/test/b.html'
+ ]
+ renamed_file_pairs = {
+ 'some/test/a.html': 'new/a.html',
+ 'some/test/c*.html': 'new/c*.html',
+ 'external/wpt/webdriver/some/test/a*.html': 'old/a*.html',
+ 'external/wpt/webdriver/some/test/c.html': 'old/c.html',
+ }
+ updater._list_deleted_test_files = lambda: deleted_files
+ updater._list_renamed_test_files = lambda: renamed_file_pairs
+ updater.cleanup_test_expectations_files()
+ self.assertMultiLineEqual(
+ host.filesystem.read_text_file(MOCK_WEB_TESTS +
+ 'TestExpectations'),
+ ('# results: [ Failure ]\n'
+ 'new/a.html [ Failure ]\n'
+ 'ignore/globs/* [ Failure ]\n'
+ 'new/c\*.html [ Failure ]\n'
+ 'some/test/d.html [ Failure ]\n'))
+ self.assertMultiLineEqual(
+ host.filesystem.read_text_file(MOCK_WEB_TESTS +
+ 'WebDriverExpectations'),
+ ('# results: [ Failure ]\n'
+ 'old/a\*.html>>foo\* [ Failure ]\n'
+ 'old/a\*.html>>bar [ Failure ]\n'
+ 'old/c.html>>a [ Failure ]\n'
+ 'external/wpt/webdriver/some/test/d.html>>foo [ Failure ]\n'))
+
def test_merging_platforms_if_possible(self):
host = self.mock_host()
updater = WPTExpectationsUpdater(host)
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/builder_list.py b/chromium/third_party/blink/tools/blinkpy/web_tests/builder_list.py
index 56d3635e4d2..76b11a3bc91 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/builder_list.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/builder_list.py
@@ -117,6 +117,9 @@ class BuilderList(object):
def specifiers_for_builder(self, builder_name):
return self._builders[builder_name]['specifiers']
+ def is_try_server_builder(self, builder_name):
+ return self._builders[builder_name].get('is_try_builder', False)
+
def platform_specifier_for_builder(self, builder_name):
return self.specifiers_for_builder(builder_name)[0]
@@ -150,7 +153,7 @@ class BuilderList(object):
return builder_info['specifiers'][0]
return None
- def builder_name_for_specifiers(self, version, build_type):
+ def builder_name_for_specifiers(self, version, build_type, is_try_builder):
"""Returns the builder name for a give version and build type.
Args:
@@ -161,8 +164,10 @@ class BuilderList(object):
The builder name if found, or an empty string if no match was found.
"""
for builder_name, info in sorted(self._builders.items()):
- specifiers = info['specifiers']
- if (specifiers[0].lower() == version.lower()
- and specifiers[1].lower() == build_type.lower()):
+ specifiers = set(spec.lower() for spec in info['specifiers'])
+ is_try_builder_info = info.get('is_try_builder', False)
+ if (version.lower() in specifiers
+ and build_type.lower() in specifiers
+ and is_try_builder_info == is_try_builder):
return builder_name
return ''
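
A small sketch of the new lookup with a hypothetical builder entry; the keys shown are modeled on the mock builder lists used in these tests, and only 'specifiers' and 'is_try_builder' matter for this method.

    from blinkpy.web_tests.builder_list import BuilderList

    builders = BuilderList({
        'MOCK Try Win10': {
            'port_name': 'test-win-win10',
            'specifiers': ['Win10', 'Release'],
            'is_try_builder': True,
        },
    })
    # Specifiers now match as a case-insensitive set, and the try/CI flag must agree.
    assert builders.builder_name_for_specifiers('win10', 'release', True) == 'MOCK Try Win10'
    assert builders.builder_name_for_specifiers('win10', 'release', False) == ''
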
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/manager.py b/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/manager.py
index 2e0df759d83..693a95d1fef 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/manager.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/manager.py
@@ -47,6 +47,7 @@ from blinkpy.common import path_finder
from blinkpy.common.net.file_uploader import FileUploader
from blinkpy.common.path_finder import PathFinder
from blinkpy.tool import grammar
+from blinkpy.web_tests.controllers.test_result_sink import CreateTestResultSink
from blinkpy.web_tests.controllers.web_test_finder import WebTestFinder
from blinkpy.web_tests.controllers.web_test_runner import WebTestRunner
from blinkpy.web_tests.layout_package import json_results_generator
@@ -91,9 +92,11 @@ class Manager(object):
self._artifacts_directory = self._port.artifacts_directory()
self._finder = WebTestFinder(self._port, self._options)
self._path_finder = PathFinder(port.host.filesystem)
+
+ sink = CreateTestResultSink(self._port)
self._runner = WebTestRunner(self._options, self._port, self._printer,
self._results_directory,
- self._test_is_slow)
+ self._test_is_slow, sink)
def run(self, args):
"""Runs the tests and return a RunDetails object with the results."""
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/test_result_sink.py b/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/test_result_sink.py
new file mode 100644
index 00000000000..89537052be9
--- /dev/null
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/test_result_sink.py
@@ -0,0 +1,172 @@
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""TestResultSink uploads test results and artifacts to ResultDB via ResultSink.
+
+ResultSink is a micro service that simplifies integration between ResultDB
+and domain-specific test frameworks. It runs a given test framework and uploads
+all the generated test results and artifacts to ResultDB in a progressive way.
+- APIs: https://godoc.org/go.chromium.org/luci/resultdb/proto/sink/v1
+
+TestResultSink implements methods for uploading test results and artifacts
+via ResultSink, and is activated only if LUCI_CONTEXT is present with a
+ResultSink section.
+"""
+
+import json
+import logging
+import urllib2
+
+from blinkpy.web_tests.models.typ_types import ResultType
+
+_log = logging.getLogger(__name__)
+
+
+# A map from the enum values of typ.ResultType to ResultSink.Status.
+# The enum values of ResultSink.Status can be found at
+# https://godoc.org/go.chromium.org/luci/resultdb/proto/sink/v1#pkg-variables.
+_result_type_to_sink_status = {
+ ResultType.Pass:
+ 'PASS',
+ ResultType.Failure:
+ 'FAIL',
+ # timeout is just a special case of a reason to abort a test result.
+ ResultType.Timeout:
+ 'ABORT',
+ # 'Aborted' is a web_tests-specific type given on TestResults with a device
+ # failure.
+ 'Aborted':
+ 'ABORT',
+ ResultType.Crash:
+ 'CRASH',
+ ResultType.Skip:
+ 'SKIP',
+}
+
+
+def CreateTestResultSink(port):
+ """Creates TestResultSink, if result_sink is present in LUCI_CONTEXT.
+
+ Args:
+ port: A blinkpy.web_tests.port.Port object
+ Returns:
+ TestResultSink object if result_sink section is present in LUCI_CONTEXT.
+ None, otherwise.
+ """
+ luci_ctx_path = port.host.environ.get('LUCI_CONTEXT')
+ if luci_ctx_path is None:
+ return None
+
+ with port.host.filesystem.open_text_file_for_reading(luci_ctx_path) as f:
+ sink_ctx = json.load(f).get('result_sink')
+ if sink_ctx is None:
+ return None
+
+ return TestResultSink(port, sink_ctx)
+
+
+class TestResultSink(object):
+ """A class for uploading test results and artifacts via ResultSink."""
+
+ def __init__(self, port, sink_ctx):
+ self._port = port
+ self._sink_ctx = sink_ctx
+ self._sink_url = (
+ 'http://%s/prpc/luci.resultsink.v1.Sink/ReportTestResults' %
+ self._sink_ctx['address'])
+
+ def _send(self, data):
+ req = urllib2.Request(
+ url=self._sink_url,
+ data=json.dumps(data),
+ headers={
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ 'Authorization':
+ 'ResultSink %s' % self._sink_ctx['auth_token'],
+ },
+ )
+ return urllib2.urlopen(req)
+
+ def _status(self, result):
+ """Returns the TestStatus enum value corresponding to the result type.
+
+ Args:
+ result: The TestResult object to find the status of.
+ Returns:
+ The corresponding enum value.
+ """
+ status = _result_type_to_sink_status.get(
+ 'Aborted' if result.device_failed else result.type)
+
+ assert status is not None, 'unsupported result.type %r' % result.type
+ return status
+
+ def _tags(self, result):
+ """Returns a list of tags that should be added into a given test result.
+
+ Args:
+ result: The TestResult object to generate Tags for.
+ Returns:
+ A list of {'key': 'tag-name', 'value': 'tag-value'} dicts.
+ """
+ # the message structure of the dict can be found at
+ # https://chromium.googlesource.com/infra/luci/luci-go/+/master/resultdb/proto/type/common.proto#56
+ pair = lambda k, v: {'key': k, 'value': v}
+ return [
+ pair('test_name', result.test_name),
+ pair('web_tests_device_failed', str(result.device_failed)),
+ pair('web_tests_result_type', result.type),
+ ]
+
+ def _artifacts(self, result):
+ """Returns a dict of artifacts with the absolute file paths.
+
+ Args:
+ result: The TestResult object to look for the artifacts of.
+ Returns:
+ A dict of artifacts, where the key is the artifact ID and
+ the value is a dict with the absolute file path.
+ """
+ ret = {}
+ base_dir = self._port.results_directory()
+ for name, paths in result.artifacts.artifacts.iteritems():
+ for p in paths:
+ art_id = name
+ i = 1
+ while art_id in ret:
+ art_id = '%s-%d' % (name, i)
+ i += 1
+
+ ret[art_id] = {
+ 'filePath': self._port.host.filesystem.join(base_dir, p),
+ }
+
+ return ret
+
+ def sink(self, expected, result):
+ """Reports the test result to ResultSink.
+
+ Args:
+ expected: True if the test was expected to fail and actually failed.
+ False, otherwise.
+ result: The TestResult object to report.
+ Exceptions:
+ urllib2.URLError, if there was a network connection error.
+          urllib2.HTTPError, if ResultSink responded with an error to the request.
+ """
+ # The structure and member definitions of this dict can be found at
+ # https://chromium.googlesource.com/infra/luci/luci-go/+/refs/heads/master/resultdb/proto/sink/v1/test_result.proto
+ r = {
+ 'artifacts': self._artifacts(result),
+ 'duration': '%ss' % result.total_run_time,
+ # device failures are never expected.
+ 'expected': not result.device_failed and expected,
+ 'status': self._status(result),
+ # TODO(crbug/1093659): web_tests report TestResult with the start
+ # time.
+ # 'startTime': result.start_time
+ 'tags': self._tags(result),
+ 'testId': result.test_name,
+ }
+ self._send({'testResults': [r]})
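
For orientation, a standalone sketch of the request body that sink() builds and where it is sent; the field values are illustrative and mirror the unit tests that follow.

    import json

    # Hypothetical example payload, mirroring the dict assembled in sink() above.
    request_body = json.dumps({
        'testResults': [{
            'testId': 'external/wpt/dir/test.html',
            'status': 'PASS',
            'expected': True,
            'duration': '1.25s',
            'tags': [{'key': 'web_tests_result_type', 'value': 'PASS'}],
            'artifacts': {'stdout': {'filePath': '/tmp/stdout'}},
        }],
    })
    # POSTed to http://<address>/prpc/luci.resultsink.v1.Sink/ReportTestResults
    # with the header 'Authorization: ResultSink <auth_token>'.
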
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/test_result_sink_unittest.py b/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/test_result_sink_unittest.py
new file mode 100644
index 00000000000..881b4340d59
--- /dev/null
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/test_result_sink_unittest.py
@@ -0,0 +1,135 @@
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import contextlib
+import json
+import mock
+import sys
+import unittest
+from urlparse import urlparse
+
+from blinkpy.common.host_mock import MockHost
+from blinkpy.web_tests.controllers.test_result_sink import CreateTestResultSink
+from blinkpy.web_tests.controllers.test_result_sink import TestResultSink
+from blinkpy.web_tests.models import test_results
+from blinkpy.web_tests.models.typ_types import ResultType
+
+
+class TestResultSinkTestBase(unittest.TestCase):
+ def setUp(self):
+        super(TestResultSinkTestBase, self).setUp()
+ self.port = MockHost().port_factory.get()
+
+ def luci_context(self, **section_values):
+ if not section_values:
+ return
+
+ host = self.port.host
+ f, fname = host.filesystem.open_text_tempfile()
+ json.dump(section_values, f)
+ f.close()
+ host.environ['LUCI_CONTEXT'] = f.path
+
+
+class TestCreateTestResultSink(TestResultSinkTestBase):
+ def test_without_luci_context(self):
+ self.assertIsNone(CreateTestResultSink(self.port))
+
+ def test_without_result_sink_section(self):
+ self.luci_context(app={'foo': 'bar'})
+ self.assertIsNone(CreateTestResultSink(self.port))
+
+ @mock.patch('urllib2.urlopen')
+ def test_with_result_sink_section(self, urlopen):
+ ctx = {'address': 'localhost:123', 'auth_token': 'secret'}
+ self.luci_context(result_sink=ctx)
+ r = CreateTestResultSink(self.port)
+ self.assertIsNotNone(r)
+ r.sink(True, test_results.TestResult('test'))
+
+ urlopen.assert_called_once()
+ req = urlopen.call_args[0][0]
+ self.assertEqual(urlparse(req.get_full_url()).netloc, ctx['address'])
+ self.assertEqual(req.get_header('Authorization'),
+ 'ResultSink ' + ctx['auth_token'])
+
+
+class TestResultSinkMessage(TestResultSinkTestBase):
+ """Tests ResulkSink.sink."""
+
+ def setUp(self):
+ super(TestResultSinkMessage, self).setUp()
+ patcher = mock.patch.object(TestResultSink, '_send')
+ self.mock_send = patcher.start()
+ self.addCleanup(patcher.stop)
+
+ ctx = {'address': 'localhost:123', 'auth_token': 'super-secret'}
+ self.luci_context(result_sink=ctx)
+ self.r = CreateTestResultSink(self.port)
+
+ def sink(self, expected, test_result):
+ self.r.sink(expected, test_result)
+        self.mock_send.assert_called_once()
+ return self.mock_send.call_args[0][0]['testResults'][0]
+
+ def test_sink(self):
+ tr = test_results.TestResult(test_name='test-name')
+ tr.total_run_time = 123.456
+ tr.type = ResultType.Crash
+ sent_data = self.sink(True, tr)
+
+ self.assertEqual(sent_data['testId'], 'test-name')
+ self.assertEqual(sent_data['expected'], True)
+ self.assertEqual(sent_data['status'], 'CRASH')
+ self.assertEqual(sent_data['duration'], '123.456s')
+
+ def test_device_failure(self):
+ tr = test_results.TestResult(test_name='test-name')
+ tr.type = ResultType.Failure
+ tr.device_failed = True
+ sent_data = self.sink(True, tr)
+
+ # If the device failed, 'expected' and 'status' must be False and 'ABORT'
+ self.assertEqual(sent_data['expected'], False)
+ self.assertEqual(sent_data['status'], 'ABORT')
+
+ def test_timeout(self):
+ tr = test_results.TestResult(test_name='test-name')
+ tr.type = ResultType.Timeout
+ sent_data = self.sink(True, tr)
+
+ # Timeout is considered as 'ABORT'
+ self.assertEqual(sent_data['status'], 'ABORT')
+
+ def test_artifacts(self):
+ tr = test_results.TestResult(test_name='test-name')
+ tr.artifacts.AddArtifact('test-image.png', '/tmp/test-image.png', True)
+ tr.artifacts.AddArtifact('stdout', '/tmp/stdout', True)
+
+ sent_data = self.sink(True, tr)
+ self.assertDictEqual(
+ sent_data['artifacts'], {
+ 'test-image.png': {
+ 'filePath': '/tmp/test-image.png'
+ },
+ 'stdout': {
+ 'filePath': '/tmp/stdout'
+ }
+ })
+
+ def test_artifacts_with_duplicate_paths(self):
+ tr = test_results.TestResult(test_name='test-name')
+ tr.artifacts.AddArtifact('artifact', '/tmp/foo', False)
+ tr.artifacts.AddArtifact('artifact', '/tmp/bar', False)
+
+ sent_data = self.sink(True, tr)
+ self.assertDictEqual(
+ sent_data['artifacts'], {
+ 'artifact': {
+ 'filePath': '/tmp/foo'
+ },
+ 'artifact-1': {
+ 'filePath': '/tmp/bar'
+ }
+ })
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder.py b/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder.py
index 1edb2901ce6..c12825f5378 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder.py
@@ -34,9 +34,10 @@ import math
import re
from blinkpy.web_tests.layout_package.json_results_generator import convert_times_trie_to_flat_paths
-from blinkpy.web_tests.models import test_expectations
+from blinkpy.web_tests.models import test_expectations, typ_types
from blinkpy.web_tests.models.typ_types import ResultType
from blinkpy.web_tests.port.base import Port
+from collections import OrderedDict
_log = logging.getLogger(__name__)
@@ -90,6 +91,8 @@ class WebTestFinder(object):
running_all_tests = True
test_files = filter_tests(test_files, [f.split('::') for f in filters])
+ # de-dupe the test list here before running them.
+ test_files = list(OrderedDict.fromkeys(test_files))
return (paths, test_files, running_all_tests)
def _times_trie(self):
@@ -181,7 +184,9 @@ class WebTestFinder(object):
A test may be skipped for many reasons, depending on the expectation
files and options selected. The most obvious is SKIP entries in
TestExpectations, but we also e.g. skip idlharness tests on MSAN/ASAN
- due to https://crbug.com/856601.
+ due to https://crbug.com/856601. Note that for programmatically added
+ SKIPs, this function will modify the input expectations to include the
+ SKIP expectation (but not write it to disk).
Args:
paths: the paths passed on the command-line to run_web_tests.py
@@ -192,6 +197,7 @@ class WebTestFinder(object):
"""
all_tests = set(all_tests_list)
tests_to_skip = set()
+ idlharness_skips = set()
for test in all_tests:
# We always skip idlharness tests for MSAN/ASAN, even when running
# with --no-expectations (https://crbug.com/856601). Note we will
@@ -200,6 +206,7 @@ class WebTestFinder(object):
if self._options.enable_sanitizer and Port.is_wpt_idlharness_test(
test):
tests_to_skip.update({test})
+ idlharness_skips.update({test})
continue
if self._options.no_expectations:
@@ -214,6 +221,17 @@ class WebTestFinder(object):
if self._options.skip_failing_tests and ResultType.Failure in expected_results:
tests_to_skip.update({test})
+ # Idlharness tests are skipped programmatically on MSAN/ASAN, so we have
+ # to add them to the expectations to avoid reporting unexpected skips.
+ if idlharness_skips and expectations is not None:
+ raw_expectations = '# results: [ Skip ]\n'
+ for test in idlharness_skips:
+ raw_expectations += typ_types.Expectation(
+ reason="crbug.com/856601",
+ test=test,
+ results=[ResultType.Skip]).to_string() + '\n'
+ expectations.merge_raw_expectations(raw_expectations)
+
if self._options.skipped == 'only':
tests_to_skip = all_tests - tests_to_skip
elif self._options.skipped == 'ignore':
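Two small standalone illustrations of the changes in this hunk (sketches, not the finder code itself): OrderedDict.fromkeys de-duplicates the test list while keeping the original order, and the programmatic idlharness SKIPs are merged as a raw expectations block built from typ Expectation lines.

from collections import OrderedDict

# De-duplication that preserves order, as used for the test list above.
test_files = ['a.html', 'b.html', 'a.html', 'c.html', 'b.html']
assert list(OrderedDict.fromkeys(test_files)) == ['a.html', 'b.html', 'c.html']

# The merged raw expectations block is roughly of this shape (one Skip line per
# programmatically skipped idlharness test, preceded by a results header).
raw_expectations = '# results: [ Skip ]\n'
for test in ['external/wpt/dir/idlharness.any.html']:
    raw_expectations += 'crbug.com/856601 %s [ Skip ]\n' % test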
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder_unittest.py b/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder_unittest.py
index 2a28d6b4a31..cbf48a7d642 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder_unittest.py
@@ -12,9 +12,6 @@ from blinkpy.common.host_mock import MockHost
from blinkpy.web_tests.controllers import web_test_finder
from blinkpy.web_tests.models import test_expectations
-_MOCK_ROOT = os.path.join(path_finder.get_chromium_src_dir(), 'third_party',
- 'pymock')
-sys.path.insert(0, _MOCK_ROOT)
import mock
@@ -99,7 +96,7 @@ class WebTestFinderTests(unittest.TestCase):
# Disable expectations entirely; nothing should be skipped by default.
finder._options.no_expectations = True
- tests = finder.skip_tests([], all_tests, expectations)
+ tests = finder.skip_tests([], all_tests, None)
self.assertEqual(tests, set())
def test_skip_tests_idlharness(self):
@@ -110,43 +107,68 @@ class WebTestFinderTests(unittest.TestCase):
host = MockHost()
port = host.port_factory.get('test-win-win7', None)
+ non_idlharness_test = 'external/wpt/dir1/dir2/foo.html'
+ idlharness_test_1 = 'external/wpt/dir1/dir2/idlharness.any.html'
+ idlharness_test_2 = 'external/wpt/dir1/dir2/idlharness.any.worker.html'
all_tests = [
- 'external/wpt/dir1/dir2/foo.html',
- 'external/wpt/dir1/dir2/idlharness.any.html',
- 'external/wpt/dir1/dir2/idlharness.any.worker.html',
+ non_idlharness_test,
+ idlharness_test_1,
+ idlharness_test_2,
]
# Patch port.tests() to return our tests
port.tests = lambda paths: paths or all_tests
options = optparse.Values({
- 'no_expectations': True,
+ 'no_expectations': False,
'enable_sanitizer': False,
'skipped': 'default',
+ 'skip_timeouts': False,
+ 'skip_failing_tests': False,
})
finder = web_test_finder.WebTestFinder(port, options)
# Default case; not MSAN/ASAN so should not skip anything.
- tests = finder.skip_tests([], all_tests, None)
+ expectations = test_expectations.TestExpectations(port)
+ tests = finder.skip_tests([], all_tests, expectations)
self.assertEqual(tests, set())
+ for test in all_tests:
+ self.assertTrue(
+ expectations.get_expectations(test).is_default_pass)
# MSAN/ASAN, with no paths specified explicitly, so should skip both
# idlharness tests.
+ expectations = test_expectations.TestExpectations(port)
finder._options.enable_sanitizer = True
+ tests = finder.skip_tests([], all_tests, expectations)
+ self.assertEqual(tests, set([idlharness_test_1, idlharness_test_2]))
+ self.assertTrue(
+ expectations.get_expectations(non_idlharness_test).is_default_pass)
+ self.assertEquals(
+ expectations.get_expectations(idlharness_test_1).results, {'SKIP'})
+ self.assertEquals(
+ expectations.get_expectations(idlharness_test_2).results, {'SKIP'})
+
+ # Disable expectations entirely; we should still skip the idlharness
+ # tests but shouldn't touch the expectations parameter.
+ finder._options.no_expectations = True
tests = finder.skip_tests([], all_tests, None)
- self.assertEqual(
- tests,
- set([
- 'external/wpt/dir1/dir2/idlharness.any.html',
- 'external/wpt/dir1/dir2/idlharness.any.worker.html'
- ]))
+ self.assertEqual(tests, set([idlharness_test_1, idlharness_test_2]))
# MSAN/ASAN, with one of the tests specified explicitly (and
# --skipped=default), so should skip only the unspecified test.
- tests = finder.skip_tests(
- ['external/wpt/dir1/dir2/idlharness.any.html'], all_tests, None)
- self.assertEqual(
- tests, set(['external/wpt/dir1/dir2/idlharness.any.worker.html']))
+ expectations = test_expectations.TestExpectations(port)
+ tests = finder.skip_tests([idlharness_test_1], all_tests, expectations)
+ self.assertEqual(tests, set([idlharness_test_2]))
+ # Although we will run the test because it was specified explicitly, it
+ # is still *expected* to Skip. This is consistent with how entries in
+ # TestExpectations work.
+ self.assertTrue(
+ expectations.get_expectations(non_idlharness_test).is_default_pass)
+ self.assertEquals(
+ expectations.get_expectations(idlharness_test_1).results, {'SKIP'})
+ self.assertEquals(
+ expectations.get_expectations(idlharness_test_2).results, {'SKIP'})
def test_find_fastest_tests(self):
host = MockHost()
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_runner.py b/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_runner.py
index db6e7753fc8..caad1486fcc 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_runner.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_runner.py
@@ -58,12 +58,13 @@ class TestRunInterruptedException(Exception):
class WebTestRunner(object):
def __init__(self, options, port, printer, results_directory,
- test_is_slow_fn):
+ test_is_slow_fn, result_sink):
self._options = options
self._port = port
self._printer = printer
self._results_directory = results_directory
self._test_is_slow = test_is_slow_fn
+ self._test_result_sink = result_sink
self._sharder = Sharder(self._port.split_test,
self._options.max_locked_shards)
self._filesystem = self._port.host.filesystem
@@ -224,6 +225,8 @@ class WebTestRunner(object):
result.test_name, result.type)
expectation_string = ' '.join(
self._expectations.get_expectations(result.test_name).results)
+ if self._test_result_sink:
+ self._test_result_sink.sink(expected, result)
if result.device_failed:
self._printer.print_finished_test(self._port, result, False,
@@ -349,10 +352,16 @@ class Worker(object):
if self._driver:
# When tracing we need to go through the standard shutdown path to
# ensure that the trace is recorded properly.
- if any(i in ['--trace-startup', '--trace-shutdown']
- for i in self._options.additional_driver_flag):
+ tracing_enabled = self._port.get_option(
+ 'enable_tracing') is not None or any(
+ flag.startswith(tracing_command) for tracing_command in
+ ['--trace-startup', '--trace-shutdown']
+ for flag in self._options.additional_driver_flag)
+
+ if tracing_enabled:
_log.debug('%s waiting %d seconds for %s driver to shutdown',
- self._name, self._port.driver_stop_timeout(), label)
+ self._name, self._port.driver_stop_timeout(),
+ self._name)
self._driver.stop(
timeout_secs=self._port.driver_stop_timeout())
return
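The shutdown change above broadens the old exact-match flag check: tracing now counts as enabled either when the enable_tracing port option is set or when any additional driver flag merely starts with a tracing switch. A standalone sketch of that predicate (the function name is illustrative):

def tracing_enabled(enable_tracing_option, additional_driver_flags):
    # Equivalent check: an --enable-tracing option, or any driver flag that
    # starts with --trace-startup / --trace-shutdown (argument values included).
    if enable_tracing_option is not None:
        return True
    return any(
        flag.startswith(prefix)
        for prefix in ('--trace-startup', '--trace-shutdown')
        for flag in additional_driver_flags)

assert tracing_enabled(None, ['--trace-startup=*,-blink'])
assert tracing_enabled('*', [])
assert not tracing_enabled(None, ['--enable-features=Foo'])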
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_runner_unittest.py b/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_runner_unittest.py
index 2051f0d55f2..bc8567be23d 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_runner_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_runner_unittest.py
@@ -33,6 +33,7 @@ import unittest
from blinkpy.common.host_mock import MockHost
from blinkpy.common.system.system_host_mock import MockSystemHost
from blinkpy.web_tests import run_web_tests
+from blinkpy.web_tests.controllers.test_result_sink import CreateTestResultSink
from blinkpy.web_tests.controllers.web_test_runner import WebTestRunner, Worker, Sharder, TestRunInterruptedException
from blinkpy.web_tests.models import test_expectations
from blinkpy.web_tests.models import test_failures
@@ -73,10 +74,11 @@ class FakePrinter(object):
class LockCheckingRunner(WebTestRunner):
- def __init__(self, port, options, printer, tester, http_lock):
- super(LockCheckingRunner, self).__init__(options, port, printer,
- port.results_directory(),
- lambda test_name: False)
+ def __init__(self, port, options, printer, tester, http_lock, sink):
+ super(LockCheckingRunner,
+ self).__init__(options, port, printer,
+ port.results_directory(), lambda test_name: False,
+ sink)
self._finished_list_called = False
self._tester = tester
self._should_have_http_lock = http_lock
@@ -101,7 +103,8 @@ class WebTestRunnerTests(unittest.TestCase):
host = MockHost()
port = port or host.port_factory.get(options.platform, options=options)
- return LockCheckingRunner(port, options, FakePrinter(), self, True)
+ return LockCheckingRunner(port, options, FakePrinter(), self, True,
+ CreateTestResultSink(port))
def _run_tests(self, runner, tests):
test_inputs = [TestInput(test, timeout_ms=6000) for test in tests]
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/layout_package/bot_test_expectations.py b/chromium/third_party/blink/tools/blinkpy/web_tests/layout_package/bot_test_expectations.py
index b619500c38b..1bd7b328885 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/layout_package/bot_test_expectations.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/layout_package/bot_test_expectations.py
@@ -39,6 +39,34 @@ from blinkpy.web_tests.models.typ_types import Expectation, ResultType
_log = logging.getLogger(__name__)
+class ResultsFilter(object):
+ """Results filter for cq results.
+ Filtering any build which has passing tests in retry without patch.
+ If any test passed during this retry indicates the step has failed,
+ and the results are most likely unreliable.
+
+ For results.json v4 format check ResultsJSON"""
+
+ RESULTS_COUNT_BY_TYPE = 'num_failures_by_type'
+ BUILD_NUMBERS = 'buildNumbers'
+
+ # results.json was originally designed to support
+ # multiple builders in one json file, so the builder_name
+ # is needed to figure out which builder this json file
+ # refers to (and thus where the results are stored)
+ def __init__(self, builder_name, json_dict):
+ self._builds_to_filter = self._get_builds_to_ignore(json_dict[builder_name])
+
+ def _get_builds_to_ignore(self, json_builder_summary):
+ build_count = len(json_builder_summary[self.BUILD_NUMBERS])
+ passing_retries_indices = [
+ i for i, p in enumerate(json_builder_summary[
+ self.RESULTS_COUNT_BY_TYPE][ResultType.Pass][:build_count]) if p > 0]
+ return set([json_builder_summary[self.BUILD_NUMBERS][i] for i in passing_retries_indices])
+
+ def get_build_numbers_to_ignore(self):
+ return self._builds_to_filter
+
class ResultsJSON(object):
"""Contains the contents of a results.json file.
@@ -58,14 +86,16 @@ class ResultsJSON(object):
}
}
}
+ 'buildNumbers': [],
+ 'secondsSinceEpoch': [],
+ 'chromeRevision': [],
+ 'failure_map': {}, # Map from letter code to expectation name.
+ 'num_failures_by_type': {}  # Map from result type to a list of per-build result counts.
}
- 'buildNumbers': [],
- 'secondsSinceEpoch': [],
- 'chromeRevision': [],
- 'failure_map': {} # Map from letter code to expectation name.
}
"""
TESTS_KEY = 'tests'
+ BUILD_NUMBERS = 'buildNumbers'
FAILURE_MAP_KEY = 'failure_map'
RESULTS_KEY = 'results'
EXPECTATIONS_KEY = 'expected'
@@ -100,6 +130,9 @@ class ResultsJSON(object):
def expectation_for_type(self, type_char):
return self._json[self.builder_name][self.FAILURE_MAP_KEY][type_char]
+ def build_numbers(self):
+ return self._json[self.builder_name][self.BUILD_NUMBERS]
+
# Knowing how to parse the run-length-encoded values in results.json
# is a detail of this class.
def occurances_and_type_from_result_item(self, item):
@@ -107,9 +140,15 @@ class ResultsJSON(object):
class BotTestExpectationsFactory(object):
+ # STEP_NAME is used to fetch results for CI builders, and retry-without-patch
+ # results for CQ builders.
+ # STEP_NAME_TRY is used to fetch the with-patch CQ results.
+ STEP_NAME = 'blink_web_tests'
+ STEP_NAME_TRY = 'blink_web_tests (with patch)'
RESULTS_URL_FORMAT = (
- 'https://test-results.appspot.com/testfile?testtype=blink_web_tests'
- '&name=results-small.json&master=%s&builder=%s')
+ 'https://test-results.appspot.com/testfile?testtype=%s'
+ '&name=results-small.json&master=%s&builder=%s'
+ )
def __init__(self, builders):
self.builders = builders
@@ -120,12 +159,16 @@ class BotTestExpectationsFactory(object):
return None
return self._results_json_for_builder(builder)
- def _results_url_for_builder(self, builder):
- return self.RESULTS_URL_FORMAT % (urllib.quote(
- self.builders.master_for_builder(builder)), urllib.quote(builder))
+ def _results_url_for_builder(self, builder, use_try_step=False):
+ test_type = (self.STEP_NAME_TRY if use_try_step else self.STEP_NAME)
+ return self.RESULTS_URL_FORMAT % (
+ urllib.quote(test_type),
+ urllib.quote(self.builders.master_for_builder(builder)),
+ urllib.quote(builder))
def _results_json_for_builder(self, builder):
- results_url = self._results_url_for_builder(builder)
+ results_url = self._results_url_for_builder(
+ builder, self.builders.is_try_server_builder(builder))
try:
_log.debug('Fetching flakiness data from appengine: %s',
results_url)
@@ -137,6 +180,19 @@ class BotTestExpectationsFactory(object):
results_url)
_log.warning(error)
+ def _results_filter_for_builder(self, builder):
+ results_url = self._results_url_for_builder(builder, False)
+ try:
+ _log.debug('Fetching flakiness data from appengine: %s',
+ results_url)
+ return ResultsFilter(builder, json.load(
+ urllib2.urlopen(results_url)))
+ except urllib2.URLError as error:
+ _log.warning(
+ 'Could not retrieve flakiness data from the bot. url: %s',
+ results_url)
+ _log.warning(error)
+
def expectations_for_port(self, port_name, builder_category='layout'):
# FIXME: This only grabs the release builder's flakiness data. If we're
# running debug, we should grab the debug builder's data instead.
@@ -152,7 +208,11 @@ class BotTestExpectationsFactory(object):
results_json = self._results_json_for_builder(builder)
if not results_json:
return None
- return BotTestExpectations(results_json, self.builders)
+ results_filter = None
+ if self.builders.is_try_server_builder(builder):
+ results_filter = self._results_filter_for_builder(builder)
+ return BotTestExpectations(results_json, self.builders,
+ results_filter=results_filter)
class BotTestExpectations(object):
@@ -164,10 +224,11 @@ class BotTestExpectations(object):
NON_RESULT_TYPES = ['S', 'X'] # SLOW, SKIP
# specifiers arg is used in unittests to avoid the static dependency on builders.
- def __init__(self, results_json, builders, specifiers=None):
+ def __init__(self, results_json, builders, specifiers=None, results_filter=None):
self.results_json = results_json
self.specifiers = specifiers or set(
builders.specifiers_for_builder(results_json.builder_name))
+ self.filter_results_bitmap = self._get_results_filter(results_filter)
def flakes_by_path(self, only_ignore_very_flaky):
"""Sets test expectations to bot results if there are at least two distinct results."""
@@ -256,6 +317,13 @@ class BotTestExpectations(object):
lines.append(line)
return lines
+ def _get_results_filter(self, results_filter):
+ if results_filter:
+ filter_builds = results_filter.get_build_numbers_to_ignore()
+ return [build not in filter_builds for build in self.results_json.build_numbers()]
+ else:
+ return None
+
def _line_from_test_and_flaky_types(self, test_name, flaky_types):
return Expectation(
tags=self.specifiers, test=test_name, results=flaky_types)
@@ -263,14 +331,16 @@ class BotTestExpectations(object):
def _all_types_in_results(self, run_length_encoded_results):
results = set()
+ result_index = 0
for result_item in run_length_encoded_results:
- _, result_types = self.results_json.occurances_and_type_from_result_item(
+ count, result_types = self.results_json.occurances_and_type_from_result_item(
result_item)
-
- for result_type in result_types:
- if result_type not in self.RESULT_TYPES_TO_IGNORE:
- results.add(result_type)
-
+ if (not self.filter_results_bitmap or
+ any(self.filter_results_bitmap[result_index : result_index + count])):
+ for result_type in result_types:
+ if result_type not in self.RESULT_TYPES_TO_IGNORE:
+ results.add(result_type)
+ result_index += count
return results
def _flaky_types_in_results(self, results_entry, only_ignore_very_flaky):
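To make the filtering in _all_types_in_results above concrete, here is a standalone walkthrough with illustrative names (the real code uses the ResultsJSON accessors and RESULT_TYPES_TO_IGNORE): results are run-length encoded as [count, types] pairs, and a per-build keep bitmap decides whether a run contributes its result types.

def all_types_in_results(rle_results, keep_bitmap, ignored_types=frozenset()):
    results = set()
    index = 0
    for count, types in rle_results:
        # A run contributes its types only if at least one of the builds it
        # covers survives the filter.
        if keep_bitmap is None or any(keep_bitmap[index:index + count]):
            for result_type in types:
                if result_type not in ignored_types:
                    results.add(result_type)
        index += count
    return results

# Builds 5 and 6 had passes in the retry-without-patch step, so they are ignored.
keep = [build not in {5, 6} for build in [1, 2, 3, 4, 5, 6, 7]]
assert all_types_in_results([[4, 'P'], [1, 'F'], [1, 'C'], [1, 'P']], keep) == {'P'}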
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/layout_package/bot_test_expectations_unittest.py b/chromium/third_party/blink/tools/blinkpy/web_tests/layout_package/bot_test_expectations_unittest.py
index f671f43f64e..34feb9b1889 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/layout_package/bot_test_expectations_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/layout_package/bot_test_expectations_unittest.py
@@ -44,6 +44,12 @@ class BotTestExpectationsFactoryTest(unittest.TestCase):
'port_name': 'dummy-port',
'specifiers': ['dummy', 'release'],
},
+ 'Dummy tryserver builder name': {
+ 'master': 'tryserver.dummy.master',
+ 'port_name': 'dummy-port',
+ 'specifiers': ['dummy', 'release'],
+ "is_try_builder": True
+ },
})
def fake_results_json_for_builder(self, builder):
@@ -56,8 +62,21 @@ class BotTestExpectationsFactoryTest(unittest.TestCase):
self.assertEqual(
factory._results_url_for_builder('Dummy builder name'),
'https://test-results.appspot.com/testfile?testtype=blink_web_tests'
- '&name=results-small.json&master=dummy.master&builder=Dummy%20builder%20name'
- )
+ '&name=results-small.json&master=dummy.master&builder=Dummy%20builder%20name')
+
+ self.assertEqual(
+ factory._results_url_for_builder('Dummy tryserver builder name'),
+ 'https://test-results.appspot.com/testfile?'
+ 'testtype=blink_web_tests'
+ '&name=results-small.json&master=tryserver.dummy.master'
+ '&builder=Dummy%20tryserver%20builder%20name')
+
+ self.assertEqual(
+ factory._results_url_for_builder('Dummy tryserver builder name', True),
+ 'https://test-results.appspot.com/testfile?'
+ 'testtype=blink_web_tests%20%28with%20patch%29'
+ '&name=results-small.json&master=tryserver.dummy.master'
+ '&builder=Dummy%20tryserver%20builder%20name')
def test_expectations_for_builder(self):
factory = bot_test_expectations.BotTestExpectationsFactory(
@@ -168,6 +187,12 @@ class BotTestExpectationsTest(unittest.TestCase):
}
return bot_test_expectations.ResultsJSON('builder', json_dict)
+ def _results_filter_from_test_data(self, test_data):
+ json_dict = {
+ 'builder': test_data,
+ }
+ return bot_test_expectations.ResultsFilter('builder', json_dict)
+
def _results_from_string(self, results_string):
return {'results': [[1, results_string]]}
@@ -221,6 +246,36 @@ class BotTestExpectationsTest(unittest.TestCase):
self.assertEqual(results_by_path, expected_output)
+ def test_filtered_all_results_by_path(self):
+ test_data = {
+ 'buildNumbers': [1, 2, 3, 4, 5, 6, 7],
+ 'tests': {
+ 'foo': {
+ 'fail_filtered.html': {'results': [[4, 'P'], [1, 'F'], [1, 'C'], [1, 'P']]},
+ 'fail_not_filtered.html': {'results': [[3, 'P'], [2, 'F'], [2, 'P']]},
+ }
+ }
+ }
+
+ filter_data = {
+ 'buildNumbers': [3, 4, 5, 6, 8],
+ 'num_failures_by_type' : {
+ 'PASS' : [0, 0, 1, 1, 1, 0, 1]
+ }
+ }
+
+ results_json = self._results_json_from_test_data(test_data)
+ results_filter = self._results_filter_from_test_data(filter_data)
+ expectations = bot_test_expectations.BotTestExpectations(results_json, BuilderList({}), set('test'), results_filter)
+ results_by_path = expectations.all_results_by_path()
+
+ expected_output = {
+ 'foo/fail_filtered.html': ['PASS'],
+ 'foo/fail_not_filtered.html': ['FAIL', 'PASS'],
+ }
+
+ self.assertEqual(results_by_path, expected_output)
+
def test_basic(self):
test_data = {
'tests': {
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/lint_test_expectations.py b/chromium/third_party/blink/tools/blinkpy/web_tests/lint_test_expectations.py
index 146ea4f88c8..aeee9f4d9cc 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/lint_test_expectations.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/lint_test_expectations.py
@@ -36,7 +36,7 @@ from blinkpy.common.host import Host
from blinkpy.common.system.log_utils import configure_logging
from blinkpy.web_tests.models.test_expectations import (TestExpectations,
ParseError)
-
+from blinkpy.web_tests.models.typ_types import ResultType
from blinkpy.web_tests.port.factory import platform_options
_log = logging.getLogger(__name__)
@@ -48,7 +48,10 @@ def PresubmitCheckTestExpectations(input_api, output_api):
os_path.dirname(os_path.abspath(__file__)), '..', '..',
'lint_test_expectations.py')
_, errs = input_api.subprocess.Popen(
- [input_api.python_executable, lint_path],
+ [
+ input_api.python_executable, lint_path,
+ '--no-check-redundant-virtual-expectations'
+ ],
stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.PIPE).communicate()
if not errs:
@@ -125,7 +128,7 @@ def lint(host, options):
ports_to_lint[0], expectations_dict={path: content})
# Check each expectation for issues
f, w = _check_expectations(host, ports_to_lint[0], path,
- test_expectations)
+ test_expectations, options)
failures += f
warnings += w
except ParseError as error:
@@ -163,7 +166,7 @@ def _check_expectations_file_content(content):
return failures
-def _check_existence(host, port, path, expectations):
+def _check_test_existence(host, port, path, expectations):
failures = []
for exp in expectations:
if not exp.test:
@@ -246,16 +249,65 @@ def _check_redundant_virtual_expectations(host, port, path, expectations):
return failures
-def _check_expectations(host, port, path, test_expectations):
+def _check_never_fix_tests(host, port, path, expectations):
+ if not path.endswith('NeverFixTests'):
+ return []
+
+ def pass_validly_overrides_skip(pass_exp, skip_exp):
+ if skip_exp.results != set([ResultType.Skip]):
+ return False
+ if not skip_exp.tags.issubset(pass_exp.tags):
+ return False
+ if skip_exp.is_glob and pass_exp.test.startswith(skip_exp.test[:-1]):
+ return True
+ base_test = port.lookup_virtual_test_base(pass_exp.test)
+ if not base_test:
+ return False
+ if base_test == skip_exp.test:
+ return True
+ if skip_exp.is_glob and base_test.startswith(skip_exp.test[:-1]):
+ return True
+ return False
+
+ failures = []
+ for i in range(len(expectations)):
+ exp = expectations[i]
+ if (exp.results != set([ResultType.Pass])
+ and exp.results != set([ResultType.Skip])):
+ error = "{}:{} Only one of [ Skip ] and [ Pass ] is allowed".format(
+ host.filesystem.basename(path), exp.lineno)
+ _log.error(error)
+ failures.append(error)
+ continue
+ if exp.is_default_pass or exp.results != set([ResultType.Pass]):
+ continue
+ if any(
+ pass_validly_overrides_skip(exp, expectations[j])
+ for j in range(i - 1, 0, -1)):
+ continue
+ error = (
+ "{}:{} {}: The [ Pass ] entry must override a previous [ Skip ]"
+ " entry with a more specific test name or tags".format(
+ host.filesystem.basename(path), exp.lineno, exp.test))
+ _log.error(error)
+ failures.append(error)
+ return failures
+
+
+def _check_expectations(host, port, path, test_expectations, options):
# Check for original expectation lines (from get_updated_lines) instead of
# expectations filtered for the current port (test_expectations).
expectations = test_expectations.get_updated_lines(path)
- failures = _check_existence(host, port, path, expectations)
+ failures = _check_test_existence(host, port, path, expectations)
failures.extend(_check_directory_glob(host, port, path, expectations))
+ failures.extend(_check_never_fix_tests(host, port, path, expectations))
# TODO(crbug.com/1080691): Change this to failures once
# wpt_expectations_updater is fixed.
- warnings = _check_redundant_virtual_expectations(host, port, path,
- expectations)
+ warnings = []
+ if not getattr(options, 'no_check_redundant_virtual_expectations', False):
+ warnings.extend(
+ _check_redundant_virtual_expectations(host, port, path,
+ expectations))
return failures, warnings
@@ -387,6 +439,10 @@ def main(argv, stderr, host=None):
action='append',
default=[],
help='paths to additional expectation files to lint.')
+ parser.add_option('--no-check-redundant-virtual-expectations',
+ action='store_true',
+ default=False,
+ help='skip checking redundant virtual expectations.')
options, _ = parser.parse_args(argv)
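The core of the new _check_never_fix_tests rule above, reduced to a standalone sketch that omits the virtual-test base lookup: a [ Pass ] line is only valid if it narrows an earlier [ Skip ] line, either by being a more specific path under the Skip glob or by carrying at least the Skip line's tags.

def pass_overrides_skip(pass_test, pass_tags, skip_test, skip_tags, skip_is_glob):
    # The Skip line's tags must be a subset of the Pass line's tags.
    if not set(skip_tags).issubset(pass_tags):
        return False
    # A Pass entry under the Skip glob (glob entries end in '*') is a valid
    # override; the real check additionally accepts the virtual-test base.
    return skip_is_glob and pass_test.startswith(skip_test[:-1])

# 'test/sub/*' [ Pass ] validly narrows the earlier 'test/*' [ Skip ] glob...
assert pass_overrides_skip('test/sub/*', set(), 'test/*', set(), True)
# ...but 'test2/*' [ Pass ] is not covered by any earlier Skip, so the linter flags it.
assert not pass_overrides_skip('test2/*', set(), 'test/*', set(), True)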
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/lint_test_expectations_unittest.py b/chromium/third_party/blink/tools/blinkpy/web_tests/lint_test_expectations_unittest.py
index 67e6bdaa796..88562f1708e 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/lint_test_expectations_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/lint_test_expectations_unittest.py
@@ -346,6 +346,44 @@ class LintTest(LoggingTestCase):
self.assertEquals(len(warnings), 1)
self.assertRegexpMatches(warnings[0], ':5 .*redundant with.* line 4$')
+ def test_never_fix_tests(self):
+ options = optparse.Values({
+ 'additional_expectations': [],
+ 'platform': 'test',
+ 'debug_rwt_logging': False
+ })
+ host = MockHost()
+
+ port = host.port_factory.get(options.platform, options=options)
+ port.virtual_test_suites = lambda: [
+ VirtualTestSuite(
+ prefix='foo', bases=['test', 'test1'], args=['--foo'])
+ ]
+ test_expectations = ('# tags: [ mac win ]\n'
+ '# results: [ Skip Pass ]\n'
+ 'test/* [ Skip ]\n'
+ '[ mac ] test1/* [ Skip ]\n'
+ 'test/sub/* [ Pass ]\n'
+ 'test/test1.html [ Pass ]\n'
+ 'test1/foo/* [ Pass ]\n'
+ 'test2/* [ Pass ]\n'
+ 'test2.html [ Skip Pass ]\n'
+ 'virtual/foo/test/* [ Pass ]\n'
+ 'virtual/foo/test1/* [ Pass ]\n')
+ port.expectations_dict = lambda: {'NeverFixTests': test_expectations}
+ port.test_exists = lambda test: True
+ host.port_factory.get = lambda platform, options=None: port
+ host.port_factory.all_port_names = lambda platform=None: [port.name()]
+
+ failures, warnings = lint_test_expectations.lint(host, options)
+ self.assertEqual(warnings, [])
+
+ self.assertEquals(len(failures), 4)
+ self.assertRegexpMatches(failures[0], ':7 .*must override')
+ self.assertRegexpMatches(failures[1], ':8 .*must override')
+ self.assertRegexpMatches(failures[2], ':9 Only one of')
+ self.assertRegexpMatches(failures[3], ':11 .*must override')
+
class CheckVirtualSuiteTest(unittest.TestCase):
def setUp(self):
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_expectations.py b/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_expectations.py
index 3bebfe6b774..a1c47bb5f5e 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_expectations.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_expectations.py
@@ -151,6 +151,11 @@ class TestExpectations(object):
raise ParseError(expectation_errors)
self._add_expectations_from_bot()
+ def set_system_condition_tags(self, tags):
+ for test_exps in self._expectations:
+ test_exps.set_tags(tags)
+ self._system_condition_tags = tags
+
@staticmethod
def _maybe_remove_comments_and_whitespace(lines):
"""If the last expectation in a block is deleted, then remove all associated
@@ -217,16 +222,26 @@ class TestExpectations(object):
lines.extend(lineno_to_exps[lineno])
lineno_to_exps.pop(lineno)
- # Add new expectations that were not part of the file before
+ # Handle Expectation instances whose line numbers fall outside the
+ # [1, total file line count] range. There are two cases:
+ #
+ # 1. If the line number is 0, the Expectation instance will be appended
+ #    to the end of the file.
+ # 2. If the line number is greater than the total number of lines,
+ #    an exception will be raised.
if lineno_to_exps:
lines.append(_NotExpectation('', len(content_lines) + 1))
- extra_line_count = len(content_lines) + 1
- for lineno, extras in lineno_to_exps.items():
- for line in extras:
+ for line in sorted(
+ reduce(lambda x,y: x+y, lineno_to_exps.values()),
+ key=lambda e: e.test):
+ if line.lineno:
+ raise ValueError(
+ "Expectation '%s' was given a line number that "
+ "is greater than the total line count of file %s."
+ % (line.to_string(), path))
lines.append(line)
- extra_line_count += 1
- lines[-1].lineno = extra_line_count
self._expectation_file_linenos[path] = {
line.lineno for line in lines
@@ -290,7 +305,7 @@ class TestExpectations(object):
test_expectations.parse_tagged_list(content)
self._expectations.append(test_expectations)
- def _get_expectations(self, expectations, test, fallback_for_test=None):
+ def _get_expectations(self, expectations, test, original_test=None):
results = set()
reasons = set()
is_slow_test = False
@@ -313,7 +328,7 @@ class TestExpectations(object):
# If the results set is empty then the Expectation constructor
# will set the expected result to Pass.
- return typ_types.Expectation(test=fallback_for_test or test,
+ return typ_types.Expectation(test=original_test or test,
results=results,
is_slow_test=is_slow_test,
reason=' '.join(reasons),
@@ -332,26 +347,37 @@ class TestExpectations(object):
override.is_slow_test |= fallback.is_slow_test
return override
- def get_expectations(self, test, fallback_for_test=None):
- expectations = self._override_or_fallback_expectations(
- self._get_expectations(self._flag_expectations, test,
- fallback_for_test),
- self._get_expectations(self._expectations, test,
- fallback_for_test))
+ def _get_expectations_with_fallback(self,
+ expectations,
+ fallback_expectations,
+ test,
+ original_test=None):
+ exp = self._override_or_fallback_expectations(
+ self._get_expectations(expectations, test, original_test),
+ self._get_expectations(fallback_expectations, test, original_test))
base_test = self.port.lookup_virtual_test_base(test)
if base_test:
return self._override_or_fallback_expectations(
- expectations, self.get_expectations(base_test, test))
- return expectations
+ exp,
+ self._get_expectations_with_fallback(expectations,
+ fallback_expectations,
+ base_test, test))
+ return exp
+
+ def get_expectations(self, test):
+ return self._get_expectations_with_fallback(self._flag_expectations,
+ self._expectations, test)
def get_flag_expectations(self, test):
- exp = self._get_expectations(self._flag_expectations, test)
+ exp = self._get_expectations_with_fallback(self._flag_expectations, [],
+ test)
if exp.is_default_pass:
return None
return exp
def get_base_expectations(self, test):
- return self._get_expectations(self._base_expectations, test)
+ return self._get_expectations_with_fallback(self._base_expectations,
+ [], test)
def get_tests_with_expected_result(self, result):
"""This method will return a list of tests and directories which
@@ -411,7 +437,7 @@ class TestExpectations(object):
if not pattern_to_exps[exp.test]:
pattern_to_exps.pop(exp.test)
- def add_expectations(self, path, exps, lineno=0):
+ def add_expectations(self, path, exps, lineno=0, append_to_end_of_file=False):
"""This method adds Expectation instances to an expectations file. It will
add the new instances after the line number passed through the lineno parameter.
If the lineno is set to a value outside the range of line numbers in the file
@@ -426,6 +452,14 @@ class TestExpectations(object):
typ_expectations = self._expectations[idx]
added_glob = False
+ if lineno < 0:
+ raise ValueError('lineno cannot be negative.')
+ if (append_to_end_of_file and lineno or
+ not append_to_end_of_file and not lineno):
+ raise ValueError('If append_to_end_of_file is set then lineno '
+ 'must be 0. Also if lineno is 0 then '
+ 'append_to_end_of_file must be set to True.')
+
for exp in exps:
exp.lineno = lineno
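A condensed sketch of the new add_expectations() argument validation above: lineno and append_to_end_of_file must agree, i.e. lineno is 0 exactly when the caller asks to append to the end of the file (the helper name is illustrative).

def validate_add_args(lineno, append_to_end_of_file):
    if lineno < 0:
        raise ValueError('lineno cannot be negative.')
    # Equivalent to the two-way check in the hunk above.
    if bool(append_to_end_of_file) != (lineno == 0):
        raise ValueError('If append_to_end_of_file is set then lineno must be 0. '
                         'Also if lineno is 0 then append_to_end_of_file must be '
                         'set to True.')

validate_add_args(0, True)    # append the new expectations after the last line
validate_add_args(5, False)   # insert the new expectations after line 5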
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_expectations_unittest.py b/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_expectations_unittest.py
index 7370549015b..17205daf39a 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_expectations_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_expectations_unittest.py
@@ -32,8 +32,10 @@ import unittest
from blinkpy.common.host_mock import MockHost
from blinkpy.common.system.output_capture import OutputCapture
-from blinkpy.web_tests.models.test_configuration import TestConfiguration, TestConfigurationConverter
-from blinkpy.web_tests.models.test_expectations import TestExpectations, SystemConfigurationRemover, ParseError
+from blinkpy.web_tests.models.test_configuration import (
+ TestConfiguration, TestConfigurationConverter)
+from blinkpy.web_tests.models.test_expectations import (
+ TestExpectations, SystemConfigurationRemover, ParseError)
from blinkpy.web_tests.models.typ_types import ResultType, Expectation
@@ -150,6 +152,19 @@ class FlagExpectationsTests(Base):
self._test_expectations = TestExpectations(self._port,
expectations_dict)
+ def assert_base_and_flag_exp(self, test, base_exp, flag_exp):
+ self.assertEqual(
+ self._test_expectations.get_base_expectations(test).results,
+ set([base_exp]))
+ actual_flag_exp = self._test_expectations.get_flag_expectations(test)
+ if flag_exp is None:
+ self.assertIsNone(actual_flag_exp)
+ else:
+ self.assertEqual(actual_flag_exp.results, set([flag_exp]))
+
+ def assert_exp(self, test, result):
+ self.assert_exp_list(test, [result])
+
def test_add_flag_test_expectations(self):
raw_flag_exps = """
# tags: [ Win ]
@@ -191,6 +206,8 @@ class FlagExpectationsTests(Base):
self.assertEqual(exp.results, set([ResultType.Pass]))
self.assertTrue(exp.is_default_pass)
self.assertFalse(exp.is_slow_test)
+ self.assert_base_and_flag_exp('passes/text.html', ResultType.Pass,
+ None)
# The test has a flag-specific expectation.
exp = self._test_expectations.get_expectations(
@@ -198,6 +215,8 @@ class FlagExpectationsTests(Base):
self.assertEqual(exp.results, set([ResultType.Failure]))
self.assertFalse(exp.is_default_pass)
self.assertTrue(exp.is_slow_test)
+ self.assert_base_and_flag_exp('failures/expected/text.html',
+ ResultType.Pass, ResultType.Failure)
# The flag-specific expectation overrides the base expectation.
exp = self._test_expectations.get_expectations(
@@ -205,6 +224,8 @@ class FlagExpectationsTests(Base):
self.assertEqual(exp.results, set([ResultType.Pass]))
self.assertFalse(exp.is_default_pass)
self.assertFalse(exp.is_slow_test)
+ self.assert_base_and_flag_exp('failures/expected/image.html',
+ ResultType.Skip, ResultType.Pass)
# The flag-specific expectation overrides the base expectation, but
# inherits [ Slow ] of the base expectation.
@@ -213,6 +234,8 @@ class FlagExpectationsTests(Base):
self.assertEqual(exp.results, set([ResultType.Pass]))
self.assertFalse(exp.is_default_pass)
self.assertTrue(exp.is_slow_test)
+ self.assert_base_and_flag_exp('failures/expected/reftest.html',
+ ResultType.Failure, ResultType.Pass)
# No flag-specific expectation. Fallback to the base expectation.
exp = self._test_expectations.get_expectations(
@@ -220,6 +243,8 @@ class FlagExpectationsTests(Base):
self.assertEqual(exp.results, set([ResultType.Crash]))
self.assertFalse(exp.is_default_pass)
self.assertFalse(exp.is_slow_test)
+ self.assert_base_and_flag_exp('failures/expected/crash.html',
+ ResultType.Crash, None)
def test_override_and_fallback_virtual_test(self):
raw_base_exps = """
@@ -253,6 +278,8 @@ class FlagExpectationsTests(Base):
self.assertEqual(exp.results, set([ResultType.Pass]))
self.assertTrue(exp.is_default_pass)
self.assertFalse(exp.is_slow_test)
+ self.assert_base_and_flag_exp(
+ 'virtual/virtual_passes/passes/image.html', ResultType.Pass, None)
# No virtual test expectation. The flag-specific expectation of the
# base test override the base expectation of the base test, but [ Slow ]
@@ -262,6 +289,9 @@ class FlagExpectationsTests(Base):
self.assertEqual(exp.results, set([ResultType.Failure]))
self.assertFalse(exp.is_default_pass)
self.assertTrue(exp.is_slow_test)
+ self.assert_base_and_flag_exp(
+ 'virtual/virtual_failures/failures/expected/text.html',
+ ResultType.Pass, ResultType.Failure)
# The flag-specific virtual test expectation wins.
exp = self._test_expectations.get_expectations(
@@ -269,6 +299,9 @@ class FlagExpectationsTests(Base):
self.assertEqual(exp.results, set([ResultType.Failure]))
self.assertFalse(exp.is_default_pass)
self.assertFalse(exp.is_slow_test)
+ self.assert_base_and_flag_exp(
+ 'virtual/virtual_failures/failures/expected/image.html',
+ ResultType.Skip, ResultType.Failure)
# No virtual test expectations. [ Slow ] in the flag-specific
# expectation of the base test and [ Failure ] in the base expectation
@@ -278,6 +311,9 @@ class FlagExpectationsTests(Base):
self.assertEqual(exp.results, set([ResultType.Failure]))
self.assertFalse(exp.is_default_pass)
self.assertTrue(exp.is_slow_test)
+ self.assert_base_and_flag_exp(
+ 'virtual/virtual_failures/failures/expected/reftest.html',
+ ResultType.Failure, None)
# No virtual test flag-specific expectation. The virtual test
# expectation in the base expectation file wins.
@@ -286,6 +322,9 @@ class FlagExpectationsTests(Base):
self.assertEqual(exp.results, set([ResultType.Pass]))
self.assertFalse(exp.is_default_pass)
self.assertFalse(exp.is_slow_test)
+ self.assert_base_and_flag_exp(
+ 'virtual/virtual_failures/failures/expected/crash.html',
+ ResultType.Pass, ResultType.Timeout)
class SystemConfigurationRemoverTests(Base):
@@ -706,22 +745,136 @@ class RemoveExpectationsTest(Base):
class AddExpectationsTest(Base):
- def test_add_expectation(self):
+
+ def test_add_expectation_end_of_file_nonzero_lineno(self):
+ port = MockHost().port_factory.get('test-win-win7')
+ raw_expectations = ('# tags: [ Mac Win ]\n'
+ '# tags: [ release ]\n'
+ '# results: [ Failure ]\n'
+ '\n'
+ '# this is a block of expectations\n'
+ 'test [ failure ]\n')
+ expectations_dict = OrderedDict()
+ expectations_dict['/tmp/TestExpectations'] = ''
+ expectations_dict['/tmp/TestExpectations2'] = raw_expectations
+ test_expectations = TestExpectations(port, expectations_dict)
+
+ with self.assertRaises(ValueError) as ctx:
+ test_expectations.add_expectations(
+ '/tmp/TestExpectations2',
+ [Expectation(test='test3',
+ results=set([ResultType.Failure]))],
+ lineno=0)
+ test_expectations.commit_changes()
+ self.assertIn('append_to_end_of_file must be set to True',
+ str(ctx.exception))
+
+ def test_add_expectation_with_negative_lineno(self):
+ port = MockHost().port_factory.get('test-win-win7')
+ raw_expectations = ('# tags: [ Mac Win ]\n'
+ '# tags: [ release ]\n'
+ '# results: [ Failure ]\n'
+ '\n'
+ '# this is a block of expectations\n'
+ 'test [ failure ]\n')
+ expectations_dict = OrderedDict()
+ expectations_dict['/tmp/TestExpectations'] = ''
+ expectations_dict['/tmp/TestExpectations2'] = raw_expectations
+ test_expectations = TestExpectations(port, expectations_dict)
+
+ with self.assertRaises(ValueError) as ctx:
+ test_expectations.add_expectations(
+ '/tmp/TestExpectations2',
+ [Expectation(test='test3',
+ results=set([ResultType.Failure]))],
+ lineno=-1)
+ test_expectations.commit_changes()
+ self.assertIn('cannot be negative', str(ctx.exception))
+
+ def test_add_expectation_outside_file_size_range(self):
port = MockHost().port_factory.get('test-win-win7')
- raw_expectations = ('# tags: [ Mac Win ]\n' '# results: [ Failure ]\n')
+ raw_expectations = ('# tags: [ Mac Win ]\n'
+ '# tags: [ release ]\n'
+ '# results: [ Failure ]\n'
+ '\n'
+ '# this is a block of expectations\n'
+ 'test [ failure ]\n')
+ expectations_dict = OrderedDict()
+ expectations_dict['/tmp/TestExpectations'] = ''
+ expectations_dict['/tmp/TestExpectations2'] = raw_expectations
+ test_expectations = TestExpectations(port, expectations_dict)
+
+ with self.assertRaises(ValueError) as ctx:
+ test_expectations.add_expectations(
+ '/tmp/TestExpectations2',
+ [Expectation(test='test3',
+ results=set([ResultType.Failure]))],
+ lineno=100)
+ test_expectations.commit_changes()
+ self.assertIn('greater than the total line count', str(ctx.exception))
+
+ def test_use_append_to_end_flag_non_zero_lineno(self):
+ # Use append_to_end_of_file=True with lineno != 0
+ # An exception should be raised.
+ port = MockHost().port_factory.get('test-win-win7')
+ raw_expectations = ('# tags: [ Mac Win ]\n'
+ '# tags: [ release ]\n'
+ '# results: [ Failure ]\n'
+ '\n'
+ '# this is a block of expectations\n'
+ 'test [ failure ]\n')
+ expectations_dict = OrderedDict()
+ expectations_dict['/tmp/TestExpectations'] = ''
+ expectations_dict['/tmp/TestExpectations2'] = raw_expectations
+ test_expectations = TestExpectations(port, expectations_dict)
+
+ with self.assertRaises(ValueError) as ctx:
+ test_expectations.add_expectations(
+ '/tmp/TestExpectations2',
+ [Expectation(test='test3',
+ results=set([ResultType.Failure]))],
+ lineno=100, append_to_end_of_file=True)
+ test_expectations.commit_changes()
+ self.assertIn('append_to_end_of_file is set then lineno must be 0',
+ str(ctx.exception))
+
+ def test_add_expectations_to_end_of_file(self):
+ port = MockHost().port_factory.get('test-win-win7')
+ raw_expectations = ('# tags: [ Mac Win ]\n'
+ '# tags: [ release ]\n'
+ '# results: [ Failure ]\n'
+ '\n'
+ '# this is a block of expectations\n'
+ 'test [ failure ]\n')
expectations_dict = OrderedDict()
expectations_dict['/tmp/TestExpectations'] = ''
expectations_dict['/tmp/TestExpectations2'] = raw_expectations
test_expectations = TestExpectations(port, expectations_dict)
test_expectations.add_expectations(
'/tmp/TestExpectations2',
- [Expectation(test='test1', results=set([ResultType.Failure]))])
+ [Expectation(test='test3', results=set([ResultType.Failure]))],
+ append_to_end_of_file=True)
+ test_expectations.add_expectations(
+ '/tmp/TestExpectations2',
+ [Expectation(test='test2', tags={'mac', 'release'},
+ results={ResultType.Crash, ResultType.Failure})],
+ append_to_end_of_file=True)
+ test_expectations.add_expectations(
+ '/tmp/TestExpectations2',
+ [Expectation(test='test1', results=set([ResultType.Pass]))],
+ append_to_end_of_file=True)
test_expectations.commit_changes()
content = port.host.filesystem.read_text_file('/tmp/TestExpectations2')
self.assertEqual(content, ('# tags: [ Mac Win ]\n'
+ '# tags: [ release ]\n'
'# results: [ Failure ]\n'
'\n'
- 'test1 [ Failure ]\n'))
+ '# this is a block of expectations\n'
+ 'test [ failure ]\n'
+ '\n'
+ 'test1 [ Pass ]\n'
+ '[ Release Mac ] test2 [ Failure Crash ]\n'
+ 'test3 [ Failure ]\n'))
def test_add_after_remove(self):
port = MockHost().port_factory.get('test-win-win7')
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_results_unittest.py b/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_results_unittest.py
index 9b34b29309f..8d2888bf48d 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_results_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_results_unittest.py
@@ -62,3 +62,18 @@ class TestResultsTest(unittest.TestCase):
failures = [test_failures.FailureTextMismatch(driver_output, None)]
result = TestResult('foo', failures=failures)
self.assertTrue(result.has_repaint_overlay)
+
+ def test_results_multiple(self):
+ driver_output = DriverOutput(None, None, None, None)
+ failure_crash = [test_failures.FailureCrash(driver_output, None),
+ test_failures.TestFailure(driver_output, None)]
+ failure_timeout = [test_failures.FailureTimeout(driver_output, None),
+ test_failures.TestFailure(driver_output, None)]
+ failure_early_exit = [test_failures.FailureEarlyExit(driver_output, None),
+ test_failures.TestFailure(driver_output, None)]
+ # Should not raise an exception for CRASH and FAIL.
+ TestResult('foo', failures=failure_crash)
+ # Should not raise an exception for TIMEOUT and FAIL.
+ TestResult('foo', failures=failure_timeout)
+ with self.assertRaises(AssertionError):
+ TestResult('foo', failures=failure_early_exit)
\ No newline at end of file
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/port/android.py b/chromium/third_party/blink/tools/blinkpy/web_tests/port/android.py
index 29daffff384..d7d9db87975 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/port/android.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/port/android.py
@@ -64,6 +64,8 @@ intent = None
perf_control = None
# pylint: enable=invalid-name
+_sanitize_android_tag = lambda t: t.replace('_', '-')
+
# product constants used by the wpt runner.
ANDROID_WEBLAYER = 'android_weblayer'
ANDROID_WEBVIEW = 'android_webview'
@@ -78,10 +80,8 @@ PRODUCTS_TO_STEPNAMES = {
}
PRODUCTS_TO_BROWSER_TAGS = {
- ANDROID_WEBLAYER: 'weblayer',
- ANDROID_WEBVIEW: 'webview',
- CHROME_ANDROID: 'chrome',
-}
+ product: _sanitize_android_tag(product)
+ for product in PRODUCTS}
# Android web tests directory, which contains override expectation files
ANDROID_WEB_TESTS_DIR = os.path.join(get_blink_dir(), 'web_tests', 'android')
@@ -95,6 +95,10 @@ PRODUCTS_TO_EXPECTATION_FILE_PATHS = {
ANDROID_WEB_TESTS_DIR, 'ClankWPTOverrideExpectations'),
}
+# Disabled WPT tests on Android
+ANDROID_DISABLED_TESTS = os.path.join(
+ ANDROID_WEB_TESTS_DIR, 'AndroidWPTNeverFixTests')
+
_friendly_browser_names = {
'weblayershell': 'weblayer',
'systemwebviewshell': 'webview',
@@ -297,19 +301,19 @@ class AndroidPort(base.Port):
BUILD_REQUIREMENTS_URL = 'https://www.chromium.org/developers/how-tos/android-build-instructions'
- def __init__(self, host, port_name='', apk='', options=None, **kwargs):
+ def __init__(self, host, port_name='', apk='', product='', options=None, **kwargs):
super(AndroidPort, self).__init__(
host, port_name, options=options, **kwargs)
self._operating_system = 'android'
self._version = 'kitkat'
fs = host.filesystem
self._local_port = factory.PortFactory(host).get(**kwargs)
-
- if apk:
+ if apk or product:
self._driver_details = DriverDetails(apk)
browser_type = fs.splitext(fs.basename(apk))[0].lower()
self._browser_type = _friendly_browser_names.get(
browser_type, browser_type)
+ self._wpt_product_arg = product
else:
# The legacy test runner will be used to run web tests on Android.
# So we need to initialize several port member variables.
@@ -318,6 +322,7 @@ class AndroidPort(base.Port):
self._browser_type = 'content_shell'
self._debug_logging = self.get_option('android_logging')
self.server_process_constructor = self._android_server_process_constructor
+ self._wpt_product_arg = ''
if not self.get_option('disable_breakpad'):
self._dump_reader = DumpReaderAndroid(host, self._build_path())
@@ -349,10 +354,14 @@ class AndroidPort(base.Port):
# TODO(rmhasan) Add bot expectations to WPT metadata.
return {}
+ def expected_test(self, _):
+ return
+
def get_platform_tags(self):
- _sanitize_tag = lambda t: t.replace('_', '-').replace(' ', '-')
- return frozenset(
- ['android', 'android-' + _sanitize_tag(self._browser_type)])
+ tags = {'android'}
+ if self._wpt_product_arg:
+ tags.add(_sanitize_android_tag(self._wpt_product_arg))
+ return frozenset(tags)
def default_smoke_test_only(self):
return True
@@ -361,11 +370,11 @@ class AndroidPort(base.Port):
return super(AndroidPort, self).additional_driver_flags() + \
self._driver_details.additional_command_line_flags(use_breakpad=not self.get_option('disable_breakpad'))
- def default_timeout_ms(self):
+ def _default_timeout_ms(self):
# Android platform has less computing power than desktop platforms.
# Using 10 seconds allows us to pass most slow tests which are not
# marked as slow tests on desktop platforms.
- return 10 * 1000
+ return 10000
def driver_stop_timeout(self):
# The driver doesn't respond to closing stdin, so we might as well stop the driver immediately.
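The PRODUCTS_TO_BROWSER_TAGS change above derives the browser tag directly from the wpt product name. A tiny standalone illustration; the PRODUCTS list here is assumed from the product constants in this file.

_sanitize_android_tag = lambda t: t.replace('_', '-')
PRODUCTS = ['android_weblayer', 'android_webview', 'chrome_android']
PRODUCTS_TO_BROWSER_TAGS = {p: _sanitize_android_tag(p) for p in PRODUCTS}

# The port's platform tags use the same sanitized form, e.g. the unit test
# below expects {'android', 'android-weblayer'} for the weblayer product.
assert PRODUCTS_TO_BROWSER_TAGS['android_weblayer'] == 'android-weblayer'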
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/port/android_unittest.py b/chromium/third_party/blink/tools/blinkpy/web_tests/port/android_unittest.py
index 3ee10e20772..96681585872 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/port/android_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/port/android_unittest.py
@@ -48,8 +48,6 @@ sys.path.insert(0, _DEVIL_ROOT)
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
-_MOCK_ROOT = os.path.join(get_chromium_src_dir(), 'third_party', 'pymock')
-sys.path.insert(0, _MOCK_ROOT)
import mock
@@ -132,15 +130,16 @@ class AndroidPortTest(port_testcase.PortTestCase):
def test_weblayer_expectation_tags(self):
host = MockSystemHost()
- port = android.AndroidPort(host, apk='apks/WebLayerShell.apk')
+ port = android.AndroidPort(
+ host, product='android_weblayer')
self.assertEqual(port.get_platform_tags(),
set(['android', 'android-weblayer']))
- def test_content_shell_expectation_tags(self):
+ def test_default_no_wpt_product_tag(self):
host = MockSystemHost()
port = android.AndroidPort(host)
self.assertEqual(port.get_platform_tags(),
- set(['android', 'android-content-shell']))
+ set(['android']))
# Test that an HTTP server indeed is required by Android (as we serve all tests over them)
def test_requires_http_server(self):
@@ -148,16 +147,7 @@ class AndroidPortTest(port_testcase.PortTestCase):
# Tests the default timeouts for Android, which are different than the rest of Chromium.
def test_default_timeout_ms(self):
- self.assertEqual(
- self.make_port(
- options=optparse.Values({
- 'configuration': 'Release'
- })).default_timeout_ms(), 10000)
- self.assertEqual(
- self.make_port(
- options=optparse.Values({
- 'configuration': 'Debug'
- })).default_timeout_ms(), 10000)
+ self.assertEqual(self.make_port().timeout_ms(), 10000)
def test_path_to_apache_config_file(self):
port = self.make_port()
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/port/base.py b/chromium/third_party/blink/tools/blinkpy/web_tests/port/base.py
index d507bd0d5e6..f4fc77f5983 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/port/base.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/port/base.py
@@ -31,6 +31,7 @@ The Port classes encapsulate Port-specific (platform-specific) behavior
in the web test infrastructure.
"""
+import time
import collections
import itertools
import json
@@ -85,8 +86,11 @@ FONT_FILES = [
[[CONTENT_SHELL_FONTS_DIR], 'Lohit-Gurmukhi.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Lohit-Tamil.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'MuktiNarrow.ttf', None],
+ [[CONTENT_SHELL_FONTS_DIR], 'NotoColorEmoji.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'NotoSansCJKjp-Regular.otf', None],
[[CONTENT_SHELL_FONTS_DIR], 'NotoSansKhmer-Regular.ttf', None],
+ [[CONTENT_SHELL_FONTS_DIR], 'NotoSansSymbols2-Regular.ttf', None],
+ [[CONTENT_SHELL_FONTS_DIR], 'NotoSansTibetan-Regular.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Tinos-Bold.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Tinos-BoldItalic.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Tinos-Italic.ttf', None],
@@ -146,12 +150,9 @@ class Port(object):
)
CONFIGURATION_SPECIFIER_MACROS = {
- # NOTE: We don't support specifiers for mac10.15 because
- # we don't have separate baselines for it (it shares the mac10.14
- # results in the platform/mac directory). This list will need to be
- # updated if/when we actually have separate baselines.
'mac':
- ['retina', 'mac10.10', 'mac10.11', 'mac10.12', 'mac10.13', 'mac10.14'],
+ ['retina', 'mac10.10', 'mac10.11', 'mac10.12', 'mac10.13', 'mac10.14',
+ 'mac10.15'],
'win': ['win7', 'win10'],
'linux': ['trusty'],
'fuschia': ['fuchsia'],
@@ -376,19 +377,36 @@ class Port(object):
def default_smoke_test_only(self):
return False
- def default_timeout_ms(self):
- timeout_ms = 6 * 1000
+ def _default_timeout_ms(self):
+ return 6000
+
+ def timeout_ms(self):
+ timeout_ms = self._default_timeout_ms()
if self.get_option('configuration') == 'Debug':
- # Debug is usually 2x-3x slower than Release.
- return 3 * timeout_ms
+ # Debug is about 5x slower than Release.
+ return 5 * timeout_ms
+ if self._build_has_dcheck_always_on():
+ # Release with DCHECK is also slower than pure Release.
+ return 2 * timeout_ms
return timeout_ms
+ @memoized
+ def _build_has_dcheck_always_on(self):
+ args_gn_file = self._build_path('args.gn')
+ if not self._filesystem.exists(args_gn_file):
+ _log.error('Unable to find %s', args_gn_file)
+ return False
+ contents = self._filesystem.read_text_file(args_gn_file)
+ return bool(
+ re.search(r'^\s*dcheck_always_on\s*=\s*true\s*(#.*)?$', contents,
+ re.MULTILINE))
+
def driver_stop_timeout(self):
"""Returns the amount of time in seconds to wait before killing the process in driver.stop()."""
# We want to wait for at least 3 seconds, but if we are really slow, we
# want to be slow on cleanup as well (for things like ASAN, Valgrind, etc.)
return (3.0 * float(self.get_option('time_out_ms', '0')) /
- self.default_timeout_ms())
+ self._default_timeout_ms())
def default_batch_size(self):
"""Returns the default batch size to use for this port."""
@@ -843,7 +861,7 @@ class Port(object):
reftest_list.append((expectation, ref_absolute_path))
return reftest_list
- def tests(self, paths):
+ def tests(self, paths=None):
"""Returns all tests or tests matching supplied paths.
Args:
@@ -966,6 +984,14 @@ class Port(object):
return self.wpt_manifest(wpt_path).is_crash_test(path_in_wpt)
def is_slow_wpt_test(self, test_file):
+ # When DCHECK is enabled, idlharness tests run 5-6x slower due to the
+ # amount of JavaScript they use (most web_tests run very little JS).
+ # This causes flaky timeouts for a lot of them, as a 0.5-1s test becomes
+ # close to the default 6s timeout.
+ if (self.is_wpt_idlharness_test(test_file)
+ and self._build_has_dcheck_always_on()):
+ return True
+
match = self.WPT_REGEX.match(test_file)
if not match:
return False
@@ -1198,7 +1224,20 @@ class Port(object):
@memoized
def args_for_test(self, test_name):
- return self._lookup_virtual_test_args(test_name)
+ args = self._lookup_virtual_test_args(test_name)
+ tracing_categories = self.get_option('enable_tracing')
+ if tracing_categories:
+ args.append('--trace-startup=' + tracing_categories)
+ # Do not finish the trace until the test is finished.
+ args.append('--trace-startup-duration=0')
+ # Append the current time to the output file name to ensure that
+ # the subsequent repetitions of the test do not overwrite older
+ # trace files.
+ current_time = time.strftime("%Y-%m-%d-%H-%M-%S")
+ file_name = 'trace_layout_test_' + test_name.replace(
+ '/', '_').replace('.', '_') + '_' + current_time + '.json'
+ args.append('--trace-startup-file=' + file_name)
+ return args
@memoized
def name_for_test(self, test_name):
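The timeout refactor above replaces the old per-port Debug multiplier with a shared timeout_ms() that scales a base value: 5x for Debug builds and 2x for Release builds with dcheck_always_on, detected by scanning args.gn. A rough standalone version, assuming only the base timeout and the args.gn contents as inputs:

import re

def has_dcheck_always_on(args_gn_contents):
    # Same pattern as the hunk above: a non-commented 'dcheck_always_on = true'.
    return bool(re.search(r'^\s*dcheck_always_on\s*=\s*true\s*(#.*)?$',
                          args_gn_contents, re.MULTILINE))

def timeout_ms(base_ms, configuration, args_gn_contents=''):
    if configuration == 'Debug':
        return 5 * base_ms    # Debug is about 5x slower than Release.
    if has_dcheck_always_on(args_gn_contents):
        return 2 * base_ms    # Release with DCHECKs is also slower.
    return base_ms

assert timeout_ms(6000, 'Release') == 6000
assert timeout_ms(6000, 'Debug') == 30000
assert timeout_ms(6000, 'Release', 'dcheck_always_on = true\n') == 12000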
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/port/base_unittest.py b/chromium/third_party/blink/tools/blinkpy/web_tests/port/base_unittest.py
index 8b84972b0ac..bf8a1e17a81 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/port/base_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/port/base_unittest.py
@@ -29,6 +29,7 @@
import json
import optparse
import unittest
+import mock
from blinkpy.common.path_finder import RELATIVE_WEB_TESTS
from blinkpy.common.system.executive_mock import MockExecutive
@@ -1137,6 +1138,20 @@ class PortTest(LoggingTestCase):
port.is_slow_wpt_test(
'external/wpt/html/dom/elements/global-attributes/dir_auto-EN-L.html'
))
+ self.assertFalse(
+ port.is_slow_wpt_test(
+ 'external/wpt/css/css-pseudo/idlharness.html'))
+
+ def test_is_slow_wpt_test_idlharness_with_dcheck(self):
+ port = self.make_port(with_tests=True)
+ PortTest._add_manifest_to_mock_file_system(port)
+ port.host.filesystem.write_text_file(port._build_path('args.gn'),
+ 'dcheck_always_on=true\n')
+        # We always consider idlharness tests slow, even if they aren't marked
+        # as such in the manifest. See https://crbug.com/1047818
+ self.assertTrue(
+ port.is_slow_wpt_test(
+ 'external/wpt/css/css-pseudo/idlharness.html'))
def test_is_slow_wpt_test_with_variations(self):
port = self.make_port(with_tests=True)
@@ -1664,6 +1679,18 @@ class PortTest(LoggingTestCase):
port.additional_driver_flags())
+ def test_enable_tracing(self):
+ options, _ = optparse.OptionParser().parse_args([])
+ options.enable_tracing = '*,-blink'
+ port = self.make_port(with_tests=True, options=options)
+ with mock.patch('time.strftime', return_value='TIME'):
+ self.assertEqual([
+ '--trace-startup=*,-blink',
+ '--trace-startup-duration=0',
+ '--trace-startup-file=trace_layout_test_non_virtual_TIME.json',
+ ], port.args_for_test('non/virtual'))
+
+
class NaturalCompareTest(unittest.TestCase):
def setUp(self):
self._port = TestPort(MockSystemHost())
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/port/browser_test.py b/chromium/third_party/blink/tools/blinkpy/web_tests/port/browser_test.py
index a373941c3e4..dd397967a71 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/port/browser_test.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/port/browser_test.py
@@ -70,12 +70,8 @@ class BrowserTestPortOverrides(object):
def driver_name(self):
return 'browser_tests'
- def default_timeout_ms(self):
- timeout_ms = 10 * 1000
- if self.get_option('configuration') == 'Debug': # pylint: disable=no-member
- # Debug is usually 2x-3x slower than Release.
- return 3 * timeout_ms
- return timeout_ms
+ def _default_timeout_ms(self):
+ return 10000
def virtual_test_suites(self):
return []
@@ -89,18 +85,10 @@ class BrowserTestMacPort(BrowserTestPortOverrides, mac.MacPort):
def _path_to_driver(self, target=None):
return self._build_path_with_target(target, self.driver_name())
- def default_timeout_ms(self):
- timeout_ms = 20 * 1000
- if self.get_option('configuration') == 'Debug': # pylint: disable=no-member
- # Debug is usually 2x-3x slower than Release.
- return 3 * timeout_ms
- return timeout_ms
+ def _default_timeout_ms(self):
+ return 20000
class BrowserTestWinPort(BrowserTestPortOverrides, win.WinPort):
- def default_timeout_ms(self):
- timeout_ms = 20 * 1000
- if self.get_option('configuration') == 'Debug': # pylint: disable=no-member
- # Debug is usually 2x-3x slower than Release.
- return 3 * timeout_ms
- return timeout_ms
+ def _default_timeout_ms(self):
+ return 20000
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/port/browser_test_unittest.py b/chromium/third_party/blink/tools/blinkpy/web_tests/port/browser_test_unittest.py
index 075cbc964e2..a2ceb8ee689 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/port/browser_test_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/port/browser_test_unittest.py
@@ -42,16 +42,7 @@ class _BrowserTestTestCaseMixin(object):
self.driver_name_endswith))
def test_default_timeout_ms(self):
- self.assertEqual(
- self.make_port(
- options=optparse.Values({
- 'configuration': 'Release'
- })).default_timeout_ms(), self.timeout_ms)
- self.assertEqual(
- self.make_port(
- options=optparse.Values({
- 'configuration': 'Debug'
- })).default_timeout_ms(), 3 * self.timeout_ms)
+ self.assertEqual(self.make_port().timeout_ms(), self.timeout_ms)
def test_driver_type(self):
self.assertTrue(
@@ -82,7 +73,7 @@ class BrowserTestLinuxTest(_BrowserTestTestCaseMixin,
os_name = 'linux'
os_version = 'trusty'
driver_name_endswith = 'browser_tests'
- timeout_ms = 10 * 1000
+ timeout_ms = 10000
class BrowserTestWinTest(_BrowserTestTestCaseMixin,
@@ -92,7 +83,7 @@ class BrowserTestWinTest(_BrowserTestTestCaseMixin,
os_name = 'win'
os_version = 'win7'
driver_name_endswith = 'browser_tests.exe'
- timeout_ms = 20 * 1000
+ timeout_ms = 20000
class BrowserTestMacTest(_BrowserTestTestCaseMixin,
@@ -102,7 +93,7 @@ class BrowserTestMacTest(_BrowserTestTestCaseMixin,
port_name = 'mac'
port_maker = browser_test.BrowserTestMacPort
driver_name_endswith = 'browser_tests'
- timeout_ms = 20 * 1000
+ timeout_ms = 20000
def test_driver_path(self):
test_port = self.make_port(
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/port/fuchsia.py b/chromium/third_party/blink/tools/blinkpy/web_tests/port/fuchsia.py
index 31d16289136..963fe9a7814 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/port/fuchsia.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/port/fuchsia.py
@@ -167,7 +167,8 @@ class _TargetHost(object):
forwarding_flags += ['-R', '%d:localhost:%d' % (port, port)]
self._proxy = self._target.RunCommandPiped([],
ssh_args=forwarding_flags,
- stderr=subprocess.PIPE)
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
self._listener = self._target.RunCommandPiped(['log_listener'],
stdout=subprocess.PIPE,
@@ -277,11 +278,11 @@ class FuchsiaPort(base.Port):
# Run a single qemu instance.
return min(MAX_WORKERS, requested_num_workers)
- def default_timeout_ms(self):
+ def _default_timeout_ms(self):
# Use 20s timeout instead of the default 6s. This is necessary because
# the tests are executed in qemu, so they run slower compared to other
# platforms.
- return 20 * 1000
+ return 20000
def requires_http_server(self):
"""HTTP server is always required to avoid copying the tests to the VM.
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/port/mac.py b/chromium/third_party/blink/tools/blinkpy/web_tests/port/mac.py
index 6f79ed0dd53..8b6924083c6 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/port/mac.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/port/mac.py
@@ -49,7 +49,7 @@ class MacPort(base.Port):
FALLBACK_PATHS = {}
FALLBACK_PATHS['mac10.15'] = ['mac']
- FALLBACK_PATHS['mac10.14'] = ['mac']
+ FALLBACK_PATHS['mac10.14'] = ['mac-mac10.14'] + FALLBACK_PATHS['mac10.15']
FALLBACK_PATHS['mac10.13'] = ['mac-mac10.13'] + FALLBACK_PATHS['mac10.14']
FALLBACK_PATHS['mac10.12'] = ['mac-mac10.12'] + FALLBACK_PATHS['mac10.13']
FALLBACK_PATHS['mac10.11'] = ['mac-mac10.11'] + FALLBACK_PATHS['mac10.12']
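Note: the chained assignments above expand to the following baseline search paths; this tiny sketch (not part of the patch) only evaluates the lines shown. With the change, mac10.14 (and the older versions that chain through it) searches mac-mac10.14 before the generic mac directory.

FALLBACK_PATHS = {}
FALLBACK_PATHS['mac10.15'] = ['mac']
FALLBACK_PATHS['mac10.14'] = ['mac-mac10.14'] + FALLBACK_PATHS['mac10.15']
FALLBACK_PATHS['mac10.13'] = ['mac-mac10.13'] + FALLBACK_PATHS['mac10.14']

print(FALLBACK_PATHS['mac10.14'])  # ['mac-mac10.14', 'mac']
print(FALLBACK_PATHS['mac10.13'])  # ['mac-mac10.13', 'mac-mac10.14', 'mac']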
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/port/port_testcase.py b/chromium/third_party/blink/tools/blinkpy/web_tests/port/port_testcase.py
index d7ff65adbdb..8528ce7d9bd 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/port/port_testcase.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/port/port_testcase.py
@@ -128,16 +128,38 @@ class PortTestCase(LoggingTestCase):
self.assertEqual(port.default_max_locked_shards(), 1)
def test_default_timeout_ms(self):
+ self.assertEqual(self.make_port().timeout_ms(), 6000)
+
+ def test_timeout_ms_release(self):
+ self.assertEqual(
+ self.make_port(options=optparse.Values(
+ {'configuration': 'Release'})).timeout_ms(),
+ self.make_port().timeout_ms())
+
+ def test_timeout_ms_debug(self):
+ self.assertEqual(
+ self.make_port(options=optparse.Values({'configuration': 'Debug'
+ })).timeout_ms(),
+ 5 * self.make_port().timeout_ms())
+
+ def make_dcheck_port(self, options):
+ host = MockSystemHost(os_name=self.os_name, os_version=self.os_version)
+ host.filesystem.write_text_file(
+ self.make_port(host)._build_path('args.gn'),
+ 'is_debug=false\ndcheck_always_on = true # comment\n')
+ port = self.make_port(host, options=options)
+ return port
+
+ def test_timeout_ms_with_dcheck(self):
+ default_timeout_ms = self.make_port().timeout_ms()
self.assertEqual(
- self.make_port(
- options=optparse.Values({
- 'configuration': 'Release'
- })).default_timeout_ms(), 6000)
+ self.make_dcheck_port(options=optparse.Values(
+ {'configuration': 'Release'})).timeout_ms(),
+ 2 * default_timeout_ms)
self.assertEqual(
- self.make_port(
- options=optparse.Values({
- 'configuration': 'Debug'
- })).default_timeout_ms(), 18000)
+ self.make_dcheck_port(options=optparse.Values(
+ {'configuration': 'Debug'})).timeout_ms(),
+ 5 * default_timeout_ms)
def test_driver_cmd_line(self):
port = self.make_port()
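Note: the timeout changes in this patch assume a Port.timeout_ms() on the base class that folds the configuration multipliers in; that hunk is not shown in this excerpt. The sketch below is only a guess at its shape, written to match the expectations asserted above (Debug is 5x the default, a Release build with dcheck_always_on is 2x); the class and attribute names are illustrative, not the actual implementation.

class PortTimeoutSketch(object):
    """Illustrative only: mirrors the timeouts asserted in port_testcase.py."""

    def __init__(self, configuration='Release', dcheck_always_on=False):
        self.configuration = configuration
        self.dcheck_always_on = dcheck_always_on

    def _default_timeout_ms(self):
        # Ports such as Fuchsia and the browser_test ports override this.
        return 6000

    def timeout_ms(self):
        # Debug builds get a 5x budget; Release builds with dcheck_always_on
        # get 2x, matching the new unit tests above.
        if self.configuration == 'Debug':
            return 5 * self._default_timeout_ms()
        if self.dcheck_always_on:
            return 2 * self._default_timeout_ms()
        return self._default_timeout_ms()


print(PortTimeoutSketch().timeout_ms())                      # 6000
print(PortTimeoutSketch('Debug').timeout_ms())                # 30000
print(PortTimeoutSketch(dcheck_always_on=True).timeout_ms())  # 12000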
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/run_web_tests.py b/chromium/third_party/blink/tools/blinkpy/web_tests/run_web_tests.py
index 44890171fbf..d3429f46a06 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/run_web_tests.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/run_web_tests.py
@@ -295,11 +295,10 @@ def parse_args(args):
dest='build',
action='store_false',
help="Don't check to see if the build is up to date."),
- optparse.make_option(
- '--child-processes',
- '--jobs',
- '-j',
- help='Number of drivers to run in parallel.'),
+ optparse.make_option('--child-processes',
+ '--jobs',
+ '-j',
+ help='Number of drivers to run in parallel.'),
optparse.make_option(
'--disable-breakpad',
action='store_true',
@@ -317,6 +316,12 @@ def parse_args(args):
action='store_true',
help='Only alert on sanitizer-related errors and crashes'),
optparse.make_option(
+ '--enable-tracing',
+ type='string',
+            help='Capture and write a trace file with the specified '
+ 'categories for each test. Passes appropriate --trace-startup '
+ 'flags to the driver.'),
+ optparse.make_option(
'--exit-after-n-crashes-or-timeouts',
type='int',
default=None,
@@ -386,10 +391,9 @@ def parse_args(args):
"'random' == pseudo-random order (default). Seed can be specified "
'via --seed, otherwise it will default to the current unix timestamp. '
"'natural' == use the natural order")),
- optparse.make_option(
- '--profile',
- action='store_true',
- help='Output per-test profile information.'),
+ optparse.make_option('--profile',
+ action='store_true',
+ help='Output per-test profile information.'),
optparse.make_option(
'--profiler',
action='store',
@@ -515,8 +519,8 @@ def parse_args(args):
help='A colon-separated list of tests to run. Wildcards are '
'NOT supported. It is the same as listing the tests as '
'positional arguments.'),
- optparse.make_option(
- '--time-out-ms', help='Set the timeout for each test'),
+ optparse.make_option('--time-out-ms',
+ help='Set the timeout for each test'),
optparse.make_option(
'--wrapper',
help=
@@ -524,11 +528,10 @@ def parse_args(args):
'is split on whitespace before running. (Example: --wrapper="valgrind '
'--smc-check=all")')),
# FIXME: Display the default number of child processes that will run.
- optparse.make_option(
- '-f',
- '--fully-parallel',
- action='store_true',
- help='run all tests in parallel'),
+ optparse.make_option('-f',
+ '--fully-parallel',
+ action='store_true',
+ help='run all tests in parallel'),
optparse.make_option(
'--virtual-parallel',
action='store_true',
@@ -652,7 +655,7 @@ def _set_up_derived_options(port, options, args):
options.configuration = port.default_configuration()
if not options.time_out_ms:
- options.time_out_ms = str(port.default_timeout_ms())
+ options.time_out_ms = str(port.timeout_ms())
options.slow_time_out_ms = str(5 * int(options.time_out_ms))
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/run_web_tests_unittest.py b/chromium/third_party/blink/tools/blinkpy/web_tests/run_web_tests_unittest.py
index 781061a5f12..794b5bc888e 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/run_web_tests_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/run_web_tests_unittest.py
@@ -50,9 +50,6 @@ from blinkpy.web_tests.models.typ_types import ResultType
from blinkpy.web_tests.port import test
from blinkpy.web_tests.views.printing import Printer
-_MOCK_ROOT = os.path.join(path_finder.get_chromium_src_dir(), 'third_party',
- 'pymock')
-sys.path.insert(0, _MOCK_ROOT)
import mock # pylint: disable=wrong-import-position
@@ -389,9 +386,9 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
'passes/args.html'
]
tests_run = get_tests_run(['--order=natural'] + tests_to_run)
+        # Because the test list is deduped, each test should run only once.
self.assertEqual([
- 'passes/args.html', 'passes/args.html', 'passes/audio.html',
- 'passes/audio.html'
+ 'passes/args.html', 'passes/audio.html'
], tests_run)
def test_random_order(self):
@@ -447,8 +444,9 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
'passes/args.html'
]
tests_run = get_tests_run(['--order=random'] + tests_to_run)
- self.assertEqual(tests_run.count('passes/audio.html'), 2)
- self.assertEqual(tests_run.count('passes/args.html'), 2)
+        # Because the test list is deduped, each test should run only once.
+ self.assertEqual(tests_run.count('passes/audio.html'), 1)
+ self.assertEqual(tests_run.count('passes/args.html'), 1)
def test_no_order(self):
tests_to_run = [
@@ -464,7 +462,8 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
'passes/args.html'
]
tests_run = get_tests_run(['--order=none'] + tests_to_run)
- self.assertEqual(tests_to_run, tests_run)
+        # Because the test list is deduped, each test should run only once.
+ self.assertEqual(['passes/args.html', 'passes/audio.html'], tests_run)
def test_no_order_with_directory_entries_in_natural_order(self):
tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/servers/cli_wrapper.py b/chromium/third_party/blink/tools/blinkpy/web_tests/servers/cli_wrapper.py
index c90bbe709af..e37580c1105 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/servers/cli_wrapper.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/servers/cli_wrapper.py
@@ -38,9 +38,10 @@ import signal
from blinkpy.common.host import Host
from blinkpy.common.system.log_utils import configure_logging
+from blinkpy.web_tests.port.base import ARTIFACTS_SUB_DIR
from blinkpy.web_tests.port.factory import configuration_options
from blinkpy.web_tests.port.factory import python_server_options
-from blinkpy.web_tests.port.base import ARTIFACTS_SUB_DIR
+from blinkpy.web_tests.servers.server_base import ServerError
_log = logging.getLogger(__name__)
@@ -64,8 +65,7 @@ def main(server_constructor,
description=None,
**kwargs):
host = Host()
- # Signals will interrupt sleep, so we can use a long duration.
- sleep_fn = sleep_fn or (lambda: host.sleep(10))
+ sleep_fn = sleep_fn or (lambda: host.sleep(1))
parser = optparse.OptionParser(
description=description, formatter=RawTextHelpFormatter())
@@ -106,6 +106,11 @@ def main(server_constructor,
try:
while True:
sleep_fn()
+ if not server.alive():
+ raise ServerError('Server is no longer listening')
+ except ServerError as e:
+ _log.error(e)
except (SystemExit, KeyboardInterrupt):
_log.info('Exiting...')
+ finally:
server.stop()
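Note: restated as a standalone sketch (not part of the patch), the wrapper's new behaviour is to poll liveness on every wake-up and still guarantee stop() runs; DummyServer is a stand-in, and the real wrapper sleeps one second between polls.

import logging
import time


class ServerError(Exception):
    pass


class DummyServer(object):
    """Stand-in for a blinkpy server object; reports dead after two polls."""

    def __init__(self):
        self._polls_left = 2
        self.stopped = False

    def alive(self):
        self._polls_left -= 1
        return self._polls_left > 0

    def stop(self):
        self.stopped = True


server = DummyServer()
try:
    while True:
        time.sleep(0.01)  # the real wrapper uses host.sleep(1)
        if not server.alive():
            raise ServerError('Server is no longer listening')
except ServerError as error:
    logging.error(error)
except (SystemExit, KeyboardInterrupt):
    logging.info('Exiting...')
finally:
    server.stop()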
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/servers/cli_wrapper_unittest.py b/chromium/third_party/blink/tools/blinkpy/web_tests/servers/cli_wrapper_unittest.py
index b1585aa3fca..5057367ed6b 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/servers/cli_wrapper_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/servers/cli_wrapper_unittest.py
@@ -13,6 +13,7 @@ class MockServer(object):
self.kwargs = kwargs
self.start_called = False
self.stop_called = False
+ self.is_alive = True
def start(self):
self.start_called = True
@@ -20,12 +21,15 @@ class MockServer(object):
def stop(self):
self.stop_called = True
+ def alive(self):
+ return self.is_alive
+
class CliWrapperTest(unittest.TestCase):
def setUp(self):
self.server = None
- def test_main(self):
+ def test_main_success(self):
def mock_server_constructor(*args, **kwargs):
self.server = MockServer(args, kwargs)
return self.server
@@ -36,3 +40,17 @@ class CliWrapperTest(unittest.TestCase):
cli_wrapper.main(mock_server_constructor, sleep_fn=raise_exit, argv=[])
self.assertTrue(self.server.start_called)
self.assertTrue(self.server.stop_called)
+
+ def test_main_server_error_after_start(self):
+ def mock_server_constructor(*args, **kwargs):
+ self.server = MockServer(args, kwargs)
+ return self.server
+
+ def server_error():
+ self.server.is_alive = False
+
+ cli_wrapper.main(mock_server_constructor,
+ sleep_fn=server_error,
+ argv=[])
+ self.assertTrue(self.server.start_called)
+ self.assertTrue(self.server.stop_called)
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/servers/server_base.py b/chromium/third_party/blink/tools/blinkpy/web_tests/servers/server_base.py
index 0a3173031cd..acbd0fd131c 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/servers/server_base.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/servers/server_base.py
@@ -75,7 +75,11 @@ class ServerBase(object):
# redirect them to files.
self._stdout = self._executive.PIPE
self._stderr = self._executive.PIPE
+ # The entrypoint process of the server, which may not be the daemon,
+ # e.g. apachectl.
self._process = None
+ # The PID of the server daemon, which may be different from
+ # self._process.pid.
self._pid = None
self._error_log_path = None
@@ -150,6 +154,14 @@ class ServerBase(object):
# Make sure we delete the pid file no matter what happens.
self._remove_pid_file()
+ def alive(self):
+ """Checks whether the server is alive."""
+ # This by default checks both the process and ports.
+ # At this point, we think the server has started up, so successes are
+ # normal while failures are not.
+ return self._is_server_running_on_all_ports(
+ success_log_level=logging.DEBUG, failure_log_level=logging.INFO)
+
def _prepare_config(self):
"""This routine can be overridden by subclasses to do any sort
of initialization required prior to starting the server that may fail.
@@ -252,9 +264,17 @@ class ServerBase(object):
return False
- def _is_server_running_on_all_ports(self):
- """Returns whether the server is running on all the desired ports."""
+ def _is_server_running_on_all_ports(self,
+ success_log_level=logging.INFO,
+ failure_log_level=logging.DEBUG):
+ """Returns whether the server is running on all the desired ports.
+ Args:
+ success_log_level: Logging level for success (default: INFO)
+ failure_log_level: Logging level for failure (default: DEBUG)
+ """
+ # Check self._pid instead of self._process because the latter might be a
+        # control process that exits after spawning the daemon.
# TODO(dpranke): crbug/378444 maybe pid is unreliable on win?
if (not self._platform.is_win()
and not self._executive.check_running_pid(self._pid)):
@@ -268,12 +288,14 @@ class ServerBase(object):
scheme = mapping['scheme']
try:
s.connect(('localhost', port))
- _log.info('Server running on %s://localhost:%d', scheme, port)
+ _log.log(success_log_level,
+ 'Server running on %s://localhost:%d', scheme, port)
except IOError as error:
if error.errno not in (errno.ECONNREFUSED, errno.ECONNRESET):
raise
- _log.debug('Server NOT running on %s://localhost:%d : %s',
- scheme, port, error)
+ _log.log(failure_log_level,
+ 'Server NOT running on %s://localhost:%d : %s',
+ scheme, port, error)
return False
finally:
s.close()
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/servers/wptserve.py b/chromium/third_party/blink/tools/blinkpy/web_tests/servers/wptserve.py
index 1985b6e15cb..06960b00820 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/servers/wptserve.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/servers/wptserve.py
@@ -58,7 +58,8 @@ class WPTServe(server_base.ServerBase):
wpt_script = fs.join(path_to_wpt_root, 'wpt')
start_cmd = [
self._port_obj.host.executable, '-u', wpt_script, 'serve',
- '--config', self._config_file, '--doc_root', path_to_wpt_tests
+ '--config', self._config_file, '--doc_root', path_to_wpt_tests,
+ '--no-h2',
]
# Some users (e.g. run_webdriver_tests.py) do not need WebSocket
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/servers/wptserve_unittest.py b/chromium/third_party/blink/tools/blinkpy/web_tests/servers/wptserve_unittest.py
index 48fd4feceaf..8784160a630 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/servers/wptserve_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/servers/wptserve_unittest.py
@@ -33,6 +33,7 @@ class TestWPTServe(LoggingTestCase):
server._config_file,
'--doc_root',
'/test.checkout/wtests/external/wpt',
+ '--no-h2',
])
def test_init_start_cmd_with_ws_handlers(self):
@@ -48,6 +49,7 @@ class TestWPTServe(LoggingTestCase):
server._config_file,
'--doc_root',
'/test.checkout/wtests/external/wpt',
+ '--no-h2',
'--ws_doc_root',
'/test.checkout/wtests/external/wpt/websockets/handlers',
])
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/update_expectations.py b/chromium/third_party/blink/tools/blinkpy/web_tests/update_expectations.py
index 8e5611da09f..e4bf80a34d6 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/update_expectations.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/update_expectations.py
@@ -63,6 +63,16 @@ def main(host, bot_test_expectations_factory, argv):
help='also remove lines if there were no results, '
'e.g. Android-only expectations for tests '
'that are not in SmokeTests')
+    # TODO(crbug.com/1077883): Including cq results might introduce false
+    # negatives: an in-review change that makes a test fail again (one that
+    # has a Failure expectation but no longer fails on CI) will keep the
+    # expectation from being removed, even if the change never lands.
+ parser.add_argument(
+ '--include-cq-results',
+ action='store_true',
+ default=False,
+ help='include results from cq.')
parser.add_argument(
'--show-results',
'-s',
@@ -83,7 +93,8 @@ def main(host, bot_test_expectations_factory, argv):
return 1
remover = ExpectationsRemover(host, port, bot_test_expectations_factory,
- webbrowser, args.type, args.remove_missing)
+ webbrowser, args.type, args.remove_missing,
+ args.include_cq_results)
test_expectations = remover.get_updated_test_expectations()
if args.show_results:
@@ -101,16 +112,17 @@ class ExpectationsRemover(object):
bot_test_expectations_factory,
browser,
type_flag='all',
- remove_missing=False):
+ remove_missing=False,
+ include_cq_results=False):
self._host = host
self._port = port
self._expectations_factory = bot_test_expectations_factory
- self._builder_results_by_path = {}
self._browser = browser
self._expectations_to_remove_list = None
self._type = type_flag
self._bug_numbers = set()
self._remove_missing = remove_missing
+ self._include_cq_results = include_cq_results
self._builder_results_by_path = self._get_builder_results_by_path()
self._removed_test_names = set()
self._version_to_os = {}
@@ -163,7 +175,7 @@ class ExpectationsRemover(object):
# actual results or only a PASS expectation appears in the actual
# results.
builders_checked = []
- configurations = []
+ builders = []
for config in self._port.all_test_configurations():
if set(expectation.tags).issubset(
set([
@@ -171,17 +183,23 @@ class ExpectationsRemover(object):
config.version.lower(),
config.build_type.lower()
])):
- configurations.append(config)
- for config in configurations:
- builder_name = self._host.builders.builder_name_for_specifiers(
- config.version, config.build_type)
- if not builder_name:
- _log.debug('No builder with config %s', config)
- # For many configurations, there is no matching builder in
- # blinkpy/common/config/builders.json. We ignore these
- # configurations and make decisions based only on configurations
- # with actual builders.
- continue
+ try_server_configs = [False]
+ if self._include_cq_results:
+ try_server_configs.append(True)
+ for is_try_server in try_server_configs:
+ builder_name = self._host.builders.builder_name_for_specifiers(
+ config.version, config.build_type, is_try_server)
+ if not builder_name:
+ _log.debug('No builder with config %s for %s',
+ config, 'CQ' if is_try_server else 'CI')
+ # For many configurations, there is no matching builder in
+ # blinkpy/common/config/builders.json. We ignore these
+ # configurations and make decisions based only on configurations
+ # with actual builders.
+ continue
+ builders.append(builder_name)
+
+ for builder_name in builders:
builders_checked.append(builder_name)
@@ -228,9 +246,16 @@ class ExpectationsRemover(object):
}
"""
builder_results_by_path = {}
- for builder_name in self._host.builders.all_continuous_builder_names():
- expectations_for_builder = (self._expectations_factory.
- expectations_for_builder(builder_name))
+ builders = []
+ if self._include_cq_results:
+ builders = self._host.builders.all_builder_names()
+ else:
+ builders = self._host.builders.all_continuous_builder_names()
+
+ for builder_name in builders:
+ expectations_for_builder = (
+ self._expectations_factory.expectations_for_builder(builder_name)
+ )
if not expectations_for_builder:
# This is not fatal since we may not need to check these
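Note: a small sketch (not part of the patch) of how --include-cq-results widens the set of builders consulted, assuming builders.json entries shaped like the ones used in the unit test below, where try (CQ) builders carry an 'is_try_builder' flag.

BUILDERS = {
    'WebKit Linux': {'port_name': 'linux-trusty',
                     'specifiers': ['Trusty', 'Release']},
    'WebKit Linux try': {'port_name': 'linux-trusty',
                         'specifiers': ['Trusty', 'Release'],
                         'is_try_builder': True},
}


def builder_names(include_cq_results):
    # Without --include-cq-results only continuous (CI) builders are used;
    # with the flag, try (CQ) builders are consulted as well.
    return sorted(name for name, config in BUILDERS.items()
                  if include_cq_results or not config.get('is_try_builder'))


print(builder_names(False))  # ['WebKit Linux']
print(builder_names(True))   # ['WebKit Linux', 'WebKit Linux try']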
diff --git a/chromium/third_party/blink/tools/blinkpy/web_tests/update_expectations_unittest.py b/chromium/third_party/blink/tools/blinkpy/web_tests/update_expectations_unittest.py
index 56006e83641..1ab0efd73f6 100644
--- a/chromium/third_party/blink/tools/blinkpy/web_tests/update_expectations_unittest.py
+++ b/chromium/third_party/blink/tools/blinkpy/web_tests/update_expectations_unittest.py
@@ -113,10 +113,12 @@ class UpdateTestExpectationsTest(LoggingTestCase):
def _create_expectations_remover(self,
type_flag='all',
- remove_missing=False):
+ remove_missing=False,
+ include_cq_results=False):
return ExpectationsRemover(
self._host, self._port, self._expectation_factory,
- self._mock_web_browser, type_flag, remove_missing)
+ self._mock_web_browser, type_flag, remove_missing,
+ include_cq_results)
def _parse_expectations(self, expectations):
path = self._port.path_to_generic_test_expectations_file()
@@ -951,6 +953,48 @@ class UpdateTestExpectationsTest(LoggingTestCase):
# shouldn't consider those passing so this line should remain.
test/a.html [ Skip ]"""))
+ def test_include_cq_results(self):
+ """By default, cq results are ignored."""
+ test_expectations_before = _strip_multiline_string_spaces("""
+ # results: [ Failure Pass ]
+ # Remove this if cq results are ignored.
+ crbug.com/1111 test/a.html [ Failure Pass ]""")
+
+ self._define_builders({
+ 'WebKit Linux': {
+ 'port_name': 'linux-trusty',
+ 'specifiers': ['Trusty', 'Release']
+ },
+ 'WebKit Linux try': {
+ 'port_name': 'linux-trusty',
+ 'specifiers': ['Trusty', 'Release'],
+ 'is_try_builder': True
+ },
+ })
+
+ self._port.all_build_types = ('release', )
+ self._port.all_systems = (('trusty', 'x86_64'), )
+
+ self._parse_expectations(test_expectations_before)
+ self._expectation_factory.all_results_by_builder = {
+ 'WebKit Linux': {'test/a.html': ['PASS', 'PASS', 'PASS'],},
+ 'WebKit Linux try': {'test/a.html': ['PASS', 'FAIL', 'PASS'],}
+
+ }
+ self._expectations_remover = self._create_expectations_remover()
+ updated_expectations = (
+ self._expectations_remover.get_updated_test_expectations())
+ self.assertEquals(
+ updated_expectations,
+ _strip_multiline_string_spaces("""
+ # results: [ Failure Pass ]"""))
+
+ self._expectations_remover = (
+ self._create_expectations_remover(include_cq_results=True))
+ updated_expectations = (
+ self._expectations_remover.get_updated_test_expectations())
+ self.assertEquals(updated_expectations, test_expectations_before)
+
def test_missing_builders_for_some_configurations(self):
"""Tests the behavior when there are no builders for some configurations.
diff --git a/chromium/third_party/blink/tools/build_wpt_metadata.py b/chromium/third_party/blink/tools/build_wpt_metadata.py
index 3f01666f031..ff70a9e60b3 100755
--- a/chromium/third_party/blink/tools/build_wpt_metadata.py
+++ b/chromium/third_party/blink/tools/build_wpt_metadata.py
@@ -8,7 +8,8 @@ import optparse
import sys
from blinkpy.common.host import Host
-from blinkpy.web_tests.port.android import AndroidPort
+from blinkpy.web_tests.port.android import (
+ AndroidPort, PRODUCTS)
from blinkpy.w3c.wpt_metadata_builder import WPTMetadataBuilder
from blinkpy.web_tests.models.test_expectations import TestExpectations
@@ -20,16 +21,23 @@ def main(args):
action="append",
help="Paths to additional expectations files for WPT.")
parser.add_argument(
- "--android-apk",
+ "--android-product",
default=None,
- help="Path to Android APK that is being tested")
+ choices=PRODUCTS,
+ help="Android product argument for wpt runner.")
+ parser.add_argument(
+ '--ignore-default-expectations',
+ action='store_true',
+        help='Do not use the default set of test expectations files, '
+        'i.e. TestExpectations, NeverFixTests, etc.')
known_args, rest_args = parser.parse_known_args(args)
options = optparse.Values(vars(known_args))
host = Host()
- if known_args.android_apk:
- port = AndroidPort(host, apk=known_args.android_apk, options=options)
+ if known_args.android_product:
+ port = AndroidPort(
+ host, product=known_args.android_product, options=options)
else:
port = host.port_factory.get(options=options)
diff --git a/chromium/third_party/blink/tools/run_blink_wptserve.py b/chromium/third_party/blink/tools/run_blink_wptserve.py
index 6f6a621d4e1..a8a91928f32 100755
--- a/chromium/third_party/blink/tools/run_blink_wptserve.py
+++ b/chromium/third_party/blink/tools/run_blink_wptserve.py
@@ -16,4 +16,8 @@ from blinkpy.common import version_check # pylint: disable=unused-import
from blinkpy.web_tests.servers import cli_wrapper
from blinkpy.web_tests.servers import wptserve
+print("Generated content (out/<build directory>/gen) is served from "
+ "out/Release/gen by default. Specify an alternate directory with "
+ "'-t <build directory>'.")
+
cli_wrapper.main(wptserve.WPTServe, description=__doc__)
diff --git a/chromium/third_party/blink/tools/run_web_tests.bat b/chromium/third_party/blink/tools/run_web_tests.bat
index a7df139bf5a..7f111ebbc0a 100755
--- a/chromium/third_party/blink/tools/run_web_tests.bat
+++ b/chromium/third_party/blink/tools/run_web_tests.bat
@@ -3,4 +3,4 @@
:: Use of this source code is governed by a BSD-style license that can be
:: found in the LICENSE file.
-python %~dp0\run_web_tests.py %*
+vpython %~dp0\run_web_tests.py %*
\ No newline at end of file