Diffstat (limited to 'deps/v8/tools/testrunner')
-rw-r--r--  deps/v8/tools/testrunner/base_runner.py        |  41
-rw-r--r--  deps/v8/tools/testrunner/local/command.py      | 102
-rw-r--r--  deps/v8/tools/testrunner/local/junit_output.py |  49
-rw-r--r--  deps/v8/tools/testrunner/local/pool.py         |  11
-rw-r--r--  deps/v8/tools/testrunner/local/statusfile.py   |   1
-rw-r--r--  deps/v8/tools/testrunner/local/variants.py     |   7
-rw-r--r--  deps/v8/tools/testrunner/objects/output.py     |   4
-rw-r--r--  deps/v8/tools/testrunner/objects/testcase.py   |   1
-rwxr-xr-x  deps/v8/tools/testrunner/standard_runner.py    |   8
-rw-r--r--  deps/v8/tools/testrunner/testproc/fuzzer.py    |  51
-rw-r--r--  deps/v8/tools/testrunner/testproc/progress.py  |  46
11 files changed, 148 insertions, 173 deletions
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index 6c2bcf8ae4..7ae2ac4eb4 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -61,7 +61,6 @@ TEST_MAP = {
"wasm-js",
"fuzzer",
"message",
- "preparser",
"intl",
"unittests",
"wasm-api-tests",
@@ -77,7 +76,6 @@ TEST_MAP = {
"wasm-js",
"fuzzer",
"message",
- "preparser",
"intl",
"unittests",
"wasm-api-tests",
@@ -88,7 +86,6 @@ TEST_MAP = {
"mjsunit",
"webkit",
"message",
- "preparser",
"intl",
],
# This needs to stay in sync with "v8_optimize_for_size" in test/BUILD.gn.
@@ -105,15 +102,17 @@ TEST_MAP = {
],
}
-# Double the timeout for these:
-SLOW_ARCHS = ["arm",
- "mips",
- "mipsel",
- "mips64",
- "mips64el",
- "s390",
- "s390x",
- "arm64"]
+# Increase the timeout for these:
+SLOW_ARCHS = [
+ "arm",
+ "arm64",
+ "mips",
+ "mipsel",
+ "mips64",
+ "mips64el",
+ "s390",
+ "s390x",
+]
class ModeConfig(object):
@@ -194,14 +193,10 @@ class BuildConfig(object):
self.is_full_debug = build_config['is_full_debug']
self.msan = build_config['is_msan']
self.no_i18n = not build_config['v8_enable_i18n_support']
- # TODO(https://crbug.com/v8/8531)
- # 'v8_use_snapshot' was removed, 'no_snap' can be removed as well.
- self.no_snap = False
self.predictable = build_config['v8_enable_verify_predictable']
self.tsan = build_config['is_tsan']
# TODO(machenbach): We only have ubsan not ubsan_vptr.
self.ubsan_vptr = build_config['is_ubsan_vptr']
- self.embedded_builtins = build_config['v8_enable_embedded_builtins']
self.verify_csa = build_config['v8_enable_verify_csa']
self.lite_mode = build_config['v8_enable_lite_mode']
self.pointer_compression = build_config['v8_enable_pointer_compression']
@@ -230,16 +225,12 @@ class BuildConfig(object):
detected_options.append('msan')
if self.no_i18n:
detected_options.append('no_i18n')
- if self.no_snap:
- detected_options.append('no_snap')
if self.predictable:
detected_options.append('predictable')
if self.tsan:
detected_options.append('tsan')
if self.ubsan_vptr:
detected_options.append('ubsan_vptr')
- if self.embedded_builtins:
- detected_options.append('embedded_builtins')
if self.verify_csa:
detected_options.append('verify_csa')
if self.lite_mode:
@@ -354,9 +345,6 @@ class BaseTestRunner(object):
"color, mono)")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -701,7 +689,6 @@ class BaseTestRunner(object):
"msan": self.build_config.msan,
"no_harness": options.no_harness,
"no_i18n": self.build_config.no_i18n,
- "no_snap": self.build_config.no_snap,
"novfp3": False,
"optimize_for_size": "--optimize-for-size" in options.extra_flags,
"predictable": self.build_config.predictable,
@@ -710,7 +697,6 @@ class BaseTestRunner(object):
"system": self.target_os,
"tsan": self.build_config.tsan,
"ubsan_vptr": self.build_config.ubsan_vptr,
- "embedded_builtins": self.build_config.embedded_builtins,
"verify_csa": self.build_config.verify_csa,
"lite_mode": self.build_config.lite_mode,
"pointer_compression": self.build_config.pointer_compression,
@@ -736,7 +722,7 @@ class BaseTestRunner(object):
"""Increases timeout for slow build configurations."""
factor = self.mode_options.timeout_scalefactor
if self.build_config.arch in SLOW_ARCHS:
- factor *= 4
+ factor *= 4.5
if self.build_config.lite_mode:
factor *= 2
if self.build_config.predictable:
@@ -803,9 +789,6 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(
self.framework_name,
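
For illustration, this is how the scale factors in the timeout hunk above
compose on a hypothetical debug arm64 lite-mode build (the base factor and
the 60s base timeout are assumed values, not taken from this patch):

    factor = 1            # mode_options.timeout_scalefactor, assumed 1 here
    factor *= 4.5         # arch in SLOW_ARCHS, raised from 4 by this change
    factor *= 2           # lite_mode build
    print(60 * factor)    # a 60s base timeout becomes 540.0s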
diff --git a/deps/v8/tools/testrunner/local/command.py b/deps/v8/tools/testrunner/local/command.py
index b68252c139..50403a0e5e 100644
--- a/deps/v8/tools/testrunner/local/command.py
+++ b/deps/v8/tools/testrunner/local/command.py
@@ -5,6 +5,7 @@
# for py2/py3 compatibility
from __future__ import print_function
+from contextlib import contextmanager
import os
import re
import signal
@@ -39,9 +40,37 @@ class AbortException(Exception):
pass
+@contextmanager
+def handle_sigterm(process, abort_fun, enabled):
+ """Call`abort_fun` on sigterm and restore previous handler to prevent
+ erroneous termination of an already terminated process.
+
+ Args:
+ process: The process to terminate.
+ abort_fun: Function taking two parameters: the process to terminate and
+ an array with a boolean for storing if an abort occurred.
+ enabled: If False, this wrapper will be a no-op.
+ """
+ # Variable to communicate with the signal handler.
+ abort_occured = [False]
+ def handler(signum, frame):
+ abort_fun(process, abort_occured)
+
+ if enabled:
+ previous = signal.signal(signal.SIGTERM, handler)
+ try:
+ yield
+ finally:
+ if enabled:
+ signal.signal(signal.SIGTERM, previous)
+
+ if abort_occured[0]:
+ raise AbortException()
+
+
class BaseCommand(object):
def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
- verbose=False, resources_func=None):
+ verbose=False, resources_func=None, handle_sigterm=False):
"""Initialize the command.
Args:
@@ -52,6 +81,9 @@ class BaseCommand(object):
env: Environment dict for execution.
verbose: Print additional output.
resources_func: Callable, returning all test files needed by this command.
+ handle_sigterm: Flag indicating if SIGTERM will be used to terminate the
+ underlying process. Should not be used from the main thread, e.g. when
+ using a command to list tests.
"""
assert(timeout > 0)
@@ -61,6 +93,7 @@ class BaseCommand(object):
self.timeout = timeout
self.env = env or {}
self.verbose = verbose
+ self.handle_sigterm = handle_sigterm
def execute(self):
if self.verbose:
@@ -68,26 +101,18 @@ class BaseCommand(object):
process = self._start_process()
- # Variable to communicate with the signal handler.
- abort_occured = [False]
- def handler(signum, frame):
- self._abort(process, abort_occured)
- signal.signal(signal.SIGTERM, handler)
-
- # Variable to communicate with the timer.
- timeout_occured = [False]
- timer = threading.Timer(
- self.timeout, self._abort, [process, timeout_occured])
- timer.start()
+ with handle_sigterm(process, self._abort, self.handle_sigterm):
+ # Variable to communicate with the timer.
+ timeout_occured = [False]
+ timer = threading.Timer(
+ self.timeout, self._abort, [process, timeout_occured])
+ timer.start()
- start_time = time.time()
- stdout, stderr = process.communicate()
- duration = time.time() - start_time
+ start_time = time.time()
+ stdout, stderr = process.communicate()
+ duration = time.time() - start_time
- timer.cancel()
-
- if abort_occured[0]:
- raise AbortException()
+ timer.cancel()
return output.Output(
process.returncode,
@@ -128,14 +153,16 @@ class BaseCommand(object):
def _abort(self, process, abort_called):
abort_called[0] = True
+ started_as = self.to_string(relative=True)
+ process_text = 'process %d started as:\n %s\n' % (process.pid, started_as)
try:
- print('Attempting to kill process %s' % process.pid)
+ print('Attempting to kill ' + process_text)
sys.stdout.flush()
self._kill_process(process)
except OSError as e:
print(e)
+ print('Unruly ' + process_text)
sys.stdout.flush()
- pass
def __str__(self):
return self.to_string()
@@ -182,6 +209,22 @@ class PosixCommand(BaseCommand):
process.kill()
+def taskkill_windows(process, verbose=False, force=True):
+ force_flag = ' /F' if force else ''
+ tk = subprocess.Popen(
+ 'taskkill /T%s /PID %d' % (force_flag, process.pid),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ stdout, stderr = tk.communicate()
+ if verbose:
+ print('Taskkill results for %d' % process.pid)
+ print(stdout)
+ print(stderr)
+ print('Return code: %d' % tk.returncode)
+ sys.stdout.flush()
+
+
class WindowsCommand(BaseCommand):
def _start_process(self, **kwargs):
# Try to change the error mode to avoid dialogs on fatal errors. Don't
@@ -211,18 +254,7 @@ class WindowsCommand(BaseCommand):
return subprocess.list2cmdline(self._to_args_list())
def _kill_process(self, process):
- tk = subprocess.Popen(
- 'taskkill /T /F /PID %d' % process.pid,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- )
- stdout, stderr = tk.communicate()
- if self.verbose:
- print('Taskkill results for %d' % process.pid)
- print(stdout)
- print(stderr)
- print('Return code: %d' % tk.returncode)
- sys.stdout.flush()
+ taskkill_windows(process, self.verbose)
class AndroidCommand(BaseCommand):
@@ -230,7 +262,7 @@ class AndroidCommand(BaseCommand):
driver = None
def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
- verbose=False, resources_func=None):
+ verbose=False, resources_func=None, handle_sigterm=False):
"""Initialize the command and all files that need to be pushed to the
Android device.
"""
@@ -251,7 +283,7 @@ class AndroidCommand(BaseCommand):
super(AndroidCommand, self).__init__(
shell, args=rel_args, cmd_prefix=cmd_prefix, timeout=timeout, env=env,
- verbose=verbose)
+ verbose=verbose, handle_sigterm=handle_sigterm)
def execute(self, **additional_popen_kwargs):
"""Execute the command on the device.
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/pool.py b/deps/v8/tools/testrunner/local/pool.py
index 9defdd30ee..a6fb91f912 100644
--- a/deps/v8/tools/testrunner/local/pool.py
+++ b/deps/v8/tools/testrunner/local/pool.py
@@ -19,6 +19,7 @@ except ImportError:
from Queue import Empty # Python 2
from . import command
+from . import utils
def setup_testing():
@@ -243,6 +244,13 @@ class Pool():
"""
self.abort_now = True
+ def _terminate_processes(self):
+ for p in self.processes:
+ if utils.IsWindows():
+ command.taskkill_windows(p, verbose=True, force=False)
+ else:
+ os.kill(p.pid, signal.SIGTERM)
+
def _terminate(self):
"""Terminates execution and cleans up the queues.
@@ -267,8 +275,7 @@ class Pool():
self.work_queue.put("STOP")
if self.abort_now:
- for p in self.processes:
- os.kill(p.pid, signal.SIGTERM)
+ self._terminate_processes()
self.notify("Joining workers")
for p in self.processes:
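
This ties the two halves of the patch together: on POSIX the pool still
sends SIGTERM, which the per-test handle_sigterm wrapper in command.py above
now intercepts cleanly; on Windows a non-forced taskkill is used instead.
A sketch, with `worker` standing in for a pool process:

    import os, signal
    os.kill(worker.pid, signal.SIGTERM)                    # POSIX path
    # taskkill_windows(worker, verbose=True, force=False)  # Windows path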
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index db07a62885..f99941eb99 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -60,7 +60,6 @@ for key in [SKIP, FAIL, PASS, CRASH, SLOW, FAIL_OK, NO_VARIANTS, FAIL_SLOPPY,
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "big", "little", "android",
- "android_arm", "android_arm64", "android_ia32", "android_x64",
"arm", "arm64", "ia32", "mips", "mipsel", "mips64", "mips64el",
"x64", "ppc", "ppc64", "s390", "s390x", "macos", "windows",
"linux", "aix", "r1", "r2", "r3", "r5", "r6"]:
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index 57c16c0af1..c363c2a5ec 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -23,16 +23,17 @@ ALL_VARIANT_FLAGS = {
"nooptimization": [["--no-opt", "--liftoff", "--no-wasm-tier-up"]],
"slow_path": [["--force-slow-path"]],
"stress": [["--stress-opt", "--always-opt", "--no-liftoff",
- "--no-wasm-tier-up", '--stress-lazy-source-positions']],
+ "--stress-lazy-source-positions"]],
"stress_js_bg_compile_wasm_code_gc": [["--stress-background-compile",
- "--wasm-code-gc",
"--stress-wasm-code-gc"]],
- "stress_incremental_marking": [["--stress-incremental-marking"]],
+ "stress_incremental_marking": [["--stress-incremental-marking"]],
# Trigger stress sampling allocation profiler with sample interval = 2^14
"stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
"trusted": [["--no-untrusted-code-mitigations"]],
"no_wasm_traps": [["--no-wasm-trap-handler"]],
"turboprop": [["--turboprop"]],
+ "instruction_scheduling": [["--turbo-instruction-scheduling"]],
+ "stress_instruction_scheduling": [["--turbo-stress-instruction-scheduling"]],
"top_level_await": [["--harmony-top-level-await"]],
}
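
For reference, each variant name maps to one or more flag sets that the
runner applies to a test run; the newly added entries resolve as follows:

    ALL_VARIANT_FLAGS["instruction_scheduling"]
    # -> [["--turbo-instruction-scheduling"]]
    ALL_VARIANT_FLAGS["stress_instruction_scheduling"]
    # -> [["--turbo-stress-instruction-scheduling"]]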
diff --git a/deps/v8/tools/testrunner/objects/output.py b/deps/v8/tools/testrunner/objects/output.py
index 78aa63d4c9..200f5462ec 100644
--- a/deps/v8/tools/testrunner/objects/output.py
+++ b/deps/v8/tools/testrunner/objects/output.py
@@ -66,6 +66,10 @@ class Output(object):
def IsSuccess(self):
return not self.HasCrashed() and not self.HasTimedOut()
+ @property
+ def exit_code_string(self):
+ return "%d [%02X]" % (self.exit_code, self.exit_code & 0xffffffff)
+
class _NullOutput(Output):
"""Useful to signal that the binary has not been run."""
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 6d4dcd1352..2a75cf60c4 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -271,6 +271,7 @@ class TestCase(object):
timeout=timeout,
verbose=self._test_config.verbose,
resources_func=self._get_resources,
+ handle_sigterm=True,
)
def _parse_source_flags(self, source=None):
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index 4d9c73f2fc..0ca387000c 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -46,7 +46,8 @@ VARIANT_ALIASES = {
# Shortcut for the two above ('more' first - it has the longer running tests)
'exhaustive': MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
- 'extra': ['nooptimization', 'future', 'no_wasm_traps', 'turboprop'],
+ 'extra': ['nooptimization', 'future', 'no_wasm_traps', 'turboprop',
+ 'instruction_scheduling'],
}
GC_STRESS_FLAGS = ['--gc-interval=500', '--stress-compaction',
@@ -174,11 +175,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
if self.build_config.asan:
options.extra_flags.append('--invoke-weak-callbacks')
- if self.build_config.no_snap:
- # Speed up slow nosnap runs. Allocation verification is covered by
- # running mksnapshot on other builders.
- options.extra_flags.append('--no-turbo-verify-allocation')
-
if options.novfp3:
options.extra_flags.append('--noenable-vfp3')
diff --git a/deps/v8/tools/testrunner/testproc/fuzzer.py b/deps/v8/tools/testrunner/testproc/fuzzer.py
index 187145b4c8..271737897a 100644
--- a/deps/v8/tools/testrunner/testproc/fuzzer.py
+++ b/deps/v8/tools/testrunner/testproc/fuzzer.py
@@ -8,6 +8,48 @@ import time
from . import base
+# Extra flags randomly added to all fuzz tests with numfuzz. List of tuples
+# (probability, flag).
+EXTRA_FLAGS = [
+ (0.1, '--always-opt'),
+ (0.1, '--assert-types'),
+ (0.1, '--cache=code'),
+ (0.1, '--force-slow-path'),
+ (0.2, '--future'),
+ (0.1, '--liftoff'),
+ (0.2, '--no-analyze-environment-liveness'),
+ (0.1, '--no-enable-sse3'),
+ (0.1, '--no-enable-ssse3'),
+ (0.1, '--no-enable-sse4_1'),
+ (0.1, '--no-enable-sse4_2'),
+ (0.1, '--no-enable-sahf'),
+ (0.1, '--no-enable-avx'),
+ (0.1, '--no-enable-fma3'),
+ (0.1, '--no-enable-bmi1'),
+ (0.1, '--no-enable-bmi2'),
+ (0.1, '--no-enable-lzcnt'),
+ (0.1, '--no-enable-popcnt'),
+ (0.1, '--no-liftoff'),
+ (0.1, '--no-opt'),
+ (0.2, '--no-regexp-tier-up'),
+ (0.1, '--no-wasm-tier-up'),
+ (0.1, '--regexp-interpret-all'),
+ (0.1, '--regexp-tier-up-ticks=10'),
+ (0.1, '--regexp-tier-up-ticks=100'),
+ (0.1, '--stress-background-compile'),
+ (0.1, '--stress-lazy-source-positions'),
+ (0.1, '--stress-wasm-code-gc'),
+ (0.1, '--turbo-instruction-scheduling'),
+ (0.1, '--turbo-stress-instruction-scheduling'),
+]
+
+def random_extra_flags(rng):
+ """Returns a random list of flags chosen from the configurations in
+ EXTRA_FLAGS.
+ """
+ return [flag for prob, flag in EXTRA_FLAGS if rng.random() < prob]
+
+
class FuzzerConfig(object):
def __init__(self, probability, analyzer, fuzzer):
"""
@@ -92,7 +134,6 @@ class FuzzerProc(base.TestProcProducer):
return self._create_subtest(test, 'analysis', flags=analysis_flags,
keep_output=True)
-
def _result_for(self, test, subtest, result):
if not self._disable_analysis:
if result is not None:
@@ -110,7 +151,7 @@ class FuzzerProc(base.TestProcProducer):
# analysis phase at all, so no fuzzer has its own analyzer.
gens = []
indexes = []
- for i, fuzzer_config in enumerate(self._fuzzer_configs):
+ for fuzzer_config in self._fuzzer_configs:
analysis_value = None
if analysis_result and fuzzer_config.analyzer:
analysis_value = fuzzer_config.analyzer.do_analysis(analysis_result)
@@ -132,7 +173,7 @@ class FuzzerProc(base.TestProcProducer):
main_index = self._rng.choice(indexes)
_, main_gen = gens[main_index]
- flags = next(main_gen)
+ flags = random_extra_flags(self._rng) + next(main_gen)
for index, (p, gen) in enumerate(gens):
if index == main_index:
continue
@@ -205,7 +246,7 @@ class GcIntervalAnalyzer(Analyzer):
class GcIntervalFuzzer(Fuzzer):
def create_flags_generator(self, rng, test, analysis_value):
if analysis_value:
- value = analysis_value / 10
+ value = analysis_value // 10
else:
value = 10000
while True:
@@ -260,7 +301,7 @@ class DeoptFuzzer(Fuzzer):
def create_flags_generator(self, rng, test, analysis_value):
while True:
if analysis_value:
- value = analysis_value / 2
+ value = analysis_value // 2
else:
value = 10000
interval = rng.randint(self._min, max(value, self._min))
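
A seeded sketch of how random_extra_flags samples EXTRA_FLAGS; each flag is
drawn independently with its configured probability (seed and sample output
are illustrative):

    import random

    rng = random.Random(42)
    extra = [flag for prob, flag in EXTRA_FLAGS if rng.random() < prob]
    # might yield e.g. ['--future', '--no-regexp-tier-up']; with the
    # probabilities above, about three flags are added per test on average.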
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 3bb9744f1e..d2e8a36038 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -14,7 +14,6 @@ import sys
import time
from . import base
-from ..local import junit_output
# Base dir of the build products for Release and Debug.
@@ -94,7 +93,7 @@ class SimpleProgressIndicator(ProgressIndicator):
print(result.output.stdout.strip())
print("Command: %s" % result.cmd.to_string())
if result.output.HasCrashed():
- print("exit code: %d" % result.output.exit_code)
+ print("exit code: %s" % result.output.exit_code_string)
print("--- CRASHED ---")
crashed += 1
if result.output.HasTimedOut():
@@ -248,7 +247,7 @@ class CompactProgressIndicator(ProgressIndicator):
print(self._templates['stderr'] % stderr)
print("Command: %s" % result.cmd.to_string(relative=True))
if output.HasCrashed():
- print("exit code: %d" % output.exit_code)
+ print("exit code: %s" % output.exit_code_string)
print("--- CRASHED ---")
if output.HasTimedOut():
print("--- TIMEOUT ---")
@@ -269,7 +268,7 @@ class CompactProgressIndicator(ProgressIndicator):
'progress': progress,
'failed': self._failed,
'test': name,
- 'mins': int(elapsed) / 60,
+ 'mins': int(elapsed) // 60,
'secs': int(elapsed) % 60
}
status = self._truncate(status, 78)
@@ -317,45 +316,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, framework_name, json_test_results, arch, mode):
super(JsonTestProgressIndicator, self).__init__()