Diffstat (limited to 'deps/v8/tools/testrunner')
-rw-r--r--  deps/v8/tools/testrunner/README  168
-rw-r--r--  deps/v8/tools/testrunner/base_runner.py  438
-rwxr-xr-x  deps/v8/tools/testrunner/deopt_fuzzer.py  381
-rwxr-xr-x  deps/v8/tools/testrunner/gc_fuzzer.py  341
-rw-r--r--  deps/v8/tools/testrunner/local/commands.py  20
-rw-r--r--  deps/v8/tools/testrunner/local/execution.py  36
-rw-r--r--  deps/v8/tools/testrunner/local/progress.py  15
-rw-r--r--  deps/v8/tools/testrunner/local/statusfile.py  152
-rwxr-xr-x  deps/v8/tools/testrunner/local/statusfile_unittest.py  26
-rw-r--r--  deps/v8/tools/testrunner/local/testsuite.py  281
-rwxr-xr-x  deps/v8/tools/testrunner/local/testsuite_unittest.py  53
-rw-r--r--  deps/v8/tools/testrunner/local/variants.py  25
-rw-r--r--  deps/v8/tools/testrunner/local/verbose.py  24
-rw-r--r--  deps/v8/tools/testrunner/network/__init__.py  26
-rw-r--r--  deps/v8/tools/testrunner/network/distro.py  90
-rw-r--r--  deps/v8/tools/testrunner/network/endpoint.py  125
-rw-r--r--  deps/v8/tools/testrunner/network/network_execution.py  253
-rw-r--r--  deps/v8/tools/testrunner/objects/context.py  15
-rw-r--r--  deps/v8/tools/testrunner/objects/output.py  8
-rw-r--r--  deps/v8/tools/testrunner/objects/peer.py  80
-rw-r--r--  deps/v8/tools/testrunner/objects/testcase.py  51
-rw-r--r--  deps/v8/tools/testrunner/objects/workpacket.py  90
-rw-r--r--  deps/v8/tools/testrunner/server/__init__.py  26
-rw-r--r--  deps/v8/tools/testrunner/server/compression.py  111
-rw-r--r--  deps/v8/tools/testrunner/server/constants.py  51
-rw-r--r--  deps/v8/tools/testrunner/server/daemon.py  147
-rw-r--r--  deps/v8/tools/testrunner/server/local_handler.py  119
-rw-r--r--  deps/v8/tools/testrunner/server/main.py  245
-rw-r--r--  deps/v8/tools/testrunner/server/presence_handler.py  120
-rw-r--r--  deps/v8/tools/testrunner/server/signatures.py  63
-rw-r--r--  deps/v8/tools/testrunner/server/status_handler.py  112
-rw-r--r--  deps/v8/tools/testrunner/server/work_handler.py  150
-rwxr-xr-x  deps/v8/tools/testrunner/standard_runner.py  553
33 files changed, 2097 insertions, 2298 deletions
diff --git a/deps/v8/tools/testrunner/README b/deps/v8/tools/testrunner/README
deleted file mode 100644
index 0771ef9dc2..0000000000
--- a/deps/v8/tools/testrunner/README
+++ /dev/null
@@ -1,168 +0,0 @@
-Test suite runner for V8, including support for distributed running.
-====================================================================
-
-
-Local usage instructions:
-=========================
-
-Run the main script with --help to get detailed usage instructions:
-
-$ tools/run-tests.py --help
-
-The interface is mostly the same as it was for the old test runner.
-You'll likely want something like this:
-
-$ tools/run-tests.py --nonetwork --arch ia32 --mode release
-
---nonetwork is the default on Mac and Windows. If you don't specify --arch
-and/or --mode, all available values will be used and run in turn (e.g.,
-omitting --mode from the above example will run ia32 in both Release and Debug
-modes).
-
-
-Networked usage instructions:
-=============================
-
-Networked running is only supported on Linux currently. Make sure that all
-machines participating in the cluster are binary-compatible (e.g. mixing
-Ubuntu Lucid and Precise doesn't work).
-
-Setup:
-------
-
-1.) Copy tools/test-server.py to a new empty directory anywhere on your hard
- drive (preferably not inside your V8 checkout just to keep things clean).
- Please do create a copy, not just a symlink.
-
-2.) Navigate to the new directory and let the server set itself up:
-
-$ ./test-server.py setup
-
- This will install PIP and UltraJSON, create a V8 working directory, and
- generate a keypair.
-
-3.) Swap public keys with someone who's already part of the networked cluster.
-
-$ cp trusted/`cat data/mypubkey`.pem /where/peers/can/see/it/myname.pem
-$ ./test-server.py approve /wherever/they/put/it/yourname.pem
-
-
-Usage:
-------
-
-1.) Start your server:
-
-$ ./test-server.py start
-
-2.) (Optionally) inspect the server's status:
-
-$ ./test-server.py status
-
-3.) From your regular V8 working directory, run tests:
-
-$ tools/run-tests.py --arch ia32 --mode debug
-
-4.) (Optionally) enjoy the speeeeeeeeeeeeeeeed
-
-
-Architecture overview:
-======================
-
-Code organization:
-------------------
-
-This section is written from the point of view of the tools/ directory.
-
-./run-tests.py:
- Main script. Parses command-line options and drives the test execution
- procedure from a high level. Imports the actual implementation of all
- steps from the testrunner/ directory.
-
-./test-server.py:
- Interface to interact with the server. Contains code to set up the server's
- working environment and can start and stop server daemon processes.
- Imports some stuff from the testrunner/server/ directory.
-
-./testrunner/local/*:
- Implementation needed to run tests locally. Used by run-tests.py. Inspired by
- (and partly copied verbatim from) the original test.py script.
-
-./testrunner/objects/*:
- A bunch of data container classes, used by the scripts in the various other
- directories; serializable for transmission over the network.
-
-./testrunner/network/*:
- Equivalents and extensions of some of the functionality in ./testrunner/local/
- as required when dispatching tests to peers on the network.
-
-./testrunner/network/network_execution.py:
- Drop-in replacement for ./testrunner/local/execution that distributes
- test jobs to network peers instead of running them locally.
-
-./testrunner/network/endpoint.py:
- Receiving end of a network distributed job, uses the implementation
- in ./testrunner/local/execution.py for actually running the tests.
-
-./testrunner/server/*:
- Implementation of the daemon that accepts and runs test execution jobs from
- peers on the network. Should ideally have no dependencies on any of the other
- directories, but that turned out to be impractical, so there are a few
- exceptions.
-
-./testrunner/server/compression.py:
- Defines a wrapper around Python TCP sockets that provides JSON based
- serialization, gzip based compression, and ensures message completeness.
-
-
-Networking architecture:
-------------------------
-
-The distribution stuff is designed to be a layer between deciding which tests
-to run on the one side, and actually running them on the other. The frontend
-that the user interacts with is the same for local and networked execution,
-and the actual test execution and result gathering code is the same too.
-
-The server daemon starts four separate servers, each listening on another port:
-- "Local": Communication with a run-tests.py script running on the same host.
- The test driving script e.g. needs to ask for available peers. It then talks
- to those peers directly (one of them will be the locally running server).
-- "Work": Listens for test job requests from run-tests.py scripts on the network
- (including localhost). Accepts an arbitrary number of connections at the
- same time, but only works on them in a serialized fashion.
-- "Status": Used for communication with other servers on the network, e.g. for
- exchanging trusted public keys to create the transitive trust closure.
-- "Discovery": Used to detect presence of other peers on the network.
- In contrast to the other three, this uses UDP (as opposed to TCP).
-
-
-Give us a diagram! We love diagrams!
-------------------------------------
- .
- Machine A . Machine B
- .
-+------------------------------+ .
-| run-tests.py | .
-| with flag: | .
-|--nonetwork --network | .
-| | / | | .
-| | / | | .
-| v / v | .
-|BACKEND / distribution | .
-+--------- / --------| \ ------+ .
- / | \_____________________
- / | . \
- / | . \
-+----- v ----------- v --------+ . +---- v -----------------------+
-| LocalHandler | WorkHandler | . | WorkHandler | LocalHandler |
-| | | | . | | | |
-| | v | . | v | |
-| | BACKEND | . | BACKEND | |
-|------------- +---------------| . |---------------+--------------|
-| Discovery | StatusHandler <----------> StatusHandler | Discovery |
-+---- ^ -----------------------+ . +-------------------- ^ -------+
- | . |
- +---------------------------------------------------------+
-
-Note that the three occurrences of "BACKEND" are the same code
-(testrunner/local/execution.py and its imports), but running from three
-distinct directories (and on two different machines).
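
The deleted README describes testrunner/server/compression.py as a wrapper around Python TCP sockets that adds JSON-based serialization, compression, and message completeness. A minimal sketch of that framing idea, assuming a 4-byte length prefix and zlib compression (function names and framing details are illustrative, not the actual module):

# Sketch of the framing scheme the README describes for server/compression.py:
# each message is JSON-encoded, compressed, and length-prefixed so the
# receiver knows when it is complete. Names and the 4-byte prefix are
# assumptions for illustration.
import json
import struct
import zlib

def send_message(sock, obj):
  payload = zlib.compress(json.dumps(obj).encode('utf-8'))
  # The length prefix guarantees message completeness on the receiving side.
  sock.sendall(struct.pack('>I', len(payload)) + payload)

def recv_message(sock):
  header = _recv_exactly(sock, 4)
  (length,) = struct.unpack('>I', header)
  payload = _recv_exactly(sock, length)
  return json.loads(zlib.decompress(payload).decode('utf-8'))

def _recv_exactly(sock, n):
  chunks = []
  while n > 0:
    chunk = sock.recv(n)
    if not chunk:
      raise IOError('peer closed the connection mid-message')
    chunks.append(chunk)
    n -= len(chunk)
  return b''.join(chunks)
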
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
new file mode 100644
index 0000000000..b6ef6fb5cd
--- /dev/null
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -0,0 +1,438 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import json
+import optparse
+import os
+import sys
+
+
+# Add testrunner to the path.
+sys.path.insert(
+ 0,
+ os.path.dirname(
+ os.path.dirname(os.path.abspath(__file__))))
+
+
+from local import utils
+
+
+BASE_DIR = (
+ os.path.dirname(
+ os.path.dirname(
+ os.path.dirname(
+ os.path.abspath(__file__)))))
+
+DEFAULT_OUT_GN = 'out.gn'
+
+ARCH_GUESS = utils.DefaultArch()
+
+# Map of test name synonyms to lists of test suites. Should be ordered by
+# expected runtimes (suites with slow test cases first). These groups are
+# invoked in separate steps on the bots.
+TEST_MAP = {
+ # This needs to stay in sync with test/bot_default.isolate.
+ "bot_default": [
+ "debugger",
+ "mjsunit",
+ "cctest",
+ "wasm-spec-tests",
+ "inspector",
+ "webkit",
+ "mkgrokdump",
+ "fuzzer",
+ "message",
+ "preparser",
+ "intl",
+ "unittests",
+ ],
+ # This needs to stay in sync with test/default.isolate.
+ "default": [
+ "debugger",
+ "mjsunit",
+ "cctest",
+ "wasm-spec-tests",
+ "inspector",
+ "mkgrokdump",
+ "fuzzer",
+ "message",
+ "preparser",
+ "intl",
+ "unittests",
+ ],
+ # This needs to stay in sync with test/optimize_for_size.isolate.
+ "optimize_for_size": [
+ "debugger",
+ "mjsunit",
+ "cctest",
+ "inspector",
+ "webkit",
+ "intl",
+ ],
+ "unittests": [
+ "unittests",
+ ],
+}
+
+
+class ModeConfig(object):
+ def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode):
+ self.flags = flags
+ self.timeout_scalefactor = timeout_scalefactor
+ self.status_mode = status_mode
+ self.execution_mode = execution_mode
+
+
+DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
+RELEASE_FLAGS = ["--nohard-abort"]
+MODES = {
+ "debug": ModeConfig(
+ flags=DEBUG_FLAGS,
+ timeout_scalefactor=4,
+ status_mode="debug",
+ execution_mode="debug",
+ ),
+ "optdebug": ModeConfig(
+ flags=DEBUG_FLAGS,
+ timeout_scalefactor=4,
+ status_mode="debug",
+ execution_mode="debug",
+ ),
+ "release": ModeConfig(
+ flags=RELEASE_FLAGS,
+ timeout_scalefactor=1,
+ status_mode="release",
+ execution_mode="release",
+ ),
+ # Normal trybot release configuration. There, dchecks are always on which
+ # implies debug is set. Hence, the status file needs to assume debug-like
+ # behavior/timeouts.
+ "tryrelease": ModeConfig(
+ flags=RELEASE_FLAGS,
+ timeout_scalefactor=1,
+ status_mode="debug",
+ execution_mode="release",
+ ),
+ # This mode requires v8 to be compiled with dchecks and slow dchecks.
+ "slowrelease": ModeConfig(
+ flags=RELEASE_FLAGS + ["--enable-slow-asserts"],
+ timeout_scalefactor=2,
+ status_mode="debug",
+ execution_mode="release",
+ ),
+}
+
+
+class TestRunnerError(Exception):
+ pass
+
+
+class BuildConfig(object):
+ def __init__(self, build_config):
+ # In V8 land, GN's x86 is called ia32.
+ if build_config['v8_target_cpu'] == 'x86':
+ self.arch = 'ia32'
+ else:
+ self.arch = build_config['v8_target_cpu']
+
+ self.is_debug = build_config['is_debug']
+ self.asan = build_config['is_asan']
+ self.cfi_vptr = build_config['is_cfi']
+ self.dcheck_always_on = build_config['dcheck_always_on']
+ self.gcov_coverage = build_config['is_gcov_coverage']
+ self.msan = build_config['is_msan']
+ self.no_i18n = not build_config['v8_enable_i18n_support']
+ self.no_snap = not build_config['v8_use_snapshot']
+ self.predictable = build_config['v8_enable_verify_predictable']
+ self.tsan = build_config['is_tsan']
+ self.ubsan_vptr = build_config['is_ubsan_vptr']
+
+ def __str__(self):
+ detected_options = []
+
+ if self.asan:
+ detected_options.append('asan')
+ if self.cfi_vptr:
+ detected_options.append('cfi_vptr')
+ if self.dcheck_always_on:
+ detected_options.append('dcheck_always_on')
+ if self.gcov_coverage:
+ detected_options.append('gcov_coverage')
+ if self.msan:
+ detected_options.append('msan')
+ if self.no_i18n:
+ detected_options.append('no_i18n')
+ if self.no_snap:
+ detected_options.append('no_snap')
+ if self.predictable:
+ detected_options.append('predictable')
+ if self.tsan:
+ detected_options.append('tsan')
+ if self.ubsan_vptr:
+ detected_options.append('ubsan_vptr')
+
+ return '\n'.join(detected_options)
+
+
+class BaseTestRunner(object):
+ def __init__(self):
+ self.outdir = None
+ self.build_config = None
+ self.mode_name = None
+ self.mode_options = None
+
+ def execute(self):
+ try:
+ parser = self._create_parser()
+ options, args = self._parse_args(parser)
+
+ self._load_build_config(options)
+
+ try:
+ self._process_default_options(options)
+ self._process_options(options)
+ except TestRunnerError:
+ parser.print_help()
+ raise
+
+ self._setup_env()
+ return self._do_execute(options, args)
+ except TestRunnerError:
+ return 1
+
+ def _create_parser(self):
+ parser = optparse.OptionParser()
+ parser.usage = '%prog [options] [tests]'
+ parser.description = """TESTS: %s""" % (TEST_MAP["default"])
+ self._add_parser_default_options(parser)
+ self._add_parser_options(parser)
+ return parser
+
+ def _add_parser_default_options(self, parser):
+ parser.add_option("--gn", help="Scan out.gn for the last built"
+ " configuration",
+ default=False, action="store_true")
+ parser.add_option("--outdir", help="Base directory with compile output",
+ default="out")
+ parser.add_option("--buildbot", help="DEPRECATED!",
+ default=False, action="store_true")
+ parser.add_option("--arch",
+ help="The architecture to run tests for")
+ parser.add_option("-m", "--mode",
+ help="The test mode in which to run (uppercase for ninja"
+ " and buildbot builds): %s" % MODES.keys())
+ parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
+ "directory will be used")
+ parser.add_option("-v", "--verbose", help="Verbose output",
+ default=False, action="store_true")
+
+ def _add_parser_options(self, parser):
+ pass
+
+ def _parse_args(self, parser):
+ options, args = parser.parse_args()
+
+ if any(map(lambda v: v and ',' in v,
+ [options.arch, options.mode])):
+ print 'Multiple arch/mode are deprecated'
+ raise TestRunnerError()
+
+ return options, args
+
+ def _load_build_config(self, options):
+ for outdir in self._possible_outdirs(options):
+ try:
+ self.build_config = self._do_load_build_config(outdir, options.verbose)
+ except TestRunnerError:
+ pass
+
+ if not self.build_config:
+ print 'Failed to load build config'
+ raise TestRunnerError
+
+ print 'Build found: %s' % self.outdir
+ if str(self.build_config):
+ print '>>> Autodetected:'
+ print self.build_config
+
+ # Returns possible build paths in order:
+ # gn
+ # outdir
+ # outdir/arch.mode
+ # Each path is provided in two versions: <path> and <path>/mode for buildbot.
+ def _possible_outdirs(self, options):
+ def outdirs():
+ if options.gn:
+ yield self._get_gn_outdir()
+ return
+
+ yield options.outdir
+ if options.arch and options.mode:
+ yield os.path.join(options.outdir,
+ '%s.%s' % (options.arch, options.mode))
+
+ for outdir in outdirs():
+ yield os.path.join(BASE_DIR, outdir)
+
+ # buildbot option
+ if options.mode:
+ yield os.path.join(BASE_DIR, outdir, options.mode)
+
+ def _get_gn_outdir(self):
+ gn_out_dir = os.path.join(BASE_DIR, DEFAULT_OUT_GN)
+ latest_timestamp = -1
+ latest_config = None
+ for gn_config in os.listdir(gn_out_dir):
+ gn_config_dir = os.path.join(gn_out_dir, gn_config)
+ if not os.path.isdir(gn_config_dir):
+ continue
+ if os.path.getmtime(gn_config_dir) > latest_timestamp:
+ latest_timestamp = os.path.getmtime(gn_config_dir)
+ latest_config = gn_config
+ if latest_config:
+ print(">>> Latest GN build found: %s" % latest_config)
+ return os.path.join(DEFAULT_OUT_GN, latest_config)
+
+ def _do_load_build_config(self, outdir, verbose=False):
+ build_config_path = os.path.join(outdir, "v8_build_config.json")
+ if not os.path.exists(build_config_path):
+ if verbose:
+ print("Didn't find build config: %s" % build_config_path)
+ raise TestRunnerError()
+
+ with open(build_config_path) as f:
+ try:
+ build_config_json = json.load(f)
+ except Exception:
+ print("%s exists but contains invalid json. Is your build up-to-date?"
+ % build_config_path)
+ raise TestRunnerError()
+
+ # In auto-detect mode the outdir is always where we found the build config.
+ # This ensures that we'll also take the build products from there.
+ self.outdir = os.path.dirname(build_config_path)
+
+ return BuildConfig(build_config_json)
+
+ def _process_default_options(self, options):
+ # We don't use the mode for more path-magic.
+ # Therefore transform the buildbot mode here to fix build_config value.
+ if options.mode:
+ options.mode = self._buildbot_to_v8_mode(options.mode)
+
+ build_config_mode = 'debug' if self.build_config.is_debug else 'release'
+ if options.mode:
+ if options.mode not in MODES:
+ print '%s mode is invalid' % options.mode
+ raise TestRunnerError()
+ if MODES[options.mode].execution_mode != build_config_mode:
+ print ('execution mode (%s) for %s is inconsistent with build config '
+ '(%s)' % (
+ MODES[options.mode].execution_mode,
+ options.mode,
+ build_config_mode))
+ raise TestRunnerError()
+
+ self.mode_name = options.mode
+ else:
+ self.mode_name = build_config_mode
+
+ self.mode_options = MODES[self.mode_name]
+
+ if options.arch and options.arch != self.build_config.arch:
+ print('--arch value (%s) inconsistent with build config (%s).' % (
+ options.arch, self.build_config.arch))
+ raise TestRunnerError()
+
+ if options.shell_dir:
+ print('Warning: --shell-dir is deprecated. Searching for executables in '
+ 'build directory (%s) instead.' % self.outdir)
+
+ def _buildbot_to_v8_mode(self, config):
+ """Convert buildbot build configs to configs understood by the v8 runner.
+
+ V8 configs are always lower case and without the additional _x64 suffix
+ for 64 bit builds on windows with ninja.
+ """
+ mode = config[:-4] if config.endswith('_x64') else config
+ return mode.lower()
+
+ def _process_options(self, options):
+ pass
+
+ def _setup_env(self):
+ # Use the v8 root as cwd as some test cases use "load" with relative paths.
+ os.chdir(BASE_DIR)
+
+ # Many tests assume an English interface.
+ os.environ['LANG'] = 'en_US.UTF-8'
+
+ symbolizer_option = self._get_external_symbolizer_option()
+
+ if self.build_config.asan:
+ asan_options = [
+ symbolizer_option,
+ 'allow_user_segv_handler=1',
+ 'allocator_may_return_null=1',
+ ]
+ if not utils.GuessOS() in ['macos', 'windows']:
+ # LSAN is not available on mac and windows.
+ asan_options.append('detect_leaks=1')
+ else:
+ asan_options.append('detect_leaks=0')
+ os.environ['ASAN_OPTIONS'] = ":".join(asan_options)
+
+ if self.build_config.cfi_vptr:
+ os.environ['UBSAN_OPTIONS'] = ":".join([
+ 'print_stacktrace=1',
+ 'print_summary=1',
+ 'symbolize=1',
+ symbolizer_option,
+ ])
+
+ if self.build_config.ubsan_vptr:
+ os.environ['UBSAN_OPTIONS'] = ":".join([
+ 'print_stacktrace=1',
+ symbolizer_option,
+ ])
+
+ if self.build_config.msan:
+ os.environ['MSAN_OPTIONS'] = symbolizer_option
+
+ if self.build_config.tsan:
+ suppressions_file = os.path.join(
+ BASE_DIR,
+ 'tools',
+ 'sanitizers',
+ 'tsan_suppressions.txt')
+ os.environ['TSAN_OPTIONS'] = " ".join([
+ symbolizer_option,
+ 'suppressions=%s' % suppressions_file,
+ 'exit_code=0',
+ 'report_thread_leaks=0',
+ 'history_size=7',
+ 'report_destroy_locked=0',
+ ])
+
+ def _get_external_symbolizer_option(self):
+ external_symbolizer_path = os.path.join(
+ BASE_DIR,
+ 'third_party',
+ 'llvm-build',
+ 'Release+Asserts',
+ 'bin',
+ 'llvm-symbolizer',
+ )
+
+ if utils.IsWindows():
+ # Quote, because sanitizers might confuse colon as option separator.
+ external_symbolizer_path = '"%s.exe"' % external_symbolizer_path
+
+ return 'external_symbolizer_path=%s' % external_symbolizer_path
+
+
+ # TODO(majeski): remove options & args parameters
+ def _do_execute(self, options, args):
+ raise NotImplementedError()
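
base_runner.py is meant to be subclassed: the fuzzers added below derive from BaseTestRunner and fill in the hooks it leaves empty. A minimal sketch of such a runner, with an illustrative _do_execute body (the --repeat option is made up for the example):

# Minimal sketch of a concrete runner built on BaseTestRunner, mirroring how
# deopt_fuzzer.py and gc_fuzzer.py below plug into its hooks.
import sys

import base_runner


class ExampleRunner(base_runner.BaseTestRunner):
  def _add_parser_options(self, parser):
    # Extends the common options added by _add_parser_default_options().
    parser.add_option("--repeat", default=1, type="int",
                      help="How often to run each test")

  def _process_options(self, options):
    # Runner-specific validation; mode/arch were already resolved by the base.
    if options.repeat < 1:
      options.repeat = 1

  def _do_execute(self, options, args):
    # By now self.build_config, self.outdir, self.mode_name and
    # self.mode_options have been populated by BaseTestRunner.execute().
    print(">>> Would run tests for %s.%s" % (self.build_config.arch,
                                             self.mode_name))
    return 0


if __name__ == '__main__':
  sys.exit(ExampleRunner().execute())
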
diff --git a/deps/v8/tools/testrunner/deopt_fuzzer.py b/deps/v8/tools/testrunner/deopt_fuzzer.py
new file mode 100755
index 0000000000..75878d442c
--- /dev/null
+++ b/deps/v8/tools/testrunner/deopt_fuzzer.py
@@ -0,0 +1,381 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from os.path import join
+import json
+import math
+import multiprocessing
+import os
+import random
+import shlex
+import sys
+import time
+
+# Adds testrunner to the path, hence it has to be imported at the beginning.
+import base_runner
+
+from testrunner.local import execution
+from testrunner.local import progress
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.local import verbose
+from testrunner.objects import context
+
+
+DEFAULT_TESTS = ["mjsunit", "webkit"]
+TIMEOUT_DEFAULT = 60
+
+# Double the timeout for these:
+SLOW_ARCHS = ["arm",
+ "mipsel"]
+MAX_DEOPT = 1000000000
+DISTRIBUTION_MODES = ["smooth", "random"]
+
+
+class DeoptFuzzer(base_runner.BaseTestRunner):
+ def __init__(self):
+ super(DeoptFuzzer, self).__init__()
+
+ class RandomDistribution:
+ def __init__(self, seed=None):
+ seed = seed or random.randint(1, sys.maxint)
+ print "Using random distribution with seed %d" % seed
+ self._random = random.Random(seed)
+
+ def Distribute(self, n, m):
+ if n > m:
+ n = m
+ return self._random.sample(xrange(1, m + 1), n)
+
+ class SmoothDistribution:
+ """Distribute n numbers into the interval [1:m].
+ F1: Factor of the first derivation of the distribution function.
+ F2: Factor of the second derivation of the distribution function.
+ With F1 and F2 set to 0, the distribution will be equal.
+ """
+ def __init__(self, factor1=2.0, factor2=0.2):
+ self._factor1 = factor1
+ self._factor2 = factor2
+
+ def Distribute(self, n, m):
+ if n > m:
+ n = m
+ if n <= 1:
+ return [ 1 ]
+
+ result = []
+ x = 0.0
+ dx = 1.0
+ ddx = self._factor1
+ dddx = self._factor2
+ for i in range(0, n):
+ result += [ x ]
+ x += dx
+ dx += ddx
+ ddx += dddx
+
+ # Project the distribution into the interval [0:M].
+ result = [ x * m / result[-1] for x in result ]
+
+ # Equalize by n. The closer n is to m, the more equal will be the
+ # distribution.
+ for (i, x) in enumerate(result):
+ # The value of x if it was equally distributed.
+ equal_x = i / float(n - 1) * float(m - 1) + 1
+
+ # Difference factor between actual and equal distribution.
+ diff = 1 - (x / equal_x)
+
+ # Equalize x dependent on the number of values to distribute.
+ result[i] = int(x + (i + 1) * diff)
+ return result
+
+
+ def _distribution(self, options):
+ if options.distribution_mode == "random":
+ return self.RandomDistribution(options.seed)
+ if options.distribution_mode == "smooth":
+ return self.SmoothDistribution(options.distribution_factor1,
+ options.distribution_factor2)
+
+
+ def _add_parser_options(self, parser):
+ parser.add_option("--command-prefix",
+ help="Prepended to each shell command used to run a test",
+ default="")
+ parser.add_option("--coverage", help=("Exponential test coverage "
+ "(range 0.0, 1.0) - 0.0: one test, 1.0 all tests (slow)"),
+ default=0.4, type="float")
+ parser.add_option("--coverage-lift", help=("Lifts test coverage for tests "
+ "with a small number of deopt points (range 0, inf)"),
+ default=20, type="int")
+ parser.add_option("--distribution-factor1", help=("Factor of the first "
+ "derivation of the distribution function"), default=2.0,
+ type="float")
+ parser.add_option("--distribution-factor2", help=("Factor of the second "
+ "derivation of the distribution function"), default=0.7,
+ type="float")
+ parser.add_option("--distribution-mode", help=("How to select deopt points "
+ "for a given test (smooth|random)"),
+ default="smooth")
+ parser.add_option("--dump-results-file", help=("Dump maximum number of "
+ "deopt points per test to a file"))
+ parser.add_option("--extra-flags",
+ help="Additional flags to pass to each test command",
+ default="")
+ parser.add_option("--isolates", help="Whether to test isolates",
+ default=False, action="store_true")
+ parser.add_option("-j", help="The number of parallel tasks to run",
+ default=0, type="int")
+ parser.add_option("-p", "--progress",
+ help=("The style of progress indicator"
+ " (verbose, dots, color, mono)"),
+ choices=progress.PROGRESS_INDICATORS.keys(),
+ default="mono")
+ parser.add_option("--shard-count",
+ help="Split testsuites into this number of shards",
+ default=1, type="int")
+ parser.add_option("--shard-run",
+ help="Run this shard from the split up tests.",
+ default=1, type="int")
+ parser.add_option("--seed", help="The seed for the random distribution",
+ type="int")
+ parser.add_option("-t", "--timeout", help="Timeout in seconds",
+ default= -1, type="int")
+ parser.add_option("--random-seed", default=0, dest="random_seed",
+ help="Default seed for initializing random generator")
+ parser.add_option("--fuzzer-random-seed", default=0,
+ help="Default seed for initializing fuzzer random "
+ "generator")
+ return parser
+
+
+ def _process_options(self, options):
+ # Special processing of other options, sorted alphabetically.
+ options.command_prefix = shlex.split(options.command_prefix)
+ options.extra_flags = shlex.split(options.extra_flags)
+ if options.j == 0:
+ options.j = multiprocessing.cpu_count()
+ while options.random_seed == 0:
+ options.random_seed = random.SystemRandom().randint(-2147483648,
+ 2147483647)
+ if not options.distribution_mode in DISTRIBUTION_MODES:
+ print "Unknown distribution mode %s" % options.distribution_mode
+ return False
+ if options.distribution_factor1 < 0.0:
+ print ("Distribution factor1 %s is out of range. Defaulting to 0.0"
+ % options.distribution_factor1)
+ options.distribution_factor1 = 0.0
+ if options.distribution_factor2 < 0.0:
+ print ("Distribution factor2 %s is out of range. Defaulting to 0.0"
+ % options.distribution_factor2)
+ options.distribution_factor2 = 0.0
+ if options.coverage < 0.0 or options.coverage > 1.0:
+ print ("Coverage %s is out of range. Defaulting to 0.4"
+ % options.coverage)
+ options.coverage = 0.4
+ if options.coverage_lift < 0:
+ print ("Coverage lift %s is out of range. Defaulting to 0"
+ % options.coverage_lift)
+ options.coverage_lift = 0
+ return True
+
+ def _shard_tests(self, tests, shard_count, shard_run):
+ if shard_count < 2:
+ return tests
+ if shard_run < 1 or shard_run > shard_count:
+ print "shard-run not a valid number, should be in [1:shard-count]"
+ print "defaulting back to running all tests"
+ return tests
+ count = 0
+ shard = []
+ for test in tests:
+ if count % shard_count == shard_run - 1:
+ shard.append(test)
+ count += 1
+ return shard
+
+ def _do_execute(self, options, args):
+ suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
+
+ if len(args) == 0:
+ suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
+ else:
+ args_suites = set()
+ for arg in args:
+ suite = arg.split(os.path.sep)[0]
+ if not suite in args_suites:
+ args_suites.add(suite)
+ suite_paths = [ s for s in suite_paths if s in args_suites ]
+
+ suites = []
+ for root in suite_paths:
+ suite = testsuite.TestSuite.LoadTestSuite(
+ os.path.join(base_runner.BASE_DIR, "test", root))
+ if suite:
+ suites.append(suite)
+
+ try:
+ return self._execute(args, options, suites)
+ except KeyboardInterrupt:
+ return 2
+
+
+ def _calculate_n_tests(self, m, options):
+ """Calculates the number of tests from m deopt points with exponential
+ coverage.
+ The coverage is expected to be between 0.0 and 1.0.
+ The 'coverage lift' lifts the coverage for tests with smaller m values.
+ """
+ c = float(options.coverage)
+ l = float(options.coverage_lift)
+ return int(math.pow(m, (m * c + l) / (m + l)))
+
+
+ def _execute(self, args, options, suites):
+ print(">>> Running tests for %s.%s" % (self.build_config.arch,
+ self.mode_name))
+
+ dist = self._distribution(options)
+
+ # Populate context object.
+ timeout = options.timeout
+ if timeout == -1:
+ # Simulators are slow, therefore allow a longer default timeout.
+ if self.build_config.arch in SLOW_ARCHS:
+ timeout = 2 * TIMEOUT_DEFAULT;
+ else:
+ timeout = TIMEOUT_DEFAULT;
+
+ timeout *= self.mode_options.timeout_scalefactor
+ ctx = context.Context(self.build_config.arch,
+ self.mode_options.execution_mode,
+ self.outdir,
+ self.mode_options.flags, options.verbose,
+ timeout, options.isolates,
+ options.command_prefix,
+ options.extra_flags,
+ False, # Keep i18n on by default.
+ options.random_seed,
+ True, # No sorting of test cases.
+ 0, # Don't rerun failing tests.
+ 0, # No use of a rerun-failing-tests maximum.
+ False, # No predictable mode.
+ False, # No no_harness mode.
+ False, # Don't use perf data.
+ False) # Coverage not supported.
+
+ # Find available test suites and read test cases from them.
+ variables = {
+ "arch": self.build_config.arch,
+ "asan": self.build_config.asan,
+ "byteorder": sys.byteorder,
+ "dcheck_always_on": self.build_config.dcheck_always_on,
+ "deopt_fuzzer": True,
+ "gc_fuzzer": False,
+ "gc_stress": False,
+ "gcov_coverage": self.build_config.gcov_coverage,
+ "isolates": options.isolates,
+ "mode": self.mode_options.status_mode,
+ "msan": self.build_config.msan,
+ "no_harness": False,
+ "no_i18n": self.build_config.no_i18n,
+ "no_snap": self.build_config.no_snap,
+ "novfp3": False,
+ "predictable": self.build_config.predictable,
+ "simulator": utils.UseSimulator(self.build_config.arch),
+ "simulator_run": False,
+ "system": utils.GuessOS(),
+ "tsan": self.build_config.tsan,
+ "ubsan_vptr": self.build_config.ubsan_vptr,
+ }
+ num_tests = 0
+ test_id = 0
+
+ # Remember test case prototypes for the fuzzing phase.
+ test_backup = dict((s, []) for s in suites)
+
+ for s in suites:
+ s.ReadStatusFile(variables)
+ s.ReadTestCases(ctx)
+ if len(args) > 0:
+ s.FilterTestCasesByArgs(args)
+ s.FilterTestCasesByStatus(False)
+ for t in s.tests:
+ t.flags += s.GetStatusfileFlags(t)
+
+ test_backup[s] = s.tests
+ analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT,
+ "--print-deopt-stress"]
+ s.tests = [t.CopyAddingFlags(t.variant, analysis_flags) for t in s.tests]
+ num_tests += len(s.tests)
+ for t in s.tests:
+ t.id = test_id
+ test_id += 1
+
+ if num_tests == 0:
+ print "No tests to run."
+ return 0
+
+ print(">>> Collection phase")
+ progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
+ runner = execution.Runner(suites, progress_indicator, ctx)
+
+ exit_code = runner.Run(options.j)
+
+ print(">>> Analysis phase")
+ num_tests = 0
+ test_id = 0
+ for s in suites:
+ test_results = {}
+ for t in s.tests:
+ for line in t.output.stdout.splitlines():
+ if line.startswith("=== Stress deopt counter: "):
+ test_results[t.path] = MAX_DEOPT - int(line.split(" ")[-1])
+ for t in s.tests:
+ if t.path not in test_results:
+ print "Missing results for %s" % t.path
+ if options.dump_results_file:
+ results_dict = dict((t.path, n) for (t, n) in test_results.iteritems())
+ with file("%s.%d.txt" % (options.dump_results_file, time.time()),
+ "w") as f:
+ f.write(json.dumps(results_dict))
+
+ # Reset tests and redistribute the prototypes from the collection phase.
+ s.tests = []
+ if options.verbose:
+ print "Test distributions:"
+ for t in test_backup[s]:
+ max_deopt = test_results.get(t.path, 0)
+ if max_deopt == 0:
+ continue
+ n_deopt = self._calculate_n_tests(max_deopt, options)
+ distribution = dist.Distribute(n_deopt, max_deopt)
+ if options.verbose:
+ print "%s %s" % (t.path, distribution)
+ for i in distribution:
+ fuzzing_flags = ["--deopt-every-n-times", "%d" % i]
+ s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
+ num_tests += len(s.tests)
+ for t in s.tests:
+ t.id = test_id
+ test_id += 1
+
+ if num_tests == 0:
+ print "No tests to run."
+ return 0
+
+ print(">>> Deopt fuzzing phase (%d test cases)" % num_tests)
+ progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
+ runner = execution.Runner(suites, progress_indicator, ctx)
+
+ code = runner.Run(options.j)
+ return exit_code or code
+
+
+if __name__ == '__main__':
+ sys.exit(DeoptFuzzer().execute())
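
The number of fuzzing runs per test comes from _calculate_n_tests above: for a test with m deopt points, coverage c (--coverage) and lift l (--coverage-lift), it evaluates int(m ** ((m*c + l) / (m + l))). A small worked example with the defaults c=0.4, l=20:

# Worked example for _calculate_n_tests with --coverage=0.4 and
# --coverage-lift=20. The exponent (m*c + l) / (m + l) is close to 1 for
# small m (cover almost every deopt point) and approaches c for large m
# (only a sub-linear sample).
import math

def calculate_n_tests(m, coverage=0.4, coverage_lift=20):
  c = float(coverage)
  l = float(coverage_lift)
  return int(math.pow(m, (m * c + l) / (m + l)))

for m in (5, 20, 100, 1000):
  print("%4d deopt points -> %3d fuzzing runs" % (m, calculate_n_tests(m)))
# 5    -> 4   (exponent 22/25 = 0.88)
# 20   -> 8   (exponent 28/40 = 0.70)
# 100  -> 10  (exponent 60/120 = 0.50)
# 1000 -> 17  (exponent 420/1020 ~ 0.41)
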
diff --git a/deps/v8/tools/testrunner/gc_fuzzer.py b/deps/v8/tools/testrunner/gc_fuzzer.py
new file mode 100755
index 0000000000..4130fff8be
--- /dev/null
+++ b/deps/v8/tools/testrunner/gc_fuzzer.py
@@ -0,0 +1,341 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from os.path import join
+import itertools
+import json
+import math
+import multiprocessing
+import os
+import random
+import shlex
+import sys
+import time
+
+# Adds testrunner to the path, hence it has to be imported at the beginning.

+import base_runner
+
+from testrunner.local import execution
+from testrunner.local import progress
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.local import verbose
+from testrunner.objects import context
+
+
+DEFAULT_TESTS = ["mjsunit", "webkit"]
+TIMEOUT_DEFAULT = 60
+
+# Double the timeout for these:
+SLOW_ARCHS = ["arm",
+ "mipsel"]
+
+
+class GCFuzzer(base_runner.BaseTestRunner):
+ def __init__(self):
+ super(GCFuzzer, self).__init__()
+
+ self.fuzzer_rng = None
+
+ def _add_parser_options(self, parser):
+ parser.add_option("--command-prefix",
+ help="Prepended to each shell command used to run a test",
+ default="")
+ parser.add_option("--coverage", help=("Exponential test coverage "
+ "(range 0.0, 1.0) - 0.0: one test, 1.0 all tests (slow)"),
+ default=0.4, type="float")
+ parser.add_option("--coverage-lift", help=("Lifts test coverage for tests "
+ "with a low memory size reached (range 0, inf)"),
+ default=20, type="int")
+ parser.add_option("--dump-results-file", help="Dump maximum limit reached")
+ parser.add_option("--extra-flags",
+ help="Additional flags to pass to each test command",
+ default="")
+ parser.add_option("--isolates", help="Whether to test isolates",
+ default=False, action="store_true")
+ parser.add_option("-j", help="The number of parallel tasks to run",
+ default=0, type="int")
+ parser.add_option("-p", "--progress",
+ help=("The style of progress indicator"
+ " (verbose, dots, color, mono)"),
+ choices=progress.PROGRESS_INDICATORS.keys(),
+ default="mono")
+ parser.add_option("--shard-count",
+ help="Split testsuites into this number of shards",
+ default=1, type="int")
+ parser.add_option("--shard-run",
+ help="Run this shard from the split up tests.",
+ default=1, type="int")
+ parser.add_option("-t", "--timeout", help="Timeout in seconds",
+ default= -1, type="int")
+ parser.add_option("--random-seed", default=0,
+ help="Default seed for initializing random generator")
+ parser.add_option("--fuzzer-random-seed", default=0,
+ help="Default seed for initializing fuzzer random "
+ "generator")
+ parser.add_option("--stress-compaction", default=False, action="store_true",
+ help="Enable stress_compaction_percentage flag")
+
+ parser.add_option("--distribution-factor1", help="DEPRECATED")
+ parser.add_option("--distribution-factor2", help="DEPRECATED")
+ parser.add_option("--distribution-mode", help="DEPRECATED")
+ parser.add_option("--seed", help="DEPRECATED")
+ return parser
+
+
+ def _process_options(self, options):
+ # Special processing of other options, sorted alphabetically.
+ options.command_prefix = shlex.split(options.command_prefix)
+ options.extra_flags = shlex.split(options.extra_flags)
+ if options.j == 0:
+ options.j = multiprocessing.cpu_count()
+ while options.random_seed == 0:
+ options.random_seed = random.SystemRandom().randint(-2147483648,
+ 2147483647)
+ while options.fuzzer_random_seed == 0:
+ options.fuzzer_random_seed = random.SystemRandom().randint(-2147483648,
+ 2147483647)
+ self.fuzzer_rng = random.Random(options.fuzzer_random_seed)
+ return True
+
+ def _shard_tests(self, tests, shard_count, shard_run):
+ if shard_count < 2:
+ return tests
+ if shard_run < 1 or shard_run > shard_count:
+ print "shard-run not a valid number, should be in [1:shard-count]"
+ print "defaulting back to running all tests"
+ return tests
+ count = 0
+ shard = []
+ for test in tests:
+ if count % shard_count == shard_run - 1:
+ shard.append(test)
+ count += 1
+ return shard
+
+ def _do_execute(self, options, args):
+ suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
+
+ if len(args) == 0:
+ suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
+ else:
+ args_suites = set()
+ for arg in args:
+ suite = arg.split(os.path.sep)[0]
+ if not suite in args_suites:
+ args_suites.add(suite)
+ suite_paths = [ s for s in suite_paths if s in args_suites ]
+
+ suites = []
+ for root in suite_paths:
+ suite = testsuite.TestSuite.LoadTestSuite(
+ os.path.join(base_runner.BASE_DIR, "test", root))
+ if suite:
+ suites.append(suite)
+
+ try:
+ return self._execute(args, options, suites)
+ except KeyboardInterrupt:
+ return 2
+
+
+ def _calculate_n_tests(self, m, options):
+ """Calculates the number of tests from m points with exponential coverage.
+ The coverage is expected to be between 0.0 and 1.0.
+ The 'coverage lift' lifts the coverage for tests with smaller m values.
+ """
+ c = float(options.coverage)
+ l = float(options.coverage_lift)
+ return int(math.pow(m, (m * c + l) / (m + l)))
+
+
+ def _execute(self, args, options, suites):
+ print(">>> Running tests for %s.%s" % (self.build_config.arch,
+ self.mode_name))
+
+ # Populate context object.
+ timeout = options.timeout
+ if timeout == -1:
+ # Simulators are slow, therefore allow a longer default timeout.
+ if self.build_config.arch in SLOW_ARCHS:
+ timeout = 2 * TIMEOUT_DEFAULT;
+ else:
+ timeout = TIMEOUT_DEFAULT;
+
+ timeout *= self.mode_options.timeout_scalefactor
+ ctx = context.Context(self.build_config.arch,
+ self.mode_options.execution_mode,
+ self.outdir,
+ self.mode_options.flags, options.verbose,
+ timeout, options.isolates,
+ options.command_prefix,
+ options.extra_flags,
+ False, # Keep i18n on by default.
+ options.random_seed,
+ True, # No sorting of test cases.
+ 0, # Don't rerun failing tests.
+ 0, # No use of a rerun-failing-tests maximum.
+ False, # No predictable mode.
+ False, # No no_harness mode.
+ False, # Don't use perf data.
+ False) # Coverage not supported.
+
+ num_tests = self._load_tests(args, options, suites, ctx)
+ if num_tests == 0:
+ print "No tests to run."
+ return 0
+
+ test_backup = dict(map(lambda s: (s, s.tests), suites))
+
+ print('>>> Collection phase')
+ for s in suites:
+ analysis_flags = [
+ # > 100% to not influence default incremental marking, but we need this
+ # flag to print reached incremental marking limit.
+ '--stress_marking', '1000',
+ '--trace_incremental_marking',
+ ]
+ s.tests = map(lambda t: t.CopyAddingFlags(t.variant, analysis_flags),
+ s.tests)
+
+ progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
+ runner = execution.Runner(suites, progress_indicator, ctx)
+ exit_code = runner.Run(options.j)
+
+ print('>>> Analysis phase')
+ test_results = dict()
+ for s in suites:
+ for t in s.tests:
+ # Skip failed tests.
+ if s.HasUnexpectedOutput(t):
+ print '%s failed, skipping' % t.path
+ continue
+ max_limit = self._get_max_limit_reached(t)
+ if max_limit:
+ test_results[t.path] = max_limit
+
+ if options.dump_results_file:
+ with file("%s.%d.txt" % (options.dump_results_file, time.time()),
+ "w") as f:
+ f.write(json.dumps(test_results))
+
+ num_tests = 0
+ for s in suites:
+ s.tests = []
+ for t in test_backup[s]:
+ max_percent = test_results.get(t.path, 0)
+ if not max_percent or max_percent < 1.0:
+ continue
+ max_percent = int(max_percent)
+
+ subtests_count = self._calculate_n_tests(max_percent, options)
+
+ if options.verbose:
+ print ('%s [x%d] (max marking limit=%.02f)' %
+ (t.path, subtests_count, max_percent))
+ for _ in xrange(0, subtests_count):
+ fuzzer_seed = self._next_fuzzer_seed()
+ fuzzing_flags = [
+ '--stress_marking', str(max_percent),
+ '--fuzzer_random_seed', str(fuzzer_seed),
+ ]
+ if options.stress_compaction:
+ fuzzing_flags.append('--stress_compaction_random')
+ s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
+ num_tests += len(s.tests)
+
+ if num_tests == 0:
+ print "No tests to run."
+ return 0
+
+ print(">>> Fuzzing phase (%d test cases)" % num_tests)
+ progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
+ runner = execution.Runner(suites, progress_indicator, ctx)
+
+ return runner.Run(options.j) or exit_code
+
+ def _load_tests(self, args, options, suites, ctx):
+ # Find available test suites and read test cases from them.
+ variables = {
+ "arch": self.build_config.arch,
+ "asan": self.build_config.asan,
+ "byteorder": sys.byteorder,
+ "dcheck_always_on": self.build_config.dcheck_always_on,
+ "deopt_fuzzer": False,
+ "gc_fuzzer": True,
+ "gc_stress": False,
+ "gcov_coverage": self.build_config.gcov_coverage,
+ "isolates": options.isolates,
+ "mode": self.mode_options.status_mode,
+ "msan": self.build_config.msan,
+ "no_harness": False,
+ "no_i18n": self.build_config.no_i18n,
+ "no_snap": self.build_config.no_snap,
+ "novfp3": False,
+ "predictable": self.build_config.predictable,
+ "simulator": utils.UseSimulator(self.build_config.arch),
+ "simulator_run": False,
+ "system": utils.GuessOS(),
+ "tsan": self.build_config.tsan,
+ "ubsan_vptr": self.build_config.ubsan_vptr,
+ }
+
+ num_tests = 0
+ test_id = 0
+ for s in suites:
+ s.ReadStatusFile(variables)
+ s.ReadTestCases(ctx)
+ if len(args) > 0:
+ s.FilterTestCasesByArgs(args)
+ s.FilterTestCasesByStatus(False)
+ for t in s.tests:
+ t.flags += s.GetStatusfileFlags(t)
+
+ num_tests += len(s.tests)
+ for t in s.tests:
+ t.id = test_id
+ test_id += 1
+
+ return num_tests
+
+ # Parses test stdout and returns what was the highest reached percent of the
+ # incremental marking limit (0-100).
+ # Skips values >=100% since they already trigger incremental marking.
+ @staticmethod
+ def _get_max_limit_reached(test):
+ def is_im_line(l):
+ return 'IncrementalMarking' in l and '% of the memory limit reached' in l
+
+ def line_to_percent(l):
+ return filter(lambda part: '%' in part, l.split(' '))[0]
+
+ def percent_str_to_float(s):
+ return float(s[:-1])
+
+ if not (test.output and test.output.stdout):
+ return None
+
+ im_lines = filter(is_im_line, test.output.stdout.splitlines())
+ percents_str = map(line_to_percent, im_lines)
+ percents = map(percent_str_to_float, percents_str)
+
+ # Skip >= 100%.
+ percents = filter(lambda p: p < 100, percents)
+
+ if not percents:
+ return None
+ return max(percents)
+
+ def _next_fuzzer_seed(self):
+ fuzzer_seed = None
+ while not fuzzer_seed:
+ fuzzer_seed = self.fuzzer_rng.randint(-2147483648, 2147483647)
+ return fuzzer_seed
+
+
+if __name__ == '__main__':
+ sys.exit(GCFuzzer().execute())
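
For reference, this is what GCFuzzer._get_max_limit_reached above extracts from a test's stdout. The exact wording of the trace line is an assumption here; the parser only relies on the substrings it filters for and on a token ending in "%":

# Illustration of the incremental-marking-limit parsing, with a made-up
# stdout. Lines at or above 100% are skipped, as in the real method.
sample_stdout = "\n".join([
  "[IncrementalMarking] Start: old generation growing",
  "[IncrementalMarking] 38.4% of the memory limit reached",
  "[IncrementalMarking] 71.9% of the memory limit reached",
  "[IncrementalMarking] 100.0% of the memory limit reached",  # skipped (>= 100)
])

def max_limit_reached(stdout):
  percents = []
  for line in stdout.splitlines():
    if 'IncrementalMarking' in line and '% of the memory limit reached' in line:
      token = [part for part in line.split(' ') if '%' in part][0]
      percents.append(float(token[:-1]))
  percents = [p for p in percents if p < 100]
  return max(percents) if percents else None

print(max_limit_reached(sample_stdout))  # 71.9
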
diff --git a/deps/v8/tools/testrunner/local/commands.py b/deps/v8/tools/testrunner/local/commands.py
index b2dc74e4d4..4afd450d2f 100644
--- a/deps/v8/tools/testrunner/local/commands.py
+++ b/deps/v8/tools/testrunner/local/commands.py
@@ -106,7 +106,24 @@ def RunProcess(verbose, timeout, args, additional_env, **rest):
print "Return code: %d" % tk.returncode
sys.stdout.flush()
else:
+ if utils.GuessOS() == "macos":
+ # TODO(machenbach): Temporary output for investigating hanging test
+ # driver on mac.
+ print "Attempting to kill process %d - cmd %s" % (process.pid, args)
+ try:
+ print subprocess.check_output(
+ "ps -e | egrep 'd8|cctest|unittests'", shell=True)
+ except Exception:
+ pass
+ sys.stdout.flush()
process.kill()
+ if utils.GuessOS() == "macos":
+ # TODO(machenbach): Temporary output for investigating hanging test
+ # driver on mac. This will probably not print much, since kill only
+ # sends the signal.
+ print "Return code after signalling the kill: %s" % process.returncode
+ sys.stdout.flush()
+
except OSError:
sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
@@ -127,6 +144,9 @@ def RunProcess(verbose, timeout, args, additional_env, **rest):
)
+# TODO(machenbach): Instead of passing args around, we should introduce an
+# immutable Command class (that just represents the command with all flags and
+# is pretty-printable) and a member method for running such a command.
def Execute(args, verbose=False, timeout=None, env=None):
args = [ c for c in args if c != "" ]
return RunProcess(verbose, timeout, args, env or {})
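
The TODO above asks for an immutable, pretty-printable Command object instead of passing argument lists around. One possible shape, as a sketch only (names and fields are assumptions, not the actual V8 implementation):

# Sketch of the Command object the TODO describes: it bundles the shell,
# flags and environment, can pretty-print itself, and has a member method
# for running the command.
import os
import subprocess


class Command(object):
  def __init__(self, shell, args=None, env=None):
    self.shell = shell
    self.args = tuple(args or [])
    self.env = dict(env or {})

  def to_argv(self):
    return [self.shell] + list(self.args)

  def execute(self):
    # The stored env entries are additions to the current environment,
    # matching the additional_env semantics of RunProcess above.
    env = dict(os.environ)
    env.update(self.env)
    return subprocess.call(self.to_argv(), env=env)

  def __str__(self):
    # Pretty-printable: quote parts that contain spaces.
    return " ".join('"%s"' % part if ' ' in part else part
                    for part in self.to_argv())
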
diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py
index dc55129a14..8cc3556cae 100644
--- a/deps/v8/tools/testrunner/local/execution.py
+++ b/deps/v8/tools/testrunner/local/execution.py
@@ -85,23 +85,28 @@ def MakeProcessContext(context, suite_names):
def GetCommand(test, context):
d8testflag = []
- shell = test.shell()
+ shell = test.suite.GetShellForTestCase(test)
if shell == "d8":
d8testflag = ["--test"]
if utils.IsWindows():
shell += ".exe"
if context.random_seed:
d8testflag += ["--random-seed=%s" % context.random_seed]
- cmd = (context.command_prefix +
- [os.path.abspath(os.path.join(context.shell_dir, shell))] +
- d8testflag +
- test.suite.GetFlagsForTestCase(test, context) +
- context.extra_flags)
- return cmd
+ files, flags, env = test.suite.GetParametersForTestCase(test, context)
+ cmd = (
+ context.command_prefix +
+ [os.path.abspath(os.path.join(context.shell_dir, shell))] +
+ d8testflag +
+ files +
+ context.extra_flags +
+ # Flags from test cases can overwrite extra cmd-line flags.
+ flags
+ )
+ return cmd, env
def _GetInstructions(test, context):
- command = GetCommand(test, context)
+ command, env = GetCommand(test, context)
timeout = context.timeout
if ("--stress-opt" in test.flags or
"--stress-opt" in context.mode_flags or
@@ -109,11 +114,10 @@ def _GetInstructions(test, context):
timeout *= 4
if "--noenable-vfp3" in context.extra_flags:
timeout *= 2
- # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
- # the like.
- if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
- timeout *= 2
- return Instructions(command, test.id, timeout, context.verbose, test.env)
+
+ # TODO(majeski): make it slow outcome dependent.
+ timeout *= 2
+ return Instructions(command, test.id, timeout, context.verbose, env)
class Job(object):
@@ -156,8 +160,9 @@ class TestJob(Job):
failures).
"""
if context.sancov_dir and output.pid is not None:
+ shell = self.test.suite.GetShellForTestCase(self.test)
sancov_file = os.path.join(
- context.sancov_dir, "%s.%d.sancov" % (self.test.shell(), output.pid))
+ context.sancov_dir, "%s.%d.sancov" % (shell, output.pid))
# Some tests are expected to fail and don't produce coverage data.
if os.path.exists(sancov_file):
@@ -177,6 +182,7 @@ class TestJob(Job):
self.test.SetSuiteObject(process_context.suites)
instr = _GetInstructions(self.test, process_context.context)
except Exception, e:
+ # TODO(majeski): Better exception reporting.
return SetupProblem(e, self.test)
start_time = time.time()
@@ -203,7 +209,7 @@ class Runner(object):
self.suite_names = [s.name for s in suites]
# Always pre-sort by status file, slowest tests first.
- slow_key = lambda t: statusfile.IsSlow(t.outcomes)
+ slow_key = lambda t: statusfile.IsSlow(t.suite.GetStatusFileOutcomes(t))
self.tests.sort(key=slow_key, reverse=True)
# Sort by stored duration of not opted out.
diff --git a/deps/v8/tools/testrunner/local/progress.py b/deps/v8/tools/testrunner/local/progress.py
index 6321cadece..e57a6e36c9 100644
--- a/deps/v8/tools/testrunner/local/progress.py
+++ b/deps/v8/tools/testrunner/local/progress.py
@@ -71,7 +71,7 @@ class ProgressIndicator(object):
}
def _EscapeCommand(self, test):
- command = execution.GetCommand(test, self.runner.context)
+ command, _ = execution.GetCommand(test, self.runner.context)
parts = []
for part in command:
if ' ' in part:
@@ -336,7 +336,8 @@ class JsonTestProgressIndicator(ProgressIndicator):
"flags": test.flags,
"command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
"duration": test.duration,
- "marked_slow": statusfile.IsSlow(test.outcomes),
+ "marked_slow": statusfile.IsSlow(
+ test.suite.GetStatusFileOutcomes(test)),
} for test in timed_tests[:20]
]
@@ -369,13 +370,13 @@ class JsonTestProgressIndicator(ProgressIndicator):
"stderr": test.output.stderr,
"exit_code": test.output.exit_code,
"result": test.suite.GetOutcome(test),
- "expected": list(test.outcomes or ["PASS"]),
+ "expected": test.suite.GetExpectedOutcomes(test),
"duration": test.duration,
# TODO(machenbach): This stores only the global random seed from the
# context and not possible overrides when using random-seed stress.
"random_seed": self.random_seed,
- "target_name": test.suite.shell(),
+ "target_name": test.suite.GetShellForTestCase(test),
"variant": test.variant,
})
@@ -414,11 +415,7 @@ class FlakinessTestProgressIndicator(ProgressIndicator):
assert outcome in ["PASS", "FAIL", "CRASH", "TIMEOUT"]
if test.run == 1:
# First run of this test.
- expected_outcomes = ([
- expected
- for expected in (test.outcomes or ["PASS"])
- if expected in ["PASS", "FAIL", "CRASH", "TIMEOUT"]
- ] or ["PASS"])
+ expected_outcomes = test.suite.GetExpectedOutcomes(test)
self.results[key] = {
"actual": outcome,
"expected": " ".join(expected_outcomes),
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 880837b8a7..7caf0711ca 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -31,31 +31,29 @@ import re
from variants import ALL_VARIANTS
from utils import Freeze
-# These outcomes can occur in a TestCase's outcomes list:
-SKIP = "SKIP"
+# Possible outcomes
FAIL = "FAIL"
PASS = "PASS"
-OKAY = "OKAY"
-TIMEOUT = "TIMEOUT"
-CRASH = "CRASH"
+TIMEOUT = "TIMEOUT" # TODO(majeski): unused in status files
+CRASH = "CRASH" # TODO(majeski): unused in status files
+
+# Outcomes only for status file, need special handling
+FAIL_OK = "FAIL_OK"
+FAIL_SLOPPY = "FAIL_SLOPPY"
+
+# Modifiers
+SKIP = "SKIP"
SLOW = "SLOW"
FAST_VARIANTS = "FAST_VARIANTS"
NO_VARIANTS = "NO_VARIANTS"
-# These are just for the status files and are mapped below in DEFS:
-FAIL_OK = "FAIL_OK"
-PASS_OR_FAIL = "PASS_OR_FAIL"
-FAIL_SLOPPY = "FAIL_SLOPPY"
ALWAYS = "ALWAYS"
KEYWORDS = {}
-for key in [SKIP, FAIL, PASS, OKAY, CRASH, SLOW, FAIL_OK,
- FAST_VARIANTS, NO_VARIANTS, PASS_OR_FAIL, FAIL_SLOPPY, ALWAYS]:
+for key in [SKIP, FAIL, PASS, CRASH, SLOW, FAIL_OK, FAST_VARIANTS, NO_VARIANTS,
+ FAIL_SLOPPY, ALWAYS]:
KEYWORDS[key] = key
-DEFS = {FAIL_OK: [FAIL, OKAY],
- PASS_OR_FAIL: [PASS, FAIL]}
-
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "big", "little",
@@ -87,25 +85,13 @@ def OnlyFastVariants(outcomes):
def IsPassOrFail(outcomes):
- return ((PASS in outcomes) and (FAIL in outcomes) and
- (not CRASH in outcomes) and (not OKAY in outcomes))
+ return (PASS in outcomes and
+ FAIL in outcomes and
+ CRASH not in outcomes)
def IsFailOk(outcomes):
- return (FAIL in outcomes) and (OKAY in outcomes)
-
-
-def _AddOutcome(result, new):
- global DEFS
- if new in DEFS:
- mapped = DEFS[new]
- if type(mapped) == list:
- for m in mapped:
- _AddOutcome(result, m)
- elif type(mapped) == str:
- _AddOutcome(result, mapped)
- else:
- result.add(new)
+ return FAIL_OK in outcomes
def _JoinsPassAndFail(outcomes1, outcomes2):
@@ -114,13 +100,17 @@ def _JoinsPassAndFail(outcomes1, outcomes2):
"""
return (
PASS in outcomes1 and
- not FAIL in outcomes1 and
- FAIL in outcomes2
+ not (FAIL in outcomes1 or FAIL_OK in outcomes1) and
+ (FAIL in outcomes2 or FAIL_OK in outcomes2)
)
VARIANT_EXPRESSION = object()
def _EvalExpression(exp, variables):
+ """Evaluates expression and returns its result. In case of NameError caused by
+ undefined "variant" identifier returns VARIANT_EXPRESSION marker.
+ """
+
try:
return eval(exp, variables)
except NameError as e:
@@ -129,32 +119,35 @@ def _EvalExpression(exp, variables):
return VARIANT_EXPRESSION
-def _EvalVariantExpression(section, rules, wildcards, variant, variables):
- variables_with_variant = {}
- variables_with_variant.update(variables)
+def _EvalVariantExpression(
+ condition, section, variables, variant, rules, prefix_rules):
+ variables_with_variant = dict(variables)
variables_with_variant["variant"] = variant
- result = _EvalExpression(section[0], variables_with_variant)
+ result = _EvalExpression(condition, variables_with_variant)
assert result != VARIANT_EXPRESSION
if result is True:
_ReadSection(
- section[1],
- rules[variant],
- wildcards[variant],
+ section,
variables_with_variant,
+ rules[variant],
+ prefix_rules[variant],
)
else:
assert result is False, "Make sure expressions evaluate to boolean values"
-def _ParseOutcomeList(rule, outcomes, target_dict, variables):
+def _ParseOutcomeList(rule, outcomes, variables, target_dict):
+ """Outcome list format: [condition, outcome, outcome, ...]"""
+
result = set([])
if type(outcomes) == str:
outcomes = [outcomes]
for item in outcomes:
if type(item) == str:
- _AddOutcome(result, item)
+ result.add(item)
elif type(item) == list:
- exp = _EvalExpression(item[0], variables)
+ condition = item[0]
+ exp = _EvalExpression(condition, variables)
assert exp != VARIANT_EXPRESSION, (
"Nested variant expressions are not supported")
if exp is False:
@@ -166,10 +159,11 @@ def _ParseOutcomeList(rule, outcomes, target_dict, variables):
for outcome in item[1:]:
assert type(outcome) == str
- _AddOutcome(result, outcome)
+ result.add(outcome)
else:
assert False
- if len(result) == 0: return
+ if len(result) == 0:
+ return
if rule in target_dict:
# A FAIL without PASS in one rule has always precedence over a single
# PASS (without FAIL) in another. Otherwise the default PASS expectation
@@ -186,51 +180,69 @@ def _ParseOutcomeList(rule, outcomes, target_dict, variables):
def ReadContent(content):
- global KEYWORDS
return eval(content, KEYWORDS)
def ReadStatusFile(content, variables):
- # Empty defaults for rules and wildcards. Variant-independent
+ """Status file format
+ Status file := [section]
+ section = [CONDITION, section_rules]
+ section_rules := {path: outcomes}
+ outcomes := outcome | [outcome, ...]
+ outcome := SINGLE_OUTCOME | [CONDITION, SINGLE_OUTCOME, SINGLE_OUTCOME, ...]
+ """
+
+ # Empty defaults for rules and prefix_rules. Variant-independent
# rules are mapped by "", others by the variant name.
rules = {variant: {} for variant in ALL_VARIANTS}
rules[""] = {}
- wildcards = {variant: {} for variant in ALL_VARIANTS}
- wildcards[""] = {}
+ prefix_rules = {variant: {} for variant in ALL_VARIANTS}
+ prefix_rules[""] = {}
variables.update(VARIABLES)
- for section in ReadContent(content):
- assert type(section) == list
- assert len(section) == 2
- exp = _EvalExpression(section[0], variables)
+ for conditional_section in ReadContent(content):
+ assert type(conditional_section) == list
+ assert len(conditional_section) == 2
+ condition, section = conditional_section
+ exp = _EvalExpression(condition, variables)
+
+ # The expression is variant-independent and evaluates to False.
if exp is False:
- # The expression is variant-independent and evaluates to False.
continue
- elif exp == VARIANT_EXPRESSION:
- # If the expression contains one or more "variant" keywords, we evaluate
- # it for all possible variants and create rules for those that apply.
- for variant in ALL_VARIANTS:
- _EvalVariantExpression(section, rules, wildcards, variant, variables)
- else:
- # The expression is variant-independent and evaluates to True.
- assert exp is True, "Make sure expressions evaluate to boolean values"
+
+ # The expression is variant-independent and evaluates to True.
+ if exp is True:
_ReadSection(
- section[1],
- rules[""],
- wildcards[""],
+ section,
variables,
+ rules[''],
+ prefix_rules[''],
)
- return Freeze(rules), Freeze(wildcards)
+ continue
+ # The expression is variant-dependent (contains "variant" keyword)
+ if exp == VARIANT_EXPRESSION:
+ # If the expression contains one or more "variant" keywords, we evaluate
+ # it for all possible variants and create rules for those that apply.
+ for variant in ALL_VARIANTS:
+ _EvalVariantExpression(
+ condition, section, variables, variant, rules, prefix_rules)
+ continue
-def _ReadSection(section, rules, wildcards, variables):
+ assert False, "Make sure expressions evaluate to boolean values"
+
+ return Freeze(rules), Freeze(prefix_rules)
+
+
+def _ReadSection(section, variables, rules, prefix_rules):
assert type(section) == dict
- for rule in section:
+ for rule, outcome_list in section.iteritems():
assert type(rule) == str
+
if rule[-1] == '*':
- _ParseOutcomeList(rule, section[rule], wildcards, variables)
+ _ParseOutcomeList(rule[:-1], outcome_list, variables, prefix_rules)
else:
- _ParseOutcomeList(rule, section[rule], rules, variables)
+ _ParseOutcomeList(rule, outcome_list, variables, rules)
JS_TEST_PATHS = {
'debugger': [[]],
@@ -266,6 +278,8 @@ def PresubmitCheck(path):
"Suite name prefix must not be used in rule keys")
_assert(not rule.endswith('.js'),
".js extension must not be used in rule keys.")
+ _assert('*' not in rule or (rule.count('*') == 1 and rule[-1] == '*'),
+ "Only the last character of a rule key can be a wildcard")
if basename in JS_TEST_PATHS and '*' not in rule:
_assert(any(os.path.exists(os.path.join(os.path.dirname(path),
*(paths + [rule + ".js"])))
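
For reference, a minimal hypothetical status file matching the grammar
documented in ReadStatusFile above (the condition, rule keys and outcomes are
illustrative, not taken from a real V8 status file):

  [
  ['system == linux', {
    # Plain rule: one or more outcomes keyed by the full test name.
    'regress/some-test': [SKIP],
    # Prefix rule: the trailing '*' is the only wildcard PresubmitCheck allows.
    'slow-suite/*': [SLOW],
    # Outcome list with a nested condition: [condition, outcome, ...].
    'flaky/other-test': [PASS, ['mode == debug', FAIL]],
  }],
  ]
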
diff --git a/deps/v8/tools/testrunner/local/statusfile_unittest.py b/deps/v8/tools/testrunner/local/statusfile_unittest.py
index f64ab3425e..299e332c1c 100755
--- a/deps/v8/tools/testrunner/local/statusfile_unittest.py
+++ b/deps/v8/tools/testrunner/local/statusfile_unittest.py
@@ -87,7 +87,7 @@ class StatusFileTest(unittest.TestCase):
)
def test_read_statusfile_section_true(self):
- rules, wildcards = statusfile.ReadStatusFile(
+ rules, prefix_rules = statusfile.ReadStatusFile(
TEST_STATUS_FILE % 'system==linux', make_variables())
self.assertEquals(
@@ -99,15 +99,15 @@ class StatusFileTest(unittest.TestCase):
)
self.assertEquals(
{
- 'foo/*': set(['SLOW', 'FAIL']),
+ 'foo/': set(['SLOW', 'FAIL']),
},
- wildcards[''],
+ prefix_rules[''],
)
self.assertEquals({}, rules['default'])
- self.assertEquals({}, wildcards['default'])
+ self.assertEquals({}, prefix_rules['default'])
def test_read_statusfile_section_false(self):
- rules, wildcards = statusfile.ReadStatusFile(
+ rules, prefix_rules = statusfile.ReadStatusFile(
TEST_STATUS_FILE % 'system==windows', make_variables())
self.assertEquals(
@@ -119,15 +119,15 @@ class StatusFileTest(unittest.TestCase):
)
self.assertEquals(
{
- 'foo/*': set(['PASS', 'SLOW']),
+ 'foo/': set(['PASS', 'SLOW']),
},
- wildcards[''],
+ prefix_rules[''],
)
self.assertEquals({}, rules['default'])
- self.assertEquals({}, wildcards['default'])
+ self.assertEquals({}, prefix_rules['default'])
def test_read_statusfile_section_variant(self):
- rules, wildcards = statusfile.ReadStatusFile(
+ rules, prefix_rules = statusfile.ReadStatusFile(
TEST_STATUS_FILE % 'system==linux and variant==default',
make_variables(),
)
@@ -141,9 +141,9 @@ class StatusFileTest(unittest.TestCase):
)
self.assertEquals(
{
- 'foo/*': set(['PASS', 'SLOW']),
+ 'foo/': set(['PASS', 'SLOW']),
},
- wildcards[''],
+ prefix_rules[''],
)
self.assertEquals(
{
@@ -153,9 +153,9 @@ class StatusFileTest(unittest.TestCase):
)
self.assertEquals(
{
- 'foo/*': set(['FAIL']),
+ 'foo/': set(['FAIL']),
},
- wildcards['default'],
+ prefix_rules['default'],
)
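
Loosely, the call pattern these tests exercise is the following sketch (the
variables dict is illustrative; real callers pass the full build configuration
and ReadStatusFile merges in statusfile's own VARIABLES):

  from testrunner.local import statusfile  # assumes tools/ is on sys.path

  content = "[['system == linux', {'foo/bar': [SLOW], 'foo/*': [SKIP]}]]"
  rules, prefix_rules = statusfile.ReadStatusFile(content, {'system': 'linux'})
  # rules[''] maps 'foo/bar' -> set(['SLOW']); prefix_rules[''] stores the
  # former wildcard key without its '*', i.e. 'foo/' -> set(['SKIP']).
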
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 3b8f956a7f..946e89a3fc 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -50,15 +50,17 @@ class VariantGenerator(object):
def FilterVariantsByTest(self, testcase):
result = self.all_variants
- if testcase.outcomes:
- if statusfile.OnlyStandardVariant(testcase.outcomes):
+ outcomes = testcase.suite.GetStatusFileOutcomes(testcase)
+ if outcomes:
+ if statusfile.OnlyStandardVariant(outcomes):
return self.standard_variant
- if statusfile.OnlyFastVariants(testcase.outcomes):
+ if statusfile.OnlyFastVariants(outcomes):
result = self.fast_variants
return result
def GetFlagSets(self, testcase, variant):
- if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
+ outcomes = testcase.suite.GetStatusFileOutcomes(testcase)
+ if outcomes and statusfile.OnlyFastVariants(outcomes):
return FAST_VARIANT_FLAGS[variant]
else:
return ALL_VARIANT_FLAGS[variant]
@@ -86,12 +88,11 @@ class TestSuite(object):
self.name = name # string
self.root = root # string containing path
self.tests = None # list of TestCase objects
- self.rules = None # dictionary mapping test path to list of outcomes
- self.wildcards = None # dictionary mapping test paths to list of outcomes
+ self.rules = None # {variant: {test name: [rule]}}
+ self.prefix_rules = None # {variant: {test name prefix: [rule]}}
self.total_duration = None # float, assigned on demand
- def shell(self):
- return "d8"
+ self._outcomes_cache = dict()
def suffix(self):
return ".js"
@@ -131,109 +132,104 @@ class TestSuite(object):
"""
pass
- def DownloadData(self):
- pass
-
def ReadStatusFile(self, variables):
with open(self.status_file()) as f:
- self.rules, self.wildcards = (
+ self.rules, self.prefix_rules = (
statusfile.ReadStatusFile(f.read(), variables))
def ReadTestCases(self, context):
self.tests = self.ListTests(context)
- @staticmethod
- def _FilterSlow(slow, mode):
- return (mode == "run" and not slow) or (mode == "skip" and slow)
+ def GetStatusfileFlags(self, test):
+ """Gets runtime flags from a status file.
- @staticmethod
- def _FilterPassFail(pass_fail, mode):
- return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)
-
- def FilterTestCasesByStatus(self, warn_unused_rules,
- slow_tests="dontcare",
- pass_fail_tests="dontcare",
- variants=False):
-
- # Use only variants-dependent rules and wildcards when filtering
- # respective test cases and generic rules when filtering generic test
- # cases.
- if not variants:
- rules = self.rules[""]
- wildcards = self.wildcards[""]
- else:
- # We set rules and wildcards to a variant-specific version for each test
- # below.
- rules = {}
- wildcards = {}
+ Every outcome that starts with "--" is a flag. Status file has to be loaded
+ before using this function.
+ """
+ flags = []
+ for outcome in self.GetStatusFileOutcomes(test):
+ if outcome.startswith('--'):
+ flags.append(outcome)
+ return flags
- filtered = []
+ def FilterTestCasesByStatus(self,
+ slow_tests_mode=None,
+ pass_fail_tests_mode=None):
+ """Filters tests by outcomes from status file.
+
+ Status file has to be loaded before using this function.
+
+ Args:
+ slow_tests_mode: What to do with slow tests.
+ pass_fail_tests_mode: What to do with pass or fail tests.
- # Remember used rules as tuples of (rule, variant), where variant is "" for
- # variant-independent rules.
+ Mode options:
+ None (default) - don't skip
+ "skip" - skip if slow/pass_fail
+ "run" - skip if not slow/pass_fail
+ """
+ def _skip_slow(is_slow, mode):
+ return (
+ (mode == 'run' and not is_slow) or
+ (mode == 'skip' and is_slow))
+
+ def _skip_pass_fail(pass_fail, mode):
+ return (
+ (mode == 'run' and not pass_fail) or
+ (mode == 'skip' and pass_fail))
+
+ def _compliant(test):
+ outcomes = self.GetStatusFileOutcomes(test)
+ if statusfile.DoSkip(outcomes):
+ return False
+ if _skip_slow(statusfile.IsSlow(outcomes), slow_tests_mode):
+ return False
+ if _skip_pass_fail(statusfile.IsPassOrFail(outcomes),
+ pass_fail_tests_mode):
+ return False
+ return True
+
+ self.tests = filter(_compliant, self.tests)
+
+ def WarnUnusedRules(self, check_variant_rules=False):
+ """Finds and prints unused rules in status file.
+
+ Rule X is unused when it doesn't apply to any tests, which can also mean
+ that all matching tests were skipped by another rule before evaluating X.
+
+ Status file has to be loaded before using this function.
+ """
+
+ if check_variant_rules:
+ variants = list(ALL_VARIANTS)
+ else:
+ variants = ['']
used_rules = set()
for t in self.tests:
- slow = False
- pass_fail = False
testname = self.CommonTestName(t)
variant = t.variant or ""
- if variants:
- rules = self.rules[variant]
- wildcards = self.wildcards[variant]
- if testname in rules:
- used_rules.add((testname, variant))
- # Even for skipped tests, as the TestCase object stays around and
- # PrintReport() uses it.
- t.outcomes = t.outcomes | rules[testname]
- if statusfile.DoSkip(t.outcomes):
- continue # Don't add skipped tests to |filtered|.
- for outcome in t.outcomes:
- if outcome.startswith('Flags: '):
- t.flags += outcome[7:].split()
- slow = statusfile.IsSlow(t.outcomes)
- pass_fail = statusfile.IsPassOrFail(t.outcomes)
- skip = False
- for rule in wildcards:
- assert rule[-1] == '*'
- if testname.startswith(rule[:-1]):
- used_rules.add((rule, variant))
- t.outcomes = t.outcomes | wildcards[rule]
- if statusfile.DoSkip(t.outcomes):
- skip = True
- break # "for rule in wildcards"
- slow = slow or statusfile.IsSlow(t.outcomes)
- pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
- if (skip
- or self._FilterSlow(slow, slow_tests)
- or self._FilterPassFail(pass_fail, pass_fail_tests)):
- continue # "for t in self.tests"
- filtered.append(t)
- self.tests = filtered
-
- if not warn_unused_rules:
- return
-
- if not variants:
- for rule in self.rules[""]:
- if (rule, "") not in used_rules:
- print("Unused rule: %s -> %s (variant independent)" % (
- rule, self.rules[""][rule]))
- for rule in self.wildcards[""]:
- if (rule, "") not in used_rules:
- print("Unused rule: %s -> %s (variant independent)" % (
- rule, self.wildcards[""][rule]))
- else:
- for variant in ALL_VARIANTS:
- for rule in self.rules[variant]:
- if (rule, variant) not in used_rules:
- print("Unused rule: %s -> %s (variant: %s)" % (
- rule, self.rules[variant][rule], variant))
- for rule in self.wildcards[variant]:
- if (rule, variant) not in used_rules:
- print("Unused rule: %s -> %s (variant: %s)" % (
- rule, self.wildcards[variant][rule], variant))
+ if testname in self.rules.get(variant, {}):
+ used_rules.add((testname, variant))
+ if statusfile.DoSkip(self.rules[variant][testname]):
+ continue
+
+ for prefix in self.prefix_rules.get(variant, {}):
+ if testname.startswith(prefix):
+ used_rules.add((prefix, variant))
+ if statusfile.DoSkip(self.prefix_rules[variant][prefix]):
+ break
+
+ for variant in variants:
+ for rule, value in (list(self.rules.get(variant, {}).iteritems()) +
+ list(self.prefix_rules.get(variant, {}).iteritems())):
+ if (rule, variant) not in used_rules:
+ if variant == '':
+ variant_desc = 'variant independent'
+ else:
+ variant_desc = 'variant: %s' % variant
+ print('Unused rule: %s -> %s (%s)' % (rule, value, variant_desc))
def FilterTestCasesByArgs(self, args):
"""Filter test cases based on command-line arguments.
@@ -260,7 +256,66 @@ class TestSuite(object):
break
self.tests = filtered
- def GetFlagsForTestCase(self, testcase, context):
+ def GetExpectedOutcomes(self, testcase):
+ """Gets expected outcomes from status file.
+
+ It differs from GetStatusFileOutcomes by selecting only those outcomes that
+ can be the result of test execution.
+ Status file has to be loaded before using this function.
+ """
+ outcomes = self.GetStatusFileOutcomes(testcase)
+
+ expected = []
+ if (statusfile.FAIL in outcomes or
+ statusfile.FAIL_OK in outcomes):
+ expected.append(statusfile.FAIL)
+
+ if statusfile.CRASH in outcomes:
+ expected.append(statusfile.CRASH)
+
+ if statusfile.PASS in outcomes:
+ expected.append(statusfile.PASS)
+
+ return expected or [statusfile.PASS]
+
+ def GetStatusFileOutcomes(self, testcase):
+ """Gets outcomes from status file.
+
+ Merges variant-dependent and variant-independent rules. Status file has to
+ be loaded before using this function.
+ """
+ variant = testcase.variant or ''
+ testname = self.CommonTestName(testcase)
+ cache_key = '%s$%s' % (testname, variant)
+
+ if cache_key not in self._outcomes_cache:
+ # Load statusfile to get outcomes for the first time.
+ assert(self.rules is not None)
+ assert(self.prefix_rules is not None)
+
+ outcomes = frozenset()
+
+ for key in set([variant, '']):
+ rules = self.rules.get(key, {})
+ prefix_rules = self.prefix_rules.get(key, {})
+
+ if testname in rules:
+ outcomes |= rules[testname]
+
+ for prefix in prefix_rules:
+ if testname.startswith(prefix):
+ outcomes |= prefix_rules[prefix]
+
+ self._outcomes_cache[cache_key] = outcomes
+
+ return self._outcomes_cache[cache_key]
+
+ def GetShellForTestCase(self, testcase):
+ """Returns shell to be executed for this test case."""
+ return 'd8'
+
+ def GetParametersForTestCase(self, testcase, context):
+ """Returns a tuple of (files, flags, env) for this test case."""
raise NotImplementedError
def GetSourceForTest(self, testcase):
@@ -290,8 +345,7 @@ class TestSuite(object):
return statusfile.PASS
def HasUnexpectedOutput(self, testcase):
- outcome = self.GetOutcome(testcase)
- return not outcome in (testcase.outcomes or [statusfile.PASS])
+ return self.GetOutcome(testcase) not in self.GetExpectedOutcomes(testcase)
def StripOutputForTransmit(self, testcase):
if not self.HasUnexpectedOutput(testcase):
@@ -315,18 +369,24 @@ class GoogleTestSuite(TestSuite):
super(GoogleTestSuite, self).__init__(name, root)
def ListTests(self, context):
- shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
+ shell = os.path.abspath(
+ os.path.join(context.shell_dir, self.GetShellForTestCase(None)))
if utils.IsWindows():
shell += ".exe"
output = None
for i in xrange(3): # Try 3 times in case of errors.
- output = commands.Execute(context.command_prefix +
- [shell, "--gtest_list_tests"] +
- context.extra_flags)
+ cmd = (
+ context.command_prefix +
+ [shell, "--gtest_list_tests"] +
+ context.extra_flags
+ )
+ output = commands.Execute(cmd)
if output.exit_code == 0:
break
- print "Test executable failed to list the tests (try %d).\n\nStdout:" % i
+ print "Test executable failed to list the tests (try %d).\n\nCmd:" % i
+ print ' '.join(cmd)
+ print "\nStdout:"
print output.stdout
print "\nStderr:"
print output.stderr
@@ -346,14 +406,17 @@ class GoogleTestSuite(TestSuite):
tests.sort(key=lambda t: t.path)
return tests
- def GetFlagsForTestCase(self, testcase, context):
- return (testcase.flags + ["--gtest_filter=" + testcase.path] +
- ["--gtest_random_seed=%s" % context.random_seed] +
- ["--gtest_print_time=0"] +
- context.mode_flags)
+ def GetParametersForTestCase(self, testcase, context):
+ flags = (
+ testcase.flags +
+ ["--gtest_filter=" + testcase.path] +
+ ["--gtest_random_seed=%s" % context.random_seed] +
+ ["--gtest_print_time=0"] +
+ context.mode_flags)
+ return [], flags, {}
def _VariantGeneratorFactory(self):
return StandardVariantGenerator
- def shell(self):
+ def GetShellForTestCase(self, testcase):
return self.name
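
Taken together, a hypothetical runner-side sequence against the reworked
TestSuite API looks roughly like this (suite loading, context and variables
are elided):

  suite.ReadStatusFile(variables)   # fills suite.rules / suite.prefix_rules
  suite.ReadTestCases(context)      # fills suite.tests

  # Tests marked SKIP are always dropped; 'skip' additionally drops slow
  # tests, 'run' keeps only slow tests (pass_fail_tests_mode works the same).
  suite.FilterTestCasesByStatus(slow_tests_mode='skip',
                                pass_fail_tests_mode=None)
  suite.WarnUnusedRules(check_variant_rules=True)

  for test in suite.tests:
      flags = suite.GetStatusfileFlags(test)      # outcomes starting with '--'
      expected = suite.GetExpectedOutcomes(test)  # FAIL/CRASH/PASS, default [PASS]
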
diff --git a/deps/v8/tools/testrunner/local/testsuite_unittest.py b/deps/v8/tools/testrunner/local/testsuite_unittest.py
index 1e10ef5564..a8483b9fc0 100755
--- a/deps/v8/tools/testrunner/local/testsuite_unittest.py
+++ b/deps/v8/tools/testrunner/local/testsuite_unittest.py
@@ -29,17 +29,18 @@ class TestSuiteTest(unittest.TestCase):
'baz/bar': set(['PASS', 'FAIL']),
},
}
- suite.wildcards = {
+ suite.prefix_rules = {
'': {
- 'baz/*': set(['PASS', 'SLOW']),
+ 'baz/': set(['PASS', 'SLOW']),
},
}
- suite.FilterTestCasesByStatus(warn_unused_rules=False)
+ suite.FilterTestCasesByStatus()
self.assertEquals(
[TestCase(suite, 'baz/bar')],
suite.tests,
)
- self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), suite.tests[0].outcomes)
+ outcomes = suite.GetStatusFileOutcomes(suite.tests[0])
+ self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), outcomes)
def test_filter_testcases_by_status_second_pass(self):
suite = TestSuite('foo', 'bar')
@@ -47,10 +48,6 @@ class TestSuiteTest(unittest.TestCase):
test1 = TestCase(suite, 'foo/bar')
test2 = TestCase(suite, 'baz/bar')
- # Contrived outcomes from filtering by variant-independent rules.
- test1.outcomes = set(['PREV'])
- test2.outcomes = set(['PREV'])
-
suite.tests = [
test1.CopyAddingFlags(variant='default', flags=[]),
test1.CopyAddingFlags(variant='stress', flags=['-v']),
@@ -59,6 +56,9 @@ class TestSuiteTest(unittest.TestCase):
]
suite.rules = {
+ '': {
+ 'foo/bar': set(['PREV']),
+ },
'default': {
'foo/bar': set(['PASS', 'SKIP']),
'baz/bar': set(['PASS', 'FAIL']),
@@ -67,15 +67,18 @@ class TestSuiteTest(unittest.TestCase):
'baz/bar': set(['SKIP']),
},
}
- suite.wildcards = {
+ suite.prefix_rules = {
+ '': {
+ 'baz/': set(['PREV']),
+ },
'default': {
- 'baz/*': set(['PASS', 'SLOW']),
+ 'baz/': set(['PASS', 'SLOW']),
},
'stress': {
- 'foo/*': set(['PASS', 'SLOW']),
+ 'foo/': set(['PASS', 'SLOW']),
},
}
- suite.FilterTestCasesByStatus(warn_unused_rules=False, variants=True)
+ suite.FilterTestCasesByStatus()
self.assertEquals(
[
TestCase(suite, 'foo/bar', flags=['-v']),
@@ -85,14 +88,32 @@ class TestSuiteTest(unittest.TestCase):
)
self.assertEquals(
- set(['PASS', 'SLOW', 'PREV']),
- suite.tests[0].outcomes,
+ set(['PREV', 'PASS', 'SLOW']),
+ suite.GetStatusFileOutcomes(suite.tests[0]),
)
self.assertEquals(
- set(['PASS', 'FAIL', 'SLOW', 'PREV']),
- suite.tests[1].outcomes,
+ set(['PREV', 'PASS', 'FAIL', 'SLOW']),
+ suite.GetStatusFileOutcomes(suite.tests[1]),
)
+ def test_fail_ok_outcome(self):
+ suite = TestSuite('foo', 'bar')
+ suite.tests = [
+ TestCase(suite, 'foo/bar'),
+ TestCase(suite, 'baz/bar'),
+ ]
+ suite.rules = {
+ '': {
+ 'foo/bar': set(['FAIL_OK']),
+ 'baz/bar': set(['FAIL']),
+ },
+ }
+ suite.prefix_rules = {}
+
+ for t in suite.tests:
+ expected_outcomes = suite.GetExpectedOutcomes(t)
+ self.assertEquals(['FAIL'], expected_outcomes)
+
if __name__ == '__main__':
unittest.main()
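
The reworked expectations can be exercised directly; both unit test modules
are executable, e.g.:

$ python deps/v8/tools/testrunner/local/statusfile_unittest.py
$ python deps/v8/tools/testrunner/local/testsuite_unittest.py
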
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index 9efa060bba..c8c7ce64a8 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -5,28 +5,37 @@
# Use this to run several variants of the tests.
ALL_VARIANT_FLAGS = {
"default": [[]],
+ "future": [["--future"]],
+ "liftoff": [["--liftoff"]],
"stress": [["--stress-opt", "--always-opt"]],
- "stress_incremental_marking": [["--stress-incremental-marking"]],
+ # TODO(6792): Write-protected code has been temporarily added to the
+ # variant below until the feature has been enabled (or staged) by default.
+ "stress_incremental_marking": [["--stress-incremental-marking", "--write-protect-code-memory"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
# https://chromium-review.googlesource.com/c/452620/ for more discussion.
"nooptimization": [["--noopt"]],
- "stress_asm_wasm": [["--validate-asm", "--stress-validate-asm", "--suppress-asm-messages"]],
- "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks"]],
+ "stress_background_compile": [["--background-compile", "--stress-background-compile"]],
+ "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks", "--wasm-jit-to-native"]],
}
# FAST_VARIANTS implies no --always-opt.
FAST_VARIANT_FLAGS = {
"default": [[]],
+ "future": [["--future"]],
+ "liftoff": [["--liftoff"]],
"stress": [["--stress-opt"]],
- "stress_incremental_marking": [["--stress-incremental-marking"]],
+ # TODO(6792): Write-protected code has been temporarily added to the
+ # variant below until the feature has been enabled (or staged) by default.
+ "stress_incremental_marking": [["--stress-incremental-marking", "--write-protect-code-memory"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
# https://chromium-review.googlesource.com/c/452620/ for more discussion.
"nooptimization": [["--noopt"]],
- "stress_asm_wasm": [["--validate-asm", "--stress-validate-asm", "--suppress-asm-messages"]],
- "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks"]],
+ "stress_background_compile": [["--background-compile", "--stress-background-compile"]],
+ "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks", "--wasm-jit-to-native"]],
}
-ALL_VARIANTS = set(["default", "stress", "stress_incremental_marking",
- "nooptimization", "stress_asm_wasm", "wasm_traps"])
+ALL_VARIANTS = set(["default", "future", "liftoff", "stress",
+ "stress_incremental_marking", "nooptimization",
+ "stress_background_compile", "wasm_traps"])
diff --git a/deps/v8/tools/testrunner/local/verbose.py b/deps/v8/tools/testrunner/local/verbose.py
index 00c330d2d9..f28398fa42 100644
--- a/deps/v8/tools/testrunner/local/verbose.py
+++ b/deps/v8/tools/testrunner/local/verbose.py
@@ -35,7 +35,6 @@ from . import statusfile
REPORT_TEMPLATE = (
"""Total: %(total)i tests
* %(skipped)4d tests will be skipped
- * %(timeout)4d tests are expected to timeout sometimes
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
@@ -44,24 +43,27 @@ REPORT_TEMPLATE = (
def PrintReport(tests):
total = len(tests)
- skipped = timeout = nocrash = passes = fail_ok = fail = 0
+ skipped = nocrash = passes = fail_ok = fail = 0
for t in tests:
- if "outcomes" not in dir(t) or not t.outcomes:
+ outcomes = t.suite.GetStatusFileOutcomes(t)
+ if not outcomes:
passes += 1
continue
- o = t.outcomes
- if statusfile.DoSkip(o):
+ if statusfile.DoSkip(outcomes):
skipped += 1
continue
- if statusfile.TIMEOUT in o: timeout += 1
- if statusfile.IsPassOrFail(o): nocrash += 1
- if list(o) == [statusfile.PASS]: passes += 1
- if statusfile.IsFailOk(o): fail_ok += 1
- if list(o) == [statusfile.FAIL]: fail += 1
+ if statusfile.IsPassOrFail(outcomes):
+ nocrash += 1
+ if list(outcomes) == [statusfile.PASS]:
+ passes += 1
+ if statusfile.IsFailOk(outcomes):
+ fail_ok += 1
+ if list(outcomes) == [statusfile.FAIL]:
+ fail += 1
+
print REPORT_TEMPLATE % {
"total": total,
"skipped": skipped,
- "timeout": timeout,
"nocrash": nocrash,
"pass": passes,
"fail_ok": fail_ok,
diff --git a/deps/v8/tools/testrunner/network/__init__.py b/deps/v8/tools/testrunner/network/__init__.py
deleted file mode 100644
index 202a262709..0000000000
--- a/deps/v8/tools/testrunner/network/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/v8/tools/testrunner/network/distro.py b/deps/v8/tools/testrunner/network/distro.py
deleted file mode 100644
index 9d5a471d44..0000000000
--- a/deps/v8/tools/testrunner/network/distro.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-class Shell(object):
- def __init__(self, shell):
- self.shell = shell
- self.tests = []
- self.total_duration = 0.0
-
- def AddSuite(self, suite):
- self.tests += suite.tests
- self.total_duration += suite.total_duration
-
- def SortTests(self):
- self.tests.sort(cmp=lambda x, y: cmp(x.duration, y.duration))
-
-
-def Assign(suites, peers):
- total_work = 0.0
- for s in suites:
- total_work += s.CalculateTotalDuration()
-
- total_power = 0.0
- for p in peers:
- p.assigned_work = 0.0
- total_power += p.jobs * p.relative_performance
- for p in peers:
- p.needed_work = total_work * p.jobs * p.relative_performance / total_power
-
- shells = {}
- for s in suites:
- shell = s.shell()
- if not shell in shells:
- shells[shell] = Shell(shell)
- shells[shell].AddSuite(s)
- # Convert |shells| to list and sort it, shortest total_duration first.
- shells = [ shells[s] for s in shells ]
- shells.sort(cmp=lambda x, y: cmp(x.total_duration, y.total_duration))
- # Sort tests within each shell, longest duration last (so it's
- # pop()'ed first).
- for s in shells: s.SortTests()
- # Sort peers, least needed_work first.
- peers.sort(cmp=lambda x, y: cmp(x.needed_work, y.needed_work))
- index = 0
- for shell in shells:
- while len(shell.tests) > 0:
- while peers[index].needed_work <= 0:
- index += 1
- if index == len(peers):
- print("BIG FAT WARNING: Assigning tests to peers failed. "
- "Remaining tests: %d. Going to slow mode." % len(shell.tests))
- # Pick the least-busy peer. Sorting the list for each test
- # is terribly slow, but this is just an emergency fallback anyway.
- peers.sort(cmp=lambda x, y: cmp(x.needed_work, y.needed_work))
- peers[0].ForceAddOneTest(shell.tests.pop(), shell)
- # If the peer already has a shell assigned and would need this one
- # and then yet another, try to avoid it.
- peer = peers[index]
- if (shell.total_duration < peer.needed_work and
- len(peer.shells) > 0 and
- index < len(peers) - 1 and
- shell.total_duration <= peers[index + 1].needed_work):
- peers[index + 1].AddTests(shell)
- else:
- peer.AddTests(shell)
diff --git a/deps/v8/tools/testrunner/network/endpoint.py b/deps/v8/tools/testrunner/network/endpoint.py
deleted file mode 100644
index 516578ace4..0000000000
--- a/deps/v8/tools/testrunner/network/endpoint.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import multiprocessing
-import os
-import Queue
-import threading
-import time
-
-from ..local import execution
-from ..local import progress
-from ..local import testsuite
-from ..local import utils
-from ..server import compression
-
-
-class EndpointProgress(progress.ProgressIndicator):
- def __init__(self, sock, server, ctx):
- super(EndpointProgress, self).__init__()
- self.sock = sock
- self.server = server
- self.context = ctx
- self.results_queue = [] # Accessors must synchronize themselves.
- self.sender_lock = threading.Lock()
- self.senderthread = threading.Thread(target=self._SenderThread)
- self.senderthread.start()
-
- def HasRun(self, test, has_unexpected_output):
- # The runners that call this have a lock anyway, so this is safe.
- self.results_queue.append(test)
-
- def _SenderThread(self):
- keep_running = True
- tests = []
- self.sender_lock.acquire()
- while keep_running:
- time.sleep(0.1)
- # This should be "atomic enough" without locking :-)
- # (We don't care which list any new elements get appended to, as long
- # as we don't lose any and the last one comes last.)
- current = self.results_queue
- self.results_queue = []
- for c in current:
- if c is None:
- keep_running = False
- else:
- tests.append(c)
- if keep_running and len(tests) < 1:
- continue # Wait for more results.
- if len(tests) < 1: break # We're done here.
- result = []
- for t in tests:
- result.append(t.PackResult())
- try:
- compression.Send(result, self.sock)
- except:
- self.runner.terminate = True
- for t in tests:
- self.server.CompareOwnPerf(t, self.context.arch, self.context.mode)
- tests = []
- self.sender_lock.release()
-
-
-def Execute(workspace, ctx, tests, sock, server):
- suite_paths = utils.GetSuitePaths(os.path.join(workspace, "test"))
- suites = []
- for root in suite_paths:
- suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(workspace, "test", root))
- if suite:
- suite.SetupWorkingDirectory()
- suites.append(suite)
-
- suites_dict = {}
- for s in suites:
- suites_dict[s.name] = s
- s.tests = []
- for t in tests:
- suite = suites_dict[t.suite]
- t.suite = suite
- suite.tests.append(t)
-
- suites = [ s for s in suites if len(s.tests) > 0 ]
- for s in suites:
- s.DownloadData()
-
- progress_indicator = EndpointProgress(sock, server, ctx)
- runner = execution.Runner(suites, progress_indicator, ctx)
- try:
- runner.Run(server.jobs)
- except IOError, e:
- if e.errno == 2:
- message = ("File not found: %s, maybe you forgot to 'git add' it?" %
- e.filename)
- else:
- message = "%s" % e
- compression.Send([[-1, message]], sock)
- progress_indicator.HasRun(None, None) # Sentinel to signal the end.
- progress_indicator.sender_lock.acquire() # Released when sending is done.
- progress_indicator.sender_lock.release()
diff --git a/deps/v8/tools/testrunner/network/network_execution.py b/deps/v8/tools/testrunner/network/network_execution.py
deleted file mode 100644
index a95440178b..0000000000
--- a/deps/v8/tools/testrunner/network/network_execution.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import os
-import socket
-import subprocess
-import threading
-import time
-
-from . import distro
-from ..local import execution
-from ..local import perfdata
-from ..objects import peer
-from ..objects import workpacket
-from ..server import compression
-from ..server import constants
-from ..server import local_handler
-from ..server import signatures
-
-
-def GetPeers():
- data = local_handler.LocalQuery([constants.REQUEST_PEERS])
- if not data: return []
- return [ peer.Peer.Unpack(p) for p in data ]
-
-
-class NetworkedRunner(execution.Runner):
- def __init__(self, suites, progress_indicator, context, peers, workspace):
- self.suites = suites
- datapath = os.path.join("out", "testrunner_data")
- # TODO(machenbach): These fields should exist now in the superclass.
- # But there is no super constructor call. Check if this is a problem.
- self.perf_data_manager = perfdata.PerfDataManager(datapath)
- self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
- for s in suites:
- for t in s.tests:
- t.duration = self.perfdata.FetchPerfData(t) or 1.0
- self._CommonInit(suites, progress_indicator, context)
- self.tests = [] # Only used if we need to fall back to local execution.
- self.tests_lock = threading.Lock()
- self.peers = peers
- self.pubkey_fingerprint = None # Fetched later.
- self.base_rev = subprocess.check_output(
- "cd %s; git log -1 --format=%%H --grep=git-svn-id" % workspace,
- shell=True).strip()
- self.base_svn_rev = subprocess.check_output(
- "cd %s; git log -1 %s" # Get commit description.
- " | grep -e '^\s*git-svn-id:'" # Extract "git-svn-id" line.
- " | awk '{print $2}'" # Extract "repository@revision" part.
- " | sed -e 's/.*@//'" % # Strip away "repository@".
- (workspace, self.base_rev), shell=True).strip()
- self.patch = subprocess.check_output(
- "cd %s; git diff %s" % (workspace, self.base_rev), shell=True)
- self.binaries = {}
- self.initialization_lock = threading.Lock()
- self.initialization_lock.acquire() # Released when init is done.
- self._OpenLocalConnection()
- self.local_receiver_thread = threading.Thread(
- target=self._ListenLocalConnection)
- self.local_receiver_thread.daemon = True
- self.local_receiver_thread.start()
- self.initialization_lock.acquire()
- self.initialization_lock.release()
-
- def _OpenLocalConnection(self):
- self.local_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- code = self.local_socket.connect_ex(("localhost", constants.CLIENT_PORT))
- if code != 0:
- raise RuntimeError("Failed to connect to local server")
- compression.Send([constants.REQUEST_PUBKEY_FINGERPRINT], self.local_socket)
-
- def _ListenLocalConnection(self):
- release_lock_countdown = 1 # Pubkey.
- self.local_receiver = compression.Receiver(self.local_socket)
- while not self.local_receiver.IsDone():
- data = self.local_receiver.Current()
- if data[0] == constants.REQUEST_PUBKEY_FINGERPRINT:
- pubkey = data[1]
- if not pubkey: raise RuntimeError("Received empty public key")
- self.pubkey_fingerprint = pubkey
- release_lock_countdown -= 1
- if release_lock_countdown == 0:
- self.initialization_lock.release()
- release_lock_countdown -= 1 # Prevent repeated triggering.
- self.local_receiver.Advance()
-
- def Run(self, jobs):
- self.indicator.Starting()
- need_libv8 = False
- for s in self.suites:
- shell = s.shell()
- if shell not in self.binaries:
- path = os.path.join(self.context.shell_dir, shell)
- # Check if this is a shared library build.
- try:
- ldd = subprocess.check_output("ldd %s | grep libv8\\.so" % (path),
- shell=True)
- ldd = ldd.strip().split(" ")
- assert ldd[0] == "libv8.so"
- assert ldd[1] == "=>"
- need_libv8 = True
- binary_needs_libv8 = True
- libv8 = signatures.ReadFileAndSignature(ldd[2])
- except:
- binary_needs_libv8 = False
- binary = signatures.ReadFileAndSignature(path)
- if binary[0] is None:
- print("Error: Failed to create signature.")
- assert binary[1] != 0
- return binary[1]
- binary.append(binary_needs_libv8)
- self.binaries[shell] = binary
- if need_libv8:
- self.binaries["libv8.so"] = libv8
- distro.Assign(self.suites, self.peers)
- # Spawn one thread for each peer.
- threads = []
- for p in self.peers:
- thread = threading.Thread(target=self._TalkToPeer, args=[p])
- threads.append(thread)
- thread.start()
- try:
- for thread in threads:
- # Use a timeout so that signals (Ctrl+C) will be processed.
- thread.join(timeout=10000000)
- self._AnalyzePeerRuntimes()
- except KeyboardInterrupt:
- self.terminate = True
- raise
- except Exception, _e:
- # If there's an exception we schedule an interruption for any
- # remaining threads...
- self.terminate = True
- # ...and then reraise the exception to bail out.
- raise
- compression.Send(constants.END_OF_STREAM, self.local_socket)
- self.local_socket.close()
- if self.tests:
- self._RunInternal(jobs)
- self.indicator.Done()
- return not self.failed
-
- def _TalkToPeer(self, peer):
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.settimeout(self.context.timeout + 10)
- code = sock.connect_ex((peer.address, constants.PEER_PORT))
- if code == 0:
- try:
- peer.runtime = None
- start_time = time.time()
- packet = workpacket.WorkPacket(peer=peer, context=self.context,
- base_revision=self.base_svn_rev,
- patch=self.patch,
- pubkey=self.pubkey_fingerprint)
- data, test_map = packet.Pack(self.binaries)
- compression.Send(data, sock)
- compression.Send(constants.END_OF_STREAM, sock)
- rec = compression.Receiver(sock)
- while not rec.IsDone() and not self.terminate:
- data_list = rec.Current()
- for data in data_list:
- test_id = data[0]
- if test_id < 0:
- # The peer is reporting an error.
- with self.lock:
- print("\nPeer %s reports error: %s" % (peer.address, data[1]))
- continue
- test = test_map.pop(test_id)
- test.MergeResult(data)
- try:
- self.perfdata.UpdatePerfData(test)
- except Exception, e:
- print("UpdatePerfData exception: %s" % e)
- pass # Just keep working.
- with self.lock:
- perf_key = self.perfdata.GetKey(test)
- compression.Send(
- [constants.INFORM_DURATION, perf_key, test.duration,
- self.context.arch, self.context.mode],
- self.local_socket)
- has_unexpected_output = test.suite.HasUnexpectedOutput(test)
- if has_unexpected_output:
- self.failed.append(test)
- if test.output.HasCrashed():
- self.crashed += 1
- else:
- self.succeeded += 1
- self.remaining -= 1
- self.indicator.HasRun(test, has_unexpected_output)
- rec.Advance()
- peer.runtime = time.time() - start_time
- except KeyboardInterrupt:
- sock.close()
- raise
- except Exception, e:
- print("Got exception: %s" % e)
- pass # Fall back to local execution.
- else:
- compression.Send([constants.UNRESPONSIVE_PEER, peer.address],
- self.local_socket)
- sock.close()
- if len(test_map) > 0:
- # Some tests have not received any results. Run them locally.
- print("\nNo results for %d tests, running them locally." % len(test_map))
- self._EnqueueLocally(test_map)
-
- def _EnqueueLocally(self, test_map):
- with self.tests_lock:
- for test in test_map:
- self.tests.append(test_map[test])
-
- def _AnalyzePeerRuntimes(self):
- total_runtime = 0.0
- total_work = 0.0
- for p in self.peers:
- if p.runtime is None:
- return
- total_runtime += p.runtime
- total_work += p.assigned_work
- for p in self.peers:
- p.assigned_work /= total_work
- p.runtime /= total_runtime
- perf_correction = p.assigned_work / p.runtime
- old_perf = p.relative_performance
- p.relative_performance = (old_perf + perf_correction) / 2.0
- compression.Send([constants.UPDATE_PERF, p.address,
- p.relative_performance],
- self.local_socket)
diff --git a/deps/v8/tools/testrunner/objects/context.py b/deps/v8/tools/testrunner/objects/context.py
index 6bcbfb67aa..fb5d717728 100644
--- a/deps/v8/tools/testrunner/objects/context.py
+++ b/deps/v8/tools/testrunner/objects/context.py
@@ -49,18 +49,3 @@ class Context():
self.no_harness = no_harness
self.use_perf_data = use_perf_data
self.sancov_dir = sancov_dir
-
- def Pack(self):
- return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
- self.command_prefix, self.extra_flags, self.noi18n,
- self.random_seed, self.no_sorting, self.rerun_failures_count,
- self.rerun_failures_max, self.predictable, self.no_harness,
- self.use_perf_data, self.sancov_dir]
-
- @staticmethod
- def Unpack(packed):
- # For the order of the fields, refer to Pack() above.
- return Context(packed[0], packed[1], None, packed[2], False,
- packed[3], packed[4], packed[5], packed[6], packed[7],
- packed[8], packed[9], packed[10], packed[11], packed[12],
- packed[13], packed[14], packed[15])
diff --git a/deps/v8/tools/testrunner/objects/output.py b/deps/v8/tools/testrunner/objects/output.py
index b4bb01f797..99d6137698 100644
--- a/deps/v8/tools/testrunner/objects/output.py
+++ b/deps/v8/tools/testrunner/objects/output.py
@@ -51,11 +51,3 @@ class Output(object):
def HasTimedOut(self):
return self.timed_out
-
- def Pack(self):
- return [self.exit_code, self.timed_out, self.stdout, self.stderr, self.pid]
-
- @staticmethod
- def Unpack(packed):
- # For the order of the fields, refer to Pack() above.
- return Output(packed[0], packed[1], packed[2], packed[3], packed[4])
diff --git a/deps/v8/tools/testrunner/objects/peer.py b/deps/v8/tools/testrunner/objects/peer.py
deleted file mode 100644
index 18a6bec7a8..0000000000
--- a/deps/v8/tools/testrunner/objects/peer.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-class Peer(object):
- def __init__(self, address, jobs, rel_perf, pubkey):
- self.address = address # string: IP address
- self.jobs = jobs # integer: number of CPUs
- self.relative_performance = rel_perf
- self.pubkey = pubkey # string: pubkey's fingerprint
- self.shells = set() # set of strings
- self.needed_work = 0
- self.assigned_work = 0
- self.tests = [] # list of TestCase objects
- self.trusting_me = False # This peer trusts my public key.
- self.trusted = False # I trust this peer's public key.
-
- def __str__(self):
- return ("Peer at %s, jobs: %d, performance: %.2f, trust I/O: %s/%s" %
- (self.address, self.jobs, self.relative_performance,
- self.trusting_me, self.trusted))
-
- def AddTests(self, shell):
- """Adds tests from |shell| to this peer.
-
- Stops when self.needed_work reaches zero, or when all of shell's tests
- are assigned."""
- assert self.needed_work > 0
- if shell.shell not in self.shells:
- self.shells.add(shell.shell)
- while len(shell.tests) > 0 and self.needed_work > 0:
- t = shell.tests.pop()
- self.needed_work -= t.duration
- self.assigned_work += t.duration
- shell.total_duration -= t.duration
- self.tests.append(t)
-
- def ForceAddOneTest(self, test, shell):
- """Forcibly adds another test to this peer, disregarding needed_work."""
- if shell.shell not in self.shells:
- self.shells.add(shell.shell)
- self.needed_work -= test.duration
- self.assigned_work += test.duration
- shell.total_duration -= test.duration
- self.tests.append(test)
-
-
- def Pack(self):
- """Creates a JSON serializable representation of this Peer."""
- return [self.address, self.jobs, self.relative_performance]
-
- @staticmethod
- def Unpack(packed):
- """Creates a Peer object built from a packed representation."""
- pubkey_dummy = "" # Callers of this don't care (only the server does).
- return Peer(packed[0], packed[1], packed[2], pubkey_dummy)
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 37e3cb4ec2..fd8c27bc59 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -26,76 +26,29 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from . import output
-
class TestCase(object):
- def __init__(self, suite, path, variant=None, flags=None,
- override_shell=None):
+ def __init__(self, suite, path, variant=None, flags=None):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test
self.variant = variant # name of the used testing variant
- self.override_shell = override_shell
- self.outcomes = frozenset([])
self.output = None
self.id = None # int, used to map result back to TestCase instance
self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed.
- self.env = {}
def CopyAddingFlags(self, variant, flags):
- copy = TestCase(self.suite, self.path, variant, self.flags + flags,
- self.override_shell)
- copy.outcomes = self.outcomes
- copy.env = self.env
- return copy
-
- def PackTask(self):
- """
- Extracts those parts of this object that are required to run the test
- and returns them as a JSON serializable object.
- """
- assert self.id is not None
- return [self.suitename(), self.path, self.variant, self.flags,
- self.override_shell, list(self.outcomes or []),
- self.id, self.env]
-
- @staticmethod
- def UnpackTask(task):
- """Creates a new TestCase object based on packed task data."""
- # For the order of the fields, refer to PackTask() above.
- test = TestCase(str(task[0]), task[1], task[2], task[3], task[4])
- test.outcomes = frozenset(task[5])
- test.id = task[6]
- test.run = 1
- test.env = task[7]
- return test
+ return TestCase(self.suite, self.path, variant, self.flags + flags)
def SetSuiteObject(self, suites):
self.suite = suites[self.suite]
- def PackResult(self):
- """Serializes the output of the TestCase after it has run."""
- self.suite.StripOutputForTransmit(self)
- return [self.id, self.output.Pack(), self.duration]
-
- def MergeResult(self, result):
- """Applies the contents of a Result to this object."""
- assert result[0] == self.id
- self.output = output.Output.Unpack(result[1])
- self.duration = result[2]
-
def suitename(self):
return self.suite.name
def GetLabel(self):
return self.suitename() + "/" + self.suite.CommonTestName(self)
- def shell(self):
- if self.override_shell:
- return self.override_shell
- return self.suite.shell()
-
def __getstate__(self):
"""Representation to pickle test cases.
diff --git a/deps/v8/tools/testrunner/objects/workpacket.py b/deps/v8/tools/testrunner/objects/workpacket.py
deleted file mode 100644
index d07efe76ec..0000000000
--- a/deps/v8/tools/testrunner/objects/workpacket.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-from . import context
-from . import testcase
-
-class WorkPacket(object):
- def __init__(self, peer=None, context=None, tests=None, binaries=None,
- base_revision=None, patch=None, pubkey=None):
- self.peer = peer
- self.context = context
- self.tests = tests
- self.binaries = binaries
- self.base_revision = base_revision
- self.patch = patch
- self.pubkey_fingerprint = pubkey
-
- def Pack(self, binaries_dict):
- """
- Creates a JSON serializable object containing the data of this
- work packet.
- """
- need_libv8 = False
- binaries = []
- for shell in self.peer.shells:
- prefetched_binary = binaries_dict[shell]
- binaries.append({"name": shell,
- "blob": prefetched_binary[0],
- "sign": prefetched_binary[1]})
- if prefetched_binary[2]:
- need_libv8 = True
- if need_libv8:
- libv8 = binaries_dict["libv8.so"]
- binaries.append({"name": "libv8.so",
- "blob": libv8[0],
- "sign": libv8[1]})
- tests = []
- test_map = {}
- for t in self.peer.tests:
- test_map[t.id] = t
- tests.append(t.PackTask())
- result = {
- "binaries": binaries,
- "pubkey": self.pubkey_fingerprint,
- "context": self.context.Pack(),
- "base_revision": self.base_revision,
- "patch": self.patch,
- "tests": tests
- }
- return result, test_map
-
- @staticmethod
- def Unpack(packed):
- """
- Creates a WorkPacket object from the given packed representation.
- """
- binaries = packed["binaries"]
- pubkey_fingerprint = packed["pubkey"]
- ctx = context.Context.Unpack(packed["context"])
- base_revision = packed["base_revision"]
- patch = packed["patch"]
- tests = [ testcase.TestCase.UnpackTask(t) for t in packed["tests"] ]
- return WorkPacket(context=ctx, tests=tests, binaries=binaries,
- base_revision=base_revision, patch=patch,
- pubkey=pubkey_fingerprint)
diff --git a/deps/v8/tools/testrunner/server/__init__.py b/deps/v8/tools/testrunner/server/__init__.py
deleted file mode 100644
index 202a262709..0000000000
--- a/deps/v8/tools/testrunner/server/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/v8/tools/testrunner/server/compression.py b/deps/v8/tools/testrunner/server/compression.py
deleted file mode 100644
index d5ed415976..0000000000
--- a/deps/v8/tools/testrunner/server/compression.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import cStringIO as StringIO
-try:
- import ujson as json
-except ImportError:
- import json
-import os
-import struct
-import zlib
-
-from . import constants
-
-def Send(obj, sock):
- """
- Sends a JSON encodable object over the specified socket (zlib-compressed).
- """
- obj = json.dumps(obj)
- compression_level = 2 # 1 = fastest, 9 = best compression
- compressed = zlib.compress(obj, compression_level)
- payload = struct.pack('>i', len(compressed)) + compressed
- sock.sendall(payload)
-
-
-class Receiver(object):
- def __init__(self, sock):
- self.sock = sock
- self.data = StringIO.StringIO()
- self.datalength = 0
- self._next = self._GetNext()
-
- def IsDone(self):
- return self._next == None
-
- def Current(self):
- return self._next
-
- def Advance(self):
- try:
- self._next = self._GetNext()
- except:
- raise
-
- def _GetNext(self):
- try:
- while self.datalength < constants.SIZE_T:
- try:
- chunk = self.sock.recv(8192)
- except:
- raise
- if not chunk: return None
- self._AppendData(chunk)
- size = self._PopData(constants.SIZE_T)
- size = struct.unpack(">i", size)[0]
- while self.datalength < size:
- try:
- chunk = self.sock.recv(8192)
- except:
- raise
- if not chunk: return None
- self._AppendData(chunk)
- result = self._PopData(size)
- result = zlib.decompress(result)
- result = json.loads(result)
- if result == constants.END_OF_STREAM:
- return None
- return result
- except:
- raise
-
- def _AppendData(self, new):
- self.data.seek(0, os.SEEK_END)
- self.data.write(new)
- self.datalength += len(new)
-
- def _PopData(self, length):
- self.data.seek(0)
- chunk = self.data.read(length)
- remaining = self.data.read()
- self.data.close()
- self.data = StringIO.StringIO()
- self.data.write(remaining)
- assert self.datalength - length == len(remaining)
- self.datalength = len(remaining)
- return chunk
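For reference, the framing removed here is simple: each message is a JSON-encodable object, zlib-compressed and prefixed with a 4-byte big-endian signed length (constants.SIZE_T). A minimal, self-contained sketch of one framed round trip follows; it does not reuse the deleted module, and socket.socketpair() is only a convenience to keep the demo local.

import json
import socket
import struct
import zlib

def send_obj(sock, obj):
    # JSON-encode, compress, and prefix with a 4-byte big-endian length header.
    compressed = zlib.compress(json.dumps(obj).encode("utf-8"), 2)
    sock.sendall(struct.pack(">i", len(compressed)) + compressed)

def recv_obj(sock):
    # Read the 4-byte length header, then exactly that many payload bytes.
    header = b""
    while len(header) < 4:
        header += sock.recv(4 - len(header))
    (size,) = struct.unpack(">i", header)
    payload = b""
    while len(payload) < size:
        payload += sock.recv(size - len(payload))
    return json.loads(zlib.decompress(payload).decode("utf-8"))

if __name__ == "__main__":
    a, b = socket.socketpair()  # POSIX-only convenience for the demo
    send_obj(a, ["get status"])
    print(recv_obj(b))          # ['get status']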
diff --git a/deps/v8/tools/testrunner/server/constants.py b/deps/v8/tools/testrunner/server/constants.py
deleted file mode 100644
index 5aefcbad0d..0000000000
--- a/deps/v8/tools/testrunner/server/constants.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-CLIENT_PORT = 9991 # Port for the local client to connect to.
-PEER_PORT = 9992 # Port for peers on the network to connect to.
-PRESENCE_PORT = 9993 # Port for presence daemon.
-STATUS_PORT = 9994 # Port for network requests not related to workpackets.
-
-END_OF_STREAM = "end of dtest stream" # Marker for end of network requests.
-SIZE_T = 4 # Number of bytes used for network request size header.
-
-# Messages understood by the local request handler.
-ADD_TRUSTED = "add trusted"
-INFORM_DURATION = "inform about duration"
-REQUEST_PEERS = "get peers"
-UNRESPONSIVE_PEER = "unresponsive peer"
-REQUEST_PUBKEY_FINGERPRINT = "get pubkey fingerprint"
-REQUEST_STATUS = "get status"
-UPDATE_PERF = "update performance"
-
-# Messages understood by the status request handler.
-LIST_TRUSTED_PUBKEYS = "list trusted pubkeys"
-GET_SIGNED_PUBKEY = "pass on signed pubkey"
-NOTIFY_NEW_TRUSTED = "new trusted peer"
-TRUST_YOU_NOW = "trust you now"
-DO_YOU_TRUST = "do you trust"
diff --git a/deps/v8/tools/testrunner/server/daemon.py b/deps/v8/tools/testrunner/server/daemon.py
deleted file mode 100644
index baa66fbea9..0000000000
--- a/deps/v8/tools/testrunner/server/daemon.py
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/env python
-
-# This code has been written by Sander Marechal and published at:
-# http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
-# where the author has placed it in the public domain (see comment #6 at
-# http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/#c6
-# ).
-# Some minor modifications have been made by the V8 authors. The work remains
-# in the public domain.
-
-import atexit
-import os
-from signal import SIGTERM
-from signal import SIGINT
-import sys
-import time
-
-
-class Daemon(object):
- """
- A generic daemon class.
-
- Usage: subclass the Daemon class and override the run() method
- """
- def __init__(self, pidfile, stdin='/dev/null',
- stdout='/dev/null', stderr='/dev/null'):
- self.stdin = stdin
- self.stdout = stdout
- self.stderr = stderr
- self.pidfile = pidfile
-
- def daemonize(self):
- """
- do the UNIX double-fork magic, see Stevens' "Advanced
- Programming in the UNIX Environment" for details (ISBN 0201563177)
- http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
- """
- try:
- pid = os.fork()
- if pid > 0:
- # exit first parent
- sys.exit(0)
- except OSError, e:
- sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
- sys.exit(1)
-
- # decouple from parent environment
- os.chdir("/")
- os.setsid()
- os.umask(0)
-
- # do second fork
- try:
- pid = os.fork()
- if pid > 0:
- # exit from second parent
- sys.exit(0)
- except OSError, e:
- sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
- sys.exit(1)
-
- # redirect standard file descriptors
- sys.stdout.flush()
- sys.stderr.flush()
- si = file(self.stdin, 'r')
- so = file(self.stdout, 'a+')
- se = file(self.stderr, 'a+', 0)
- # TODO: (debug) re-enable this!
- #os.dup2(si.fileno(), sys.stdin.fileno())
- #os.dup2(so.fileno(), sys.stdout.fileno())
- #os.dup2(se.fileno(), sys.stderr.fileno())
-
- # write pidfile
- atexit.register(self.delpid)
- pid = str(os.getpid())
- file(self.pidfile, 'w+').write("%s\n" % pid)
-
- def delpid(self):
- os.remove(self.pidfile)
-
- def start(self):
- """
- Start the daemon
- """
- # Check for a pidfile to see if the daemon already runs
- try:
- pf = file(self.pidfile, 'r')
- pid = int(pf.read().strip())
- pf.close()
- except IOError:
- pid = None
-
- if pid:
- message = "pidfile %s already exist. Daemon already running?\n"
- sys.stderr.write(message % self.pidfile)
- sys.exit(1)
-
- # Start the daemon
- self.daemonize()
- self.run()
-
- def stop(self):
- """
- Stop the daemon
- """
- # Get the pid from the pidfile
- try:
- pf = file(self.pidfile, 'r')
- pid = int(pf.read().strip())
- pf.close()
- except IOError:
- pid = None
-
- if not pid:
- message = "pidfile %s does not exist. Daemon not running?\n"
- sys.stderr.write(message % self.pidfile)
- return # not an error in a restart
-
- # Try killing the daemon process
- try:
- # Give the process a one-second chance to exit gracefully.
- os.kill(pid, SIGINT)
- time.sleep(1)
- while 1:
- os.kill(pid, SIGTERM)
- time.sleep(0.1)
- except OSError, err:
- err = str(err)
- if err.find("No such process") > 0:
- if os.path.exists(self.pidfile):
- os.remove(self.pidfile)
- else:
- print str(err)
- sys.exit(1)
-
- def restart(self):
- """
- Restart the daemon
- """
- self.stop()
- self.start()
-
- def run(self):
- """
- You should override this method when you subclass Daemon. It will be
- called after the process has been daemonized by start() or restart().
- """
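The deleted class is meant to be subclassed: start() double-forks, writes the pidfile and then calls the overridden run() in the daemonized process, while stop() reads the pidfile back and signals that process. A minimal usage sketch, assuming the module above is importable as "daemon"; the SleepDaemon name and the pidfile path are illustrative only.

import sys
import time

from daemon import Daemon  # the module being deleted above


class SleepDaemon(Daemon):
    def run(self):
        # Runs in the detached grandchild process until stop() signals it.
        while True:
            time.sleep(10)


if __name__ == "__main__":
    d = SleepDaemon("/tmp/sleep_daemon.pid")  # hypothetical pidfile location
    if sys.argv[1:] == ["start"]:
        d.start()
    elif sys.argv[1:] == ["stop"]:
        d.stop()
    else:
        d.restart()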
diff --git a/deps/v8/tools/testrunner/server/local_handler.py b/deps/v8/tools/testrunner/server/local_handler.py
deleted file mode 100644
index 3b3ac495d0..0000000000
--- a/deps/v8/tools/testrunner/server/local_handler.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import socket
-import SocketServer
-import StringIO
-
-from . import compression
-from . import constants
-
-
-def LocalQuery(query):
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- code = sock.connect_ex(("localhost", constants.CLIENT_PORT))
- if code != 0: return None
- compression.Send(query, sock)
- compression.Send(constants.END_OF_STREAM, sock)
- rec = compression.Receiver(sock)
- data = None
- while not rec.IsDone():
- data = rec.Current()
- assert data[0] == query[0]
- data = data[1]
- rec.Advance()
- sock.close()
- return data
-
-
-class LocalHandler(SocketServer.BaseRequestHandler):
- def handle(self):
- rec = compression.Receiver(self.request)
- while not rec.IsDone():
- data = rec.Current()
- action = data[0]
-
- if action == constants.REQUEST_PEERS:
- with self.server.daemon.peer_list_lock:
- response = [ p.Pack() for p in self.server.daemon.peers
- if p.trusting_me ]
- compression.Send([action, response], self.request)
-
- elif action == constants.UNRESPONSIVE_PEER:
- self.server.daemon.DeletePeer(data[1])
-
- elif action == constants.REQUEST_PUBKEY_FINGERPRINT:
- compression.Send([action, self.server.daemon.pubkey_fingerprint],
- self.request)
-
- elif action == constants.REQUEST_STATUS:
- compression.Send([action, self._GetStatusMessage()], self.request)
-
- elif action == constants.ADD_TRUSTED:
- fingerprint = self.server.daemon.CopyToTrusted(data[1])
- compression.Send([action, fingerprint], self.request)
-
- elif action == constants.INFORM_DURATION:
- test_key = data[1]
- test_duration = data[2]
- arch = data[3]
- mode = data[4]
- self.server.daemon.AddPerfData(test_key, test_duration, arch, mode)
-
- elif action == constants.UPDATE_PERF:
- address = data[1]
- perf = data[2]
- self.server.daemon.UpdatePeerPerformance(data[1], data[2])
-
- rec.Advance()
- compression.Send(constants.END_OF_STREAM, self.request)
-
- def _GetStatusMessage(self):
- sio = StringIO.StringIO()
- sio.write("Peers:\n")
- with self.server.daemon.peer_list_lock:
- for p in self.server.daemon.peers:
- sio.write("%s\n" % p)
- sio.write("My own jobs: %d, relative performance: %.2f\n" %
- (self.server.daemon.jobs, self.server.daemon.relative_perf))
- # Low-priority TODO: Return more information. Ideas:
- # - currently running anything,
- # - time since last job,
- # - time since last repository fetch
- # - number of workpackets/testcases handled since startup
- # - slowest test(s)
- result = sio.getvalue()
- sio.close()
- return result
-
-
-class LocalSocketServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
- def __init__(self, daemon):
- SocketServer.TCPServer.__init__(self, ("localhost", constants.CLIENT_PORT),
- LocalHandler)
- self.daemon = daemon
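LocalQuery() above shows the client side of the protocol: send a single [action, ...] list, terminate the stream with END_OF_STREAM, and take the second element of the echoed [action, response] pair. Assuming the deleted package were still present, asking the local daemon for its status text would have looked roughly like this (a sketch, not a supported API):

from testrunner.server import constants
from testrunner.server import local_handler

# Connects to constants.CLIENT_PORT on localhost; returns None if no
# test server daemon is listening there.
status = local_handler.LocalQuery([constants.REQUEST_STATUS])
if status is None:
    print("No local test server is running.")
else:
    print(status)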
diff --git a/deps/v8/tools/testrunner/server/main.py b/deps/v8/tools/testrunner/server/main.py
deleted file mode 100644
index c237e1adb4..0000000000
--- a/deps/v8/tools/testrunner/server/main.py
+++ /dev/null
@@ -1,245 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import multiprocessing
-import os
-import shutil
-import subprocess
-import threading
-import time
-
-from . import daemon
-from . import local_handler
-from . import presence_handler
-from . import signatures
-from . import status_handler
-from . import work_handler
-from ..network import perfdata
-
-
-class Server(daemon.Daemon):
-
- def __init__(self, pidfile, root, stdin="/dev/null",
- stdout="/dev/null", stderr="/dev/null"):
- super(Server, self).__init__(pidfile, stdin, stdout, stderr)
- self.root = root
- self.local_handler = None
- self.local_handler_thread = None
- self.work_handler = None
- self.work_handler_thread = None
- self.status_handler = None
- self.status_handler_thread = None
- self.presence_daemon = None
- self.presence_daemon_thread = None
- self.peers = []
- self.jobs = multiprocessing.cpu_count()
- self.peer_list_lock = threading.Lock()
- self.perf_data_lock = None
- self.presence_daemon_lock = None
- self.datadir = os.path.join(self.root, "data")
- pubkey_fingerprint_filename = os.path.join(self.datadir, "mypubkey")
- with open(pubkey_fingerprint_filename) as f:
- self.pubkey_fingerprint = f.read().strip()
- self.relative_perf_filename = os.path.join(self.datadir, "myperf")
- if os.path.exists(self.relative_perf_filename):
- with open(self.relative_perf_filename) as f:
- try:
- self.relative_perf = float(f.read())
- except:
- self.relative_perf = 1.0
- else:
- self.relative_perf = 1.0
-
- def run(self):
- os.nice(20)
- self.ip = presence_handler.GetOwnIP()
- self.perf_data_manager = perfdata.PerfDataManager(self.datadir)
- self.perf_data_lock = threading.Lock()
-
- self.local_handler = local_handler.LocalSocketServer(self)
- self.local_handler_thread = threading.Thread(
- target=self.local_handler.serve_forever)
- self.local_handler_thread.start()
-
- self.work_handler = work_handler.WorkSocketServer(self)
- self.work_handler_thread = threading.Thread(
- target=self.work_handler.serve_forever)
- self.work_handler_thread.start()
-
- self.status_handler = status_handler.StatusSocketServer(self)
- self.status_handler_thread = threading.Thread(
- target=self.status_handler.serve_forever)
- self.status_handler_thread.start()
-
- self.presence_daemon = presence_handler.PresenceDaemon(self)
- self.presence_daemon_thread = threading.Thread(
- target=self.presence_daemon.serve_forever)
- self.presence_daemon_thread.start()
-
- self.presence_daemon.FindPeers()
- time.sleep(0.5) # Give those peers some time to reply.
-
- with self.peer_list_lock:
- for p in self.peers:
- if p.address == self.ip: continue
- status_handler.RequestTrustedPubkeys(p, self)
-
- while True:
- try:
- self.PeriodicTasks()
- time.sleep(60)
- except Exception, e:
- print("MAIN LOOP EXCEPTION: %s" % e)
- self.Shutdown()
- break
- except KeyboardInterrupt:
- self.Shutdown()
- break
-
- def Shutdown(self):
- with open(self.relative_perf_filename, "w") as f:
- f.write("%s" % self.relative_perf)
- self.presence_daemon.shutdown()
- self.presence_daemon.server_close()
- self.local_handler.shutdown()
- self.local_handler.server_close()
- self.work_handler.shutdown()
- self.work_handler.server_close()
- self.status_handler.shutdown()
- self.status_handler.server_close()
-
- def PeriodicTasks(self):
- # If we know peers we don't trust, see if someone else trusts them.
- with self.peer_list_lock:
- for p in self.peers:
- if p.trusted: continue
- if self.IsTrusted(p.pubkey):
- p.trusted = True
- status_handler.ITrustYouNow(p)
- continue
- for p2 in self.peers:
- if not p2.trusted: continue
- status_handler.TryTransitiveTrust(p2, p.pubkey, self)
- # TODO: Ping for more peers waiting to be discovered.
- # TODO: Update the checkout (if currently idle).
-
- def AddPeer(self, peer):
- with self.peer_list_lock:
- for p in self.peers:
- if p.address == peer.address:
- return
- self.peers.append(peer)
- if peer.trusted:
- status_handler.ITrustYouNow(peer)
-
- def DeletePeer(self, peer_address):
- with self.peer_list_lock:
- for i in xrange(len(self.peers)):
- if self.peers[i].address == peer_address:
- del self.peers[i]
- return
-
- def MarkPeerAsTrusting(self, peer_address):
- with self.peer_list_lock:
- for p in self.peers:
- if p.address == peer_address:
- p.trusting_me = True
- break
-
- def UpdatePeerPerformance(self, peer_address, performance):
- with self.peer_list_lock:
- for p in self.peers:
- if p.address == peer_address:
- p.relative_performance = performance
-
- def CopyToTrusted(self, pubkey_filename):
- with open(pubkey_filename, "r") as f:
- lines = f.readlines()
- fingerprint = lines[-1].strip()
- target_filename = self._PubkeyFilename(fingerprint)
- shutil.copy(pubkey_filename, target_filename)
- with self.peer_list_lock:
- for peer in self.peers:
- if peer.address == self.ip: continue
- if peer.pubkey == fingerprint:
- status_handler.ITrustYouNow(peer)
- else:
- result = self.SignTrusted(fingerprint)
- status_handler.NotifyNewTrusted(peer, result)
- return fingerprint
-
- def _PubkeyFilename(self, pubkey_fingerprint):
- return os.path.join(self.root, "trusted", "%s.pem" % pubkey_fingerprint)
-
- def IsTrusted(self, pubkey_fingerprint):
- return os.path.exists(self._PubkeyFilename(pubkey_fingerprint))
-
- def ListTrusted(self):
- path = os.path.join(self.root, "trusted")
- if not os.path.exists(path): return []
- return [ f[:-4] for f in os.listdir(path) if f.endswith(".pem") ]
-
- def SignTrusted(self, pubkey_fingerprint):
- if not self.IsTrusted(pubkey_fingerprint):
- return []
- filename = self._PubkeyFilename(pubkey_fingerprint)
- result = signatures.ReadFileAndSignature(filename) # Format: [key, sig].
- return [pubkey_fingerprint, result[0], result[1], self.pubkey_fingerprint]
-
- def AcceptNewTrusted(self, data):
- # The format of |data| matches the return value of |SignTrusted()|.
- if not data: return
- fingerprint = data[0]
- pubkey = data[1]
- signature = data[2]
- signer = data[3]
- if not self.IsTrusted(signer):
- return
- if self.IsTrusted(fingerprint):
- return # Already trusted.
- filename = self._PubkeyFilename(fingerprint)
- signer_pubkeyfile = self._PubkeyFilename(signer)
- if not signatures.VerifySignature(filename, pubkey, signature,
- signer_pubkeyfile):
- return
- return # Nothing more to do.
-
- def AddPerfData(self, test_key, duration, arch, mode):
- data_store = self.perf_data_manager.GetStore(arch, mode)
- data_store.RawUpdatePerfData(str(test_key), duration)
-
- def CompareOwnPerf(self, test, arch, mode):
- data_store = self.perf_data_manager.GetStore(arch, mode)
- observed = data_store.FetchPerfData(test)
- if not observed: return
- own_perf_estimate = observed / test.duration
- with self.perf_data_lock:
- kLearnRateLimiter = 9999
- self.relative_perf *= kLearnRateLimiter
- self.relative_perf += own_perf_estimate
- self.relative_perf /= (kLearnRateLimiter + 1)
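CompareOwnPerf() above maintains self.relative_perf as a heavily damped running average: each new estimate (recorded duration divided by the locally observed duration) enters with weight 1/(kLearnRateLimiter + 1), i.e. 1/10000. The same update written as a standalone helper, with illustrative names:

def update_relative_perf(current, recorded_duration, own_duration,
                         learn_rate_limiter=9999):
    # One sample: how fast this machine is relative to the recorded data.
    estimate = recorded_duration / float(own_duration)
    # Exponential moving average; a single sample only carries a
    # 1/(learn_rate_limiter + 1) share of the result.
    return ((current * learn_rate_limiter + estimate) /
            (learn_rate_limiter + 1))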
diff --git a/deps/v8/tools/testrunner/server/presence_handler.py b/deps/v8/tools/testrunner/server/presence_handler.py
deleted file mode 100644
index 1dc2ef163a..0000000000
--- a/deps/v8/tools/testrunner/server/presence_handler.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import socket
-import SocketServer
-import threading
-try:
- import ujson as json
-except:
- import json
-
-from . import constants
-from ..objects import peer
-
-
-STARTUP_REQUEST = "V8 test peer starting up"
-STARTUP_RESPONSE = "Let's rock some tests!"
-EXIT_REQUEST = "V8 testing peer going down"
-
-
-def GetOwnIP():
- s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- s.connect(("8.8.8.8", 80))
- ip = s.getsockname()[0]
- s.close()
- return ip
-
-
-class PresenceHandler(SocketServer.BaseRequestHandler):
-
- def handle(self):
- data = json.loads(self.request[0].strip())
-
- if data[0] == STARTUP_REQUEST:
- jobs = data[1]
- relative_perf = data[2]
- pubkey_fingerprint = data[3]
- trusted = self.server.daemon.IsTrusted(pubkey_fingerprint)
- response = [STARTUP_RESPONSE, self.server.daemon.jobs,
- self.server.daemon.relative_perf,
- self.server.daemon.pubkey_fingerprint, trusted]
- response = json.dumps(response)
- self.server.SendTo(self.client_address[0], response)
- p = peer.Peer(self.client_address[0], jobs, relative_perf,
- pubkey_fingerprint)
- p.trusted = trusted
- self.server.daemon.AddPeer(p)
-
- elif data[0] == STARTUP_RESPONSE:
- jobs = data[1]
- perf = data[2]
- pubkey_fingerprint = data[3]
- p = peer.Peer(self.client_address[0], jobs, perf, pubkey_fingerprint)
- p.trusted = self.server.daemon.IsTrusted(pubkey_fingerprint)
- p.trusting_me = data[4]
- self.server.daemon.AddPeer(p)
-
- elif data[0] == EXIT_REQUEST:
- self.server.daemon.DeletePeer(self.client_address[0])
- if self.client_address[0] == self.server.daemon.ip:
- self.server.shutdown_lock.release()
-
-
-class PresenceDaemon(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
- def __init__(self, daemon):
- self.daemon = daemon
- address = (daemon.ip, constants.PRESENCE_PORT)
- SocketServer.UDPServer.__init__(self, address, PresenceHandler)
- self.shutdown_lock = threading.Lock()
-
- def shutdown(self):
- self.shutdown_lock.acquire()
- self.SendToAll(json.dumps([EXIT_REQUEST]))
- self.shutdown_lock.acquire()
- self.shutdown_lock.release()
- SocketServer.UDPServer.shutdown(self)
-
- def SendTo(self, target, message):
- sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- sock.sendto(message, (target, constants.PRESENCE_PORT))
- sock.close()
-
- def SendToAll(self, message):
- sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- ip = self.daemon.ip.split(".")
- for i in range(1, 254):
- ip[-1] = str(i)
- sock.sendto(message, (".".join(ip), constants.PRESENCE_PORT))
- sock.close()
-
- def FindPeers(self):
- request = [STARTUP_REQUEST, self.daemon.jobs, self.daemon.relative_perf,
- self.daemon.pubkey_fingerprint]
- request = json.dumps(request)
- self.SendToAll(request)
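Discovery above is a brute-force UDP sweep: the daemon serializes a STARTUP_REQUEST message and sends it to every address in its own /24 on PRESENCE_PORT; listeners answer with STARTUP_RESPONSE and both sides record each other as peers. A self-contained sketch of the announcing half (own_ip is assumed to be the sender's IPv4 address; replies are not handled here):

import json
import socket

PRESENCE_PORT = 9993  # mirrors constants.PRESENCE_PORT

def announce(own_ip, jobs, relative_perf, pubkey_fingerprint):
    message = json.dumps(
        ["V8 test peer starting up", jobs, relative_perf, pubkey_fingerprint])
    octets = own_ip.split(".")
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Sweep the whole /24 the sender lives in.
    for last in range(1, 254):
        octets[-1] = str(last)
        sock.sendto(message.encode("utf-8"), (".".join(octets), PRESENCE_PORT))
    sock.close()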
diff --git a/deps/v8/tools/testrunner/server/signatures.py b/deps/v8/tools/testrunner/server/signatures.py
deleted file mode 100644
index 9957a18a26..0000000000
--- a/deps/v8/tools/testrunner/server/signatures.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import base64
-import os
-import subprocess
-
-
-def ReadFileAndSignature(filename):
- with open(filename, "rb") as f:
- file_contents = base64.b64encode(f.read())
- signature_file = filename + ".signature"
- if (not os.path.exists(signature_file) or
- os.path.getmtime(signature_file) < os.path.getmtime(filename)):
- private_key = "~/.ssh/v8_dtest"
- code = subprocess.call("openssl dgst -out %s -sign %s %s" %
- (signature_file, private_key, filename),
- shell=True)
- if code != 0: return [None, code]
- with open(signature_file) as f:
- signature = base64.b64encode(f.read())
- return [file_contents, signature]
-
-
-def VerifySignature(filename, file_contents, signature, pubkeyfile):
- with open(filename, "wb") as f:
- f.write(base64.b64decode(file_contents))
- signature_file = filename + ".foreign_signature"
- with open(signature_file, "wb") as f:
- f.write(base64.b64decode(signature))
- code = subprocess.call("openssl dgst -verify %s -signature %s %s" %
- (pubkeyfile, signature_file, filename),
- shell=True)
- matched = (code == 0)
- if not matched:
- os.remove(signature_file)
- os.remove(filename)
- return matched
diff --git a/deps/v8/tools/testrunner/server/status_handler.py b/deps/v8/tools/testrunner/server/status_handler.py
deleted file mode 100644
index 3f2271dc69..0000000000
--- a/deps/v8/tools/testrunner/server/status_handler.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import socket
-import SocketServer
-
-from . import compression
-from . import constants
-
-
-def _StatusQuery(peer, query):
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- code = sock.connect_ex((peer.address, constants.STATUS_PORT))
- if code != 0:
- # TODO(jkummerow): disconnect (after 3 failures?)
- return
- compression.Send(query, sock)
- compression.Send(constants.END_OF_STREAM, sock)
- rec = compression.Receiver(sock)
- data = None
- while not rec.IsDone():
- data = rec.Current()
- assert data[0] == query[0]
- data = data[1]
- rec.Advance()
- sock.close()
- return data
-
-
-def RequestTrustedPubkeys(peer, server):
- pubkey_list = _StatusQuery(peer, [constants.LIST_TRUSTED_PUBKEYS])
- for pubkey in pubkey_list:
- if server.IsTrusted(pubkey): continue
- result = _StatusQuery(peer, [constants.GET_SIGNED_PUBKEY, pubkey])
- server.AcceptNewTrusted(result)
-
-
-def NotifyNewTrusted(peer, data):
- _StatusQuery(peer, [constants.NOTIFY_NEW_TRUSTED] + data)
-
-
-def ITrustYouNow(peer):
- _StatusQuery(peer, [constants.TRUST_YOU_NOW])
-
-
-def TryTransitiveTrust(peer, pubkey, server):
- if _StatusQuery(peer, [constants.DO_YOU_TRUST, pubkey]):
- result = _StatusQuery(peer, [constants.GET_SIGNED_PUBKEY, pubkey])
- server.AcceptNewTrusted(result)
-
-
-class StatusHandler(SocketServer.BaseRequestHandler):
- def handle(self):
- rec = compression.Receiver(self.request)
- while not rec.IsDone():
- data = rec.Current()
- action = data[0]
-
- if action == constants.LIST_TRUSTED_PUBKEYS:
- response = self.server.daemon.ListTrusted()
- compression.Send([action, response], self.request)
-
- elif action == constants.GET_SIGNED_PUBKEY:
- response = self.server.daemon.SignTrusted(data[1])
- compression.Send([action, response], self.request)
-
- elif action == constants.NOTIFY_NEW_TRUSTED:
- self.server.daemon.AcceptNewTrusted(data[1:])
- pass # No response.
-
- elif action == constants.TRUST_YOU_NOW:
- self.server.daemon.MarkPeerAsTrusting(self.client_address[0])
- pass # No response.
-
- elif action == constants.DO_YOU_TRUST:
- response = self.server.daemon.IsTrusted(data[1])
- compression.Send([action, response], self.request)
-
- rec.Advance()
- compression.Send(constants.END_OF_STREAM, self.request)
-
-
-class StatusSocketServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
- def __init__(self, daemon):
- address = (daemon.ip, constants.STATUS_PORT)
- SocketServer.TCPServer.__init__(self, address, StatusHandler)
- self.daemon = daemon
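Together with main.Server.PeriodicTasks(), the handler above implements transitive trust: for a peer whose key is not yet trusted, the daemon asks each already-trusted peer DO_YOU_TRUST, fetches a signed copy of the unknown key via GET_SIGNED_PUBKEY, and lets AcceptNewTrusted() verify the signature before the key is stored. A compact sketch of that loop with the network calls abstracted into callables (the parameter names are illustrative):

def try_transitive_trust(unknown_fingerprint, trusted_peers,
                         do_you_trust, get_signed_pubkey, accept_new_trusted):
    # do_you_trust / get_signed_pubkey stand in for the status queries above;
    # accept_new_trusted stands in for Server.AcceptNewTrusted().
    for peer in trusted_peers:
        if do_you_trust(peer, unknown_fingerprint):
            accept_new_trusted(get_signed_pubkey(peer, unknown_fingerprint))
            return True
    return False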
diff --git a/deps/v8/tools/testrunner/server/work_handler.py b/deps/v8/tools/testrunner/server/work_handler.py
deleted file mode 100644
index 6bf7d43cf9..0000000000
--- a/deps/v8/tools/testrunner/server/work_handler.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import os
-import SocketServer
-import stat
-import subprocess
-import threading
-
-from . import compression
-from . import constants
-from . import signatures
-from ..network import endpoint
-from ..objects import workpacket
-
-
-class WorkHandler(SocketServer.BaseRequestHandler):
-
- def handle(self):
- rec = compression.Receiver(self.request)
- while not rec.IsDone():
- data = rec.Current()
- with self.server.job_lock:
- self._WorkOnWorkPacket(data)
- rec.Advance()
-
- def _WorkOnWorkPacket(self, data):
- server_root = self.server.daemon.root
- v8_root = os.path.join(server_root, "v8")
- os.chdir(v8_root)
- packet = workpacket.WorkPacket.Unpack(data)
- self.ctx = packet.context
- self.ctx.shell_dir = os.path.join("out",
- "%s.%s" % (self.ctx.arch, self.ctx.mode))
- if not os.path.isdir(self.ctx.shell_dir):
- os.makedirs(self.ctx.shell_dir)
- for binary in packet.binaries:
- if not self._UnpackBinary(binary, packet.pubkey_fingerprint):
- return
-
- if not self._CheckoutRevision(packet.base_revision):
- return
-
- if not self._ApplyPatch(packet.patch):
- return
-
- tests = packet.tests
- endpoint.Execute(v8_root, self.ctx, tests, self.request, self.server.daemon)
- self._SendResponse()
-
- def _SendResponse(self, error_message=None):
- try:
- if error_message:
- compression.Send([[-1, error_message]], self.request)
- compression.Send(constants.END_OF_STREAM, self.request)
- return
- except Exception, e:
- pass # Peer is gone. There's nothing we can do.
- # Clean up.
- self._Call("git checkout -f")
- self._Call("git clean -f -d")
- self._Call("rm -rf %s" % self.ctx.shell_dir)
-
- def _UnpackBinary(self, binary, pubkey_fingerprint):
- binary_name = binary["name"]
- if binary_name == "libv8.so":
- libdir = os.path.join(self.ctx.shell_dir, "lib.target")
- if not os.path.exists(libdir): os.makedirs(libdir)
- target = os.path.join(libdir, binary_name)
- else:
- target = os.path.join(self.ctx.shell_dir, binary_name)
- pubkeyfile = "../trusted/%s.pem" % pubkey_fingerprint
- if not signatures.VerifySignature(target, binary["blob"],
- binary["sign"], pubkeyfile):
- self._SendResponse("Signature verification failed")
- return False
- os.chmod(target, stat.S_IRWXU)
- return True
-
- def _CheckoutRevision(self, base_svn_revision):
- get_hash_cmd = (
- "git log -1 --format=%%H --remotes --grep='^git-svn-id:.*@%s'" %
- base_svn_revision)
- try:
- base_revision = subprocess.check_output(get_hash_cmd, shell=True)
- if not base_revision: raise ValueError
- except:
- self._Call("git fetch")
- try:
- base_revision = subprocess.check_output(get_hash_cmd, shell=True)
- if not base_revision: raise ValueError
- except:
- self._SendResponse("Base revision not found.")
- return False
- code = self._Call("git checkout -f %s" % base_revision)
- if code != 0:
- self._SendResponse("Error trying to check out base revision.")
- return False
- code = self._Call("git clean -f -d")
- if code != 0:
- self._SendResponse("Failed to reset checkout")
- return False
- return True
-
- def _ApplyPatch(self, patch):
- if not patch: return True # Just skip if the patch is empty.
- patchfilename = "_dtest_incoming_patch.patch"
- with open(patchfilename, "w") as f:
- f.write(patch)
- code = self._Call("git apply %s" % patchfilename)
- if code != 0:
- self._SendResponse("Error applying patch.")
- return False
- return True
-
- def _Call(self, cmd):
- return subprocess.call(cmd, shell=True)
-
-
-class WorkSocketServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
- def __init__(self, daemon):
- address = (daemon.ip, constants.PEER_PORT)
- SocketServer.TCPServer.__init__(self, address, WorkHandler)
- self.job_lock = threading.Lock()
- self.daemon = daemon
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
new file mode 100755
index 0000000000..d838df783c
--- /dev/null
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -0,0 +1,553 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from collections import OrderedDict
+from os.path import join
+import multiprocessing
+import os
+import random
+import shlex
+import subprocess
+import sys
+import time
+
+# Adds testrunner to the path, hence it has to be imported at the beginning.
+import base_runner
+
+from testrunner.local import execution
+from testrunner.local import progress
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.local import verbose
+from testrunner.local.variants import ALL_VARIANTS
+from testrunner.objects import context
+
+
+TIMEOUT_DEFAULT = 60
+
+# Variants ordered by expected runtime (slowest first).
+VARIANTS = ["default"]
+
+MORE_VARIANTS = [
+ "stress",
+ "stress_incremental_marking",
+ "nooptimization",
+ "stress_background_compile",
+ "wasm_traps",
+]
+
+VARIANT_ALIASES = {
+ # The default for developer workstations.
+ "dev": VARIANTS,
+ # Additional variants, run on all bots.
+ "more": MORE_VARIANTS,
+ # Shortcut for the two above ("more" first - it has the longer running tests).
+ "exhaustive": MORE_VARIANTS + VARIANTS,
+ # Additional variants, run on a subset of bots.
+ "extra": ["future", "liftoff"],
+}
+
+GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
+ "--concurrent-recompilation-queue-length=64",
+ "--concurrent-recompilation-delay=500",
+ "--concurrent-recompilation"]
+
+# Double the timeout for these:
+SLOW_ARCHS = ["arm",
+ "mips",
+ "mipsel",
+ "mips64",
+ "mips64el",
+ "s390",
+ "s390x",
+ "arm64"]
+
+
+class StandardTestRunner(base_runner.BaseTestRunner):
+ def __init__(self):
+ super(StandardTestRunner, self).__init__()
+
+ self.sancov_dir = None
+
+ def _do_execute(self, options, args):
+ if options.swarming:
+ # Swarming doesn't print how isolated commands are called. Lets make
+ # this less cryptic by printing it ourselves.
+ print ' '.join(sys.argv)
+
+ if utils.GuessOS() == "macos":
+ # TODO(machenbach): Temporary output for investigating hanging test
+ # driver on mac.
+ print "V8 related processes running on this host:"
+ try:
+ print subprocess.check_output(
+ "ps -e | egrep 'd8|cctest|unittests'", shell=True)
+ except Exception:
+ pass
+
+ suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
+
+ # Use default tests if no test configuration was provided at the cmd line.
+ if len(args) == 0:
+ args = ["default"]
+
+ # Expand arguments with grouped tests. The args should reflect the list
+ # of suites as otherwise filters would break.
+ def ExpandTestGroups(name):
+ if name in base_runner.TEST_MAP:
+ return [suite for suite in base_runner.TEST_MAP[name]]
+ else:
+ return [name]
+ args = reduce(lambda x, y: x + y,
+ [ExpandTestGroups(arg) for arg in args],
+ [])
+
+ args_suites = OrderedDict() # Used as set
+ for arg in args:
+ args_suites[arg.split('/')[0]] = True
+ suite_paths = [ s for s in args_suites if s in suite_paths ]
+
+ suites = []
+ for root in suite_paths:
+ suite = testsuite.TestSuite.LoadTestSuite(
+ os.path.join(base_runner.BASE_DIR, "test", root))
+ if suite:
+ suites.append(suite)
+
+ for s in suites:
+ s.PrepareSources()
+
+ try:
+ return self._execute(args, options, suites)
+ except KeyboardInterrupt:
+ return 2
+
+ def _add_parser_options(self, parser):
+ parser.add_option("--sancov-dir",
+ help="Directory where to collect coverage data")
+ parser.add_option("--cfi-vptr",
+ help="Run tests with UBSAN cfi_vptr option.",
+ default=False, action="store_true")
+ parser.add_option("--novfp3",
+ help="Indicates that V8 was compiled without VFP3"
+ " support",
+ default=False, action="store_true")
+ parser.add_option("--cat", help="Print the source of the tests",
+ default=False, action="store_true")
+ parser.add_option("--slow-tests",
+ help="Regard slow tests (run|skip|dontcare)",
+ default="dontcare")
+ parser.add_option("--pass-fail-tests",
+ help="Regard pass|fail tests (run|skip|dontcare)",
+ default="dontcare")
+ parser.add_option("--gc-stress",
+ help="Switch on GC stress mode",
+ default=False, action="store_true")
+ parser.add_option("--command-prefix",
+ help="Prepended to each shell command used to run a"
+ " test",
+ default="")
+ parser.add_option("--extra-flags",
+ help="Additional flags to pass to each test command",
+ action="append", default=[])
+ parser.add_option("--isolates", help="Whether to test isolates",
+ default=False, action="store_true")
+ parser.add_option("-j", help="The number of parallel tasks to run",
+ default=0, type="int")
+ parser.add_option("--no-harness", "--noharness",
+ help="Run without test harness of a given suite",
+ default=False, action="store_true")
+ parser.add_option("--no-presubmit", "--nopresubmit",
+ help='Skip presubmit checks (deprecated)',
+ default=False, dest="no_presubmit", action="store_true")
+ parser.add_option("--no-sorting", "--nosorting",
+ help="Don't sort tests according to duration of last"
+ " run.",
+ default=False, dest="no_sorting", action="store_true")
+ parser.add_option("--no-variants", "--novariants",
+ help="Deprecated. "
+ "Equivalent to passing --variants=default",
+ default=False, dest="no_variants", action="store_true")
+ parser.add_option("--variants",
+ help="Comma-separated list of testing variants;"
+ " default: \"%s\"" % ",".join(VARIANTS))
+ parser.add_option("--exhaustive-variants",
+ default=False, action="store_true",
+ help="Deprecated. "
+ "Equivalent to passing --variants=exhaustive")
+ parser.add_option("-p", "--progress",
+ help=("The style of progress indicator"
+ " (verbose, dots, color, mono)"),
+ choices=progress.PROGRESS_INDICATORS.keys(),
+ default="mono")
+ parser.add_option("--quickcheck", default=False, action="store_true",
+ help=("Quick check mode (skip slow tests)"))
+ parser.add_option("--report", help="Print a summary of the tests to be"
+ " run",
+ default=False, action="store_true")
+ parser.add_option("--json-test-results",
+ help="Path to a file for storing json results.")
+ parser.add_option("--flakiness-results",
+ help="Path to a file for storing flakiness json.")
+ parser.add_option("--rerun-failures-count",
+ help=("Number of times to rerun each failing test case."
+ " Very slow tests will be rerun only once."),
+ default=0, type="int")
+ parser.add_option("--rerun-failures-max",
+ help="Maximum number of failing test cases to rerun.",
+ default=100, type="int")
+ parser.add_option("--shard-count",
+ help="Split testsuites into this number of shards",
+ default=1, type="int")
+ parser.add_option("--shard-run",
+ help="Run this shard from the split up tests.",
+ default=1, type="int")
+ parser.add_option("--dont-skip-slow-simulator-tests",
+ help="Don't skip more slow tests when using a"
+ " simulator.",
+ default=False, action="store_true",
+ dest="dont_skip_simulator_slow_tests")
+ parser.add_option("--swarming",
+ help="Indicates running test driver on swarming.",
+ default=False, action="store_true")
+ parser.add_option("--time", help="Print timing information after running",
+ default=False, action="store_true")
+ parser.add_option("-t", "--timeout", help="Timeout in seconds",
+ default=TIMEOUT_DEFAULT, type="int")
+ parser.add_option("--warn-unused", help="Report unused rules",
+ default=False, action="store_true")
+ parser.add_option("--junitout", help="File name of the JUnit output")
+ parser.add_option("--junittestsuite",
+ help="The testsuite name in the JUnit output file",
+ default="v8tests")
+ parser.add_option("--random-seed", default=0, dest="random_seed",
+ help="Default seed for initializing random generator",
+ type=int)
+ parser.add_option("--random-seed-stress-count", default=1, type="int",
+ dest="random_seed_stress_count",
+ help="Number of runs with different random seeds")
+
+ def _process_options(self, options):
+ global VARIANTS
+
+ if options.sancov_dir:
+ self.sancov_dir = options.sancov_dir
+ if not os.path.exists(self.sancov_dir):
+ print("sancov-dir %s doesn't exist" % self.sancov_dir)
+ raise base_runner.TestRunnerError()
+
+ options.command_prefix = shlex.split(options.command_prefix)
+ options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
+
+ if options.gc_stress:
+ options.extra_flags += GC_STRESS_FLAGS
+
+ if self.build_config.asan:
+ options.extra_flags.append("--invoke-weak-callbacks")
+ options.extra_flags.append("--omit-quit")
+
+ if options.novfp3:
+ options.extra_flags.append("--noenable-vfp3")
+
+ if options.no_variants:
+ print ("Option --no-variants is deprecated. "
+ "Pass --variants=default instead.")
+ assert not options.variants
+ options.variants = "default"
+
+ if options.exhaustive_variants:
+ # TODO(machenbach): Switch infra to --variants=exhaustive after M65.
+ print ("Option --exhaustive-variants is deprecated. "
+ "Pass --variants=exhaustive instead.")
+ # This is used on many bots. It includes a larger set of default
+ # variants.
+ # Other options for manipulating variants still apply afterwards.
+ assert not options.variants
+ options.variants = "exhaustive"
+
+ if options.quickcheck:
+ assert not options.variants
+ options.variants = "stress,default"
+ options.slow_tests = "skip"
+ options.pass_fail_tests = "skip"
+
+ if self.build_config.predictable:
+ options.variants = "default"
+ options.extra_flags.append("--predictable")
+ options.extra_flags.append("--verify_predictable")
+ options.extra_flags.append("--no-inline-new")
+
+ # TODO(machenbach): Figure out how to test a bigger subset of variants on
+ # msan.
+ if self.build_config.msan:
+ options.variants = "default"
+
+ if options.j == 0:
+ options.j = multiprocessing.cpu_count()
+
+ if options.random_seed_stress_count <= 1 and options.random_seed == 0:
+ options.random_seed = self._random_seed()
+
+ # Use developer defaults if no variant was specified.
+ options.variants = options.variants or "dev"
+
+ # Resolve variant aliases and dedupe.
+ # TODO(machenbach): Don't mutate global variable. Rather pass mutated
+ # version as local variable.
+ VARIANTS = list(set(reduce(
+ list.__add__,
+ (VARIANT_ALIASES.get(v, [v]) for v in options.variants.split(",")),
+ [],
+ )))
+
+ if not set(VARIANTS).issubset(ALL_VARIANTS):
+ print "All variants must be in %s" % str(ALL_VARIANTS)
+ raise base_runner.TestRunnerError()
+
+ def CheckTestMode(name, option):
+ if not option in ["run", "skip", "dontcare"]:
+ print "Unknown %s mode %s" % (name, option)
+ raise base_runner.TestRunnerError()
+ CheckTestMode("slow test", options.slow_tests)
+ CheckTestMode("pass|fail test", options.pass_fail_tests)
+ if self.build_config.no_i18n:
+ base_runner.TEST_MAP["bot_default"].remove("intl")
+ base_runner.TEST_MAP["default"].remove("intl")
+
+ def _setup_env(self):
+ super(StandardTestRunner, self)._setup_env()
+
+ symbolizer_option = self._get_external_symbolizer_option()
+
+ if self.sancov_dir:
+ os.environ['ASAN_OPTIONS'] = ":".join([
+ 'coverage=1',
+ 'coverage_dir=%s' % self.sancov_dir,
+ symbolizer_option,
+ "allow_user_segv_handler=1",
+ ])
+
+ def _random_seed(self):
+ seed = 0
+ while not seed:
+ seed = random.SystemRandom().randint(-2147483648, 2147483647)
+ return seed
+
+ def _execute(self, args, options, suites):
+ print(">>> Running tests for %s.%s" % (self.build_config.arch,
+ self.mode_name))
+ # Populate context object.
+
+ # Simulators are slow, therefore allow a longer timeout.
+ if self.build_config.arch in SLOW_ARCHS:
+ options.timeout *= 2
+
+ options.timeout *= self.mode_options.timeout_scalefactor
+
+ if self.build_config.predictable:
+ # Predictable mode is slower.
+ options.timeout *= 2
+
+ ctx = context.Context(self.build_config.arch,
+ self.mode_options.execution_mode,
+ self.outdir,
+ self.mode_options.flags,
+ options.verbose,
+ options.timeout,
+ options.isolates,
+ options.command_prefix,
+ options.extra_flags,
+ self.build_config.no_i18n,
+ options.random_seed,
+ options.no_sorting,
+ options.rerun_failures_count,
+ options.rerun_failures_max,
+ self.build_config.predictable,
+ options.no_harness,
+ use_perf_data=not options.swarming,
+ sancov_dir=self.sancov_dir)
+
+ # TODO(all): Combine "simulator" and "simulator_run".
+ # TODO(machenbach): In GN we can derive simulator run from
+ # target_arch != v8_target_arch in the dumped build config.
+ simulator_run = (
+ not options.dont_skip_simulator_slow_tests and
+ self.build_config.arch in [
+ 'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
+ 'ppc64', 's390', 's390x'] and
+ bool(base_runner.ARCH_GUESS) and
+ self.build_config.arch != base_runner.ARCH_GUESS)
+ # Find available test suites and read test cases from them.
+ variables = {
+ "arch": self.build_config.arch,
+ "asan": self.build_config.asan,
+ "byteorder": sys.byteorder,
+ "dcheck_always_on": self.build_config.dcheck_always_on,
+ "deopt_fuzzer": False,
+ "gc_fuzzer": False,
+ "gc_stress": options.gc_stress,
+ "gcov_coverage": self.build_config.gcov_coverage,
+ "isolates": options.isolates,
+ "mode": self.mode_options.status_mode,
+ "msan": self.build_config.msan,
+ "no_harness": options.no_harness,
+ "no_i18n": self.build_config.no_i18n,
+ "no_snap": self.build_config.no_snap,
+ "novfp3": options.novfp3,
+ "predictable": self.build_config.predictable,
+ "simulator": utils.UseSimulator(self.build_config.arch),
+ "simulator_run": simulator_run,
+ "system": utils.GuessOS(),
+ "tsan": self.build_config.tsan,
+ "ubsan_vptr": self.build_config.ubsan_vptr,
+ }
+ all_tests = []
+ num_tests = 0
+ for s in suites:
+ s.ReadStatusFile(variables)
+ s.ReadTestCases(ctx)
+ if len(args) > 0:
+ s.FilterTestCasesByArgs(args)
+ all_tests += s.tests
+
+ # First filtering by status applying the generic rules (tests without
+ # variants)
+ if options.warn_unused:
+ s.WarnUnusedRules(check_variant_rules=False)
+ s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
+
+ if options.cat:
+ verbose.PrintTestSource(s.tests)
+ continue
+ variant_gen = s.CreateVariantGenerator(VARIANTS)
+ variant_tests = [ t.CopyAddingFlags(v, flags)
+ for t in s.tests
+ for v in variant_gen.FilterVariantsByTest(t)
+ for flags in variant_gen.GetFlagSets(t, v) ]
+
+ if options.random_seed_stress_count > 1:
+ # Duplicate test for random seed stress mode.
+ def iter_seed_flags():
+ for _ in range(0, options.random_seed_stress_count):
+ # Use given random seed for all runs (set by default in
+ # execution.py) or a new random seed if none is specified.
+ if options.random_seed:
+ yield []
+ else:
+ yield ["--random-seed=%d" % self._random_seed()]
+ s.tests = [
+ t.CopyAddingFlags(t.variant, flags)
+ for t in variant_tests
+ for flags in iter_seed_flags()
+ ]
+ else:
+ s.tests = variant_tests
+
+ # Second filtering by status applying also the variant-dependent rules.
+ if options.warn_unused:
+ s.WarnUnusedRules(check_variant_rules=True)
+ s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
+
+ for t in s.tests:
+ t.flags += s.GetStatusfileFlags(t)
+
+ s.tests = self._shard_tests(s.tests, options)
+ num_tests += len(s.tests)
+
+ if options.cat:
+ return 0 # We're done here.
+
+ if options.report:
+ verbose.PrintReport(all_tests)
+
+ # Run the tests.
+ start_time = time.time()
+ progress_indicator = progress.IndicatorNotifier()
+ progress_indicator.Register(
+ progress.PROGRESS_INDICATORS[options.progress]())
+ if options.junitout:
+ progress_indicator.Register(progress.JUnitTestProgressIndicator(
+ options.junitout, options.junittestsuite))
+ if options.json_test_results:
+ progress_indicator.Register(progress.JsonTestProgressIndicator(
+ options.json_test_results,
+ self.build_config.arch,
+ self.mode_options.execution_mode,
+ ctx.random_seed))
+ if options.flakiness_results:
+ progress_indicator.Register(progress.FlakinessTestProgressIndicator(
+ options.flakiness_results))
+
+ runner = execution.Runner(suites, progress_indicator, ctx)
+ exit_code = runner.Run(options.j)
+ overall_duration = time.time() - start_time
+
+ if options.time:
+ verbose.PrintTestDurations(suites, overall_duration)
+
+ if num_tests == 0:
+ print("Warning: no tests were run!")
+
+ if exit_code == 1 and options.json_test_results:
+ print("Force exit code 0 after failures. Json test results file "
+ "generated with failure information.")
+ exit_code = 0
+
+ if self.sancov_dir:
+ # If tests ran with sanitizer coverage, merge coverage files in the end.
+ try:
+ print "Merging sancov files."
+ subprocess.check_call([
+ sys.executable,
+ join(
+ base_runner.BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
+ "--coverage-dir=%s" % self.sancov_dir])
+ except:
+ print >> sys.stderr, "Error: Merging sancov files failed."
+ exit_code = 1
+
+ return exit_code
+
+ def _shard_tests(self, tests, options):
+ # Read gtest shard configuration from environment (e.g. set by swarming).
+ # If none is present, use values passed on the command line.
+ shard_count = int(
+ os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
+ shard_run = os.environ.get('GTEST_SHARD_INDEX')
+ if shard_run is not None:
+ # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
+ shard_run = int(shard_run) + 1
+ else:
+ shard_run = options.shard_run
+
+ if options.shard_count > 1:
+ # Log if a value was passed on the cmd line and it differs from the
+ # environment variables.
+ if options.shard_count != shard_count:
+ print("shard_count from cmd line differs from environment variable "
+ "GTEST_TOTAL_SHARDS")
+ if options.shard_run > 1 and options.shard_run != shard_run:
+ print("shard_run from cmd line differs from environment variable "
+ "GTEST_SHARD_INDEX")
+
+ if shard_count < 2:
+ return tests
+ if shard_run < 1 or shard_run > shard_count:
+ print "shard-run not a valid number, should be in [1:shard-count]"
+ print "defaulting back to running all tests"
+ return tests
+ count = 0
+ shard = []
+ for test in tests:
+ if count % shard_count == shard_run - 1:
+ shard.append(test)
+ count += 1
+ return shard
+
+
+if __name__ == '__main__':
+ sys.exit(StandardTestRunner().execute())
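For reference, _shard_tests() assigns tests round-robin: test number i (0-based) belongs to shard (i % shard_count) + 1, so each shard gets a deterministic, roughly equal slice without any coordination between swarming bots. A standalone sketch of the selection rule:

def select_shard(tests, shard_count, shard_run):
    # shard_run is 1-based, matching --shard-run and GTEST_SHARD_INDEX + 1.
    assert 1 <= shard_run <= shard_count
    return [t for i, t in enumerate(tests) if i % shard_count == shard_run - 1]

# Example: ten tests split across three shards; shard 2 gets indices 1, 4, 7.
print(select_shard(list(range(10)), 3, 2))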