Diffstat (limited to 'deps/v8/tools/testrunner/testproc')
-rw-r--r--  deps/v8/tools/testrunner/testproc/__init__.py     3
-rw-r--r--  deps/v8/tools/testrunner/testproc/base.py        207
-rw-r--r--  deps/v8/tools/testrunner/testproc/execution.py    92
-rw-r--r--  deps/v8/tools/testrunner/testproc/filter.py       83
-rw-r--r--  deps/v8/tools/testrunner/testproc/loader.py       27
-rw-r--r--  deps/v8/tools/testrunner/testproc/progress.py    385
-rw-r--r--  deps/v8/tools/testrunner/testproc/rerun.py        59
-rw-r--r--  deps/v8/tools/testrunner/testproc/result.py       97
-rw-r--r--  deps/v8/tools/testrunner/testproc/shard.py        30
-rw-r--r--  deps/v8/tools/testrunner/testproc/variant.py      68
10 files changed, 1051 insertions, 0 deletions
diff --git a/deps/v8/tools/testrunner/testproc/__init__.py b/deps/v8/tools/testrunner/testproc/__init__.py
new file mode 100644
index 0000000000..4433538556
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/deps/v8/tools/testrunner/testproc/base.py b/deps/v8/tools/testrunner/testproc/base.py
new file mode 100644
index 0000000000..1a87dbed55
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/base.py
@@ -0,0 +1,207 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from .result import SKIPPED
+
+
+"""
+Pipeline
+
+Test processors are chained together and communicate with each other by
+calling the previous/next processor in the chain.
+     ----next_test()---->      ----next_test()---->
+Proc1                     Proc2                     Proc3
+     <---result_for()----      <---result_for()----
+
+For every next_test() there is exactly one result_for() call.
+If a processor ignores a test, it has to return a SkippedResult.
+If it creates multiple subtests for one test and wants to pass all of their
+results to the previous processor, it can enclose them in a GroupedResult.
+
+
+Subtests
+
+When a test processor needs to modify a test or create variants of it, it
+creates subtests and sends them to the next processor.
+Each subtest has:
+- procid - a globally unique id that should contain the id of the parent test
+  and a suffix given by the test processor, e.g. its name + subtest type.
+- processor - the processor that created it
+- origin - a pointer to the parent (sub)test
+"""
+
+
+DROP_RESULT = 0
+DROP_OUTPUT = 1
+DROP_PASS_OUTPUT = 2
+DROP_PASS_STDOUT = 3
+
+def get_reduce_result_function(requirement):
+ if requirement == DROP_RESULT:
+ return lambda _: None
+
+ if requirement == DROP_OUTPUT:
+ def f(result):
+ result.output = None
+ return result
+ return f
+
+ if requirement == DROP_PASS_OUTPUT:
+ def f(result):
+ if not result.has_unexpected_output:
+ result.output = None
+ return result
+ return f
+
+ if requirement == DROP_PASS_STDOUT:
+ def f(result):
+ if not result.has_unexpected_output:
+ result.output.stdout = None
+ result.output.stderr = None
+ return result
+ return f
+
+
+class TestProc(object):
+ def __init__(self):
+ self._prev_proc = None
+ self._next_proc = None
+ self._requirement = DROP_RESULT
+ self._prev_requirement = None
+ self._reduce_result = lambda result: result
+
+ def connect_to(self, next_proc):
+ """Puts `next_proc` after itself in the chain."""
+ next_proc._prev_proc = self
+ self._next_proc = next_proc
+
+ def remove_from_chain(self):
+ if self._prev_proc:
+ self._prev_proc._next_proc = self._next_proc
+ if self._next_proc:
+ self._next_proc._prev_proc = self._prev_proc
+
+ def setup(self, requirement=DROP_RESULT):
+ """
+    Method called by the previous processor or the pipeline creator to let
+    processors know which part of the result can be dropped.
+ """
+ self._prev_requirement = requirement
+ if self._next_proc:
+ self._next_proc.setup(max(requirement, self._requirement))
+ if self._prev_requirement < self._requirement:
+ self._reduce_result = get_reduce_result_function(self._prev_requirement)
+
+ def next_test(self, test):
+ """
+    Method called by the previous processor whenever it produces a new test.
+    It shouldn't be called by anything other than the previous processor.
+ """
+ raise NotImplementedError()
+
+ def result_for(self, test, result):
+ """
+    Method called by the next processor whenever it has a result for some test.
+    It shouldn't be called by anything other than the next processor.
+ """
+ raise NotImplementedError()
+
+ def heartbeat(self):
+ if self._prev_proc:
+ self._prev_proc.heartbeat()
+
+ ### Communication
+
+ def _send_test(self, test):
+ """Helper method for sending test to the next processor."""
+ self._next_proc.next_test(test)
+
+ def _send_result(self, test, result):
+ """Helper method for sending result to the previous processor."""
+ result = self._reduce_result(result)
+ self._prev_proc.result_for(test, result)
+
+
+
+class TestProcObserver(TestProc):
+ """Processor used for observing the data."""
+ def __init__(self):
+ super(TestProcObserver, self).__init__()
+
+ def next_test(self, test):
+ self._on_next_test(test)
+ self._send_test(test)
+
+ def result_for(self, test, result):
+ self._on_result_for(test, result)
+ self._send_result(test, result)
+
+ def heartbeat(self):
+ self._on_heartbeat()
+ super(TestProcObserver, self).heartbeat()
+
+ def _on_next_test(self, test):
+ """Method called after receiving test from previous processor but before
+ sending it to the next one."""
+ pass
+
+ def _on_result_for(self, test, result):
+ """Method called after receiving result from next processor but before
+ sending it to the previous one."""
+ pass
+
+ def _on_heartbeat(self):
+ pass
+
+
+class TestProcProducer(TestProc):
+ """Processor for creating subtests."""
+
+ def __init__(self, name):
+ super(TestProcProducer, self).__init__()
+ self._name = name
+
+ def next_test(self, test):
+ self._next_test(test)
+
+ def result_for(self, subtest, result):
+ self._result_for(subtest.origin, subtest, result)
+
+ ### Implementation
+ def _next_test(self, test):
+ raise NotImplementedError()
+
+ def _result_for(self, test, subtest, result):
+ """
+ result_for method extended with `subtest` parameter.
+
+    Args:
+      test: the test used by the current processor to create the subtest.
+      subtest: the subtest that `result` belongs to.
+      result: the subtest's execution result created by the output processor.
+ """
+ raise NotImplementedError()
+
+ ### Managing subtests
+ def _create_subtest(self, test, subtest_id, **kwargs):
+ """Creates subtest with subtest id <processor name>-`subtest_id`."""
+ return test.create_subtest(self, '%s-%s' % (self._name, subtest_id),
+ **kwargs)
+
+
+class TestProcFilter(TestProc):
+ """Processor for filtering tests."""
+
+ def next_test(self, test):
+ if self._filter(test):
+ self._send_result(test, SKIPPED)
+ else:
+ self._send_test(test)
+
+ def result_for(self, test, result):
+ self._send_result(test, result)
+
+ def _filter(self, test):
+ """Returns whether test should be filtered out."""
+ raise NotImplementedError()
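
The contract above is easiest to see end to end. Below is a minimal, self-contained sketch of the same next_test()/result_for() protocol; the EchoProc and HeadProc classes are illustrative stand-ins, not part of this patch.

class EchoProc(object):
  """Toy terminal processor: answers every test with a result immediately."""
  def __init__(self):
    self._prev_proc = None

  def next_test(self, test):
    # For every next_test() there is exactly one result_for() call.
    self._prev_proc.result_for(test, 'result-for-%s' % test)


class HeadProc(object):
  """Toy head of the chain: feeds tests in and collects results."""
  def __init__(self):
    self._next_proc = None
    self.results = []

  def connect_to(self, next_proc):
    next_proc._prev_proc = self
    self._next_proc = next_proc

  def load(self, tests):
    for test in tests:
      self._next_proc.next_test(test)

  def result_for(self, test, result):
    self.results.append((test, result))


head = HeadProc()
head.connect_to(EchoProc())
head.load(['t1', 't2'])
print(head.results)  # [('t1', 'result-for-t1'), ('t2', 'result-for-t2')]
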
diff --git a/deps/v8/tools/testrunner/testproc/execution.py b/deps/v8/tools/testrunner/testproc/execution.py
new file mode 100644
index 0000000000..021b02af3e
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/execution.py
@@ -0,0 +1,92 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import traceback
+
+from . import base
+from ..local import pool
+
+
+# Global function for multiprocessing, because pickling a static method doesn't
+# work on Windows.
+def run_job(job, process_context):
+ return job.run(process_context)
+
+
+def create_process_context(requirement):
+ return ProcessContext(base.get_reduce_result_function(requirement))
+
+
+JobResult = collections.namedtuple('JobResult', ['id', 'result'])
+ProcessContext = collections.namedtuple('ProcessContext', ['reduce_result_f'])
+
+
+class Job(object):
+ def __init__(self, test_id, cmd, outproc, keep_output):
+ self.test_id = test_id
+ self.cmd = cmd
+ self.outproc = outproc
+ self.keep_output = keep_output
+
+ def run(self, process_ctx):
+ output = self.cmd.execute()
+ result = self.outproc.process(output)
+ if not self.keep_output:
+ result = process_ctx.reduce_result_f(result)
+ return JobResult(self.test_id, result)
+
+
+class ExecutionProc(base.TestProc):
+ """Last processor in the chain. Instead of passing tests further it creates
+ commands and output processors, executes them in multiple worker processes and
+ sends results to the previous processor.
+ """
+
+ def __init__(self, jobs, context):
+ super(ExecutionProc, self).__init__()
+ self._pool = pool.Pool(jobs)
+ self._context = context
+ self._tests = {}
+
+ def connect_to(self, next_proc):
+ assert False, 'ExecutionProc cannot be connected to anything'
+
+ def start(self):
+ try:
+ it = self._pool.imap_unordered(
+ fn=run_job,
+ gen=[],
+ process_context_fn=create_process_context,
+ process_context_args=[self._prev_requirement],
+ )
+ for pool_result in it:
+ if pool_result.heartbeat:
+ continue
+
+ job_result = pool_result.value
+ test_id, result = job_result
+
+ test, result.cmd = self._tests[test_id]
+ del self._tests[test_id]
+ self._send_result(test, result)
+ except KeyboardInterrupt:
+ raise
+ except:
+ traceback.print_exc()
+ raise
+ finally:
+ self._pool.terminate()
+
+ def next_test(self, test):
+ test_id = test.procid
+ cmd = test.get_command(self._context)
+ self._tests[test_id] = test, cmd
+
+ # TODO(majeski): Needs factory for outproc as in local/execution.py
+ outproc = test.output_proc
+ self._pool.add([Job(test_id, cmd, outproc, test.keep_output)])
+
+ def result_for(self, test, result):
+ assert False, 'ExecutionProc cannot receive results'
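
run_job() has to live at module level because the jobs are pickled and shipped to worker processes, and on Windows a bound or static method would not pickle. Below is a rough, hypothetical sketch of that pattern using the standard multiprocessing pool rather than the V8 pool API.

import multiprocessing

def run_job(job):  # module-level, therefore picklable
  test_id, payload = job
  return (test_id, payload * 2)  # stand-in for cmd.execute() + outproc.process()

if __name__ == '__main__':
  pool = multiprocessing.Pool(processes=4)
  jobs = [('test-%d' % i, i) for i in range(8)]
  try:
    for test_id, result in pool.imap_unordered(run_job, jobs):
      print('%s -> %s' % (test_id, result))
  finally:
    pool.terminate()
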
diff --git a/deps/v8/tools/testrunner/testproc/filter.py b/deps/v8/tools/testrunner/testproc/filter.py
new file mode 100644
index 0000000000..5081997751
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/filter.py
@@ -0,0 +1,83 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import defaultdict
+import fnmatch
+
+from . import base
+
+
+class StatusFileFilterProc(base.TestProcFilter):
+ """Filters tests by outcomes from status file.
+
+  The status file has to be loaded before using this processor.
+
+ Args:
+ slow_tests_mode: What to do with slow tests.
+ pass_fail_tests_mode: What to do with pass or fail tests.
+
+ Mode options:
+ None (default): don't skip
+ "skip": skip if slow/pass_fail
+ "run": skip if not slow/pass_fail
+ """
+
+ def __init__(self, slow_tests_mode, pass_fail_tests_mode):
+ super(StatusFileFilterProc, self).__init__()
+ self._slow_tests_mode = slow_tests_mode
+ self._pass_fail_tests_mode = pass_fail_tests_mode
+
+ def _filter(self, test):
+ return (
+ test.do_skip or
+ self._skip_slow(test.is_slow) or
+ self._skip_pass_fail(test.is_pass_or_fail)
+ )
+
+ def _skip_slow(self, is_slow):
+ return (
+ (self._slow_tests_mode == 'run' and not is_slow) or
+ (self._slow_tests_mode == 'skip' and is_slow)
+ )
+
+ def _skip_pass_fail(self, is_pass_fail):
+ return (
+ (self._pass_fail_tests_mode == 'run' and not is_pass_fail) or
+ (self._pass_fail_tests_mode == 'skip' and is_pass_fail)
+ )
+
+
+class NameFilterProc(base.TestProcFilter):
+ """Filters tests based on command-line arguments.
+
+  Each arg can be a glob: asterisks in any position of the name represent
+  zero or more characters. Without asterisks, only exact matches are used,
+  with the exception of a bare test-suite name given as an argument.
+ """
+ def __init__(self, args):
+ super(NameFilterProc, self).__init__()
+
+ self._globs = defaultdict(list)
+ for a in args:
+ argpath = a.split('/')
+ suitename = argpath[0]
+ path = '/'.join(argpath[1:]) or '*'
+ self._globs[suitename].append(path)
+
+ for s, globs in self._globs.iteritems():
+ if not globs or '*' in globs:
+ self._globs[s] = []
+
+ def _filter(self, test):
+ globs = self._globs.get(test.suite.name)
+ if globs is None:
+ return True
+
+ if not globs:
+ return False
+
+ for g in globs:
+ if fnmatch.fnmatch(test.path, g):
+ return False
+ return True
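
The matching rule is easier to read inverted: a test is kept when its suite was named on the command line and either the whole suite was requested or one of the globs matches. A small sketch of that rule, with made-up suite and test names:

import fnmatch

def keep(suite_globs, suite, path):
  globs = suite_globs.get(suite)
  if globs is None:  # suite never mentioned on the command line
    return False
  if not globs:      # bare suite name or '*': keep the whole suite
    return True
  return any(fnmatch.fnmatch(path, g) for g in globs)

globs = {'mjsunit': ['regress/*'], 'cctest': []}
print(keep(globs, 'mjsunit', 'regress/regress-123'))  # True
print(keep(globs, 'mjsunit', 'array-sort'))           # False
print(keep(globs, 'cctest', 'test-heap/Foo'))         # True
print(keep(globs, 'webkit', 'anything'))              # False
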
diff --git a/deps/v8/tools/testrunner/testproc/loader.py b/deps/v8/tools/testrunner/testproc/loader.py
new file mode 100644
index 0000000000..0a3d0df1b3
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/loader.py
@@ -0,0 +1,27 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+
+class LoadProc(base.TestProc):
+ """First processor in the chain that passes all tests to the next processor.
+ """
+
+ def load_tests(self, tests):
+ loaded = set()
+ for test in tests:
+ if test.procid in loaded:
+ print 'Warning: %s already obtained' % test.procid
+ continue
+
+ loaded.add(test.procid)
+ self._send_test(test)
+
+ def next_test(self, test):
+ assert False, 'Nothing can be connected to the LoadProc'
+
+ def result_for(self, test, result):
+ # Ignore all results.
+ pass
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
new file mode 100644
index 0000000000..78514f7252
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -0,0 +1,385 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import os
+import sys
+import time
+
+from . import base
+from ..local import junit_output
+
+
+def print_failure_header(test):
+ if test.output_proc.negative:
+ negative_marker = '[negative] '
+ else:
+ negative_marker = ''
+ print "=== %(label)s %(negative)s===" % {
+ 'label': test,
+ 'negative': negative_marker,
+ }
+
+
+class TestsCounter(base.TestProcObserver):
+ def __init__(self):
+ super(TestsCounter, self).__init__()
+ self.total = 0
+
+ def _on_next_test(self, test):
+ self.total += 1
+
+
+class ResultsTracker(base.TestProcObserver):
+ def __init__(self):
+ super(ResultsTracker, self).__init__()
+ self._requirement = base.DROP_OUTPUT
+
+ self.failed = 0
+ self.remaining = 0
+ self.total = 0
+
+ def _on_next_test(self, test):
+ self.total += 1
+ self.remaining += 1
+
+ def _on_result_for(self, test, result):
+ self.remaining -= 1
+ if result.has_unexpected_output:
+ self.failed += 1
+
+
+class ProgressIndicator(base.TestProcObserver):
+ def finished(self):
+ pass
+
+
+class SimpleProgressIndicator(ProgressIndicator):
+ def __init__(self):
+ super(SimpleProgressIndicator, self).__init__()
+ self._requirement = base.DROP_PASS_OUTPUT
+
+ self._failed = []
+ self._total = 0
+
+ def _on_next_test(self, test):
+ self._total += 1
+
+ def _on_result_for(self, test, result):
+ # TODO(majeski): Support for dummy/grouped results
+ if result.has_unexpected_output:
+ self._failed.append((test, result))
+
+ def finished(self):
+ crashed = 0
+ print
+ for test, result in self._failed:
+ print_failure_header(test)
+ if result.output.stderr:
+ print "--- stderr ---"
+ print result.output.stderr.strip()
+ if result.output.stdout:
+ print "--- stdout ---"
+ print result.output.stdout.strip()
+ print "Command: %s" % result.cmd.to_string()
+ if result.output.HasCrashed():
+ print "exit code: %d" % result.output.exit_code
+ print "--- CRASHED ---"
+ crashed += 1
+ if result.output.HasTimedOut():
+ print "--- TIMEOUT ---"
+ if len(self._failed) == 0:
+ print "==="
+ print "=== All tests succeeded"
+ print "==="
+ else:
+ print
+ print "==="
+ print "=== %i tests failed" % len(self._failed)
+ if crashed > 0:
+ print "=== %i tests CRASHED" % crashed
+ print "==="
+
+
+class VerboseProgressIndicator(SimpleProgressIndicator):
+ def _on_result_for(self, test, result):
+ super(VerboseProgressIndicator, self)._on_result_for(test, result)
+ # TODO(majeski): Support for dummy/grouped results
+ if result.has_unexpected_output:
+ if result.output.HasCrashed():
+ outcome = 'CRASH'
+ else:
+ outcome = 'FAIL'
+ else:
+ outcome = 'pass'
+ print 'Done running %s: %s' % (test, outcome)
+ sys.stdout.flush()
+
+ def _on_heartbeat(self):
+ print 'Still working...'
+ sys.stdout.flush()
+
+
+class DotsProgressIndicator(SimpleProgressIndicator):
+ def __init__(self):
+ super(DotsProgressIndicator, self).__init__()
+ self._count = 0
+
+ def _on_result_for(self, test, result):
+ # TODO(majeski): Support for dummy/grouped results
+ self._count += 1
+ if self._count > 1 and self._count % 50 == 1:
+ sys.stdout.write('\n')
+ if result.has_unexpected_output:
+ if result.output.HasCrashed():
+ sys.stdout.write('C')
+ sys.stdout.flush()
+ elif result.output.HasTimedOut():
+ sys.stdout.write('T')
+ sys.stdout.flush()
+ else:
+ sys.stdout.write('F')
+ sys.stdout.flush()
+ else:
+ sys.stdout.write('.')
+ sys.stdout.flush()
+
+
+class CompactProgressIndicator(ProgressIndicator):
+ def __init__(self, templates):
+ super(CompactProgressIndicator, self).__init__()
+ self._requirement = base.DROP_PASS_OUTPUT
+
+ self._templates = templates
+ self._last_status_length = 0
+ self._start_time = time.time()
+
+ self._total = 0
+ self._passed = 0
+ self._failed = 0
+
+ def _on_next_test(self, test):
+ self._total += 1
+
+ def _on_result_for(self, test, result):
+ # TODO(majeski): Support for dummy/grouped results
+ if result.has_unexpected_output:
+ self._failed += 1
+ else:
+ self._passed += 1
+
+ self._print_progress(str(test))
+ if result.has_unexpected_output:
+ output = result.output
+ stdout = output.stdout.strip()
+ stderr = output.stderr.strip()
+
+ self._clear_line(self._last_status_length)
+ print_failure_header(test)
+ if len(stdout):
+ print self._templates['stdout'] % stdout
+ if len(stderr):
+ print self._templates['stderr'] % stderr
+ print "Command: %s" % result.cmd
+ if output.HasCrashed():
+ print "exit code: %d" % output.exit_code
+ print "--- CRASHED ---"
+ if output.HasTimedOut():
+ print "--- TIMEOUT ---"
+
+ def finished(self):
+ self._print_progress('Done')
+ print
+
+ def _print_progress(self, name):
+ self._clear_line(self._last_status_length)
+ elapsed = time.time() - self._start_time
+ if not self._total:
+ progress = 0
+ else:
+ progress = (self._passed + self._failed) * 100 // self._total
+ status = self._templates['status_line'] % {
+ 'passed': self._passed,
+ 'progress': progress,
+ 'failed': self._failed,
+ 'test': name,
+ 'mins': int(elapsed) / 60,
+ 'secs': int(elapsed) % 60
+ }
+ status = self._truncate(status, 78)
+ self._last_status_length = len(status)
+ print status,
+ sys.stdout.flush()
+
+ def _truncate(self, string, length):
+ if length and len(string) > (length - 3):
+ return string[:(length - 3)] + "..."
+ else:
+ return string
+
+ def _clear_line(self, last_length):
+ raise NotImplementedError()
+
+
+class ColorProgressIndicator(CompactProgressIndicator):
+ def __init__(self):
+ templates = {
+ 'status_line': ("[%(mins)02i:%(secs)02i|"
+ "\033[34m%%%(progress) 4d\033[0m|"
+ "\033[32m+%(passed) 4d\033[0m|"
+ "\033[31m-%(failed) 4d\033[0m]: %(test)s"),
+ 'stdout': "\033[1m%s\033[0m",
+ 'stderr': "\033[31m%s\033[0m",
+ }
+ super(ColorProgressIndicator, self).__init__(templates)
+
+ def _clear_line(self, last_length):
+ print "\033[1K\r",
+
+
+class MonochromeProgressIndicator(CompactProgressIndicator):
+ def __init__(self):
+ templates = {
+ 'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
+ "+%(passed) 4d|-%(failed) 4d]: %(test)s"),
+ 'stdout': '%s',
+ 'stderr': '%s',
+ }
+ super(MonochromeProgressIndicator, self).__init__(templates)
+
+ def _clear_line(self, last_length):
+ print ("\r" + (" " * last_length) + "\r"),
+
+
+class JUnitTestProgressIndicator(ProgressIndicator):
+ def __init__(self, junitout, junittestsuite):
+ super(JUnitTestProgressIndicator, self).__init__()
+ self._requirement = base.DROP_PASS_STDOUT
+
+ self.outputter = junit_output.JUnitTestOutput(junittestsuite)
+ if junitout:
+ self.outfile = open(junitout, "w")
+ else:
+ self.outfile = sys.stdout
+
+ def _on_result_for(self, test, result):
+ # TODO(majeski): Support for dummy/grouped results
+ fail_text = ""
+ output = result.output
+ if result.has_unexpected_output:
+ stdout = output.stdout.strip()
+ if len(stdout):
+ fail_text += "stdout:\n%s\n" % stdout
+ stderr = output.stderr.strip()
+ if len(stderr):
+ fail_text += "stderr:\n%s\n" % stderr
+ fail_text += "Command: %s" % result.cmd.to_string()
+ if output.HasCrashed():
+ fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
+ if output.HasTimedOut():
+ fail_text += "--- TIMEOUT ---"
+ self.outputter.HasRunTest(
+ test_name=str(test),
+ test_cmd=result.cmd.to_string(relative=True),
+ test_duration=output.duration,
+ test_failure=fail_text)
+
+ def finished(self):
+ self.outputter.FinishAndWrite(self.outfile)
+ if self.outfile != sys.stdout:
+ self.outfile.close()
+
+
+class JsonTestProgressIndicator(ProgressIndicator):
+ def __init__(self, json_test_results, arch, mode, random_seed):
+ super(JsonTestProgressIndicator, self).__init__()
+ # We want to drop stdout/err for all passed tests on the first try, but we
+ # need to get outputs for all runs after the first one. To accommodate that,
+ # reruns are set to keep the result no matter what requirement says, i.e.
+ # keep_output set to True in the RerunProc.
+ self._requirement = base.DROP_PASS_STDOUT
+
+ self.json_test_results = json_test_results
+ self.arch = arch
+ self.mode = mode
+ self.random_seed = random_seed
+ self.results = []
+ self.tests = []
+
+ def _on_result_for(self, test, result):
+ if result.is_rerun:
+ self.process_results(test, result.results)
+ else:
+ self.process_results(test, [result])
+
+ def process_results(self, test, results):
+ for run, result in enumerate(results):
+ # TODO(majeski): Support for dummy/grouped results
+ output = result.output
+ # Buffer all tests for sorting the durations in the end.
+ # TODO(machenbach): Running average + buffer only slowest 20 tests.
+ self.tests.append((test, output.duration, result.cmd))
+
+ # Omit tests that run as expected on the first try.
+ # Everything that happens after the first run is included in the output
+ # even if it flakily passes.
+ if not result.has_unexpected_output and run == 0:
+ continue
+
+ self.results.append({
+ "name": str(test),
+ "flags": result.cmd.args,
+ "command": result.cmd.to_string(relative=True),
+ "run": run + 1,
+ "stdout": output.stdout,
+ "stderr": output.stderr,
+ "exit_code": output.exit_code,
+ "result": test.output_proc.get_outcome(output),
+ "expected": test.expected_outcomes,
+ "duration": output.duration,
+
+ # TODO(machenbach): This stores only the global random seed from the
+ # context and not possible overrides when using random-seed stress.
+ "random_seed": self.random_seed,
+ "target_name": test.get_shell(),
+ "variant": test.variant,
+ })
+
+ def finished(self):
+ complete_results = []
+ if os.path.exists(self.json_test_results):
+ with open(self.json_test_results, "r") as f:
+ # Buildbot might start out with an empty file.
+ complete_results = json.loads(f.read() or "[]")
+
+ duration_mean = None
+ if self.tests:
+ # Get duration mean.
+ duration_mean = (
+ sum(duration for (_, duration, cmd) in self.tests) /
+ float(len(self.tests)))
+
+ # Sort tests by duration.
+ self.tests.sort(key=lambda (_, duration, cmd): duration, reverse=True)
+ slowest_tests = [
+ {
+ "name": str(test),
+ "flags": cmd.args,
+ "command": cmd.to_string(relative=True),
+ "duration": duration,
+ "marked_slow": test.is_slow,
+ } for (test, duration, cmd) in self.tests[:20]
+ ]
+
+ complete_results.append({
+ "arch": self.arch,
+ "mode": self.mode,
+ "results": self.results,
+ "slowest_tests": slowest_tests,
+ "duration_mean": duration_mean,
+ "test_total": len(self.tests),
+ })
+
+ with open(self.json_test_results, "w") as f:
+ f.write(json.dumps(complete_results))
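
The JSON indicator's finished() method merges this run into whatever is already in the results file. A condensed sketch of that merge follows, with hypothetical field values and only a subset of the fields written above.

import json
import os

def append_run(path, results, durations):
  complete = []
  if os.path.exists(path):
    with open(path) as f:
      # Buildbot might start out with an empty file.
      complete = json.loads(f.read() or '[]')
  duration_mean = sum(durations) / float(len(durations)) if durations else None
  complete.append({
    'results': results,
    'duration_mean': duration_mean,
    'test_total': len(durations),
  })
  with open(path, 'w') as f:
    f.write(json.dumps(complete))

append_run('results.json', [{'name': 'demo', 'result': 'FAIL'}], [0.2, 1.5])
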
diff --git a/deps/v8/tools/testrunner/testproc/rerun.py b/deps/v8/tools/testrunner/testproc/rerun.py
new file mode 100644
index 0000000000..7f96e0260c
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/rerun.py
@@ -0,0 +1,59 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+
+from . import base
+from .result import RerunResult
+
+
+class RerunProc(base.TestProcProducer):
+ def __init__(self, rerun_max, rerun_max_total=None):
+ super(RerunProc, self).__init__('Rerun')
+ self._requirement = base.DROP_OUTPUT
+
+ self._rerun = {}
+ self._results = collections.defaultdict(list)
+ self._rerun_max = rerun_max
+ self._rerun_total_left = rerun_max_total
+
+ def _next_test(self, test):
+ self._send_next_subtest(test)
+
+ def _result_for(self, test, subtest, result):
+ # First result
+ if subtest.procid[-2:] == '-1':
+ # Passed, no reruns
+ if not result.has_unexpected_output:
+ self._send_result(test, result)
+ return
+
+ self._rerun[test.procid] = 0
+
+ results = self._results[test.procid]
+ results.append(result)
+
+ if self._needs_rerun(test, result):
+ self._rerun[test.procid] += 1
+ if self._rerun_total_left is not None:
+ self._rerun_total_left -= 1
+ self._send_next_subtest(test, self._rerun[test.procid])
+ else:
+ result = RerunResult.create(results)
+ self._finalize_test(test)
+ self._send_result(test, result)
+
+ def _needs_rerun(self, test, result):
+ # TODO(majeski): Limit reruns count for slow tests.
+ return ((self._rerun_total_left is None or self._rerun_total_left > 0) and
+ self._rerun[test.procid] < self._rerun_max and
+ result.has_unexpected_output)
+
+ def _send_next_subtest(self, test, run=0):
+ subtest = self._create_subtest(test, str(run + 1), keep_output=(run != 0))
+ self._send_test(subtest)
+
+ def _finalize_test(self, test):
+ del self._rerun[test.procid]
+ del self._results[test.procid]
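
Stripped of the subtest bookkeeping, the rerun policy is: retry a test with unexpected output until it passes, the per-test limit is reached, or the global budget runs out, and keep the results of every attempt. A standalone sketch of just that decision loop (the dict results are stand-ins for real Result objects):

def run_with_reruns(run_once, rerun_max, rerun_max_total=None):
  results = [run_once()]
  reruns = 0
  while (results[-1]['unexpected'] and
         reruns < rerun_max and
         (rerun_max_total is None or rerun_max_total > 0)):
    reruns += 1
    if rerun_max_total is not None:
      rerun_max_total -= 1
    results.append(run_once())
  return results

attempts = iter([{'unexpected': True}, {'unexpected': True}, {'unexpected': False}])
print(run_with_reruns(lambda: next(attempts), rerun_max=3))
# Three attempts are recorded; the last one passed, so rerunning stops.
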
diff --git a/deps/v8/tools/testrunner/testproc/result.py b/deps/v8/tools/testrunner/testproc/result.py
new file mode 100644
index 0000000000..c817fc06ec
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/result.py
@@ -0,0 +1,97 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class ResultBase(object):
+ @property
+ def is_skipped(self):
+ return False
+
+ @property
+ def is_grouped(self):
+ return False
+
+ @property
+ def is_rerun(self):
+ return False
+
+
+class Result(ResultBase):
+ """Result created by the output processor."""
+
+ def __init__(self, has_unexpected_output, output, cmd=None):
+ self.has_unexpected_output = has_unexpected_output
+ self.output = output
+ self.cmd = cmd
+
+
+class GroupedResult(ResultBase):
+ """Result consisting of multiple results. It can be used by processors that
+ create multiple subtests for each test and want to pass all results back.
+ """
+
+ @staticmethod
+ def create(results):
+ """Create grouped result from the list of results. It filters out skipped
+ results. If all results are skipped results it returns skipped result.
+
+ Args:
+ results: list of pairs (test, result)
+ """
+ results = [(t, r) for (t, r) in results if not r.is_skipped]
+ if not results:
+ return SKIPPED
+ return GroupedResult(results)
+
+ def __init__(self, results):
+ self.results = results
+
+ @property
+ def is_grouped(self):
+ return True
+
+
+class SkippedResult(ResultBase):
+ """Result without any meaningful value. Used primarily to inform the test
+ processor that it's test wasn't executed.
+ """
+
+ @property
+ def is_skipped(self):
+ return True
+
+
+SKIPPED = SkippedResult()
+
+
+class RerunResult(Result):
+ """Result generated from several reruns of the same test. It's a subclass of
+ Result since the result of rerun is result of the last run. In addition to
+ normal result it contains results of all reruns.
+ """
+ @staticmethod
+ def create(results):
+ """Create RerunResult based on list of results. List cannot be empty. If it
+ has only one element it's returned as a result.
+ """
+ assert results
+
+ if len(results) == 1:
+ return results[0]
+ return RerunResult(results)
+
+ def __init__(self, results):
+ """Has unexpected output and the output itself of the RerunResult equals to
+ the last result in the passed list.
+ """
+ assert results
+
+ last = results[-1]
+ super(RerunResult, self).__init__(last.has_unexpected_output, last.output,
+ last.cmd)
+ self.results = results
+
+ @property
+ def is_rerun(self):
+ return True
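
The collapsing rules are worth spelling out: a rerun of a single attempt is just that attempt, and a rerun of several attempts reports the last attempt's outcome while keeping the full history. A tiny sketch with stand-in objects (not the classes above):

class FakeResult(object):
  def __init__(self, ok):
    self.ok = ok

def collapse_reruns(results):
  assert results
  if len(results) == 1:
    return results[0]
  last = results[-1]
  last.history = list(results)  # outcome of the last run, plus full history
  return last

single = collapse_reruns([FakeResult(True)])
print(single.ok)  # True, the single attempt is returned as-is

flaky = collapse_reruns([FakeResult(False), FakeResult(True)])
print('%s %d' % (flaky.ok, len(flaky.history)))  # True 2
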
diff --git a/deps/v8/tools/testrunner/testproc/shard.py b/deps/v8/tools/testrunner/testproc/shard.py
new file mode 100644
index 0000000000..1caac9fee6
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/shard.py
@@ -0,0 +1,30 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+
+class ShardProc(base.TestProcFilter):
+ """Processor distributing tests between shards.
+  It simply passes every n-th test. To be deterministic, it has to be placed
+ before all processors that generate tests dynamically.
+ """
+ def __init__(self, myid, shards_count):
+ """
+ Args:
+ myid: id of the shard within [0; shards_count - 1]
+ shards_count: number of shards
+ """
+ super(ShardProc, self).__init__()
+
+ assert myid >= 0 and myid < shards_count
+
+ self._myid = myid
+ self._shards_count = shards_count
+ self._last = 0
+
+ def _filter(self, test):
+ res = self._last != self._myid
+ self._last = (self._last + 1) % self._shards_count
+ return res
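
Since _filter() only advances a counter, the distribution is plain round robin: shard i keeps every test whose running index modulo shards_count equals i, so shards are disjoint and together cover every test exactly once. A one-function sketch of that property:

def shard(tests, myid, shards_count):
  return [t for i, t in enumerate(tests) if i % shards_count == myid]

tests = ['t%d' % i for i in range(10)]
print(shard(tests, 0, 3))  # ['t0', 't3', 't6', 't9']
print(shard(tests, 1, 3))  # ['t1', 't4', 't7']
print(shard(tests, 2, 3))  # ['t2', 't5', 't8']
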
diff --git a/deps/v8/tools/testrunner/testproc/variant.py b/deps/v8/tools/testrunner/testproc/variant.py
new file mode 100644
index 0000000000..dba1af91fc
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/variant.py
@@ -0,0 +1,68 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+from ..local.variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
+from .result import GroupedResult
+
+
+STANDARD_VARIANT = set(["default"])
+
+
+class VariantProc(base.TestProcProducer):
+ """Processor creating variants.
+
+  For each test it keeps a generator that returns the variant, flags and procid
+  suffix. It produces variants one at a time, so it waits for the result of one
+  variant before creating the next variant of the same test.
+  It maintains the order of the variants passed to __init__.
+
+  There are cases when a particular variant of a test is not valid. To ignore
+  such subtests, a StatusFileFilterProc should be placed somewhere after the
+  VariantProc.
+ """
+
+ def __init__(self, variants):
+ super(VariantProc, self).__init__('VariantProc')
+ self._next_variant = {}
+ self._variant_gens = {}
+ self._variants = variants
+
+ def setup(self, requirement=base.DROP_RESULT):
+ super(VariantProc, self).setup(requirement)
+
+    # VariantProc is optimized for dropping the result, and it should be placed
+    # in the chain wherever that's possible.
+ assert requirement == base.DROP_RESULT
+
+ def _next_test(self, test):
+ gen = self._variants_gen(test)
+ self._next_variant[test.procid] = gen
+ self._try_send_new_subtest(test, gen)
+
+ def _result_for(self, test, subtest, result):
+ gen = self._next_variant[test.procid]
+ self._try_send_new_subtest(test, gen)
+
+ def _try_send_new_subtest(self, test, variants_gen):
+ for variant, flags, suffix in variants_gen:
+ subtest = self._create_subtest(test, '%s-%s' % (variant, suffix),
+ variant=variant, flags=flags)
+ self._send_test(subtest)
+ return
+
+ del self._next_variant[test.procid]
+ self._send_result(test, None)
+
+ def _variants_gen(self, test):
+ """Generator producing (variant, flags, procid suffix) tuples."""
+ return self._get_variants_gen(test).gen(test)
+
+ def _get_variants_gen(self, test):
+ key = test.suite.name
+ variants_gen = self._variant_gens.get(key)
+ if not variants_gen:
+ variants_gen = test.suite.get_variants_gen(self._variants)
+ self._variant_gens[key] = variants_gen
+ return variants_gen
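
The generator VariantProc consumes yields one (variant, flags, procid suffix) tuple per variant, which is what lets the processor create one subtest at a time. Below is a hypothetical generator of that shape; the flag values are examples only, not taken from ALL_VARIANT_FLAGS.

def example_variants_gen(test, variants):
  flags_by_variant = {
    'default': [],
    'stress': ['--stress-opt', '--always-opt'],
  }
  for n, variant in enumerate(variants):
    yield variant, flags_by_variant.get(variant, []), str(n)

for variant, flags, suffix in example_variants_gen('mjsunit/array-sort',
                                                   ['default', 'stress']):
  print('%s flags=%s suffix=%s' % (variant, flags, suffix))
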