Diffstat (limited to 'Tools/Scripts/webkitpy/layout_tests/views/printing.py')
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/views/printing.py | 310
1 file changed, 307 insertions(+), 3 deletions(-)
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/printing.py b/Tools/Scripts/webkitpy/layout_tests/views/printing.py
index 2dd909930..1c2fecd7b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/printing.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/printing.py
@@ -29,10 +29,12 @@
"""Package that handles non-debug, non-file output for run-webkit-tests."""
+import math
import optparse
from webkitpy.tool import grammar
from webkitpy.common.net import resultsjsonparser
+from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models.test_expectations import TestExpectations
from webkitpy.layout_tests.views.metered_stream import MeteredStream
@@ -192,15 +194,313 @@ class Printer(object):
def help_printing(self):
self._write(HELP_PRINTING)
+ def print_config(self):
+ """Prints the configuration for the test run."""
+ self._print_config("Using port '%s'" % self._port.name())
+ self._print_config("Test configuration: %s" % self._port.test_configuration())
+ self._print_config("Placing test results in %s" % self._options.results_directory)
+
+ # FIXME: should these options be in printing_options?
+ if self._options.new_baseline:
+ self._print_config("Placing new baselines in %s" % self._port.baseline_path())
+
+ fs = self._port.host.filesystem
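+ # fs.split() behaves like os.path.split(), so this keeps only the leaf directory name of each baseline directory.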
+ fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
+ self._print_config("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
+
+ self._print_config("Using %s build" % self._options.configuration)
+ if self._options.pixel_tests:
+ self._print_config("Pixel tests enabled")
+ else:
+ self._print_config("Pixel tests disabled")
+
+ self._print_config("Regular timeout: %s, slow test timeout: %s" %
+ (self._options.time_out_ms, self._options.slow_time_out_ms))
+
+ self._print_config('Command line: ' + ' '.join(self._port.driver_cmd_line()))
+ self._print_config('')
+
+ def print_expected(self, num_all_test_files, result_summary, tests_with_result_type_callback):
+ self._print_expected('Found %s.' % grammar.pluralize('test', num_all_test_files))
+ self._print_expected_results_of_type(result_summary, test_expectations.PASS, "passes", tests_with_result_type_callback)
+ self._print_expected_results_of_type(result_summary, test_expectations.FAIL, "failures", tests_with_result_type_callback)
+ self._print_expected_results_of_type(result_summary, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
+ self._print_expected_results_of_type(result_summary, test_expectations.SKIP, "skipped", tests_with_result_type_callback)
+ self._print_expected('')
+
+ if self._options.repeat_each > 1:
+ self._print_expected('Running each test %d times.' % self._options.repeat_each)
+ if self._options.iterations > 1:
+ self._print_expected('Running %d iterations of the tests.' % self._options.iterations)
+ if self._options.iterations > 1 or self._options.repeat_each > 1:
+ self._print_expected('')
+
+ def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
+ driver_name = self._port.driver_name()
+ if num_workers == 1:
+ self._print_config("Running 1 %s over %s." %
+ (driver_name, grammar.pluralize('shard', num_shards)))
+ else:
+ self._print_config("Running %d %ss in parallel over %d shards (%d locked)." %
+ (num_workers, driver_name, num_shards, num_locked_shards))
+ self._print_config('')
+
+ def _print_expected_results_of_type(self, result_summary,
+ result_type, result_type_str, tests_with_result_type_callback):
+ """Print the number of the tests in a given result class.
+
+ Args:
+ result_summary - the object containing all the results to report on
+ result_type - the particular result type to report in the summary.
+ result_type_str - a string description of the result_type.
+ expectations - populated TestExpectations object for stats
+ """
+ tests = tests_with_result_type_callback(result_type)
+ now = result_summary.tests_by_timeline[test_expectations.NOW]
+ wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
+
+ # We use a fancy format string in order to print the data out in a
+ # nicely-aligned table.
+ fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
+ % (self._num_digits(now), self._num_digits(wontfix)))
+ self._print_expected(fmtstr %
+ (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
+
+ def _num_digits(self, num):
+ """Returns the number of digits needed to represent the length of a
+ sequence."""
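+ # e.g. a sequence of 250 tests needs int(math.log10(250)) + 1 == 3 columns.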
+ ndigits = 1
+ if len(num):
+ ndigits = int(math.log10(len(num))) + 1
+ return ndigits
+
+ def print_results(self, run_time, thread_timings, test_timings, individual_test_timings, result_summary, unexpected_results):
+ self._print_timing_statistics(run_time, thread_timings, test_timings, individual_test_timings, result_summary)
+ self._print_result_summary(result_summary)
+
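+ # Expected skips are excluded from both the total and the expected counts in the one-line summary.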
+ self.print_one_line_summary(result_summary.total - result_summary.expected_skips, result_summary.expected - result_summary.expected_skips, result_summary.unexpected)
+
+ self.print_unexpected_results(unexpected_results)
+
+ def _print_timing_statistics(self, total_time, thread_timings,
+ directory_test_timings, individual_test_timings,
+ result_summary):
+ """Record timing-specific information for the test run.
+
+ Args:
+ total_time: total elapsed time (in seconds) for the test run
+ thread_timings: wall clock time each thread ran for
+ directory_test_timings: timing by directory
+ individual_test_timings: timing by file
+ result_summary: summary object for the test run
+ """
+ self.print_timing("Test timing:")
+ self.print_timing(" %6.2f total testing time" % total_time)
+ self.print_timing("")
+ self.print_timing("Thread timing:")
+ cuml_time = 0
+ for t in thread_timings:
+ self.print_timing(" %10s: %5d tests, %6.2f secs" %
+ (t['name'], t['num_tests'], t['total_time']))
+ cuml_time += t['total_time']
+ self.print_timing(" %6.2f cumulative, %6.2f optimal" %
+ (cuml_time, cuml_time / int(self._options.child_processes)))
+ self.print_timing("")
+
+ self._print_aggregate_test_statistics(individual_test_timings)
+ self._print_individual_test_times(individual_test_timings,
+ result_summary)
+ self._print_directory_timings(directory_test_timings)
+
+ def _print_aggregate_test_statistics(self, individual_test_timings):
+ """Prints aggregate statistics (e.g. median, mean, etc.) for all tests.
+ Args:
+ individual_test_timings: List of TestResults for all tests.
+ """
+ times_for_dump_render_tree = [test_stats.test_run_time for test_stats in individual_test_timings]
+ self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):",
+ times_for_dump_render_tree)
+
+ def _print_individual_test_times(self, individual_test_timings,
+ result_summary):
+ """Prints the run times for slow, timeout and crash tests.
+ Args:
+ individual_test_timings: List of TestResults for all tests.
+ result_summary: summary object for test run
+ """
+ # Reverse-sort by the time spent in DumpRenderTree.
+ individual_test_timings.sort(key=lambda t: t.test_run_time, reverse=True)
+
+ num_printed = 0
+ slow_tests = []
+ timeout_or_crash_tests = []
+ unexpected_slow_tests = []
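+ # Bucket each result: marked SLOW, unexpectedly timed out or crashed, or merely among the slowest runs.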
+ for test_tuple in individual_test_timings:
+ test_name = test_tuple.test_name
+ is_timeout_crash_or_slow = False
+ if test_name in result_summary.slow_tests:
+ is_timeout_crash_or_slow = True
+ slow_tests.append(test_tuple)
+
+ if test_name in result_summary.failures:
+ result = result_summary.results[test_name].type
+ if (result == test_expectations.TIMEOUT or
+ result == test_expectations.CRASH):
+ is_timeout_crash_or_slow = True
+ timeout_or_crash_tests.append(test_tuple)
+
+ if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG):
+ num_printed = num_printed + 1
+ unexpected_slow_tests.append(test_tuple)
+
+ self.print_timing("")
+ self._print_test_list_timing("%s slowest tests that are not "
+ "marked as SLOW and did not timeout/crash:" % NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
+ self.print_timing("")
+ self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
+ self.print_timing("")
+ self._print_test_list_timing("Tests that timed out or crashed:",
+ timeout_or_crash_tests)
+ self.print_timing("")
+
+ def _print_test_list_timing(self, title, test_list):
+ """Print timing info for each test.
+
+ Args:
+ title: section heading
+ test_list: tests that fall in this section
+ """
+ if self.disabled('slowest'):
+ return
+
+ self.print_timing(title)
+ for test_tuple in test_list:
+ test_run_time = round(test_tuple.test_run_time, 1)
+ self.print_timing(" %s took %s seconds" % (test_tuple.test_name, test_run_time))
+
+ def _print_directory_timings(self, directory_test_timings):
+ """Print timing info by directory for any directories that
+ take > 10 seconds to run.
+
+ Args:
+ directory_test_timings: time info for each directory
+ """
+ timings = []
+ for directory in directory_test_timings:
+ num_tests, time_for_directory = directory_test_timings[directory]
+ timings.append((round(time_for_directory, 1), directory,
+ num_tests))
+ timings.sort()
+
+ self.print_timing("Time to process slowest subdirectories:")
+ min_seconds_to_print = 10
+ for timing in timings:
+ if timing[0] > min_seconds_to_print:
+ self.print_timing(
+ " %s took %s seconds to run %s tests." % (timing[1],
+ timing[0], timing[2]))
+ self.print_timing("")
+
+ def _print_statistics_for_test_timings(self, title, timings):
+ """Prints the median, mean and standard deviation of the values in
+ timings.
+
+ Args:
+ title: Title for these timings.
+ timings: A list of floats representing times.
+ """
+ self.print_timing(title)
+ timings.sort()
+
+ num_tests = len(timings)
+ if not num_tests:
+ return
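+ # Indexes into the sorted list; int() truncation keeps them in range for any non-empty timings.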
+ percentile90 = timings[int(.9 * num_tests)]
+ percentile99 = timings[int(.99 * num_tests)]
+
+ if num_tests % 2 == 1:
+ median = timings[(num_tests - 1) / 2]
+ else:
+ lower = timings[num_tests / 2 - 1]
+ upper = timings[num_tests / 2]
+ median = float(lower + upper) / 2
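+ # e.g. sorted [1.0, 2.0, 3.0] -> 2.0; sorted [1.0, 2.0, 3.0, 4.0] -> 2.5.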
+
+ mean = sum(timings) / num_tests
+
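+ # Population standard deviation: average the squared deviations over all timings, then take the square root.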
+ sum_of_deviations = 0
+ for timing in timings:
+ sum_of_deviations += math.pow(timing - mean, 2)
+
+ std_deviation = math.sqrt(sum_of_deviations / num_tests)
+ self.print_timing(" Median: %6.3f" % median)
+ self.print_timing(" Mean: %6.3f" % mean)
+ self.print_timing(" 90th percentile: %6.3f" % percentile90)
+ self.print_timing(" 99th percentile: %6.3f" % percentile99)
+ self.print_timing(" Standard dev: %6.3f" % std_deviation)
+ self.print_timing("")
+
+ def _print_result_summary(self, result_summary):
+ """Print a short summary about how many tests passed.
+
+ Args:
+ result_summary: information to log
+ """
+ failed = result_summary.total_failures
+ total = result_summary.total - result_summary.expected_skips
+ passed = total - failed
+ pct_passed = 0.0
+ if total > 0:
+ pct_passed = float(passed) * 100 / total
+
+ self.print_actual("")
+ self.print_actual("=> Results: %d/%d tests passed (%.1f%%)" %
+ (passed, total, pct_passed))
+ self.print_actual("")
+ self._print_result_summary_entry(result_summary,
+ test_expectations.NOW, "Tests to be fixed")
+
+ self.print_actual("")
+ self._print_result_summary_entry(result_summary,
+ test_expectations.WONTFIX,
+ "Tests that will only be fixed if they crash (WONTFIX)")
+ self.print_actual("")
+
+ def _print_result_summary_entry(self, result_summary, timeline,
+ heading):
+ """Print a summary block of results for a particular timeline of test.
+
+ Args:
+ result_summary: summary to print results for
+ timeline: the timeline to print results for (NOW, WONTFIX, etc.)
+ heading: a textual description of the timeline
+ """
+ total = len(result_summary.tests_by_timeline[timeline])
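+ # Everything in this timeline not expected to PASS counts as "not passing" for the heading.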
+ not_passing = (total -
+ len(result_summary.tests_by_expectation[test_expectations.PASS] &
+ result_summary.tests_by_timeline[timeline]))
+ self.print_actual("=> %s (%d):" % (heading, not_passing))
+
+ for result in TestExpectations.EXPECTATION_ORDER:
+ if result == test_expectations.PASS:
+ continue
+ results = (result_summary.tests_by_expectation[result] &
+ result_summary.tests_by_timeline[timeline])
+ desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
+ if not_passing and len(results):
+ pct = len(results) * 100.0 / not_passing
+ self.print_actual(" %5d %-24s (%4.1f%%)" %
+ (len(results), desc[len(results) != 1], pct))
+
+
def print_actual(self, msg):
if self.disabled('actual'):
return
self._buildbot_stream.write("%s\n" % msg)
- def print_config(self, msg):
+ def _print_config(self, msg):
self.write(msg, 'config')
- def print_expected(self, msg):
+ def _print_expected(self, msg):
self.write(msg, 'expected')
def print_timing(self, msg):
@@ -235,6 +535,10 @@ class Printer(object):
self._write("%s ran as expected, %d didn't%s:" % (grammar.pluralize('test', expected), unexpected, incomplete_str))
self._write("")
+ def print_finished_test(self, result, expected, exp_str, got_str, result_summary, retrying, test_files_list):
+ self.print_test_result(result, expected, exp_str, got_str)
+ self.print_progress(result_summary, retrying, test_files_list)
+
def print_test_result(self, result, expected, exp_str, got_str):
"""Print the result of the test as determined by --print.
@@ -396,7 +700,7 @@ class Printer(object):
if len(unexpected_results['tests']) and self._options.verbose:
self._buildbot_stream.write("%s\n" % ("-" * 78))
- def print_update(self, msg):
+ def write_update(self, msg):
if self.disabled('updates'):
return
self._meter.write_update(msg)