author     Simon Hausmann <simon.hausmann@nokia.com>    2012-07-18 13:59:13 +0200
committer  Simon Hausmann <simon.hausmann@nokia.com>    2012-07-18 13:59:28 +0200
commit     4d6084feccab99c0a7b3ecef26bb49c41dd50201 (patch)
tree       fd1195897f551eee6d5a15d07ff5733b15aa2a5c /Tools/Scripts/webkitpy/layout_tests/controllers
parent     ae901828d4689ab9e89113f6b6ea8042b37a9fda (diff)
download   qtwebkit-4d6084feccab99c0a7b3ecef26bb49c41dd50201.tar.gz
Imported WebKit commit ff52235a78888e5cb8e286a828a8698042200e67 (http://svn.webkit.org/repository/webkit/trunk@122948)
New snapshot that should fix the rendering issues recently introduced
Diffstat (limited to 'Tools/Scripts/webkitpy/layout_tests/controllers')
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/manager.py           | 406
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py  | 110
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/worker.py            |   4
3 files changed, 146 insertions(+), 374 deletions(-)
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
index 7aee0c2fb..0d07d3a68 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -417,19 +417,14 @@ class Manager(object):
files = test_files[slice_start:slice_end]
- tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % ((slice_end - slice_start), slice_start, slice_end, num_tests)
- self._printer.print_expected(tests_run_msg)
+ _log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start)))
# If we reached the end and we don't have enough tests, we run some
# from the beginning.
if slice_end - slice_start < chunk_len:
extra = chunk_len - (slice_end - slice_start)
- extra_msg = (' last chunk is partial, appending [0:%d]' % extra)
- self._printer.print_expected(extra_msg)
- tests_run_msg += "\n" + extra_msg
+ _log.debug(' last chunk is partial, appending [0:%d]' % extra)
files.extend(test_files[0:extra])
- tests_run_filename = self._filesystem.join(self._results_directory, "tests_run.txt")
- self._filesystem.write_text_file(tests_run_filename, tests_run_msg)
len_skip_chunk = int(len(files) * len(skipped) / float(len(self._test_files)))
skip_chunk_list = list(skipped)[0:len_skip_chunk]
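
For readers skimming the hunk above: the wrap-around logic reduces to the small standalone sketch below. The helper name `chunk` is invented for illustration, and `slice_end` is assumed to be clamped to the list length earlier in the method (that computation falls outside this hunk).

```python
# Illustrative sketch only, not the webkitpy API: if the requested slice runs
# past the end of the test list, the shortfall is filled from the beginning.
def chunk(tests, slice_start, chunk_len):
    slice_end = min(len(tests), slice_start + chunk_len)  # assumed clamping
    files = tests[slice_start:slice_end]
    if slice_end - slice_start < chunk_len:
        extra = chunk_len - (slice_end - slice_start)
        files.extend(tests[0:extra])  # last chunk is partial, append [0:extra]
    return files

print(chunk(['a', 'b', 'c', 'd', 'e'], slice_start=4, chunk_len=2))  # ['e', 'a']
```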
@@ -512,11 +507,7 @@ class Manager(object):
(self._options.iterations if self._options.iterations else 1)
result_summary = ResultSummary(self._expectations, self._test_files | skipped, iterations)
- self._printer.print_expected('Found %s.' % grammar.pluralize('test', num_all_test_files))
- self._print_expected_results_of_type(result_summary, test_expectations.PASS, "passes")
- self._print_expected_results_of_type(result_summary, test_expectations.FAIL, "failures")
- self._print_expected_results_of_type(result_summary, test_expectations.FLAKY, "flaky")
- self._print_expected_results_of_type(result_summary, test_expectations.SKIP, "skipped")
+ self._printer.print_expected(num_all_test_files, result_summary, self._expectations.get_tests_with_result_type)
if self._options.skipped != 'ignore':
# Note that we don't actually run the skipped tests (they were
@@ -526,15 +517,7 @@ class Manager(object):
result = test_results.TestResult(test)
result.type = test_expectations.SKIP
for iteration in range(iterations):
- result_summary.add(result, expected=True)
- self._printer.print_expected('')
-
- if self._options.repeat_each > 1:
- self._printer.print_expected('Running each test %d times.' % self._options.repeat_each)
- if self._options.iterations > 1:
- self._printer.print_expected('Running %d iterations of the tests.' % self._options.iterations)
- if iterations > 1:
- self._printer.print_expected('')
+ result_summary.add(result, expected=True, test_is_slow=self._test_is_slow(test))
return result_summary
@@ -567,7 +550,13 @@ class Manager(object):
def _test_is_slow(self, test_file):
return self._expectations.has_modifier(test_file, test_expectations.SLOW)
- def _shard_tests(self, test_files, num_workers, fully_parallel):
+ def _is_ref_test(self, test_input):
+ if test_input.reference_files is None:
+ # Lazy initialization.
+ test_input.reference_files = self._port.reference_files(test_input.test_name)
+ return bool(test_input.reference_files)
+
+ def _shard_tests(self, test_files, num_workers, fully_parallel, shard_ref_tests):
"""Groups tests into batches.
This helps ensure that tests that depend on each other (aka bad tests!)
continue to run together as most cross-test dependencies tend to
@@ -581,30 +570,40 @@ class Manager(object):
# own class or module. Consider grouping it with the chunking logic
# in prepare_lists as well.
if num_workers == 1:
- return self._shard_in_two(test_files)
+ return self._shard_in_two(test_files, shard_ref_tests)
elif fully_parallel:
return self._shard_every_file(test_files)
- return self._shard_by_directory(test_files, num_workers)
+ return self._shard_by_directory(test_files, num_workers, shard_ref_tests)
- def _shard_in_two(self, test_files):
+ def _shard_in_two(self, test_files, shard_ref_tests):
"""Returns two lists of shards, one with all the tests requiring a lock and one with the rest.
This is used when there's only one worker, to minimize the per-shard overhead."""
locked_inputs = []
+ locked_ref_test_inputs = []
unlocked_inputs = []
+ unlocked_ref_test_inputs = []
for test_file in test_files:
test_input = self._get_test_input_for_file(test_file)
if self._test_requires_lock(test_file):
- locked_inputs.append(test_input)
+ if shard_ref_tests and self._is_ref_test(test_input):
+ locked_ref_test_inputs.append(test_input)
+ else:
+ locked_inputs.append(test_input)
else:
- unlocked_inputs.append(test_input)
+ if shard_ref_tests and self._is_ref_test(test_input):
+ unlocked_ref_test_inputs.append(test_input)
+ else:
+ unlocked_inputs.append(test_input)
+ locked_inputs.extend(locked_ref_test_inputs)
+ unlocked_inputs.extend(unlocked_ref_test_inputs)
locked_shards = []
unlocked_shards = []
if locked_inputs:
locked_shards = [TestShard('locked_tests', locked_inputs)]
if unlocked_inputs:
- unlocked_shards = [TestShard('unlocked_tests', unlocked_inputs)]
+ unlocked_shards.append(TestShard('unlocked_tests', unlocked_inputs))
return locked_shards, unlocked_shards
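
As a reading aid, the single-worker split in the hunk above boils down to the following self-contained sketch. `TestShard` is a bare namedtuple here, and `requires_lock`/`is_ref_test` are stand-in callables rather than the real Manager helpers; with `shard_ref_tests` set, ref tests are gathered separately and appended after the non-ref tests in each group, so the lone worker runs ordinary tests first and ref tests last.

```python
# Minimal sketch of the two-way split with ref-test grouping enabled.
from collections import namedtuple

TestShard = namedtuple('TestShard', ['name', 'test_inputs'])

def shard_in_two(tests, requires_lock, is_ref_test, shard_ref_tests):
    locked, locked_ref, unlocked, unlocked_ref = [], [], [], []
    for test in tests:
        is_ref = shard_ref_tests and is_ref_test(test)
        if requires_lock(test):
            (locked_ref if is_ref else locked).append(test)
        else:
            (unlocked_ref if is_ref else unlocked).append(test)
    locked.extend(locked_ref)        # ref tests go last within each group
    unlocked.extend(unlocked_ref)
    locked_shards = [TestShard('locked_tests', locked)] if locked else []
    unlocked_shards = [TestShard('unlocked_tests', unlocked)] if unlocked else []
    return locked_shards, unlocked_shards

# Example: http tests need the lock; the ref test sorts to the end of its group.
locked, unlocked = shard_in_two(
    ['http/a.html', 'fast/b.html', 'fast/b-ref-using.html'],
    requires_lock=lambda t: t.startswith('http/'),
    is_ref_test=lambda t: 'ref' in t,
    shard_ref_tests=True)
```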
@@ -627,7 +626,7 @@ class Manager(object):
return locked_shards, unlocked_shards
- def _shard_by_directory(self, test_files, num_workers):
+ def _shard_by_directory(self, test_files, num_workers, shard_ref_tests):
"""Returns two lists of shards, each shard containing all the files in a directory.
This is the default mode, and gets as much parallelism as we can while
@@ -635,13 +634,18 @@ class Manager(object):
locked_shards = []
unlocked_shards = []
tests_by_dir = {}
+ ref_tests_by_dir = {}
# FIXME: Given that the tests are already sorted by directory,
# we can probably rewrite this to be clearer and faster.
for test_file in test_files:
directory = self._get_dir_for_test_file(test_file)
test_input = self._get_test_input_for_file(test_file)
- tests_by_dir.setdefault(directory, [])
- tests_by_dir[directory].append(test_input)
+ if shard_ref_tests and self._is_ref_test(test_input):
+ ref_tests_by_dir.setdefault(directory, [])
+ ref_tests_by_dir[directory].append(test_input)
+ else:
+ tests_by_dir.setdefault(directory, [])
+ tests_by_dir[directory].append(test_input)
for directory, test_inputs in tests_by_dir.iteritems():
shard = TestShard(directory, test_inputs)
@@ -650,6 +654,14 @@ class Manager(object):
else:
unlocked_shards.append(shard)
+ for directory, test_inputs in ref_tests_by_dir.iteritems():
+ # The '~' prefix places the ref test shards after the other shards once sorted.
+ shard = TestShard('~ref:' + directory, test_inputs)
+ if self._test_requires_lock(directory):
+ locked_shards.append(shard)
+ else:
+ unlocked_shards.append(shard)
+
# Sort the shards by directory name.
locked_shards.sort(key=lambda shard: shard.name)
unlocked_shards.sort(key=lambda shard: shard.name)
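
The '~ref:' shard names rely only on plain string ordering: '~' (0x7E) compares greater than ASCII letters and digits, so ref test shards land after the ordinary directory shards when the lists are sorted by name above. A quick check with example shard names (not taken from this commit):

```python
# '~' sorts after letters and digits in ASCII, so '~ref:' shards come last.
shard_names = ['~ref:dom/html/level2/html', 'animations', 'dom/html/level2/html']
print(sorted(shard_names))
# -> ['animations', 'dom/html/level2/html', '~ref:dom/html/level2/html']
```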
@@ -706,16 +718,6 @@ class Manager(object):
extract_and_flatten(some_shards)))
return new_shards
- def _log_num_workers(self, num_workers, num_shards, num_locked_shards):
- driver_name = self._port.driver_name()
- if num_workers == 1:
- self._printer.print_config("Running 1 %s over %s." %
- (driver_name, grammar.pluralize('shard', num_shards)))
- else:
- self._printer.print_config("Running %d %ss in parallel over %d shards (%d locked)." %
- (num_workers, driver_name, num_shards, num_locked_shards))
- self._printer.print_config('')
-
def _run_tests(self, file_list, result_summary, num_workers):
"""Runs the tests in the file_list.
@@ -740,8 +742,8 @@ class Manager(object):
keyboard_interrupted = False
interrupted = False
- self._printer.print_update('Sharding tests ...')
- locked_shards, unlocked_shards = self._shard_tests(file_list, int(self._options.child_processes), self._options.fully_parallel)
+ self._printer.write_update('Sharding tests ...')
+ locked_shards, unlocked_shards = self._shard_tests(file_list, int(self._options.child_processes), self._options.fully_parallel, self._options.shard_ref_tests)
# FIXME: We don't have a good way to coordinate the workers so that
# they don't try to run the shards that need a lock if we don't actually
@@ -757,7 +759,7 @@ class Manager(object):
self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))
num_workers = min(num_workers, len(all_shards))
- self._log_num_workers(num_workers, len(all_shards), len(locked_shards))
+ self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))
def worker_factory(worker_connection):
return worker.Worker(worker_connection, self.results_directory(), self._options)
@@ -765,7 +767,7 @@ class Manager(object):
if self._options.dry_run:
return (keyboard_interrupted, interrupted, self._worker_stats.values(), self._group_stats, self._all_results)
- self._printer.print_update('Starting %s ...' % grammar.pluralize('worker', num_workers))
+ self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))
try:
with message_pool.get(self, worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
@@ -793,9 +795,6 @@ class Manager(object):
self._filesystem.maybe_make_directory(self._filesystem.join(self._results_directory, 'retries'))
return self._filesystem.join(self._results_directory, 'retries')
- def update(self):
- self.update_summary(self._current_result_summary)
-
def needs_servers(self):
return any(self._test_requires_lock(test_name) for test_name in self._test_files) and self._options.http
@@ -809,12 +808,12 @@ class Manager(object):
# This must be started before we check the system dependencies,
# since the helper may do things to make the setup correct.
if self._options.pixel_tests:
- self._printer.print_update("Starting pixel test helper ...")
+ self._printer.write_update("Starting pixel test helper ...")
self._port.start_helper()
# Check that the system dependencies (themes, fonts, ...) are correct.
if not self._options.nocheck_sys_deps:
- self._printer.print_update("Checking system dependencies ...")
+ self._printer.write_update("Checking system dependencies ...")
if not self._port.check_sys_deps(self.needs_servers()):
self._port.stop_helper()
return None
@@ -827,7 +826,7 @@ class Manager(object):
self._port.setup_test_run()
- self._printer.print_update("Preparing tests ...")
+ self._printer.write_update("Preparing tests ...")
result_summary = self.prepare_lists_and_print_output()
if not result_summary:
return None
@@ -882,13 +881,9 @@ class Manager(object):
self._look_for_new_crash_logs(retry_summary, start_time)
self._clean_up_run()
- self._print_timing_statistics(end_time - start_time, thread_timings, test_timings, individual_test_timings, result_summary)
- self._print_result_summary(result_summary)
-
- self._printer.print_one_line_summary(result_summary.total - result_summary.expected_skips, result_summary.expected - result_summary.expected_skips, result_summary.unexpected)
-
unexpected_results = summarize_results(self._port, self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=True, interrupted=interrupted)
- self._printer.print_unexpected_results(unexpected_results)
+
+ self._printer.print_results(end_time - start_time, thread_timings, test_timings, individual_test_timings, result_summary, unexpected_results)
# Re-raise a KeyboardInterrupt if necessary so the caller can handle it.
if keyboard_interrupted:
@@ -911,25 +906,25 @@ class Manager(object):
return self._port.exit_code_from_summarized_results(unexpected_results)
def start_servers_with_lock(self, number_of_servers):
- self._printer.print_update('Acquiring http lock ...')
+ self._printer.write_update('Acquiring http lock ...')
self._port.acquire_http_lock()
if self._http_tests():
- self._printer.print_update('Starting HTTP server ...')
+ self._printer.write_update('Starting HTTP server ...')
self._port.start_http_server(number_of_servers=number_of_servers)
if self._websocket_tests():
- self._printer.print_update('Starting WebSocket server ...')
+ self._printer.write_update('Starting WebSocket server ...')
self._port.start_websocket_server()
self._has_http_lock = True
def stop_servers_with_lock(self):
if self._has_http_lock:
if self._http_tests():
- self._printer.print_update('Stopping HTTP server ...')
+ self._printer.write_update('Stopping HTTP server ...')
self._port.stop_http_server()
if self._websocket_tests():
- self._printer.print_update('Stopping WebSocket server ...')
+ self._printer.write_update('Stopping WebSocket server ...')
self._port.stop_websocket_server()
- self._printer.print_update('Releasing server lock ...')
+ self._printer.write_update('Releasing server lock ...')
self._port.release_http_lock()
self._has_http_lock = False
@@ -967,17 +962,6 @@ class Manager(object):
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.write_crash_log(crash_log)
- def update_summary(self, result_summary):
- """Update the summary and print results with any completed tests."""
- while True:
- try:
- result = test_results.TestResult.loads(self._result_queue.get_nowait())
- except Queue.Empty:
- self._printer.print_progress(result_summary, self._retrying, self._test_files_list)
- return
-
- self._update_summary_with_result(result_summary, result)
-
def _mark_interrupted_tests_as_skipped(self, result_summary):
for test_name in self._test_files:
if test_name not in result_summary.results:
@@ -985,7 +969,7 @@ class Manager(object):
# FIXME: We probably need to loop here if there are multiple iterations.
# FIXME: Also, these results are really neither expected nor unexpected. We probably
# need a third type of result.
- result_summary.add(result, expected=False)
+ result_summary.add(result, expected=False, test_is_slow=self._test_is_slow(test_name))
def _interrupt_if_at_failure_limits(self, result_summary):
# Note: The messages in this method are constructed to match old-run-webkit-tests
@@ -1010,21 +994,25 @@ class Manager(object):
def _update_summary_with_result(self, result_summary, result):
if result.type == test_expectations.SKIP:
- result_summary.add(result, expected=True)
+ exp_str = got_str = 'SKIP'
+ expected = True
else:
expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests or test_failures.is_reftest_failure(result.failures))
- result_summary.add(result, expected)
exp_str = self._expectations.get_expectations_string(result.test_name)
got_str = self._expectations.expectation_to_string(result.type)
- self._printer.print_test_result(result, expected, exp_str, got_str)
- self._printer.print_progress(result_summary, self._retrying, self._test_files_list)
+
+ result_summary.add(result, expected, self._test_is_slow(result.test_name))
+
+ # FIXME: there's too many arguments to this function.
+ self._printer.print_finished_test(result, expected, exp_str, got_str, result_summary, self._retrying, self._test_files_list)
+
self._interrupt_if_at_failure_limits(result_summary)
def _clobber_old_results(self):
# Just clobber the actual test results directories since the other
# files in the results directory are explicitly used for cross-run
# tracking.
- self._printer.print_update("Clobbering old results in %s" %
+ self._printer.write_update("Clobbering old results in %s" %
self._results_directory)
layout_tests_dir = self._port.layout_tests_dir()
possible_dirs = self._port.test_dirs()
@@ -1105,53 +1093,6 @@ class Manager(object):
self._filesystem.remove(times_json_path)
self._filesystem.remove(incremental_results_path)
- def print_config(self):
- """Prints the configuration for the test run."""
- p = self._printer
- p.print_config("Using port '%s'" % self._port.name())
- p.print_config("Test configuration: %s" % self._port.test_configuration())
- p.print_config("Placing test results in %s" % self._results_directory)
- if self._options.new_baseline:
- p.print_config("Placing new baselines in %s" %
- self._port.baseline_path())
-
- fallback_path = [self._filesystem.split(x)[1] for x in self._port.baseline_search_path()]
- p.print_config("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
-
- p.print_config("Using %s build" % self._options.configuration)
- if self._options.pixel_tests:
- p.print_config("Pixel tests enabled")
- else:
- p.print_config("Pixel tests disabled")
-
- p.print_config("Regular timeout: %s, slow test timeout: %s" %
- (self._options.time_out_ms,
- self._options.slow_time_out_ms))
-
- p.print_config('Command line: ' +
- ' '.join(self._port.driver_cmd_line()))
- p.print_config("")
-
- def _print_expected_results_of_type(self, result_summary,
- result_type, result_type_str):
- """Print the number of the tests in a given result class.
-
- Args:
- result_summary - the object containing all the results to report on
- result_type - the particular result type to report in the summary.
- result_type_str - a string description of the result_type.
- """
- tests = self._expectations.get_tests_with_result_type(result_type)
- now = result_summary.tests_by_timeline[test_expectations.NOW]
- wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
-
- # We use a fancy format string in order to print the data out in a
- # nicely-aligned table.
- fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
- % (self._num_digits(now), self._num_digits(wontfix)))
- self._printer.print_expected(fmtstr %
- (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
-
def _num_digits(self, num):
"""Returns the number of digits needed to represent the length of a
sequence."""
@@ -1160,217 +1101,6 @@ class Manager(object):
ndigits = int(math.log10(len(num))) + 1
return ndigits
- def _print_timing_statistics(self, total_time, thread_timings,
- directory_test_timings, individual_test_timings,
- result_summary):
- """Record timing-specific information for the test run.
-
- Args:
- total_time: total elapsed time (in seconds) for the test run
- thread_timings: wall clock time each thread ran for
- directory_test_timings: timing by directory
- individual_test_timings: timing by file
- result_summary: summary object for the test run
- """
- self._printer.print_timing("Test timing:")
- self._printer.print_timing(" %6.2f total testing time" % total_time)
- self._printer.print_timing("")
- self._printer.print_timing("Thread timing:")
- cuml_time = 0
- for t in thread_timings:
- self._printer.print_timing(" %10s: %5d tests, %6.2f secs" %
- (t['name'], t['num_tests'], t['total_time']))
- cuml_time += t['total_time']
- self._printer.print_timing(" %6.2f cumulative, %6.2f optimal" %
- (cuml_time, cuml_time / int(self._options.child_processes)))
- self._printer.print_timing("")
-
- self._print_aggregate_test_statistics(individual_test_timings)
- self._print_individual_test_times(individual_test_timings,
- result_summary)
- self._print_directory_timings(directory_test_timings)
-
- def _print_aggregate_test_statistics(self, individual_test_timings):
- """Prints aggregate statistics (e.g. median, mean, etc.) for all tests.
- Args:
- individual_test_timings: List of TestResults for all tests.
- """
- times_for_dump_render_tree = [test_stats.test_run_time for test_stats in individual_test_timings]
- self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):",
- times_for_dump_render_tree)
-
- def _print_individual_test_times(self, individual_test_timings,
- result_summary):
- """Prints the run times for slow, timeout and crash tests.
- Args:
- individual_test_timings: List of TestStats for all tests.
- result_summary: summary object for test run
- """
- # Reverse-sort by the time spent in DumpRenderTree.
- individual_test_timings.sort(lambda a, b:
- cmp(b.test_run_time, a.test_run_time))
-
- num_printed = 0
- slow_tests = []
- timeout_or_crash_tests = []
- unexpected_slow_tests = []
- for test_tuple in individual_test_timings:
- test_name = test_tuple.test_name
- is_timeout_crash_or_slow = False
- if self._test_is_slow(test_name):
- is_timeout_crash_or_slow = True
- slow_tests.append(test_tuple)
-
- if test_name in result_summary.failures:
- result = result_summary.results[test_name].type
- if (result == test_expectations.TIMEOUT or
- result == test_expectations.CRASH):
- is_timeout_crash_or_slow = True
- timeout_or_crash_tests.append(test_tuple)
-
- if (not is_timeout_crash_or_slow and
- num_printed < printing.NUM_SLOW_TESTS_TO_LOG):
- num_printed = num_printed + 1
- unexpected_slow_tests.append(test_tuple)
-
- self._printer.print_timing("")
- self._print_test_list_timing("%s slowest tests that are not "
- "marked as SLOW and did not timeout/crash:" %
- printing.NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
- self._printer.print_timing("")
- self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
- self._printer.print_timing("")
- self._print_test_list_timing("Tests that timed out or crashed:",
- timeout_or_crash_tests)
- self._printer.print_timing("")
-
- def _print_test_list_timing(self, title, test_list):
- """Print timing info for each test.
-
- Args:
- title: section heading
- test_list: tests that fall in this section
- """
- if self._printer.disabled('slowest'):
- return
-
- self._printer.print_timing(title)
- for test_tuple in test_list:
- test_run_time = round(test_tuple.test_run_time, 1)
- self._printer.print_timing(" %s took %s seconds" % (test_tuple.test_name, test_run_time))
-
- def _print_directory_timings(self, directory_test_timings):
- """Print timing info by directory for any directories that
- take > 10 seconds to run.
-
- Args:
- directory_test_timing: time info for each directory
- """
- timings = []
- for directory in directory_test_timings:
- num_tests, time_for_directory = directory_test_timings[directory]
- timings.append((round(time_for_directory, 1), directory,
- num_tests))
- timings.sort()
-
- self._printer.print_timing("Time to process slowest subdirectories:")
- min_seconds_to_print = 10
- for timing in timings:
- if timing[0] > min_seconds_to_print:
- self._printer.print_timing(
- " %s took %s seconds to run %s tests." % (timing[1],
- timing[0], timing[2]))
- self._printer.print_timing("")
-
- def _print_statistics_for_test_timings(self, title, timings):
- """Prints the median, mean and standard deviation of the values in
- timings.
-
- Args:
- title: Title for these timings.
- timings: A list of floats representing times.
- """
- self._printer.print_timing(title)
- timings.sort()
-
- num_tests = len(timings)
- if not num_tests:
- return
- percentile90 = timings[int(.9 * num_tests)]
- percentile99 = timings[int(.99 * num_tests)]
-
- if num_tests % 2 == 1:
- median = timings[((num_tests - 1) / 2) - 1]
- else:
- lower = timings[num_tests / 2 - 1]
- upper = timings[num_tests / 2]
- median = (float(lower + upper)) / 2
-
- mean = sum(timings) / num_tests
-
- for timing in timings:
- sum_of_deviations = math.pow(timing - mean, 2)
-
- std_deviation = math.sqrt(sum_of_deviations / num_tests)
- self._printer.print_timing(" Median: %6.3f" % median)
- self._printer.print_timing(" Mean: %6.3f" % mean)
- self._printer.print_timing(" 90th percentile: %6.3f" % percentile90)
- self._printer.print_timing(" 99th percentile: %6.3f" % percentile99)
- self._printer.print_timing(" Standard dev: %6.3f" % std_deviation)
- self._printer.print_timing("")
-
- def _print_result_summary(self, result_summary):
- """Print a short summary about how many tests passed.
-
- Args:
- result_summary: information to log
- """
- failed = result_summary.total_failures
- total = result_summary.total - result_summary.expected_skips
- passed = total - failed
- pct_passed = 0.0
- if total > 0:
- pct_passed = float(passed) * 100 / total
-
- self._printer.print_actual("")
- self._printer.print_actual("=> Results: %d/%d tests passed (%.1f%%)" %
- (passed, total, pct_passed))
- self._printer.print_actual("")
- self._print_result_summary_entry(result_summary,
- test_expectations.NOW, "Tests to be fixed")
-
- self._printer.print_actual("")
- self._print_result_summary_entry(result_summary,
- test_expectations.WONTFIX,
- "Tests that will only be fixed if they crash (WONTFIX)")
- self._printer.print_actual("")
-
- def _print_result_summary_entry(self, result_summary, timeline,
- heading):
- """Print a summary block of results for a particular timeline of test.
-
- Args:
- result_summary: summary to print results for
- timeline: the timeline to print results for (NOT, WONTFIX, etc.)
- heading: a textual description of the timeline
- """
- total = len(result_summary.tests_by_timeline[timeline])
- not_passing = (total -
- len(result_summary.tests_by_expectation[test_expectations.PASS] &
- result_summary.tests_by_timeline[timeline]))
- self._printer.print_actual("=> %s (%d):" % (heading, not_passing))
-
- for result in TestExpectations.EXPECTATION_ORDER:
- if result == test_expectations.PASS:
- continue
- results = (result_summary.tests_by_expectation[result] &
- result_summary.tests_by_timeline[timeline])
- desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
- if not_passing and len(results):
- pct = len(results) * 100.0 / not_passing
- self._printer.print_actual(" %5d %-24s (%4.1f%%)" %
- (len(results), desc[len(results) != 1], pct))
-
def _copy_results_html_file(self):
base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
results_file = self._filesystem.join(base_dir, 'results.html')
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
index 27f06a70e..ae20a8a50 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
@@ -59,9 +59,16 @@ from webkitpy.common.host_mock import MockHost
class ManagerWrapper(Manager):
+ def __init__(self, ref_tests, **kwargs):
+ Manager.__init__(self, **kwargs)
+ self._ref_tests = ref_tests
+
def _get_test_input_for_file(self, test_file):
return test_file
+ def _is_ref_test(self, test_input):
+ return test_input in self._ref_tests
+
class ShardingTests(unittest.TestCase):
test_list = [
@@ -77,14 +84,21 @@ class ShardingTests(unittest.TestCase):
"perf/object-keys.html",
]
- def get_shards(self, num_workers, fully_parallel, test_list=None, max_locked_shards=None):
+ ref_tests = [
+ "http/tests/security/view-source-no-refresh.html",
+ "http/tests/websocket/tests/websocket-protocol-ignored.html",
+ "ietestcenter/Javascript/11.1.5_4-4-c-1.html",
+ "dom/html/level2/html/HTMLAnchorElement06.html",
+ ]
+
+ def get_shards(self, num_workers, fully_parallel, shard_ref_tests=False, test_list=None, max_locked_shards=None):
test_list = test_list or self.test_list
host = MockHost()
port = host.port_factory.get(port_name='test')
port._filesystem = MockFileSystem()
options = MockOptions(max_locked_shards=max_locked_shards)
- self.manager = ManagerWrapper(port=port, options=options, printer=Mock())
- return self.manager._shard_tests(test_list, num_workers, fully_parallel)
+ self.manager = ManagerWrapper(self.ref_tests, port=port, options=options, printer=Mock())
+ return self.manager._shard_tests(test_list, num_workers, fully_parallel, shard_ref_tests)
def test_shard_by_dir(self):
locked, unlocked = self.get_shards(num_workers=2, fully_parallel=False)
@@ -110,6 +124,31 @@ class ShardingTests(unittest.TestCase):
TestShard('ietestcenter/Javascript',
['ietestcenter/Javascript/11.1.5_4-4-c-1.html'])])
+ def test_shard_by_dir_sharding_ref_tests(self):
+ locked, unlocked = self.get_shards(num_workers=2, fully_parallel=False, shard_ref_tests=True)
+
+ # Note that although there are tests in multiple dirs that need locks,
+ # they are crammed into a single shard in order to reduce the # of
+ # workers hitting the server at once.
+ self.assertEquals(locked,
+ [TestShard('locked_shard_1',
+ ['http/tests/websocket/tests/unicode.htm',
+ 'http/tests/xmlhttprequest/supported-xml-content-types.html',
+ 'perf/object-keys.html',
+ 'http/tests/security/view-source-no-refresh.html',
+ 'http/tests/websocket/tests/websocket-protocol-ignored.html'])])
+ self.assertEquals(unlocked,
+ [TestShard('animations',
+ ['animations/keyframes.html']),
+ TestShard('dom/html/level2/html',
+ ['dom/html/level2/html/HTMLAnchorElement03.html']),
+ TestShard('fast/css',
+ ['fast/css/display-none-inline-style-change-crash.html']),
+ TestShard('~ref:dom/html/level2/html',
+ ['dom/html/level2/html/HTMLAnchorElement06.html']),
+ TestShard('~ref:ietestcenter/Javascript',
+ ['ietestcenter/Javascript/11.1.5_4-4-c-1.html'])])
+
def test_shard_every_file(self):
locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True)
self.assertEquals(locked,
@@ -142,6 +181,23 @@ class ShardingTests(unittest.TestCase):
'ietestcenter/Javascript/11.1.5_4-4-c-1.html',
'dom/html/level2/html/HTMLAnchorElement06.html'])])
+ def test_shard_in_two_sharding_ref_tests(self):
+ locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False, shard_ref_tests=True)
+ self.assertEquals(locked,
+ [TestShard('locked_tests',
+ ['http/tests/websocket/tests/unicode.htm',
+ 'http/tests/xmlhttprequest/supported-xml-content-types.html',
+ 'perf/object-keys.html',
+ 'http/tests/security/view-source-no-refresh.html',
+ 'http/tests/websocket/tests/websocket-protocol-ignored.html'])])
+ self.assertEquals(unlocked,
+ [TestShard('unlocked_tests',
+ ['animations/keyframes.html',
+ 'fast/css/display-none-inline-style-change-crash.html',
+ 'dom/html/level2/html/HTMLAnchorElement03.html',
+ 'ietestcenter/Javascript/11.1.5_4-4-c-1.html',
+ 'dom/html/level2/html/HTMLAnchorElement06.html'])])
+
def test_shard_in_two_has_no_locked_shards(self):
locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False,
test_list=['animations/keyframe.html'])
@@ -200,25 +256,6 @@ class ManagerTest(unittest.TestCase):
def get_options(self):
return MockOptions(pixel_tests=False, new_baseline=False, time_out_ms=6000, slow_time_out_ms=30000, worker_model='inline')
- def get_printer(self):
- class FakePrinter(object):
- def __init__(self):
- self.output = []
-
- def print_config(self, msg):
- self.output.append(msg)
-
- return FakePrinter()
-
- def test_fallback_path_in_config(self):
- options = self.get_options()
- host = MockHost()
- port = host.port_factory.get('test-mac-leopard', options=options)
- printer = self.get_printer()
- manager = Manager(port, options, printer)
- manager.print_config()
- self.assertTrue('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic' in printer.output)
-
def test_http_locking(tester):
options, args = run_webkit_tests.parse_args(['--platform=test', '--print=nothing', 'http/tests/passes', 'passes'])
host = MockHost()
@@ -253,6 +290,8 @@ class ManagerTest(unittest.TestCase):
manager._options = MockOptions(exit_after_n_failures=None, exit_after_n_crashes_or_timeouts=None)
manager._test_files = ['foo/bar.html', 'baz.html']
+ manager._test_is_slow = lambda test_name: False
+
result_summary = ResultSummary(expectations=Mock(), test_files=manager._test_files)
result_summary.unexpected_failures = 100
result_summary.unexpected_crashes = 50
@@ -486,29 +525,30 @@ class ResultSummaryTest(unittest.TestCase):
if extra_expectations:
expectations += extra_expectations
+ test_is_slow = False
paths, rs, exp = self.get_result_summary(port, tests, expectations)
if expected:
- rs.add(self.get_result('passes/text.html', test_expectations.PASS), expected)
- rs.add(self.get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected)
- rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), expected)
+ rs.add(self.get_result('passes/text.html', test_expectations.PASS), expected, test_is_slow)
+ rs.add(self.get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow)
+ rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow)
elif passing:
- rs.add(self.get_result('passes/text.html'), expected)
- rs.add(self.get_result('failures/expected/timeout.html'), expected)
- rs.add(self.get_result('failures/expected/crash.html'), expected)
+ rs.add(self.get_result('passes/text.html'), expected, test_is_slow)
+ rs.add(self.get_result('failures/expected/timeout.html'), expected, test_is_slow)
+ rs.add(self.get_result('failures/expected/crash.html'), expected, test_is_slow)
else:
- rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), expected)
- rs.add(self.get_result('failures/expected/timeout.html', test_expectations.CRASH), expected)
- rs.add(self.get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected)
+ rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), expected, test_is_slow)
+ rs.add(self.get_result('failures/expected/timeout.html', test_expectations.CRASH), expected, test_is_slow)
+ rs.add(self.get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected, test_is_slow)
for test in extra_tests:
- rs.add(self.get_result(test, test_expectations.CRASH), expected)
+ rs.add(self.get_result(test, test_expectations.CRASH), expected, test_is_slow)
retry = rs
if flaky:
paths, retry, exp = self.get_result_summary(port, tests, expectations)
- retry.add(self.get_result('passes/text.html'), True)
- retry.add(self.get_result('failures/expected/timeout.html'), True)
- retry.add(self.get_result('failures/expected/crash.html'), True)
+ retry.add(self.get_result('passes/text.html'), True, test_is_slow)
+ retry.add(self.get_result('failures/expected/timeout.html'), True, test_is_slow)
+ retry.add(self.get_result('failures/expected/crash.html'), True, test_is_slow)
unexpected_results = manager.summarize_results(port, exp, rs, retry, test_timings={}, only_unexpected=True, interrupted=False)
expected_results = manager.summarize_results(port, exp, rs, retry, test_timings={}, only_unexpected=False, interrupted=False)
return expected_results, unexpected_results
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py b/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
index 837aea86b..378826c51 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
@@ -83,7 +83,9 @@ class Worker(object):
self._caller.post('finished_test_list', test_list_name, len(test_inputs), elapsed_time)
def _update_test_input(self, test_input):
- test_input.reference_files = self._port.reference_files(test_input.test_name)
+ if test_input.reference_files is None:
+ # Lazy initialization.
+ test_input.reference_files = self._port.reference_files(test_input.test_name)
if test_input.reference_files:
test_input.should_run_pixel_test = True
elif self._options.pixel_tests:
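
Both Manager._is_ref_test (added earlier in this commit) and the worker-side _update_test_input now share the same lazy lookup of reference_files; the sketch below restates that pattern outside the diff. `TestInput` and the `port` object here are simplified stand-ins, not the real webkitpy classes.

```python
# Sketch of the lazy-initialization pattern: reference_files starts as None
# and is resolved at most once per TestInput, whichever caller asks first.
class TestInput(object):
    def __init__(self, test_name):
        self.test_name = test_name
        self.reference_files = None  # not looked up yet

def reference_files_for(test_input, port):
    if test_input.reference_files is None:
        # Lazy initialization: ask the port only on first use, then cache.
        test_input.reference_files = port.reference_files(test_input.test_name)
    return test_input.reference_files
```

With the lookup cached on the TestInput, the manager can classify ref tests while sharding without forcing the worker to repeat the same port query later.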