Diffstat (limited to 'testsuite/driver/testlib.py')
-rw-r--r-- | testsuite/driver/testlib.py | 70
1 file changed, 27 insertions, 43 deletions
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index 710800b9f0..dd66340e5f 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -65,7 +65,7 @@ def isCompilerStatsTest():
 
 def isStatsTest():
     opts = getTestOpts()
-    return bool(opts.stats_range_fields)
+    return opts.is_stats_test
 
 # This can be called at the top of a file of tests, to set default test options
@@ -348,29 +348,18 @@ def testing_metrics():
 # measures the performance numbers of the compiler.
 # As this is a fairly rare case in the testsuite, it defaults to false to
 # indicate that it is a 'normal' performance test.
-def _collect_stats(name, opts, metric, deviation, is_compiler_stats_test=False):
+def _collect_stats(name, opts, metrics, deviation, is_compiler_stats_test=False):
     if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
         failBecause('This test has an invalid name.')
 
-    tests = Perf.get_perf_stats('HEAD^')
-
-    # Might have multiple metrics being measured for a single test.
-    test = [t for t in tests if t.test == name]
-
-    if tests == [] or test == []:
-        # There are no prior metrics for this test.
-        if isinstance(metric, str):
-            if metric == 'all':
-                for field in testing_metrics():
-                    opts.stats_range_fields[field] = None
-            else:
-                opts.stats_range_fields[metric] = None
-        if isinstance(metric, list):
-            for field in metric:
-                opts.stats_range_fields[field] = None
-
-        return
+    # Normalize metrics to a list of strings.
+    if isinstance(metrics, str):
+        if metrics == 'all':
+            metrics = testing_metrics()
+        else:
+            metrics = [metrics]
 
+    opts.is_stats_test = True
     if is_compiler_stats_test:
         opts.is_compiler_stats_test = True
@@ -379,24 +368,12 @@ def _collect_stats(name, opts, metric, deviation, is_compiler_stats_test=False):
     if config.compiler_debugged and is_compiler_stats_test:
         opts.skip = 1
 
-    # get the average value of the given metric from test
-    def get_avg_val(metric_2):
-        metric_2_metrics = [float(t.value) for t in test if t.metric == metric_2]
-        return sum(metric_2_metrics) / len(metric_2_metrics)
-
-    # 'all' is a shorthand to test for bytes allocated, peak megabytes allocated, and max bytes used.
-    if isinstance(metric, str):
-        if metric == 'all':
-            for field in testing_metrics():
-                opts.stats_range_fields[field] = (get_avg_val(field), deviation)
-            return
-        else:
-            opts.stats_range_fields[metric] = (get_avg_val(metric), deviation)
-            return
+    for metric in metrics:
+        def baselineByWay(way, target_commit, metric=metric):
+            return Perf.baseline_metric( \
+                target_commit, name, config.test_env, metric, way)
 
-    if isinstance(metric, list):
-        for field in metric:
-            opts.stats_range_fields[field] = (get_avg_val(field), deviation)
+        opts.stats_range_fields[metric] = (baselineByWay, deviation)
 
 # -----
 
@@ -1179,10 +1156,11 @@ def metric_dict(name, way, metric, value):
 # name: name of the test.
 # way: the way.
 # stats_file: the path of the stats_file containing the stats for the test.
-# range_fields
+# range_fields: see TestOptions.stats_range_fields
 # Returns a pass/fail object. Passes if the stats are withing the expected value ranges.
 # This prints the results for the user.
 def check_stats(name, way, stats_file, range_fields):
+    head_commit = Perf.commit_hash('HEAD')
     result = passed()
     if range_fields:
         try:
@@ -1192,7 +1170,7 @@ def check_stats(name, way, stats_file, range_fields):
             stats_file_contents = f.read()
             f.close()
 
-            for (metric, range_val_dev) in range_fields.items():
+            for (metric, baseline_and_dev) in range_fields.items():
                 field_match = re.search('\("' + metric + '", "([0-9]+)"\)', stats_file_contents)
                 if field_match == None:
                     print('Failed to find metric: ', metric)
@@ -1205,14 +1183,15 @@ def check_stats(name, way, stats_file, range_fields):
                 change = None
 
                 # If this is the first time running the benchmark, then pass.
-                if range_val_dev == None:
+                baseline = baseline_and_dev[0](way, head_commit)
+                if baseline == None:
                     metric_result = passed()
                     change = MetricChange.NewMetric
                 else:
-                    (expected_val, tolerance_dev) = range_val_dev
+                    tolerance_dev = baseline_and_dev[1]
                     (change, metric_result) = Perf.check_stats_change(
                         perf_stat,
-                        expected_val,
+                        baseline,
                         tolerance_dev,
                         config.allowed_perf_changes,
                         config.verbose >= 4)
@@ -1348,8 +1327,13 @@ def simple_run(name, way, prog, extra_run_opts):
 
     my_rts_flags = rts_flags(way)
 
+    # Collect stats if necessary:
+    # isStatsTest and not isCompilerStatsTest():
+    #   assume we are running a ghc compiled program. Collect stats.
+    # isStatsTest and way == 'ghci':
+    #   assume we are running a program via ghci. Collect stats
     stats_file = name + '.stats'
-    if isStatsTest() and not isCompilerStatsTest():
+    if isStatsTest() and (not isCompilerStatsTest() or way == 'ghci'):
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
     else:
        stats_args = ''
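
For readers following the data flow: after this change, stats_range_fields no longer stores a pre-computed (expected_value, deviation) pair per metric. Each metric now maps to (baseline_lookup_function, deviation), and check_stats calls the function with the way and the HEAD commit to fetch a baseline lazily, passing the test as a new metric when the lookup returns None. A self-contained sketch of that shape, with hypothetical names (fake_baseline, check_metric) standing in for Perf.baseline_metric and the real check_stats logic:

    # Sketch only: mimics the (baseline_fn, deviation) shape from the diff,
    # not GHC's actual implementation.
    def fake_baseline(way, commit):
        # Stand-in for Perf.baseline_metric: a recorded value, or None when
        # no prior measurement exists for this way/commit.
        recorded = {('normal', 'HEAD'): 1000000}
        return recorded.get((way, commit))

    stats_range_fields = {'bytes allocated': (fake_baseline, 5)}  # 5% tolerance

    def check_metric(metric, measured, way, commit):
        baseline_fn, tolerance = stats_range_fields[metric]
        baseline = baseline_fn(way, commit)
        if baseline is None:
            return 'pass (new metric)'  # mirrors the MetricChange.NewMetric path
        if abs(measured - baseline) <= baseline * tolerance / 100.0:
            return 'pass'
        return 'fail (outside %d%% of %d)' % (tolerance, baseline)

    print(check_metric('bytes allocated', 1030000, 'normal', 'HEAD'))  # pass
    print(check_metric('bytes allocated', 1030000, 'ghci', 'HEAD'))    # pass (new metric)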
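
A note on the "def baselineByWay(way, target_commit, metric=metric)" line in the diff: the metric=metric default argument is the standard Python idiom for binding the current loop value into each closure. Without it, every baselineByWay created in the loop would look up the final value of metric when called later from check_stats. A minimal demonstration, with invented names for illustration:

    # Closures capture the variable, not its value at definition time.
    late = [lambda: m for m in ('a', 'b')]       # both lambdas see the final m
    bound = [lambda m=m: m for m in ('a', 'b')]  # m is bound per iteration

    print([f() for f in late])   # ['b', 'b']
    print([f() for f in bound])  # ['a', 'b']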