Diffstat (limited to 'testsuite/driver/testlib.py')
-rw-r--r--  testsuite/driver/testlib.py | 69
1 file changed, 43 insertions, 26 deletions
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index b637b1992d..040e674312 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -65,7 +65,7 @@ def isCompilerStatsTest():
def isStatsTest():
opts = getTestOpts()
- return opts.is_stats_test
+ return bool(opts.stats_range_fields)
# This can be called at the top of a file of tests, to set default test options
@@ -348,18 +348,29 @@ def testing_metrics():
# measures the performance numbers of the compiler.
# As this is a fairly rare case in the testsuite, it defaults to false to
# indicate that it is a 'normal' performance test.
-def _collect_stats(name, opts, metrics, deviation, is_compiler_stats_test=False):
+def _collect_stats(name, opts, metric, deviation, is_compiler_stats_test=False):
if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
failBecause('This test has an invalid name.')
- # Normalize metrics to a list of strings.
- if isinstance(metrics, str):
- if metrics == 'all':
- metrics = testing_metrics()
- else:
- metrics = [metrics]
+ tests = Perf.get_perf_stats('HEAD^')
+
+ # Might have multiple metrics being measured for a single test.
+ test = [t for t in tests if t.test == name]
+
+ if tests == [] or test == []:
+ # There are no prior metrics for this test.
+ if isinstance(metric, str):
+ if metric == 'all':
+ for field in testing_metrics():
+ opts.stats_range_fields[field] = None
+ else:
+ opts.stats_range_fields[metric] = None
+ if isinstance(metric, list):
+ for field in metric:
+ opts.stats_range_fields[field] = None
+
+ return
- opts.is_stats_test = True
if is_compiler_stats_test:
opts.is_compiler_stats_test = True
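After this hunk, opts.stats_range_fields maps each metric name either to None (no prior measurements exist, so the run is accepted and recorded as a new baseline) or to an (expected value, deviation) pair filled in by the next hunk. A minimal sketch of the two shapes, with made-up numbers and metric names spelled as they appear in the RTS stats output:

    # Illustration only: the two possible shapes of opts.stats_range_fields
    # after _collect_stats has run for a test.
    stats_range_fields = {
        'bytes allocated': None,             # no prior metrics: pass and record
        'max_bytes_used': (177656.0, 10),    # (average of prior runs, allowed % deviation)
    }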
@@ -368,11 +379,24 @@ def _collect_stats(name, opts, metrics, deviation, is_compiler_stats_test=False)
if config.compiler_debugged and is_compiler_stats_test:
opts.skip = 1
- for metric in metrics:
- baselineByWay = lambda way, target_commit: Perf.baseline_metric( \
- target_commit, name, config.test_env, metric, way)
+ # get the average value of the given metric from test
+ def get_avg_val(metric_2):
+ metric_2_metrics = [float(t.value) for t in test if t.metric == metric_2]
+ return sum(metric_2_metrics) / len(metric_2_metrics)
+
+ # 'all' is a shorthand to test for bytes allocated, peak megabytes allocated, and max bytes used.
+ if isinstance(metric, str):
+ if metric == 'all':
+ for field in testing_metrics():
+ opts.stats_range_fields[field] = (get_avg_val(field), deviation)
+ return
+ else:
+ opts.stats_range_fields[metric] = (get_avg_val(metric), deviation)
+ return
- opts.stats_range_fields[metric] = (baselineByWay, deviation)
+ if isinstance(metric, list):
+ for field in metric:
+ opts.stats_range_fields[field] = (get_avg_val(field), deviation)
# -----
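For context, _collect_stats is normally reached through the collect_stats and collect_compiler_stats wrappers used in all.T files (not part of this diff); a sketch of typical calls, with hypothetical test names and deviations:

    # Hypothetical all.T entries: the string names the metric ('all' expands to
    # the three testing_metrics()) and the number is the allowed % deviation.
    test('T12345', [collect_stats('bytes allocated', 5)], compile_and_run, [''])
    test('T67890', [collect_compiler_stats('all', 10)], compile, [''])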
@@ -1140,11 +1164,10 @@ def metric_dict(name, way, metric, value):
# name: name of the test.
# way: the way.
# stats_file: the path of the stats_file containing the stats for the test.
-# range_fields: see TestOptions.stats_range_fields
+# range_fields
# Returns a pass/fail object. Passes if the stats are within the expected value ranges.
# This prints the results for the user.
def check_stats(name, way, stats_file, range_fields):
- head_commit = Perf.commit_hash('HEAD')
result = passed()
if range_fields:
try:
@@ -1154,7 +1177,7 @@ def check_stats(name, way, stats_file, range_fields):
stats_file_contents = f.read()
f.close()
- for (metric, baseline_and_dev) in range_fields.items():
+ for (metric, range_val_dev) in range_fields.items():
field_match = re.search('\("' + metric + '", "([0-9]+)"\)', stats_file_contents)
if field_match == None:
print('Failed to find metric: ', metric)
@@ -1167,15 +1190,14 @@ def check_stats(name, way, stats_file, range_fields):
change = None
# If this is the first time running the benchmark, then pass.
- baseline = baseline_and_dev[0](way, head_commit)
- if baseline == None:
+ if range_val_dev == None:
metric_result = passed()
change = MetricChange.NewMetric
else:
- tolerance_dev = baseline_and_dev[1]
+ (expected_val, tolerance_dev) = range_val_dev
(change, metric_result) = Perf.check_stats_change(
perf_stat,
- baseline,
+ expected_val,
tolerance_dev,
config.allowed_perf_changes,
config.verbose >= 4)
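With this change, Perf.check_stats_change compares the measured value against the stored average rather than a per-commit baseline. In essence the (expected_val, tolerance_dev) pair describes a percentage window; a simplified sketch of that kind of check, not the actual implementation:

    # Simplified sketch: accept the measured value if it lies within
    # tolerance_dev percent of the expected (average) value.
    def within_tolerance(actual, expected_val, tolerance_dev):
        lower = expected_val * (1 - tolerance_dev / 100.0)
        upper = expected_val * (1 + tolerance_dev / 100.0)
        return lower <= actual <= upper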
@@ -1308,13 +1330,8 @@ def simple_run(name, way, prog, extra_run_opts):
my_rts_flags = rts_flags(way)
- # Collect stats if necessary:
- # isStatsTest and not isCompilerStatsTest():
- # assume we are running a ghc compiled program. Collect stats.
- # isStatsTest and way == 'ghci':
- # assume we are running a program via ghci. Collect stats
stats_file = name + '.stats'
- if isStatsTest() and (not isCompilerStatsTest() or way == 'ghci'):
+ if isStatsTest() and not isCompilerStatsTest():
stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
else:
stats_args = ''
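The stats file produced by '+RTS -V0 -t<file> --machine-readable -RTS' holds one ("key", "value") pair per line, which is the format the regex in check_stats matches. An excerpt with made-up values:

    ("bytes allocated", "506816")
    ("max_bytes_used", "177656")
    ("peak_megabytes_allocated", "6")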