author     David Eichmann <EichmannD@gmail.com>    2018-11-30 16:48:25 +0000
committer  David Eichmann <EichmannD@gmail.com>    2018-11-30 16:54:14 +0000
commit     6e24a0bee0d0be8da040c3d5a1e90141abd89852
tree       b7ac281c805fca986aae01cdb993c441caf9214c
parent     f10df65fa2c9a5ec2f4c09b97e02e87c377beac3
Skip all performance tests if not in a git repo.
Reviewers: bgamari, tdammers, osa1
Reviewed By: tdammers
Subscribers: osa1, tdammers, rwbarton, carter
GHC Trac Issues: #15923
Differential Revision: https://phabricator.haskell.org/D5367
 testsuite/driver/perf_notes.py |  9
 testsuite/driver/runtests.py   | 32
 2 files changed, 35 insertions(+), 6 deletions(-)
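The heart of the patch is the `can_git_status` helper added to perf_notes.py in the diff below: if `git status` exits successfully, the driver assumes it is running inside a git repo. As a rough standalone sketch of the same probe, using only the stdlib: the variant here additionally silences the subprocess output and treats a missing `git` executable as "not a repo"; both behaviours are my additions, not part of the commit.

```python
import subprocess

# Sketch of the detection idea behind the patch's can_git_status().
# Differences from the committed version (assumptions, not in the commit):
#   * stdout/stderr are suppressed so the probe stays quiet,
#   * OSError is caught too, covering a missing `git` executable.
def in_git_repo():
    try:
        subprocess.check_call(['git', 'status'],
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.DEVNULL)
        return True
    except (subprocess.CalledProcessError, OSError):
        return False

if __name__ == '__main__':
    print('git repo detected:', in_git_repo())
```

Note that the committed version only catches `CalledProcessError`, so if `git` is not installed at all, `subprocess.check_call` will raise `FileNotFoundError` rather than returning `False`.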
diff --git a/testsuite/driver/perf_notes.py b/testsuite/driver/perf_notes.py
index f162164e3e..c275041252 100644
--- a/testsuite/driver/perf_notes.py
+++ b/testsuite/driver/perf_notes.py
@@ -20,6 +20,15 @@ from math import ceil, trunc
 
 from testutil import passed, failBecause
 
+# Check if "git status" can be run successfully.
+# True implies the current directory is a git repo.
+def can_git_status():
+    try:
+        subprocess.check_call(['git', 'status'])
+        return True
+    except subprocess.CalledProcessError:
+        return False
+
 #
 # Some data access functions. At the moment this uses git notes.
 #
diff --git a/testsuite/driver/runtests.py b/testsuite/driver/runtests.py
index fb3fe6ad54..c8966b4580 100644
--- a/testsuite/driver/runtests.py
+++ b/testsuite/driver/runtests.py
@@ -25,7 +25,7 @@ import subprocess
 
 from testutil import getStdout, Watcher, str_warn, str_info
 from testglobals import getConfig, ghc_env, getTestRun, TestOptions, brokens
-from perf_notes import MetricChange
+from perf_notes import MetricChange, can_git_status
 from junit import junit
 
 # Readline sometimes spews out ANSI escapes for some values of TERM,
@@ -84,6 +84,7 @@ if args.rootdir:
     config.rootdirs = args.rootdir
 
 config.metrics_file = args.metrics_file
+hasMetricsFile = bool(config.metrics_file)
 config.summary_file = args.summary_file
 config.no_print_summary = args.no_print_summary
 
@@ -117,7 +118,12 @@ if args.threads:
 if args.verbose is not None:
     config.verbose = args.verbose
 
-config.skip_perf_tests = args.skip_perf_tests
+# Note force skip perf tests: skip if this is not a git repo (estimated with can_git_status)
+# and no metrics file is given. In this case there is no way to read the previous commit's
+# perf test results, nor a way to store new perf test results.
+canGitStatus = can_git_status()
+forceSkipPerfTests = not hasMetricsFile and not canGitStatus
+config.skip_perf_tests = args.skip_perf_tests or forceSkipPerfTests
 config.only_perf_tests = args.only_perf_tests
 
 if args.test_env:
@@ -351,12 +357,24 @@ else:
     # flush everything before we continue
     sys.stdout.flush()
 
+    # Warn if we had to force skip perf tests (see Note force skip perf tests).
+    spacing = "       "
+    if forceSkipPerfTests and not args.skip_perf_tests:
+        print()
+        print(str_warn('Skipping All Performance Tests') + ' `git status` exited with non-zero exit code.')
+        print(spacing + 'Git is required because performance test results are compared with the previous git commit\'s results (stored with git notes).')
+        print(spacing + 'You can still run the tests without git by specifying an output file with --metrics-file FILE.')
+
     # Warn of new metrics.
     new_metrics = [metric for (change, metric) in t.metrics if change == MetricChange.NewMetric]
-    spacing = "       "
     if any(new_metrics):
+        if canGitStatus:
+            reason = 'the previous git commit doesn\'t have recorded metrics for the following tests.' + \
+                     ' If the tests exist on the previous commit, then check it out and run the tests to generate the missing metrics.'
+        else:
+            reason = 'this is not a git repo so the previous git commit\'s metrics cannot be loaded from git notes:'
         print()
-        print(str_warn('New Metrics') + ' the previous git commit doesn\'t have metrics for the following tests:')
+        print(str_warn('New Metrics') + ' these metrics trivially pass because ' + reason)
         print(spacing + ('\n' + spacing).join(set([metric.test for metric in new_metrics])))
 
     # Inform of how to accept metric changes.
@@ -369,14 +387,16 @@ else:
 
     summary(t, sys.stdout, config.no_print_summary, True)
 
+    # Write perf stats if any exist or if a metrics file is specified.
     stats = [stat for (_, stat) in t.metrics]
-    if config.metrics_file:
+    if hasMetricsFile:
         print('Appending ' + str(len(stats)) + ' stats to file: ' + config.metrics_file)
         with open(config.metrics_file, 'a') as file:
             file.write("\n" + Perf.format_perf_stat(stats))
-    else:
+    elif canGitStatus and any(stats):
         Perf.append_perf_stat(stats)
 
+    # Write summary
     if config.summary_file:
         with open(config.summary_file, 'w') as file:
             summary(t, file)
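Taken together, the change keys the driver's perf-test behaviour off two booleans, `hasMetricsFile` and `canGitStatus`. The snippet below is an illustrative summary of the resulting policy, not code from the commit; the function name `perf_test_policy` is hypothetical.

```python
# Sketch of the skip/storage policy introduced by the patch.
# hasMetricsFile / canGitStatus mirror the patch's variables; the
# decision function itself is a summary for illustration, not GHC code.
def perf_test_policy(has_metrics_file, can_git_status):
    force_skip = not has_metrics_file and not can_git_status
    if force_skip:
        return 'perf tests skipped: nowhere to read or write metrics'
    if has_metrics_file:
        return 'run perf tests, append stats to --metrics-file'
    return 'run perf tests, store stats in git notes'

# Enumerate all four combinations of the two booleans.
for has_file in (False, True):
    for in_repo in (False, True):
        print((has_file, in_repo), '->', perf_test_policy(has_file, in_repo))
```

In particular, perf tests are only force-skipped in the one case where results could neither be compared against git notes nor appended to a `--metrics-file`.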