author     David Eichmann <EichmannD@gmail.com>  2019-02-19 11:38:51 +0000
committer  Marge Bot <ben+marge-bot@smart-cactus.org>  2019-02-21 04:20:09 -0500
commit     32f44ed81b0f16099d780e73ad2ea1a3cd812448 (patch)
tree       7df37791de91cbc53575ccc8c3568620825a58c6 /testsuite/driver
parent     2a0be1468cf440547e2ceb93f0d01d23637affc6 (diff)
Fix test runner crash when not in a git repo
Respect `inside_git_repo()` when checking performance stats.
Diffstat (limited to 'testsuite/driver')
-rw-r--r--  testsuite/driver/perf_notes.py  16
-rw-r--r--  testsuite/driver/runtests.py    11
-rw-r--r--  testsuite/driver/testlib.py      5
3 files changed, 18 insertions, 14 deletions
diff --git a/testsuite/driver/perf_notes.py b/testsuite/driver/perf_notes.py
index 365732ca69..2b4835392c 100644
--- a/testsuite/driver/perf_notes.py
+++ b/testsuite/driver/perf_notes.py
@@ -23,13 +23,17 @@ from testutil import passed, failBecause
# Check if "git rev-parse" can be run successfully.
# True implies the current directory is a git repo.
+_inside_git_repo_cache = None
def inside_git_repo():
- try:
- subprocess.check_call(['git', 'rev-parse', 'HEAD'],
- stdout=subprocess.DEVNULL)
- return True
- except subprocess.CalledProcessError:
- return False
+ global _inside_git_repo_cache
+ if _inside_git_repo_cache == None:
+ try:
+ subprocess.check_call(['git', 'rev-parse', 'HEAD'],
+ stdout=subprocess.DEVNULL)
+ _inside_git_repo_cache = True
+ except subprocess.CalledProcessError:
+ _inside_git_repo_cache = False
+ return _inside_git_repo_cache
# Check if the worktree is dirty.
def is_worktree_dirty():
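
For reference, the memoized check added above is equivalent to the following standalone sketch (assuming `git` is on the PATH; a missing git binary would raise FileNotFoundError, which this sketch, like the patch, does not catch):

    import subprocess

    _inside_git_repo_cache = None

    def inside_git_repo():
        # Run "git rev-parse HEAD" once and cache the outcome, so the
        # subprocess is not spawned again on every perf-stat check.
        global _inside_git_repo_cache
        if _inside_git_repo_cache is None:
            try:
                subprocess.check_call(['git', 'rev-parse', 'HEAD'],
                                      stdout=subprocess.DEVNULL)
                _inside_git_repo_cache = True
            except subprocess.CalledProcessError:
                _inside_git_repo_cache = False
        return _inside_git_repo_cache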
diff --git a/testsuite/driver/runtests.py b/testsuite/driver/runtests.py
index f61ebbc6b1..51f682e5ee 100644
--- a/testsuite/driver/runtests.py
+++ b/testsuite/driver/runtests.py
@@ -122,8 +122,7 @@ if args.verbose is not None:
# Note force skip perf tests: skip if this is not a git repo (estimated with inside_git_repo)
# and no metrics file is given. In this case there is no way to read the previous commit's
# perf test results, nor a way to store new perf test results.
-canGitStatus = inside_git_repo()
-forceSkipPerfTests = not hasMetricsFile and not canGitStatus
+forceSkipPerfTests = not hasMetricsFile and not inside_git_repo()
config.skip_perf_tests = args.skip_perf_tests or forceSkipPerfTests
config.only_perf_tests = args.only_perf_tests
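
The decision above can be read as a small pure function; a minimal sketch (the helper name and signature are illustrative, not part of the driver):

    def should_skip_perf_tests(has_metrics_file, in_git_repo, user_skip):
        # Force-skip when there is neither a metrics file to write new
        # results to nor a git repo to read baseline results from.
        force_skip = not has_metrics_file and not in_git_repo
        return user_skip or force_skip

    # Outside a git repo with no --metrics-file, perf tests are skipped:
    assert should_skip_perf_tests(False, False, False)
    # But a metrics file alone is enough to run them:
    assert not should_skip_perf_tests(True, False, False)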
@@ -371,14 +370,14 @@ else:
spacing = " "
if forceSkipPerfTests and not args.skip_perf_tests:
print()
- print(str_warn('Skipping All Performance Tests') + ' `git status` exited with non-zero exit code.')
- print(spacing + 'Git is required because performance test results are compared with the previous git commit\'s results (stored with git notes).')
+ print(str_warn('Skipping All Performance Tests') + ' `git` exited with non-zero exit code.')
+ print(spacing + 'Git is required because performance test results are compared with ancestor git commits\' results (stored with git notes).')
print(spacing + 'You can still run the tests without git by specifying an output file with --metrics-file FILE.')
# Warn of new metrics.
new_metrics = [metric for (change, metric) in t.metrics if change == MetricChange.NewMetric]
if any(new_metrics):
- if canGitStatus:
+ if inside_git_repo():
reason = 'a baseline (expected value) cannot be recovered from' + \
' previous git commits. This may be due to HEAD having' + \
' new tests or having expected changes, the presence of' + \
@@ -421,7 +420,7 @@ else:
print('Appending ' + str(len(stats)) + ' stats to file: ' + config.metrics_file)
with open(config.metrics_file, 'a') as file:
file.write("\n" + Perf.format_perf_stat(stats))
- elif canGitStatus and any(stats):
+ elif inside_git_repo() and any(stats):
if is_worktree_dirty():
print()
print(str_warn('Performance Metrics NOT Saved') + \
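
Taken together, the saving logic in this hunk dispatches roughly as follows; a simplified sketch with illustrative names, returning a description instead of performing I/O:

    def perf_stats_destination(stats, metrics_file, in_git_repo, worktree_dirty):
        # Prefer an explicit metrics file; otherwise fall back to git
        # notes, which requires a repo and a clean worktree so results
        # are attributable to a definite commit.
        if metrics_file is not None:
            return 'append to ' + metrics_file
        if in_git_repo and any(stats):
            if worktree_dirty:
                return 'NOT saved: worktree is dirty'
            return 'saved to git notes on HEAD'
        return 'NOT saved: no metrics file and not in a git repo'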
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index e800772cd1..fb7888c4d3 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -1182,7 +1182,7 @@ def metric_dict(name, way, metric, value):
# Returns a pass/fail object. Passes if the stats are within the expected value ranges.
# This prints the results for the user.
def check_stats(name, way, stats_file, range_fields):
- head_commit = Perf.commit_hash('HEAD')
+ head_commit = Perf.commit_hash('HEAD') if Perf.inside_git_repo() else None
result = passed()
if range_fields:
try:
@@ -1205,7 +1205,8 @@ def check_stats(name, way, stats_file, range_fields):
change = None
# If this is the first time running the benchmark, then pass.
- baseline = baseline_and_dev[0](way, head_commit)
+ baseline = baseline_and_dev[0](way, head_commit) \
+ if Perf.inside_git_repo() else None
if baseline == None:
metric_result = passed()
change = MetricChange.NewMetric
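
The net effect in check_stats is that, outside a git repo, both the HEAD commit and the baseline are None, so the metric is recorded as new and the test passes; a minimal sketch (lookup_baseline stands in for baseline_and_dev[0]):

    def baseline_for(way, head_commit, lookup_baseline, in_git_repo):
        # With no commit history to consult there is no expected value,
        # so report "no baseline"; the caller then records the metric
        # as MetricChange.NewMetric and passes the test.
        if not in_git_repo:
            return None
        return lookup_baseline(way, head_commit)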