diff options
author | dalyd <david.daly@mongodb.com> | 2015-09-10 16:24:07 -0400 |
---|---|---|
committer | dalyd <david.daly@mongodb.com> | 2015-09-11 17:27:34 -0400 |
commit | 6fa5547c842a31712af91cf2927a631dbbcbdbf4 (patch) | |
tree | 5c0aaac2e266b81a39f16ed7956027339c677171 | |
parent | f43f394340f213f21ae0d7d50fe9c2766807f2be (diff) | |
download | mongo-6fa5547c842a31712af91cf2927a631dbbcbdbf4.tar.gz |
SERVER-20035: Update perf_regression_check.py script to output report.json summarizing results
-rw-r--r-- | buildscripts/perf_regression_check.py | 38 | ||||
-rw-r--r-- | etc/perf.yml | 5 |
2 files changed, 36 insertions, 7 deletions
diff --git a/buildscripts/perf_regression_check.py b/buildscripts/perf_regression_check.py index 5d44b41602d..a6cf29141ba 100644 --- a/buildscripts/perf_regression_check.py +++ b/buildscripts/perf_regression_check.py @@ -110,10 +110,14 @@ def main(args): j = get_json(args.file) h = History(j) testnames = h.testnames() - failed = False + failed = 0 + results = [] for test in testnames: + # The first entry is valid. The rest is dummy data to match the existing format + result = {'test_file' : test, 'exit_code' : 0, 'elapsed' : 5, 'start': 1441227291.962453, 'end': 1441227293.428761} this_one = h.seriesAtRevision(test, args.rev) + testFailed = False print "checking %s.." % (test) if not this_one: print "\tno data at this revision, skipping" @@ -128,17 +132,39 @@ def main(args): continue if compareResults(this_one, previous[0], args.threshold, "Previous", h.noiseLevels(test), args.noise, args.threadThreshold, args.threadNoise): - failed = True + testFailed = True + result['PreviousCompare'] = 'fail' + else : + result['PreviousCompare'] = 'pass' + daysprevious = h.seriesItemsNDaysBefore(test, args.rev,args.ndays) reference = h.seriesAtRevision(test, args.reference) if compareResults(this_one, daysprevious, args.threshold, "NDays", h.noiseLevels(test), args.noise, args.threadThreshold, args.threadNoise): - failed = True + testFailed = True + result['NDayCompare'] = 'fail' + else : + result['NDayCompare'] = 'pass' if compareResults(this_one, reference, args.threshold, "Reference", h.noiseLevels(test), args.noise, args.threadThreshold, args.threadNoise): - failed = True - - if failed: + testFailed = True + result['BaselineCompare'] = 'fail' + else : + result['BaselineCompare'] = 'pass' + if testFailed : + result['status'] = 'fail' + failed += 1 + else : + result['status'] = 'pass' + results.append(result) + + report = {} + report['failures'] = failed + report['results'] = results + + reportFile = open('report.json', 'w') + json.dump(report, reportFile, indent=4, 
separators=(',', ': ')) + if failed > 0 : sys.exit(1) else: sys.exit(0) diff --git a/etc/perf.yml b/etc/perf.yml index 40a0d175929..805d843447e 100644 --- a/etc/perf.yml +++ b/etc/perf.yml @@ -83,6 +83,9 @@ functions: set -o verbose source ./venv/bin/activate python perf_regression_check.py -f history.json --rev ${revision} + - command: attach.results + params: + file_location: src/report.json "run perf tests": - command: shell.exec params: @@ -97,7 +100,7 @@ functions: params: working_dir: src script: | - set -e + set -e set -v source ./venv/bin/activate cd perf |