author    Alison Felizzi <alison.felizzi@mongodb.com>        2021-10-28 04:09:39 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>   2021-10-28 04:36:16 +0000
commit    4cebb004066371fc84459c9b4496529bbc118f2b (patch)
tree      797ff9a200f67dab0c8f95abce7c7bf9b284f50f
parent    dd8a6b97b38df896be09a4439c0ee71f4c1bf75e (diff)
download  mongo-4cebb004066371fc84459c9b4496529bbc118f2b.tar.gz
Import wiredtiger: 92f4687823f4828c9bd73a1caa9dd6425788e61e from branch mongodb-master
ref: 7b006681ce..92f4687823
for: 5.2.0

WT-8271 Capture git branch and commit status in perf test result output
-rw-r--r--  src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat.py            |  7
-rw-r--r--  src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py | 14
-rw-r--r--  src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_config.py        | 10
-rw-r--r--  src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py           | 65
-rw-r--r--  src/third_party/wiredtiger/import.data                                        |  2
-rwxr-xr-x  src/third_party/wiredtiger/test/evergreen.yml                                 |  8
6 files changed, 81 insertions(+), 25 deletions(-)
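
At a glance, this change teaches the wtperf Python runner to attach Git metadata (and optional Evergreen task info) to its performance result output. Based on the get_git_info() helper added further down, the detailed JSON output gains a 'git' block shaped roughly like the sketch below; the field names come from this diff, while the values are purely illustrative.

    # Illustrative only: shape of the 'git' block that detailed_perf_stats() gains
    # from get_git_info(); the example values below are made up.
    example_git_block = {
        'head_commit': {
            'hash': '92f4687823f4828c9bd73a1caa9dd6425788e61e',
            'message': 'WT-8271 Capture git branch and commit status in perf test result output',
            'author': 'Alison Felizzi'
        },
        'branch': {'name': 'mongodb-master'},   # repo.head.shorthand
        'stats': {'files_changed': 6},          # repo.diff().stats.files_changed
        'num_commits': 12345                    # commits reachable from HEAD
    }
    print(example_git_block)
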
diff --git a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat.py b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat.py
index 9d9d64971c9..76e9b651035 100644
--- a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat.py
+++ b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat.py
@@ -73,3 +73,10 @@ class PerfStat:
             return self.get_skipminmax_average()
         else:
             return self.get_average()
+
+    def are_values_all_zero(self):
+        result = True
+        for value in self.values:
+            if value != 0:
+                result = False
+        return result
diff --git a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py
index 2dd3bb58e45..06cc1507055 100644
--- a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py
+++ b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py
@@ -57,10 +57,12 @@ class PerfStatCollection:
     def to_value_list(self, brief: bool):
         as_list = []
         for stat in self.perf_stats.values():
-            as_dict = {}
-            as_dict['name'] = stat.output_label
-            as_dict['value'] = stat.get_core_average()
-            if not brief:
-                as_dict['values'] = stat.values
-            as_list.append(as_dict)
+            if not stat.are_values_all_zero():
+                as_dict = {
+                    'name': stat.output_label,
+                    'value': stat.get_core_average()
+                }
+                if not brief:
+                    as_dict['values'] = stat.values
+                as_list.append(as_dict)
         return as_list
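
The practical effect of the to_value_list() change above is that a stat whose samples are all zero no longer appears in the emitted metrics list. A minimal stand-alone sketch of that filtering follows; _Stat is a hypothetical stand-in used only for illustration (the real code uses PerfStat, whose constructor is not shown in this diff, and a smarter core average).

    # Stand-alone illustration of the all-zero filtering added above.
    class _Stat:
        def __init__(self, output_label, values):
            self.output_label = output_label
            self.values = values

        def are_values_all_zero(self):
            return all(value == 0 for value in self.values)

        def get_core_average(self):
            return sum(self.values) / len(self.values)

    stats = [_Stat('Load time', [12, 11, 13]), _Stat('Warmup', [0, 0, 0])]
    metrics = [{'name': s.output_label, 'value': s.get_core_average()}
               for s in stats if not s.are_values_all_zero()]
    print(metrics)  # only 'Load time' survives; the all-zero 'Warmup' stat is dropped
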
diff --git a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_config.py b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_config.py
index a8d3548250d..8fd1910d3b4 100644
--- a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_config.py
+++ b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_config.py
@@ -35,13 +35,17 @@ class WTPerfConfig:
                  test: str,
                  environment: str = None,
                  run_max: int = 1,
-                 verbose: bool = False):
+                 verbose: bool = False,
+                 git_root: str = None,
+                 json_info: dict = {}):
         self.wtperf_path: str = wtperf_path
         self.home_dir: str = home_dir
         self.test: str = test
         self.environment: str = environment
         self.run_max: int = run_max
         self.verbose: bool = verbose
+        self.git_root: str = git_root
+        self.json_info: dict = json_info
 
     def to_value_dict(self):
         as_dict = {'wt_perf_path': self.wtperf_path,
@@ -49,5 +53,7 @@ class WTPerfConfig:
                    'home_dir': self.home_dir,
                    'environment': self.environment,
                    'run_max': self.run_max,
-                   'verbose': self.verbose}
+                   'verbose': self.verbose,
+                   'git_root': self.git_root,
+                   'json_info': self.json_info}
         return as_dict
diff --git a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py
index c919b8bcbef..72d7016bc97 100644
--- a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py
+++ b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py
@@ -36,6 +36,8 @@ import subprocess
 import sys
 import platform
 import psutil
+from pygit2 import discover_repository, Repository
+from pygit2 import GIT_SORT_TOPOLOGICAL, GIT_SORT_REVERSE, GIT_SORT_NONE
 from wtperf_config import WTPerfConfig
 from perf_stat import PerfStat
@@ -62,6 +64,33 @@ def find_stat(test_stat_path: str, pattern: str, position_of_value: int):
     return 0
 
 
+def get_git_info(git_working_tree_dir):
+    repository_path = discover_repository(git_working_tree_dir)
+    assert repository_path is not None
+
+    repo = Repository(repository_path)
+    commits = list(repo.walk(repo.head.target, GIT_SORT_NONE))
+    head_commit = commits[0]
+    diff = repo.diff()
+
+    git_info = {
+        'head_commit': {
+            'hash': head_commit.hex,
+            'message': head_commit.message,
+            'author': head_commit.author.name
+        },
+        'branch': {
+            'name': repo.head.shorthand
+        },
+        'stats': {
+            'files_changed': diff.stats.files_changed,
+        },
+        'num_commits': len(commits)
+    }
+
+    return git_info
+
+
 def construct_wtperf_command_line(wtperf: str, env: str, test: str, home: str):
     command_line = []
     if env is not None:
@@ -77,15 +106,12 @@ def construct_wtperf_command_line(wtperf: str, env: str, test: str, home: str):
 def brief_perf_stats(config: WTPerfConfig, perf_stats: PerfStatCollection):
-    as_list = []
-    as_list.append(
-        {
-            "info": {
-                "test_name": os.path.basename(config.test)
-            },
-            "metrics": perf_stats.to_value_list(brief=True)
-        }
-    )
+    as_list = [{
+        "info": {
+            "test_name": os.path.basename(config.test)
+        },
+        "metrics": perf_stats.to_value_list(brief=True)
+    }]
     return as_list
@@ -101,6 +127,10 @@ def detailed_perf_stats(config: WTPerfConfig, perf_stats: PerfStatCollection):
             'platform': platform.platform()
         }
     }
+
+    if config.git_root:
+        as_dict['git'] = get_git_info(config.git_root)
+
     return as_dict
@@ -111,7 +141,6 @@ def run_test(config: WTPerfConfig, test_run: int):
         env=config.environment,
         test=config.test,
         home=test_home)
-    # print('Command Line for test: {}'.format(command_line))
     subprocess.run(command_line)
@@ -161,13 +190,15 @@ def main():
     parser.add_argument('-e', '--env', help='any environment variables that need to be set for running wtperf')
     parser.add_argument('-t', '--test', help='path of the wtperf test to execute')
     parser.add_argument('-o', '--outfile', help='path of the file to write test output to')
-    parser.add_argument('-b', '--brief_output', action="store_true", help='brief(not detailed) test output')
+    parser.add_argument('-b', '--brief_output', action="store_true", help='brief (not detailed) test output')
     parser.add_argument('-m', '--runmax', type=int, default=1, help='maximum number of times to run the test')
     parser.add_argument('-ho', '--home', help='path of the "home" directory that wtperf will use')
     parser.add_argument('-re',
                         '--reuse',
                         action="store_true",
                         help='reuse and reanalyse results from previous tests rather than running tests again')
+    parser.add_argument('-g', '--git_root', help='path of the Git working directory')
+    parser.add_argument('-i', '--json_info', help='additional test information in a json format string')
     parser.add_argument('-v', '--verbose', action="store_true", help='be verbose')
     args = parser.parse_args()
@@ -179,9 +210,12 @@ def main():
print(" Environment: {}".format(args.env))
print(" Test path: {}".format(args.test))
print(" Home base: {}".format(args.home))
+ print(" Git root: {}".format(args.git_root))
print(" Outfile: {}".format(args.outfile))
print(" Runmax: {}".format(args.runmax))
+ print(" JSON info {}".format(args.json_info))
print(" Reuse results: {}".format(args.reuse))
+ print(" Brief output: {}".format(args.brief_output))
if args.wtperf is None:
sys.exit('The path to the wtperf executable is required')
@@ -190,12 +224,16 @@ def main():
     if args.home is None:
         sys.exit('The path to the "home" directory is required')
 
+    json_info = json.loads(args.json_info) if args.json_info else {}
+
     config = WTPerfConfig(wtperf_path=args.wtperf,
                           home_dir=args.home,
                           test=args.test,
                           environment=args.env,
                           run_max=args.runmax,
-                          verbose=args.verbose)
+                          verbose=args.verbose,
+                          git_root=args.git_root,
+                          json_info=json_info)
 
     perf_stats: PerfStatCollection = setup_perf_stats()
@@ -207,7 +245,8 @@ def main():
print("Completed test {}".format(test_run))
if not args.verbose and not args.outfile:
- sys.exit("Enable verbosity (or provide a file path) to dump the stats. Try 'python3 wtperf_run.py --help' for more information.")
+ sys.exit("Enable verbosity (or provide a file path) to dump the stats. "
+ "Try 'python3 wtperf_run.py --help' for more information.")
process_results(config, perf_stats)
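
To sanity-check what the new get_git_info() helper would report on a given checkout, a rough smoke test might look like the sketch below. It assumes pygit2 is installed and that it is run from inside a Git working tree; it mirrors the pygit2 calls made above rather than importing wtperf_run.py.

    # Rough smoke test mirroring get_git_info(); assumes pygit2 is installed and the
    # current directory is inside a Git working tree.
    from pygit2 import discover_repository, Repository, GIT_SORT_NONE

    repo_path = discover_repository('.')   # walks up until a .git directory is found
    assert repo_path is not None, 'not inside a Git working tree'

    repo = Repository(repo_path)
    commits = list(repo.walk(repo.head.target, GIT_SORT_NONE))
    head = commits[0]

    print('branch:       ', repo.head.shorthand)
    print('head commit:  ', head.hex)
    print('author:       ', head.author.name)
    print('files changed:', repo.diff().stats.files_changed)
    print('num commits:  ', len(commits))
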
diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data
index 7e81d54a2f2..4b93af9f68a 100644
--- a/src/third_party/wiredtiger/import.data
+++ b/src/third_party/wiredtiger/import.data
@@ -2,5 +2,5 @@
"vendor": "wiredtiger",
"github": "wiredtiger/wiredtiger.git",
"branch": "mongodb-master",
- "commit": "7b006681ce7fbefdb7f96597477873a0001a6edb"
+ "commit": "92f4687823f4828c9bd73a1caa9dd6425788e61e"
}
diff --git a/src/third_party/wiredtiger/test/evergreen.yml b/src/third_party/wiredtiger/test/evergreen.yml
index ae1201d1bd8..74415354a12 100755
--- a/src/third_party/wiredtiger/test/evergreen.yml
+++ b/src/third_party/wiredtiger/test/evergreen.yml
@@ -602,9 +602,11 @@ functions:
set -o verbose
${virtualenv_binary} -p ${python_binary} venv
source venv/bin/activate
- ${pip3_binary} install psutil
- ${python_binary} wtperf_run.py -p ../../../cmake_build/bench/wtperf/wtperf -t ../runners/${perf-test-name}.wtperf -ho WT_TEST -m ${maxruns} -b -v -o test_stats/evergreen_out.json
- ${python_binary} wtperf_run.py -p ../../../cmake_build/bench/wtperf/wtperf -t ../runners/${perf-test-name}.wtperf -ho WT_TEST -m ${maxruns} -re -v -o test_stats/atlas_out.json
+ ${pip3_binary} install psutil pygit2
+ JSON_TASK_INFO='{ "evergreen_task_info": { "is_patch": "'${is_patch}'", "task_id": "'${task_id}'" } }'
+ echo "JSON_TASK_INFO: $JSON_TASK_INFO"
+ ${python_binary} wtperf_run.py -p ../../../cmake_build/bench/wtperf/wtperf -t ../runners/${perf-test-name}.wtperf -ho WT_TEST -m ${maxruns} -g "../.." -v -i "$JSON_TASK_INFO" -b -o test_stats/evergreen_out.json
+ ${python_binary} wtperf_run.py -p ../../../cmake_build/bench/wtperf/wtperf -t ../runners/${perf-test-name}.wtperf -ho WT_TEST -m ${maxruns} -g "../.." -v -i "$JSON_TASK_INFO" -re -o test_stats/atlas_out.json
- command: shell.exec
params:
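
One note on the -i plumbing added here: the JSON_TASK_INFO string built in the Evergreen script is handed to wtperf_run.py via -i and parsed with json.loads(), exactly as in the main() change above. A small sketch with made-up expansion values (the real ${is_patch} and ${task_id} are filled in by Evergreen):

    # Example of the -i/--json_info payload after Evergreen expands ${is_patch} and
    # ${task_id}; the values here are made up, only the shape matches the yml above.
    import json

    json_task_info = '{ "evergreen_task_info": { "is_patch": "true", "task_id": "example_task_id" } }'
    json_info = json.loads(json_task_info)   # mirrors: json.loads(args.json_info)
    print(json_info['evergreen_task_info']['task_id'])
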