author     Luke Chen <luke.chen@mongodb.com>               2021-11-02 18:34:33 +1100
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-11-02 07:59:03 +0000
commit     6333a82ece20d4f7c9caeddc1f0fc08c6fbfab44 (patch)
tree       98968622b5ac8447aa8f1566ce7c86c9204a4cac
parent     318cd8acd85ebe2d48ee87339ad5d63eea4612d5 (diff)
Import wiredtiger: 6ddfc52f11222bcd47518fce6b5ed458075f9c31 from branch mongodb-master
ref: 667eff4002..6ddfc52f11 for: 5.2.0 WT-5008 Migrate Jenkins “wiredtiger-perf-btree” job to Evergreen
-rw-r--r--  src/third_party/wiredtiger/bench/wtperf/runners/update-btree.json              |  14
-rw-r--r--  src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py  |  11
-rw-r--r--  src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_config.py         |   3
-rw-r--r--  src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py            |  81
-rw-r--r--  src/third_party/wiredtiger/import.data                                          |   2
-rwxr-xr-x  src/third_party/wiredtiger/test/evergreen.yml                                   | 168
6 files changed, 212 insertions(+), 67 deletions(-)
diff --git a/src/third_party/wiredtiger/bench/wtperf/runners/update-btree.json b/src/third_party/wiredtiger/bench/wtperf/runners/update-btree.json
new file mode 100644
index 00000000000..950166d76e5
--- /dev/null
+++ b/src/third_party/wiredtiger/bench/wtperf/runners/update-btree.json
@@ -0,0 +1,14 @@
+[
+ {
+ "argument": "-o threads=((count=2,inserts=1,throttle=20000),(count=2,reads=1),(count=2,updates=1,throttle=20000))",
+ "operation": "read"
+ },
+ {
+ "argument": "-o threads=((count=2,inserts=1,throttle=20000),(count=2,reads=1,throttle=20000),(count=2,updates=1))",
+ "operation": "update"
+ },
+ {
+ "argument": "-o threads=((count=2,inserts=1),(count=2,reads=1,throttle=20000),(count=2,updates=1,throttle=20000))",
+ "operation": "insert"
+ }
+ ]
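
Each entry in this new file pairs a wtperf "-o threads=..." override with the operation whose statistics that run should report. A minimal sketch of how it is consumed, mirroring the json.load loop added to wtperf_run.py further down (the path here is illustrative):

    import json

    # Load the per-operation argument list (path is illustrative).
    with open("bench/wtperf/runners/update-btree.json", "r") as f:
        arg_file_contents = json.load(f)

    for content in arg_file_contents:
        # One wtperf run per entry: the other thread types are throttled so
        # that the named operation's throughput is what gets measured.
        print(content["operation"], "->", content["argument"])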
diff --git a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py
index 06cc1507055..dd940ce88e1 100644
--- a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py
+++ b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py
@@ -47,12 +47,13 @@ class PerfStatCollection:
def add_stat(self, perf_stat: PerfStat):
self.perf_stats[perf_stat.short_label] = perf_stat
- def find_stats(self, test_stat_path: str):
+ def find_stats(self, test_stat_path: str, operation: str):
for stat in self.perf_stats.values():
- value = find_stat(test_stat_path=test_stat_path,
- pattern=stat.pattern,
- position_of_value=stat.input_offset)
- stat.add_value(value=value)
+ if operation is None or stat.short_label == operation:
+ value = find_stat(test_stat_path=test_stat_path,
+ pattern=stat.pattern,
+ position_of_value=stat.input_offset)
+ stat.add_value(value=value)
def to_value_list(self, brief: bool):
as_list = []
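
The new operation parameter makes find_stats selective: when an operation is given, only the stat whose short_label matches it is collected; passing None preserves the old collect-everything behaviour. A simplified stand-in for the filter (the dict-of-PerfStat shape is assumed from add_stat above):

    # Simplified sketch of the filtering rule in find_stats().
    def stats_to_collect(perf_stats: dict, operation: str = None) -> list:
        # operation=None preserves the original behaviour: collect every stat.
        return [stat for label, stat in perf_stats.items()
                if operation is None or label == operation]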
diff --git a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_config.py b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_config.py
index 8fd1910d3b4..4d210a572b7 100644
--- a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_config.py
+++ b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_config.py
@@ -33,6 +33,7 @@ class WTPerfConfig:
wtperf_path: str,
home_dir: str,
test: str,
+ arg_file: str = None,
environment: str = None,
run_max: int = 1,
verbose: bool = False,
@@ -41,6 +42,7 @@ class WTPerfConfig:
self.wtperf_path: str = wtperf_path
self.home_dir: str = home_dir
self.test: str = test
+ self.arg_file = arg_file
self.environment: str = environment
self.run_max: int = run_max
self.verbose: bool = verbose
@@ -50,6 +52,7 @@ class WTPerfConfig:
def to_value_dict(self):
as_dict = {'wt_perf_path': self.wtperf_path,
'test': self.test,
+ 'arg_file': self.arg_file,
'home_dir': self.home_dir,
'environment': self.environment,
'run_max': self.run_max,
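
arg_file rides along on the config object so that both the run phase and the reporting phase can see it. A hedged construction example (all paths are placeholders):

    from wtperf_config import WTPerfConfig

    # Placeholder paths; arg_file is the only new field in this change.
    config = WTPerfConfig(wtperf_path="./wtperf",
                          home_dir="WT_TEST",
                          test="../runners/update-btree.wtperf",
                          arg_file="../runners/update-btree.json")
    print(config.to_value_dict()["arg_file"])  # ../runners/update-btree.json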
diff --git a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py
index 72d7016bc97..015dc0881f9 100644
--- a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py
+++ b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py
@@ -48,8 +48,11 @@ from perf_stat_collection import PerfStatCollection
test_stats_file = 'test.stat'
-def create_test_home_path(home: str, test_run: int):
- return '{}_{}'.format(home, test_run)
+def create_test_home_path(home: str, test_run: int, operation: str):
+ home_path = "{}_{}".format(home, test_run)
+ if operation is not None:
+ home_path += "_{}".format(operation)
+ return home_path
def create_test_stat_path(test_home_path: str):
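
The optional operation suffix keeps the three update-btree runs from reusing (and clobbering) one another's database directories. For example (values illustrative):

    # Per-run, per-operation home directories (illustrative values).
    create_test_home_path(home="WT_TEST", test_run=0, operation="read")
    # -> "WT_TEST_0_read"
    create_test_home_path(home="WT_TEST", test_run=0, operation=None)
    # -> "WT_TEST_0"  (unchanged behaviour when no operation is given)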
@@ -91,7 +94,7 @@ def get_git_info(git_working_tree_dir):
return git_info
-def construct_wtperf_command_line(wtperf: str, env: str, test: str, home: str):
+def construct_wtperf_command_line(wtperf: str, env: str, test: str, home: str, argument: str):
command_line = []
if env is not None:
command_line.append(env)
@@ -99,6 +102,8 @@ def construct_wtperf_command_line(wtperf: str, env: str, test: str, home: str):
if test is not None:
command_line.append('-O')
command_line.append(test)
+ if argument is not None:
+ command_line.append(argument)
if home is not None:
command_line.append('-h')
command_line.append(home)
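
The argument string from the JSON file is appended as a single argv element between the -O test option and the -h home option. A sketch of the resulting command line (paths illustrative):

    # Illustrative argv produced with the new 'argument' parameter.
    cmd = construct_wtperf_command_line(
        wtperf="./wtperf", env=None,
        test="../runners/update-btree.wtperf",
        home="WT_TEST_0_read",
        argument="-o threads=((count=2,reads=1))")
    # cmd == ["./wtperf", "-O", "../runners/update-btree.wtperf",
    #         "-o threads=((count=2,reads=1))", "-h", "WT_TEST_0_read"]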
@@ -134,23 +139,31 @@ def detailed_perf_stats(config: WTPerfConfig, perf_stats: PerfStatCollection):
return as_dict
-def run_test(config: WTPerfConfig, test_run: int):
- test_home = create_test_home_path(home=config.home_dir, test_run=test_run)
+def run_test_wrapper(config: WTPerfConfig, operation: str = None, argument: str = None):
+ for test_run in range(config.run_max):
+ print("Starting test {}".format(test_run))
+ run_test(config=config, test_run=test_run, operation=operation, argument=argument)
+ print("Completed test {}".format(test_run))
+
+
+def run_test(config: WTPerfConfig, test_run: int, operation: str, argument: str):
+ test_home = create_test_home_path(home=config.home_dir, test_run=test_run, operation=operation)
command_line = construct_wtperf_command_line(
wtperf=config.wtperf_path,
env=config.environment,
+ argument=argument,
test=config.test,
home=test_home)
subprocess.run(command_line)
-def process_results(config: WTPerfConfig, perf_stats: PerfStatCollection):
+def process_results(config: WTPerfConfig, perf_stats: PerfStatCollection, operation: str = None):
for test_run in range(config.run_max):
- test_home = create_test_home_path(home=config.home_dir, test_run=test_run)
+ test_home = create_test_home_path(home=config.home_dir, test_run=test_run, operation=operation)
test_stats_path = create_test_stat_path(test_home)
if config.verbose:
print('Reading test stats file: {}'.format(test_stats_path))
- perf_stats.find_stats(test_stat_path=test_stats_path)
+ perf_stats.find_stats(test_stat_path=test_stats_path, operation=operation)
def setup_perf_stats():
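
run_test_wrapper hoists the run_max loop out of main() so it can be invoked once per JSON entry (or once with defaults), and process_results rebuilds the same per-operation home paths when it scrapes each test.stat. In outline (main() below runs all tests first, then processes results):

    # Outline of the per-operation flow driven by the arg file.
    for content in arg_file_contents:      # e.g. read / update / insert entries
        run_test_wrapper(config=config,
                         operation=content["operation"],
                         argument=content["argument"])
    for content in arg_file_contents:
        process_results(config, perf_stats,
                        operation=content["operation"])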
@@ -199,6 +212,7 @@ def main():
help='reuse and reanalyse results from previous tests rather than running tests again')
parser.add_argument('-g', '--git_root', help='path of the Git working directory')
parser.add_argument('-i', '--json_info', help='additional test information in a json format string')
+ parser.add_argument('-a', '--arg_file', help='additional wtperf arguments in a json format file')
parser.add_argument('-v', '--verbose', action="store_true", help='be verbose')
args = parser.parse_args()
@@ -206,16 +220,17 @@ def main():
print('WTPerfPy')
print('========')
print("Configuration:")
- print(" WtPerf path: {}".format(args.wtperf))
- print(" Environment: {}".format(args.env))
- print(" Test path: {}".format(args.test))
- print(" Home base: {}".format(args.home))
- print(" Git root: {}".format(args.git_root))
- print(" Outfile: {}".format(args.outfile))
- print(" Runmax: {}".format(args.runmax))
- print(" JSON info {}".format(args.json_info))
- print(" Reuse results: {}".format(args.reuse))
- print(" Brief output: {}".format(args.brief_output))
+ print(" WtPerf path: {}".format(args.wtperf))
+ print(" Environment: {}".format(args.env))
+ print(" Test path: {}".format(args.test))
+ print(" Home base: {}".format(args.home))
+ print(" Addition arguments(file): {}".format(args.arg_file))
+ print(" Git root: {}".format(args.git_root))
+ print(" Outfile: {}".format(args.outfile))
+ print(" Runmax: {}".format(args.runmax))
+ print(" JSON info {}".format(args.json_info))
+ print(" Reuse results: {}".format(args.reuse))
+ print(" Brief output: {}".format(args.brief_output))
if args.wtperf is None:
sys.exit('The path to the wtperf executable is required')
@@ -223,12 +238,15 @@ def main():
sys.exit('The path to the test file is required')
if args.home is None:
sys.exit('The path to the "home" directory is required')
+ if args.arg_file and not os.path.isfile(args.arg_file):
+ sys.exit("arg_file: {} not found!".format(args.arg_file))
json_info = json.loads(args.json_info) if args.json_info else {}
config = WTPerfConfig(wtperf_path=args.wtperf,
home_dir=args.home,
test=args.test,
+ arg_file=args.arg_file,
environment=args.env,
run_max=args.runmax,
verbose=args.verbose,
@@ -237,19 +255,34 @@ def main():
perf_stats: PerfStatCollection = setup_perf_stats()
- # Run tests (if we're not reusing results)
+ if config.arg_file:
+ if args.verbose:
+ print("Reading arguments file {}".format(config.arg_file))
+ with open(config.arg_file, "r") as file:
+ arg_file_contents = json.load(file)
+
+ # Run tests
if not args.reuse:
- for test_run in range(args.runmax):
- print("Starting test {}".format(test_run))
- run_test(config=config, test_run=test_run)
- print("Completed test {}".format(test_run))
+ if config.arg_file:
+ for content in arg_file_contents:
+ if args.verbose:
+ print("Argument: {}, Operation: {}".format(content["argument"], content["operation"]))
+ run_test_wrapper(config=config, operation=content["operation"], argument=content["argument"])
+ else:
+ run_test_wrapper(config=config)
if not args.verbose and not args.outfile:
sys.exit("Enable verbosity (or provide a file path) to dump the stats. "
"Try 'python3 wtperf_run.py --help' for more information.")
- process_results(config, perf_stats)
+ # Process results
+ if config.arg_file:
+ for content in arg_file_contents:
+ process_results(config, perf_stats, operation=content["operation"])
+ else:
+ process_results(config, perf_stats)
+ # Output results
if args.brief_output:
if args.verbose:
print("Brief stats output (Evergreen compatible format):")
diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data
index dc5802b2db3..e83685a837b 100644
--- a/src/third_party/wiredtiger/import.data
+++ b/src/third_party/wiredtiger/import.data
@@ -2,5 +2,5 @@
"vendor": "wiredtiger",
"github": "wiredtiger/wiredtiger.git",
"branch": "mongodb-master",
- "commit": "667eff40028852a989c9295be0e1c8c9d323ba75"
+ "commit": "6ddfc52f11222bcd47518fce6b5ed458075f9c31"
}
diff --git a/src/third_party/wiredtiger/test/evergreen.yml b/src/third_party/wiredtiger/test/evergreen.yml
index bbf198aa017..689e7dc9c31 100755
--- a/src/third_party/wiredtiger/test/evergreen.yml
+++ b/src/third_party/wiredtiger/test/evergreen.yml
@@ -605,8 +605,8 @@ functions:
${pip3_binary} install psutil pygit2
JSON_TASK_INFO='{ "evergreen_task_info": { "is_patch": "'${is_patch}'", "task_id": "'${task_id}'" } }'
echo "JSON_TASK_INFO: $JSON_TASK_INFO"
- ${python_binary} wtperf_run.py -p ../../../cmake_build/bench/wtperf/wtperf -t ../runners/${perf-test-name}.wtperf -ho WT_TEST -m ${maxruns} -g "../.." -v -i "$JSON_TASK_INFO" -b -o test_stats/evergreen_out.json
- ${python_binary} wtperf_run.py -p ../../../cmake_build/bench/wtperf/wtperf -t ../runners/${perf-test-name}.wtperf -ho WT_TEST -m ${maxruns} -g "../.." -v -i "$JSON_TASK_INFO" -re -o test_stats/atlas_out.json
+ ${test_env_vars|} ${python_binary} wtperf_run.py -p ../../../cmake_build/bench/wtperf/wtperf -t ../runners/${perf-test-name}.wtperf -ho WT_TEST -m ${maxruns} -g "../.." -v -i "$JSON_TASK_INFO" -b -o test_stats/evergreen_out.json ${wtarg}
+ ${test_env_vars|} ${python_binary} wtperf_run.py -p ../../../cmake_build/bench/wtperf/wtperf -t ../runners/${perf-test-name}.wtperf -ho WT_TEST -m ${maxruns} -g "../.." -v -i "$JSON_TASK_INFO" -re -o test_stats/atlas_out.json ${wtarg}
- command: shell.exec
params:
@@ -2959,54 +2959,66 @@ tasks:
#############################
- name: perf-test-small-lsm
+ tags: ["lsm-perf"]
+ depends_on:
+ - name: compile
commands:
- - func: "get project"
- - func: "compile wiredtiger"
+ - func: "fetch artifacts"
- func: "generic-perf-test"
vars:
perf-test-name: small-lsm
maxruns: 3
- name: perf-test-medium-lsm
+ tags: ["lsm-perf"]
+ depends_on:
+ - name: compile
commands:
- - func: "get project"
- - func: "compile wiredtiger"
+ - func: "fetch artifacts"
- func: "generic-perf-test"
vars:
perf-test-name: medium-lsm
maxruns: 1
- name: perf-test-medium-lsm-compact
+ tags: ["lsm-perf"]
+ depends_on:
+ - name: compile
commands:
- - func: "get project"
- - func: "compile wiredtiger"
+ - func: "fetch artifacts"
- func: "generic-perf-test"
vars:
perf-test-name: medium-lsm-compact
maxruns: 1
- name: perf-test-medium-multi-lsm
+ tags: ["lsm-perf"]
+ depends_on:
+ - name: compile
commands:
- - func: "get project"
- - func: "compile wiredtiger"
+ - func: "fetch artifacts"
- func: "generic-perf-test"
vars:
perf-test-name: medium-multi-lsm
maxruns: 1
- name: perf-test-parallel-pop-lsm
+ tags: ["lsm-perf"]
+ depends_on:
+ - name: compile
commands:
- - func: "get project"
- - func: "compile wiredtiger"
+ - func: "fetch artifacts"
- func: "generic-perf-test"
vars:
perf-test-name: parallel-pop-lsm
maxruns: 1
- name: perf-test-update-lsm
+ tags: ["lsm-perf"]
+ depends_on:
+ - name: compile
commands:
- - func: "get project"
- - func: "compile wiredtiger"
+ - func: "fetch artifacts"
- func: "generic-perf-test"
vars:
perf-test-name: update-lsm
@@ -3017,41 +3029,116 @@ tasks:
###############################
- name: perf-test-small-btree
+ tags: ["btree-perf"]
+ depends_on:
+ - name: compile
commands:
- - func: "get project"
- - func: "compile wiredtiger"
+ - func: "fetch artifacts"
- func: "generic-perf-test"
vars:
perf-test-name: small-btree
maxruns: 1
- name: perf-test-small-btree-backup
+ tags: ["btree-perf"]
+ depends_on:
+ - name: compile
commands:
- - func: "get project"
- - func: "compile wiredtiger"
+ - func: "fetch artifacts"
- func: "generic-perf-test"
vars:
perf-test-name: small-btree-backup
maxruns: 1
- name: perf-test-medium-btree
+ tags: ["btree-perf"]
+ depends_on:
+ - name: compile
commands:
- - func: "get project"
- - func: "compile wiredtiger"
+ - func: "fetch artifacts"
- func: "generic-perf-test"
vars:
perf-test-name: medium-btree
maxruns: 3
- name: perf-test-medium-btree-backup
+ tags: ["btree-perf"]
+ depends_on:
+ - name: compile
commands:
- - func: "get project"
- - func: "compile wiredtiger"
+ - func: "fetch artifacts"
- func: "generic-perf-test"
vars:
perf-test-name: medium-btree-backup
maxruns: 3
+ - name: perf-test-parallel-pop-btree
+ tags: ["btree-perf"]
+ depends_on:
+ - name: compile
+ commands:
+ - func: "fetch artifacts"
+ - func: "generic-perf-test"
+ vars:
+ perf-test-name: parallel-pop-btree
+ maxruns: 1
+
+ - name: perf-test-update-only-btree
+ tags: ["btree-perf"]
+ depends_on:
+ - name: compile
+ commands:
+ - func: "fetch artifacts"
+ - func: "generic-perf-test"
+ vars:
+ perf-test-name: update-only-btree
+ maxruns: 3
+
+ - name: perf-test-update-btree
+ tags: ["btree-perf"]
+ depends_on:
+ - name: compile
+ commands:
+ - func: "fetch artifacts"
+ - func: "generic-perf-test"
+ vars:
+ perf-test-name: update-btree
+ maxruns: 1
+ wtarg: "-a ../runners/update-btree.json"
+
+ - name: perf-test-update-large-record-btree
+ tags: ["btree-perf"]
+ depends_on:
+ - name: compile
+ commands:
+ - func: "fetch artifacts"
+ - func: "generic-perf-test"
+ vars:
+ perf-test-name: update-large-record-btree
+ maxruns: 3
+
+ - name: perf-test-modify-large-record-btree
+ tags: ["btree-perf"]
+ depends_on:
+ - name: compile
+ commands:
+ - func: "fetch artifacts"
+ - func: "generic-perf-test"
+ vars:
+ perf-test-name: modify-large-record-btree
+ maxruns: 3
+
+ - name: perf-test-modify-force-update-large-record-btree
+ tags: ["btree-perf"]
+ depends_on:
+ - name: compile
+ commands:
+ - func: "fetch artifacts"
+ - func: "generic-perf-test"
+ vars:
+ perf-test-name: modify-force-update-large-record-btree
+ maxruns: 3
+
#######################################
# Buildvariants #
#######################################
@@ -3309,8 +3396,18 @@ buildvariants:
run_on:
- ubuntu2004-large
expansions:
- test_env_vars: LD_LIBRARY_PATH=$(pwd) WT_BUILDDIR=$(pwd)
- posix_configure_flags: -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/mongodbtoolchain_v3_gcc.cmake -DCMAKE_C_FLAGS="-ggdb" -DHAVE_DIAGNOSTIC=1 -DENABLE_ZLIB=1 -DENABLE_SNAPPY=1 -DENABLE_STRICT=1 -DCMAKE_INSTALL_PREFIX=$(pwd)/LOCAL_INSTALL
+ test_env_vars:
+ top_dir=$(git rev-parse --show-toplevel)
+ top_builddir=$top_dir/cmake_build
+ LD_LIBRARY_PATH=$top_builddir/test/utility/:$top_builddir
+ posix_configure_flags:
+ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/mongodbtoolchain_v3_gcc.cmake
+ -DCMAKE_C_FLAGS="-ggdb"
+ -DHAVE_DIAGNOSTIC=1
+ -DENABLE_ZLIB=1
+ -DENABLE_SNAPPY=1
+ -DENABLE_STRICT=1
+ -DCMAKE_INSTALL_PREFIX=$(pwd)/LOCAL_INSTALL
python_binary: '/opt/mongodbtoolchain/v3/bin/python3'
pip3_binary: '/opt/mongodbtoolchain/v3/bin/pip3'
virtualenv_binary: '/opt/mongodbtoolchain/v3/bin/virtualenv'
@@ -3319,19 +3416,16 @@ buildvariants:
make_command: ninja
is_cmake_build: true
tasks:
- # btree tests
- - name: perf-test-small-btree
- - name: perf-test-small-btree-backup
- - name: perf-test-medium-btree
- - name: perf-test-medium-btree-backup
- # lsm tests
- - name: perf-test-small-lsm
- - name: perf-test-medium-lsm
- - name: perf-test-medium-lsm-compact
- - name: perf-test-medium-multi-lsm
- - name: perf-test-parallel-pop-lsm
- - name: perf-test-update-lsm
-
+ - name: compile
+ - name: ".btree-perf"
+ - name: ".lsm-perf"
+ display_tasks:
+ - name: Wiredtiger-perf-btree-jobs
+ execution_tasks:
+ - ".btree-perf"
+ - name: Wiredtiger-perf-lsm-jobs
+ execution_tasks:
+ - ".lsm-perf"
- name: large-scale-tests
display_name: "Large scale tests"