summaryrefslogtreecommitdiff
path: root/src/third_party/wiredtiger/bench
diff options
context:
space:
mode:
author: Etienne Petrel <etienne.petrel@mongodb.com> 2021-11-10 05:42:07 +0000
committer: Evergreen Agent <no-reply@evergreen.mongodb.com> 2021-11-10 08:34:06 +0000
commit: ede4be834ed63788912cc66bbf86912984007814 (patch)
tree: 441f7a1572bb85735d0ee8843d7c2ccb672b13a4 /src/third_party/wiredtiger/bench
parent: 83b4d36659a53e0ca507bda9c97e5cc9cf510b82 (diff)
download: mongo-ede4be834ed63788912cc66bbf86912984007814.tar.gz
Import wiredtiger: 28109e1c6a2444d1ebcf8ef87ddc6944cf88eb61 from branch mongodb-master
ref: 5f92dc4c80..28109e1c6a for: 5.2.0 WT-8346 Migrate Jenkins “wiredtiger-perf-stress” remaining tests to Evergreen
Diffstat (limited to 'src/third_party/wiredtiger/bench')
-rw-r--r-- src/third_party/wiredtiger/bench/wtperf/runners/evict-btree-stress-multi.json | 6
-rw-r--r-- src/third_party/wiredtiger/bench/wtperf/runners/evict-fairness.json | 6
-rw-r--r-- src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat.py | 80
-rw-r--r-- src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py | 34
-rw-r--r-- src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py | 35
5 files changed, 117 insertions, 44 deletions
diff --git a/src/third_party/wiredtiger/bench/wtperf/runners/evict-btree-stress-multi.json b/src/third_party/wiredtiger/bench/wtperf/runners/evict-btree-stress-multi.json
new file mode 100644
index 00000000000..586b7a2e396
--- /dev/null
+++ b/src/third_party/wiredtiger/bench/wtperf/runners/evict-btree-stress-multi.json
@@ -0,0 +1,6 @@
+[
+ {
+ "arguments": null,
+ "operations": ["warnings", "max_latencies"]
+ }
+]
diff --git a/src/third_party/wiredtiger/bench/wtperf/runners/evict-fairness.json b/src/third_party/wiredtiger/bench/wtperf/runners/evict-fairness.json
new file mode 100644
index 00000000000..68b96f67d4c
--- /dev/null
+++ b/src/third_party/wiredtiger/bench/wtperf/runners/evict-fairness.json
@@ -0,0 +1,6 @@
+[
+ {
+ "arguments": ["-C statistics_log=(wait=10000,on_close=true,json=false,sources=[file:])", "-o reopen_connection=false"],
+ "operations": ["eviction_page_seen"]
+ }
+]
diff --git a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat.py b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat.py
index 7aab7d20b68..51787e75f9d 100644
--- a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat.py
+++ b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat.py
@@ -27,20 +27,25 @@
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
+import glob, re, os
+import json
+
class PerfStat:
def __init__(self,
short_label: str,
- pattern: str,
- input_offset: int,
output_label: str,
+ input_offset: int = 0,
output_precision: int = 0,
+ pattern: str = None,
+ stat_file: str = 'test.stat',
conversion_function=int):
self.short_label: str = short_label
- self.pattern: str = pattern
- self.input_offset: int = input_offset
self.output_label: str = output_label
+ self.input_offset: int = input_offset
self.output_precision: int = output_precision
+ self.pattern: str = pattern
+ self.stat_file = stat_file
self.conversion_function = conversion_function
self.values = []
@@ -49,6 +54,14 @@ class PerfStat:
converted_value = self.conversion_function(val)
self.values.append(converted_value)
+ def find_stat(self, test_stat_path: str):
+ matches = []
+ for line in open(test_stat_path):
+ match = re.search(self.pattern, line)
+ if match:
+ matches.append(float(line.split()[self.input_offset]))
+ return matches
+
def average(self, vals):
return self.conversion_function(sum(vals) / len(vals))
@@ -60,6 +73,15 @@ class PerfStat:
else:
return self.average(self.values)
+ def get_value_list(self, brief: bool):
+ as_dict = {
+ 'name': self.output_label,
+ 'value': self.get_value()
+ }
+ if not brief:
+ as_dict['values'] = self.values
+ return [as_dict]
+
def are_values_all_zero(self):
result = True
for value in self.values:
@@ -74,8 +96,58 @@ class PerfStatMin(PerfStat):
min_3_vals = sorted(self.values)[:3]
return self.average(min_3_vals)
+
class PerfStatMax(PerfStat):
def get_value(self):
"""Return the averaged maximum of all gathered values"""
max_3_vals = sorted(self.values)[-3:]
return self.average(max_3_vals)
+
+
+class PerfStatCount(PerfStat):
+ def find_stat(self, test_stat_path: str):
+ """Return the total number of times a pattern matched"""
+ total = 0
+ test_stat_path = glob.glob(test_stat_path)[0]
+ for line in open(test_stat_path):
+ match = re.search(self.pattern, line)
+ if match:
+ total += 1
+ return [total]
+
+
+class PerfStatLatency(PerfStat):
+ def __init__(self,
+ short_label: str,
+ stat_file:str,
+ output_label: str,
+ num_max: int):
+ super().__init__(short_label=short_label,
+ stat_file=stat_file,
+ output_label=output_label)
+ self.num_max = num_max
+
+ def find_stat(self, test_stat_path: str):
+ values = []
+ if os.path.isfile(test_stat_path):
+ for line in open(test_stat_path):
+ as_dict = json.loads(line)
+ values.append(as_dict["wtperf"]["read"]["max latency"])
+ values.append(as_dict["wtperf"]["update"]["max latency"])
+ return values
+
+ def get_value(self, nth_max: int):
+ """Return the nth maximum number from all the gathered values"""
+ return sorted(self.values)[-nth_max]
+
+ def get_value_list(self, brief: bool):
+ as_list = []
+ for i in range(1, self.num_max + 1):
+ as_dict = {
+ 'name': self.output_label + str(i),
+ 'value': self.get_value(i)
+ }
+ if not brief:
+ as_dict['values'] = self.values
+ as_list.append(as_dict)
+ return as_list
diff --git a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py
index db3652fcab9..d62441f69a0 100644
--- a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py
+++ b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/perf_stat_collection.py
@@ -28,19 +28,13 @@
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
-import re
-from typing import List
-
+import os
from perf_stat import PerfStat
+from typing import List
-def find_stat(test_stat_path: str, pattern: str, position_of_value: int):
- matches = []
- for line in open(test_stat_path):
- match = re.search(pattern, line)
- if match:
- matches.append(float(line.split()[position_of_value]))
- return matches
+def create_test_stat_path(test_home_path: str, test_stats_file: str):
+ return os.path.join(test_home_path, test_stats_file)
class PerfStatCollection:
@@ -50,23 +44,17 @@ class PerfStatCollection:
def add_stat(self, perf_stat: PerfStat):
self.perf_stats[perf_stat.short_label] = perf_stat
- def find_stats(self, test_stat_path: str, operations: List[str]):
+ def find_stats(self, test_home: str, operations: List[str]):
for stat in self.perf_stats.values():
if not operations or stat.short_label in operations:
- values = find_stat(test_stat_path=test_stat_path,
- pattern=stat.pattern,
- position_of_value=stat.input_offset)
+ test_stat_path = create_test_stat_path(test_home, stat.stat_file)
+ values = stat.find_stat(test_stat_path=test_stat_path)
stat.add_values(values=values)
def to_value_list(self, brief: bool):
- as_list = []
+ stats_list = []
for stat in self.perf_stats.values():
if not stat.are_values_all_zero():
- as_dict = {
- 'name': stat.output_label,
- 'value': stat.get_value()
- }
- if not brief:
- as_dict['values'] = stat.values
- as_list.append(as_dict)
- return as_list
+ stat_list = stat.get_value_list(brief = brief)
+ stats_list.extend(stat_list)
+ return stats_list
diff --git a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py
index dc84f31ac5d..0ae18756b53 100644
--- a/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py
+++ b/src/third_party/wiredtiger/bench/wtperf/wtperf_run_py/wtperf_run.py
@@ -29,23 +29,18 @@
# OTHER DEALINGS IN THE SOFTWARE.
import argparse
-import json
import os.path
-import subprocess
-import sys
import platform
import psutil
+import subprocess
+import sys
+import json
+from perf_stat import PerfStat, PerfStatCount, PerfStatLatency, PerfStatMax, PerfStatMin
+from perf_stat_collection import PerfStatCollection
from pygit2 import discover_repository, Repository
from pygit2 import GIT_SORT_NONE
from typing import List
-
from wtperf_config import WTPerfConfig
-from perf_stat import PerfStat, PerfStatMax, PerfStatMin
-from perf_stat_collection import PerfStatCollection
-
-# the 'test.stat' file is where wt-perf.c writes out it's statistics
-# (within the directory specified by the 'home' parameter)
-test_stats_file = 'test.stat'
def create_test_home_path(home: str, test_run: int, operations: List[str] = None):
@@ -56,10 +51,6 @@ def create_test_home_path(home: str, test_run: int, operations: List[str] = None
return home_path
-def create_test_stat_path(test_home_path: str):
- return os.path.join(test_home_path, test_stats_file)
-
-
def get_git_info(git_working_tree_dir):
repository_path = discover_repository(git_working_tree_dir)
assert repository_path is not None
@@ -158,10 +149,9 @@ def run_test(config: WTPerfConfig, test_run: int, operations: List[str] = None,
def process_results(config: WTPerfConfig, perf_stats: PerfStatCollection, operations: List[str] = None):
for test_run in range(config.run_max):
test_home = create_test_home_path(home=config.home_dir, test_run=test_run, operations=operations)
- test_stats_path = create_test_stat_path(test_home)
if config.verbose:
- print('Reading test stats file: {}'.format(test_stats_path))
- perf_stats.find_stats(test_stat_path=test_stats_path, operations=operations)
+ print('Reading stats from {} directory.'.format(test_home))
+ perf_stats.find_stats(test_home=test_home, operations=operations)
def setup_perf_stats():
@@ -200,6 +190,17 @@ def setup_perf_stats():
pattern=r'updates,',
input_offset=8,
output_label='Min update throughput'))
+ perf_stats.add_stat(PerfStatCount(short_label="warnings",
+ pattern='WARN',
+ output_label='Warnings'))
+ perf_stats.add_stat(PerfStatLatency(short_label="max_latencies",
+ stat_file='monitor.json',
+ output_label='Latency Max',
+ num_max = 5))
+ perf_stats.add_stat(PerfStatCount(short_label="eviction_page_seen",
+ stat_file='WiredTigerStat*',
+ pattern='[0-9].wt cache: pages seen by eviction',
+ output_label='Pages seen by eviction'))
return perf_stats