Diffstat (limited to 'bench/wtperf')
-rw-r--r--  bench/wtperf/config.c                                       2
-rw-r--r--  bench/wtperf/runners/checkpoint-stress.wtperf              15
-rw-r--r--  bench/wtperf/runners/multi-btree-read-heavy-stress.wtperf  21
3 files changed, 31 insertions, 7 deletions
diff --git a/bench/wtperf/config.c b/bench/wtperf/config.c
index 48127afc10e..4c7b17f102a 100644
--- a/bench/wtperf/config.c
+++ b/bench/wtperf/config.c
@@ -830,7 +830,7 @@ config_consolidate(CONFIG *cfg)
CONFIG_QUEUE_ENTRY *conf_line, *test_line, *tmp;
char *string_key;
- /*
+ /*
* This loop iterates over the config queue and for each entry checks if an
* entry later in the queue has the same key. If a match is found then
* the current queue entry is removed and we continue.
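For reference, the de-duplication the comment describes is a last-writer-wins pass over a list of key/value config lines. Below is a minimal standalone sketch of that pattern; the types and helper names are illustrative and not WiredTiger's actual CONFIG_QUEUE_ENTRY machinery.

#include <stdlib.h>
#include <string.h>

/* Hypothetical config line: a key/value pair in a singly-linked list. */
struct conf_line {
	char *key;
	char *value;
	struct conf_line *next;
};

/*
 * Drop every entry whose key reappears later in the list, so the last
 * setting for a key wins.  (Ownership of the key/value strings is elided.)
 */
static void
consolidate_sketch(struct conf_line **headp)
{
	struct conf_line **curp, *dead, *later;

	for (curp = headp; *curp != NULL;) {
		/* Does any later entry use the same key? */
		for (later = (*curp)->next; later != NULL; later = later->next)
			if (strcmp(later->key, (*curp)->key) == 0)
				break;
		if (later != NULL) {
			/* A later duplicate exists: unlink the current entry. */
			dead = *curp;
			*curp = dead->next;
			free(dead);
		} else
			curp = &(*curp)->next;
	}
}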
diff --git a/bench/wtperf/runners/checkpoint-stress.wtperf b/bench/wtperf/runners/checkpoint-stress.wtperf
index d992f69eb67..0c98a0c2db0 100644
--- a/bench/wtperf/runners/checkpoint-stress.wtperf
+++ b/bench/wtperf/runners/checkpoint-stress.wtperf
@@ -1,23 +1,26 @@
# A stress configuration to create long running checkpoints while doing a lot
# of updates.
-conn_config="cache_size=10GB,log=(enabled=false)"
+conn_config="cache_size=16GB,eviction=(threads_max=4),log=(enabled=false)"
table_config="leaf_page_max=32k,internal_page_max=16k,allocation_size=4k,split_pct=90,type=file"
-# Enough data to fill the cache. 100 million 1k records results in two ~6GB
+# Enough data to fill the cache. 150 million 1k records results in two ~11GB
# tables
-icount=100000000
+icount=150000000
create=true
compression="snappy"
-populate_threads=1
checkpoint_interval=60
checkpoint_threads=1
+populate_threads=1
report_interval=10
# Run for a longer duration to ensure checkpoints are completing.
run_time=600
+# Sampling isn't currently used by any automated test; keep it here for
+# future use.
+sample_interval=10
+sample_rate=1
# MongoDB always has multiple tables, and checkpoints behave differently when
# there is more than a single table.
table_count=2
threads=((count=6,updates=1))
value_sz=1000
-sample_interval=10
-sample_rate=1
+# Wait for the throughput to stabilize
warmup=120
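The conn_config and table_config strings in this runner are passed through to the WiredTiger API essentially verbatim, so the checkpoint-stress settings above correspond roughly to the C sketch below. This is a hedged illustration only: the home directory (which must already exist) and table name are made up, error handling is minimal, and compression=snappy would additionally require loading the snappy compressor extension, which is omitted here.

#include <stdlib.h>
#include <wiredtiger.h>

int
main(void)
{
	WT_CONNECTION *conn;
	WT_SESSION *session;

	/* conn_config: a cache deliberately smaller than the data set. */
	if (wiredtiger_open("WT_TEST", NULL,
	    "create,cache_size=16GB,eviction=(threads_max=4),log=(enabled=false)",
	    &conn) != 0)
		return (EXIT_FAILURE);
	if (conn->open_session(conn, NULL, NULL, &session) != 0)
		return (EXIT_FAILURE);

	/* table_config: one of the table_count=2 tables wtperf would create. */
	if (session->create(session, "table:checkpoint-stress-0",
	    "leaf_page_max=32k,internal_page_max=16k,allocation_size=4k,"
	    "split_pct=90,type=file") != 0)
		return (EXIT_FAILURE);

	/*
	 * checkpoint_threads=1 with checkpoint_interval=60 amounts to one
	 * thread issuing this call every 60 seconds while updates run.
	 */
	if (session->checkpoint(session, NULL) != 0)
		return (EXIT_FAILURE);

	return (conn->close(conn, NULL) == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}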
diff --git a/bench/wtperf/runners/multi-btree-read-heavy-stress.wtperf b/bench/wtperf/runners/multi-btree-read-heavy-stress.wtperf
new file mode 100644
index 00000000000..d7b27f8fda4
--- /dev/null
+++ b/bench/wtperf/runners/multi-btree-read-heavy-stress.wtperf
@@ -0,0 +1,21 @@
+# Drive a constant high workload through, even if WiredTiger isn't keeping up,
+# by dividing the workload across a lot of threads. This needs to be tuned to
+# the particular machine so that the workload is close to capacity in the
+# steady state, but not overwhelming.
+conn_config="cache_size=20GB,session_max=1000,eviction=(threads_min=4,threads_max=4),log=(enabled=false),transaction_sync=(enabled=false),checkpoint_sync=true,checkpoint=(wait=60),statistics=(fast),statistics_log=(json,wait=1)"
+table_config="allocation_size=4k,memory_page_max=10MB,prefix_compression=false,split_pct=90,leaf_page_max=32k,internal_page_max=16k,type=file"
+# Divide original icount by database_count.
+table_count=8
+compression=snappy
+icount=200000000
+populate_threads=1
+reopen_connection=false
+log_like_table=true
+#pareto=5
+report_interval=1
+run_time=3600
+threads=((count=10,throttle=250,inserts=1),(count=10,throttle=250,updates=1),(count=80,throttle=600,reads=1,ops_per_txn=3))
+value_sz=500
+sample_interval=5
+sample_rate=1
+
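For scale: assuming wtperf's throttle value caps each thread at that many operations per second, the threads line above targets roughly 10 * 250 = 2,500 inserts/s, 10 * 250 = 2,500 updates/s and 80 * 600 = 48,000 reads/s (with reads grouped three per transaction via ops_per_txn=3), about 53,000 operations per second in aggregate. That aggregate is the figure to adjust per machine so the workload sits close to, but not beyond, steady-state capacity as the header comment describes.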