author    | David Hows <howsdav@gmail.com> | 2015-11-27 14:45:42 +1100
committer | David Hows <howsdav@gmail.com> | 2015-11-27 14:45:42 +1100
commit    | a5b4ace6e5ae840f47effc03b63115ddd283ea24 (patch)
tree      | 74f5823fcdcf96c2e68d685442c95e51ade852c6 /bench
parent    | 982c5862fb31b556eded6bb7389274777fc4684b (diff)
download  | mongo-a5b4ace6e5ae840f47effc03b63115ddd283ea24.tar.gz
WTPERF - Add a multiplier to the truncate stones to deal with very high throughput cases
Diffstat (limited to 'bench')
-rw-r--r-- | bench/wtperf/wtperf.c          |  2
-rw-r--r-- | bench/wtperf/wtperf.h          |  1
-rw-r--r-- | bench/wtperf/wtperf_truncate.c | 27

3 files changed, 25 insertions, 5 deletions
diff --git a/bench/wtperf/wtperf.c b/bench/wtperf/wtperf.c
index 9ac96862fa1..6edc29b6106 100644
--- a/bench/wtperf/wtperf.c
+++ b/bench/wtperf/wtperf.c
@@ -559,8 +559,6 @@ worker(void *arg)
                 trk = &thread->truncate;
             else
                 trk = &thread->truncate_sleep;
-            /* Pause between truncate attempts */
-            (void)usleep(1000);
             break;
         }
         goto op_err;
diff --git a/bench/wtperf/wtperf.h b/bench/wtperf/wtperf.h
index e4b9fc00798..b26e978c13b 100644
--- a/bench/wtperf/wtperf.h
+++ b/bench/wtperf/wtperf.h
@@ -116,6 +116,7 @@ struct __truncate_struct {
     uint64_t last_total_inserts;
     uint64_t num_stones;
     uint64_t last_key;
+    uint64_t catchup_multiplier;
 };
 
 /* Queue entry for use with the Truncate Logic */
diff --git a/bench/wtperf/wtperf_truncate.c b/bench/wtperf/wtperf_truncate.c
index 581d1987947..7b6fc1e70ed 100644
--- a/bench/wtperf/wtperf_truncate.c
+++ b/bench/wtperf/wtperf_truncate.c
@@ -54,6 +54,9 @@ setup_truncate(CONFIG *cfg, CONFIG_THREAD *thread, WT_SESSION *session) {
         session, cfg->uris[0], NULL, NULL, &cursor)) != 0)
         goto err;
 
+    /* If we find the workload getting behind we multiply the stone gap */
+    trunc_cfg->catchup_multiplier = 1;
+
     /* How many entries between each stone. */
     trunc_cfg->stone_gap =
         (workload->truncate_count * workload->truncate_pct) / 100;
@@ -133,6 +136,7 @@ run_truncate(CONFIG *cfg, CONFIG_THREAD *thread,
     TRUNCATE_QUEUE_ENTRY *truncate_item;
     char *truncate_key;
     int ret, t_ret;
+    uint64_t used_stone_gap;
 
     ret = 0;
     trunc_cfg = &thread->trunc_cfg;
@@ -145,11 +149,28 @@ run_truncate(CONFIG *cfg, CONFIG_THREAD *thread,
     trunc_cfg->last_total_inserts = trunc_cfg->total_inserts;
 
     /* We are done if there isn't enough data to trigger a new milestone. */
-    if (trunc_cfg->expected_total <= trunc_cfg->needed_stones)
+    if (trunc_cfg->expected_total <= thread->workload->truncate_count)
         return (0);
 
+    used_stone_gap = trunc_cfg->stone_gap;
+    /*
+     * If we are falling behind and using more than one stone per lap we
+     * should widen the stone gap for this lap to try and catch up quicker.
+     */
+    if (trunc_cfg->expected_total >
+        thread->workload->truncate_count + trunc_cfg->stone_gap) {
+        if (trunc_cfg->catchup_multiplier < trunc_cfg->needed_stones - 1)
+            trunc_cfg->catchup_multiplier++;
+        used_stone_gap =
+            trunc_cfg->stone_gap * trunc_cfg->catchup_multiplier;
+    } else {
+        /* Back off if we start seeing an improvement */
+        if (trunc_cfg->catchup_multiplier > 1)
+            trunc_cfg->catchup_multiplier--;
+    }
+
     while (trunc_cfg->num_stones < trunc_cfg->needed_stones) {
-        trunc_cfg->last_key += trunc_cfg->stone_gap;
+        trunc_cfg->last_key += used_stone_gap;
         truncate_key = calloc(cfg->key_sz, 1);
         if (truncate_key == NULL) {
             lprintf(cfg, ENOMEM, 0,
@@ -165,7 +186,7 @@ run_truncate(CONFIG *cfg, CONFIG_THREAD *thread,
         }
         generate_key(cfg, truncate_key, trunc_cfg->last_key);
         truncate_item->key = truncate_key;
-        truncate_item->diff = trunc_cfg->stone_gap;
+        truncate_item->diff = used_stone_gap;
         TAILQ_INSERT_TAIL(&cfg->stone_head, truncate_item, q);
         trunc_cfg->num_stones++;
     }
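The core idea of the patch is the catch-up multiplier: when the insert workload outruns the truncate worker, the gap between truncation stones is temporarily widened by a growing factor, and that factor decays back toward one once the worker catches up. Below is a minimal standalone sketch of that behaviour, not the wtperf code itself: the struct and function names (trunc_state, next_stone_gap) and the numbers in main() are illustrative assumptions, and only stone_gap, catchup_multiplier, needed_stones and the "used" gap correspond to names in the diff.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-in for the truncate bookkeeping; not the wtperf structs.
 */
struct trunc_state {
	uint64_t stone_gap;          /* configured entries between stones */
	uint64_t catchup_multiplier; /* starts at 1, grows while behind */
	uint64_t last_key;           /* key of the most recent stone */
};

static uint64_t
next_stone_gap(struct trunc_state *t, uint64_t expected_total,
    uint64_t truncate_count, uint64_t needed_stones)
{
	uint64_t used_gap = t->stone_gap;

	if (expected_total > truncate_count + t->stone_gap) {
		/* Falling behind: widen the gap for this pass. */
		if (t->catchup_multiplier < needed_stones - 1)
			t->catchup_multiplier++;
		used_gap = t->stone_gap * t->catchup_multiplier;
	} else if (t->catchup_multiplier > 1)
		/* Caught up again: ease the multiplier back toward 1. */
		t->catchup_multiplier--;

	return (used_gap);
}

int
main(void)
{
	struct trunc_state t = { .stone_gap = 1000, .catchup_multiplier = 1 };

	/* Simulate a burst where inserts outpace stone placement. */
	for (int i = 0; i < 5; i++) {
		uint64_t gap = next_stone_gap(&t, 10000 + (uint64_t)i * 5000,
		    10000, 10);
		t.last_key += gap;
		printf("pass %d: gap %llu, last_key %llu\n", i,
		    (unsigned long long)gap, (unsigned long long)t.last_key);
	}
	return (0);
}
```

The cap at needed_stones - 1 mirrors the bound in the patch, which keeps the widened gap from growing without limit within a single pass.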