author    Oran Agra <oran@redislabs.com>    2020-05-20 14:08:40 +0300
committer Oran Agra <oran@redislabs.com>    2020-05-20 16:04:42 +0300
commit    88d71f479338c1a70fac15ea37f87315f9401f99 (patch)
tree      dbd563607e11c608248c581ec2fa37735ddc1faf /deps/jemalloc/include
parent    5d83e9e1de94fb0250073565c051e71b7648d42d (diff)
download  redis-88d71f479338c1a70fac15ea37f87315f9401f99.tar.gz
fix a rare active defrag edge case bug leading to stagnation
There's a rare case which leads to stagnation in the active defrag mechanism, causing it to keep scanning the keyspace while never actually moving any allocation. This happens when all the allocator slabs of a certain bin have the same % utilization, but the slab from which new allocations are made has a lower utilization.

This commit fixes it by removing the current slab from the overall average utilization of the bin. It also eliminates any precision loss in the utilization calculation, moves the defrag decision into jemalloc, and adds a test that consistently reproduces this issue.
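To illustrate the precision point, here is a minimal standalone sketch (not part of the commit; all values are made up) of the integer comparison the new code relies on: asking whether a slab is less utilized than the bin's average slab reduces to a cross-multiplication, so no fixed-point shift (<<16) and no division are needed.

/* Standalone sketch of the new defrag decision, with made-up example values.
 * Question: is this slab's utilization below the bin's average utilization?
 *   (nregs - free_in_slab) / nregs  <=  curregs / (curslabs * nregs)
 * Multiplying both sides by curslabs * nregs gives an exact integer test:
 *   (nregs - free_in_slab) * curslabs  <=  curregs
 */
#include <stdio.h>

int main(void) {
    int nregs = 128;        /* regions per slab for this bin (example value) */
    int curslabs = 10;      /* slabs in the bin, excluding slabcur */
    int curregs = 900;      /* used regions across those slabs */
    int free_in_slab = 50;  /* free regions in the slab being examined */

    int used_in_slab = nregs - free_in_slab;   /* 78 used regions */
    /* average used regions per slab is 900 / 10 = 90; 78 <= 90, so defrag */
    int defrag = used_in_slab * curslabs <= curregs;

    printf("defrag hint = %d\n", defrag);
    return 0;
}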
Diffstat (limited to 'deps/jemalloc/include')
-rw-r--r-- deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h | 23
1 file changed, 17 insertions, 6 deletions
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
index 290e5cf99..2685802b8 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
@@ -216,7 +216,7 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
 }
 
 JEMALLOC_ALWAYS_INLINE int
-iget_defrag_hint(tsdn_t *tsdn, void* ptr, int *bin_util, int *run_util) {
+iget_defrag_hint(tsdn_t *tsdn, void* ptr) {
 	int defrag = 0;
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -232,11 +232,22 @@ iget_defrag_hint(tsdn_t *tsdn, void* ptr, int *bin_util, int *run_util) {
 		malloc_mutex_lock(tsdn, &bin->lock);
 		/* don't bother moving allocations from the slab currently used for new allocations */
 		if (slab != bin->slabcur) {
-			const bin_info_t *bin_info = &bin_infos[binind];
-			size_t availregs = bin_info->nregs * bin->stats.curslabs;
-			*bin_util = ((long long)bin->stats.curregs<<16) / availregs;
-			*run_util = ((long long)(bin_info->nregs - extent_nfree_get(slab))<<16) / bin_info->nregs;
-			defrag = 1;
+			int free_in_slab = extent_nfree_get(slab);
+			if (free_in_slab) {
+				const bin_info_t *bin_info = &bin_infos[binind];
+				int curslabs = bin->stats.curslabs;
+				size_t curregs = bin->stats.curregs;
+				if (bin->slabcur) {
+					/* remove slabcur from the overall utilization */
+					curregs -= bin_info->nregs - extent_nfree_get(bin->slabcur);
+					curslabs -= 1;
+				}
+				/* Compare the utilization ratio of the slab in question to the total average;
+				 * to avoid precision loss and division, we do that by extrapolating the usage
+				 * of the slab as if all slabs have the same usage. If this slab is less used
+				 * than the average, we'll prefer to evict the data to hopefully more used ones. */
+				defrag = (bin_info->nregs - free_in_slab) * curslabs <= curregs;
+			}
 		}
 		malloc_mutex_unlock(tsdn, &bin->lock);
 	}
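For context, the signature change also implies a simpler call site on the Redis side: instead of receiving bin_util and run_util and comparing them itself, the caller now gets the yes/no answer directly from the allocator. The sketch below is a hypothetical, standalone illustration of that shape only; get_defrag_hint_stub and defrag_realloc are made-up stand-ins, not the actual Redis or jemalloc functions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Made-up stand-in for a public wrapper around iget_defrag_hint(tsdn, ptr);
 * here it simply always answers "yes, move it". */
static int get_defrag_hint_stub(void *ptr) { (void)ptr; return 1; }

/* Move an allocation so its old, sparsely used slab can drain and be freed. */
static void *defrag_realloc(void *ptr, size_t size) {
    void *newptr = malloc(size);
    if (newptr == NULL) return ptr;   /* on failure, keep the old allocation */
    memcpy(newptr, ptr, size);
    free(ptr);
    return newptr;
}

int main(void) {
    size_t size = 64;
    void *ptr = calloc(1, size);
    /* Old API: the caller fetched bin_util/run_util and compared them itself.
     * New API: the allocator answers "should this allocation move?" directly. */
    if (ptr != NULL && get_defrag_hint_stub(ptr)) {
        ptr = defrag_realloc(ptr, size);
    }
    printf("done, ptr=%p\n", ptr);
    free(ptr);
    return 0;
}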