summaryrefslogtreecommitdiff
path: root/deps/jemalloc
diff options
context:
space:
mode:
authorOran Agra <oran@redislabs.com>2020-05-20 14:08:40 +0300
committerOran Agra <oran@redislabs.com>2020-05-20 16:04:42 +0300
commit88d71f479338c1a70fac15ea37f87315f9401f99 (patch)
treedbd563607e11c608248c581ec2fa37735ddc1faf /deps/jemalloc
parent5d83e9e1de94fb0250073565c051e71b7648d42d (diff)
downloadredis-88d71f479338c1a70fac15ea37f87315f9401f99.tar.gz
fix a rare active defrag edge case bug leading to stagnation
There's a rare case which leads to stagnation in the defragger, causing it to keep scanning the keyspace and do nothing (not moving any allocation). This happens when all the allocator slabs of a certain bin have the same % utilization, but the slab from which new allocations are made has a lower utilization. This commit fixes it by removing the current slab from the overall average utilization of the bin; it also eliminates any precision loss in the utilization calculation, moves the decision about the defrag to reside inside jemalloc, and adds a test that consistently reproduces this issue.
Diffstat (limited to 'deps/jemalloc')
-rw-r--r--deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h23
-rw-r--r--deps/jemalloc/src/jemalloc.c8
2 files changed, 20 insertions, 11 deletions
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
index 290e5cf99..2685802b8 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
@@ -216,7 +216,7 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
}
JEMALLOC_ALWAYS_INLINE int
-iget_defrag_hint(tsdn_t *tsdn, void* ptr, int *bin_util, int *run_util) {
+iget_defrag_hint(tsdn_t *tsdn, void* ptr) {
int defrag = 0;
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -232,11 +232,22 @@ iget_defrag_hint(tsdn_t *tsdn, void* ptr, int *bin_util, int *run_util) {
malloc_mutex_lock(tsdn, &bin->lock);
/* don't bother moving allocations from the slab currently used for new allocations */
if (slab != bin->slabcur) {
- const bin_info_t *bin_info = &bin_infos[binind];
- size_t availregs = bin_info->nregs * bin->stats.curslabs;
- *bin_util = ((long long)bin->stats.curregs<<16) / availregs;
- *run_util = ((long long)(bin_info->nregs - extent_nfree_get(slab))<<16) / bin_info->nregs;
- defrag = 1;
+ int free_in_slab = extent_nfree_get(slab);
+ if (free_in_slab) {
+ const bin_info_t *bin_info = &bin_infos[binind];
+ int curslabs = bin->stats.curslabs;
+ size_t curregs = bin->stats.curregs;
+ if (bin->slabcur) {
+ /* remove slabcur from the overall utilization */
+ curregs -= bin_info->nregs - extent_nfree_get(bin->slabcur);
+ curslabs -= 1;
+ }
+ /* Compare the utilization ratio of the slab in question to the total average,
+ * to avoid precision lost and division, we do that by extrapolating the usage
+ * of the slab as if all slabs have the same usage. If this slab is less used
+ * than the average, we'll prefer to evict the data to hopefully more used ones */
+ defrag = (bin_info->nregs - free_in_slab) * curslabs <= curregs;
+ }
}
malloc_mutex_unlock(tsdn, &bin->lock);
}
diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c
index 5b936cb48..585645a28 100644
--- a/deps/jemalloc/src/jemalloc.c
+++ b/deps/jemalloc/src/jemalloc.c
@@ -3326,12 +3326,10 @@ jemalloc_postfork_child(void) {
/******************************************************************************/
/* Helps the application decide if a pointer is worth re-allocating in order to reduce fragmentation.
- * returns 0 if the allocation is in the currently active run,
- * or when it is not causing any frag issue (large or huge bin)
- * returns the bin utilization and run utilization both in fixed point 16:16.
+ * returns 1 if the allocation should be moved, and 0 if the allocation be kept.
* If the application decides to re-allocate it should use MALLOCX_TCACHE_NONE when doing so. */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
-get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
+get_defrag_hint(void* ptr) {
assert(ptr != NULL);
- return iget_defrag_hint(TSDN_NULL, ptr, bin_util, run_util);
+ return iget_defrag_hint(TSDN_NULL, ptr);
}