Diffstat (limited to 'deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h')
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h  | 23
 1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
index 290e5cf99..2685802b8 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
@@ -216,7 +216,7 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
 }

 JEMALLOC_ALWAYS_INLINE int
-iget_defrag_hint(tsdn_t *tsdn, void* ptr, int *bin_util, int *run_util) {
+iget_defrag_hint(tsdn_t *tsdn, void* ptr) {
 	int defrag = 0;
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -232,11 +232,22 @@ iget_defrag_hint(tsdn_t *tsdn, void* ptr, int *bin_util, int *run_util) {
 		malloc_mutex_lock(tsdn, &bin->lock);
 		/* don't bother moving allocations from the slab currently used for new allocations */
 		if (slab != bin->slabcur) {
-			const bin_info_t *bin_info = &bin_infos[binind];
-			size_t availregs = bin_info->nregs * bin->stats.curslabs;
-			*bin_util = ((long long)bin->stats.curregs<<16) / availregs;
-			*run_util = ((long long)(bin_info->nregs - extent_nfree_get(slab))<<16) / bin_info->nregs;
-			defrag = 1;
+			int free_in_slab = extent_nfree_get(slab);
+			if (free_in_slab) {
+				const bin_info_t *bin_info = &bin_infos[binind];
+				int curslabs = bin->stats.curslabs;
+				size_t curregs = bin->stats.curregs;
+				if (bin->slabcur) {
+					/* remove slabcur from the overall utilization */
+					curregs -= bin_info->nregs - extent_nfree_get(bin->slabcur);
+					curslabs -= 1;
+				}
+				/* Compare the utilization ratio of the slab in question to the overall average.
+				 * To avoid precision loss and division, we do that by extrapolating the usage
+				 * of this slab as if all slabs had the same usage. If this slab is less used
+				 * than the average, we'll prefer to evict its data to the hopefully more used ones. */
+				defrag = (bin_info->nregs - free_in_slab) * curslabs <= curregs;
+			}
 		}
 		malloc_mutex_unlock(tsdn, &bin->lock);
 	}
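
The new hint boils down to the single cross-multiplied comparison on the `defrag = ...` line. The following is a minimal standalone sketch, not part of jemalloc or this diff, using made-up values for nregs, free_in_slab, curslabs and curregs, to show why `(nregs - free_in_slab) * curslabs <= curregs` is the division-free form of comparing this slab's utilization against the average utilization of the bin's other slabs.

#include <stdio.h>

int main(void) {
	/* Hypothetical bin state, for illustration only. */
	int nregs = 128;        /* regions per slab for this bin size class */
	int free_in_slab = 90;  /* free regions in the slab being considered */
	int curslabs = 10;      /* slabs counted after excluding slabcur */
	long curregs = 640;     /* allocated regions across those slabs */

	/* With division: utilization of this slab vs. the bin average. */
	double slab_util = (double)(nregs - free_in_slab) / nregs;       /* 38/128 ~= 0.30 */
	double avg_util  = (double)curregs / ((double)curslabs * nregs); /* 640/1280 = 0.50 */

	/*
	 * Multiplying both sides of slab_util <= avg_util by curslabs * nregs
	 * gives the integer comparison used by the patch: no division, no
	 * precision loss, same decision.
	 */
	int defrag = (long)(nregs - free_in_slab) * curslabs <= curregs;

	printf("slab_util=%.2f avg_util=%.2f defrag=%d\n",
	       slab_util, avg_util, defrag); /* prints 0.30, 0.50, 1 */
	return 0;
}

Note that the `<=` means a slab sitting exactly at the average is still reported as a defrag candidate, and that the diff excludes bin->slabcur from both sides: it is never offered for defrag (the outer slab != bin->slabcur check) and its regions are subtracted from curregs/curslabs so it does not skew the average.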