summaryrefslogtreecommitdiff
path: root/deps
diff options
context:
space:
mode:
authorOran Agra <oran@redislabs.com>2018-06-26 14:13:24 +0300
committerYoav Steinberg <yoav@monfort.co.il>2021-10-10 18:29:13 +0300
commit29d7f97c96dd19db7200cf49595478be2e72be7f (patch)
tree9e8997a73c02b77377f135dcac8063a272b760d2 /deps
parent9e5cd2cb26e29fc4d334b469b2cce784aa563208 (diff)
downloadredis-29d7f97c96dd19db7200cf49595478be2e72be7f.tar.gz
add defrag hint support into jemalloc 5
Diffstat (limited to 'deps')
-rw-r--r--deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h28
-rw-r--r--deps/jemalloc/include/jemalloc/jemalloc_macros.h.in4
-rw-r--r--deps/jemalloc/src/jemalloc.c11
3 files changed, 43 insertions, 0 deletions
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
index cdb10eb21..a48e63a82 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
@@ -219,4 +219,32 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
newsize);
}
+JEMALLOC_ALWAYS_INLINE int
+iget_defrag_hint(tsdn_t *tsdn, void* ptr, int *bin_util, int *run_util) { /* returns 1 if ptr is worth moving; fills bin_util and run_util as 16:16 fixed point */
+ int defrag = 0;
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ szind_t szind;
+ bool is_slab;
+ rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &szind, &is_slab);
+ if (likely(is_slab)) {
+ /* Small allocation: served from a slab, so per-bin utilization stats apply. */
+ extent_t *slab = iealloc(tsdn, ptr);
+ arena_t *arena = extent_arena_get(slab);
+ szind_t binind = extent_szind_get(slab);
+ bin_t *bin = &arena->bins[binind];
+ malloc_mutex_lock(tsdn, &bin->lock);
+ /* Don't bother moving allocations from the slab currently used for new allocations. */
+ if (slab != bin->slabcur) {
+ const bin_info_t *bin_info = &bin_infos[binind];
+ size_t availregs = bin_info->nregs * bin->stats.curslabs;
+ *bin_util = (bin->stats.curregs<<16) / availregs;
+ *run_util = ((bin_info->nregs - extent_nfree_get(slab))<<16) / bin_info->nregs;
+ defrag = 1;
+ }
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ }
+ return defrag;
+}
+
#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
index 59e29558c..3421321a4 100644
--- a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
+++ b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
@@ -127,3 +127,7 @@
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#endif
+
+/* This version of Jemalloc, modified for Redis, has the je_get_defrag_hint()
+ * function. */
+#define JEMALLOC_FRAG_HINT
diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c
index ed13718d4..680bf9760 100644
--- a/deps/jemalloc/src/jemalloc.c
+++ b/deps/jemalloc/src/jemalloc.c
@@ -3920,3 +3920,14 @@ jemalloc_postfork_child(void) {
}
/******************************************************************************/
+
+/* Helps the application decide if a pointer is worth re-allocating to reduce fragmentation.
+ * Returns 0 when the allocation lives in the slab currently used for new allocations,
+ * or when moving it cannot reduce fragmentation (large or huge allocation); otherwise
+ * returns 1 and stores the bin utilization and run utilization, both in 16:16 fixed point.
+ * If the application decides to re-allocate it should use MALLOCX_TCACHE_NONE when doing so. */
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
+ assert(ptr != NULL);
+ return iget_defrag_hint(TSDN_NULL, ptr, bin_util, run_util);
+}