author    Oran Agra <oran@redislabs.com>  2018-06-26 14:13:24 +0300
committer Oran Agra <oran@redislabs.com>  2018-06-27 10:52:39 +0300
commit    e8099cabd19c4e3a46c94c39e69e13191d43f5eb (patch)
tree      503e5dbdfd94195335881e1fc2c20e71d45777fc /deps/jemalloc
parent    bb666d445d83287295832699d0b86d61866fedef (diff)
download  redis-e8099cabd19c4e3a46c94c39e69e13191d43f5eb.tar.gz
add defrag hint support into jemalloc 5
Diffstat (limited to 'deps/jemalloc')
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h | 28
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc_macros.h.in                    |  4
-rw-r--r--  deps/jemalloc/src/jemalloc.c                                           | 11
3 files changed, 43 insertions, 0 deletions
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
index c829ac60c..540c168e5 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
@@ -215,4 +215,32 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero);
}
+JEMALLOC_ALWAYS_INLINE int
+iget_defrag_hint(tsdn_t *tsdn, void* ptr, int *bin_util, int *run_util) {
+ int defrag = 0;
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ szind_t szind;
+ bool is_slab;
+ rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &szind, &is_slab);
+ if (likely(is_slab)) {
+ /* Small allocation. */
+ extent_t *slab = iealloc(tsdn, ptr);
+ arena_t *arena = extent_arena_get(slab);
+ szind_t binind = extent_szind_get(slab);
+ bin_t *bin = &arena->bins[binind];
+ malloc_mutex_lock(tsdn, &bin->lock);
+ /* don't bother moving allocations from the slab currently used for new allocations */
+ if (slab != bin->slabcur) {
+ const bin_info_t *bin_info = &bin_infos[binind];
+ size_t availregs = bin_info->nregs * bin->stats.curslabs;
+ *bin_util = (bin->stats.curregs<<16) / availregs;
+ *run_util = ((bin_info->nregs - extent_nfree_get(slab))<<16) / bin_info->nregs;
+ defrag = 1;
+ }
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ }
+ return defrag;
+}
+
#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
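
The two out-parameters above are packed as 16:16 fixed point: a ratio scaled by 1<<16, so 65536 means 100% utilization. A minimal sketch of how a consumer might decode these values, assuming nothing beyond the shift-by-16 encoding visible in the diff (the helper name and sample numbers are illustrative, not part of this commit):

    /* Illustrative only: decode the 16:16 fixed-point utilizations produced
     * by iget_defrag_hint(). Helper name and sample values are hypothetical. */
    #include <stdio.h>

    /* The hint computes (count << 16) / total, so dividing by 2^16
     * recovers the utilization ratio in [0.0, 1.0]. */
    static double fp16_16_to_ratio(int v) {
        return (double)v / 65536.0;
    }

    int main(void) {
        int bin_util = 45875;   /* ~0.70: the bin's slabs are ~70% full on average */
        int run_util = 13107;   /* ~0.20: this allocation's slab is only ~20% full */
        printf("bin %.0f%%, slab %.0f%%\n",
               fp16_16_to_ratio(bin_util) * 100.0,
               fp16_16_to_ratio(run_util) * 100.0);
        return 0;
    }

A slab that is much emptier than its bin's average is exactly the kind of candidate a defragmenter would want to vacate.
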
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
index aee55438c..daf9e571b 100644
--- a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
+++ b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
@@ -120,3 +120,7 @@
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#endif
+
+/* This version of Jemalloc, modified for Redis, has the je_get_defrag_hint()
+ * function. */
+#define JEMALLOC_FRAG_HINT
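
The define above lets application code detect this modified jemalloc at compile time, since it ends up in the generated jemalloc header. A hedged sketch of such a guard; the application-side macro name is an assumption and not part of this diff:

    /* Hypothetical application-side feature test: JEMALLOC_FRAG_HINT comes from
     * the generated jemalloc header; the HAVE_DEFRAG name is an assumption. */
    #include <jemalloc/jemalloc.h>

    #ifdef JEMALLOC_FRAG_HINT
    #define HAVE_DEFRAG 1
    #endif
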
diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c
index f93c16fa3..5b936cb48 100644
--- a/deps/jemalloc/src/jemalloc.c
+++ b/deps/jemalloc/src/jemalloc.c
@@ -3324,3 +3324,14 @@ jemalloc_postfork_child(void) {
}
/******************************************************************************/
+
+/* Helps the application decide if a pointer is worth re-allocating in order to reduce fragmentation.
+ * Returns 0 if the allocation is in the currently active run,
+ * or if it is not causing any fragmentation issue (large or huge allocation).
+ * Otherwise returns 1 and reports the bin utilization and run utilization, both in 16:16 fixed point.
+ * If the application decides to re-allocate, it should use MALLOCX_TCACHE_NONE when doing so. */
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
+ assert(ptr != NULL);
+ return iget_defrag_hint(TSDN_NULL, ptr, bin_util, run_util);
+}