author    oranagra <oran@redislabs.com>  2016-12-30 03:37:52 +0200
committer oranagra <oran@redislabs.com>  2016-12-30 03:37:52 +0200
commit    7aa9e6d2ae1d500d8ba900b239207143993ecc3e (patch)
tree      ed9d684b562250c09d9570c109f0630554b5af6c /deps/jemalloc
parent    6712bce92c79de5c2caa38e9b597a3fa52fd497f (diff)
active memory defragmentation
Diffstat (limited to 'deps/jemalloc')
-rw-r--r--  deps/jemalloc/src/jemalloc.c  32
1 file changed, 32 insertions, 0 deletions
diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c
index 5a2d32406..fe77c2475 100644
--- a/deps/jemalloc/src/jemalloc.c
+++ b/deps/jemalloc/src/jemalloc.c
@@ -2591,3 +2591,35 @@ jemalloc_postfork_child(void)
}
/******************************************************************************/
+
+/* Helps the application decide whether a pointer is worth re-allocating in order to
+ * reduce fragmentation. Returns 0 if the allocation is in the currently active run,
+ * or if it cannot cause a fragmentation issue to begin with (large or huge allocation).
+ * Otherwise returns 1 and sets the bin utilization and run utilization, both in 16:16
+ * fixed point. If the application decides to re-allocate, it should use MALLOCX_TCACHE_NONE. */
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
+ int defrag = 0;
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (likely(chunk != ptr)) { /* indication that this is not a HUGE alloc */
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ size_t mapbits = arena_mapbits_get(chunk, pageind);
+ if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { /* indication that this is not a LARGE alloc */
+ arena_t *arena = extent_node_arena_get(&chunk->node);
+ size_t rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
+ arena_run_t *run = &arena_miscelm_get(chunk, rpages_ind)->run;
+ arena_bin_t *bin = &arena->bins[run->binind];
+ malloc_mutex_lock(&bin->lock);
+ /* runs in the same chunk as the bin's current run (runcur) are likely to become the next runcur, so skip them */
+ if (chunk != (arena_chunk_t *)CHUNK_ADDR2BASE(bin->runcur)) {
+ arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
+ size_t availregs = bin_info->nregs * bin->stats.curruns;
+ *bin_util = (bin->stats.curregs<<16) / availregs;
+ *run_util = ((bin_info->nregs - run->nfree)<<16) / bin_info->nregs;
+ defrag = 1;
+ }
+ malloc_mutex_unlock(&bin->lock);
+ }
+ }
+ return defrag;
+}