author | antirez <antirez@gmail.com> | 2017-01-30 09:58:34 +0100
committer | antirez <antirez@gmail.com> | 2017-01-30 09:58:34 +0100
commit | 27e29f4fe61d822eb23d948bcb72db76c4c887e5
tree | eb4ed2a2834c6330ab8d5f5af1d2fc8c30960a23 /deps/jemalloc/src
parent | 713fe0b7d7096cbf3ce774e0041a0b6b31f5f26f
download | redis-27e29f4fe61d822eb23d948bcb72db76c4c887e5.tar.gz
Jemalloc updated to 4.4.0.
The original jemalloc source tree was modified to:
1. Remove the configure error that prevents nested builds.
2. Insert the Redis private Jemalloc API in order to allow the
Redis fragmentation function to work.
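The private allocator hook referenced in point 2 is not shown in this excerpt. As a rough, hypothetical sketch of the kind of allocator introspection a fragmentation check relies on, the code below uses only jemalloc's public mallctl interface ("epoch", "stats.allocated", "stats.active"); the je_ symbol prefix matches how Redis builds its bundled copy, and none of this is part of the commit itself.

```c
/* Hypothetical sketch: estimate external fragmentation from jemalloc's
 * public statistics. The Redis-private API added by this commit is not
 * reproduced here. Assumes a je_-prefixed jemalloc build with stats enabled. */
#include <stdio.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

static double
fragmentation_ratio(void)
{
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	size_t allocated, active;
	size_t len = sizeof(size_t);

	/* Writing "epoch" refreshes the statistics snapshot before reading it. */
	je_mallctl("epoch", &epoch, &esz, &epoch, esz);
	je_mallctl("stats.allocated", &allocated, &len, NULL, 0);
	len = sizeof(size_t);
	je_mallctl("stats.active", &active, &len, NULL, 0);

	/* active >= allocated; the gap is space kept in active pages but unused. */
	return (allocated == 0) ? 1.0 : (double)active / (double)allocated;
}

int
main(void)
{
	void *p = je_malloc(1000);
	printf("fragmentation ratio: %f\n", fragmentation_ratio());
	je_free(p);
	return (0);
}
```

A ratio of stats.active to stats.allocated well above 1.0 indicates pages that the allocator still holds but that are mostly free, which is the condition a defragmentation pass targets.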
Diffstat (limited to 'deps/jemalloc/src')
26 files changed, 5335 insertions, 2965 deletions
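The diff below (arena.c) introduces decay-based purging alongside the existing ratio mode: opt_purge, purge_mode_names, opt_decay_time, and the arena_decay_* machinery. As a hedged illustration of how that feature is driven from outside the allocator, the sketch assumes the standard jemalloc 4.x option and mallctl names ("opt.purge", "arenas.decay_time") rather than anything added by this commit:

```c
/* Sketch (assumes stock jemalloc 4.x mallctl names; not part of this commit):
 * select decay-based purging at startup and tune the decay time at run time.
 *
 * Startup selection, via environment or the malloc_conf symbol:
 *   MALLOC_CONF="purge:decay,decay_time:10"
 */
#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	const char *purge;
	size_t len = sizeof(purge);
	ssize_t decay_time = 5;	/* seconds; -1 never purges, 0 purges immediately */

	/* Read the purge mode chosen at startup ("ratio" or "decay"). */
	if (je_mallctl("opt.purge", &purge, &len, NULL, 0) == 0)
		printf("purge mode: %s\n", purge);

	/* Change the default decay time applied to newly created arenas. */
	if (je_mallctl("arenas.decay_time", NULL, NULL, &decay_time,
	    sizeof(decay_time)) != 0) {
		fprintf(stderr, "arenas.decay_time not available\n");
	}

	return (0);
}
```

With purge:decay, dirty pages are released gradually over the configured decay time following a smoothstep curve (the backlog/epoch logic visible in the diff), instead of being purged in bulk whenever the dirty-to-active ratio threshold is crossed.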
diff --git a/deps/jemalloc/src/arena.c b/deps/jemalloc/src/arena.c index 3081519cc..648a8da3a 100644 --- a/deps/jemalloc/src/arena.c +++ b/deps/jemalloc/src/arena.c @@ -4,16 +4,23 @@ /******************************************************************************/ /* Data. */ +purge_mode_t opt_purge = PURGE_DEFAULT; +const char *purge_mode_names[] = { + "ratio", + "decay", + "N/A" +}; ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; static ssize_t lg_dirty_mult_default; +ssize_t opt_decay_time = DECAY_TIME_DEFAULT; +static ssize_t decay_time_default; + arena_bin_info_t arena_bin_info[NBINS]; size_t map_bias; size_t map_misc_offset; size_t arena_maxrun; /* Max run size for arenas. */ size_t large_maxclass; /* Max large size class. */ -static size_t small_maxrun; /* Max run size used for small size classes. */ -static bool *small_run_tab; /* Valid small run page multiples. */ unsigned nlclasses; /* Number of large size classes. */ unsigned nhclasses; /* Number of huge size classes. */ @@ -23,60 +30,57 @@ unsigned nhclasses; /* Number of huge size classes. */ * definition. */ -static void arena_purge(arena_t *arena, bool all); -static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, - bool cleaned, bool decommitted); -static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); -static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); +static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk); +static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, + size_t ndirty_limit); +static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, + bool dirty, bool cleaned, bool decommitted); +static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); +static void arena_bin_lower_run(arena_t *arena, arena_run_t *run, + arena_bin_t *bin); /******************************************************************************/ -#define CHUNK_MAP_KEY ((uintptr_t)0x1U) - -JEMALLOC_INLINE_C arena_chunk_map_misc_t * -arena_miscelm_key_create(size_t size) +JEMALLOC_INLINE_C size_t +arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm) { + arena_chunk_t *chunk; + size_t pageind, mapbits; - return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) | - CHUNK_MAP_KEY)); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); + pageind = arena_miscelm_to_pageind(miscelm); + mapbits = arena_mapbits_get(chunk, pageind); + return (arena_mapbits_size_decode(mapbits)); } -JEMALLOC_INLINE_C bool -arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm) +JEMALLOC_INLINE_C const extent_node_t * +arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm) { + arena_chunk_t *chunk; - return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); + return (&chunk->node); } -#undef CHUNK_MAP_KEY - -JEMALLOC_INLINE_C size_t -arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm) +JEMALLOC_INLINE_C int +arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b) { + size_t a_sn, b_sn; - assert(arena_miscelm_is_key(miscelm)); - - return (arena_mapbits_size_decode((uintptr_t)miscelm)); -} - -JEMALLOC_INLINE_C size_t -arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm) -{ - arena_chunk_t *chunk; - size_t pageind, mapbits; + assert(a != NULL); + assert(b != NULL); - assert(!arena_miscelm_is_key(miscelm)); + a_sn = 
extent_node_sn_get(arena_miscelm_extent_get(a)); + b_sn = extent_node_sn_get(arena_miscelm_extent_get(b)); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); - pageind = arena_miscelm_to_pageind(miscelm); - mapbits = arena_mapbits_get(chunk, pageind); - return (arena_mapbits_size_decode(mapbits)); + return ((a_sn > b_sn) - (a_sn < b_sn)); } JEMALLOC_INLINE_C int -arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) +arena_ad_comp(const arena_chunk_map_misc_t *a, + const arena_chunk_map_misc_t *b) { uintptr_t a_miscelm = (uintptr_t)a; uintptr_t b_miscelm = (uintptr_t)b; @@ -87,74 +91,79 @@ arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); } -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t, - rb_link, arena_run_comp) - -static size_t -run_quantize(size_t size) +JEMALLOC_INLINE_C int +arena_snad_comp(const arena_chunk_map_misc_t *a, + const arena_chunk_map_misc_t *b) { - size_t qsize; + int ret; - assert(size != 0); - assert(size == PAGE_CEILING(size)); + assert(a != NULL); + assert(b != NULL); - /* Don't change sizes that are valid small run sizes. */ - if (size <= small_maxrun && small_run_tab[size >> LG_PAGE]) - return (size); + ret = arena_sn_comp(a, b); + if (ret != 0) + return (ret); - /* - * Round down to the nearest run size that can actually be requested - * during normal large allocation. Add large_pad so that cache index - * randomization can offset the allocation from the page boundary. - */ - qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad; - if (qsize <= SMALL_MAXCLASS + large_pad) - return (run_quantize(size - large_pad)); - assert(qsize <= size); - return (qsize); + ret = arena_ad_comp(a, b); + return (ret); } +/* Generate pairing heap functions. */ +ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t, + ph_link, arena_snad_comp) + +#ifdef JEMALLOC_JET +#undef run_quantize_floor +#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor) +#endif static size_t -run_quantize_next(size_t size) +run_quantize_floor(size_t size) { - size_t large_run_size_next; + size_t ret; + pszind_t pind; + + assert(size > 0); + assert(size <= HUGE_MAXCLASS); + assert((size & PAGE_MASK) == 0); assert(size != 0); assert(size == PAGE_CEILING(size)); - /* - * Return the next quantized size greater than the input size. - * Quantized sizes comprise the union of run sizes that back small - * region runs, and run sizes that back large regions with no explicit - * alignment constraints. - */ - - if (size > SMALL_MAXCLASS) { - large_run_size_next = PAGE_CEILING(index2size(size2index(size - - large_pad) + 1) + large_pad); - } else - large_run_size_next = SIZE_T_MAX; - if (size >= small_maxrun) - return (large_run_size_next); - - while (true) { - size += PAGE; - assert(size <= small_maxrun); - if (small_run_tab[size >> LG_PAGE]) { - if (large_run_size_next < size) - return (large_run_size_next); - return (size); - } + pind = psz2ind(size - large_pad + 1); + if (pind == 0) { + /* + * Avoid underflow. This short-circuit would also do the right + * thing for all sizes in the range for which there are + * PAGE-spaced size classes, but it's simplest to just handle + * the one case that would cause erroneous results. 
+ */ + return (size); } + ret = pind2sz(pind - 1) + large_pad; + assert(ret <= size); + return (ret); } +#ifdef JEMALLOC_JET +#undef run_quantize_floor +#define run_quantize_floor JEMALLOC_N(run_quantize_floor) +run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor); +#endif +#ifdef JEMALLOC_JET +#undef run_quantize_ceil +#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil) +#endif static size_t -run_quantize_first(size_t size) +run_quantize_ceil(size_t size) { - size_t qsize = run_quantize(size); + size_t ret; + + assert(size > 0); + assert(size <= HUGE_MAXCLASS); + assert((size & PAGE_MASK) == 0); - if (qsize < size) { + ret = run_quantize_floor(size); + if (ret < size) { /* * Skip a quantization that may have an adequately large run, * because under-sized runs may be mixed in. This only happens @@ -163,72 +172,50 @@ run_quantize_first(size_t size) * search would potentially find sufficiently aligned available * memory somewhere lower. */ - qsize = run_quantize_next(size); - } - return (qsize); -} - -JEMALLOC_INLINE_C int -arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) -{ - int ret; - uintptr_t a_miscelm = (uintptr_t)a; - size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ? - arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a)); - size_t b_qsize = run_quantize(arena_miscelm_size_get(b)); - - /* - * Compare based on quantized size rather than size, in order to sort - * equally useful runs only by address. - */ - ret = (a_qsize > b_qsize) - (a_qsize < b_qsize); - if (ret == 0) { - if (!arena_miscelm_is_key(a)) { - uintptr_t b_miscelm = (uintptr_t)b; - - ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm); - } else { - /* - * Treat keys as if they are lower than anything else. - */ - ret = -1; - } + ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad; } - return (ret); } - -/* Generate red-black tree functions. 
*/ -rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, - arena_chunk_map_misc_t, rb_link, arena_avail_comp) +#ifdef JEMALLOC_JET +#undef run_quantize_ceil +#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil) +run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil); +#endif static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, size_t npages) { - + pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get( + arena_miscelm_get_const(chunk, pageind)))); assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE)); - arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk, - pageind)); + assert((npages << LG_PAGE) < chunksize); + assert(pind2sz(pind) <= chunksize); + arena_run_heap_insert(&arena->runs_avail[pind], + arena_miscelm_get_mutable(chunk, pageind)); } static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, size_t npages) { - + pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get( + arena_miscelm_get_const(chunk, pageind)))); assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE)); - arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk, - pageind)); + assert((npages << LG_PAGE) < chunksize); + assert(pind2sz(pind) <= chunksize); + arena_run_heap_remove(&arena->runs_avail[pind], + arena_miscelm_get_mutable(chunk, pageind)); } static void arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, size_t npages) { - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); + arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, + pageind); assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE)); @@ -245,7 +232,8 @@ static void arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, size_t npages) { - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); + arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, + pageind); assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE)); @@ -292,14 +280,14 @@ JEMALLOC_INLINE_C void * arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) { void *ret; - unsigned regind; + size_t regind; arena_chunk_map_misc_t *miscelm; void *rpages; assert(run->nfree > 0); assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); - regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info); + regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info); miscelm = arena_run_to_miscelm(run); rpages = arena_miscelm_to_rpages(miscelm); ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + @@ -316,7 +304,7 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr) size_t mapbits = arena_mapbits_get(chunk, pageind); szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); arena_bin_info_t *bin_info = &arena_bin_info[binind]; - unsigned regind = arena_run_regind(run, bin_info, ptr); + size_t regind = arena_run_regind(run, bin_info, ptr); assert(run->nfree < bin_info->nregs); /* Freeing an interior pointer can cause assertion failure. 
*/ @@ -364,16 +352,30 @@ arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) } static void -arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) +arena_nactive_add(arena_t *arena, size_t add_pages) { if (config_stats) { - ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages - - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << + size_t cactive_add = CHUNK_CEILING((arena->nactive + + add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << LG_PAGE); - if (cactive_diff != 0) - stats_cactive_add(cactive_diff); + if (cactive_add != 0) + stats_cactive_add(cactive_add); + } + arena->nactive += add_pages; +} + +static void +arena_nactive_sub(arena_t *arena, size_t sub_pages) +{ + + if (config_stats) { + size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) - + CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE); + if (cactive_sub != 0) + stats_cactive_sub(cactive_sub); } + arena->nactive -= sub_pages; } static void @@ -394,8 +396,7 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, arena_avail_remove(arena, chunk, run_ind, total_pages); if (flag_dirty != 0) arena_run_dirty_remove(arena, chunk, run_ind, total_pages); - arena_cactive_update(arena, need_pages, 0); - arena->nactive += need_pages; + arena_nactive_add(arena, need_pages); /* Keep track of trailing unused pages for later use. */ if (rem_pages > 0) { @@ -567,7 +568,8 @@ arena_chunk_init_spare(arena_t *arena) } static bool -arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero) +arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + size_t sn, bool zero) { /* @@ -576,64 +578,67 @@ arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero) * of runs is tracked individually, and upon chunk deallocation the * entire chunk is in a consistent commit state. */ - extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true); + extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true); extent_node_achunk_set(&chunk->node, true); - return (chunk_register(chunk, &chunk->node)); + return (chunk_register(tsdn, chunk, &chunk->node)); } static arena_chunk_t * -arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, - bool *zero, bool *commit) +arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, bool *zero, bool *commit) { arena_chunk_t *chunk; + size_t sn; - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); - chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL, - chunksize, chunksize, zero, commit); + chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks, + NULL, chunksize, chunksize, &sn, zero, commit); if (chunk != NULL && !*commit) { /* Commit header. */ if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << LG_PAGE, arena->ind)) { - chunk_dalloc_wrapper(arena, chunk_hooks, - (void *)chunk, chunksize, *commit); + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, + (void *)chunk, chunksize, sn, *zero, *commit); chunk = NULL; } } - if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) { + if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn, + *zero)) { if (!*commit) { /* Undo commit of header. 
*/ chunk_hooks->decommit(chunk, chunksize, 0, map_bias << LG_PAGE, arena->ind); } - chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk, - chunksize, *commit); + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk, + chunksize, sn, *zero, *commit); chunk = NULL; } - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); return (chunk); } static arena_chunk_t * -arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit) +arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero, + bool *commit) { arena_chunk_t *chunk; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + size_t sn; - chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize, - chunksize, zero, true); + chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize, + chunksize, &sn, zero, commit, true); if (chunk != NULL) { - if (arena_chunk_register(arena, chunk, *zero)) { - chunk_dalloc_cache(arena, &chunk_hooks, chunk, - chunksize, true); + if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) { + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, + chunksize, sn, true); return (NULL); } - *commit = true; } if (chunk == NULL) { - chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks, - zero, commit); + chunk = arena_chunk_alloc_internal_hard(tsdn, arena, + &chunk_hooks, zero, commit); } if (config_stats && chunk != NULL) { @@ -645,7 +650,7 @@ arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit) } static arena_chunk_t * -arena_chunk_init_hard(arena_t *arena) +arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena) { arena_chunk_t *chunk; bool zero, commit; @@ -655,14 +660,16 @@ arena_chunk_init_hard(arena_t *arena) zero = false; commit = false; - chunk = arena_chunk_alloc_internal(arena, &zero, &commit); + chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit); if (chunk == NULL) return (NULL); + chunk->hugepage = true; + /* * Initialize the map to contain one maximal free untouched run. Mark - * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted - * chunk. + * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed + * or decommitted chunk. */ flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED; flag_decommitted = commit ? 
0 : CHUNK_MAP_DECOMMITTED; @@ -674,17 +681,18 @@ arena_chunk_init_hard(arena_t *arena) */ if (!zero) { JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( - (void *)arena_bitselm_get(chunk, map_bias+1), - (size_t)((uintptr_t) arena_bitselm_get(chunk, - chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk, - map_bias+1))); + (void *)arena_bitselm_get_const(chunk, map_bias+1), + (size_t)((uintptr_t)arena_bitselm_get_const(chunk, + chunk_npages-1) - + (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1))); for (i = map_bias+1; i < chunk_npages-1; i++) arena_mapbits_internal_set(chunk, i, flag_unzeroed); } else { JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void - *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t) - arena_bitselm_get(chunk, chunk_npages-1) - - (uintptr_t)arena_bitselm_get(chunk, map_bias+1))); + *)arena_bitselm_get_const(chunk, map_bias+1), + (size_t)((uintptr_t)arena_bitselm_get_const(chunk, + chunk_npages-1) - + (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1))); if (config_debug) { for (i = map_bias+1; i < chunk_npages-1; i++) { assert(arena_mapbits_unzeroed_get(chunk, i) == @@ -699,28 +707,85 @@ arena_chunk_init_hard(arena_t *arena) } static arena_chunk_t * -arena_chunk_alloc(arena_t *arena) +arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena) { arena_chunk_t *chunk; if (arena->spare != NULL) chunk = arena_chunk_init_spare(arena); else { - chunk = arena_chunk_init_hard(arena); + chunk = arena_chunk_init_hard(tsdn, arena); if (chunk == NULL) return (NULL); } - /* Insert the run into the runs_avail tree. */ + ql_elm_new(&chunk->node, ql_link); + ql_tail_insert(&arena->achunks, &chunk->node, ql_link); arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); return (chunk); } static void -arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) +arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) +{ + size_t sn, hugepage; + bool committed; + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + + chunk_deregister(chunk, &chunk->node); + + sn = extent_node_sn_get(&chunk->node); + hugepage = chunk->hugepage; + committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0); + if (!committed) { + /* + * Decommit the header. Mark the chunk as decommitted even if + * header decommit fails, since treating a partially committed + * chunk as committed has a high potential for causing later + * access of decommitted memory. + */ + chunk_hooks = chunk_hooks_get(tsdn, arena); + chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE, + arena->ind); + } + if (!hugepage) { + /* + * Convert chunk back to the default state, so that all + * subsequent chunk allocations start out with chunks that can + * be backed by transparent huge pages. 
+ */ + pages_huge(chunk, chunksize); + } + + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize, + sn, committed); + + if (config_stats) { + arena->stats.mapped -= chunksize; + arena->stats.metadata_mapped -= (map_bias << LG_PAGE); + } +} + +static void +arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare) { + assert(arena->spare != spare); + + if (arena_mapbits_dirty_get(spare, map_bias) != 0) { + arena_run_dirty_remove(arena, spare, map_bias, + chunk_npages-map_bias); + } + + arena_chunk_discard(tsdn, arena, spare); +} + +static void +arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) +{ + arena_chunk_t *spare; + assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == @@ -732,49 +797,14 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) assert(arena_mapbits_decommitted_get(chunk, map_bias) == arena_mapbits_decommitted_get(chunk, chunk_npages-1)); - /* - * Remove run from the runs_avail tree, so that the arena does not use - * it. - */ + /* Remove run from runs_avail, so that the arena does not use it. */ arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); - if (arena->spare != NULL) { - arena_chunk_t *spare = arena->spare; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - bool committed; - - arena->spare = chunk; - if (arena_mapbits_dirty_get(spare, map_bias) != 0) { - arena_run_dirty_remove(arena, spare, map_bias, - chunk_npages-map_bias); - } - - chunk_deregister(spare, &spare->node); - - committed = (arena_mapbits_decommitted_get(spare, map_bias) == - 0); - if (!committed) { - /* - * Decommit the header. Mark the chunk as decommitted - * even if header decommit fails, since treating a - * partially committed chunk as committed has a high - * potential for causing later access of decommitted - * memory. 
- */ - chunk_hooks = chunk_hooks_get(arena); - chunk_hooks.decommit(spare, chunksize, 0, map_bias << - LG_PAGE, arena->ind); - } - - chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare, - chunksize, committed); - - if (config_stats) { - arena->stats.mapped -= chunksize; - arena->stats.metadata_mapped -= (map_bias << LG_PAGE); - } - } else - arena->spare = chunk; + ql_remove(&arena->achunks, &chunk->node, ql_link); + spare = arena->spare; + arena->spare = chunk; + if (spare != NULL) + arena_spare_discard(tsdn, arena, spare); } static void @@ -817,6 +847,17 @@ arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) } static void +arena_huge_reset_stats_cancel(arena_t *arena, size_t usize) +{ + szind_t index = size2index(usize) - nlclasses - NBINS; + + cassert(config_stats); + + arena->stats.ndalloc_huge++; + arena->stats.hstats[index].ndalloc--; +} + +static void arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) { szind_t index = size2index(usize) - nlclasses - NBINS; @@ -847,243 +888,240 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, } extent_node_t * -arena_node_alloc(arena_t *arena) +arena_node_alloc(tsdn_t *tsdn, arena_t *arena) { extent_node_t *node; - malloc_mutex_lock(&arena->node_cache_mtx); + malloc_mutex_lock(tsdn, &arena->node_cache_mtx); node = ql_last(&arena->node_cache, ql_link); if (node == NULL) { - malloc_mutex_unlock(&arena->node_cache_mtx); - return (base_alloc(sizeof(extent_node_t))); + malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); + return (base_alloc(tsdn, sizeof(extent_node_t))); } ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); - malloc_mutex_unlock(&arena->node_cache_mtx); + malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); return (node); } void -arena_node_dalloc(arena_t *arena, extent_node_t *node) +arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node) { - malloc_mutex_lock(&arena->node_cache_mtx); + malloc_mutex_lock(tsdn, &arena->node_cache_mtx); ql_elm_new(node, ql_link); ql_tail_insert(&arena->node_cache, node, ql_link); - malloc_mutex_unlock(&arena->node_cache_mtx); + malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); } static void * -arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, - size_t usize, size_t alignment, bool *zero, size_t csize) +arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn, + bool *zero, size_t csize) { void *ret; bool commit = true; - ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment, - zero, &commit); + ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize, + alignment, sn, zero, &commit); if (ret == NULL) { /* Revert optimistic stats updates. 
*/ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena_huge_malloc_stats_update_undo(arena, usize); arena->stats.mapped -= usize; } - arena->nactive -= (usize >> LG_PAGE); - malloc_mutex_unlock(&arena->lock); + arena_nactive_sub(arena, usize >> LG_PAGE); + malloc_mutex_unlock(tsdn, &arena->lock); } return (ret); } void * -arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, - bool *zero) +arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, size_t *sn, bool *zero) { void *ret; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; size_t csize = CHUNK_CEILING(usize); + bool commit = true; - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); /* Optimistically update stats. */ if (config_stats) { arena_huge_malloc_stats_update(arena, usize); arena->stats.mapped += usize; } - arena->nactive += (usize >> LG_PAGE); + arena_nactive_add(arena, usize >> LG_PAGE); - ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment, - zero, true); - malloc_mutex_unlock(&arena->lock); + ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize, + alignment, sn, zero, &commit, true); + malloc_mutex_unlock(tsdn, &arena->lock); if (ret == NULL) { - ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize, - alignment, zero, csize); + ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks, + usize, alignment, sn, zero, csize); } - if (config_stats && ret != NULL) - stats_cactive_add(usize); return (ret); } void -arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize) +arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize, + size_t sn) { chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; size_t csize; csize = CHUNK_CEILING(usize); - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena_huge_dalloc_stats_update(arena, usize); arena->stats.mapped -= usize; - stats_cactive_sub(usize); } - arena->nactive -= (usize >> LG_PAGE); + arena_nactive_sub(arena, usize >> LG_PAGE); - chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true); - malloc_mutex_unlock(&arena->lock); + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true); + malloc_mutex_unlock(tsdn, &arena->lock); } void -arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, - size_t usize) +arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk, + size_t oldsize, size_t usize) { assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); assert(oldsize != usize); - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) arena_huge_ralloc_stats_update(arena, oldsize, usize); - if (oldsize < usize) { - size_t udiff = usize - oldsize; - arena->nactive += udiff >> LG_PAGE; - if (config_stats) - stats_cactive_add(udiff); - } else { - size_t udiff = oldsize - usize; - arena->nactive -= udiff >> LG_PAGE; - if (config_stats) - stats_cactive_sub(udiff); - } - malloc_mutex_unlock(&arena->lock); + if (oldsize < usize) + arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE); + else + arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE); + malloc_mutex_unlock(tsdn, &arena->lock); } void -arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, - size_t usize) +arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk, + size_t oldsize, size_t usize, size_t sn) { size_t udiff = oldsize - usize; size_t cdiff = 
CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena_huge_ralloc_stats_update(arena, oldsize, usize); - if (cdiff != 0) { + if (cdiff != 0) arena->stats.mapped -= cdiff; - stats_cactive_sub(udiff); - } } - arena->nactive -= udiff >> LG_PAGE; + arena_nactive_sub(arena, udiff >> LG_PAGE); if (cdiff != 0) { chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(usize)); - chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true); + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, + sn, true); } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } static bool -arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, - void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk, - size_t udiff, size_t cdiff) +arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize, + size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff) { bool err; bool commit = true; - err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize, - zero, &commit) == NULL); + err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, + chunksize, sn, zero, &commit) == NULL); if (err) { /* Revert optimistic stats updates. */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena_huge_ralloc_stats_update_undo(arena, oldsize, usize); arena->stats.mapped -= cdiff; } - arena->nactive -= (udiff >> LG_PAGE); - malloc_mutex_unlock(&arena->lock); + arena_nactive_sub(arena, udiff >> LG_PAGE); + malloc_mutex_unlock(tsdn, &arena->lock); } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, cdiff, true, arena->ind)) { - chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero, - true); + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, + *sn, *zero, true); err = true; } return (err); } bool -arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, - size_t usize, bool *zero) +arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk, + size_t oldsize, size_t usize, bool *zero) { bool err; - chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); + chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena); void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); size_t udiff = usize - oldsize; size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); + size_t sn; + bool commit = true; - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); /* Optimistically update stats. 
*/ if (config_stats) { arena_huge_ralloc_stats_update(arena, oldsize, usize); arena->stats.mapped += cdiff; } - arena->nactive += (udiff >> LG_PAGE); + arena_nactive_add(arena, udiff >> LG_PAGE); - err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff, - chunksize, zero, true) == NULL); - malloc_mutex_unlock(&arena->lock); + err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, + chunksize, &sn, zero, &commit, true) == NULL); + malloc_mutex_unlock(tsdn, &arena->lock); if (err) { - err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks, - chunk, oldsize, usize, zero, nchunk, udiff, - cdiff); + err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena, + &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk, + udiff, cdiff); } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, cdiff, true, arena->ind)) { - chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero, - true); + chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff, + sn, *zero, true); err = true; } - if (config_stats && !err) - stats_cactive_add(udiff); return (err); } /* * Do first-best-fit run selection, i.e. select the lowest run that best fits. - * Run sizes are quantized, so not all candidate runs are necessarily exactly - * the same size. + * Run sizes are indexed, so not all candidate runs are necessarily exactly the + * same size. */ static arena_run_t * arena_run_first_best_fit(arena_t *arena, size_t size) { - size_t search_size = run_quantize_first(size); - arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size); - arena_chunk_map_misc_t *miscelm = - arena_avail_tree_nsearch(&arena->runs_avail, key); - if (miscelm == NULL) - return (NULL); - return (&miscelm->run); + pszind_t pind, i; + + pind = psz2ind(run_quantize_ceil(size)); + + for (i = pind; pind2sz(i) <= chunksize; i++) { + arena_chunk_map_misc_t *miscelm = arena_run_heap_first( + &arena->runs_avail[i]); + if (miscelm != NULL) + return (&miscelm->run); + } + + return (NULL); } static arena_run_t * arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) { - arena_run_t *run = arena_run_first_best_fit(arena, s2u(size)); + arena_run_t *run = arena_run_first_best_fit(arena, size); if (run != NULL) { if (arena_run_split_large(arena, run, size, zero)) run = NULL; @@ -1092,7 +1130,7 @@ arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) } static arena_run_t * -arena_run_alloc_large(arena_t *arena, size_t size, bool zero) +arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero) { arena_chunk_t *chunk; arena_run_t *run; @@ -1108,9 +1146,9 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero) /* * No usable runs. Create a new chunk from which to allocate the run. */ - chunk = arena_chunk_alloc(arena); + chunk = arena_chunk_alloc(tsdn, arena); if (chunk != NULL) { - run = &arena_miscelm_get(chunk, map_bias)->run; + run = &arena_miscelm_get_mutable(chunk, map_bias)->run; if (arena_run_split_large(arena, run, size, zero)) run = NULL; return (run); @@ -1136,7 +1174,7 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind) } static arena_run_t * -arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind) +arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind) { arena_chunk_t *chunk; arena_run_t *run; @@ -1153,9 +1191,9 @@ arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind) /* * No usable runs. Create a new chunk from which to allocate the run. 
*/ - chunk = arena_chunk_alloc(arena); + chunk = arena_chunk_alloc(tsdn, arena); if (chunk != NULL) { - run = &arena_miscelm_get(chunk, map_bias)->run; + run = &arena_miscelm_get_mutable(chunk, map_bias)->run; if (arena_run_split_small(arena, run, size, binind)) run = NULL; return (run); @@ -1178,42 +1216,239 @@ arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult) } ssize_t -arena_lg_dirty_mult_get(arena_t *arena) +arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena) { ssize_t lg_dirty_mult; - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); lg_dirty_mult = arena->lg_dirty_mult; - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (lg_dirty_mult); } bool -arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult) +arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult) { if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) return (true); - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); arena->lg_dirty_mult = lg_dirty_mult; - arena_maybe_purge(arena); - malloc_mutex_unlock(&arena->lock); + arena_maybe_purge(tsdn, arena); + malloc_mutex_unlock(tsdn, &arena->lock); return (false); } -void -arena_maybe_purge(arena_t *arena) +static void +arena_decay_deadline_init(arena_t *arena) +{ + + assert(opt_purge == purge_mode_decay); + + /* + * Generate a new deadline that is uniformly random within the next + * epoch after the current one. + */ + nstime_copy(&arena->decay.deadline, &arena->decay.epoch); + nstime_add(&arena->decay.deadline, &arena->decay.interval); + if (arena->decay.time > 0) { + nstime_t jitter; + + nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state, + nstime_ns(&arena->decay.interval))); + nstime_add(&arena->decay.deadline, &jitter); + } +} + +static bool +arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time) +{ + + assert(opt_purge == purge_mode_decay); + + return (nstime_compare(&arena->decay.deadline, time) <= 0); +} + +static size_t +arena_decay_backlog_npages_limit(const arena_t *arena) +{ + static const uint64_t h_steps[] = { +#define STEP(step, h, x, y) \ + h, + SMOOTHSTEP +#undef STEP + }; + uint64_t sum; + size_t npages_limit_backlog; + unsigned i; + + assert(opt_purge == purge_mode_decay); + + /* + * For each element of decay_backlog, multiply by the corresponding + * fixed-point smoothstep decay factor. Sum the products, then divide + * to round down to the nearest whole number of pages. + */ + sum = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) + sum += arena->decay.backlog[i] * h_steps[i]; + npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); + + return (npages_limit_backlog); +} + +static void +arena_decay_backlog_update_last(arena_t *arena) { + size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ? 
+ arena->ndirty - arena->decay.ndirty : 0; + arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta; +} + +static void +arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64) +{ + + if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { + memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) * + sizeof(size_t)); + } else { + size_t nadvance_z = (size_t)nadvance_u64; + + assert((uint64_t)nadvance_z == nadvance_u64); + + memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z], + (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); + if (nadvance_z > 1) { + memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS - + nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); + } + } + + arena_decay_backlog_update_last(arena); +} + +static void +arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time) +{ + uint64_t nadvance_u64; + nstime_t delta; + + assert(opt_purge == purge_mode_decay); + assert(arena_decay_deadline_reached(arena, time)); + + nstime_copy(&delta, time); + nstime_subtract(&delta, &arena->decay.epoch); + nadvance_u64 = nstime_divide(&delta, &arena->decay.interval); + assert(nadvance_u64 > 0); + + /* Add nadvance_u64 decay intervals to epoch. */ + nstime_copy(&delta, &arena->decay.interval); + nstime_imultiply(&delta, nadvance_u64); + nstime_add(&arena->decay.epoch, &delta); + + /* Set a new deadline. */ + arena_decay_deadline_init(arena); + + /* Update the backlog. */ + arena_decay_backlog_update(arena, nadvance_u64); +} + +static void +arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena) +{ + size_t ndirty_limit = arena_decay_backlog_npages_limit(arena); + + if (arena->ndirty > ndirty_limit) + arena_purge_to_limit(tsdn, arena, ndirty_limit); + arena->decay.ndirty = arena->ndirty; +} + +static void +arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time) +{ + + arena_decay_epoch_advance_helper(arena, time); + arena_decay_epoch_advance_purge(tsdn, arena); +} + +static void +arena_decay_init(arena_t *arena, ssize_t decay_time) +{ + + arena->decay.time = decay_time; + if (decay_time > 0) { + nstime_init2(&arena->decay.interval, decay_time, 0); + nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS); + } + + nstime_init(&arena->decay.epoch, 0); + nstime_update(&arena->decay.epoch); + arena->decay.jitter_state = (uint64_t)(uintptr_t)arena; + arena_decay_deadline_init(arena); + arena->decay.ndirty = arena->ndirty; + memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); +} + +static bool +arena_decay_time_valid(ssize_t decay_time) +{ + + if (decay_time < -1) + return (false); + if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX) + return (true); + return (false); +} + +ssize_t +arena_decay_time_get(tsdn_t *tsdn, arena_t *arena) +{ + ssize_t decay_time; + + malloc_mutex_lock(tsdn, &arena->lock); + decay_time = arena->decay.time; + malloc_mutex_unlock(tsdn, &arena->lock); + + return (decay_time); +} + +bool +arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) +{ + + if (!arena_decay_time_valid(decay_time)) + return (true); + + malloc_mutex_lock(tsdn, &arena->lock); + /* + * Restart decay backlog from scratch, which may cause many dirty pages + * to be immediately purged. It would conceptually be possible to map + * the old backlog onto the new backlog, but there is no justification + * for such complexity since decay_time changes are intended to be + * infrequent, either between the {-1, 0, >0} states, or a one-time + * arbitrary change during initial arena configuration. 
+ */ + arena_decay_init(arena, decay_time); + arena_maybe_purge(tsdn, arena); + malloc_mutex_unlock(tsdn, &arena->lock); + + return (false); +} + +static void +arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena) +{ + + assert(opt_purge == purge_mode_ratio); /* Don't purge if the option is disabled. */ if (arena->lg_dirty_mult < 0) return; - /* Don't recursively purge. */ - if (arena->purging) - return; + /* * Iterate, since preventing recursive purging could otherwise leave too * many dirty pages. @@ -1228,10 +1463,68 @@ arena_maybe_purge(arena_t *arena) */ if (arena->ndirty <= threshold) return; - arena_purge(arena, false); + arena_purge_to_limit(tsdn, arena, threshold); } } +static void +arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena) +{ + nstime_t time; + + assert(opt_purge == purge_mode_decay); + + /* Purge all or nothing if the option is disabled. */ + if (arena->decay.time <= 0) { + if (arena->decay.time == 0) + arena_purge_to_limit(tsdn, arena, 0); + return; + } + + nstime_init(&time, 0); + nstime_update(&time); + if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch, + &time) > 0)) { + /* + * Time went backwards. Move the epoch back in time and + * generate a new deadline, with the expectation that time + * typically flows forward for long enough periods of time that + * epochs complete. Unfortunately, this strategy is susceptible + * to clock jitter triggering premature epoch advances, but + * clock jitter estimation and compensation isn't feasible here + * because calls into this code are event-driven. + */ + nstime_copy(&arena->decay.epoch, &time); + arena_decay_deadline_init(arena); + } else { + /* Verify that time does not go backwards. */ + assert(nstime_compare(&arena->decay.epoch, &time) <= 0); + } + + /* + * If the deadline has been reached, advance to the current epoch and + * purge to the new limit if necessary. Note that dirty pages created + * during the current epoch are not subject to purge until a future + * epoch, so as a result purging only happens during epoch advances. + */ + if (arena_decay_deadline_reached(arena, &time)) + arena_decay_epoch_advance(tsdn, arena, &time); +} + +void +arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) +{ + + /* Don't recursively purge. */ + if (arena->purging) + return; + + if (opt_purge == purge_mode_ratio) + arena_maybe_purge_ratio(tsdn, arena); + else + arena_maybe_purge_decay(tsdn, arena); +} + static size_t arena_dirty_count(arena_t *arena) { @@ -1267,35 +1560,15 @@ arena_dirty_count(arena_t *arena) } static size_t -arena_compute_npurge(arena_t *arena, bool all) -{ - size_t npurge; - - /* - * Compute the minimum number of pages that this thread should try to - * purge. - */ - if (!all) { - size_t threshold = (arena->nactive >> arena->lg_dirty_mult); - threshold = threshold < chunk_npages ? chunk_npages : threshold; - - npurge = arena->ndirty - threshold; - } else - npurge = arena->ndirty; - - return (npurge); -} - -static size_t -arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all, - size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel, +arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel, extent_node_t *purge_chunks_sentinel) { arena_runs_dirty_link_t *rdelm, *rdelm_next; extent_node_t *chunkselm; size_t nstashed = 0; - /* Stash at least npurge pages. */ + /* Stash runs/chunks according to ndirty_limit. 
*/ for (rdelm = qr_next(&arena->runs_dirty, rd_link), chunkselm = qr_next(&arena->chunks_cache, cc_link); rdelm != &arena->runs_dirty; rdelm = rdelm_next) { @@ -1304,24 +1577,32 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all, if (rdelm == &chunkselm->rd) { extent_node_t *chunkselm_next; - bool zero; + size_t sn; + bool zero, commit; UNUSED void *chunk; + npages = extent_node_size_get(chunkselm) >> LG_PAGE; + if (opt_purge == purge_mode_decay && arena->ndirty - + (nstashed + npages) < ndirty_limit) + break; + chunkselm_next = qr_next(chunkselm, cc_link); /* * Allocate. chunkselm remains valid due to the * dalloc_node=false argument to chunk_alloc_cache(). */ zero = false; - chunk = chunk_alloc_cache(arena, chunk_hooks, + commit = false; + chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks, extent_node_addr_get(chunkselm), - extent_node_size_get(chunkselm), chunksize, &zero, - false); + extent_node_size_get(chunkselm), chunksize, &sn, + &zero, &commit, false); assert(chunk == extent_node_addr_get(chunkselm)); assert(zero == extent_node_zeroed_get(chunkselm)); extent_node_dirty_insert(chunkselm, purge_runs_sentinel, purge_chunks_sentinel); - npages = extent_node_size_get(chunkselm) >> LG_PAGE; + assert(npages == (extent_node_size_get(chunkselm) >> + LG_PAGE)); chunkselm = chunkselm_next; } else { arena_chunk_t *chunk = @@ -1334,6 +1615,9 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all, arena_mapbits_unallocated_size_get(chunk, pageind); npages = run_size >> LG_PAGE; + if (opt_purge == purge_mode_decay && arena->ndirty - + (nstashed + npages) < ndirty_limit) + break; assert(pageind + npages <= chunk_npages); assert(arena_mapbits_dirty_get(chunk, pageind) == @@ -1344,7 +1628,7 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all, * prior to allocation. */ if (chunk == arena->spare) - arena_chunk_alloc(arena); + arena_chunk_alloc(tsdn, arena); /* Temporarily allocate the free dirty run. */ arena_run_split_large(arena, run, run_size, false); @@ -1359,7 +1643,8 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all, } nstashed += npages; - if (!all && nstashed >= npurge) + if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <= + ndirty_limit) break; } @@ -1367,7 +1652,7 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all, } static size_t -arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, +arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, arena_runs_dirty_link_t *purge_runs_sentinel, extent_node_t *purge_chunks_sentinel) { @@ -1379,7 +1664,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, nmadvise = 0; npurged = 0; - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); for (rdelm = qr_next(purge_runs_sentinel, rd_link), chunkselm = qr_next(purge_chunks_sentinel, cc_link); rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) { @@ -1408,6 +1693,17 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, run_size = arena_mapbits_large_size_get(chunk, pageind); npages = run_size >> LG_PAGE; + /* + * If this is the first run purged within chunk, mark + * the chunk as non-huge. This will prevent all use of + * transparent huge pages for this chunk until the chunk + * as a whole is deallocated. 
+ */ + if (chunk->hugepage) { + pages_nohuge(chunk, chunksize); + chunk->hugepage = false; + } + assert(pageind + npages <= chunk_npages); assert(!arena_mapbits_decommitted_get(chunk, pageind)); assert(!arena_mapbits_decommitted_get(chunk, @@ -1418,7 +1714,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, flag_unzeroed = 0; flags = CHUNK_MAP_DECOMMITTED; } else { - flag_unzeroed = chunk_purge_wrapper(arena, + flag_unzeroed = chunk_purge_wrapper(tsdn, arena, chunk_hooks, chunk, chunksize, pageind << LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0; flags = flag_unzeroed; @@ -1449,7 +1745,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, if (config_stats) nmadvise++; } - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena->stats.nmadvise += nmadvise; @@ -1460,7 +1756,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, } static void -arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks, +arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, arena_runs_dirty_link_t *purge_runs_sentinel, extent_node_t *purge_chunks_sentinel) { @@ -1477,13 +1773,14 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks, cc_link); void *addr = extent_node_addr_get(chunkselm); size_t size = extent_node_size_get(chunkselm); + size_t sn = extent_node_sn_get(chunkselm); bool zeroed = extent_node_zeroed_get(chunkselm); bool committed = extent_node_committed_get(chunkselm); extent_node_dirty_remove(chunkselm); - arena_node_dalloc(arena, chunkselm); + arena_node_dalloc(tsdn, arena, chunkselm); chunkselm = chunkselm_next; - chunk_dalloc_arena(arena, chunk_hooks, addr, size, - zeroed, committed); + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr, + size, sn, zeroed, committed); } else { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); @@ -1494,16 +1791,26 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks, pageind) != 0); arena_run_t *run = &miscelm->run; qr_remove(rdelm, rd_link); - arena_run_dalloc(arena, run, false, true, decommitted); + arena_run_dalloc(tsdn, arena, run, false, true, + decommitted); } } } +/* + * NB: ndirty_limit is interpreted differently depending on opt_purge: + * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the + * desired state: + * (arena->ndirty <= ndirty_limit) + * - purge_mode_decay: Purge as many dirty runs/chunks as possible without + * violating the invariant: + * (arena->ndirty >= ndirty_limit) + */ static void -arena_purge(arena_t *arena, bool all) +arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit) { - chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); - size_t npurge, npurgeable, npurged; + chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena); + size_t npurge, npurged; arena_runs_dirty_link_t purge_runs_sentinel; extent_node_t purge_chunks_sentinel; @@ -1517,34 +1824,183 @@ arena_purge(arena_t *arena, bool all) size_t ndirty = arena_dirty_count(arena); assert(ndirty == arena->ndirty); } - assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all); - - if (config_stats) - arena->stats.npurge++; + assert(opt_purge != purge_mode_ratio || (arena->nactive >> + arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0); - npurge = arena_compute_npurge(arena, all); qr_new(&purge_runs_sentinel, rd_link); extent_node_dirty_linkage_init(&purge_chunks_sentinel); - npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge, + npurge = 
arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit, &purge_runs_sentinel, &purge_chunks_sentinel); - assert(npurgeable >= npurge); - npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel, - &purge_chunks_sentinel); - assert(npurged == npurgeable); - arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel, + if (npurge == 0) + goto label_return; + npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks, + &purge_runs_sentinel, &purge_chunks_sentinel); + assert(npurged == npurge); + arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel, &purge_chunks_sentinel); + if (config_stats) + arena->stats.npurge++; + +label_return: arena->purging = false; } void -arena_purge_all(arena_t *arena) +arena_purge(tsdn_t *tsdn, arena_t *arena, bool all) { - malloc_mutex_lock(&arena->lock); - arena_purge(arena, true); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); + if (all) + arena_purge_to_limit(tsdn, arena, 0); + else + arena_maybe_purge(tsdn, arena); + malloc_mutex_unlock(tsdn, &arena->lock); +} + +static void +arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk) +{ + size_t pageind, npages; + + cassert(config_prof); + assert(opt_prof); + + /* + * Iterate over the allocated runs and remove profiled allocations from + * the sample set. + */ + for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { + if (arena_mapbits_allocated_get(chunk, pageind) != 0) { + if (arena_mapbits_large_get(chunk, pageind) != 0) { + void *ptr = (void *)((uintptr_t)chunk + (pageind + << LG_PAGE)); + size_t usize = isalloc(tsd_tsdn(tsd), ptr, + config_prof); + + prof_free(tsd, ptr, usize); + npages = arena_mapbits_large_size_get(chunk, + pageind) >> LG_PAGE; + } else { + /* Skip small run. */ + size_t binind = arena_mapbits_binind_get(chunk, + pageind); + arena_bin_info_t *bin_info = + &arena_bin_info[binind]; + npages = bin_info->run_size >> LG_PAGE; + } + } else { + /* Skip unallocated run. */ + npages = arena_mapbits_unallocated_size_get(chunk, + pageind) >> LG_PAGE; + } + assert(pageind + npages <= chunk_npages); + } +} + +void +arena_reset(tsd_t *tsd, arena_t *arena) +{ + unsigned i; + extent_node_t *node; + + /* + * Locking in this function is unintuitive. The caller guarantees that + * no concurrent operations are happening in this arena, but there are + * still reasons that some locking is necessary: + * + * - Some of the functions in the transitive closure of calls assume + * appropriate locks are held, and in some cases these locks are + * temporarily dropped to avoid lock order reversal or deadlock due to + * reentry. + * - mallctl("epoch", ...) may concurrently refresh stats. While + * strictly speaking this is a "concurrent operation", disallowing + * stats refreshes would impose an inconvenient burden. + */ + + /* Remove large allocations from prof sample set. */ + if (config_prof && opt_prof) { + ql_foreach(node, &arena->achunks, ql_link) { + arena_achunk_prof_reset(tsd, arena, + extent_node_addr_get(node)); + } + } + + /* Reset curruns for large size classes. */ + if (config_stats) { + for (i = 0; i < nlclasses; i++) + arena->stats.lstats[i].curruns = 0; + } + + /* Huge allocations. 
*/ + malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); + for (node = ql_last(&arena->huge, ql_link); node != NULL; node = + ql_last(&arena->huge, ql_link)) { + void *ptr = extent_node_addr_get(node); + size_t usize; + + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); + if (config_stats || (config_prof && opt_prof)) + usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); + /* Remove huge allocation from prof sample set. */ + if (config_prof && opt_prof) + prof_free(tsd, ptr, usize); + huge_dalloc(tsd_tsdn(tsd), ptr); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); + /* Cancel out unwanted effects on stats. */ + if (config_stats) + arena_huge_reset_stats_cancel(arena, usize); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); + + malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); + + /* Bins. */ + for (i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + bin->runcur = NULL; + arena_run_heap_new(&bin->runs); + if (config_stats) { + bin->stats.curregs = 0; + bin->stats.curruns = 0; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + } + + /* + * Re-initialize runs_dirty such that the chunks_cache and runs_dirty + * chains directly correspond. + */ + qr_new(&arena->runs_dirty, rd_link); + for (node = qr_next(&arena->chunks_cache, cc_link); + node != &arena->chunks_cache; node = qr_next(node, cc_link)) { + qr_new(&node->rd, rd_link); + qr_meld(&arena->runs_dirty, &node->rd, rd_link); + } + + /* Arena chunks. */ + for (node = ql_last(&arena->achunks, ql_link); node != NULL; node = + ql_last(&arena->achunks, ql_link)) { + ql_remove(&arena->achunks, node, ql_link); + arena_chunk_discard(tsd_tsdn(tsd), arena, + extent_node_addr_get(node)); + } + + /* Spare. */ + if (arena->spare != NULL) { + arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare); + arena->spare = NULL; + } + + assert(!arena->purging); + arena->nactive = 0; + + for (i = 0; i < NPSIZES; i++) + arena_run_heap_new(&arena->runs_avail[i]); + + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); } static void @@ -1660,21 +2116,9 @@ arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, return (size); } -static bool -arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - size_t run_ind = arena_miscelm_to_pageind(miscelm); - size_t offset = run_ind << LG_PAGE; - size_t length = arena_run_size_get(arena, chunk, run, run_ind); - - return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length, - arena->ind)); -} - static void -arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, - bool decommitted) +arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty, + bool cleaned, bool decommitted) { arena_chunk_t *chunk; arena_chunk_map_misc_t *miscelm; @@ -1687,8 +2131,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, assert(run_ind < chunk_npages); size = arena_run_size_get(arena, chunk, run, run_ind); run_pages = (size >> LG_PAGE); - arena_cactive_update(arena, 0, run_pages); - arena->nactive -= run_pages; + arena_nactive_sub(arena, run_pages); /* * The run is dirty if the caller claims to have dirtied it, as well as @@ -1735,7 +2178,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, if (size == arena_maxrun) { assert(run_ind == map_bias); assert(run_pages == (arena_maxrun >> LG_PAGE)); - arena_chunk_dalloc(arena, chunk); + arena_chunk_dalloc(tsdn, arena, 
chunk); } /* @@ -1746,21 +2189,12 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, * chances of spuriously crossing the dirty page purging threshold. */ if (dirty) - arena_maybe_purge(arena); + arena_maybe_purge(tsdn, arena); } static void -arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run) -{ - bool committed = arena_run_decommit(arena, chunk, run); - - arena_run_dalloc(arena, run, committed, false, !committed); -} - -static void -arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize) +arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, size_t oldsize, size_t newsize) { arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); size_t pageind = arena_miscelm_to_pageind(miscelm); @@ -1795,12 +2229,13 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind+head_npages))); - arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0)); + arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted != + 0)); } static void -arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize, bool dirty) +arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, size_t oldsize, size_t newsize, bool dirty) { arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); size_t pageind = arena_miscelm_to_pageind(miscelm); @@ -1837,20 +2272,10 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind+head_npages))); - tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages); + tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages); tail_run = &tail_miscelm->run; - arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted != - 0)); -} - -static arena_run_t * -arena_bin_runs_first(arena_bin_t *bin) -{ - arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs); - if (miscelm != NULL) - return (&miscelm->run); - - return (NULL); + arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted + != 0)); } static void @@ -1858,35 +2283,25 @@ arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) { arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - assert(arena_run_tree_search(&bin->runs, miscelm) == NULL); - - arena_run_tree_insert(&bin->runs, miscelm); -} - -static void -arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - - assert(arena_run_tree_search(&bin->runs, miscelm) != NULL); - - arena_run_tree_remove(&bin->runs, miscelm); + arena_run_heap_insert(&bin->runs, miscelm); } static arena_run_t * arena_bin_nonfull_run_tryget(arena_bin_t *bin) { - arena_run_t *run = arena_bin_runs_first(bin); - if (run != NULL) { - arena_bin_runs_remove(bin, run); - if (config_stats) - bin->stats.reruns++; - } - return (run); + arena_chunk_map_misc_t *miscelm; + + miscelm = arena_run_heap_remove_first(&bin->runs); + if (miscelm == NULL) + return (NULL); + if (config_stats) + bin->stats.reruns++; + + return (&miscelm->run); } static arena_run_t * -arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) +arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) { arena_run_t *run; szind_t binind; @@ -1902,19 +2317,19 @@ 
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) bin_info = &arena_bin_info[binind]; /* Allocate a new run. */ - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc_small(arena, bin_info->run_size, binind); + malloc_mutex_lock(tsdn, &arena->lock); + run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind); if (run != NULL) { /* Initialize run internals. */ run->binind = binind; run->nfree = bin_info->nregs; bitmap_init(run->bitmap, &bin_info->bitmap_info); } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); /********************************/ - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); if (run != NULL) { if (config_stats) { bin->stats.nruns++; @@ -1937,7 +2352,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ static void * -arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) +arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) { szind_t binind; arena_bin_info_t *bin_info; @@ -1946,7 +2361,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) binind = arena_bin_index(arena, bin); bin_info = &arena_bin_info[binind]; bin->runcur = NULL; - run = arena_bin_nonfull_run_get(arena, bin); + run = arena_bin_nonfull_run_get(tsdn, arena, bin); if (bin->runcur != NULL && bin->runcur->nfree > 0) { /* * Another thread updated runcur while this one ran without the @@ -1967,10 +2382,11 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) * were just deallocated from the run. */ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - if (run->nfree == bin_info->nregs) - arena_dalloc_bin_run(arena, chunk, run, bin); - else - arena_bin_lower_run(arena, chunk, run, bin); + if (run->nfree == bin_info->nregs) { + arena_dalloc_bin_run(tsdn, arena, chunk, run, + bin); + } else + arena_bin_lower_run(arena, run, bin); } return (ret); } @@ -1986,18 +2402,18 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) } void -arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind, - uint64_t prof_accumbytes) +arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin, + szind_t binind, uint64_t prof_accumbytes) { unsigned i, nfill; arena_bin_t *bin; assert(tbin->ncached == 0); - if (config_prof && arena_prof_accum(arena, prof_accumbytes)) - prof_idump(); + if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) + prof_idump(tsdn); bin = &arena->bins[binind]; - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> tbin->lg_fill_div); i < nfill; i++) { arena_run_t *run; @@ -2005,16 +2421,15 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind, if ((run = bin->runcur) != NULL && run->nfree > 0) ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); else - ptr = arena_bin_malloc_hard(arena, bin); + ptr = arena_bin_malloc_hard(tsdn, arena, bin); if (ptr == NULL) { /* * OOM. tbin->avail isn't yet filled down to its first * element, so the successful allocations (if any) must - * be moved to the base of tbin->avail before bailing - * out. + * be moved just before tbin->avail before bailing out. 
*/ if (i > 0) { - memmove(tbin->avail, &tbin->avail[nfill - i], + memmove(tbin->avail - i, tbin->avail - nfill, i * sizeof(void *)); } break; @@ -2024,7 +2439,7 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind, true); } /* Insert such that low regions get used first. */ - tbin->avail[nfill - 1 - i] = ptr; + *(tbin->avail - nfill + i) = ptr; } if (config_stats) { bin->stats.nmalloc += i; @@ -2033,29 +2448,31 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind, bin->stats.nfills++; tbin->tstats.nrequests = 0; } - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); tbin->ncached = i; + arena_decay_tick(tsdn, arena); } void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) { + size_t redzone_size = bin_info->redzone_size; + if (zero) { - size_t redzone_size = bin_info->redzone_size; - memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, - redzone_size); - memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, - redzone_size); + memset((void *)((uintptr_t)ptr - redzone_size), + JEMALLOC_ALLOC_JUNK, redzone_size); + memset((void *)((uintptr_t)ptr + bin_info->reg_size), + JEMALLOC_ALLOC_JUNK, redzone_size); } else { - memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, - bin_info->reg_interval); + memset((void *)((uintptr_t)ptr - redzone_size), + JEMALLOC_ALLOC_JUNK, bin_info->reg_interval); } } #ifdef JEMALLOC_JET #undef arena_redzone_corruption -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) +#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption) #endif static void arena_redzone_corruption(void *ptr, size_t usize, bool after, @@ -2070,7 +2487,7 @@ arena_redzone_corruption(void *ptr, size_t usize, bool after, #undef arena_redzone_corruption #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) arena_redzone_corruption_t *arena_redzone_corruption = - JEMALLOC_N(arena_redzone_corruption_impl); + JEMALLOC_N(n_arena_redzone_corruption); #endif static void @@ -2085,22 +2502,22 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) for (i = 1; i <= redzone_size; i++) { uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); - if (*byte != 0xa5) { + if (*byte != JEMALLOC_ALLOC_JUNK) { error = true; arena_redzone_corruption(ptr, size, false, i, *byte); if (reset) - *byte = 0xa5; + *byte = JEMALLOC_ALLOC_JUNK; } } for (i = 0; i < redzone_size; i++) { uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); - if (*byte != 0xa5) { + if (*byte != JEMALLOC_ALLOC_JUNK) { error = true; arena_redzone_corruption(ptr, size, true, i, *byte); if (reset) - *byte = 0xa5; + *byte = JEMALLOC_ALLOC_JUNK; } } } @@ -2111,7 +2528,7 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) #ifdef JEMALLOC_JET #undef arena_dalloc_junk_small -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) +#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small) #endif void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) @@ -2119,14 +2536,14 @@ arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) size_t redzone_size = bin_info->redzone_size; arena_redzones_validate(ptr, bin_info, false); - memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, + memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK, bin_info->reg_interval); } #ifdef JEMALLOC_JET #undef arena_dalloc_junk_small #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) 
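
The arena_tcache_fill_small() hunk above switches the tcache bin to a stack of pointers addressed relative to tbin->avail, which now points just past the cached slots: new items are written at avail[-nfill + i], and on a partial fill the i pointers that did arrive are packed so they end directly below avail. A minimal standalone sketch of that indexing; toy_tbin_t, toy_fill and alloc_one are hypothetical names, not jemalloc's types or API:

#include <stddef.h>
#include <string.h>

typedef struct {
	void		**avail;	/* points one past the cached-pointer stack */
	unsigned	ncached;
} toy_tbin_t;

static unsigned
toy_fill(toy_tbin_t *tbin, unsigned nfill, void *(*alloc_one)(void))
{
	unsigned i;

	for (i = 0; i < nfill; i++) {
		void *ptr = alloc_one();
		if (ptr == NULL) {
			/*
			 * Partial fill: keep the i pointers we did get
			 * contiguous, ending right at avail, as in the hunk.
			 */
			if (i > 0) {
				memmove(tbin->avail - i, tbin->avail - nfill,
				    i * sizeof(void *));
			}
			break;
		}
		/* Earliest-filled (lowest) regions sit deepest, so they pop first. */
		*(tbin->avail - nfill + i) = ptr;
	}
	tbin->ncached = i;
	return (i);
}
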
arena_dalloc_junk_small_t *arena_dalloc_junk_small = - JEMALLOC_N(arena_dalloc_junk_small_impl); + JEMALLOC_N(n_arena_dalloc_junk_small); #endif void @@ -2144,27 +2561,26 @@ arena_quarantine_junk_small(void *ptr, size_t usize) arena_redzones_validate(ptr, bin_info, true); } -void * -arena_malloc_small(arena_t *arena, size_t size, bool zero) +static void * +arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { void *ret; arena_bin_t *bin; + size_t usize; arena_run_t *run; - szind_t binind; - binind = size2index(size); assert(binind < NBINS); bin = &arena->bins[binind]; - size = index2size(binind); + usize = index2size(binind); - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); if ((run = bin->runcur) != NULL && run->nfree > 0) ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); else - ret = arena_bin_malloc_hard(arena, bin); + ret = arena_bin_malloc_hard(tsdn, arena, bin); if (ret == NULL) { - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); return (NULL); } @@ -2173,9 +2589,9 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero) bin->stats.nrequests++; bin->stats.curregs++; } - malloc_mutex_unlock(&bin->lock); - if (config_prof && !isthreaded && arena_prof_accum(arena, size)) - prof_idump(); + malloc_mutex_unlock(tsdn, &bin->lock); + if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize)) + prof_idump(tsdn); if (!zero) { if (config_fill) { @@ -2183,34 +2599,35 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero) arena_alloc_junk_small(ret, &arena_bin_info[binind], false); } else if (unlikely(opt_zero)) - memset(ret, 0, size); + memset(ret, 0, usize); } - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); } else { if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], true); } - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); + memset(ret, 0, usize); } + arena_decay_tick(tsdn, arena); return (ret); } void * -arena_malloc_large(arena_t *arena, size_t size, bool zero) +arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { void *ret; size_t usize; uintptr_t random_offset; arena_run_t *run; arena_chunk_map_misc_t *miscelm; - UNUSED bool idump; + UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false); /* Large allocation. */ - usize = s2u(size); - malloc_mutex_lock(&arena->lock); + usize = index2size(binind); + malloc_mutex_lock(tsdn, &arena->lock); if (config_cache_oblivious) { uint64_t r; @@ -2219,22 +2636,21 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero) * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64 * for 4 KiB pages and 64-byte cachelines. 
*/ - prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state, - UINT64_C(6364136223846793009), - UINT64_C(1442695040888963409)); + r = prng_lg_range_zu(&arena->offset_state, LG_PAGE - + LG_CACHELINE, false); random_offset = ((uintptr_t)r) << LG_CACHELINE; } else random_offset = 0; - run = arena_run_alloc_large(arena, usize + large_pad, zero); + run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero); if (run == NULL) { - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (NULL); } miscelm = arena_run_to_miscelm(run); ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + random_offset); if (config_stats) { - szind_t index = size2index(usize) - NBINS; + szind_t index = binind - NBINS; arena->stats.nmalloc_large++; arena->stats.nrequests_large++; @@ -2245,25 +2661,45 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero) } if (config_prof) idump = arena_prof_accum_locked(arena, usize); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); if (config_prof && idump) - prof_idump(); + prof_idump(tsdn); if (!zero) { if (config_fill) { if (unlikely(opt_junk_alloc)) - memset(ret, 0xa5, usize); + memset(ret, JEMALLOC_ALLOC_JUNK, usize); else if (unlikely(opt_zero)) memset(ret, 0, usize); } } + arena_decay_tick(tsdn, arena); return (ret); } +void * +arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, + bool zero) +{ + + assert(!tsdn_null(tsdn) || arena != NULL); + + if (likely(!tsdn_null(tsdn))) + arena = arena_choose(tsdn_tsd(tsdn), arena); + if (unlikely(arena == NULL)) + return (NULL); + + if (likely(size <= SMALL_MAXCLASS)) + return (arena_malloc_small(tsdn, arena, ind, zero)); + if (likely(size <= large_maxclass)) + return (arena_malloc_large(tsdn, arena, ind, zero)); + return (huge_malloc(tsdn, arena, index2size(ind), zero)); +} + /* Only handles large allocations that require more than page alignment. 
*/ static void * -arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, +arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) { void *ret; @@ -2273,19 +2709,21 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, arena_chunk_map_misc_t *miscelm; void *rpages; + assert(!tsdn_null(tsdn) || arena != NULL); assert(usize == PAGE_CEILING(usize)); - arena = arena_choose(tsd, arena); + if (likely(!tsdn_null(tsdn))) + arena = arena_choose(tsdn_tsd(tsdn), arena); if (unlikely(arena == NULL)) return (NULL); alignment = PAGE_CEILING(alignment); alloc_size = usize + large_pad + alignment - PAGE; - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc_large(arena, alloc_size, false); + malloc_mutex_lock(tsdn, &arena->lock); + run = arena_run_alloc_large(tsdn, arena, alloc_size, false); if (run == NULL) { - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (NULL); } chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); @@ -2300,16 +2738,16 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, arena_chunk_map_misc_t *head_miscelm = miscelm; arena_run_t *head_run = run; - miscelm = arena_miscelm_get(chunk, + miscelm = arena_miscelm_get_mutable(chunk, arena_miscelm_to_pageind(head_miscelm) + (leadsize >> LG_PAGE)); run = &miscelm->run; - arena_run_trim_head(arena, chunk, head_run, alloc_size, + arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size, alloc_size - leadsize); } if (trailsize != 0) { - arena_run_trim_tail(arena, chunk, run, usize + large_pad + + arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad + trailsize, usize + large_pad, false); } if (arena_run_init_large(arena, run, usize + large_pad, zero)) { @@ -2320,8 +2758,8 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, run_ind) != 0); assert(decommitted); /* Cause of OOM. */ - arena_run_dalloc(arena, run, dirty, false, decommitted); - malloc_mutex_unlock(&arena->lock); + arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted); + malloc_mutex_unlock(tsdn, &arena->lock); return (NULL); } ret = arena_miscelm_to_rpages(miscelm); @@ -2336,19 +2774,20 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].curruns++; } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); if (config_fill && !zero) { if (unlikely(opt_junk_alloc)) - memset(ret, 0xa5, usize); + memset(ret, JEMALLOC_ALLOC_JUNK, usize); else if (unlikely(opt_zero)) memset(ret, 0, usize); } + arena_decay_tick(tsdn, arena); return (ret); } void * -arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, +arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { void *ret; @@ -2356,7 +2795,8 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE && (usize & PAGE_MASK) == 0))) { /* Small; alignment doesn't require special run placement. */ - ret = arena_malloc(tsd, arena, usize, zero, tcache); + ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, + tcache, true); } else if (usize <= large_maxclass && alignment <= PAGE) { /* * Large; alignment doesn't require special run placement. 
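
arena_palloc_large() above obtains alignment beyond PAGE by over-allocating a run of usize + alignment - PAGE bytes (plus the large_pad run padding, omitted here), then trimming the lead and trail back off with arena_run_trim_head()/arena_run_trim_tail(). The pointer arithmetic in isolation, assuming a page-aligned run base and a power-of-two, page-multiple alignment; all toy_* names are hypothetical:

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define TOY_PAGE ((size_t)4096)

static void *
toy_align_within_run(void *base, size_t usize, size_t alignment,
    size_t *leadsize, size_t *trailsize)
{
	size_t alloc_size;
	uintptr_t addr, aligned;

	assert(((uintptr_t)base & (TOY_PAGE - 1)) == 0);	/* run base is page-aligned */
	assert(alignment >= TOY_PAGE && (alignment & (alignment - 1)) == 0);

	alloc_size = usize + alignment - TOY_PAGE;
	addr = (uintptr_t)base;
	aligned = (addr + alignment - 1) & ~((uintptr_t)alignment - 1);

	*leadsize = aligned - addr;			/* trimmed from the head */
	*trailsize = alloc_size - *leadsize - usize;	/* trimmed from the tail */
	assert(*leadsize <= alignment - TOY_PAGE);
	assert(*leadsize + usize + *trailsize == alloc_size);
	return ((void *)aligned);
}
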
@@ -2364,25 +2804,25 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, * the base of the run, so do some bit manipulation to retrieve * the base. */ - ret = arena_malloc(tsd, arena, usize, zero, tcache); + ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, + tcache, true); if (config_cache_oblivious) ret = (void *)((uintptr_t)ret & ~PAGE_MASK); } else { if (likely(usize <= large_maxclass)) { - ret = arena_palloc_large(tsd, arena, usize, alignment, + ret = arena_palloc_large(tsdn, arena, usize, alignment, zero); } else if (likely(alignment <= chunksize)) - ret = huge_malloc(tsd, arena, usize, zero, tcache); + ret = huge_malloc(tsdn, arena, usize, zero); else { - ret = huge_palloc(tsd, arena, usize, alignment, zero, - tcache); + ret = huge_palloc(tsdn, arena, usize, alignment, zero); } } return (ret); } void -arena_prof_promoted(const void *ptr, size_t size) +arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size) { arena_chunk_t *chunk; size_t pageind; @@ -2391,8 +2831,8 @@ arena_prof_promoted(const void *ptr, size_t size) cassert(config_prof); assert(ptr != NULL); assert(CHUNK_ADDR2BASE(ptr) != ptr); - assert(isalloc(ptr, false) == LARGE_MINCLASS); - assert(isalloc(ptr, true) == LARGE_MINCLASS); + assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); + assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS); assert(size <= SMALL_MAXCLASS); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); @@ -2401,8 +2841,8 @@ arena_prof_promoted(const void *ptr, size_t size) assert(binind < NBINS); arena_mapbits_large_binind_set(chunk, pageind, binind); - assert(isalloc(ptr, false) == LARGE_MINCLASS); - assert(isalloc(ptr, true) == size); + assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); + assert(isalloc(tsdn, ptr, true) == size); } static void @@ -2418,48 +2858,51 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, &chunk->node), bin); arena_bin_info_t *bin_info = &arena_bin_info[binind]; + /* + * The following block's conditional is necessary because if the + * run only contains one region, then it never gets inserted + * into the non-full runs tree. + */ if (bin_info->nregs != 1) { - /* - * This block's conditional is necessary because if the - * run only contains one region, then it never gets - * inserted into the non-full runs tree. 
- */ - arena_bin_runs_remove(bin, run); + arena_chunk_map_misc_t *miscelm = + arena_run_to_miscelm(run); + + arena_run_heap_remove(&bin->runs, miscelm); } } } static void -arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) +arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, arena_bin_t *bin) { assert(run != bin->runcur); - assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) == - NULL); - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ - malloc_mutex_lock(&arena->lock); - arena_run_dalloc_decommit(arena, chunk, run); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); + arena_run_dalloc(tsdn, arena, run, true, false, false); + malloc_mutex_unlock(tsdn, &arena->lock); /****************************/ - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); if (config_stats) bin->stats.curruns--; } static void -arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) +arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin) { /* - * Make sure that if bin->runcur is non-NULL, it refers to the lowest - * non-full run. It is okay to NULL runcur out rather than proactively - * keeping it pointing at the lowest non-full run. + * Make sure that if bin->runcur is non-NULL, it refers to the + * oldest/lowest non-full run. It is okay to NULL runcur out rather + * than proactively keeping it pointing at the oldest/lowest non-full + * run. */ - if ((uintptr_t)run < (uintptr_t)bin->runcur) { + if (bin->runcur != NULL && + arena_snad_comp(arena_run_to_miscelm(bin->runcur), + arena_run_to_miscelm(run)) > 0) { /* Switch runcur. 
*/ if (bin->runcur->nfree > 0) arena_bin_runs_insert(bin, bin->runcur); @@ -2471,8 +2914,8 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, } static void -arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_bits_t *bitselm, bool junked) +arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, arena_chunk_map_bits_t *bitselm, bool junked) { size_t pageind, rpages_ind; arena_run_t *run; @@ -2482,7 +2925,7 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); - run = &arena_miscelm_get(chunk, rpages_ind)->run; + run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; binind = run->binind; bin = &arena->bins[binind]; bin_info = &arena_bin_info[binind]; @@ -2493,9 +2936,9 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_run_reg_dalloc(run, ptr); if (run->nfree == bin_info->nregs) { arena_dissociate_bin_run(chunk, run, bin); - arena_dalloc_bin_run(arena, chunk, run, bin); + arena_dalloc_bin_run(tsdn, arena, chunk, run, bin); } else if (run->nfree == 1 && run != bin->runcur) - arena_bin_lower_run(arena, chunk, run, bin); + arena_bin_lower_run(arena, run, bin); if (config_stats) { bin->stats.ndalloc++; @@ -2504,15 +2947,15 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, } void -arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_bits_t *bitselm) +arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm) { - arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true); + arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true); } void -arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, +arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm) { arena_run_t *run; @@ -2520,16 +2963,16 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t rpages_ind; rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); - run = &arena_miscelm_get(chunk, rpages_ind)->run; + run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; bin = &arena->bins[run->binind]; - malloc_mutex_lock(&bin->lock); - arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false); - malloc_mutex_unlock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); + arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false); + malloc_mutex_unlock(tsdn, &bin->lock); } void -arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind) +arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t pageind) { arena_chunk_map_bits_t *bitselm; @@ -2538,34 +2981,36 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, pageind)) != BININD_INVALID); } - bitselm = arena_bitselm_get(chunk, pageind); - arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm); + bitselm = arena_bitselm_get_mutable(chunk, pageind); + arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm); + arena_decay_tick(tsdn, arena); } #ifdef JEMALLOC_JET #undef arena_dalloc_junk_large -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) 
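
The hunks around this point stop hard-coding the fill bytes and use JEMALLOC_ALLOC_JUNK / JEMALLOC_FREE_JUNK instead; judging by the literals being replaced, the values remain 0xa5 for freshly allocated memory and 0x5a for freed memory, which is also what the redzone checks earlier in this file scan for. A tiny standalone sketch of the fill-and-verify idea; the toy_* names and constants are illustrative only:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define TOY_ALLOC_JUNK	((uint8_t)0xa5)	/* same byte the old literals used */
#define TOY_FREE_JUNK	((uint8_t)0x5a)

static void
toy_junk_alloc(void *ptr, size_t usize)
{
	memset(ptr, TOY_ALLOC_JUNK, usize);	/* exposes reads of uninitialized data */
}

static void
toy_junk_free(void *ptr, size_t usize)
{
	memset(ptr, TOY_FREE_JUNK, usize);	/* exposes use-after-free */
}

/* Redzone-style check: every byte must still hold the expected pattern. */
static int
toy_pattern_intact(const void *ptr, size_t len, uint8_t pattern)
{
	const uint8_t *p = ptr;
	size_t i;

	for (i = 0; i < len; i++) {
		if (p[i] != pattern)
			return (0);
	}
	return (1);
}
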
+#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large) #endif void arena_dalloc_junk_large(void *ptr, size_t usize) { if (config_fill && unlikely(opt_junk_free)) - memset(ptr, 0x5a, usize); + memset(ptr, JEMALLOC_FREE_JUNK, usize); } #ifdef JEMALLOC_JET #undef arena_dalloc_junk_large #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) arena_dalloc_junk_large_t *arena_dalloc_junk_large = - JEMALLOC_N(arena_dalloc_junk_large_impl); + JEMALLOC_N(n_arena_dalloc_junk_large); #endif static void -arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, - void *ptr, bool junked) +arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr, bool junked) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); + arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, + pageind); arena_run_t *run = &miscelm->run; if (config_fill || config_stats) { @@ -2584,32 +3029,35 @@ arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, } } - arena_run_dalloc_decommit(arena, chunk, run); + arena_run_dalloc(tsdn, arena, run, true, false, false); } void -arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, - void *ptr) +arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr) { - arena_dalloc_large_locked_impl(arena, chunk, ptr, true); + arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true); } void -arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) +arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr) { - malloc_mutex_lock(&arena->lock); - arena_dalloc_large_locked_impl(arena, chunk, ptr, false); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); + arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false); + malloc_mutex_unlock(tsdn, &arena->lock); + arena_decay_tick(tsdn, arena); } static void -arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t size) +arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t oldsize, size_t size) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); + arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, + pageind); arena_run_t *run = &miscelm->run; assert(size < oldsize); @@ -2618,8 +3066,8 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, * Shrink the run, and make trailing pages available for other * allocations. 
*/ - malloc_mutex_lock(&arena->lock); - arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size + + malloc_mutex_lock(tsdn, &arena->lock); + arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size + large_pad, true); if (config_stats) { szind_t oldindex = size2index(oldsize) - NBINS; @@ -2637,12 +3085,12 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].curruns++; } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } static bool -arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t usize_min, size_t usize_max, bool zero) +arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t npages = (oldsize + large_pad) >> LG_PAGE; @@ -2652,7 +3100,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, large_pad); /* Try to extend the run. */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, pageind+npages) != 0) goto label_fail; @@ -2675,7 +3123,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, if (splitsize == 0) goto label_fail; - run = &arena_miscelm_get(chunk, pageind+npages)->run; + run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run; if (arena_run_split_large(arena, run, splitsize, zero)) goto label_fail; @@ -2683,10 +3131,16 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, /* * Zero the trailing bytes of the original allocation's * last page, since they are in an indeterminate state. + * There will always be trailing bytes, because ptr's + * offset from the beginning of the run is a multiple of + * CACHELINE in [0 .. PAGE). 
*/ - assert(PAGE_CEILING(oldsize) == oldsize); - memset((void *)((uintptr_t)ptr + oldsize), 0, - PAGE_CEILING((uintptr_t)ptr) - (uintptr_t)ptr); + void *zbase = (void *)((uintptr_t)ptr + oldsize); + void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + + PAGE)); + size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; + assert(nzero > 0); + memset(zbase, 0, nzero); } size = oldsize + splitsize; @@ -2726,24 +3180,24 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].curruns++; } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (false); } label_fail: - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (true); } #ifdef JEMALLOC_JET #undef arena_ralloc_junk_large -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) +#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large) #endif static void arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) { if (config_fill && unlikely(opt_junk_free)) { - memset((void *)((uintptr_t)ptr + usize), 0x5a, + memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK, old_usize - usize); } } @@ -2751,7 +3205,7 @@ arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) #undef arena_ralloc_junk_large #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) arena_ralloc_junk_large_t *arena_ralloc_junk_large = - JEMALLOC_N(arena_ralloc_junk_large_impl); + JEMALLOC_N(n_arena_ralloc_junk_large); #endif /* @@ -2759,7 +3213,7 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large = * always fail if growing an object, and the following run is already in use. */ static bool -arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min, +arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) { arena_chunk_t *chunk; @@ -2774,15 +3228,16 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min, arena = extent_node_arena_get(&chunk->node); if (oldsize < usize_max) { - bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize, - usize_min, usize_max, zero); + bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr, + oldsize, usize_min, usize_max, zero); if (config_fill && !ret && !zero) { if (unlikely(opt_junk_alloc)) { - memset((void *)((uintptr_t)ptr + oldsize), 0xa5, - isalloc(ptr, config_prof) - oldsize); + memset((void *)((uintptr_t)ptr + oldsize), + JEMALLOC_ALLOC_JUNK, + isalloc(tsdn, ptr, config_prof) - oldsize); } else if (unlikely(opt_zero)) { memset((void *)((uintptr_t)ptr + oldsize), 0, - isalloc(ptr, config_prof) - oldsize); + isalloc(tsdn, ptr, config_prof) - oldsize); } } return (ret); @@ -2791,19 +3246,27 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min, assert(oldsize > usize_max); /* Fill before shrinking in order avoid a race. */ arena_ralloc_junk_large(ptr, oldsize, usize_max); - arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max); + arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max); return (false); } bool -arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, - bool zero) +arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, bool zero) { size_t usize_min, usize_max; + /* Calls with non-zero extra had to clamp extra. 
*/ + assert(extra == 0 || size + extra <= HUGE_MAXCLASS); + + if (unlikely(size > HUGE_MAXCLASS)) + return (true); + usize_min = s2u(size); usize_max = s2u(size + extra); if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { + arena_chunk_t *chunk; + /* * Avoid moving the allocation if the size class can be left the * same. @@ -2811,37 +3274,39 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, if (oldsize <= SMALL_MAXCLASS) { assert(arena_bin_info[size2index(oldsize)].reg_size == oldsize); - if ((usize_max <= SMALL_MAXCLASS && - size2index(usize_max) == size2index(oldsize)) || - (size <= oldsize && usize_max >= oldsize)) - return (false); + if ((usize_max > SMALL_MAXCLASS || + size2index(usize_max) != size2index(oldsize)) && + (size > oldsize || usize_max < oldsize)) + return (true); } else { - if (usize_max > SMALL_MAXCLASS) { - if (!arena_ralloc_large(ptr, oldsize, usize_min, - usize_max, zero)) - return (false); - } + if (usize_max <= SMALL_MAXCLASS) + return (true); + if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min, + usize_max, zero)) + return (true); } - /* Reallocation would require a move. */ - return (true); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node)); + return (false); } else { - return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max, - zero)); + return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min, + usize_max, zero)); } } static void * -arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, +arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { if (alignment == 0) - return (arena_malloc(tsd, arena, usize, zero, tcache)); + return (arena_malloc(tsdn, arena, usize, size2index(usize), + zero, tcache, true)); usize = sa2u(usize, alignment); - if (usize == 0) + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) return (NULL); - return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); + return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); } void * @@ -2852,14 +3317,15 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t usize; usize = s2u(size); - if (usize == 0) + if (unlikely(usize == 0 || size > HUGE_MAXCLASS)) return (NULL); if (likely(usize <= large_maxclass)) { size_t copysize; /* Try to avoid moving the allocation. */ - if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero)) + if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0, + zero)) return (ptr); /* @@ -2867,8 +3333,8 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, * the object. In that case, fall back to allocating new space * and copying. */ - ret = arena_ralloc_move_helper(tsd, arena, usize, alignment, - zero, tcache); + ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, + alignment, zero, tcache); if (ret == NULL) return (NULL); @@ -2880,7 +3346,7 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, copysize = (usize < oldsize) ? 
usize : oldsize; JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); memcpy(ret, ptr, copysize); - isqalloc(tsd, ptr, oldsize, tcache); + isqalloc(tsd, ptr, oldsize, tcache, true); } else { ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, zero, tcache); @@ -2889,25 +3355,25 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, } dss_prec_t -arena_dss_prec_get(arena_t *arena) +arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) { dss_prec_t ret; - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); ret = arena->dss_prec; - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (ret); } bool -arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) +arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) { if (!have_dss) return (dss_prec != dss_prec_disabled); - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); arena->dss_prec = dss_prec; - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (false); } @@ -2922,27 +3388,76 @@ bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) { + if (opt_purge != purge_mode_ratio) + return (true); if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) return (true); atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); return (false); } +ssize_t +arena_decay_time_default_get(void) +{ + + return ((ssize_t)atomic_read_z((size_t *)&decay_time_default)); +} + +bool +arena_decay_time_default_set(ssize_t decay_time) +{ + + if (opt_purge != purge_mode_decay) + return (true); + if (!arena_decay_time_valid(decay_time)) + return (true); + atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time); + return (false); +} + +static void +arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, + size_t *nactive, size_t *ndirty) +{ + + *nthreads += arena_nthreads_get(arena, false); + *dss = dss_prec_names[arena->dss_prec]; + *lg_dirty_mult = arena->lg_dirty_mult; + *decay_time = arena->decay.time; + *nactive += arena->nactive; + *ndirty += arena->ndirty; +} + void -arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult, +arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, + size_t *nactive, size_t *ndirty) +{ + + malloc_mutex_lock(tsdn, &arena->lock); + arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, + decay_time, nactive, ndirty); + malloc_mutex_unlock(tsdn, &arena->lock); +} + +void +arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats) { unsigned i; - malloc_mutex_lock(&arena->lock); - *dss = dss_prec_names[arena->dss_prec]; - *lg_dirty_mult = arena->lg_dirty_mult; - *nactive += arena->nactive; - *ndirty += arena->ndirty; + cassert(config_stats); + + malloc_mutex_lock(tsdn, &arena->lock); + arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, + decay_time, nactive, ndirty); astats->mapped += arena->stats.mapped; + astats->retained += arena->stats.retained; astats->npurge += arena->stats.npurge; astats->nmadvise += arena->stats.nmadvise; astats->purged += arena->stats.purged; @@ -2968,12 +3483,12 @@ arena_stats_merge(arena_t *arena, const char **dss, 
ssize_t *lg_dirty_mult, hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); bstats[i].nmalloc += bin->stats.nmalloc; bstats[i].ndalloc += bin->stats.ndalloc; bstats[i].nrequests += bin->stats.nrequests; @@ -2985,33 +3500,61 @@ arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult, bstats[i].nruns += bin->stats.nruns; bstats[i].reruns += bin->stats.reruns; bstats[i].curruns += bin->stats.curruns; - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); } } +unsigned +arena_nthreads_get(arena_t *arena, bool internal) +{ + + return (atomic_read_u(&arena->nthreads[internal])); +} + +void +arena_nthreads_inc(arena_t *arena, bool internal) +{ + + atomic_add_u(&arena->nthreads[internal], 1); +} + +void +arena_nthreads_dec(arena_t *arena, bool internal) +{ + + atomic_sub_u(&arena->nthreads[internal], 1); +} + +size_t +arena_extent_sn_next(arena_t *arena) +{ + + return (atomic_add_z(&arena->extent_sn_next, 1) - 1); +} + arena_t * -arena_new(unsigned ind) +arena_new(tsdn_t *tsdn, unsigned ind) { arena_t *arena; unsigned i; - arena_bin_t *bin; /* * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly * because there is no way to clean up if base_alloc() OOMs. */ if (config_stats) { - arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t)) - + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + - nhclasses) * sizeof(malloc_huge_stats_t)); + arena = (arena_t *)base_alloc(tsdn, + CACHELINE_CEILING(sizeof(arena_t)) + + QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t))) + + (nhclasses * sizeof(malloc_huge_stats_t))); } else - arena = (arena_t *)base_alloc(sizeof(arena_t)); + arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t)); if (arena == NULL) return (NULL); arena->ind = ind; - arena->nthreads = 0; - if (malloc_mutex_init(&arena->lock)) + arena->nthreads[0] = arena->nthreads[1] = 0; + if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA)) return (NULL); if (config_stats) { @@ -3041,11 +3584,15 @@ arena_new(unsigned ind) * deterministic seed. */ arena->offset_state = config_debug ? 
ind : - (uint64_t)(uintptr_t)arena; + (size_t)(uintptr_t)arena; } arena->dss_prec = chunk_dss_prec_get(); + ql_new(&arena->achunks); + + arena->extent_sn_next = 0; + arena->spare = NULL; arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); @@ -3053,33 +3600,42 @@ arena_new(unsigned ind) arena->nactive = 0; arena->ndirty = 0; - arena_avail_tree_new(&arena->runs_avail); + for (i = 0; i < NPSIZES; i++) + arena_run_heap_new(&arena->runs_avail[i]); + qr_new(&arena->runs_dirty, rd_link); qr_new(&arena->chunks_cache, cc_link); + if (opt_purge == purge_mode_decay) + arena_decay_init(arena, arena_decay_time_default_get()); + ql_new(&arena->huge); - if (malloc_mutex_init(&arena->huge_mtx)) + if (malloc_mutex_init(&arena->huge_mtx, "arena_huge", + WITNESS_RANK_ARENA_HUGE)) return (NULL); - extent_tree_szad_new(&arena->chunks_szad_cached); + extent_tree_szsnad_new(&arena->chunks_szsnad_cached); extent_tree_ad_new(&arena->chunks_ad_cached); - extent_tree_szad_new(&arena->chunks_szad_retained); + extent_tree_szsnad_new(&arena->chunks_szsnad_retained); extent_tree_ad_new(&arena->chunks_ad_retained); - if (malloc_mutex_init(&arena->chunks_mtx)) + if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks", + WITNESS_RANK_ARENA_CHUNKS)) return (NULL); ql_new(&arena->node_cache); - if (malloc_mutex_init(&arena->node_cache_mtx)) + if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache", + WITNESS_RANK_ARENA_NODE_CACHE)) return (NULL); arena->chunk_hooks = chunk_hooks_default; /* Initialize bins. */ for (i = 0; i < NBINS; i++) { - bin = &arena->bins[i]; - if (malloc_mutex_init(&bin->lock)) + arena_bin_t *bin = &arena->bins[i]; + if (malloc_mutex_init(&bin->lock, "arena_bin", + WITNESS_RANK_ARENA_BIN)) return (NULL); bin->runcur = NULL; - arena_run_tree_new(&bin->runs); + arena_run_heap_new(&bin->runs); if (config_stats) memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); } @@ -3111,8 +3667,7 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info) * be twice as large in order to maintain alignment. */ if (config_fill && unlikely(opt_redzone)) { - size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - - 1); + size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1); if (align_min <= REDZONE_MINSIZE) { bin_info->redzone_size = REDZONE_MINSIZE; pad_size = 0; @@ -3132,18 +3687,19 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info) * size). 
*/ try_run_size = PAGE; - try_nregs = try_run_size / bin_info->reg_size; + try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); do { perfect_run_size = try_run_size; perfect_nregs = try_nregs; try_run_size += PAGE; - try_nregs = try_run_size / bin_info->reg_size; + try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); } while (perfect_run_size != perfect_nregs * bin_info->reg_size); assert(perfect_nregs <= RUN_MAXREGS); actual_run_size = perfect_run_size; - actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval; + actual_nregs = (uint32_t)((actual_run_size - pad_size) / + bin_info->reg_interval); /* * Redzones can require enough padding that not even a single region can @@ -3155,8 +3711,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info) assert(config_fill && unlikely(opt_redzone)); actual_run_size += PAGE; - actual_nregs = (actual_run_size - pad_size) / - bin_info->reg_interval; + actual_nregs = (uint32_t)((actual_run_size - pad_size) / + bin_info->reg_interval); } /* @@ -3164,8 +3720,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info) */ while (actual_run_size > arena_maxrun) { actual_run_size -= PAGE; - actual_nregs = (actual_run_size - pad_size) / - bin_info->reg_interval; + actual_nregs = (uint32_t)((actual_run_size - pad_size) / + bin_info->reg_interval); } assert(actual_nregs > 0); assert(actual_run_size == s2u(actual_run_size)); @@ -3173,11 +3729,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info) /* Copy final settings. */ bin_info->run_size = actual_run_size; bin_info->nregs = actual_nregs; - bin_info->reg0_offset = actual_run_size - (actual_nregs * - bin_info->reg_interval) - pad_size + bin_info->redzone_size; - - if (actual_run_size > small_maxrun) - small_maxrun = actual_run_size; + bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs * + bin_info->reg_interval) - pad_size + bin_info->redzone_size); assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs * bin_info->reg_interval) + pad_size == bin_info->run_size); @@ -3194,7 +3747,7 @@ bin_info_init(void) bin_info_run_size_calc(bin_info); \ bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); #define BIN_INFO_INIT_bin_no(index, size) -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) SIZE_CLASSES #undef BIN_INFO_INIT_bin_yes @@ -3202,38 +3755,13 @@ bin_info_init(void) #undef SC } -static bool -small_run_size_init(void) -{ - - assert(small_maxrun != 0); - - small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >> - LG_PAGE)); - if (small_run_tab == NULL) - return (true); - -#define TAB_INIT_bin_yes(index, size) { \ - arena_bin_info_t *bin_info = &arena_bin_info[index]; \ - small_run_tab[bin_info->run_size >> LG_PAGE] = true; \ - } -#define TAB_INIT_bin_no(index, size) -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ - TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) - SIZE_CLASSES -#undef TAB_INIT_bin_yes -#undef TAB_INIT_bin_no -#undef SC - - return (false); -} - -bool +void arena_boot(void) { unsigned i; arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); + arena_decay_time_default_set(opt_decay_time); /* * Compute the header size such that it is large enough to contain the @@ -3275,44 +3803,61 @@ arena_boot(void) nhclasses = NSIZES - nlclasses - NBINS; bin_info_init(); - return (small_run_size_init()); } void -arena_prefork(arena_t *arena) 
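
The arena_prefork0..3 / arena_postfork_* split added just below serves the standard fork-safety pattern: every allocator mutex is acquired, in a fixed rank order, immediately before fork() so that none is held mid-operation in the child, and then released again in both parent and child. A generic sketch of that pattern with two hypothetical locks; jemalloc wires its own handlers up elsewhere and handles child-side mutexes differently, so this only shows the shape of the idea:

#include <pthread.h>

static pthread_mutex_t toy_arena_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t toy_bin_lock = PTHREAD_MUTEX_INITIALIZER;

static void
toy_prefork(void)	/* runs in the parent, right before fork() */
{
	pthread_mutex_lock(&toy_arena_lock);	/* outer lock first */
	pthread_mutex_lock(&toy_bin_lock);
}

static void
toy_postfork(void)	/* runs after fork(), in parent and in child */
{
	pthread_mutex_unlock(&toy_bin_lock);
	pthread_mutex_unlock(&toy_arena_lock);
}

static void
toy_install_fork_handlers(void)
{
	pthread_atfork(toy_prefork, toy_postfork, toy_postfork);
}
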
+arena_prefork0(tsdn_t *tsdn, arena_t *arena) +{ + + malloc_mutex_prefork(tsdn, &arena->lock); +} + +void +arena_prefork1(tsdn_t *tsdn, arena_t *arena) +{ + + malloc_mutex_prefork(tsdn, &arena->chunks_mtx); +} + +void +arena_prefork2(tsdn_t *tsdn, arena_t *arena) +{ + + malloc_mutex_prefork(tsdn, &arena->node_cache_mtx); +} + +void +arena_prefork3(tsdn_t *tsdn, arena_t *arena) { unsigned i; - malloc_mutex_prefork(&arena->lock); - malloc_mutex_prefork(&arena->huge_mtx); - malloc_mutex_prefork(&arena->chunks_mtx); - malloc_mutex_prefork(&arena->node_cache_mtx); for (i = 0; i < NBINS; i++) - malloc_mutex_prefork(&arena->bins[i].lock); + malloc_mutex_prefork(tsdn, &arena->bins[i].lock); + malloc_mutex_prefork(tsdn, &arena->huge_mtx); } void -arena_postfork_parent(arena_t *arena) +arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { unsigned i; + malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx); for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_parent(&arena->bins[i].lock); - malloc_mutex_postfork_parent(&arena->node_cache_mtx); - malloc_mutex_postfork_parent(&arena->chunks_mtx); - malloc_mutex_postfork_parent(&arena->huge_mtx); - malloc_mutex_postfork_parent(&arena->lock); + malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock); + malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->lock); } void -arena_postfork_child(arena_t *arena) +arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { unsigned i; + malloc_mutex_postfork_child(tsdn, &arena->huge_mtx); for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_child(&arena->bins[i].lock); - malloc_mutex_postfork_child(&arena->node_cache_mtx); - malloc_mutex_postfork_child(&arena->chunks_mtx); - malloc_mutex_postfork_child(&arena->huge_mtx); - malloc_mutex_postfork_child(&arena->lock); + malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock); + malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx); + malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx); + malloc_mutex_postfork_child(tsdn, &arena->lock); } diff --git a/deps/jemalloc/src/base.c b/deps/jemalloc/src/base.c index 7cdcfed86..5681a3f36 100644 --- a/deps/jemalloc/src/base.c +++ b/deps/jemalloc/src/base.c @@ -5,7 +5,8 @@ /* Data. */ static malloc_mutex_t base_mtx; -static extent_tree_t base_avail_szad; +static size_t base_extent_sn_next; +static extent_tree_t base_avail_szsnad; static extent_node_t *base_nodes; static size_t base_allocated; static size_t base_resident; @@ -13,12 +14,13 @@ static size_t base_mapped; /******************************************************************************/ -/* base_mtx must be held. */ static extent_node_t * -base_node_try_alloc(void) +base_node_try_alloc(tsdn_t *tsdn) { extent_node_t *node; + malloc_mutex_assert_owner(tsdn, &base_mtx); + if (base_nodes == NULL) return (NULL); node = base_nodes; @@ -27,33 +29,42 @@ base_node_try_alloc(void) return (node); } -/* base_mtx must be held. */ static void -base_node_dalloc(extent_node_t *node) +base_node_dalloc(tsdn_t *tsdn, extent_node_t *node) { + malloc_mutex_assert_owner(tsdn, &base_mtx); + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); *(extent_node_t **)node = base_nodes; base_nodes = node; } -/* base_mtx must be held. 
*/ +static void +base_extent_node_init(extent_node_t *node, void *addr, size_t size) +{ + size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1; + + extent_node_init(node, NULL, addr, size, sn, true, true); +} + static extent_node_t * -base_chunk_alloc(size_t minsize) +base_chunk_alloc(tsdn_t *tsdn, size_t minsize) { extent_node_t *node; size_t csize, nsize; void *addr; + malloc_mutex_assert_owner(tsdn, &base_mtx); assert(minsize != 0); - node = base_node_try_alloc(); + node = base_node_try_alloc(tsdn); /* Allocate enough space to also carve a node out if necessary. */ nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0; csize = CHUNK_CEILING(minsize + nsize); addr = chunk_alloc_base(csize); if (addr == NULL) { if (node != NULL) - base_node_dalloc(node); + base_node_dalloc(tsdn, node); return (NULL); } base_mapped += csize; @@ -66,7 +77,7 @@ base_chunk_alloc(size_t minsize) base_resident += PAGE_CEILING(nsize); } } - extent_node_init(node, NULL, addr, csize, true, true); + base_extent_node_init(node, addr, csize); return (node); } @@ -76,7 +87,7 @@ base_chunk_alloc(size_t minsize) * physical memory usage. */ void * -base_alloc(size_t size) +base_alloc(tsdn_t *tsdn, size_t size) { void *ret; size_t csize, usize; @@ -90,15 +101,15 @@ base_alloc(size_t size) csize = CACHELINE_CEILING(size); usize = s2u(csize); - extent_node_init(&key, NULL, NULL, usize, false, false); - malloc_mutex_lock(&base_mtx); - node = extent_tree_szad_nsearch(&base_avail_szad, &key); + extent_node_init(&key, NULL, NULL, usize, 0, false, false); + malloc_mutex_lock(tsdn, &base_mtx); + node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key); if (node != NULL) { /* Use existing space. */ - extent_tree_szad_remove(&base_avail_szad, node); + extent_tree_szsnad_remove(&base_avail_szsnad, node); } else { /* Try to allocate more space. 
*/ - node = base_chunk_alloc(csize); + node = base_chunk_alloc(tsdn, csize); } if (node == NULL) { ret = NULL; @@ -109,9 +120,9 @@ base_alloc(size_t size) if (extent_node_size_get(node) > csize) { extent_node_addr_set(node, (void *)((uintptr_t)ret + csize)); extent_node_size_set(node, extent_node_size_get(node) - csize); - extent_tree_szad_insert(&base_avail_szad, node); + extent_tree_szsnad_insert(&base_avail_szsnad, node); } else - base_node_dalloc(node); + base_node_dalloc(tsdn, node); if (config_stats) { base_allocated += csize; /* @@ -123,52 +134,54 @@ base_alloc(size_t size) } JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize); label_return: - malloc_mutex_unlock(&base_mtx); + malloc_mutex_unlock(tsdn, &base_mtx); return (ret); } void -base_stats_get(size_t *allocated, size_t *resident, size_t *mapped) +base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident, + size_t *mapped) { - malloc_mutex_lock(&base_mtx); + malloc_mutex_lock(tsdn, &base_mtx); assert(base_allocated <= base_resident); assert(base_resident <= base_mapped); *allocated = base_allocated; *resident = base_resident; *mapped = base_mapped; - malloc_mutex_unlock(&base_mtx); + malloc_mutex_unlock(tsdn, &base_mtx); } bool base_boot(void) { - if (malloc_mutex_init(&base_mtx)) + if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE)) return (true); - extent_tree_szad_new(&base_avail_szad); + base_extent_sn_next = 0; + extent_tree_szsnad_new(&base_avail_szsnad); base_nodes = NULL; return (false); } void -base_prefork(void) +base_prefork(tsdn_t *tsdn) { - malloc_mutex_prefork(&base_mtx); + malloc_mutex_prefork(tsdn, &base_mtx); } void -base_postfork_parent(void) +base_postfork_parent(tsdn_t *tsdn) { - malloc_mutex_postfork_parent(&base_mtx); + malloc_mutex_postfork_parent(tsdn, &base_mtx); } void -base_postfork_child(void) +base_postfork_child(tsdn_t *tsdn) { - malloc_mutex_postfork_child(&base_mtx); + malloc_mutex_postfork_child(tsdn, &base_mtx); } diff --git a/deps/jemalloc/src/bitmap.c b/deps/jemalloc/src/bitmap.c index c733372b4..ac0f3b381 100644 --- a/deps/jemalloc/src/bitmap.c +++ b/deps/jemalloc/src/bitmap.c @@ -3,6 +3,8 @@ /******************************************************************************/ +#ifdef USE_TREE + void bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { @@ -32,20 +34,11 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits) binfo->nbits = nbits; } -size_t +static size_t bitmap_info_ngroups(const bitmap_info_t *binfo) { - return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP); -} - -size_t -bitmap_size(size_t nbits) -{ - bitmap_info_t binfo; - - bitmap_info_init(&binfo, nbits); - return (bitmap_info_ngroups(&binfo)); + return (binfo->levels[binfo->nlevels].group_offset); } void @@ -61,8 +54,7 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) * correspond to the first logical bit in the group, so extra bits * are the most significant bits of the last group. 
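
Further down in this bitmap.c diff, a flat single-level variant is added alongside the existing USE_TREE one; as the 0xff memset at init suggests, a raw 1 bit means the slot is still free. A standalone sketch of how such a one-level bitmap hands out slots; this is a hypothetical helper relying on the GCC/Clang __builtin_ctzll intrinsic, not jemalloc's bitmap_sfu():

#include <stdint.h>
#include <stddef.h>

/* Returns the index of a free slot (raw 1 bit) and marks it used, or -1. */
static long
toy_bitmap_take(uint64_t *groups, size_t ngroups)
{
	size_t i;

	for (i = 0; i < ngroups; i++) {
		if (groups[i] != 0) {
			int bit = __builtin_ctzll(groups[i]);	/* lowest set bit */
			groups[i] &= ~(UINT64_C(1) << bit);	/* clear it: now in use */
			return ((long)(i * 64 + bit));
		}
	}
	return (-1);	/* bitmap exhausted */
}
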
*/ - memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset << - LG_SIZEOF_BITMAP); + memset(bitmap, 0xffU, bitmap_size(binfo)); extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; if (extra != 0) @@ -76,3 +68,44 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; } } + +#else /* USE_TREE */ + +void +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) +{ + + assert(nbits > 0); + assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); + + binfo->ngroups = BITMAP_BITS2GROUPS(nbits); + binfo->nbits = nbits; +} + +static size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) +{ + + return (binfo->ngroups); +} + +void +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) +{ + size_t extra; + + memset(bitmap, 0xffU, bitmap_size(binfo)); + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) + bitmap[binfo->ngroups - 1] >>= extra; +} + +#endif /* USE_TREE */ + +size_t +bitmap_size(const bitmap_info_t *binfo) +{ + + return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP); +} diff --git a/deps/jemalloc/src/chunk.c b/deps/jemalloc/src/chunk.c index 6ba1ca7a5..c1c514a86 100644 --- a/deps/jemalloc/src/chunk.c +++ b/deps/jemalloc/src/chunk.c @@ -49,9 +49,10 @@ const chunk_hooks_t chunk_hooks_default = { * definition. */ -static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, - extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, - void *chunk, size_t size, bool zeroed, bool committed); +static void chunk_record(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad, + extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn, + bool zeroed, bool committed); /******************************************************************************/ @@ -63,23 +64,23 @@ chunk_hooks_get_locked(arena_t *arena) } chunk_hooks_t -chunk_hooks_get(arena_t *arena) +chunk_hooks_get(tsdn_t *tsdn, arena_t *arena) { chunk_hooks_t chunk_hooks; - malloc_mutex_lock(&arena->chunks_mtx); + malloc_mutex_lock(tsdn, &arena->chunks_mtx); chunk_hooks = chunk_hooks_get_locked(arena); - malloc_mutex_unlock(&arena->chunks_mtx); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); return (chunk_hooks); } chunk_hooks_t -chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks) +chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks) { chunk_hooks_t old_chunk_hooks; - malloc_mutex_lock(&arena->chunks_mtx); + malloc_mutex_lock(tsdn, &arena->chunks_mtx); old_chunk_hooks = arena->chunk_hooks; /* * Copy each field atomically so that it is impossible for readers to @@ -104,14 +105,14 @@ chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks) ATOMIC_COPY_HOOK(split); ATOMIC_COPY_HOOK(merge); #undef ATOMIC_COPY_HOOK - malloc_mutex_unlock(&arena->chunks_mtx); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); return (old_chunk_hooks); } static void -chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks, - bool locked) +chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, bool locked) { static const chunk_hooks_t uninitialized_hooks = CHUNK_HOOKS_INITIALIZER; @@ -119,27 +120,28 @@ chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks, if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) == 0) { *chunk_hooks = locked ? 
chunk_hooks_get_locked(arena) : - chunk_hooks_get(arena); + chunk_hooks_get(tsdn, arena); } } static void -chunk_hooks_assure_initialized_locked(arena_t *arena, +chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks) { - chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true); + chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true); } static void -chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks) +chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks) { - chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false); + chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false); } bool -chunk_register(const void *chunk, const extent_node_t *node) +chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node) { assert(extent_node_addr_get(node) == chunk); @@ -159,7 +161,7 @@ chunk_register(const void *chunk, const extent_node_t *node) high = atomic_read_z(&highchunks); } if (cur > high && prof_gdump_get_unlocked()) - prof_gdump(); + prof_gdump(tsdn); } return (false); @@ -181,33 +183,35 @@ chunk_deregister(const void *chunk, const extent_node_t *node) } /* - * Do first-best-fit chunk selection, i.e. select the lowest chunk that best - * fits. + * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that + * best fits. */ static extent_node_t * -chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad, - extent_tree_t *chunks_ad, size_t size) +chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size) { extent_node_t key; assert(size == CHUNK_CEILING(size)); - extent_node_init(&key, arena, NULL, size, false, false); - return (extent_tree_szad_nsearch(chunks_szad, &key)); + extent_node_init(&key, arena, NULL, size, 0, false, false); + return (extent_tree_szsnad_nsearch(chunks_szsnad, &key)); } static void * -chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, - extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, - void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, - bool dalloc_node) +chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache, + void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, + bool *commit, bool dalloc_node) { void *ret; extent_node_t *node; size_t alloc_size, leadsize, trailsize; bool zeroed, committed; + assert(CHUNK_CEILING(size) == size); + assert(alignment > 0); assert(new_addr == NULL || alignment == chunksize); + assert(CHUNK_ADDR2BASE(new_addr) == new_addr); /* * Cached chunks use the node linkage embedded in their headers, in * which case dalloc_node is true, and new_addr is non-NULL because @@ -215,24 +219,23 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, */ assert(dalloc_node || new_addr != NULL); - alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize)); + alloc_size = size + CHUNK_CEILING(alignment) - chunksize; /* Beware size_t wrap-around. 
*/ if (alloc_size < size) return (NULL); - malloc_mutex_lock(&arena->chunks_mtx); - chunk_hooks_assure_initialized_locked(arena, chunk_hooks); + malloc_mutex_lock(tsdn, &arena->chunks_mtx); + chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks); if (new_addr != NULL) { extent_node_t key; - extent_node_init(&key, arena, new_addr, alloc_size, false, + extent_node_init(&key, arena, new_addr, alloc_size, 0, false, false); node = extent_tree_ad_search(chunks_ad, &key); } else { - node = chunk_first_best_fit(arena, chunks_szad, chunks_ad, - alloc_size); + node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size); } if (node == NULL || (new_addr != NULL && extent_node_size_get(node) < size)) { - malloc_mutex_unlock(&arena->chunks_mtx); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); return (NULL); } leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node), @@ -241,6 +244,7 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, assert(extent_node_size_get(node) >= leadsize + size); trailsize = extent_node_size_get(node) - leadsize - size; ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize); + *sn = extent_node_sn_get(node); zeroed = extent_node_zeroed_get(node); if (zeroed) *zero = true; @@ -251,17 +255,17 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, if (leadsize != 0 && chunk_hooks->split(extent_node_addr_get(node), extent_node_size_get(node), leadsize, size, false, arena->ind)) { - malloc_mutex_unlock(&arena->chunks_mtx); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); return (NULL); } /* Remove node from the tree. */ - extent_tree_szad_remove(chunks_szad, node); + extent_tree_szsnad_remove(chunks_szsnad, node); extent_tree_ad_remove(chunks_ad, node); arena_chunk_cache_maybe_remove(arena, node, cache); if (leadsize != 0) { /* Insert the leading space as a smaller chunk. */ extent_node_size_set(node, leadsize); - extent_tree_szad_insert(chunks_szad, node); + extent_tree_szsnad_insert(chunks_szsnad, node); extent_tree_ad_insert(chunks_ad, node); arena_chunk_cache_maybe_insert(arena, node, cache); node = NULL; @@ -271,41 +275,42 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, if (chunk_hooks->split(ret, size + trailsize, size, trailsize, false, arena->ind)) { if (dalloc_node && node != NULL) - arena_node_dalloc(arena, node); - malloc_mutex_unlock(&arena->chunks_mtx); - chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, - cache, ret, size + trailsize, zeroed, committed); + arena_node_dalloc(tsdn, arena, node); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); + chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, + chunks_ad, cache, ret, size + trailsize, *sn, + zeroed, committed); return (NULL); } /* Insert the trailing space as a smaller chunk. 
*/ if (node == NULL) { - node = arena_node_alloc(arena); + node = arena_node_alloc(tsdn, arena); if (node == NULL) { - malloc_mutex_unlock(&arena->chunks_mtx); - chunk_record(arena, chunk_hooks, chunks_szad, - chunks_ad, cache, ret, size + trailsize, - zeroed, committed); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); + chunk_record(tsdn, arena, chunk_hooks, + chunks_szsnad, chunks_ad, cache, ret, size + + trailsize, *sn, zeroed, committed); return (NULL); } } extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size), - trailsize, zeroed, committed); - extent_tree_szad_insert(chunks_szad, node); + trailsize, *sn, zeroed, committed); + extent_tree_szsnad_insert(chunks_szsnad, node); extent_tree_ad_insert(chunks_ad, node); arena_chunk_cache_maybe_insert(arena, node, cache); node = NULL; } if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) { - malloc_mutex_unlock(&arena->chunks_mtx); - chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache, - ret, size, zeroed, committed); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); + chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad, + cache, ret, size, *sn, zeroed, committed); return (NULL); } - malloc_mutex_unlock(&arena->chunks_mtx); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); assert(dalloc_node || node != NULL); if (dalloc_node && node != NULL) - arena_node_dalloc(arena, node); + arena_node_dalloc(tsdn, arena, node); if (*zero) { if (!zeroed) memset(ret, 0, size); @@ -313,10 +318,11 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, size_t i; size_t *p = (size_t *)(uintptr_t)ret; - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size); for (i = 0; i < size / sizeof(size_t); i++) assert(p[i] == 0); } + if (config_valgrind) + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size); } return (ret); } @@ -328,39 +334,29 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, * them if they are returned. */ static void * -chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment, - bool *zero, bool *commit, dss_prec_t dss_prec) +chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { void *ret; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; assert(size != 0); assert((size & chunksize_mask) == 0); assert(alignment != 0); assert((alignment & chunksize_mask) == 0); - /* Retained. */ - if ((ret = chunk_recycle(arena, &chunk_hooks, - &arena->chunks_szad_retained, &arena->chunks_ad_retained, false, - new_addr, size, alignment, zero, commit, true)) != NULL) - return (ret); - /* "primary" dss. */ if (have_dss && dss_prec == dss_prec_primary && (ret = - chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != - NULL) - return (ret); - /* - * mmap. Requesting an address is not implemented for - * chunk_alloc_mmap(), so only call it if (new_addr == NULL). - */ - if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero, + chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, commit)) != NULL) return (ret); + /* mmap. */ + if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) != + NULL) + return (ret); /* "secondary" dss. */ if (have_dss && dss_prec == dss_prec_secondary && (ret = - chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != - NULL) + chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) return (ret); /* All strategies for allocation failed. 
*/ @@ -380,7 +376,7 @@ chunk_alloc_base(size_t size) */ zero = true; commit = true; - ret = chunk_alloc_mmap(size, chunksize, &zero, &commit); + ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit); if (ret == NULL) return (NULL); if (config_valgrind) @@ -390,37 +386,33 @@ chunk_alloc_base(size_t size) } void * -chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, - size_t size, size_t alignment, bool *zero, bool dalloc_node) +chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, + bool *commit, bool dalloc_node) { void *ret; - bool commit; assert(size != 0); assert((size & chunksize_mask) == 0); assert(alignment != 0); assert((alignment & chunksize_mask) == 0); - commit = true; - ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached, - &arena->chunks_ad_cached, true, new_addr, size, alignment, zero, - &commit, dalloc_node); + ret = chunk_recycle(tsdn, arena, chunk_hooks, + &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true, + new_addr, size, alignment, sn, zero, commit, dalloc_node); if (ret == NULL) return (NULL); - assert(commit); if (config_valgrind) JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); return (ret); } static arena_t * -chunk_arena_get(unsigned arena_ind) +chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind) { arena_t *arena; - /* Dodge tsd for a0 in order to avoid bootstrapping issues. */ - arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind, - false, true); + arena = arena_get(tsdn, arena_ind, false); /* * The arena we're allocating on behalf of must have been initialized * already. @@ -430,14 +422,12 @@ chunk_arena_get(unsigned arena_ind) } static void * -chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, - bool *commit, unsigned arena_ind) +chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit) { void *ret; - arena_t *arena; - arena = chunk_arena_get(arena_ind); - ret = chunk_alloc_core(arena, new_addr, size, alignment, zero, + ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero, commit, arena->dss_prec); if (ret == NULL) return (NULL); @@ -447,26 +437,80 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, return (ret); } +static void * +chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, + bool *commit, unsigned arena_ind) +{ + tsdn_t *tsdn; + arena_t *arena; + + tsdn = tsdn_fetch(); + arena = chunk_arena_get(tsdn, arena_ind); + + return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment, + zero, commit)); +} + +static void * +chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, + bool *commit) +{ + void *ret; + + assert(size != 0); + assert((size & chunksize_mask) == 0); + assert(alignment != 0); + assert((alignment & chunksize_mask) == 0); + + ret = chunk_recycle(tsdn, arena, chunk_hooks, + &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false, + new_addr, size, alignment, sn, zero, commit, true); + + if (config_stats && ret != NULL) + arena->stats.retained -= size; + + return (ret); +} + void * -chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, - size_t size, size_t alignment, bool *zero, bool *commit) +chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t 
*chunk_hooks, + void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, + bool *commit) { void *ret; - chunk_hooks_assure_initialized(arena, chunk_hooks); - ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit, - arena->ind); - if (ret == NULL) - return (NULL); - if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default) - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize); + chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); + + ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size, + alignment, sn, zero, commit); + if (ret == NULL) { + if (chunk_hooks->alloc == chunk_alloc_default) { + /* Call directly to propagate tsdn. */ + ret = chunk_alloc_default_impl(tsdn, arena, new_addr, + size, alignment, zero, commit); + } else { + ret = chunk_hooks->alloc(new_addr, size, alignment, + zero, commit, arena->ind); + } + + if (ret == NULL) + return (NULL); + + *sn = arena_extent_sn_next(arena); + + if (config_valgrind && chunk_hooks->alloc != + chunk_alloc_default) + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize); + } + return (ret); } static void -chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, - extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, - void *chunk, size_t size, bool zeroed, bool committed) +chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache, + void *chunk, size_t size, size_t sn, bool zeroed, bool committed) { bool unzeroed; extent_node_t *node, *prev; @@ -476,9 +520,9 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, unzeroed = cache || !zeroed; JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); - malloc_mutex_lock(&arena->chunks_mtx); - chunk_hooks_assure_initialized_locked(arena, chunk_hooks); - extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, + malloc_mutex_lock(tsdn, &arena->chunks_mtx); + chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks); + extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0, false, false); node = extent_tree_ad_nsearch(chunks_ad, &key); /* Try to coalesce forward. */ @@ -490,19 +534,21 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, /* * Coalesce chunk with the following address range. This does * not change the position within chunks_ad, so only - * remove/insert from/into chunks_szad. + * remove/insert from/into chunks_szsnad. */ - extent_tree_szad_remove(chunks_szad, node); + extent_tree_szsnad_remove(chunks_szsnad, node); arena_chunk_cache_maybe_remove(arena, node, cache); extent_node_addr_set(node, chunk); extent_node_size_set(node, size + extent_node_size_get(node)); + if (sn < extent_node_sn_get(node)) + extent_node_sn_set(node, sn); extent_node_zeroed_set(node, extent_node_zeroed_get(node) && !unzeroed); - extent_tree_szad_insert(chunks_szad, node); + extent_tree_szsnad_insert(chunks_szsnad, node); arena_chunk_cache_maybe_insert(arena, node, cache); } else { /* Coalescing forward failed, so insert a new node. */ - node = arena_node_alloc(arena); + node = arena_node_alloc(tsdn, arena); if (node == NULL) { /* * Node allocation failed, which is an exceedingly @@ -511,15 +557,15 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, * a virtual memory leak. 
*/ if (cache) { - chunk_purge_wrapper(arena, chunk_hooks, chunk, - size, 0, size); + chunk_purge_wrapper(tsdn, arena, chunk_hooks, + chunk, size, 0, size); } goto label_return; } - extent_node_init(node, arena, chunk, size, !unzeroed, + extent_node_init(node, arena, chunk, size, sn, !unzeroed, committed); extent_tree_ad_insert(chunks_ad, node); - extent_tree_szad_insert(chunks_szad, node); + extent_tree_szsnad_insert(chunks_szsnad, node); arena_chunk_cache_maybe_insert(arena, node, cache); } @@ -533,31 +579,33 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, /* * Coalesce chunk with the previous address range. This does * not change the position within chunks_ad, so only - * remove/insert node from/into chunks_szad. + * remove/insert node from/into chunks_szsnad. */ - extent_tree_szad_remove(chunks_szad, prev); + extent_tree_szsnad_remove(chunks_szsnad, prev); extent_tree_ad_remove(chunks_ad, prev); arena_chunk_cache_maybe_remove(arena, prev, cache); - extent_tree_szad_remove(chunks_szad, node); + extent_tree_szsnad_remove(chunks_szsnad, node); arena_chunk_cache_maybe_remove(arena, node, cache); extent_node_addr_set(node, extent_node_addr_get(prev)); extent_node_size_set(node, extent_node_size_get(prev) + extent_node_size_get(node)); + if (extent_node_sn_get(prev) < extent_node_sn_get(node)) + extent_node_sn_set(node, extent_node_sn_get(prev)); extent_node_zeroed_set(node, extent_node_zeroed_get(prev) && extent_node_zeroed_get(node)); - extent_tree_szad_insert(chunks_szad, node); + extent_tree_szsnad_insert(chunks_szsnad, node); arena_chunk_cache_maybe_insert(arena, node, cache); - arena_node_dalloc(arena, prev); + arena_node_dalloc(tsdn, arena, prev); } label_return: - malloc_mutex_unlock(&arena->chunks_mtx); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); } void -chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, - size_t size, bool committed) +chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t size, size_t sn, bool committed) { assert(chunk != NULL); @@ -565,24 +613,49 @@ chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, assert(size != 0); assert((size & chunksize_mask) == 0); - chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached, - &arena->chunks_ad_cached, true, chunk, size, false, committed); - arena_maybe_purge(arena); + chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached, + &arena->chunks_ad_cached, true, chunk, size, sn, false, + committed); + arena_maybe_purge(tsdn, arena); +} + +static bool +chunk_dalloc_default_impl(void *chunk, size_t size) +{ + + if (!have_dss || !chunk_in_dss(chunk)) + return (chunk_dalloc_mmap(chunk, size)); + return (true); +} + +static bool +chunk_dalloc_default(void *chunk, size_t size, bool committed, + unsigned arena_ind) +{ + + return (chunk_dalloc_default_impl(chunk, size)); } void -chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, - size_t size, bool zeroed, bool committed) +chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t size, size_t sn, bool zeroed, bool committed) { + bool err; assert(chunk != NULL); assert(CHUNK_ADDR2BASE(chunk) == chunk); assert(size != 0); assert((size & chunksize_mask) == 0); - chunk_hooks_assure_initialized(arena, chunk_hooks); + chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); /* Try to deallocate. 
*/ - if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind)) + if (chunk_hooks->dalloc == chunk_dalloc_default) { + /* Call directly to propagate tsdn. */ + err = chunk_dalloc_default_impl(chunk, size); + } else + err = chunk_hooks->dalloc(chunk, size, committed, arena->ind); + + if (!err) return; /* Try to decommit; purge if that fails. */ if (committed) { @@ -591,29 +664,12 @@ chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, } zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size, arena->ind); - chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained, - &arena->chunks_ad_retained, false, chunk, size, zeroed, committed); -} - -static bool -chunk_dalloc_default(void *chunk, size_t size, bool committed, - unsigned arena_ind) -{ + chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained, + &arena->chunks_ad_retained, false, chunk, size, sn, zeroed, + committed); - if (!have_dss || !chunk_in_dss(chunk)) - return (chunk_dalloc_mmap(chunk, size)); - return (true); -} - -void -chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, - size_t size, bool committed) -{ - - chunk_hooks_assure_initialized(arena, chunk_hooks); - chunk_hooks->dalloc(chunk, size, committed, arena->ind); - if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default) - JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); + if (config_stats) + arena->stats.retained += size; } static bool @@ -634,8 +690,9 @@ chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length, length)); } -bool -chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length) +static bool +chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length, + unsigned arena_ind) { assert(chunk != NULL); @@ -648,21 +705,12 @@ chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length) length)); } -static bool -chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length, - unsigned arena_ind) -{ - - return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset, - length)); -} - bool -chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, - size_t size, size_t offset, size_t length) +chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t size, size_t offset, size_t length) { - chunk_hooks_assure_initialized(arena, chunk_hooks); + chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); return (chunk_hooks->purge(chunk, size, offset, length, arena->ind)); } @@ -677,23 +725,30 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b, } static bool -chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, - bool committed, unsigned arena_ind) +chunk_merge_default_impl(void *chunk_a, void *chunk_b) { if (!maps_coalesce) return (true); - if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b)) + if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b)) return (true); return (false); } +static bool +chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, + bool committed, unsigned arena_ind) +{ + + return (chunk_merge_default_impl(chunk_a, chunk_b)); +} + static rtree_node_elm_t * chunks_rtree_node_alloc(size_t nelms) { - return ((rtree_node_elm_t *)base_alloc(nelms * + return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms * sizeof(rtree_node_elm_t))); } @@ -716,7 +771,7 @@ chunk_boot(void) * so pages_map will always take fast path. 
*/ if (!opt_lg_chunk) { - opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity) + opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity) - 1; } #else @@ -730,32 +785,11 @@ chunk_boot(void) chunksize_mask = chunksize - 1; chunk_npages = (chunksize >> LG_PAGE); - if (have_dss && chunk_dss_boot()) - return (true); - if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) - - opt_lg_chunk, chunks_rtree_node_alloc, NULL)) + if (have_dss) + chunk_dss_boot(); + if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) - + opt_lg_chunk), chunks_rtree_node_alloc, NULL)) return (true); return (false); } - -void -chunk_prefork(void) -{ - - chunk_dss_prefork(); -} - -void -chunk_postfork_parent(void) -{ - - chunk_dss_postfork_parent(); -} - -void -chunk_postfork_child(void) -{ - - chunk_dss_postfork_child(); -} diff --git a/deps/jemalloc/src/chunk_dss.c b/deps/jemalloc/src/chunk_dss.c index 61fc91696..ee3f83888 100644 --- a/deps/jemalloc/src/chunk_dss.c +++ b/deps/jemalloc/src/chunk_dss.c @@ -10,20 +10,19 @@ const char *dss_prec_names[] = { "N/A" }; -/* Current dss precedence default, used when creating new arenas. */ -static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT; - /* - * Protects sbrk() calls. This avoids malloc races among threads, though it - * does not protect against races with threads that call sbrk() directly. + * Current dss precedence default, used when creating new arenas. NB: This is + * stored as unsigned rather than dss_prec_t because in principle there's no + * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use + * atomic operations to synchronize the setting. */ -static malloc_mutex_t dss_mtx; +static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT; /* Base address of the DSS. */ static void *dss_base; -/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */ -static void *dss_prev; -/* Current upper limit on DSS addresses. */ +/* Atomic boolean indicating whether the DSS is exhausted. */ +static unsigned dss_exhausted; +/* Atomic current upper limit on DSS addresses. */ static void *dss_max; /******************************************************************************/ @@ -47,9 +46,7 @@ chunk_dss_prec_get(void) if (!have_dss) return (dss_prec_disabled); - malloc_mutex_lock(&dss_mtx); - ret = dss_prec_default; - malloc_mutex_unlock(&dss_mtx); + ret = (dss_prec_t)atomic_read_u(&dss_prec_default); return (ret); } @@ -59,15 +56,46 @@ chunk_dss_prec_set(dss_prec_t dss_prec) if (!have_dss) return (dss_prec != dss_prec_disabled); - malloc_mutex_lock(&dss_mtx); - dss_prec_default = dss_prec; - malloc_mutex_unlock(&dss_mtx); + atomic_write_u(&dss_prec_default, (unsigned)dss_prec); return (false); } +static void * +chunk_dss_max_update(void *new_addr) +{ + void *max_cur; + spin_t spinner; + + /* + * Get the current end of the DSS as max_cur and assure that dss_max is + * up to date. + */ + spin_init(&spinner); + while (true) { + void *max_prev = atomic_read_p(&dss_max); + + max_cur = chunk_dss_sbrk(0); + if ((uintptr_t)max_prev > (uintptr_t)max_cur) { + /* + * Another thread optimistically updated dss_max. Wait + * for it to finish. + */ + spin_adaptive(&spinner); + continue; + } + if (!atomic_cas_p(&dss_max, max_prev, max_cur)) + break; + } + /* Fixed new_addr can only be supported if it is at the edge of DSS. 
*/ + if (new_addr != NULL && max_cur != new_addr) + return (NULL); + + return (max_cur); +} + void * -chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment, - bool *zero, bool *commit) +chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit) { cassert(have_dss); assert(size > 0 && (size & chunksize_mask) == 0); @@ -80,28 +108,20 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment, if ((intptr_t)size < 0) return (NULL); - malloc_mutex_lock(&dss_mtx); - if (dss_prev != (void *)-1) { - + if (!atomic_read_u(&dss_exhausted)) { /* * The loop is necessary to recover from races with other * threads that are using the DSS for something other than * malloc. */ - do { - void *ret, *cpad, *dss_next; + while (true) { + void *ret, *cpad, *max_cur, *dss_next, *dss_prev; size_t gap_size, cpad_size; intptr_t incr; - /* Avoid an unnecessary system call. */ - if (new_addr != NULL && dss_max != new_addr) - break; - - /* Get the current end of the DSS. */ - dss_max = chunk_dss_sbrk(0); - /* Make sure the earlier condition still holds. */ - if (new_addr != NULL && dss_max != new_addr) - break; + max_cur = chunk_dss_max_update(new_addr); + if (max_cur == NULL) + goto label_oom; /* * Calculate how much padding is necessary to @@ -120,22 +140,29 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment, cpad_size = (uintptr_t)ret - (uintptr_t)cpad; dss_next = (void *)((uintptr_t)ret + size); if ((uintptr_t)ret < (uintptr_t)dss_max || - (uintptr_t)dss_next < (uintptr_t)dss_max) { - /* Wrap-around. */ - malloc_mutex_unlock(&dss_mtx); - return (NULL); - } + (uintptr_t)dss_next < (uintptr_t)dss_max) + goto label_oom; /* Wrap-around. */ incr = gap_size + cpad_size + size; + + /* + * Optimistically update dss_max, and roll back below if + * sbrk() fails. No other thread will try to extend the + * DSS while dss_max is greater than the current DSS + * max reported by sbrk(0). + */ + if (atomic_cas_p(&dss_max, max_cur, dss_next)) + continue; + + /* Try to allocate. */ dss_prev = chunk_dss_sbrk(incr); - if (dss_prev == dss_max) { + if (dss_prev == max_cur) { /* Success. */ - dss_max = dss_next; - malloc_mutex_unlock(&dss_mtx); if (cpad_size != 0) { chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - chunk_dalloc_wrapper(arena, + chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, cpad, cpad_size, + arena_extent_sn_next(arena), false, true); } if (*zero) { @@ -147,68 +174,65 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment, *commit = pages_decommit(ret, size); return (ret); } - } while (dss_prev != (void *)-1); - } - malloc_mutex_unlock(&dss_mtx); + /* + * Failure, whether due to OOM or a race with a raw + * sbrk() call from outside the allocator. Try to roll + * back optimistic dss_max update; if rollback fails, + * it's due to another caller of this function having + * succeeded since this invocation started, in which + * case rollback is not necessary. + */ + atomic_cas_p(&dss_max, dss_next, max_cur); + if (dss_prev == (void *)-1) { + /* OOM. 
*/ + atomic_write_u(&dss_exhausted, (unsigned)true); + goto label_oom; + } + } + } +label_oom: return (NULL); } -bool -chunk_in_dss(void *chunk) +static bool +chunk_in_dss_helper(void *chunk, void *max) { - bool ret; - cassert(have_dss); - - malloc_mutex_lock(&dss_mtx); - if ((uintptr_t)chunk >= (uintptr_t)dss_base - && (uintptr_t)chunk < (uintptr_t)dss_max) - ret = true; - else - ret = false; - malloc_mutex_unlock(&dss_mtx); - - return (ret); + return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk < + (uintptr_t)max); } bool -chunk_dss_boot(void) +chunk_in_dss(void *chunk) { cassert(have_dss); - if (malloc_mutex_init(&dss_mtx)) - return (true); - dss_base = chunk_dss_sbrk(0); - dss_prev = dss_base; - dss_max = dss_base; - - return (false); + return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max))); } -void -chunk_dss_prefork(void) +bool +chunk_dss_mergeable(void *chunk_a, void *chunk_b) { + void *max; - if (have_dss) - malloc_mutex_prefork(&dss_mtx); -} - -void -chunk_dss_postfork_parent(void) -{ + cassert(have_dss); - if (have_dss) - malloc_mutex_postfork_parent(&dss_mtx); + max = atomic_read_p(&dss_max); + return (chunk_in_dss_helper(chunk_a, max) == + chunk_in_dss_helper(chunk_b, max)); } void -chunk_dss_postfork_child(void) +chunk_dss_boot(void) { - if (have_dss) - malloc_mutex_postfork_child(&dss_mtx); + cassert(have_dss); + + dss_base = chunk_dss_sbrk(0); + dss_exhausted = (unsigned)(dss_base == (void *)-1); + dss_max = dss_base; } /******************************************************************************/ diff --git a/deps/jemalloc/src/chunk_mmap.c b/deps/jemalloc/src/chunk_mmap.c index b9ba74191..73fc497af 100644 --- a/deps/jemalloc/src/chunk_mmap.c +++ b/deps/jemalloc/src/chunk_mmap.c @@ -16,23 +16,22 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit) do { void *pages; size_t leadsize; - pages = pages_map(NULL, alloc_size); + pages = pages_map(NULL, alloc_size, commit); if (pages == NULL) return (NULL); leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - (uintptr_t)pages; - ret = pages_trim(pages, alloc_size, leadsize, size); + ret = pages_trim(pages, alloc_size, leadsize, size, commit); } while (ret == NULL); assert(ret != NULL); *zero = true; - if (!*commit) - *commit = pages_decommit(ret, size); return (ret); } void * -chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit) +chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, + bool *commit) { void *ret; size_t offset; @@ -53,9 +52,10 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit) assert(alignment != 0); assert((alignment & chunksize_mask) == 0); - ret = pages_map(NULL, size); - if (ret == NULL) - return (NULL); + ret = pages_map(new_addr, size, commit); + if (ret == NULL || ret == new_addr) + return (ret); + assert(new_addr == NULL); offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); if (offset != 0) { pages_unmap(ret, size); @@ -64,8 +64,6 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit) assert(ret != NULL); *zero = true; - if (!*commit) - *commit = pages_decommit(ret, size); return (ret); } diff --git a/deps/jemalloc/src/ckh.c b/deps/jemalloc/src/ckh.c index 53a1c1ef1..159bd8ae1 100644 --- a/deps/jemalloc/src/ckh.c +++ b/deps/jemalloc/src/ckh.c @@ -99,7 +99,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, * Cycle through the cells in the bucket, starting at a random position. * The randomness avoids worst-case search overhead as buckets fill up. 
*/ - prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); + offset = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; @@ -141,7 +142,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, * were an item for which both hashes indicated the same * bucket. */ - prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); + i = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; assert(cell->key != NULL); @@ -247,8 +249,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) { bool ret; ckhc_t *tab, *ttab; - size_t lg_curcells; - unsigned lg_prevbuckets; + unsigned lg_prevbuckets, lg_curcells; #ifdef CKH_COUNT ckh->ngrows++; @@ -266,12 +267,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) lg_curcells++; usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (usize == 0) { + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { ret = true; goto label_return; } - tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, - true, NULL); + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, + true, NULL, true, arena_ichoose(tsd, NULL)); if (tab == NULL) { ret = true; goto label_return; @@ -283,12 +284,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { - idalloctm(tsd, tab, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), tab, NULL, true, true); break; } /* Rebuilding failed, so back out partially rebuilt table. */ - idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; } @@ -302,8 +303,8 @@ static void ckh_shrink(tsd_t *tsd, ckh_t *ckh) { ckhc_t *tab, *ttab; - size_t lg_curcells, usize; - unsigned lg_prevbuckets; + size_t usize; + unsigned lg_prevbuckets, lg_curcells; /* * It is possible (though unlikely, given well behaved hashes) that the @@ -312,10 +313,10 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (usize == 0) + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) return; - tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, - NULL); + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, + true, arena_ichoose(tsd, NULL)); if (tab == NULL) { /* * An OOM error isn't worth propagating, since it doesn't @@ -330,7 +331,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { - idalloctm(tsd, tab, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), tab, NULL, true, true); #ifdef CKH_COUNT ckh->nshrinks++; #endif @@ -338,7 +339,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) } /* Rebuilding failed, so back out partially rebuilt table. 
*/ - idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; #ifdef CKH_COUNT @@ -387,12 +388,12 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh->keycomp = keycomp; usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); - if (usize == 0) { + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { ret = true; goto label_return; } - ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, - NULL); + ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, + NULL, true, arena_ichoose(tsd, NULL)); if (ckh->tab == NULL) { ret = true; goto label_return; @@ -421,9 +422,9 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh) (unsigned long long)ckh->nrelocs); #endif - idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); if (config_debug) - memset(ckh, 0x5a, sizeof(ckh_t)); + memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); } size_t diff --git a/deps/jemalloc/src/ctl.c b/deps/jemalloc/src/ctl.c index 3de8e602d..bc78b2055 100644 --- a/deps/jemalloc/src/ctl.c +++ b/deps/jemalloc/src/ctl.c @@ -24,7 +24,7 @@ ctl_named_node(const ctl_node_t *node) } JEMALLOC_INLINE_C const ctl_named_node_t * -ctl_named_children(const ctl_named_node_t *node, int index) +ctl_named_children(const ctl_named_node_t *node, size_t index) { const ctl_named_node_t *children = ctl_named_node(node->children); @@ -42,25 +42,25 @@ ctl_indexed_node(const ctl_node_t *node) /* Function prototypes for non-inline static functions. */ #define CTL_PROTO(n) \ -static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen); +static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen); #define INDEX_PROTO(n) \ -static const ctl_named_node_t *n##_index(const size_t *mib, \ - size_t miblen, size_t i); +static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \ + const size_t *mib, size_t miblen, size_t i); static bool ctl_arena_init(ctl_arena_stats_t *astats); static void ctl_arena_clear(ctl_arena_stats_t *astats); -static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, +static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena); static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats); -static void ctl_arena_refresh(arena_t *arena, unsigned i); -static bool ctl_grow(void); -static void ctl_refresh(void); -static bool ctl_init(void); -static int ctl_lookup(const char *name, ctl_node_t const **nodesp, - size_t *mibp, size_t *depthp); +static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i); +static bool ctl_grow(tsdn_t *tsdn); +static void ctl_refresh(tsdn_t *tsdn); +static bool ctl_init(tsdn_t *tsdn); +static int ctl_lookup(tsdn_t *tsdn, const char *name, + ctl_node_t const **nodesp, size_t *mibp, size_t *depthp); CTL_PROTO(version) CTL_PROTO(epoch) @@ -77,6 +77,7 @@ CTL_PROTO(config_cache_oblivious) CTL_PROTO(config_debug) CTL_PROTO(config_fill) CTL_PROTO(config_lazy_lock) +CTL_PROTO(config_malloc_conf) CTL_PROTO(config_munmap) CTL_PROTO(config_prof) CTL_PROTO(config_prof_libgcc) @@ -91,7 +92,9 @@ CTL_PROTO(opt_abort) CTL_PROTO(opt_dss) CTL_PROTO(opt_lg_chunk) CTL_PROTO(opt_narenas) +CTL_PROTO(opt_purge) CTL_PROTO(opt_lg_dirty_mult) +CTL_PROTO(opt_decay_time) CTL_PROTO(opt_stats_print) CTL_PROTO(opt_junk) CTL_PROTO(opt_zero) @@ -114,10 +117,13 
@@ CTL_PROTO(opt_prof_accum) CTL_PROTO(tcache_create) CTL_PROTO(tcache_flush) CTL_PROTO(tcache_destroy) +static void arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all); CTL_PROTO(arena_i_purge) -static void arena_purge(unsigned arena_ind); +CTL_PROTO(arena_i_decay) +CTL_PROTO(arena_i_reset) CTL_PROTO(arena_i_dss) CTL_PROTO(arena_i_lg_dirty_mult) +CTL_PROTO(arena_i_decay_time) CTL_PROTO(arena_i_chunk_hooks) INDEX_PROTO(arena_i) CTL_PROTO(arenas_bin_i_size) @@ -131,6 +137,7 @@ INDEX_PROTO(arenas_hchunk_i) CTL_PROTO(arenas_narenas) CTL_PROTO(arenas_initialized) CTL_PROTO(arenas_lg_dirty_mult) +CTL_PROTO(arenas_decay_time) CTL_PROTO(arenas_quantum) CTL_PROTO(arenas_page) CTL_PROTO(arenas_tcache_max) @@ -181,9 +188,11 @@ INDEX_PROTO(stats_arenas_i_hchunks_j) CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_dss) CTL_PROTO(stats_arenas_i_lg_dirty_mult) +CTL_PROTO(stats_arenas_i_decay_time) CTL_PROTO(stats_arenas_i_pactive) CTL_PROTO(stats_arenas_i_pdirty) CTL_PROTO(stats_arenas_i_mapped) +CTL_PROTO(stats_arenas_i_retained) CTL_PROTO(stats_arenas_i_npurge) CTL_PROTO(stats_arenas_i_nmadvise) CTL_PROTO(stats_arenas_i_purged) @@ -196,6 +205,7 @@ CTL_PROTO(stats_active) CTL_PROTO(stats_metadata) CTL_PROTO(stats_resident) CTL_PROTO(stats_mapped) +CTL_PROTO(stats_retained) /******************************************************************************/ /* mallctl tree. */ @@ -241,6 +251,7 @@ static const ctl_named_node_t config_node[] = { {NAME("debug"), CTL(config_debug)}, {NAME("fill"), CTL(config_fill)}, {NAME("lazy_lock"), CTL(config_lazy_lock)}, + {NAME("malloc_conf"), CTL(config_malloc_conf)}, {NAME("munmap"), CTL(config_munmap)}, {NAME("prof"), CTL(config_prof)}, {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, @@ -258,7 +269,9 @@ static const ctl_named_node_t opt_node[] = { {NAME("dss"), CTL(opt_dss)}, {NAME("lg_chunk"), CTL(opt_lg_chunk)}, {NAME("narenas"), CTL(opt_narenas)}, + {NAME("purge"), CTL(opt_purge)}, {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, + {NAME("decay_time"), CTL(opt_decay_time)}, {NAME("stats_print"), CTL(opt_stats_print)}, {NAME("junk"), CTL(opt_junk)}, {NAME("zero"), CTL(opt_zero)}, @@ -288,8 +301,11 @@ static const ctl_named_node_t tcache_node[] = { static const ctl_named_node_t arena_i_node[] = { {NAME("purge"), CTL(arena_i_purge)}, + {NAME("decay"), CTL(arena_i_decay)}, + {NAME("reset"), CTL(arena_i_reset)}, {NAME("dss"), CTL(arena_i_dss)}, {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)}, + {NAME("decay_time"), CTL(arena_i_decay_time)}, {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)} }; static const ctl_named_node_t super_arena_i_node[] = { @@ -339,6 +355,7 @@ static const ctl_named_node_t arenas_node[] = { {NAME("narenas"), CTL(arenas_narenas)}, {NAME("initialized"), CTL(arenas_initialized)}, {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)}, + {NAME("decay_time"), CTL(arenas_decay_time)}, {NAME("quantum"), CTL(arenas_quantum)}, {NAME("page"), CTL(arenas_page)}, {NAME("tcache_max"), CTL(arenas_tcache_max)}, @@ -439,9 +456,11 @@ static const ctl_named_node_t stats_arenas_i_node[] = { {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, {NAME("dss"), CTL(stats_arenas_i_dss)}, {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)}, + {NAME("decay_time"), CTL(stats_arenas_i_decay_time)}, {NAME("pactive"), CTL(stats_arenas_i_pactive)}, {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, {NAME("mapped"), CTL(stats_arenas_i_mapped)}, + {NAME("retained"), CTL(stats_arenas_i_retained)}, {NAME("npurge"), CTL(stats_arenas_i_npurge)}, {NAME("nmadvise"), 
CTL(stats_arenas_i_nmadvise)}, {NAME("purged"), CTL(stats_arenas_i_purged)}, @@ -468,6 +487,7 @@ static const ctl_named_node_t stats_node[] = { {NAME("metadata"), CTL(stats_metadata)}, {NAME("resident"), CTL(stats_resident)}, {NAME("mapped"), CTL(stats_mapped)}, + {NAME("retained"), CTL(stats_retained)}, {NAME("arenas"), CHILD(indexed, stats_arenas)} }; @@ -519,8 +539,10 @@ static void ctl_arena_clear(ctl_arena_stats_t *astats) { + astats->nthreads = 0; astats->dss = dss_prec_names[dss_prec_limit]; astats->lg_dirty_mult = -1; + astats->decay_time = -1; astats->pactive = 0; astats->pdirty = 0; if (config_stats) { @@ -538,20 +560,27 @@ ctl_arena_clear(ctl_arena_stats_t *astats) } static void -ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) +ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena) { unsigned i; - arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult, - &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats, - cstats->lstats, cstats->hstats); - - for (i = 0; i < NBINS; i++) { - cstats->allocated_small += cstats->bstats[i].curregs * - index2size(i); - cstats->nmalloc_small += cstats->bstats[i].nmalloc; - cstats->ndalloc_small += cstats->bstats[i].ndalloc; - cstats->nrequests_small += cstats->bstats[i].nrequests; + if (config_stats) { + arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss, + &cstats->lg_dirty_mult, &cstats->decay_time, + &cstats->pactive, &cstats->pdirty, &cstats->astats, + cstats->bstats, cstats->lstats, cstats->hstats); + + for (i = 0; i < NBINS; i++) { + cstats->allocated_small += cstats->bstats[i].curregs * + index2size(i); + cstats->nmalloc_small += cstats->bstats[i].nmalloc; + cstats->ndalloc_small += cstats->bstats[i].ndalloc; + cstats->nrequests_small += cstats->bstats[i].nrequests; + } + } else { + arena_basic_stats_merge(tsdn, arena, &cstats->nthreads, + &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time, + &cstats->pactive, &cstats->pdirty); } } @@ -560,89 +589,91 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) { unsigned i; + sstats->nthreads += astats->nthreads; sstats->pactive += astats->pactive; sstats->pdirty += astats->pdirty; - sstats->astats.mapped += astats->astats.mapped; - sstats->astats.npurge += astats->astats.npurge; - sstats->astats.nmadvise += astats->astats.nmadvise; - sstats->astats.purged += astats->astats.purged; - - sstats->astats.metadata_mapped += astats->astats.metadata_mapped; - sstats->astats.metadata_allocated += astats->astats.metadata_allocated; - - sstats->allocated_small += astats->allocated_small; - sstats->nmalloc_small += astats->nmalloc_small; - sstats->ndalloc_small += astats->ndalloc_small; - sstats->nrequests_small += astats->nrequests_small; - - sstats->astats.allocated_large += astats->astats.allocated_large; - sstats->astats.nmalloc_large += astats->astats.nmalloc_large; - sstats->astats.ndalloc_large += astats->astats.ndalloc_large; - sstats->astats.nrequests_large += astats->astats.nrequests_large; - - sstats->astats.allocated_huge += astats->astats.allocated_huge; - sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge; - sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge; - - for (i = 0; i < NBINS; i++) { - sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; - sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; - sstats->bstats[i].nrequests += astats->bstats[i].nrequests; - sstats->bstats[i].curregs += astats->bstats[i].curregs; - if (config_tcache) { - sstats->bstats[i].nfills 
+= astats->bstats[i].nfills; - sstats->bstats[i].nflushes += - astats->bstats[i].nflushes; + if (config_stats) { + sstats->astats.mapped += astats->astats.mapped; + sstats->astats.retained += astats->astats.retained; + sstats->astats.npurge += astats->astats.npurge; + sstats->astats.nmadvise += astats->astats.nmadvise; + sstats->astats.purged += astats->astats.purged; + + sstats->astats.metadata_mapped += + astats->astats.metadata_mapped; + sstats->astats.metadata_allocated += + astats->astats.metadata_allocated; + + sstats->allocated_small += astats->allocated_small; + sstats->nmalloc_small += astats->nmalloc_small; + sstats->ndalloc_small += astats->ndalloc_small; + sstats->nrequests_small += astats->nrequests_small; + + sstats->astats.allocated_large += + astats->astats.allocated_large; + sstats->astats.nmalloc_large += astats->astats.nmalloc_large; + sstats->astats.ndalloc_large += astats->astats.ndalloc_large; + sstats->astats.nrequests_large += + astats->astats.nrequests_large; + + sstats->astats.allocated_huge += astats->astats.allocated_huge; + sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge; + sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge; + + for (i = 0; i < NBINS; i++) { + sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; + sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; + sstats->bstats[i].nrequests += + astats->bstats[i].nrequests; + sstats->bstats[i].curregs += astats->bstats[i].curregs; + if (config_tcache) { + sstats->bstats[i].nfills += + astats->bstats[i].nfills; + sstats->bstats[i].nflushes += + astats->bstats[i].nflushes; + } + sstats->bstats[i].nruns += astats->bstats[i].nruns; + sstats->bstats[i].reruns += astats->bstats[i].reruns; + sstats->bstats[i].curruns += astats->bstats[i].curruns; } - sstats->bstats[i].nruns += astats->bstats[i].nruns; - sstats->bstats[i].reruns += astats->bstats[i].reruns; - sstats->bstats[i].curruns += astats->bstats[i].curruns; - } - for (i = 0; i < nlclasses; i++) { - sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; - sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; - sstats->lstats[i].nrequests += astats->lstats[i].nrequests; - sstats->lstats[i].curruns += astats->lstats[i].curruns; - } + for (i = 0; i < nlclasses; i++) { + sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; + sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; + sstats->lstats[i].nrequests += + astats->lstats[i].nrequests; + sstats->lstats[i].curruns += astats->lstats[i].curruns; + } - for (i = 0; i < nhclasses; i++) { - sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc; - sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc; - sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks; + for (i = 0; i < nhclasses; i++) { + sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc; + sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc; + sstats->hstats[i].curhchunks += + astats->hstats[i].curhchunks; + } } } static void -ctl_arena_refresh(arena_t *arena, unsigned i) +ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i) { ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas]; ctl_arena_clear(astats); - - sstats->nthreads += astats->nthreads; - if (config_stats) { - ctl_arena_stats_amerge(astats, arena); - /* Merge into sum stats as well. */ - ctl_arena_stats_smerge(sstats, astats); - } else { - astats->pactive += arena->nactive; - astats->pdirty += arena->ndirty; - /* Merge into sum stats as well. 
*/ - sstats->pactive += arena->nactive; - sstats->pdirty += arena->ndirty; - } + ctl_arena_stats_amerge(tsdn, astats, arena); + /* Merge into sum stats as well. */ + ctl_arena_stats_smerge(sstats, astats); } static bool -ctl_grow(void) +ctl_grow(tsdn_t *tsdn) { ctl_arena_stats_t *astats; /* Initialize new arena. */ - if (arena_init(ctl_stats.narenas) == NULL) + if (arena_init(tsdn, ctl_stats.narenas) == NULL) return (true); /* Allocate extended arena stats. */ @@ -677,47 +708,32 @@ ctl_grow(void) } static void -ctl_refresh(void) +ctl_refresh(tsdn_t *tsdn) { - tsd_t *tsd; unsigned i; - bool refreshed; VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); /* * Clear sum stats, since they will be merged into by * ctl_arena_refresh(). */ - ctl_stats.arenas[ctl_stats.narenas].nthreads = 0; ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]); - tsd = tsd_fetch(); - for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) { - tarenas[i] = arena_get(tsd, i, false, false); - if (tarenas[i] == NULL && !refreshed) { - tarenas[i] = arena_get(tsd, i, false, true); - refreshed = true; - } - } - - for (i = 0; i < ctl_stats.narenas; i++) { - if (tarenas[i] != NULL) - ctl_stats.arenas[i].nthreads = arena_nbound(i); - else - ctl_stats.arenas[i].nthreads = 0; - } + for (i = 0; i < ctl_stats.narenas; i++) + tarenas[i] = arena_get(tsdn, i, false); for (i = 0; i < ctl_stats.narenas; i++) { bool initialized = (tarenas[i] != NULL); ctl_stats.arenas[i].initialized = initialized; if (initialized) - ctl_arena_refresh(tarenas[i], i); + ctl_arena_refresh(tsdn, tarenas[i], i); } if (config_stats) { size_t base_allocated, base_resident, base_mapped; - base_stats_get(&base_allocated, &base_resident, &base_mapped); + base_stats_get(tsdn, &base_allocated, &base_resident, + &base_mapped); ctl_stats.allocated = ctl_stats.arenas[ctl_stats.narenas].allocated_small + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large + @@ -734,17 +750,19 @@ ctl_refresh(void) ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE); ctl_stats.mapped = base_mapped + ctl_stats.arenas[ctl_stats.narenas].astats.mapped; + ctl_stats.retained = + ctl_stats.arenas[ctl_stats.narenas].astats.retained; } ctl_epoch++; } static bool -ctl_init(void) +ctl_init(tsdn_t *tsdn) { bool ret; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsdn, &ctl_mtx); if (!ctl_initialized) { /* * Allocate space for one extra arena stats element, which @@ -786,19 +804,19 @@ ctl_init(void) ctl_stats.arenas[ctl_stats.narenas].initialized = true; ctl_epoch = 0; - ctl_refresh(); + ctl_refresh(tsdn); ctl_initialized = true; } ret = false; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsdn, &ctl_mtx); return (ret); } static int -ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, - size_t *depthp) +ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, + size_t *mibp, size_t *depthp) { int ret; const char *elm, *tdot, *dot; @@ -850,7 +868,7 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, } inode = ctl_indexed_node(node->children); - node = inode->index(mibp, *depthp, (size_t)index); + node = inode->index(tsdn, mibp, *depthp, (size_t)index); if (node == NULL) { ret = ENOENT; goto label_return; @@ -894,8 +912,8 @@ label_return: } int -ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) +ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) { int ret; size_t depth; @@ -903,19 +921,19 @@ ctl_byname(const char *name, 
void *oldp, size_t *oldlenp, void *newp, size_t mib[CTL_MAX_DEPTH]; const ctl_named_node_t *node; - if (!ctl_initialized && ctl_init()) { + if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) { ret = EAGAIN; goto label_return; } depth = CTL_MAX_DEPTH; - ret = ctl_lookup(name, nodes, mib, &depth); + ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); if (ret != 0) goto label_return; node = ctl_named_node(nodes[depth-1]); if (node != NULL && node->ctl) - ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen); + ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); else { /* The name refers to a partial path through the ctl tree. */ ret = ENOENT; @@ -926,29 +944,29 @@ label_return: } int -ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) +ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp) { int ret; - if (!ctl_initialized && ctl_init()) { + if (!ctl_initialized && ctl_init(tsdn)) { ret = EAGAIN; goto label_return; } - ret = ctl_lookup(name, NULL, mibp, miblenp); + ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp); label_return: return(ret); } int -ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; const ctl_named_node_t *node; size_t i; - if (!ctl_initialized && ctl_init()) { + if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) { ret = EAGAIN; goto label_return; } @@ -960,7 +978,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, assert(node->nchildren > 0); if (ctl_named_node(node->children) != NULL) { /* Children are named. */ - if (node->nchildren <= mib[i]) { + if (node->nchildren <= (unsigned)mib[i]) { ret = ENOENT; goto label_return; } @@ -970,7 +988,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, /* Indexed element. */ inode = ctl_indexed_node(node->children); - node = inode->index(mib, miblen, mib[i]); + node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); if (node == NULL) { ret = ENOENT; goto label_return; @@ -980,7 +998,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, /* Call the ctl function. */ if (node && node->ctl) - ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); + ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen); else { /* Partial MIB. 
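
Illustrative aside, not part of the upstream diff: the ctl_byname()/ctl_nametomib()/ctl_bymib() entry points in the hunks above back jemalloc's public mallctl interface. A minimal sketch of driving both paths — lookup by name, and by a cached MIB whose index slot is patched per arena — assuming an unprefixed build where mallctl(), mallctlnametomib() and mallctlbymib() are exported via <jemalloc/jemalloc.h>:

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t allocated, sz = sizeof(size_t);
	size_t mib[3], miblen = sizeof(mib) / sizeof(size_t);
	unsigned arena_ind = 0;	/* arena to purge; 0 is just an example */

	/* By name: bump the epoch so stats are refreshed, then read one. */
	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
	printf("allocated: %zu\n", allocated);

	/* By MIB: translate the name once, then reuse it, patching mib[1]. */
	if (mallctlnametomib("arena.0.purge", mib, &miblen) == 0) {
		mib[1] = (size_t)arena_ind;
		mallctlbymib(mib, miblen, NULL, NULL, NULL, 0);
	}
	return (0);
}
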
*/ ret = ENOENT; @@ -994,7 +1012,7 @@ bool ctl_boot(void) { - if (malloc_mutex_init(&ctl_mtx)) + if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL)) return (true); ctl_initialized = false; @@ -1003,24 +1021,24 @@ ctl_boot(void) } void -ctl_prefork(void) +ctl_prefork(tsdn_t *tsdn) { - malloc_mutex_prefork(&ctl_mtx); + malloc_mutex_prefork(tsdn, &ctl_mtx); } void -ctl_postfork_parent(void) +ctl_postfork_parent(tsdn_t *tsdn) { - malloc_mutex_postfork_parent(&ctl_mtx); + malloc_mutex_postfork_parent(tsdn, &ctl_mtx); } void -ctl_postfork_child(void) +ctl_postfork_child(tsdn_t *tsdn) { - malloc_mutex_postfork_child(&ctl_mtx); + malloc_mutex_postfork_child(tsdn, &ctl_mtx); } /******************************************************************************/ @@ -1077,8 +1095,8 @@ ctl_postfork_child(void) */ #define CTL_RO_CLGEN(c, l, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ @@ -1086,7 +1104,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ if (!(c)) \ return (ENOENT); \ if (l) \ - malloc_mutex_lock(&ctl_mtx); \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ @@ -1094,47 +1112,47 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ ret = 0; \ label_return: \ if (l) \ - malloc_mutex_unlock(&ctl_mtx); \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ return (ret); \ } #define CTL_RO_CGEN(c, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ if (!(c)) \ return (ENOENT); \ - malloc_mutex_lock(&ctl_mtx); \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ return (ret); \ } #define CTL_RO_GEN(n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ - malloc_mutex_lock(&ctl_mtx); \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ return (ret); \ } @@ -1144,8 +1162,8 @@ label_return: \ */ #define CTL_RO_NL_CGEN(c, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ @@ -1163,8 +1181,8 @@ label_return: \ #define CTL_RO_NL_GEN(n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ @@ -1180,17 +1198,15 @@ label_return: \ #define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ static int \ -n##_ctl(const size_t 
*mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ - tsd_t *tsd; \ \ if (!(c)) \ return (ENOENT); \ READONLY(); \ - tsd = tsd_fetch(); \ oldval = (m(tsd)); \ READ(oldval, t); \ \ @@ -1199,17 +1215,17 @@ label_return: \ return (ret); \ } -#define CTL_RO_BOOL_CONFIG_GEN(n) \ +#define CTL_RO_CONFIG_GEN(n, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ { \ int ret; \ - bool oldval; \ + t oldval; \ \ READONLY(); \ oldval = n; \ - READ(oldval, bool); \ + READ(oldval, t); \ \ ret = 0; \ label_return: \ @@ -1221,48 +1237,51 @@ label_return: \ CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) static int -epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; UNUSED uint64_t newval; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(newval, uint64_t); if (newp != NULL) - ctl_refresh(); + ctl_refresh(tsd_tsdn(tsd)); READ(ctl_epoch, uint64_t); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } /******************************************************************************/ -CTL_RO_BOOL_CONFIG_GEN(config_cache_oblivious) -CTL_RO_BOOL_CONFIG_GEN(config_debug) -CTL_RO_BOOL_CONFIG_GEN(config_fill) -CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock) -CTL_RO_BOOL_CONFIG_GEN(config_munmap) -CTL_RO_BOOL_CONFIG_GEN(config_prof) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind) -CTL_RO_BOOL_CONFIG_GEN(config_stats) -CTL_RO_BOOL_CONFIG_GEN(config_tcache) -CTL_RO_BOOL_CONFIG_GEN(config_tls) -CTL_RO_BOOL_CONFIG_GEN(config_utrace) -CTL_RO_BOOL_CONFIG_GEN(config_valgrind) -CTL_RO_BOOL_CONFIG_GEN(config_xmalloc) +CTL_RO_CONFIG_GEN(config_cache_oblivious, bool) +CTL_RO_CONFIG_GEN(config_debug, bool) +CTL_RO_CONFIG_GEN(config_fill, bool) +CTL_RO_CONFIG_GEN(config_lazy_lock, bool) +CTL_RO_CONFIG_GEN(config_malloc_conf, const char *) +CTL_RO_CONFIG_GEN(config_munmap, bool) +CTL_RO_CONFIG_GEN(config_prof, bool) +CTL_RO_CONFIG_GEN(config_prof_libgcc, bool) +CTL_RO_CONFIG_GEN(config_prof_libunwind, bool) +CTL_RO_CONFIG_GEN(config_stats, bool) +CTL_RO_CONFIG_GEN(config_tcache, bool) +CTL_RO_CONFIG_GEN(config_tls, bool) +CTL_RO_CONFIG_GEN(config_utrace, bool) +CTL_RO_CONFIG_GEN(config_valgrind, bool) +CTL_RO_CONFIG_GEN(config_xmalloc, bool) /******************************************************************************/ CTL_RO_NL_GEN(opt_abort, opt_abort, bool) CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) -CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) +CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) +CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *) CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) +CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t) CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) @@ -1287,20 +1306,18 @@ 
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) /******************************************************************************/ static int -thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; - tsd_t *tsd; arena_t *oldarena; unsigned newind, oldind; - tsd = tsd_fetch(); oldarena = arena_choose(tsd, NULL); if (oldarena == NULL) return (EAGAIN); - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); newind = oldind = oldarena->ind; WRITE(newind, unsigned); READ(oldind, unsigned); @@ -1314,7 +1331,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, } /* Initialize arena if necessary. */ - newarena = arena_get(tsd, newind, true, true); + newarena = arena_get(tsd_tsdn(tsd), newind, true); if (newarena == NULL) { ret = EAGAIN; goto label_return; @@ -1324,15 +1341,15 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, if (config_tcache) { tcache_t *tcache = tsd_tcache_get(tsd); if (tcache != NULL) { - tcache_arena_reassociate(tcache, oldarena, - newarena); + tcache_arena_reassociate(tsd_tsdn(tsd), tcache, + oldarena, newarena); } } } ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } @@ -1346,8 +1363,8 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, tsd_thread_deallocatedp_get, uint64_t *) static int -thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) +thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; @@ -1371,8 +1388,8 @@ label_return: } static int -thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) +thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; @@ -1390,7 +1407,7 @@ label_return: } static int -thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp, +thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; @@ -1401,20 +1418,16 @@ thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp, READ_XOR_WRITE(); if (newp != NULL) { - tsd_t *tsd; - if (newlen != sizeof(const char *)) { ret = EINVAL; goto label_return; } - tsd = tsd_fetch(); - if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != 0) goto label_return; } else { - const char *oldname = prof_thread_name_get(); + const char *oldname = prof_thread_name_get(tsd); READ(oldname, const char *); } @@ -1424,7 +1437,7 @@ label_return: } static int -thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, +thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; @@ -1433,13 +1446,13 @@ thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, if (!config_prof) return (ENOENT); - oldval = prof_thread_active_get(); + oldval = prof_thread_active_get(tsd); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } - if (prof_thread_active_set(*(bool *)newp)) { + if (prof_thread_active_set(tsd, *(bool *)newp)) { ret = EAGAIN; goto 
label_return; } @@ -1454,19 +1467,16 @@ label_return: /******************************************************************************/ static int -tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; - tsd_t *tsd; unsigned tcache_ind; if (!config_tcache) return (ENOENT); - tsd = tsd_fetch(); - - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); if (tcaches_create(tsd, &tcache_ind)) { ret = EFAULT; @@ -1476,23 +1486,20 @@ tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } static int -tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; - tsd_t *tsd; unsigned tcache_ind; if (!config_tcache) return (ENOENT); - tsd = tsd_fetch(); - WRITEONLY(); tcache_ind = UINT_MAX; WRITE(tcache_ind, unsigned); @@ -1508,18 +1515,15 @@ label_return: } static int -tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp, +tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - tsd_t *tsd; unsigned tcache_ind; if (!config_tcache) return (ENOENT); - tsd = tsd_fetch(); - WRITEONLY(); tcache_ind = UINT_MAX; WRITE(tcache_ind, unsigned); @@ -1536,48 +1540,56 @@ label_return: /******************************************************************************/ -/* ctl_mutex must be held during execution of this function. */ static void -arena_purge(unsigned arena_ind) +arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all) { - tsd_t *tsd; - unsigned i; - bool refreshed; - VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); - tsd = tsd_fetch(); - for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) { - tarenas[i] = arena_get(tsd, i, false, false); - if (tarenas[i] == NULL && !refreshed) { - tarenas[i] = arena_get(tsd, i, false, true); - refreshed = true; - } - } + malloc_mutex_lock(tsdn, &ctl_mtx); + { + unsigned narenas = ctl_stats.narenas; - if (arena_ind == ctl_stats.narenas) { - unsigned i; - for (i = 0; i < ctl_stats.narenas; i++) { - if (tarenas[i] != NULL) - arena_purge_all(tarenas[i]); + if (arena_ind == narenas) { + unsigned i; + VARIABLE_ARRAY(arena_t *, tarenas, narenas); + + for (i = 0; i < narenas; i++) + tarenas[i] = arena_get(tsdn, i, false); + + /* + * No further need to hold ctl_mtx, since narenas and + * tarenas contain everything needed below. + */ + malloc_mutex_unlock(tsdn, &ctl_mtx); + + for (i = 0; i < narenas; i++) { + if (tarenas[i] != NULL) + arena_purge(tsdn, tarenas[i], all); + } + } else { + arena_t *tarena; + + assert(arena_ind < narenas); + + tarena = arena_get(tsdn, arena_ind, false); + + /* No further need to hold ctl_mtx. 
*/ + malloc_mutex_unlock(tsdn, &ctl_mtx); + + if (tarena != NULL) + arena_purge(tsdn, tarena, all); } - } else { - assert(arena_ind < ctl_stats.narenas); - if (tarenas[arena_ind] != NULL) - arena_purge_all(tarenas[arena_ind]); } } static int -arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; READONLY(); WRITEONLY(); - malloc_mutex_lock(&ctl_mtx); - arena_purge(mib[1]); - malloc_mutex_unlock(&ctl_mtx); + arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true); ret = 0; label_return: @@ -1585,16 +1597,65 @@ label_return: } static int -arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + + READONLY(); + WRITEONLY(); + arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false); + + ret = 0; +label_return: + return (ret); +} + +static int +arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned arena_ind; + arena_t *arena; + + READONLY(); + WRITEONLY(); + + if ((config_valgrind && unlikely(in_valgrind)) || (config_fill && + unlikely(opt_quarantine))) { + ret = EFAULT; + goto label_return; + } + + arena_ind = (unsigned)mib[1]; + if (config_debug) { + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + assert(arena_ind < ctl_stats.narenas); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + } + assert(arena_ind >= opt_narenas); + + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + + arena_reset(tsd, arena); + + ret = 0; +label_return: + return (ret); +} + +static int +arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *dss = NULL; - unsigned arena_ind = mib[1]; + unsigned arena_ind = (unsigned)mib[1]; dss_prec_t dss_prec_old = dss_prec_limit; dss_prec_t dss_prec = dss_prec_limit; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(dss, const char *); if (dss != NULL) { int i; @@ -1615,13 +1676,13 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, } if (arena_ind < ctl_stats.narenas) { - arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true); + arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); if (arena == NULL || (dss_prec != dss_prec_limit && - arena_dss_prec_set(arena, dss_prec))) { + arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) { ret = EFAULT; goto label_return; } - dss_prec_old = arena_dss_prec_get(arena); + dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena); } else { if (dss_prec != dss_prec_limit && chunk_dss_prec_set(dss_prec)) { @@ -1636,26 +1697,26 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } static int -arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) +arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - unsigned arena_ind = mib[1]; + unsigned arena_ind = (unsigned)mib[1]; arena_t *arena; - arena = arena_get(tsd_fetch(), arena_ind, 
false, true); + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); if (arena == NULL) { ret = EFAULT; goto label_return; } if (oldp != NULL && oldlenp != NULL) { - size_t oldval = arena_lg_dirty_mult_get(arena); + size_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena); READ(oldval, ssize_t); } if (newp != NULL) { @@ -1663,7 +1724,8 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, ret = EINVAL; goto label_return; } - if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) { + if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena, + *(ssize_t *)newp)) { ret = EFAULT; goto label_return; } @@ -1675,24 +1737,60 @@ label_return: } static int -arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp, +arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - unsigned arena_ind = mib[1]; + unsigned arena_ind = (unsigned)mib[1]; arena_t *arena; - malloc_mutex_lock(&ctl_mtx); + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL) { + ret = EFAULT; + goto label_return; + } + + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (arena_decay_time_set(tsd_tsdn(tsd), arena, + *(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } + } + + ret = 0; +label_return: + return (ret); +} + +static int +arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned arena_ind = (unsigned)mib[1]; + arena_t *arena; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); if (arena_ind < narenas_total_get() && (arena = - arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) { + arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { if (newp != NULL) { chunk_hooks_t old_chunk_hooks, new_chunk_hooks; WRITE(new_chunk_hooks, chunk_hooks_t); - old_chunk_hooks = chunk_hooks_set(arena, + old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena, &new_chunk_hooks); READ(old_chunk_hooks, chunk_hooks_t); } else { - chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena); + chunk_hooks_t old_chunk_hooks = + chunk_hooks_get(tsd_tsdn(tsd), arena); READ(old_chunk_hooks, chunk_hooks_t); } } else { @@ -1701,16 +1799,16 @@ arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp, } ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } static const ctl_named_node_t * -arena_i_index(const size_t *mib, size_t miblen, size_t i) +arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { - const ctl_named_node_t * ret; + const ctl_named_node_t *ret; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsdn, &ctl_mtx); if (i > ctl_stats.narenas) { ret = NULL; goto label_return; @@ -1718,20 +1816,20 @@ arena_i_index(const size_t *mib, size_t miblen, size_t i) ret = super_arena_i_node; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsdn, &ctl_mtx); return (ret); } /******************************************************************************/ static int -arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, +arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned narenas; - malloc_mutex_lock(&ctl_mtx); + 
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); if (*oldlenp != sizeof(unsigned)) { ret = EINVAL; @@ -1742,23 +1840,23 @@ arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } static int -arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, +arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned nread, i; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); if (*oldlenp != ctl_stats.narenas * sizeof(bool)) { ret = EINVAL; nread = (*oldlenp < ctl_stats.narenas * sizeof(bool)) - ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas; + ? (unsigned)(*oldlenp / sizeof(bool)) : ctl_stats.narenas; } else { ret = 0; nread = ctl_stats.narenas; @@ -1768,13 +1866,13 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } static int -arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) +arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; @@ -1798,6 +1896,32 @@ label_return: return (ret); } +static int +arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = arena_decay_time_default_get(); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (arena_decay_time_default_set(*(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } + } + + ret = 0; +label_return: + return (ret); +} + CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) CTL_RO_NL_GEN(arenas_page, PAGE, size_t) CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) @@ -1807,7 +1931,7 @@ CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) static const ctl_named_node_t * -arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) +arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > NBINS) @@ -1816,9 +1940,9 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) } CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned) -CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t) +CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t) static const ctl_named_node_t * -arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) +arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > nlclasses) @@ -1827,9 +1951,10 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) } CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned) -CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t) +CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]), + size_t) static const ctl_named_node_t * -arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i) +arenas_hchunk_i_index(tsdn_t *tsdn, 
const size_t *mib, size_t miblen, size_t i) { if (i > nhclasses) @@ -1838,15 +1963,15 @@ arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i) } static int -arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned narenas; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); - if (ctl_grow()) { + if (ctl_grow(tsd_tsdn(tsd))) { ret = EAGAIN; goto label_return; } @@ -1855,15 +1980,15 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } /******************************************************************************/ static int -prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) +prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; @@ -1876,9 +2001,10 @@ prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp, ret = EINVAL; goto label_return; } - oldval = prof_thread_active_init_set(*(bool *)newp); + oldval = prof_thread_active_init_set(tsd_tsdn(tsd), + *(bool *)newp); } else - oldval = prof_thread_active_init_get(); + oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); READ(oldval, bool); ret = 0; @@ -1887,8 +2013,8 @@ label_return: } static int -prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; @@ -1901,9 +2027,9 @@ prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ret = EINVAL; goto label_return; } - oldval = prof_active_set(*(bool *)newp); + oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp); } else - oldval = prof_active_get(); + oldval = prof_active_get(tsd_tsdn(tsd)); READ(oldval, bool); ret = 0; @@ -1912,8 +2038,8 @@ label_return: } static int -prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *filename = NULL; @@ -1924,7 +2050,7 @@ prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, WRITEONLY(); WRITE(filename, const char *); - if (prof_mdump(filename)) { + if (prof_mdump(tsd, filename)) { ret = EFAULT; goto label_return; } @@ -1935,8 +2061,8 @@ label_return: } static int -prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; @@ -1949,9 +2075,9 @@ prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ret = EINVAL; goto label_return; } - oldval = prof_gdump_set(*(bool *)newp); + oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp); } else - oldval = prof_gdump_get(); + oldval = prof_gdump_get(tsd_tsdn(tsd)); READ(oldval, bool); ret = 0; @@ -1960,12 +2086,11 @@ label_return: } static int -prof_reset_ctl(const size_t *mib, size_t miblen, void 
*oldp, size_t *oldlenp, - void *newp, size_t newlen) +prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t lg_sample = lg_prof_sample; - tsd_t *tsd; if (!config_prof) return (ENOENT); @@ -1975,8 +2100,6 @@ prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, if (lg_sample >= (sizeof(uint64_t) << 3)) lg_sample = (sizeof(uint64_t) << 3) - 1; - tsd = tsd_fetch(); - prof_reset(tsd, lg_sample); ret = 0; @@ -1995,15 +2118,20 @@ CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t) CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t) CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) +CTL_RO_CGEN(config_stats, stats_retained, ctl_stats.retained, size_t) CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult, ssize_t) +CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time, + ssize_t) CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, ctl_stats.arenas[mib[2]].astats.mapped, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_retained, + ctl_stats.arenas[mib[2]].astats.retained, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, @@ -2060,7 +2188,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) static const ctl_named_node_t * -stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) +stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) { if (j > NBINS) @@ -2078,7 +2207,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) static const ctl_named_node_t * -stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) +stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) { if (j > nlclasses) @@ -2097,7 +2227,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks, ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t) static const ctl_named_node_t * -stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j) +stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) { if (j > nhclasses) @@ -2106,11 +2237,11 @@ stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j) } static const ctl_named_node_t * -stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) +stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { const ctl_named_node_t * ret; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsdn, &ctl_mtx); if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) { ret = NULL; goto label_return; @@ -2118,6 +2249,6 @@ stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) ret = super_stats_arenas_i_node; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsdn, &ctl_mtx); return (ret); } diff --git a/deps/jemalloc/src/extent.c b/deps/jemalloc/src/extent.c index 
13f94411c..218156c60 100644 --- a/deps/jemalloc/src/extent.c +++ b/deps/jemalloc/src/extent.c @@ -3,45 +3,48 @@ /******************************************************************************/ +/* + * Round down to the nearest chunk size that can actually be requested during + * normal huge allocation. + */ JEMALLOC_INLINE_C size_t extent_quantize(size_t size) { + size_t ret; + szind_t ind; - /* - * Round down to the nearest chunk size that can actually be requested - * during normal huge allocation. - */ - return (index2size(size2index(size + 1) - 1)); + assert(size > 0); + + ind = size2index(size + 1); + if (ind == 0) { + /* Avoid underflow. */ + return (index2size(0)); + } + ret = index2size(ind - 1); + assert(ret <= size); + return (ret); } JEMALLOC_INLINE_C int -extent_szad_comp(extent_node_t *a, extent_node_t *b) +extent_sz_comp(const extent_node_t *a, const extent_node_t *b) { - int ret; size_t a_qsize = extent_quantize(extent_node_size_get(a)); size_t b_qsize = extent_quantize(extent_node_size_get(b)); - /* - * Compare based on quantized size rather than size, in order to sort - * equally useful extents only by address. - */ - ret = (a_qsize > b_qsize) - (a_qsize < b_qsize); - if (ret == 0) { - uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); - uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); + return ((a_qsize > b_qsize) - (a_qsize < b_qsize)); +} - ret = (a_addr > b_addr) - (a_addr < b_addr); - } +JEMALLOC_INLINE_C int +extent_sn_comp(const extent_node_t *a, const extent_node_t *b) +{ + size_t a_sn = extent_node_sn_get(a); + size_t b_sn = extent_node_sn_get(b); - return (ret); + return ((a_sn > b_sn) - (a_sn < b_sn)); } -/* Generate red-black tree functions. */ -rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link, - extent_szad_comp) - JEMALLOC_INLINE_C int -extent_ad_comp(extent_node_t *a, extent_node_t *b) +extent_ad_comp(const extent_node_t *a, const extent_node_t *b) { uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); @@ -49,5 +52,26 @@ extent_ad_comp(extent_node_t *a, extent_node_t *b) return ((a_addr > b_addr) - (a_addr < b_addr)); } +JEMALLOC_INLINE_C int +extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b) +{ + int ret; + + ret = extent_sz_comp(a, b); + if (ret != 0) + return (ret); + + ret = extent_sn_comp(a, b); + if (ret != 0) + return (ret); + + ret = extent_ad_comp(a, b); + return (ret); +} + +/* Generate red-black tree functions. */ +rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link, + extent_szsnad_comp) + /* Generate red-black tree functions. 
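
Illustrative aside, not part of the diff: the extent.c hunks above replace the old size/address ("szad") ordering with a size/serial-number/address ("szsnad") ordering built from three tie-breaking comparators, where the size key is first quantized down to a size class. A self-contained sketch of the same composition over a simplified record — the ext_t struct and its field names are assumptions for illustration, not jemalloc's types:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for an extent record. */
typedef struct {
	void	*addr;	/* base address */
	size_t	qsize;	/* size, already quantized down to a size class */
	size_t	sn;	/* serial number: older extents sort first */
} ext_t;

static int
cmp_size(const ext_t *a, const ext_t *b)
{
	return ((a->qsize > b->qsize) - (a->qsize < b->qsize));
}

static int
cmp_sn(const ext_t *a, const ext_t *b)
{
	return ((a->sn > b->sn) - (a->sn < b->sn));
}

static int
cmp_addr(const ext_t *a, const ext_t *b)
{
	uintptr_t aa = (uintptr_t)a->addr, ba = (uintptr_t)b->addr;

	return ((aa > ba) - (aa < ba));
}

/* Size first, then serial number, then address, as in extent_szsnad_comp. */
static int
cmp_szsnad(const void *ap, const void *bp)
{
	const ext_t *a = ap, *b = bp;
	int ret;

	ret = cmp_size(a, b);
	if (ret == 0)
		ret = cmp_sn(a, b);
	if (ret == 0)
		ret = cmp_addr(a, b);
	return (ret);
}

int
main(void)
{
	ext_t v[] = {
		{ (void *)0x2000, 4096, 7 },
		{ (void *)0x1000, 4096, 3 },
		{ (void *)0x3000, 8192, 1 },
	};

	/* Equal-size extents order by serial number, then by address. */
	qsort(v, 3, sizeof(ext_t), cmp_szsnad);
	for (int i = 0; i < 3; i++)
		printf("%p size=%zu sn=%zu\n", v[i].addr, v[i].qsize, v[i].sn);
	return (0);
}
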
*/ rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp) diff --git a/deps/jemalloc/src/huge.c b/deps/jemalloc/src/huge.c index 1e9a66512..8abd8c00c 100644 --- a/deps/jemalloc/src/huge.c +++ b/deps/jemalloc/src/huge.c @@ -15,12 +15,21 @@ huge_node_get(const void *ptr) } static bool -huge_node_set(const void *ptr, extent_node_t *node) +huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node) { assert(extent_node_addr_get(node) == ptr); assert(!extent_node_achunk_get(node)); - return (chunk_register(ptr, node)); + return (chunk_register(tsdn, ptr, node)); +} + +static void +huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node) +{ + bool err; + + err = huge_node_set(tsdn, ptr, node); + assert(!err); } static void @@ -31,39 +40,39 @@ huge_node_unset(const void *ptr, const extent_node_t *node) } void * -huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero, - tcache_t *tcache) +huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) { - size_t usize; - usize = s2u(size); - if (usize == 0) { - /* size_t overflow. */ - return (NULL); - } + assert(usize == s2u(usize)); - return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache)); + return (huge_palloc(tsdn, arena, usize, chunksize, zero)); } void * -huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment, - bool zero, tcache_t *tcache) +huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero) { void *ret; - size_t usize; + size_t ausize; + arena_t *iarena; extent_node_t *node; + size_t sn; bool is_zeroed; /* Allocate one or more contiguous chunks for this request. */ - usize = sa2u(size, alignment); - if (unlikely(usize == 0)) + assert(!tsdn_null(tsdn) || arena != NULL); + + ausize = sa2u(usize, alignment); + if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS)) return (NULL); - assert(usize >= chunksize); + assert(ausize >= chunksize); /* Allocate an extent node with which to track the chunk. */ - node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)), - CACHELINE, false, tcache, true, arena); + iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) : + a0get(); + node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)), + CACHELINE, false, NULL, true, iarena); if (node == NULL) return (NULL); @@ -72,33 +81,35 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment, * it is possible to make correct junk/zero fill decisions below. */ is_zeroed = zero; - arena = arena_choose(tsd, arena); - if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena, - size, alignment, &is_zeroed)) == NULL) { - idalloctm(tsd, node, tcache, true); + if (likely(!tsdn_null(tsdn))) + arena = arena_choose(tsdn_tsd(tsdn), arena); + if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn, + arena, usize, alignment, &sn, &is_zeroed)) == NULL) { + idalloctm(tsdn, node, NULL, true, true); return (NULL); } - extent_node_init(node, arena, ret, size, is_zeroed, true); + extent_node_init(node, arena, ret, usize, sn, is_zeroed, true); - if (huge_node_set(ret, node)) { - arena_chunk_dalloc_huge(arena, ret, size); - idalloctm(tsd, node, tcache, true); + if (huge_node_set(tsdn, ret, node)) { + arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn); + idalloctm(tsdn, node, NULL, true, true); return (NULL); } /* Insert node into huge. 
*/ - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); ql_elm_new(node, ql_link); ql_tail_insert(&arena->huge, node, ql_link); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); if (zero || (config_fill && unlikely(opt_zero))) { if (!is_zeroed) - memset(ret, 0, size); + memset(ret, 0, usize); } else if (config_fill && unlikely(opt_junk_alloc)) - memset(ret, 0xa5, size); + memset(ret, JEMALLOC_ALLOC_JUNK, usize); + arena_decay_tick(tsdn, arena); return (ret); } @@ -116,7 +127,7 @@ huge_dalloc_junk(void *ptr, size_t usize) * unmapped. */ if (!config_munmap || (have_dss && chunk_in_dss(ptr))) - memset(ptr, 0x5a, usize); + memset(ptr, JEMALLOC_FREE_JUNK, usize); } } #ifdef JEMALLOC_JET @@ -126,8 +137,8 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl); #endif static void -huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min, - size_t usize_max, bool zero) +huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t usize_min, size_t usize_max, bool zero) { size_t usize, usize_next; extent_node_t *node; @@ -151,24 +162,28 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min, if (oldsize > usize) { size_t sdiff = oldsize - usize; if (config_fill && unlikely(opt_junk_free)) { - memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff); + memset((void *)((uintptr_t)ptr + usize), + JEMALLOC_FREE_JUNK, sdiff); post_zeroed = false; } else { - post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, - ptr, CHUNK_CEILING(oldsize), usize, sdiff); + post_zeroed = !chunk_purge_wrapper(tsdn, arena, + &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize, + sdiff); } } else post_zeroed = pre_zeroed; - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); /* Update the size of the huge allocation. */ + huge_node_unset(ptr, node); assert(extent_node_size_get(node) != usize); extent_node_size_set(node, usize); + huge_node_reset(tsdn, ptr, node); /* Update zeroed. */ extent_node_zeroed_set(node, post_zeroed); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); - arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize); + arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize); /* Fill if necessary (growing). 
*/ if (oldsize < usize) { @@ -178,14 +193,15 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min, usize - oldsize); } } else if (config_fill && unlikely(opt_junk_alloc)) { - memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize - - oldsize); + memset((void *)((uintptr_t)ptr + oldsize), + JEMALLOC_ALLOC_JUNK, usize - oldsize); } } } static bool -huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize) +huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t usize) { extent_node_t *node; arena_t *arena; @@ -196,7 +212,7 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize) node = huge_node_get(ptr); arena = extent_node_arena_get(node); pre_zeroed = extent_node_zeroed_get(node); - chunk_hooks = chunk_hooks_get(arena); + chunk_hooks = chunk_hooks_get(tsdn, arena); assert(oldsize > usize); @@ -213,53 +229,59 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize) sdiff); post_zeroed = false; } else { - post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, - CHUNK_ADDR2BASE((uintptr_t)ptr + usize), - CHUNK_CEILING(oldsize), + post_zeroed = !chunk_purge_wrapper(tsdn, arena, + &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr + + usize), CHUNK_CEILING(oldsize), CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff); } } else post_zeroed = pre_zeroed; - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); /* Update the size of the huge allocation. */ + huge_node_unset(ptr, node); extent_node_size_set(node, usize); + huge_node_reset(tsdn, ptr, node); /* Update zeroed. */ extent_node_zeroed_set(node, post_zeroed); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); /* Zap the excess chunks. */ - arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize); + arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize, + extent_node_sn_get(node)); return (false); } static bool -huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) { +huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t usize, bool zero) { extent_node_t *node; arena_t *arena; bool is_zeroed_subchunk, is_zeroed_chunk; node = huge_node_get(ptr); arena = extent_node_arena_get(node); - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); is_zeroed_subchunk = extent_node_zeroed_get(node); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); /* - * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so - * that it is possible to make correct junk/zero fill decisions below. + * Use is_zeroed_chunk to detect whether the trailing memory is zeroed, + * update extent's zeroed field, and zero as necessary. */ - is_zeroed_chunk = zero; - - if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize, + is_zeroed_chunk = false; + if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize, &is_zeroed_chunk)) return (true); - malloc_mutex_lock(&arena->huge_mtx); - /* Update the size of the huge allocation. 
*/ + malloc_mutex_lock(tsdn, &arena->huge_mtx); + huge_node_unset(ptr, node); extent_node_size_set(node, usize); - malloc_mutex_unlock(&arena->huge_mtx); + extent_node_zeroed_set(node, extent_node_zeroed_get(node) && + is_zeroed_chunk); + huge_node_reset(tsdn, ptr, node); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); if (zero || (config_fill && unlikely(opt_zero))) { if (!is_zeroed_subchunk) { @@ -272,19 +294,21 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) { CHUNK_CEILING(oldsize)); } } else if (config_fill && unlikely(opt_junk_alloc)) { - memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize - - oldsize); + memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK, + usize - oldsize); } return (false); } bool -huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min, +huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) { assert(s2u(oldsize) == oldsize); + /* The following should have been caught by callers. */ + assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS); /* Both allocations must be huge to avoid a move. */ if (oldsize < chunksize || usize_max < chunksize) @@ -292,13 +316,18 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min, if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) { /* Attempt to expand the allocation in-place. */ - if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero)) + if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max, + zero)) { + arena_decay_tick(tsdn, huge_aalloc(ptr)); return (false); + } /* Try again, this time with usize_min. */ if (usize_min < usize_max && CHUNK_CEILING(usize_min) > - CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr, - oldsize, usize_min, zero)) + CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn, + ptr, oldsize, usize_min, zero)) { + arena_decay_tick(tsdn, huge_aalloc(ptr)); return (false); + } } /* @@ -307,36 +336,46 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min, */ if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min) && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) { - huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max, - zero); + huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min, + usize_max, zero); + arena_decay_tick(tsdn, huge_aalloc(ptr)); return (false); } /* Attempt to shrink the allocation in-place. 
*/ - if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) - return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)); + if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) { + if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize, + usize_max)) { + arena_decay_tick(tsdn, huge_aalloc(ptr)); + return (false); + } + } return (true); } static void * -huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, - size_t alignment, bool zero, tcache_t *tcache) +huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero) { if (alignment <= chunksize) - return (huge_malloc(tsd, arena, usize, zero, tcache)); - return (huge_palloc(tsd, arena, usize, alignment, zero, tcache)); + return (huge_malloc(tsdn, arena, usize, zero)); + return (huge_palloc(tsdn, arena, usize, alignment, zero)); } void * -huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize, - size_t alignment, bool zero, tcache_t *tcache) +huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, + size_t usize, size_t alignment, bool zero, tcache_t *tcache) { void *ret; size_t copysize; + /* The following should have been caught by callers. */ + assert(usize > 0 && usize <= HUGE_MAXCLASS); + /* Try to avoid moving the allocation. */ - if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero)) + if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize, + zero)) return (ptr); /* @@ -344,19 +383,19 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize, * different size class. In that case, fall back to allocating new * space and copying. */ - ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero, - tcache); + ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment, + zero); if (ret == NULL) return (NULL); copysize = (usize < oldsize) ? 
usize : oldsize; memcpy(ret, ptr, copysize); - isqalloc(tsd, ptr, oldsize, tcache); + isqalloc(tsd, ptr, oldsize, tcache, true); return (ret); } void -huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache) +huge_dalloc(tsdn_t *tsdn, void *ptr) { extent_node_t *node; arena_t *arena; @@ -364,15 +403,18 @@ huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache) node = huge_node_get(ptr); arena = extent_node_arena_get(node); huge_node_unset(ptr, node); - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); ql_remove(&arena->huge, node, ql_link); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); huge_dalloc_junk(extent_node_addr_get(node), extent_node_size_get(node)); - arena_chunk_dalloc_huge(extent_node_arena_get(node), - extent_node_addr_get(node), extent_node_size_get(node)); - idalloctm(tsd, node, tcache, true); + arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node), + extent_node_addr_get(node), extent_node_size_get(node), + extent_node_sn_get(node)); + idalloctm(tsdn, node, NULL, true, true); + + arena_decay_tick(tsdn, arena); } arena_t * @@ -383,7 +425,7 @@ huge_aalloc(const void *ptr) } size_t -huge_salloc(const void *ptr) +huge_salloc(tsdn_t *tsdn, const void *ptr) { size_t size; extent_node_t *node; @@ -391,15 +433,15 @@ huge_salloc(const void *ptr) node = huge_node_get(ptr); arena = extent_node_arena_get(node); - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); size = extent_node_size_get(node); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); return (size); } prof_tctx_t * -huge_prof_tctx_get(const void *ptr) +huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr) { prof_tctx_t *tctx; extent_node_t *node; @@ -407,29 +449,29 @@ huge_prof_tctx_get(const void *ptr) node = huge_node_get(ptr); arena = extent_node_arena_get(node); - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); tctx = extent_node_prof_tctx_get(node); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); return (tctx); } void -huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx) +huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) { extent_node_t *node; arena_t *arena; node = huge_node_get(ptr); arena = extent_node_arena_get(node); - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); extent_node_prof_tctx_set(node, tctx); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); } void -huge_prof_tctx_reset(const void *ptr) +huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr) { - huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U); + huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U); } diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c index fe77c2475..07389ca2f 100644 --- a/deps/jemalloc/src/jemalloc.c +++ b/deps/jemalloc/src/jemalloc.c @@ -5,7 +5,11 @@ /* Data. */ /* Runtime configuration options. */ -const char *je_malloc_conf JEMALLOC_ATTR(weak); +const char *je_malloc_conf +#ifndef _WIN32 + JEMALLOC_ATTR(weak) +#endif + ; bool opt_abort = #ifdef JEMALLOC_DEBUG true @@ -40,14 +44,14 @@ bool opt_redzone = false; bool opt_utrace = false; bool opt_xmalloc = false; bool opt_zero = false; -size_t opt_narenas = 0; +unsigned opt_narenas = 0; /* Initialized to true if the process is running inside Valgrind. */ bool in_valgrind; unsigned ncpus; -/* Protects arenas initialization (arenas, narenas_total). 
*/ +/* Protects arenas initialization. */ static malloc_mutex_t arenas_lock; /* * Arenas that are used to service external requests. Not all elements of the @@ -57,10 +61,10 @@ static malloc_mutex_t arenas_lock; * arenas. arenas[narenas_auto..narenas_total) are only used if the application * takes some action to create them and allocate from them. */ -static arena_t **arenas; -static unsigned narenas_total; +arena_t **arenas; +static unsigned narenas_total; /* Use narenas_total_*(). */ static arena_t *a0; /* arenas[0]; read-only after initialization. */ -static unsigned narenas_auto; /* Read-only after initialization. */ +unsigned narenas_auto; /* Read-only after initialization. */ typedef enum { malloc_init_uninitialized = 3, @@ -70,9 +74,37 @@ typedef enum { } malloc_init_t; static malloc_init_t malloc_init_state = malloc_init_uninitialized; +/* False should be the common case. Set to true to trigger initialization. */ +static bool malloc_slow = true; + +/* When malloc_slow is true, set the corresponding bits for sanity check. */ +enum { + flag_opt_junk_alloc = (1U), + flag_opt_junk_free = (1U << 1), + flag_opt_quarantine = (1U << 2), + flag_opt_zero = (1U << 3), + flag_opt_utrace = (1U << 4), + flag_in_valgrind = (1U << 5), + flag_opt_xmalloc = (1U << 6) +}; +static uint8_t malloc_slow_flags; + +JEMALLOC_ALIGNED(CACHELINE) +const size_t pind2sz_tab[NPSIZES] = { +#define PSZ_yes(lg_grp, ndelta, lg_delta) \ + (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))), +#define PSZ_no(lg_grp, ndelta, lg_delta) +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ + PSZ_##psz(lg_grp, ndelta, lg_delta) + SIZE_CLASSES +#undef PSZ_yes +#undef PSZ_no +#undef SC +}; + JEMALLOC_ALIGNED(CACHELINE) const size_t index2size_tab[NSIZES] = { -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)), SIZE_CLASSES #undef SC @@ -144,7 +176,7 @@ const uint8_t size2index_tab[] = { #define S2B_11(i) S2B_10(i) S2B_10(i) #endif #define S2B_no(i) -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ S2B_##lg_delta_lookup(index) SIZE_CLASSES #undef S2B_3 @@ -195,7 +227,7 @@ _init_init_lock(void) * really only matters early in the process creation, before any * separate thread normally starts doing anything. */ if (!init_lock_initialized) - malloc_mutex_init(&init_lock); + malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT); init_lock_initialized = true; } @@ -290,18 +322,10 @@ malloc_init(void) } /* - * The a0*() functions are used instead of i[mcd]alloc() in situations that + * The a0*() functions are used instead of i{d,}alloc() in situations that * cannot tolerate TLS variable access. 
*/ -arena_t * -a0get(void) -{ - - assert(a0 != NULL); - return (a0); -} - static void * a0ialloc(size_t size, bool zero, bool is_metadata) { @@ -309,14 +333,22 @@ a0ialloc(size_t size, bool zero, bool is_metadata) if (unlikely(malloc_init_a0())) return (NULL); - return (iallocztm(NULL, size, zero, false, is_metadata, a0get())); + return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL, + is_metadata, arena_get(TSDN_NULL, 0, true), true)); } static void a0idalloc(void *ptr, bool is_metadata) { - idalloctm(NULL, ptr, false, is_metadata); + idalloctm(TSDN_NULL, ptr, false, is_metadata, true); +} + +arena_t * +a0get(void) +{ + + return (a0); } void * @@ -373,92 +405,88 @@ bootstrap_free(void *ptr) a0idalloc(ptr, false); } +static void +arena_set(unsigned ind, arena_t *arena) +{ + + atomic_write_p((void **)&arenas[ind], arena); +} + +static void +narenas_total_set(unsigned narenas) +{ + + atomic_write_u(&narenas_total, narenas); +} + +static void +narenas_total_inc(void) +{ + + atomic_add_u(&narenas_total, 1); +} + +unsigned +narenas_total_get(void) +{ + + return (atomic_read_u(&narenas_total)); +} + /* Create a new arena and insert it into the arenas array at index ind. */ static arena_t * -arena_init_locked(unsigned ind) +arena_init_locked(tsdn_t *tsdn, unsigned ind) { arena_t *arena; - /* Expand arenas if necessary. */ - assert(ind <= narenas_total); + assert(ind <= narenas_total_get()); if (ind > MALLOCX_ARENA_MAX) return (NULL); - if (ind == narenas_total) { - unsigned narenas_new = narenas_total + 1; - arena_t **arenas_new = - (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new * - sizeof(arena_t *))); - if (arenas_new == NULL) - return (NULL); - memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *)); - arenas_new[ind] = NULL; - /* - * Deallocate only if arenas came from a0malloc() (not - * base_alloc()). - */ - if (narenas_total != narenas_auto) - a0dalloc(arenas); - arenas = arenas_new; - narenas_total = narenas_new; - } + if (ind == narenas_total_get()) + narenas_total_inc(); /* * Another thread may have already initialized arenas[ind] if it's an * auto arena. */ - arena = arenas[ind]; + arena = arena_get(tsdn, ind, false); if (arena != NULL) { assert(ind < narenas_auto); return (arena); } /* Actually initialize the arena. 
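
Illustrative aside, not part of the diff: the jemalloc.c hunks above move narenas_total and the arenas[] slots onto jemalloc's internal atomic_write_p()/atomic_add_u()/atomic_read_u() wrappers so readers can take a consistent snapshot without holding arenas_lock. A rough equivalent of that publish/lookup pattern using C11 <stdatomic.h>; the names and NARENAS_MAX bound are invented for the sketch:

#include <stdatomic.h>
#include <stddef.h>

#define NARENAS_MAX 64

typedef struct arena_s arena_t;

static _Atomic(arena_t *) arenas[NARENAS_MAX];
static atomic_uint narenas_total;

/* Writers (arena creation) serialize on a lock; these stores publish. */
static void
arena_publish(unsigned ind, arena_t *arena)
{
	atomic_store_explicit(&arenas[ind], arena, memory_order_release);
}

static void
narenas_inc(void)
{
	atomic_fetch_add_explicit(&narenas_total, 1, memory_order_release);
}

/* Readers may run lock-free; a not-yet-published slot reads as NULL. */
static arena_t *
arena_lookup(unsigned ind)
{
	if (ind >= atomic_load_explicit(&narenas_total, memory_order_acquire))
		return (NULL);
	return (atomic_load_explicit(&arenas[ind], memory_order_acquire));
}
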
*/ - arena = arenas[ind] = arena_new(ind); + arena = arena_new(tsdn, ind); + arena_set(ind, arena); return (arena); } arena_t * -arena_init(unsigned ind) +arena_init(tsdn_t *tsdn, unsigned ind) { arena_t *arena; - malloc_mutex_lock(&arenas_lock); - arena = arena_init_locked(ind); - malloc_mutex_unlock(&arenas_lock); + malloc_mutex_lock(tsdn, &arenas_lock); + arena = arena_init_locked(tsdn, ind); + malloc_mutex_unlock(tsdn, &arenas_lock); return (arena); } -unsigned -narenas_total_get(void) -{ - unsigned narenas; - - malloc_mutex_lock(&arenas_lock); - narenas = narenas_total; - malloc_mutex_unlock(&arenas_lock); - - return (narenas); -} - static void -arena_bind_locked(tsd_t *tsd, unsigned ind) +arena_bind(tsd_t *tsd, unsigned ind, bool internal) { arena_t *arena; - arena = arenas[ind]; - arena->nthreads++; - - if (tsd_nominal(tsd)) - tsd_arena_set(tsd, arena); -} + if (!tsd_nominal(tsd)) + return; -static void -arena_bind(tsd_t *tsd, unsigned ind) -{ + arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_inc(arena, internal); - malloc_mutex_lock(&arenas_lock); - arena_bind_locked(tsd, ind); - malloc_mutex_unlock(&arenas_lock); + if (internal) + tsd_iarena_set(tsd, arena); + else + tsd_arena_set(tsd, arena); } void @@ -466,131 +494,139 @@ arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { arena_t *oldarena, *newarena; - malloc_mutex_lock(&arenas_lock); - oldarena = arenas[oldind]; - newarena = arenas[newind]; - oldarena->nthreads--; - newarena->nthreads++; - malloc_mutex_unlock(&arenas_lock); + oldarena = arena_get(tsd_tsdn(tsd), oldind, false); + newarena = arena_get(tsd_tsdn(tsd), newind, false); + arena_nthreads_dec(oldarena, false); + arena_nthreads_inc(newarena, false); tsd_arena_set(tsd, newarena); } -unsigned -arena_nbound(unsigned ind) -{ - unsigned nthreads; - - malloc_mutex_lock(&arenas_lock); - nthreads = arenas[ind]->nthreads; - malloc_mutex_unlock(&arenas_lock); - return (nthreads); -} - static void -arena_unbind(tsd_t *tsd, unsigned ind) +arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { arena_t *arena; - malloc_mutex_lock(&arenas_lock); - arena = arenas[ind]; - arena->nthreads--; - malloc_mutex_unlock(&arenas_lock); - tsd_arena_set(tsd, NULL); + arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_dec(arena, internal); + if (internal) + tsd_iarena_set(tsd, NULL); + else + tsd_arena_set(tsd, NULL); } -arena_t * -arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing) +arena_tdata_t * +arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { - arena_t *arena; - arena_t **arenas_cache = tsd_arenas_cache_get(tsd); - unsigned narenas_cache = tsd_narenas_cache_get(tsd); + arena_tdata_t *tdata, *arenas_tdata_old; + arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); + unsigned narenas_tdata_old, i; + unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); unsigned narenas_actual = narenas_total_get(); - /* Deallocate old cache if it's too small. */ - if (arenas_cache != NULL && narenas_cache < narenas_actual) { - a0dalloc(arenas_cache); - arenas_cache = NULL; - narenas_cache = 0; - tsd_arenas_cache_set(tsd, arenas_cache); - tsd_narenas_cache_set(tsd, narenas_cache); - } - - /* Allocate cache if it's missing. */ - if (arenas_cache == NULL) { - bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd); - assert(ind < narenas_actual || !init_if_missing); - narenas_cache = (ind < narenas_actual) ? 
narenas_actual : ind+1; - - if (tsd_nominal(tsd) && !*arenas_cache_bypassp) { - *arenas_cache_bypassp = true; - arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) * - narenas_cache); - *arenas_cache_bypassp = false; + /* + * Dissociate old tdata array (and set up for deallocation upon return) + * if it's too small. + */ + if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { + arenas_tdata_old = arenas_tdata; + narenas_tdata_old = narenas_tdata; + arenas_tdata = NULL; + narenas_tdata = 0; + tsd_arenas_tdata_set(tsd, arenas_tdata); + tsd_narenas_tdata_set(tsd, narenas_tdata); + } else { + arenas_tdata_old = NULL; + narenas_tdata_old = 0; + } + + /* Allocate tdata array if it's missing. */ + if (arenas_tdata == NULL) { + bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); + narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; + + if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { + *arenas_tdata_bypassp = true; + arenas_tdata = (arena_tdata_t *)a0malloc( + sizeof(arena_tdata_t) * narenas_tdata); + *arenas_tdata_bypassp = false; } - if (arenas_cache == NULL) { - /* - * This function must always tell the truth, even if - * it's slow, so don't let OOM, thread cleanup (note - * tsd_nominal check), nor recursive allocation - * avoidance (note arenas_cache_bypass check) get in the - * way. - */ - if (ind >= narenas_actual) - return (NULL); - malloc_mutex_lock(&arenas_lock); - arena = arenas[ind]; - malloc_mutex_unlock(&arenas_lock); - return (arena); + if (arenas_tdata == NULL) { + tdata = NULL; + goto label_return; } - assert(tsd_nominal(tsd) && !*arenas_cache_bypassp); - tsd_arenas_cache_set(tsd, arenas_cache); - tsd_narenas_cache_set(tsd, narenas_cache); + assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); + tsd_arenas_tdata_set(tsd, arenas_tdata); + tsd_narenas_tdata_set(tsd, narenas_tdata); } /* - * Copy to cache. It's possible that the actual number of arenas has - * increased since narenas_total_get() was called above, but that causes - * no correctness issues unless two threads concurrently execute the - * arenas.extend mallctl, which we trust mallctl synchronization to + * Copy to tdata array. It's possible that the actual number of arenas + * has increased since narenas_total_get() was called above, but that + * causes no correctness issues unless two threads concurrently execute + * the arenas.extend mallctl, which we trust mallctl synchronization to * prevent. */ - malloc_mutex_lock(&arenas_lock); - memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual); - malloc_mutex_unlock(&arenas_lock); - if (narenas_cache > narenas_actual) { - memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) * - (narenas_cache - narenas_actual)); - } - - /* Read the refreshed cache, and init the arena if necessary. */ - arena = arenas_cache[ind]; - if (init_if_missing && arena == NULL) - arena = arenas_cache[ind] = arena_init(ind); - return (arena); + + /* Copy/initialize tickers. */ + for (i = 0; i < narenas_actual; i++) { + if (i < narenas_tdata_old) { + ticker_copy(&arenas_tdata[i].decay_ticker, + &arenas_tdata_old[i].decay_ticker); + } else { + ticker_init(&arenas_tdata[i].decay_ticker, + DECAY_NTICKS_PER_UPDATE); + } + } + if (narenas_tdata > narenas_actual) { + memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) + * (narenas_tdata - narenas_actual)); + } + + /* Read the refreshed tdata array. 
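arena_tdata_get_hard() above replaces the old per-thread arenas cache with an arena_tdata_t array carrying one decay ticker per arena; when the array has to grow, live tickers are copied over, new slots get freshly initialized tickers, and the old array is freed only after the copy. A minimal sketch of that grow-and-copy step (the ticker type and the 1000-tick period are illustrative stand-ins, not jemalloc's definitions):

#include <stdlib.h>

typedef struct { int nticks; int tick; } ticker_t;
typedef struct { ticker_t decay_ticker; } tdata_t;

tdata_t *
tdata_grow(tdata_t *old, unsigned old_n, unsigned new_n)
{
    tdata_t *new_arr;
    unsigned i;

    if (new_n <= old_n)
        return old;
    new_arr = malloc(new_n * sizeof(tdata_t));
    if (new_arr == NULL)
        return old;                  /* keep the old, smaller array */
    for (i = 0; i < new_n; i++) {
        if (i < old_n)
            new_arr[i] = old[i];     /* preserve ticker progress */
        else {
            new_arr[i].decay_ticker.nticks = 1000;  /* fresh ticker */
            new_arr[i].decay_ticker.tick = 1000;
        }
    }
    free(old);
    return new_arr;
}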
*/ + tdata = &arenas_tdata[ind]; +label_return: + if (arenas_tdata_old != NULL) + a0dalloc(arenas_tdata_old); + return (tdata); } /* Slow path, called only by arena_choose(). */ arena_t * -arena_choose_hard(tsd_t *tsd) +arena_choose_hard(tsd_t *tsd, bool internal) { - arena_t *ret; + arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); if (narenas_auto > 1) { - unsigned i, choose, first_null; + unsigned i, j, choose[2], first_null; + + /* + * Determine binding for both non-internal and internal + * allocation. + * + * choose[0]: For application allocation. + * choose[1]: For internal metadata allocation. + */ + + for (j = 0; j < 2; j++) + choose[j] = 0; - choose = 0; first_null = narenas_auto; - malloc_mutex_lock(&arenas_lock); - assert(a0get() != NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); + assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); for (i = 1; i < narenas_auto; i++) { - if (arenas[i] != NULL) { + if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { /* * Choose the first arena that has the lowest * number of threads assigned to it. */ - if (arenas[i]->nthreads < - arenas[choose]->nthreads) - choose = i; + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get( + tsd_tsdn(tsd), i, false), !!j) < + arena_nthreads_get(arena_get( + tsd_tsdn(tsd), choose[j], false), + !!j)) + choose[j] = i; + } } else if (first_null == narenas_auto) { /* * Record the index of the first uninitialized @@ -605,27 +641,40 @@ arena_choose_hard(tsd_t *tsd) } } - if (arenas[choose]->nthreads == 0 - || first_null == narenas_auto) { - /* - * Use an unloaded arena, or the least loaded arena if - * all arenas are already initialized. - */ - ret = arenas[choose]; - } else { - /* Initialize a new arena. */ - choose = first_null; - ret = arena_init_locked(choose); - if (ret == NULL) { - malloc_mutex_unlock(&arenas_lock); - return (NULL); + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), + choose[j], false), !!j) == 0 || first_null == + narenas_auto) { + /* + * Use an unloaded arena, or the least loaded + * arena if all arenas are already initialized. + */ + if (!!j == internal) { + ret = arena_get(tsd_tsdn(tsd), + choose[j], false); + } + } else { + arena_t *arena; + + /* Initialize a new arena. */ + choose[j] = first_null; + arena = arena_init_locked(tsd_tsdn(tsd), + choose[j]); + if (arena == NULL) { + malloc_mutex_unlock(tsd_tsdn(tsd), + &arenas_lock); + return (NULL); + } + if (!!j == internal) + ret = arena; } + arena_bind(tsd, choose[j], !!j); } - arena_bind_locked(tsd, choose); - malloc_mutex_unlock(&arenas_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); } else { - ret = a0get(); - arena_bind(tsd, 0); + ret = arena_get(tsd_tsdn(tsd), 0, false); + arena_bind(tsd, 0, false); + arena_bind(tsd, 0, true); } return (ret); @@ -646,36 +695,49 @@ thread_deallocated_cleanup(tsd_t *tsd) } void +iarena_cleanup(tsd_t *tsd) +{ + arena_t *iarena; + + iarena = tsd_iarena_get(tsd); + if (iarena != NULL) + arena_unbind(tsd, iarena->ind, true); +} + +void arena_cleanup(tsd_t *tsd) { arena_t *arena; arena = tsd_arena_get(tsd); if (arena != NULL) - arena_unbind(tsd, arena->ind); + arena_unbind(tsd, arena->ind, false); } void -arenas_cache_cleanup(tsd_t *tsd) +arenas_tdata_cleanup(tsd_t *tsd) { - arena_t **arenas_cache; + arena_tdata_t *arenas_tdata; - arenas_cache = tsd_arenas_cache_get(tsd); - if (arenas_cache != NULL) { - tsd_arenas_cache_set(tsd, NULL); - a0dalloc(arenas_cache); + /* Prevent tsd->arenas_tdata from being (re)created. 
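arena_choose_hard() now tracks two bindings per thread (application vs. internal metadata allocation), but the selection policy itself is unchanged: prefer the arena with the fewest bound threads, unless an uninitialized slot exists, in which case a fresh arena is created. A sketch of just that policy, with locking and the internal/application split omitted and assuming arenas[0] is always initialized:

#include <stddef.h>

typedef struct { unsigned nthreads; } arena_t;

arena_t *
choose_arena(arena_t **arenas, unsigned narenas)
{
    unsigned i, choose = 0, first_null = narenas;

    for (i = 1; i < narenas; i++) {
        if (arenas[i] != NULL) {
            if (arenas[i]->nthreads < arenas[choose]->nthreads)
                choose = i;
        } else if (first_null == narenas)
            first_null = i;          /* remember the first empty slot */
    }
    if (arenas[choose]->nthreads == 0 || first_null == narenas)
        return arenas[choose];       /* unloaded, or everything initialized */
    return NULL;                     /* caller should init arenas[first_null] */
}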
*/ + *tsd_arenas_tdata_bypassp_get(tsd) = true; + + arenas_tdata = tsd_arenas_tdata_get(tsd); + if (arenas_tdata != NULL) { + tsd_arenas_tdata_set(tsd, NULL); + a0dalloc(arenas_tdata); } } void -narenas_cache_cleanup(tsd_t *tsd) +narenas_tdata_cleanup(tsd_t *tsd) { /* Do nothing. */ } void -arenas_cache_bypass_cleanup(tsd_t *tsd) +arenas_tdata_bypass_cleanup(tsd_t *tsd) { /* Do nothing. */ @@ -686,8 +748,11 @@ stats_print_atexit(void) { if (config_tcache && config_stats) { + tsdn_t *tsdn; unsigned narenas, i; + tsdn = tsdn_fetch(); + /* * Merge stats from extant threads. This is racy, since * individual threads do not lock when recording tcache stats @@ -696,7 +761,7 @@ stats_print_atexit(void) * continue to allocate. */ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { - arena_t *arena = arenas[i]; + arena_t *arena = arena_get(tsdn, i, false); if (arena != NULL) { tcache_t *tcache; @@ -706,11 +771,11 @@ stats_print_atexit(void) * and bin locks in the opposite order, * deadlocks may result. */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); ql_foreach(tcache, &arena->tcache_ql, link) { - tcache_stats_merge(tcache, arena); + tcache_stats_merge(tsdn, tcache, arena); } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } } } @@ -747,6 +812,20 @@ malloc_ncpus(void) SYSTEM_INFO si; GetSystemInfo(&si); result = si.dwNumberOfProcessors; +#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) + /* + * glibc >= 2.6 has the CPU_COUNT macro. + * + * glibc's sysconf() uses isspace(). glibc allocates for the first time + * *before* setting up the isspace tables. Therefore we need a + * different method to get the number of CPUs. + */ + { + cpu_set_t set; + + pthread_getaffinity_np(pthread_self(), sizeof(set), &set); + result = CPU_COUNT(&set); + } #else result = sysconf(_SC_NPROCESSORS_ONLN); #endif @@ -839,6 +918,26 @@ malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, } static void +malloc_slow_flag_init(void) +{ + /* + * Combine the runtime options into malloc_slow for fast path. Called + * after processing all the options. + */ + malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) + | (opt_junk_free ? flag_opt_junk_free : 0) + | (opt_quarantine ? flag_opt_quarantine : 0) + | (opt_zero ? flag_opt_zero : 0) + | (opt_utrace ? flag_opt_utrace : 0) + | (opt_xmalloc ? flag_opt_xmalloc : 0); + + if (config_valgrind) + malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0); + + malloc_slow = (malloc_slow_flags != 0); +} + +static void malloc_conf_init(void) { unsigned i; @@ -864,10 +963,13 @@ malloc_conf_init(void) opt_tcache = false; } - for (i = 0; i < 3; i++) { + for (i = 0; i < 4; i++) { /* Get runtime configuration. 
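The new CPU_COUNT branch in malloc_ncpus() sidesteps glibc's sysconf(), which can allocate before its isspace tables exist, by reading the calling thread's affinity mask instead. A standalone sketch of that probe (Linux/glibc only, compile with -pthread):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    long n;
#ifdef CPU_COUNT
    cpu_set_t set;

    if (pthread_getaffinity_np(pthread_self(), sizeof(set), &set) == 0)
        n = CPU_COUNT(&set);            /* CPUs this thread may run on */
    else
        n = sysconf(_SC_NPROCESSORS_ONLN);
#else
    n = sysconf(_SC_NPROCESSORS_ONLN);
#endif
    printf("ncpus=%ld\n", n);
    return 0;
}

Note that the affinity mask reports the CPUs the thread is allowed to run on, which can be fewer than the CPUs that are online.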
*/ switch (i) { case 0: + opts = config_malloc_conf; + break; + case 1: if (je_malloc_conf != NULL) { /* * Use options that were compiled into the @@ -880,8 +982,8 @@ malloc_conf_init(void) opts = buf; } break; - case 1: { - int linklen = 0; + case 2: { + ssize_t linklen = 0; #ifndef _WIN32 int saved_errno = errno; const char *linkname = @@ -907,7 +1009,7 @@ malloc_conf_init(void) buf[linklen] = '\0'; opts = buf; break; - } case 2: { + } case 3: { const char *envname = #ifdef JEMALLOC_PREFIX JEMALLOC_CPREFIX"MALLOC_CONF" @@ -954,7 +1056,11 @@ malloc_conf_init(void) if (cont) \ continue; \ } -#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \ +#define CONF_MIN_no(um, min) false +#define CONF_MIN_yes(um, min) ((um) < (min)) +#define CONF_MAX_no(um, max) false +#define CONF_MAX_yes(um, max) ((um) > (max)) +#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ if (CONF_MATCH(n)) { \ uintmax_t um; \ char *end; \ @@ -967,24 +1073,35 @@ malloc_conf_init(void) "Invalid conf value", \ k, klen, v, vlen); \ } else if (clip) { \ - if ((min) != 0 && um < (min)) \ - o = (min); \ - else if (um > (max)) \ - o = (max); \ + if (CONF_MIN_##check_min(um, \ + (min))) \ + o = (t)(min); \ + else if (CONF_MAX_##check_max( \ + um, (max))) \ + o = (t)(max); \ else \ - o = um; \ + o = (t)um; \ } else { \ - if (((min) != 0 && um < (min)) \ - || um > (max)) { \ + if (CONF_MIN_##check_min(um, \ + (min)) || \ + CONF_MAX_##check_max(um, \ + (max))) { \ malloc_conf_error( \ "Out-of-range " \ "conf value", \ k, klen, v, vlen); \ } else \ - o = um; \ + o = (t)um; \ } \ continue; \ } +#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ + clip) \ + CONF_HANDLE_T_U(unsigned, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T_U(size_t, o, n, min, max, \ + check_min, check_max, clip) #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ if (CONF_MATCH(n)) { \ long l; \ @@ -1027,7 +1144,7 @@ malloc_conf_init(void) */ CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + LG_SIZE_CLASS_GROUP + (config_fill ? 
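The reworked CONF_HANDLE_T_U / CONF_HANDLE_UNSIGNED / CONF_HANDLE_SIZE_T macros all expand to the same parse-then-range-check shape, with the min and max checks compiled in or out per option and an optional clamp instead of an error. Roughly, each expansion behaves like this helper (a sketch using strtoumax(); jemalloc uses its own malloc_strtoumax()):

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

static bool
parse_unsigned(const char *v, uintmax_t min, uintmax_t max, bool clip,
    unsigned *out)
{
    char *end;
    uintmax_t um = strtoumax(v, &end, 0);

    if (*end != '\0')
        return false;                    /* trailing junk: invalid value */
    if (um < min || um > max) {
        if (!clip)
            return false;                /* out of range: report an error */
        um = (um < min) ? min : max;     /* out of range: clamp */
    }
    *out = (unsigned)um;
    return true;
}

int
main(void)
{
    unsigned narenas;

    if (parse_unsigned("4096", 1, 64, true, &narenas))
        printf("narenas=%u\n", narenas); /* clamped to 64 */
    return 0;
}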
2 : 1), - (sizeof(size_t) << 3) - 1, true) + (sizeof(size_t) << 3) - 1, yes, yes, true) if (strncmp("dss", k, klen) == 0) { int i; bool match = false; @@ -1052,17 +1169,47 @@ malloc_conf_init(void) } continue; } - CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, - SIZE_T_MAX, false) + CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, + UINT_MAX, yes, no, false) + if (strncmp("purge", k, klen) == 0) { + int i; + bool match = false; + for (i = 0; i < purge_mode_limit; i++) { + if (strncmp(purge_mode_names[i], v, + vlen) == 0) { + opt_purge = (purge_mode_t)i; + match = true; + break; + } + } + if (!match) { + malloc_conf_error("Invalid conf value", + k, klen, v, vlen); + } + continue; + } CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", -1, (sizeof(size_t) << 3) - 1) + CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1, + NSTIME_SEC_MAX); CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true) if (config_fill) { if (CONF_MATCH("junk")) { if (CONF_MATCH_VALUE("true")) { - opt_junk = "true"; - opt_junk_alloc = opt_junk_free = - true; + if (config_valgrind && + unlikely(in_valgrind)) { + malloc_conf_error( + "Deallocation-time " + "junk filling cannot " + "be enabled while " + "running inside " + "Valgrind", k, klen, v, + vlen); + } else { + opt_junk = "true"; + opt_junk_alloc = true; + opt_junk_free = true; + } } else if (CONF_MATCH_VALUE("false")) { opt_junk = "false"; opt_junk_alloc = opt_junk_free = @@ -1072,9 +1219,20 @@ malloc_conf_init(void) opt_junk_alloc = true; opt_junk_free = false; } else if (CONF_MATCH_VALUE("free")) { - opt_junk = "free"; - opt_junk_alloc = false; - opt_junk_free = true; + if (config_valgrind && + unlikely(in_valgrind)) { + malloc_conf_error( + "Deallocation-time " + "junk filling cannot " + "be enabled while " + "running inside " + "Valgrind", k, klen, v, + vlen); + } else { + opt_junk = "free"; + opt_junk_alloc = false; + opt_junk_free = true; + } } else { malloc_conf_error( "Invalid conf value", k, @@ -1083,7 +1241,7 @@ malloc_conf_init(void) continue; } CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", - 0, SIZE_T_MAX, false) + 0, SIZE_T_MAX, no, no, false) CONF_HANDLE_BOOL(opt_redzone, "redzone", true) CONF_HANDLE_BOOL(opt_zero, "zero", true) } @@ -1120,8 +1278,8 @@ malloc_conf_init(void) CONF_HANDLE_BOOL(opt_prof_thread_active_init, "prof_thread_active_init", true) CONF_HANDLE_SIZE_T(opt_lg_prof_sample, - "lg_prof_sample", 0, - (sizeof(uint64_t) << 3) - 1, true) + "lg_prof_sample", 0, (sizeof(uint64_t) << 3) + - 1, no, yes, true) CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum", true) CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, @@ -1137,7 +1295,14 @@ malloc_conf_init(void) malloc_conf_error("Invalid conf pair", k, klen, v, vlen); #undef CONF_MATCH +#undef CONF_MATCH_VALUE #undef CONF_HANDLE_BOOL +#undef CONF_MIN_no +#undef CONF_MIN_yes +#undef CONF_MAX_no +#undef CONF_MAX_yes +#undef CONF_HANDLE_T_U +#undef CONF_HANDLE_UNSIGNED #undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_CHAR_P @@ -1145,7 +1310,6 @@ malloc_conf_init(void) } } -/* init_lock must be held. */ static bool malloc_init_hard_needed(void) { @@ -1161,11 +1325,14 @@ malloc_init_hard_needed(void) } #ifdef JEMALLOC_THREADED_INIT if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { + spin_t spinner; + /* Busy-wait until the initializing thread completes. 
*/ + spin_init(&spinner); do { - malloc_mutex_unlock(&init_lock); - CPU_SPINWAIT; - malloc_mutex_lock(&init_lock); + malloc_mutex_unlock(TSDN_NULL, &init_lock); + spin_adaptive(&spinner); + malloc_mutex_lock(TSDN_NULL, &init_lock); } while (!malloc_initialized()); return (false); } @@ -1173,9 +1340,8 @@ malloc_init_hard_needed(void) return (true); } -/* init_lock must be held. */ static bool -malloc_init_hard_a0_locked(void) +malloc_init_hard_a0_locked() { malloc_initializer = INITIALIZER; @@ -1191,6 +1357,7 @@ malloc_init_hard_a0_locked(void) abort(); } } + pages_boot(); if (base_boot()) return (true); if (chunk_boot()) @@ -1199,26 +1366,28 @@ malloc_init_hard_a0_locked(void) return (true); if (config_prof) prof_boot1(); - if (arena_boot()) - return (true); - if (config_tcache && tcache_boot()) + arena_boot(); + if (config_tcache && tcache_boot(TSDN_NULL)) return (true); - if (malloc_mutex_init(&arenas_lock)) + if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) return (true); /* * Create enough scaffolding to allow recursive allocation in * malloc_ncpus(). */ - narenas_total = narenas_auto = 1; + narenas_auto = 1; + narenas_total_set(narenas_auto); arenas = &a0; memset(arenas, 0, sizeof(arena_t *) * narenas_auto); /* * Initialize one arena here. The rest are lazily created in * arena_choose_hard(). */ - if (arena_init(0) == NULL) + if (arena_init(TSDN_NULL, 0) == NULL) return (true); + malloc_init_state = malloc_init_a0_initialized; + return (false); } @@ -1227,45 +1396,42 @@ malloc_init_hard_a0(void) { bool ret; - malloc_mutex_lock(&init_lock); + malloc_mutex_lock(TSDN_NULL, &init_lock); ret = malloc_init_hard_a0_locked(); - malloc_mutex_unlock(&init_lock); + malloc_mutex_unlock(TSDN_NULL, &init_lock); return (ret); } -/* - * Initialize data structures which may trigger recursive allocation. - * - * init_lock must be held. - */ -static void +/* Initialize data structures which may trigger recursive allocation. */ +static bool malloc_init_hard_recursible(void) { malloc_init_state = malloc_init_recursible; - malloc_mutex_unlock(&init_lock); ncpus = malloc_ncpus(); -#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ - && !defined(_WIN32) && !defined(__native_client__)) - /* LinuxThreads's pthread_atfork() allocates. */ +#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ + && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ + !defined(__native_client__)) + /* LinuxThreads' pthread_atfork() allocates. */ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, jemalloc_postfork_child) != 0) { malloc_write("<jemalloc>: Error in pthread_atfork()\n"); if (opt_abort) abort(); + return (true); } #endif - malloc_mutex_lock(&init_lock); + + return (false); } -/* init_lock must be held. */ static bool -malloc_init_hard_finish(void) +malloc_init_hard_finish(tsdn_t *tsdn) { - if (mutex_boot()) + if (malloc_mutex_boot()) return (true); if (opt_narenas == 0) { @@ -1280,68 +1446,69 @@ malloc_init_hard_finish(void) } narenas_auto = opt_narenas; /* - * Make sure that the arenas array can be allocated. In practice, this - * limit is enough to allow the allocator to function, but the ctl - * machinery will fail to allocate memory at far lower limits. + * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). 
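spin_adaptive() replaces the bare CPU_SPINWAIT in the threaded-init wait loop with a progressive backoff. The exact thresholds live in jemalloc's spin implementation; the idea is roughly the following sketch, where the five-round limit and the x86 pause instruction are illustrative assumptions:

#include <sched.h>
#include <stdint.h>

typedef struct { unsigned iteration; } spin_t;

void
spin_init(spin_t *s)
{
    s->iteration = 0;
}

void
spin_adaptive(spin_t *s)
{
    if (s->iteration < 5) {
        uint64_t i;

        for (i = 0; i < (1ULL << s->iteration); i++) {
#if defined(__x86_64__) || defined(__i386__)
            __asm__ volatile("pause");  /* CPU_SPINWAIT equivalent */
#endif
        }
        s->iteration++;
    } else
        sched_yield();                  /* let the initializing thread run */
}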
*/ - if (narenas_auto > chunksize / sizeof(arena_t *)) { - narenas_auto = chunksize / sizeof(arena_t *); + if (narenas_auto > MALLOCX_ARENA_MAX) { + narenas_auto = MALLOCX_ARENA_MAX; malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", narenas_auto); } - narenas_total = narenas_auto; + narenas_total_set(narenas_auto); /* Allocate and initialize arenas. */ - arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total); + arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) * + (MALLOCX_ARENA_MAX+1)); if (arenas == NULL) return (true); - /* - * Zero the array. In practice, this should always be pre-zeroed, - * since it was just mmap()ed, but let's be sure. - */ - memset(arenas, 0, sizeof(arena_t *) * narenas_total); /* Copy the pointer to the one arena that was already initialized. */ - arenas[0] = a0; + arena_set(0, a0); malloc_init_state = malloc_init_initialized; + malloc_slow_flag_init(); + return (false); } static bool malloc_init_hard(void) { + tsd_t *tsd; #if defined(_WIN32) && _WIN32_WINNT < 0x0600 _init_init_lock(); #endif - malloc_mutex_lock(&init_lock); + malloc_mutex_lock(TSDN_NULL, &init_lock); if (!malloc_init_hard_needed()) { - malloc_mutex_unlock(&init_lock); + malloc_mutex_unlock(TSDN_NULL, &init_lock); return (false); } if (malloc_init_state != malloc_init_a0_initialized && malloc_init_hard_a0_locked()) { - malloc_mutex_unlock(&init_lock); + malloc_mutex_unlock(TSDN_NULL, &init_lock); return (true); } - if (malloc_tsd_boot0()) { - malloc_mutex_unlock(&init_lock); + + malloc_mutex_unlock(TSDN_NULL, &init_lock); + /* Recursive allocation relies on functional tsd. */ + tsd = malloc_tsd_boot0(); + if (tsd == NULL) return (true); - } - if (config_prof && prof_boot2()) { - malloc_mutex_unlock(&init_lock); + if (malloc_init_hard_recursible()) return (true); - } + malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); - malloc_init_hard_recursible(); + if (config_prof && prof_boot2(tsd)) { + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); + return (true); + } - if (malloc_init_hard_finish()) { - malloc_mutex_unlock(&init_lock); + if (malloc_init_hard_finish(tsd_tsdn(tsd))) { + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); return (true); } - malloc_mutex_unlock(&init_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); malloc_tsd_boot1(); return (false); } @@ -1355,61 +1522,104 @@ malloc_init_hard(void) */ static void * -imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) +ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero, + prof_tctx_t *tctx, bool slow_path) { void *p; if (tctx == NULL) return (NULL); if (usize <= SMALL_MAXCLASS) { - p = imalloc(tsd, LARGE_MINCLASS); + szind_t ind_large = size2index(LARGE_MINCLASS); + p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path); if (p == NULL) return (NULL); - arena_prof_promoted(p, usize); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); } else - p = imalloc(tsd, usize); + p = ialloc(tsd, usize, ind, zero, slow_path); return (p); } JEMALLOC_ALWAYS_INLINE_C void * -imalloc_prof(tsd_t *tsd, size_t usize) +ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path) { void *p; prof_tctx_t *tctx; tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) - p = imalloc_prof_sample(tsd, usize, tctx); + p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path); else - p = imalloc(tsd, usize); + p = ialloc(tsd, usize, ind, zero, slow_path); if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); return 
(NULL); } - prof_malloc(p, usize, tctx); + prof_malloc(tsd_tsdn(tsd), p, usize, tctx); return (p); } +/* + * ialloc_body() is inlined so that fast and slow paths are generated separately + * with statically known slow_path. + * + * This function guarantees that *tsdn is non-NULL on success. + */ JEMALLOC_ALWAYS_INLINE_C void * -imalloc_body(size_t size, tsd_t **tsd, size_t *usize) +ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize, + bool slow_path) { + tsd_t *tsd; + szind_t ind; - if (unlikely(malloc_init())) + if (slow_path && unlikely(malloc_init())) { + *tsdn = NULL; return (NULL); - *tsd = tsd_fetch(); + } - if (config_prof && opt_prof) { - *usize = s2u(size); - if (unlikely(*usize == 0)) - return (NULL); - return (imalloc_prof(*tsd, *usize)); + tsd = tsd_fetch(); + *tsdn = tsd_tsdn(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); + + ind = size2index(size); + if (unlikely(ind >= NSIZES)) + return (NULL); + + if (config_stats || (config_prof && opt_prof) || (slow_path && + config_valgrind && unlikely(in_valgrind))) { + *usize = index2size(ind); + assert(*usize > 0 && *usize <= HUGE_MAXCLASS); } - if (config_stats || (config_valgrind && unlikely(in_valgrind))) - *usize = s2u(size); - return (imalloc(*tsd, size)); + if (config_prof && opt_prof) + return (ialloc_prof(tsd, *usize, ind, zero, slow_path)); + + return (ialloc(tsd, size, ind, zero, slow_path)); +} + +JEMALLOC_ALWAYS_INLINE_C void +ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func, + bool update_errno, bool slow_path) +{ + + assert(!tsdn_null(tsdn) || ret == NULL); + + if (unlikely(ret == NULL)) { + if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) { + malloc_printf("<jemalloc>: Error in %s(): out of " + "memory\n", func); + abort(); + } + if (update_errno) + set_errno(ENOMEM); + } + if (config_stats && likely(ret != NULL)) { + assert(usize == isalloc(tsdn, ret, config_prof)); + *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize; + } + witness_assert_lockless(tsdn); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN @@ -1418,27 +1628,22 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_malloc(size_t size) { void *ret; - tsd_t *tsd; + tsdn_t *tsdn; size_t usize JEMALLOC_CC_SILENCE_INIT(0); if (size == 0) size = 1; - ret = imalloc_body(size, &tsd, &usize); - if (unlikely(ret == NULL)) { - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write("<jemalloc>: Error in malloc(): " - "out of memory\n"); - abort(); - } - set_errno(ENOMEM); - } - if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(ret, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; + if (likely(!malloc_slow)) { + ret = ialloc_body(size, false, &tsdn, &usize, false); + ialloc_post_check(ret, tsdn, usize, "malloc", true, false); + } else { + ret = ialloc_body(size, false, &tsdn, &usize, true); + ialloc_post_check(ret, tsdn, usize, "malloc", true, true); + UTRACE(0, size, ret); + JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false); } - UTRACE(0, size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false); + return (ret); } @@ -1455,7 +1660,7 @@ imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize, p = ipalloc(tsd, LARGE_MINCLASS, alignment, false); if (p == NULL) return (NULL); - arena_prof_promoted(p, usize); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); } else p = ipalloc(tsd, usize, alignment, false); @@ -1477,7 +1682,7 @@ imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize) prof_alloc_rollback(tsd, tctx, true); return (NULL); } - 
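ialloc_body() (and later imallocx_body()) relies on slow_path being a compile-time constant at every call site: the helper is always-inlined, so the compiler emits one specialization with the Valgrind/utrace/xmalloc work stripped out and one with it kept, and je_malloc() picks between them with a single test of malloc_slow. A minimal sketch of that pattern (my_malloc and alloc_body are hypothetical names):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool slow_mode;                   /* stands in for malloc_slow */

__attribute__((always_inline)) static inline void *
alloc_body(size_t size, bool slow_path)
{
    void *p = malloc(size);

    if (slow_path && p != NULL)          /* dead code in the fast copy */
        fprintf(stderr, "trace: alloc %zu -> %p\n", size, p);
    return p;
}

void *
my_malloc(size_t size)
{
    if (!slow_mode)
        return alloc_body(size, false);  /* specialized: no tracing */
    return alloc_body(size, true);       /* specialized: tracing kept */
}

int
main(void)
{
    free(my_malloc(32));
    return 0;
}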
prof_malloc(p, usize, tctx); + prof_malloc(tsd_tsdn(tsd), p, usize, tctx); return (p); } @@ -1494,10 +1699,12 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) assert(min_alignment != 0); if (unlikely(malloc_init())) { + tsd = NULL; result = NULL; goto label_oom; } tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); if (size == 0) size = 1; @@ -1515,7 +1722,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) } usize = sa2u(size, alignment); - if (unlikely(usize == 0)) { + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { result = NULL; goto label_oom; } @@ -1532,10 +1739,13 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) ret = 0; label_return: if (config_stats && likely(result != NULL)) { - assert(usize == isalloc(result, config_prof)); + assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof)); *tsd_thread_allocatedp_get(tsd) += usize; } UTRACE(0, size, result); + JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize, + false); + witness_assert_lockless(tsd_tsdn(tsd)); return (ret); label_oom: assert(result == NULL); @@ -1545,6 +1755,7 @@ label_oom: abort(); } ret = ENOMEM; + witness_assert_lockless(tsd_tsdn(tsd)); goto label_return; } @@ -1552,9 +1763,10 @@ JEMALLOC_EXPORT int JEMALLOC_NOTHROW JEMALLOC_ATTR(nonnull(1)) je_posix_memalign(void **memptr, size_t alignment, size_t size) { - int ret = imemalign(memptr, alignment, size, sizeof(void *)); - JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr, - config_prof), false); + int ret; + + ret = imemalign(memptr, alignment, size, sizeof(void *)); + return (ret); } @@ -1570,47 +1782,8 @@ je_aligned_alloc(size_t alignment, size_t size) ret = NULL; set_errno(err); } - JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof), - false); - return (ret); -} - -static void * -icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) -{ - void *p; - - if (tctx == NULL) - return (NULL); - if (usize <= SMALL_MAXCLASS) { - p = icalloc(tsd, LARGE_MINCLASS); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = icalloc(tsd, usize); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -icalloc_prof(tsd_t *tsd, size_t usize) -{ - void *p; - prof_tctx_t *tctx; - - tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) - p = icalloc_prof_sample(tsd, usize, tctx); - else - p = icalloc(tsd, usize); - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - prof_malloc(p, usize, tctx); - return (p); + return (ret); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN @@ -1619,65 +1792,35 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) je_calloc(size_t num, size_t size) { void *ret; - tsd_t *tsd; + tsdn_t *tsdn; size_t num_size; size_t usize JEMALLOC_CC_SILENCE_INIT(0); - if (unlikely(malloc_init())) { - num_size = 0; - ret = NULL; - goto label_return; - } - tsd = tsd_fetch(); - num_size = num * size; if (unlikely(num_size == 0)) { if (num == 0 || size == 0) num_size = 1; - else { - ret = NULL; - goto label_return; - } + else + num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */ /* * Try to avoid division here. We know that it isn't possible to * overflow during multiplication if neither operand uses any of the * most significant half of the bits in a size_t. 
*/ } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) << - 2))) && (num_size / size != num))) { - /* size_t overflow. */ - ret = NULL; - goto label_return; - } + 2))) && (num_size / size != num))) + num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */ - if (config_prof && opt_prof) { - usize = s2u(num_size); - if (unlikely(usize == 0)) { - ret = NULL; - goto label_return; - } - ret = icalloc_prof(tsd, usize); + if (likely(!malloc_slow)) { + ret = ialloc_body(num_size, true, &tsdn, &usize, false); + ialloc_post_check(ret, tsdn, usize, "calloc", true, false); } else { - if (config_stats || (config_valgrind && unlikely(in_valgrind))) - usize = s2u(num_size); - ret = icalloc(tsd, num_size); + ret = ialloc_body(num_size, true, &tsdn, &usize, true); + ialloc_post_check(ret, tsdn, usize, "calloc", true, true); + UTRACE(0, num_size, ret); + JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true); } -label_return: - if (unlikely(ret == NULL)) { - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write("<jemalloc>: Error in calloc(): out of " - "memory\n"); - abort(); - } - set_errno(ENOMEM); - } - if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(ret, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; - } - UTRACE(0, num_size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true); return (ret); } @@ -1693,7 +1836,7 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); if (p == NULL) return (NULL); - arena_prof_promoted(p, usize); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); } else p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); @@ -1708,7 +1851,7 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(old_ptr); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); tctx = prof_alloc_prep(tsd, usize, prof_active, true); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); @@ -1725,32 +1868,41 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) } JEMALLOC_INLINE_C void -ifree(tsd_t *tsd, void *ptr, tcache_t *tcache) +ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { size_t usize; UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); + witness_assert_lockless(tsd_tsdn(tsd)); + assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); if (config_prof && opt_prof) { - usize = isalloc(ptr, config_prof); + usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); prof_free(tsd, ptr, usize); } else if (config_stats || config_valgrind) - usize = isalloc(ptr, config_prof); + usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); if (config_stats) *tsd_thread_deallocatedp_get(tsd) += usize; - if (config_valgrind && unlikely(in_valgrind)) - rzsize = p2rz(ptr); - iqalloc(tsd, ptr, tcache); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); + + if (likely(!slow_path)) + iqalloc(tsd, ptr, tcache, false); + else { + if (config_valgrind && unlikely(in_valgrind)) + rzsize = p2rz(tsd_tsdn(tsd), ptr); + iqalloc(tsd, ptr, tcache, true); + JEMALLOC_VALGRIND_FREE(ptr, rzsize); + } } JEMALLOC_INLINE_C void -isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache) +isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); + witness_assert_lockless(tsd_tsdn(tsd)); + assert(ptr != NULL); 
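je_calloc() keeps the cheap overflow screen shown above: if neither operand touches the upper half of a size_t the product cannot overflow, and only in the remaining rare case is a division used to confirm. What changed is that an overflowing request is now turned into an impossible size (HUGE_MAXCLASS + 1) so it fails through the normal OOM path instead of an early return. The screen itself, as a standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
mul_overflows(size_t num, size_t size, size_t *prod)
{
    *prod = num * size;
    if (num == 0 || size == 0)
        return false;
    if (((num | size) & (SIZE_MAX << (sizeof(size_t) * 8 / 2))) == 0)
        return false;                /* both operands small: cannot overflow */
    return (*prod / size != num);    /* rare path: verify with a division */
}

int
main(void)
{
    size_t p;

    printf("%d\n", (int)mul_overflows(1000, 1000, &p));         /* 0 */
    printf("%d\n", (int)mul_overflows(SIZE_MAX / 2, 3, &p));     /* 1 */
    return 0;
}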
assert(malloc_initialized() || IS_INITIALIZER); @@ -1759,8 +1911,8 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache) if (config_stats) *tsd_thread_deallocatedp_get(tsd) += usize; if (config_valgrind && unlikely(in_valgrind)) - rzsize = p2rz(ptr); - isqalloc(tsd, ptr, usize, tcache); + rzsize = p2rz(tsd_tsdn(tsd), ptr); + isqalloc(tsd, ptr, usize, tcache, slow_path); JEMALLOC_VALGRIND_FREE(ptr, rzsize); } @@ -1770,44 +1922,57 @@ JEMALLOC_ALLOC_SIZE(2) je_realloc(void *ptr, size_t size) { void *ret; - tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL); + tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t old_usize = 0; UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); if (unlikely(size == 0)) { if (ptr != NULL) { + tsd_t *tsd; + /* realloc(ptr, 0) is equivalent to free(ptr). */ UTRACE(ptr, 0, 0); tsd = tsd_fetch(); - ifree(tsd, ptr, tcache_get(tsd, false)); + ifree(tsd, ptr, tcache_get(tsd, false), true); return (NULL); } size = 1; } if (likely(ptr != NULL)) { + tsd_t *tsd; + assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); tsd = tsd_fetch(); - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && unlikely(in_valgrind)) - old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize); + witness_assert_lockless(tsd_tsdn(tsd)); + + old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); + if (config_valgrind && unlikely(in_valgrind)) { + old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) : + u2rz(old_usize); + } if (config_prof && opt_prof) { usize = s2u(size); - ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd, - ptr, old_usize, usize); + ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ? + NULL : irealloc_prof(tsd, ptr, old_usize, usize); } else { if (config_stats || (config_valgrind && unlikely(in_valgrind))) usize = s2u(size); ret = iralloc(tsd, ptr, old_usize, size, 0, false); } + tsdn = tsd_tsdn(tsd); } else { /* realloc(NULL, size) is equivalent to malloc(size). 
*/ - ret = imalloc_body(size, &tsd, &usize); + if (likely(!malloc_slow)) + ret = ialloc_body(size, false, &tsdn, &usize, false); + else + ret = ialloc_body(size, false, &tsdn, &usize, true); + assert(!tsdn_null(tsdn) || ret == NULL); } if (unlikely(ret == NULL)) { @@ -1819,13 +1984,17 @@ je_realloc(void *ptr, size_t size) set_errno(ENOMEM); } if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(ret, config_prof)); + tsd_t *tsd; + + assert(usize == isalloc(tsdn, ret, config_prof)); + tsd = tsdn_tsd(tsdn); *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, ret); - JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize, - old_rzsize, true, false); + JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr, + old_usize, old_rzsize, maybe, false); + witness_assert_lockless(tsdn); return (ret); } @@ -1836,7 +2005,12 @@ je_free(void *ptr) UTRACE(ptr, 0, 0); if (likely(ptr != NULL)) { tsd_t *tsd = tsd_fetch(); - ifree(tsd, ptr, tcache_get(tsd, false)); + witness_assert_lockless(tsd_tsdn(tsd)); + if (likely(!malloc_slow)) + ifree(tsd, ptr, tcache_get(tsd, false), false); + else + ifree(tsd, ptr, tcache_get(tsd, false), true); + witness_assert_lockless(tsd_tsdn(tsd)); } } @@ -1857,7 +2031,6 @@ je_memalign(size_t alignment, size_t size) void *ret JEMALLOC_CC_SILENCE_INIT(NULL); if (unlikely(imemalign(&ret, alignment, size, 1) != 0)) ret = NULL; - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); return (ret); } #endif @@ -1871,7 +2044,6 @@ je_valloc(size_t size) void *ret JEMALLOC_CC_SILENCE_INIT(NULL); if (unlikely(imemalign(&ret, PAGE, size, 1) != 0)) ret = NULL; - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); return (ret); } #endif @@ -1901,6 +2073,29 @@ JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = je_memalign; # endif + +#ifdef CPU_COUNT +/* + * To enable static linking with glibc, the libc specific malloc interface must + * be implemented also, so none of glibc's malloc.o functions are added to the + * link. + */ +#define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) +/* To force macro expansion of je_ prefix before stringification. 
*/ +#define PREALIAS(je_fn) ALIAS(je_fn) +void *__libc_malloc(size_t size) PREALIAS(je_malloc); +void __libc_free(void* ptr) PREALIAS(je_free); +void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); +void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); +void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); +void *__libc_valloc(size_t size) PREALIAS(je_valloc); +int __posix_memalign(void** r, size_t a, size_t s) + PREALIAS(je_posix_memalign); +#undef PREALIAS +#undef ALIAS + +#endif + #endif /* @@ -1912,7 +2107,7 @@ JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = */ JEMALLOC_ALWAYS_INLINE_C bool -imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize, +imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) { @@ -1923,7 +2118,8 @@ imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize, *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); *usize = sa2u(size, *alignment); } - assert(*usize != 0); + if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS)) + return (true); *zero = MALLOCX_ZERO_GET(flags); if ((flags & MALLOCX_TCACHE_MASK) != 0) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) @@ -1934,7 +2130,7 @@ imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize, *tcache = tcache_get(tsd, true); if ((flags & MALLOCX_ARENA_MASK) != 0) { unsigned arena_ind = MALLOCX_ARENA_GET(flags); - *arena = arena_get(tsd, arena_ind, true, true); + *arena = arena_get(tsd_tsdn(tsd), arena_ind, true); if (unlikely(*arena == NULL)) return (true); } else @@ -1942,59 +2138,44 @@ imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize, return (false); } -JEMALLOC_ALWAYS_INLINE_C bool -imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, - size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) -{ - - if (likely(flags == 0)) { - *usize = s2u(size); - assert(*usize != 0); - *alignment = 0; - *zero = false; - *tcache = tcache_get(tsd, true); - *arena = NULL; - return (false); - } else { - return (imallocx_flags_decode_hard(tsd, size, flags, usize, - alignment, zero, tcache, arena)); - } -} - JEMALLOC_ALWAYS_INLINE_C void * -imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena) +imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena, bool slow_path) { + szind_t ind; if (unlikely(alignment != 0)) - return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); - if (unlikely(zero)) - return (icalloct(tsd, usize, tcache, arena)); - return (imalloct(tsd, usize, tcache, arena)); + return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); + ind = size2index(usize); + assert(ind < NSIZES); + return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena, + slow_path)); } static void * -imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena) +imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena, bool slow_path) { void *p; if (usize <= SMALL_MAXCLASS) { assert(((alignment == 0) ? 
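The __libc_* definitions above exist so that a statically linked glibc program never pulls in glibc's own malloc.o: the internal entry points the libc startup code references resolve to the jemalloc implementation instead. The mechanism is GCC's alias attribute plus one level of macro expansion so the je_ prefix is expanded before stringification. A compile-only sketch with hypothetical names (do not link something like this into an ordinary dynamically linked program):

#include <stddef.h>

/* Real allocation logic omitted; only the aliasing is of interest here. */
void *my_malloc(size_t size) { (void)size; return NULL; }
void my_free(void *ptr) { (void)ptr; }

/* #fn stringifies after macro expansion, as PREALIAS/ALIAS arrange above. */
#define ALIAS(fn) __attribute__((alias(#fn), used))
void *__libc_malloc(size_t size) ALIAS(my_malloc);
void __libc_free(void *ptr) ALIAS(my_free);
#undef ALIAS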
s2u(LARGE_MINCLASS) : sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS); - p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache, - arena); + p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero, + tcache, arena, slow_path); if (p == NULL) return (NULL); - arena_prof_promoted(p, usize); - } else - p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena); + arena_prof_promoted(tsdn, p, usize); + } else { + p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena, + slow_path); + } return (p); } JEMALLOC_ALWAYS_INLINE_C void * -imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize) +imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path) { void *p; size_t alignment; @@ -2007,25 +2188,27 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize) &zero, &tcache, &arena))) return (NULL); tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true); - if (likely((uintptr_t)tctx == (uintptr_t)1U)) - p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena); - else if ((uintptr_t)tctx > (uintptr_t)1U) { - p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache, - arena); + if (likely((uintptr_t)tctx == (uintptr_t)1U)) { + p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, + tcache, arena, slow_path); + } else if ((uintptr_t)tctx > (uintptr_t)1U) { + p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero, + tcache, arena, slow_path); } else p = NULL; if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); return (NULL); } - prof_malloc(p, *usize, tctx); + prof_malloc(tsd_tsdn(tsd), p, *usize, tctx); assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); return (p); } JEMALLOC_ALWAYS_INLINE_C void * -imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize) +imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, + bool slow_path) { void *p; size_t alignment; @@ -2033,56 +2216,78 @@ imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize) tcache_t *tcache; arena_t *arena; - if (likely(flags == 0)) { - if (config_stats || (config_valgrind && unlikely(in_valgrind))) - *usize = s2u(size); - return (imalloc(tsd, size)); - } - - if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize, - &alignment, &zero, &tcache, &arena))) + if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment, + &zero, &tcache, &arena))) return (NULL); - p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena); + p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache, + arena, slow_path); assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); return (p); } +/* This function guarantees that *tsdn is non-NULL on success. 
*/ +JEMALLOC_ALWAYS_INLINE_C void * +imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize, + bool slow_path) +{ + tsd_t *tsd; + + if (slow_path && unlikely(malloc_init())) { + *tsdn = NULL; + return (NULL); + } + + tsd = tsd_fetch(); + *tsdn = tsd_tsdn(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); + + if (likely(flags == 0)) { + szind_t ind = size2index(size); + if (unlikely(ind >= NSIZES)) + return (NULL); + if (config_stats || (config_prof && opt_prof) || (slow_path && + config_valgrind && unlikely(in_valgrind))) { + *usize = index2size(ind); + assert(*usize > 0 && *usize <= HUGE_MAXCLASS); + } + + if (config_prof && opt_prof) { + return (ialloc_prof(tsd, *usize, ind, false, + slow_path)); + } + + return (ialloc(tsd, size, ind, false, slow_path)); + } + + if (config_prof && opt_prof) + return (imallocx_prof(tsd, size, flags, usize, slow_path)); + + return (imallocx_no_prof(tsd, size, flags, usize, slow_path)); +} + JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_mallocx(size_t size, int flags) { - tsd_t *tsd; + tsdn_t *tsdn; void *p; size_t usize; assert(size != 0); - if (unlikely(malloc_init())) - goto label_oom; - tsd = tsd_fetch(); - - if (config_prof && opt_prof) - p = imallocx_prof(tsd, size, flags, &usize); - else - p = imallocx_no_prof(tsd, size, flags, &usize); - if (unlikely(p == NULL)) - goto label_oom; - - if (config_stats) { - assert(usize == isalloc(p, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; + if (likely(!malloc_slow)) { + p = imallocx_body(size, flags, &tsdn, &usize, false); + ialloc_post_check(p, tsdn, usize, "mallocx", false, false); + } else { + p = imallocx_body(size, flags, &tsdn, &usize, true); + ialloc_post_check(p, tsdn, usize, "mallocx", false, true); + UTRACE(0, size, p); + JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize, + MALLOCX_ZERO_GET(flags)); } - UTRACE(0, size, p); - JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags)); + return (p); -label_oom: - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write("<jemalloc>: Error in mallocx(): out of memory\n"); - abort(); - } - UTRACE(0, size, 0); - return (NULL); } static void * @@ -2099,7 +2304,7 @@ irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, zero, tcache, arena); if (p == NULL) return (NULL); - arena_prof_promoted(p, usize); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); } else { p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero, tcache, arena); @@ -2118,8 +2323,8 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(old_ptr); - tctx = prof_alloc_prep(tsd, *usize, prof_active, true); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); + tctx = prof_alloc_prep(tsd, *usize, prof_active, false); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize, alignment, zero, tcache, arena, tctx); @@ -2128,7 +2333,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, tcache, arena); } if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); + prof_alloc_rollback(tsd, tctx, false); return (NULL); } @@ -2141,9 +2346,9 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, * be the same as the current usize because of in-place large * reallocation. Therefore, query the actual value of usize. 
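imallocx_body() keeps a dedicated flags == 0 fast path that behaves exactly like malloc(); only when flags are present does it decode alignment, zeroing, tcache and arena choices out of the flag word. The sketch below decodes a made-up layout (the real MALLOCX_* encoding is defined in jemalloc's public headers, so treat these bit positions purely as an illustration):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define X_LG_ALIGN_MASK 0x3f                 /* hypothetical layout */
#define X_ZERO          (1 << 6)             /* hypothetical layout */
#define X_ARENA(a)      (((a) + 1) << 8)     /* hypothetical layout */

static void
decode_flags(int flags, size_t *alignment, bool *zero, int *arena)
{
    int lg = flags & X_LG_ALIGN_MASK;

    *alignment = (lg == 0) ? 0 : (size_t)1 << lg;
    *zero = (flags & X_ZERO) != 0;
    *arena = (flags >> 8) - 1;               /* -1 means "no preference" */
}

int
main(void)
{
    size_t align;
    bool zero;
    int arena;

    decode_flags(6 | X_ZERO | X_ARENA(3), &align, &zero, &arena);
    printf("align=%zu zero=%d arena=%d\n", align, (int)zero, arena);
    return 0;
}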
*/ - *usize = isalloc(p, config_prof); + *usize = isalloc(tsd_tsdn(tsd), p, config_prof); } - prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr, + prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, old_usize, old_tctx); return (p); @@ -2169,10 +2374,11 @@ je_rallocx(void *ptr, size_t size, int flags) assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { unsigned arena_ind = MALLOCX_ARENA_GET(flags); - arena = arena_get(tsd, arena_ind, true, true); + arena = arena_get(tsd_tsdn(tsd), arena_ind, true); if (unlikely(arena == NULL)) goto label_oom; } else @@ -2186,13 +2392,14 @@ je_rallocx(void *ptr, size_t size, int flags) } else tcache = tcache_get(tsd, true); - old_usize = isalloc(ptr, config_prof); + old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); if (config_valgrind && unlikely(in_valgrind)) old_rzsize = u2rz(old_usize); if (config_prof && opt_prof) { usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); - assert(usize != 0); + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + goto label_oom; p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, zero, tcache, arena); if (unlikely(p == NULL)) @@ -2203,7 +2410,7 @@ je_rallocx(void *ptr, size_t size, int flags) if (unlikely(p == NULL)) goto label_oom; if (config_stats || (config_valgrind && unlikely(in_valgrind))) - usize = isalloc(p, config_prof); + usize = isalloc(tsd_tsdn(tsd), p, config_prof); } assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); @@ -2212,8 +2419,9 @@ je_rallocx(void *ptr, size_t size, int flags) *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, p); - JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize, - old_rzsize, false, zero); + JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr, + old_usize, old_rzsize, no, zero); + witness_assert_lockless(tsd_tsdn(tsd)); return (p); label_oom: if (config_xmalloc && unlikely(opt_xmalloc)) { @@ -2221,31 +2429,33 @@ label_oom: abort(); } UTRACE(ptr, size, 0); + witness_assert_lockless(tsd_tsdn(tsd)); return (NULL); } JEMALLOC_ALWAYS_INLINE_C size_t -ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, bool zero) +ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero) { size_t usize; - if (ixalloc(ptr, old_usize, size, extra, alignment, zero)) + if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) return (old_usize); - usize = isalloc(ptr, config_prof); + usize = isalloc(tsdn, ptr, config_prof); return (usize); } static size_t -ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, bool zero, prof_tctx_t *tctx) +ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { size_t usize; if (tctx == NULL) return (old_usize); - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero); + usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, + zero); return (usize); } @@ -2259,23 +2469,36 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(ptr); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); /* * usize isn't knowable before ixalloc() returns when extra is non-zero. 
* Therefore, compute its maximum possible value and use that in * prof_alloc_prep() to decide whether to capture a backtrace. * prof_realloc() will use the actual usize to decide whether to sample. */ - usize_max = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra, - alignment); - assert(usize_max != 0); + if (alignment == 0) { + usize_max = s2u(size+extra); + assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS); + } else { + usize_max = sa2u(size+extra, alignment); + if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) { + /* + * usize_max is out of range, and chances are that + * allocation will fail, but use the maximum possible + * value and carry on with prof_alloc_prep(), just in + * case allocation succeeds. + */ + usize_max = HUGE_MAXCLASS; + } + } tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { - usize = ixallocx_prof_sample(ptr, old_usize, size, extra, - alignment, zero, tctx); + usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, + size, extra, alignment, zero, tctx); } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero); + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); } if (usize == old_usize) { prof_alloc_rollback(tsd, tctx, false); @@ -2302,18 +2525,25 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); - old_usize = isalloc(ptr, config_prof); + old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); - /* Clamp extra if necessary to avoid (size + extra) overflow. */ - if (unlikely(size + extra > HUGE_MAXCLASS)) { - /* Check for size overflow. */ - if (unlikely(size > HUGE_MAXCLASS)) { - usize = old_usize; - goto label_not_resized; - } - extra = HUGE_MAXCLASS - size; + /* + * The API explicitly absolves itself of protecting against (size + + * extra) numerical overflow, but we may need to clamp extra to avoid + * exceeding HUGE_MAXCLASS. + * + * Ordinarily, size limit checking is handled deeper down, but here we + * have to check as part of (size + extra) clamping, since we need the + * clamped value in the above helper functions. 
+ */ + if (unlikely(size > HUGE_MAXCLASS)) { + usize = old_usize; + goto label_not_resized; } + if (unlikely(HUGE_MAXCLASS - size < extra)) + extra = HUGE_MAXCLASS - size; if (config_valgrind && unlikely(in_valgrind)) old_rzsize = u2rz(old_usize); @@ -2322,8 +2552,8 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, alignment, zero); } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero); + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); } if (unlikely(usize == old_usize)) goto label_not_resized; @@ -2332,10 +2562,11 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } - JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize, - old_rzsize, false, zero); + JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr, + old_usize, old_rzsize, no, zero); label_not_resized: UTRACE(ptr, size, ptr); + witness_assert_lockless(tsd_tsdn(tsd)); return (usize); } @@ -2344,15 +2575,20 @@ JEMALLOC_ATTR(pure) je_sallocx(const void *ptr, int flags) { size_t usize; + tsdn_t *tsdn; assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); + if (config_ivsalloc) - usize = ivsalloc(ptr, config_prof); + usize = ivsalloc(tsdn, ptr, config_prof); else - usize = isalloc(ptr, config_prof); + usize = isalloc(tsdn, ptr, config_prof); + witness_assert_lockless(tsdn); return (usize); } @@ -2366,6 +2602,7 @@ je_dallocx(void *ptr, int flags) assert(malloc_initialized() || IS_INITIALIZER); tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) tcache = NULL; @@ -2375,19 +2612,25 @@ je_dallocx(void *ptr, int flags) tcache = tcache_get(tsd, false); UTRACE(ptr, 0, 0); - ifree(tsd_fetch(), ptr, tcache); + if (likely(!malloc_slow)) + ifree(tsd, ptr, tcache, false); + else + ifree(tsd, ptr, tcache, true); + witness_assert_lockless(tsd_tsdn(tsd)); } JEMALLOC_ALWAYS_INLINE_C size_t -inallocx(size_t size, int flags) +inallocx(tsdn_t *tsdn, size_t size, int flags) { size_t usize; + witness_assert_lockless(tsdn); + if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) usize = s2u(size); else usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); - assert(usize != 0); + witness_assert_lockless(tsdn); return (usize); } @@ -2400,10 +2643,11 @@ je_sdallocx(void *ptr, size_t size, int flags) assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); - usize = inallocx(size, flags); - assert(usize == isalloc(ptr, config_prof)); - tsd = tsd_fetch(); + usize = inallocx(tsd_tsdn(tsd), size, flags); + assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof)); + + witness_assert_lockless(tsd_tsdn(tsd)); if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) tcache = NULL; @@ -2413,75 +2657,116 @@ je_sdallocx(void *ptr, size_t size, int flags) tcache = tcache_get(tsd, false); UTRACE(ptr, 0, 0); - isfree(tsd, ptr, usize, tcache); + if (likely(!malloc_slow)) + isfree(tsd, ptr, usize, tcache, false); + else + isfree(tsd, ptr, usize, tcache, true); + witness_assert_lockless(tsd_tsdn(tsd)); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) je_nallocx(size_t size, int flags) { + size_t usize; + tsdn_t *tsdn; assert(size != 0); if 
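The clamp je_xallocx() now performs (above, before any helper runs) makes the documented non-guarantee about (size + extra) overflow safe to rely on internally: if size alone already exceeds the largest class the call reports "not resized", otherwise extra is shrunk so the sum can never wrap. As a standalone sketch (LIMIT stands in for HUGE_MAXCLASS):

#include <stddef.h>
#include <stdio.h>

#define LIMIT ((size_t)1 << 30)   /* illustrative; not the real class limit */

static int
clamp_extra(size_t size, size_t *extra)
{
    if (size > LIMIT)
        return -1;                /* size alone is already too large */
    if (LIMIT - size < *extra)
        *extra = LIMIT - size;    /* now size + *extra cannot overflow */
    return 0;
}

int
main(void)
{
    size_t extra = 1024;

    if (clamp_extra(LIMIT - 16, &extra) == 0)
        printf("extra clamped to %zu\n", extra);   /* prints 16 */
    return 0;
}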
(unlikely(malloc_init())) return (0); - return (inallocx(size, flags)); + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); + + usize = inallocx(tsdn, size, flags); + if (unlikely(usize > HUGE_MAXCLASS)) + return (0); + + witness_assert_lockless(tsdn); + return (usize); } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + tsd_t *tsd; if (unlikely(malloc_init())) return (EAGAIN); - return (ctl_byname(name, oldp, oldlenp, newp, newlen)); + tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); + ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); + witness_assert_lockless(tsd_tsdn(tsd)); + return (ret); } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { + int ret; + tsdn_t *tsdn; if (unlikely(malloc_init())) return (EAGAIN); - return (ctl_nametomib(name, mibp, miblenp)); + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); + ret = ctl_nametomib(tsdn, name, mibp, miblenp); + witness_assert_lockless(tsdn); + return (ret); } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + tsd_t *tsd; if (unlikely(malloc_init())) return (EAGAIN); - return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); + tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); + ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); + witness_assert_lockless(tsd_tsdn(tsd)); + return (ret); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts) { + tsdn_t *tsdn; + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); stats_print(write_cb, cbopaque, opts); + witness_assert_lockless(tsdn); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { size_t ret; + tsdn_t *tsdn; assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); + if (config_ivsalloc) - ret = ivsalloc(ptr, config_prof); + ret = ivsalloc(tsdn, ptr, config_prof); else - ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof); + ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof); + witness_assert_lockless(tsdn); return (ret); } @@ -2507,6 +2792,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) * to trigger the deadlock described above, but doing so would involve forking * via a library constructor that runs before jemalloc's runs. */ +#ifndef JEMALLOC_JET JEMALLOC_ATTR(constructor) static void jemalloc_constructor(void) @@ -2514,6 +2800,7 @@ jemalloc_constructor(void) malloc_init(); } +#endif #ifndef JEMALLOC_MUTEX_INIT_CB void @@ -2523,7 +2810,9 @@ JEMALLOC_EXPORT void _malloc_prefork(void) #endif { - unsigned i; + tsd_t *tsd; + unsigned i, j, narenas; + arena_t *arena; #ifdef JEMALLOC_MUTEX_INIT_CB if (!malloc_initialized()) @@ -2531,16 +2820,40 @@ _malloc_prefork(void) #endif assert(malloc_initialized()); + tsd = tsd_fetch(); + + narenas = narenas_total_get(); + + witness_prefork(tsd); /* Acquire all mutexes in a safe order. 
*/ - ctl_prefork(); - prof_prefork(); - malloc_mutex_prefork(&arenas_lock); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_prefork(arenas[i]); + ctl_prefork(tsd_tsdn(tsd)); + malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); + prof_prefork0(tsd_tsdn(tsd)); + for (i = 0; i < 3; i++) { + for (j = 0; j < narenas; j++) { + if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != + NULL) { + switch (i) { + case 0: + arena_prefork0(tsd_tsdn(tsd), arena); + break; + case 1: + arena_prefork1(tsd_tsdn(tsd), arena); + break; + case 2: + arena_prefork2(tsd_tsdn(tsd), arena); + break; + default: not_reached(); + } + } + } + } + base_prefork(tsd_tsdn(tsd)); + for (i = 0; i < narenas; i++) { + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + arena_prefork3(tsd_tsdn(tsd), arena); } - chunk_prefork(); - base_prefork(); + prof_prefork1(tsd_tsdn(tsd)); } #ifndef JEMALLOC_MUTEX_INIT_CB @@ -2551,7 +2864,8 @@ JEMALLOC_EXPORT void _malloc_postfork(void) #endif { - unsigned i; + tsd_t *tsd; + unsigned i, narenas; #ifdef JEMALLOC_MUTEX_INIT_CB if (!malloc_initialized()) @@ -2559,35 +2873,44 @@ _malloc_postfork(void) #endif assert(malloc_initialized()); + tsd = tsd_fetch(); + + witness_postfork_parent(tsd); /* Release all mutexes, now that fork() has completed. */ - base_postfork_parent(); - chunk_postfork_parent(); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_postfork_parent(arenas[i]); + base_postfork_parent(tsd_tsdn(tsd)); + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena; + + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + arena_postfork_parent(tsd_tsdn(tsd), arena); } - malloc_mutex_postfork_parent(&arenas_lock); - prof_postfork_parent(); - ctl_postfork_parent(); + prof_postfork_parent(tsd_tsdn(tsd)); + malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); + ctl_postfork_parent(tsd_tsdn(tsd)); } void jemalloc_postfork_child(void) { - unsigned i; + tsd_t *tsd; + unsigned i, narenas; assert(malloc_initialized()); + tsd = tsd_fetch(); + + witness_postfork_child(tsd); /* Release all mutexes, now that fork() has completed. 
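The reworked fork handlers above acquire every jemalloc mutex in a fixed, witness-ranked order before fork() and release or re-create them afterwards (the child handler continues below). Outside jemalloc the same discipline is usually wired up with pthread_atfork(); a minimal generic sketch, using a hypothetical demo_lock rather than any jemalloc mutex:

#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_prefork(void)         { pthread_mutex_lock(&demo_lock); }
static void demo_postfork_parent(void) { pthread_mutex_unlock(&demo_lock); }
/* jemalloc re-initializes some mutexes in the child; unlocking is the simple form. */
static void demo_postfork_child(void)  { pthread_mutex_unlock(&demo_lock); }

static void
demo_register_fork_handlers(void)
{
	pthread_atfork(demo_prefork, demo_postfork_parent, demo_postfork_child);
}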
*/ - base_postfork_child(); - chunk_postfork_child(); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_postfork_child(arenas[i]); + base_postfork_child(tsd_tsdn(tsd)); + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena; + + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + arena_postfork_child(tsd_tsdn(tsd), arena); } - malloc_mutex_postfork_child(&arenas_lock); - prof_postfork_child(); - ctl_postfork_child(); + prof_postfork_child(tsd_tsdn(tsd)); + malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); + ctl_postfork_child(tsd_tsdn(tsd)); } /******************************************************************************/ @@ -2607,9 +2930,10 @@ je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) { if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { /* indication that this is not a LARGE alloc */ arena_t *arena = extent_node_arena_get(&chunk->node); size_t rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); - arena_run_t *run = &arena_miscelm_get(chunk, rpages_ind)->run; + arena_run_t *run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; arena_bin_t *bin = &arena->bins[run->binind]; - malloc_mutex_lock(&bin->lock); + tsd_t *tsd = tsd_fetch(); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); /* runs that are in the same chunk in as the current chunk, are likely to be the next currun */ if (chunk != (arena_chunk_t *)CHUNK_ADDR2BASE(bin->runcur)) { arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; @@ -2618,7 +2942,7 @@ je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) { *run_util = ((bin_info->nregs - run->nfree)<<16) / bin_info->nregs; defrag = 1; } - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } } return defrag; diff --git a/deps/jemalloc/src/mutex.c b/deps/jemalloc/src/mutex.c index 2d47af976..6333e73d6 100644 --- a/deps/jemalloc/src/mutex.c +++ b/deps/jemalloc/src/mutex.c @@ -69,7 +69,7 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, #endif bool -malloc_mutex_init(malloc_mutex_t *mutex) +malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank) { #ifdef _WIN32 @@ -80,6 +80,8 @@ malloc_mutex_init(malloc_mutex_t *mutex) _CRT_SPINCOUNT)) return (true); # endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + mutex->lock = OS_UNFAIR_LOCK_INIT; #elif (defined(JEMALLOC_OSSPIN)) mutex->lock = 0; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) @@ -103,31 +105,34 @@ malloc_mutex_init(malloc_mutex_t *mutex) } pthread_mutexattr_destroy(&attr); #endif + if (config_debug) + witness_init(&mutex->witness, name, rank, NULL); return (false); } void -malloc_mutex_prefork(malloc_mutex_t *mutex) +malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) { - malloc_mutex_lock(mutex); + malloc_mutex_lock(tsdn, mutex); } void -malloc_mutex_postfork_parent(malloc_mutex_t *mutex) +malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) { - malloc_mutex_unlock(mutex); + malloc_mutex_unlock(tsdn, mutex); } void -malloc_mutex_postfork_child(malloc_mutex_t *mutex) +malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) { #ifdef JEMALLOC_MUTEX_INIT_CB - malloc_mutex_unlock(mutex); + malloc_mutex_unlock(tsdn, mutex); #else - if (malloc_mutex_init(mutex)) { + if (malloc_mutex_init(mutex, mutex->witness.name, + mutex->witness.rank)) { malloc_printf("<jemalloc>: Error re-initializing mutex in " "child\n"); if (opt_abort) @@ -137,7 +142,7 @@ malloc_mutex_postfork_child(malloc_mutex_t *mutex) } bool -mutex_boot(void) 
+malloc_mutex_boot(void) { #ifdef JEMALLOC_MUTEX_INIT_CB diff --git a/deps/jemalloc/src/nstime.c b/deps/jemalloc/src/nstime.c new file mode 100644 index 000000000..0948e29fa --- /dev/null +++ b/deps/jemalloc/src/nstime.c @@ -0,0 +1,194 @@ +#include "jemalloc/internal/jemalloc_internal.h" + +#define BILLION UINT64_C(1000000000) + +void +nstime_init(nstime_t *time, uint64_t ns) +{ + + time->ns = ns; +} + +void +nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) +{ + + time->ns = sec * BILLION + nsec; +} + +uint64_t +nstime_ns(const nstime_t *time) +{ + + return (time->ns); +} + +uint64_t +nstime_sec(const nstime_t *time) +{ + + return (time->ns / BILLION); +} + +uint64_t +nstime_nsec(const nstime_t *time) +{ + + return (time->ns % BILLION); +} + +void +nstime_copy(nstime_t *time, const nstime_t *source) +{ + + *time = *source; +} + +int +nstime_compare(const nstime_t *a, const nstime_t *b) +{ + + return ((a->ns > b->ns) - (a->ns < b->ns)); +} + +void +nstime_add(nstime_t *time, const nstime_t *addend) +{ + + assert(UINT64_MAX - time->ns >= addend->ns); + + time->ns += addend->ns; +} + +void +nstime_subtract(nstime_t *time, const nstime_t *subtrahend) +{ + + assert(nstime_compare(time, subtrahend) >= 0); + + time->ns -= subtrahend->ns; +} + +void +nstime_imultiply(nstime_t *time, uint64_t multiplier) +{ + + assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) << + 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns)); + + time->ns *= multiplier; +} + +void +nstime_idivide(nstime_t *time, uint64_t divisor) +{ + + assert(divisor != 0); + + time->ns /= divisor; +} + +uint64_t +nstime_divide(const nstime_t *time, const nstime_t *divisor) +{ + + assert(divisor->ns != 0); + + return (time->ns / divisor->ns); +} + +#ifdef _WIN32 +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) +{ + FILETIME ft; + uint64_t ticks_100ns; + + GetSystemTimeAsFileTime(&ft); + ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; + + nstime_init(time, ticks_100ns * 100); +} +#elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) +{ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +} +#elif JEMALLOC_HAVE_CLOCK_MONOTONIC +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) +{ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +} +#elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) +{ + + nstime_init(time, mach_absolute_time()); +} +#else +# define NSTIME_MONOTONIC false +static void +nstime_get(nstime_t *time) +{ + struct timeval tv; + + gettimeofday(&tv, NULL); + nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000); +} +#endif + +#ifdef JEMALLOC_JET +#undef nstime_monotonic +#define nstime_monotonic JEMALLOC_N(n_nstime_monotonic) +#endif +bool +nstime_monotonic(void) +{ + + return (NSTIME_MONOTONIC); +#undef NSTIME_MONOTONIC +} +#ifdef JEMALLOC_JET +#undef nstime_monotonic +#define nstime_monotonic JEMALLOC_N(nstime_monotonic) +nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic); +#endif + +#ifdef JEMALLOC_JET +#undef nstime_update +#define nstime_update JEMALLOC_N(n_nstime_update) +#endif +bool +nstime_update(nstime_t *time) +{ + nstime_t old_time; + + nstime_copy(&old_time, time); + nstime_get(time); + + /* Handle non-monotonic clocks. 
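nstime_update() above snapshots the old value, re-reads whichever clock nstime_get() was compiled to use, and (as the hunk continues below) restores the old value if the clock appears to have run backwards. A standalone sketch of that guard against non-monotonic time sources, assuming POSIX clock_gettime(); names are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define NS_PER_SEC	UINT64_C(1000000000)

/* Returns true (and keeps the old reading) if the clock went backwards. */
static bool
demo_time_update(uint64_t *ns)
{
	struct timespec ts;
	uint64_t now;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	now = (uint64_t)ts.tv_sec * NS_PER_SEC + (uint64_t)ts.tv_nsec;
	if (now < *ns)
		return (true);
	*ns = now;
	return (false);
}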
*/ + if (unlikely(nstime_compare(&old_time, time) > 0)) { + nstime_copy(time, &old_time); + return (true); + } + + return (false); +} +#ifdef JEMALLOC_JET +#undef nstime_update +#define nstime_update JEMALLOC_N(nstime_update) +nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update); +#endif diff --git a/deps/jemalloc/src/pages.c b/deps/jemalloc/src/pages.c index 83a167f67..5f0c9669d 100644 --- a/deps/jemalloc/src/pages.c +++ b/deps/jemalloc/src/pages.c @@ -1,29 +1,49 @@ #define JEMALLOC_PAGES_C_ #include "jemalloc/internal/jemalloc_internal.h" +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT +#include <sys/sysctl.h> +#endif + +/******************************************************************************/ +/* Data. */ + +#ifndef _WIN32 +# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE) +# define PAGES_PROT_DECOMMIT (PROT_NONE) +static int mmap_flags; +#endif +static bool os_overcommits; + /******************************************************************************/ void * -pages_map(void *addr, size_t size) +pages_map(void *addr, size_t size, bool *commit) { void *ret; assert(size != 0); + if (os_overcommits) + *commit = true; + #ifdef _WIN32 /* * If VirtualAlloc can't allocate at the given address when one is * given, it fails and returns NULL. */ - ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, + ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0), PAGE_READWRITE); #else /* * We don't use MAP_FIXED here, because it can cause the *replacement* * of existing mappings, and we only want to create new mappings. */ - ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, - -1, 0); + { + int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; + + ret = mmap(addr, size, prot, mmap_flags, -1, 0); + } assert(ret != NULL); if (ret == MAP_FAILED) @@ -67,7 +87,8 @@ pages_unmap(void *addr, size_t size) } void * -pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size) +pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, + bool *commit) { void *ret = (void *)((uintptr_t)addr + leadsize); @@ -77,7 +98,7 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size) void *new_addr; pages_unmap(addr, alloc_size); - new_addr = pages_map(ret, size); + new_addr = pages_map(ret, size, commit); if (new_addr == ret) return (ret); if (new_addr) @@ -101,17 +122,17 @@ static bool pages_commit_impl(void *addr, size_t size, bool commit) { -#ifndef _WIN32 - /* - * The following decommit/commit implementation is functional, but - * always disabled because it doesn't add value beyong improved - * debugging (at the cost of extra system calls) on systems that - * overcommit. - */ - if (false) { - int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE; - void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON | - MAP_FIXED, -1, 0); + if (os_overcommits) + return (true); + +#ifdef _WIN32 + return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT, + PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT))); +#else + { + int prot = commit ? 
PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; + void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED, + -1, 0); if (result == MAP_FAILED) return (true); if (result != addr) { @@ -125,7 +146,6 @@ pages_commit_impl(void *addr, size_t size, bool commit) return (false); } #endif - return (true); } bool @@ -150,15 +170,16 @@ pages_purge(void *addr, size_t size) #ifdef _WIN32 VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); unzeroed = true; -#elif defined(JEMALLOC_HAVE_MADVISE) -# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED -# define JEMALLOC_MADV_PURGE MADV_DONTNEED -# define JEMALLOC_MADV_ZEROS true -# elif defined(JEMALLOC_PURGE_MADVISE_FREE) +#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \ + defined(JEMALLOC_PURGE_MADVISE_DONTNEED)) +# if defined(JEMALLOC_PURGE_MADVISE_FREE) # define JEMALLOC_MADV_PURGE MADV_FREE # define JEMALLOC_MADV_ZEROS false +# elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) +# define JEMALLOC_MADV_PURGE MADV_DONTNEED +# define JEMALLOC_MADV_ZEROS true # else -# error "No madvise(2) flag defined for purging unused dirty pages." +# error No madvise(2) flag defined for purging unused dirty pages # endif int err = madvise(addr, size, JEMALLOC_MADV_PURGE); unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0); @@ -171,3 +192,111 @@ pages_purge(void *addr, size_t size) return (unzeroed); } +bool +pages_huge(void *addr, size_t size) +{ + + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + +#ifdef JEMALLOC_THP + return (madvise(addr, size, MADV_HUGEPAGE) != 0); +#else + return (false); +#endif +} + +bool +pages_nohuge(void *addr, size_t size) +{ + + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + +#ifdef JEMALLOC_THP + return (madvise(addr, size, MADV_NOHUGEPAGE) != 0); +#else + return (false); +#endif +} + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT +static bool +os_overcommits_sysctl(void) +{ + int vm_overcommit; + size_t sz; + + sz = sizeof(vm_overcommit); + if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) + return (false); /* Error. */ + + return ((vm_overcommit & 0x3) == 0); +} +#endif + +#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY +/* + * Use syscall(2) rather than {open,read,close}(2) when possible to avoid + * reentry during bootstrapping if another library has interposed system call + * wrappers. + */ +static bool +os_overcommits_proc(void) +{ + int fd; + char buf[1]; + ssize_t nread; + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) + fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY); +#else + fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY); +#endif + if (fd == -1) + return (false); /* Error. */ + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read) + nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf)); +#else + nread = read(fd, &buf, sizeof(buf)); +#endif + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) + syscall(SYS_close, fd); +#else + close(fd); +#endif + + if (nread < 1) + return (false); /* Error. */ + /* + * /proc/sys/vm/overcommit_memory meanings: + * 0: Heuristic overcommit. + * 1: Always overcommit. + * 2: Never overcommit. 
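The comment immediately above lists the /proc/sys/vm/overcommit_memory modes; the probe, completed just below, treats modes 0 (heuristic) and 1 (always) as overcommit enabled, in which case pages_map() marks mappings as already committed and pages_boot() adds MAP_NORESERVE to the mmap flags. A standalone sketch of the same check using plain open(2)/read(2) rather than the raw syscalls the patch prefers during bootstrap:

#include <fcntl.h>
#include <stdbool.h>
#include <unistd.h>

static bool
demo_os_overcommits(void)
{
	char c;
	ssize_t nread;
	int fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);

	if (fd == -1)
		return (false);		/* error: assume no overcommit */
	nread = read(fd, &c, sizeof(c));
	close(fd);
	if (nread < 1)
		return (false);
	return (c == '0' || c == '1');
}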
+ */ + return (buf[0] == '0' || buf[0] == '1'); +} +#endif + +void +pages_boot(void) +{ + +#ifndef _WIN32 + mmap_flags = MAP_PRIVATE | MAP_ANON; +#endif + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT + os_overcommits = os_overcommits_sysctl(); +#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY) + os_overcommits = os_overcommits_proc(); +# ifdef MAP_NORESERVE + if (os_overcommits) + mmap_flags |= MAP_NORESERVE; +# endif +#else + os_overcommits = false; +#endif +} diff --git a/deps/jemalloc/src/prng.c b/deps/jemalloc/src/prng.c new file mode 100644 index 000000000..76646a2a4 --- /dev/null +++ b/deps/jemalloc/src/prng.c @@ -0,0 +1,2 @@ +#define JEMALLOC_PRNG_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/deps/jemalloc/src/prof.c b/deps/jemalloc/src/prof.c index 5d2b9598f..c89dade1f 100644 --- a/deps/jemalloc/src/prof.c +++ b/deps/jemalloc/src/prof.c @@ -109,7 +109,7 @@ static char prof_dump_buf[ 1 #endif ]; -static unsigned prof_dump_buf_end; +static size_t prof_dump_buf_end; static int prof_dump_fd; /* Do not dump any profiles until bootstrapping is complete. */ @@ -121,13 +121,13 @@ static bool prof_booted = false; * definition. */ -static bool prof_tctx_should_destroy(prof_tctx_t *tctx); +static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); -static bool prof_tdata_should_destroy(prof_tdata_t *tdata, +static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached); static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached); -static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name); +static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); /******************************************************************************/ /* Red-black trees. 
*/ @@ -213,22 +213,23 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) } if ((uintptr_t)tctx > (uintptr_t)1U) { - malloc_mutex_lock(tctx->tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); tctx->prepared = false; - if (prof_tctx_should_destroy(tctx)) + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) prof_tctx_destroy(tsd, tctx); else - malloc_mutex_unlock(tctx->tdata->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); } } void -prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx) +prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx) { - prof_tctx_set(ptr, usize, tctx); + prof_tctx_set(tsdn, ptr, usize, tctx); - malloc_mutex_lock(tctx->tdata->lock); + malloc_mutex_lock(tsdn, tctx->tdata->lock); tctx->cnts.curobjs++; tctx->cnts.curbytes += usize; if (opt_prof_accum) { @@ -236,23 +237,23 @@ prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx) tctx->cnts.accumbytes += usize; } tctx->prepared = false; - malloc_mutex_unlock(tctx->tdata->lock); + malloc_mutex_unlock(tsdn, tctx->tdata->lock); } void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) { - malloc_mutex_lock(tctx->tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); assert(tctx->cnts.curobjs > 0); assert(tctx->cnts.curbytes >= usize); tctx->cnts.curobjs--; tctx->cnts.curbytes -= usize; - if (prof_tctx_should_destroy(tctx)) + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) prof_tctx_destroy(tsd, tctx); else - malloc_mutex_unlock(tctx->tdata->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); } void @@ -277,7 +278,7 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata) tdata->enq = true; } - malloc_mutex_lock(&bt2gctx_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); } JEMALLOC_INLINE_C void @@ -287,7 +288,7 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata) cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); - malloc_mutex_unlock(&bt2gctx_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); if (tdata != NULL) { bool idump, gdump; @@ -300,9 +301,9 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata) tdata->enq_gdump = false; if (idump) - prof_idump(); + prof_idump(tsd_tsdn(tsd)); if (gdump) - prof_gdump(); + prof_gdump(tsd_tsdn(tsd)); } } @@ -546,14 +547,15 @@ prof_tdata_mutex_choose(uint64_t thr_uid) } static prof_gctx_t * -prof_gctx_create(tsd_t *tsd, prof_bt_t *bt) +prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { /* * Create a single allocation that has space for vec of length bt->len. */ - prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t, - vec) + (bt->len * sizeof(void *)), false, tcache_get(tsd, true), - true, NULL); + size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); + prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, + size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), + true); if (gctx == NULL) return (NULL); gctx->lock = prof_gctx_mutex_choose(); @@ -585,7 +587,7 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, * into this function. */ prof_enter(tsd, tdata_self); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); assert(gctx->nlimbo != 0); if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { /* Remove gctx from bt2gctx. */ @@ -593,24 +595,25 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, not_reached(); prof_leave(tsd, tdata_self); /* Destroy gctx. 
*/ - malloc_mutex_unlock(gctx->lock); - idalloctm(tsd, gctx, tcache_get(tsd, false), true); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true); } else { /* * Compensate for increment in prof_tctx_destroy() or * prof_lookup(). */ gctx->nlimbo--; - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_leave(tsd, tdata_self); } } -/* tctx->tdata->lock must be held. */ static bool -prof_tctx_should_destroy(prof_tctx_t *tctx) +prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); + if (opt_prof_accum) return (false); if (tctx->cnts.curobjs != 0) @@ -633,7 +636,6 @@ prof_gctx_should_destroy(prof_gctx_t *gctx) return (true); } -/* tctx->tdata->lock is held upon entry, and released before return. */ static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { @@ -641,6 +643,8 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) prof_gctx_t *gctx = tctx->gctx; bool destroy_tdata, destroy_tctx, destroy_gctx; + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + assert(tctx->cnts.curobjs == 0); assert(tctx->cnts.curbytes == 0); assert(!opt_prof_accum); @@ -648,10 +652,10 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) assert(tctx->cnts.accumbytes == 0); ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); - destroy_tdata = prof_tdata_should_destroy(tdata, false); - malloc_mutex_unlock(tdata->lock); + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: tctx_tree_remove(&gctx->tctxs, tctx); @@ -691,17 +695,19 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) destroy_tctx = false; destroy_gctx = false; } - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); if (destroy_gctx) { prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, tdata); } + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); + if (destroy_tdata) prof_tdata_destroy(tsd, tdata, false); if (destroy_tctx) - idalloctm(tsd, tctx, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true); } static bool @@ -721,7 +727,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, prof_enter(tsd, tdata); if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { /* bt has never been seen before. Insert it. */ - gctx.p = prof_gctx_create(tsd, bt); + gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); if (gctx.v == NULL) { prof_leave(tsd, tdata); return (true); @@ -730,7 +736,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { /* OOM. */ prof_leave(tsd, tdata); - idalloctm(tsd, gctx.v, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true); return (true); } new_gctx = true; @@ -739,9 +745,9 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, * Increment nlimbo, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). 
*/ - malloc_mutex_lock(gctx.p->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); gctx.p->nlimbo++; - malloc_mutex_unlock(gctx.p->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); new_gctx = false; } prof_leave(tsd, tdata); @@ -768,13 +774,12 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) if (tdata == NULL) return (NULL); - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); if (!not_found) /* Note double negative! */ ret.p->prepared = true; - malloc_mutex_unlock(tdata->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (not_found) { - tcache_t *tcache; void *btkey; prof_gctx_t *gctx; bool new_gctx, error; @@ -788,9 +793,9 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) return (NULL); /* Link a prof_tctx_t into gctx for this thread. */ - tcache = tcache_get(tsd, true); - ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, tcache, true, - NULL); + ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), + size2index(sizeof(prof_tctx_t)), false, NULL, true, + arena_ichoose(tsd, NULL), true); if (ret.p == NULL) { if (new_gctx) prof_gctx_try_destroy(tsd, tdata, gctx, tdata); @@ -804,41 +809,41 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) ret.p->tctx_uid = tdata->tctx_uid_next++; ret.p->prepared = true; ret.p->state = prof_tctx_state_initializing; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); - malloc_mutex_unlock(tdata->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (error) { if (new_gctx) prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - idalloctm(tsd, ret.v, tcache, true); + idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true); return (NULL); } - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); ret.p->state = prof_tctx_state_nominal; tctx_tree_insert(&gctx->tctxs, ret.p); gctx->nlimbo--; - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } return (ret.p); } +/* + * The bodies of this function and prof_leakcheck() are compiled out unless heap + * profiling is enabled, so that it is possible to compile jemalloc with + * floating point support completely disabled. Avoiding floating point code is + * important on memory-constrained systems, but it also enables a workaround for + * versions of glibc that don't properly save/restore floating point registers + * during dynamic lazy symbol loading (which internally calls into whatever + * malloc implementation happens to be integrated into the application). Note + * that some compilers (e.g. gcc 4.8) may use floating point registers for fast + * memory moves, so jemalloc must be compiled with such optimizations disabled + * (e.g. + * -mno-sse) in order for the workaround to be complete. + */ void prof_sample_threshold_update(prof_tdata_t *tdata) { - /* - * The body of this function is compiled out unless heap profiling is - * enabled, so that it is possible to compile jemalloc with floating - * point support completely disabled. Avoiding floating point code is - * important on memory-constrained systems, but it also enables a - * workaround for versions of glibc that don't properly save/restore - * floating point registers during dynamic lazy symbol loading (which - * internally calls into whatever malloc implementation happens to be - * integrated into the application). Note that some compilers (e.g. 
- * gcc 4.8) may use floating point registers for fast memory moves, so - * jemalloc must be compiled with such optimizations disabled (e.g. - * -mno-sse) in order for the workaround to be complete. - */ #ifdef JEMALLOC_PROF uint64_t r; double u; @@ -869,8 +874,7 @@ prof_sample_threshold_update(prof_tdata_t *tdata) * pp 500 * (http://luc.devroye.org/rnbookindex.html) */ - prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005), - UINT64_C(1442695040888963407)); + r = prng_lg_range_u64(&tdata->prng_state, 53); u = (double)r * (1.0/9007199254740992.0L); tdata->bytes_until_sample = (uint64_t)(log(u) / log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) @@ -893,11 +897,13 @@ size_t prof_tdata_count(void) { size_t tdata_count = 0; + tsdn_t *tsdn; - malloc_mutex_lock(&tdatas_mtx); + tsdn = tsdn_fetch(); + malloc_mutex_lock(tsdn, &tdatas_mtx); tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, (void *)&tdata_count); - malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(tsdn, &tdatas_mtx); return (tdata_count); } @@ -916,9 +922,9 @@ prof_bt_count(void) if (tdata == NULL) return (0); - malloc_mutex_lock(&bt2gctx_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); bt_count = ckh_count(&bt2gctx); - malloc_mutex_unlock(&bt2gctx_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); return (bt_count); } @@ -988,7 +994,7 @@ prof_dump_close(bool propagate_err) static bool prof_dump_write(bool propagate_err, const char *s) { - unsigned i, slen, n; + size_t i, slen, n; cassert(config_prof); @@ -1031,20 +1037,21 @@ prof_dump_printf(bool propagate_err, const char *format, ...) return (ret); } -/* tctx->tdata->lock is held. */ static void -prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata) +prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { - malloc_mutex_lock(tctx->gctx->lock); + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); + + malloc_mutex_lock(tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_initializing: - malloc_mutex_unlock(tctx->gctx->lock); + malloc_mutex_unlock(tsdn, tctx->gctx->lock); return; case prof_tctx_state_nominal: tctx->state = prof_tctx_state_dumping; - malloc_mutex_unlock(tctx->gctx->lock); + malloc_mutex_unlock(tsdn, tctx->gctx->lock); memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); @@ -1063,11 +1070,12 @@ prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata) } } -/* gctx->lock is held. */ static void -prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx) +prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { + malloc_mutex_assert_owner(tsdn, gctx->lock); + gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; if (opt_prof_accum) { @@ -1076,10 +1084,12 @@ prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx) } } -/* tctx->gctx is held. */ static prof_tctx_t * prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: @@ -1087,7 +1097,7 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: - prof_tctx_merge_gctx(tctx, tctx->gctx); + prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); break; default: not_reached(); @@ -1096,11 +1106,18 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) return (NULL); } -/* gctx->lock is held. 
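The relocated comment and the new prng_lg_range_u64() call above implement exponential sampling: a 53-bit uniform draw is mapped to a byte interval averaging the configured 2^lg_prof_sample, per the Devroye reference cited in the hunk. A standalone sketch of that mapping in isolation (r53 stands in for the PRNG output, lg_sample is assumed nonzero; link with -lm):

#include <math.h>
#include <stdint.h>

static uint64_t
demo_bytes_until_sample(uint64_t r53, unsigned lg_sample)
{
	double u;

	if (r53 == 0)
		r53 = 1;	/* keep log() finite; u must lie in (0, 1) */
	u = (double)r53 * (1.0 / 9007199254740992.0);	/* r53 / 2^53 */
	return ((uint64_t)(log(u) / log(1.0 - (1.0 /
	    (double)((uint64_t)1U << lg_sample)))) + 1);
}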
*/ +struct prof_tctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + static prof_tctx_t * -prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) +prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { - bool propagate_err = *(bool *)arg; + struct prof_tctx_dump_iter_arg_s *arg = + (struct prof_tctx_dump_iter_arg_s *)opaque; + + malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_initializing: @@ -1109,7 +1126,7 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: - if (prof_dump_printf(propagate_err, + if (prof_dump_printf(arg->propagate_err, " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, @@ -1122,12 +1139,14 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) return (NULL); } -/* tctx->gctx is held. */ static prof_tctx_t * prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; prof_tctx_t *ret; + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + switch (tctx->state) { case prof_tctx_state_nominal: /* New since dumping started; ignore. */ @@ -1148,12 +1167,12 @@ label_return: } static void -prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) +prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { cassert(config_prof); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsdn, gctx->lock); /* * Increment nlimbo so that gctx won't go away before dump. @@ -1165,19 +1184,26 @@ prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsdn, gctx->lock); } +struct prof_gctx_merge_iter_arg_s { + tsdn_t *tsdn; + size_t leak_ngctx; +}; + static prof_gctx_t * -prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg) +prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { - size_t *leak_ngctx = (size_t *)arg; + struct prof_gctx_merge_iter_arg_s *arg = + (struct prof_gctx_merge_iter_arg_s *)opaque; - malloc_mutex_lock(gctx->lock); - tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL); + malloc_mutex_lock(arg->tsdn, gctx->lock); + tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, + (void *)arg->tsdn); if (gctx->cnt_summed.curobjs != 0) - (*leak_ngctx)++; - malloc_mutex_unlock(gctx->lock); + arg->leak_ngctx++; + malloc_mutex_unlock(arg->tsdn, gctx->lock); return (NULL); } @@ -1196,7 +1222,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) */ while ((gctx = gctx_tree_first(gctxs)) != NULL) { gctx_tree_remove(gctxs, gctx); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); { prof_tctx_t *next; @@ -1204,14 +1230,15 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) do { prof_tctx_t *to_destroy = tctx_tree_iter(&gctx->tctxs, next, - prof_tctx_finish_iter, NULL); + prof_tctx_finish_iter, + (void *)tsd_tsdn(tsd)); if (to_destroy != NULL) { next = tctx_tree_next(&gctx->tctxs, to_destroy); tctx_tree_remove(&gctx->tctxs, to_destroy); - idalloctm(tsd, to_destroy, - tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), to_destroy, + NULL, true, true); } else next = NULL; } while (next != NULL); @@ -1219,19 +1246,26 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t 
*gctxs) gctx->nlimbo--; if (prof_gctx_should_destroy(gctx)) { gctx->nlimbo++; - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_gctx_try_destroy(tsd, tdata, gctx, tdata); } else - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } } +struct prof_tdata_merge_iter_arg_s { + tsdn_t *tsdn; + prof_cnt_t cnt_all; +}; + static prof_tdata_t * -prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) +prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *opaque) { - prof_cnt_t *cnt_all = (prof_cnt_t *)arg; + struct prof_tdata_merge_iter_arg_s *arg = + (struct prof_tdata_merge_iter_arg_s *)opaque; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(arg->tsdn, tdata->lock); if (!tdata->expired) { size_t tabind; union { @@ -1243,17 +1277,17 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, &tctx.v);) - prof_tctx_merge_tdata(tctx.p, tdata); + prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); - cnt_all->curobjs += tdata->cnt_summed.curobjs; - cnt_all->curbytes += tdata->cnt_summed.curbytes; + arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; + arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; if (opt_prof_accum) { - cnt_all->accumobjs += tdata->cnt_summed.accumobjs; - cnt_all->accumbytes += tdata->cnt_summed.accumbytes; + arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; + arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; } } else tdata->dumping = false; - malloc_mutex_unlock(tdata->lock); + malloc_mutex_unlock(arg->tsdn, tdata->lock); return (NULL); } @@ -1282,7 +1316,7 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) #define prof_dump_header JEMALLOC_N(prof_dump_header_impl) #endif static bool -prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all) +prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) { bool ret; @@ -1293,10 +1327,10 @@ prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all) cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) return (true); - malloc_mutex_lock(&tdatas_mtx); + malloc_mutex_lock(tsdn, &tdatas_mtx); ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, (void *)&propagate_err) != NULL); - malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(tsdn, &tdatas_mtx); return (ret); } #ifdef JEMALLOC_JET @@ -1305,15 +1339,16 @@ prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all) prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl); #endif -/* gctx->lock is held. */ static bool -prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt, - prof_gctx_tree_t *gctxs) +prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, + const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { bool ret; unsigned i; + struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; cassert(config_prof); + malloc_mutex_assert_owner(tsdn, gctx->lock); /* Avoid dumping such gctx's that have no useful data. 
*/ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || @@ -1347,8 +1382,10 @@ prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt, goto label_return; } + prof_tctx_dump_iter_arg.tsdn = tsdn; + prof_tctx_dump_iter_arg.propagate_err = propagate_err; if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, - (void *)&propagate_err) != NULL) { + (void *)&prof_tctx_dump_iter_arg) != NULL) { ret = true; goto label_return; } @@ -1358,6 +1395,7 @@ label_return: return (ret); } +#ifndef _WIN32 JEMALLOC_FORMAT_PRINTF(1, 2) static int prof_open_maps(const char *format, ...) @@ -1373,6 +1411,18 @@ prof_open_maps(const char *format, ...) return (mfd); } +#endif + +static int +prof_getpid(void) +{ + +#ifdef _WIN32 + return (GetCurrentProcessId()); +#else + return (getpid()); +#endif +} static bool prof_dump_maps(bool propagate_err) @@ -1383,9 +1433,11 @@ prof_dump_maps(bool propagate_err) cassert(config_prof); #ifdef __FreeBSD__ mfd = prof_open_maps("/proc/curproc/map"); +#elif defined(_WIN32) + mfd = -1; // Not implemented #else { - int pid = getpid(); + int pid = prof_getpid(); mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); if (mfd == -1) @@ -1426,39 +1478,66 @@ label_return: return (ret); } +/* + * See prof_sample_threshold_update() comment for why the body of this function + * is conditionally compiled. + */ static void prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, const char *filename) { +#ifdef JEMALLOC_PROF + /* + * Scaling is equivalent AdjustSamples() in jeprof, but the result may + * differ slightly from what jeprof reports, because here we scale the + * summary values, whereas jeprof scales each context individually and + * reports the sums of the scaled values. + */ if (cnt_all->curbytes != 0) { - malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %" - FMTu64" object%s, %zu context%s\n", - cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "", - cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "", - leak_ngctx, (leak_ngctx != 1) ? "s" : ""); + double sample_period = (double)((uint64_t)1 << lg_prof_sample); + double ratio = (((double)cnt_all->curbytes) / + (double)cnt_all->curobjs) / sample_period; + double scale_factor = 1.0 / (1.0 - exp(-ratio)); + uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) + * scale_factor); + uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * + scale_factor); + + malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64 + " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", + curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != + 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? 
"s" : ""); malloc_printf( "<jemalloc>: Run jeprof on \"%s\" for leak detail\n", filename); } +#endif } +struct prof_gctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + static prof_gctx_t * -prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg) +prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { prof_gctx_t *ret; - bool propagate_err = *(bool *)arg; + struct prof_gctx_dump_iter_arg_s *arg = + (struct prof_gctx_dump_iter_arg_s *)opaque; - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(arg->tsdn, gctx->lock); - if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) { + if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, + gctxs)) { ret = gctx; goto label_return; } ret = NULL; label_return: - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(arg->tsdn, gctx->lock); return (ret); } @@ -1466,13 +1545,14 @@ static bool prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) { prof_tdata_t *tdata; - prof_cnt_t cnt_all; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; size_t tabind; union { prof_gctx_t *p; void *v; } gctx; - size_t leak_ngctx; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; prof_gctx_tree_t gctxs; cassert(config_prof); @@ -1481,7 +1561,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) if (tdata == NULL) return (true); - malloc_mutex_lock(&prof_dump_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); prof_enter(tsd, tdata); /* @@ -1490,20 +1570,24 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) */ gctx_tree_new(&gctxs); for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) - prof_dump_gctx_prep(gctx.p, &gctxs); + prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs); /* * Iterate over tdatas, and for the non-expired ones snapshot their tctx * stats and merge them into the associated gctx's. */ - memset(&cnt_all, 0, sizeof(prof_cnt_t)); - malloc_mutex_lock(&tdatas_mtx); - tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all); - malloc_mutex_unlock(&tdatas_mtx); + prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd); + memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t)); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, + (void *)&prof_tdata_merge_iter_arg); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); /* Merge tctx stats into gctx's. */ - leak_ngctx = 0; - gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx); + prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd); + prof_gctx_merge_iter_arg.leak_ngctx = 0; + gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, + (void *)&prof_gctx_merge_iter_arg); prof_leave(tsd, tdata); @@ -1512,12 +1596,15 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) goto label_open_close_error; /* Dump profile header. */ - if (prof_dump_header(propagate_err, &cnt_all)) + if (prof_dump_header(tsd_tsdn(tsd), propagate_err, + &prof_tdata_merge_iter_arg.cnt_all)) goto label_write_error; /* Dump per gctx profile stats. */ + prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd); + prof_gctx_dump_iter_arg.propagate_err = propagate_err; if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, - (void *)&propagate_err) != NULL) + (void *)&prof_gctx_dump_iter_arg) != NULL) goto label_write_error; /* Dump /proc/<pid>/maps if possible. 
*/ @@ -1528,17 +1615,18 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) goto label_open_close_error; prof_gctx_finish(tsd, &gctxs); - malloc_mutex_unlock(&prof_dump_mtx); - - if (leakcheck) - prof_leakcheck(&cnt_all, leak_ngctx, filename); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); + if (leakcheck) { + prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all, + prof_gctx_merge_iter_arg.leak_ngctx, filename); + } return (false); label_write_error: prof_dump_close(propagate_err); label_open_close_error: prof_gctx_finish(tsd, &gctxs); - malloc_mutex_unlock(&prof_dump_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); return (true); } @@ -1554,12 +1642,12 @@ prof_dump_filename(char *filename, char v, uint64_t vseq) /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"FMTu64".%c%"FMTu64".heap", - opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq); + opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); } else { /* "<prefix>.<pid>.<seq>.<v>.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"FMTu64".%c.heap", - opt_prof_prefix, (int)getpid(), prof_dump_seq, v); + opt_prof_prefix, prof_getpid(), prof_dump_seq, v); } prof_dump_seq++; } @@ -1578,23 +1666,23 @@ prof_fdump(void) return; tsd = tsd_fetch(); - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename, 'f', VSEQ_INVALID); - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump(tsd, false, filename, opt_prof_leak); } void -prof_idump(void) +prof_idump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); - if (!prof_booted) + if (!prof_booted || tsdn_null(tsdn)) return; - tsd = tsd_fetch(); + tsd = tsdn_tsd(tsdn); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) return; @@ -1605,50 +1693,48 @@ prof_idump(void) if (opt_prof_prefix[0] != '\0') { char filename[PATH_MAX + 1]; - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename, 'i', prof_dump_iseq); prof_dump_iseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); } } bool -prof_mdump(const char *filename) +prof_mdump(tsd_t *tsd, const char *filename) { - tsd_t *tsd; char filename_buf[DUMP_FILENAME_BUFSIZE]; cassert(config_prof); if (!opt_prof || !prof_booted) return (true); - tsd = tsd_fetch(); if (filename == NULL) { /* No filename specified, so automatically generate one. 
*/ if (opt_prof_prefix[0] == '\0') return (true); - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename_buf, 'm', prof_dump_mseq); prof_dump_mseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); filename = filename_buf; } return (prof_dump(tsd, true, filename, false)); } void -prof_gdump(void) +prof_gdump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); - if (!prof_booted) + if (!prof_booted || tsdn_null(tsdn)) return; - tsd = tsd_fetch(); + tsd = tsdn_tsd(tsdn); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) return; @@ -1659,10 +1745,10 @@ prof_gdump(void) if (opt_prof_prefix[0] != '\0') { char filename[DUMP_FILENAME_BUFSIZE]; - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); prof_dump_filename(filename, 'u', prof_dump_useq); prof_dump_useq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); } } @@ -1691,14 +1777,14 @@ prof_bt_keycomp(const void *k1, const void *k2) } JEMALLOC_INLINE_C uint64_t -prof_thr_uid_alloc(void) +prof_thr_uid_alloc(tsdn_t *tsdn) { uint64_t thr_uid; - malloc_mutex_lock(&next_thr_uid_mtx); + malloc_mutex_lock(tsdn, &next_thr_uid_mtx); thr_uid = next_thr_uid; next_thr_uid++; - malloc_mutex_unlock(&next_thr_uid_mtx); + malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); return (thr_uid); } @@ -1708,14 +1794,13 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, char *thread_name, bool active) { prof_tdata_t *tdata; - tcache_t *tcache; cassert(config_prof); /* Initialize an empty cache for this thread. */ - tcache = tcache_get(tsd, true); - tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false, - tcache, true, NULL); + tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), + size2index(sizeof(prof_tdata_t)), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); if (tdata == NULL) return (NULL); @@ -1727,9 +1812,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, tdata->expired = false; tdata->tctx_uid_next = 0; - if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, - prof_bt_hash, prof_bt_keycomp)) { - idalloctm(tsd, tdata, tcache, true); + if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { + idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true); return (NULL); } @@ -1743,9 +1828,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, tdata->dumping = false; tdata->active = active; - malloc_mutex_lock(&tdatas_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_insert(&tdatas, tdata); - malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); return (tdata); } @@ -1754,13 +1839,12 @@ prof_tdata_t * prof_tdata_init(tsd_t *tsd) { - return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL, - prof_thread_active_init_get())); + return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, + NULL, prof_thread_active_init_get(tsd_tsdn(tsd)))); } -/* tdata->lock must be held. 
*/ static bool -prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached) +prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { if (tdata->attached && !even_if_attached) @@ -1770,32 +1854,40 @@ prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached) return (true); } -/* tdatas_mtx must be held. */ +static bool +prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached) +{ + + malloc_mutex_assert_owner(tsdn, tdata->lock); + + return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); +} + static void prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { - tcache_t *tcache; - assert(prof_tdata_should_destroy(tdata, even_if_attached)); - assert(tsd_prof_tdata_get(tsd) != tdata); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_remove(&tdatas, tdata); - tcache = tcache_get(tsd, false); + assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); + if (tdata->thread_name != NULL) - idalloctm(tsd, tdata->thread_name, tcache, true); + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true); ckh_delete(tsd, &tdata->bt2tctx); - idalloctm(tsd, tdata, tcache, true); + idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true); } static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { - malloc_mutex_lock(&tdatas_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); prof_tdata_destroy_locked(tsd, tdata, even_if_attached); - malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); } static void @@ -1803,9 +1895,10 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { bool destroy_tdata; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); if (tdata->attached) { - destroy_tdata = prof_tdata_should_destroy(tdata, true); + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, + true); /* * Only detach if !destroy_tdata, because detaching would allow * another thread to win the race to destroy tdata. @@ -1815,7 +1908,7 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) tsd_prof_tdata_set(tsd, NULL); } else destroy_tdata = false; - malloc_mutex_unlock(tdata->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (destroy_tdata) prof_tdata_destroy(tsd, tdata, true); } @@ -1826,7 +1919,7 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) uint64_t thr_uid = tdata->thr_uid; uint64_t thr_discrim = tdata->thr_discrim + 1; char *thread_name = (tdata->thread_name != NULL) ? - prof_thread_name_alloc(tsd, tdata->thread_name) : NULL; + prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL; bool active = tdata->active; prof_tdata_detach(tsd, tdata); @@ -1835,18 +1928,18 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) } static bool -prof_tdata_expire(prof_tdata_t *tdata) +prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { bool destroy_tdata; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsdn, tdata->lock); if (!tdata->expired) { tdata->expired = true; destroy_tdata = tdata->attached ? 
false : - prof_tdata_should_destroy(tdata, false); + prof_tdata_should_destroy(tsdn, tdata, false); } else destroy_tdata = false; - malloc_mutex_unlock(tdata->lock); + malloc_mutex_unlock(tsdn, tdata->lock); return (destroy_tdata); } @@ -1854,8 +1947,9 @@ prof_tdata_expire(prof_tdata_t *tdata) static prof_tdata_t * prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; - return (prof_tdata_expire(tdata) ? tdata : NULL); + return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL); } void @@ -1865,15 +1959,15 @@ prof_reset(tsd_t *tsd, size_t lg_sample) assert(lg_sample < (sizeof(uint64_t) << 3)); - malloc_mutex_lock(&prof_dump_mtx); - malloc_mutex_lock(&tdatas_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); lg_prof_sample = lg_sample; next = NULL; do { prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, - prof_tdata_reset_iter, NULL); + prof_tdata_reset_iter, (void *)tsd); if (to_destroy != NULL) { next = tdata_tree_next(&tdatas, to_destroy); prof_tdata_destroy_locked(tsd, to_destroy, false); @@ -1881,8 +1975,8 @@ prof_reset(tsd_t *tsd, size_t lg_sample) next = NULL; } while (next != NULL); - malloc_mutex_unlock(&tdatas_mtx); - malloc_mutex_unlock(&prof_dump_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); } void @@ -1899,35 +1993,33 @@ prof_tdata_cleanup(tsd_t *tsd) } bool -prof_active_get(void) +prof_active_get(tsdn_t *tsdn) { bool prof_active_current; - malloc_mutex_lock(&prof_active_mtx); + malloc_mutex_lock(tsdn, &prof_active_mtx); prof_active_current = prof_active; - malloc_mutex_unlock(&prof_active_mtx); + malloc_mutex_unlock(tsdn, &prof_active_mtx); return (prof_active_current); } bool -prof_active_set(bool active) +prof_active_set(tsdn_t *tsdn, bool active) { bool prof_active_old; - malloc_mutex_lock(&prof_active_mtx); + malloc_mutex_lock(tsdn, &prof_active_mtx); prof_active_old = prof_active; prof_active = active; - malloc_mutex_unlock(&prof_active_mtx); + malloc_mutex_unlock(tsdn, &prof_active_mtx); return (prof_active_old); } const char * -prof_thread_name_get(void) +prof_thread_name_get(tsd_t *tsd) { - tsd_t *tsd; prof_tdata_t *tdata; - tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); if (tdata == NULL) return (""); @@ -1935,7 +2027,7 @@ prof_thread_name_get(void) } static char * -prof_thread_name_alloc(tsd_t *tsd, const char *thread_name) +prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { char *ret; size_t size; @@ -1947,7 +2039,8 @@ prof_thread_name_alloc(tsd_t *tsd, const char *thread_name) if (size == 1) return (""); - ret = iallocztm(tsd, size, false, tcache_get(tsd, true), true, NULL); + ret = iallocztm(tsdn, size, size2index(size), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); if (ret == NULL) return (NULL); memcpy(ret, thread_name, size); @@ -1974,13 +2067,12 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name) return (EFAULT); } - s = prof_thread_name_alloc(tsd, thread_name); + s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); if (s == NULL) return (EAGAIN); if (tdata->thread_name != NULL) { - idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false), - true); + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true); tdata->thread_name = NULL; } if (strlen(s) > 0) @@ -1989,12 +2081,10 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name) } bool -prof_thread_active_get(void) +prof_thread_active_get(tsd_t *tsd) { - tsd_t *tsd; 
prof_tdata_t *tdata; - tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); if (tdata == NULL) return (false); @@ -2002,12 +2092,10 @@ prof_thread_active_get(void) } bool -prof_thread_active_set(bool active) +prof_thread_active_set(tsd_t *tsd, bool active) { - tsd_t *tsd; prof_tdata_t *tdata; - tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); if (tdata == NULL) return (true); @@ -2016,48 +2104,48 @@ prof_thread_active_set(bool active) } bool -prof_thread_active_init_get(void) +prof_thread_active_init_get(tsdn_t *tsdn) { bool active_init; - malloc_mutex_lock(&prof_thread_active_init_mtx); + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); active_init = prof_thread_active_init; - malloc_mutex_unlock(&prof_thread_active_init_mtx); + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); return (active_init); } bool -prof_thread_active_init_set(bool active_init) +prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) { bool active_init_old; - malloc_mutex_lock(&prof_thread_active_init_mtx); + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); active_init_old = prof_thread_active_init; prof_thread_active_init = active_init; - malloc_mutex_unlock(&prof_thread_active_init_mtx); + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); return (active_init_old); } bool -prof_gdump_get(void) +prof_gdump_get(tsdn_t *tsdn) { bool prof_gdump_current; - malloc_mutex_lock(&prof_gdump_mtx); + malloc_mutex_lock(tsdn, &prof_gdump_mtx); prof_gdump_current = prof_gdump_val; - malloc_mutex_unlock(&prof_gdump_mtx); + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); return (prof_gdump_current); } bool -prof_gdump_set(bool gdump) +prof_gdump_set(tsdn_t *tsdn, bool gdump) { bool prof_gdump_old; - malloc_mutex_lock(&prof_gdump_mtx); + malloc_mutex_lock(tsdn, &prof_gdump_mtx); prof_gdump_old = prof_gdump_val; prof_gdump_val = gdump; - malloc_mutex_unlock(&prof_gdump_mtx); + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); return (prof_gdump_old); } @@ -2098,47 +2186,54 @@ prof_boot1(void) } bool -prof_boot2(void) +prof_boot2(tsd_t *tsd) { cassert(config_prof); if (opt_prof) { - tsd_t *tsd; unsigned i; lg_prof_sample = opt_lg_prof_sample; prof_active = opt_prof_active; - if (malloc_mutex_init(&prof_active_mtx)) + if (malloc_mutex_init(&prof_active_mtx, "prof_active", + WITNESS_RANK_PROF_ACTIVE)) return (true); prof_gdump_val = opt_prof_gdump; - if (malloc_mutex_init(&prof_gdump_mtx)) + if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", + WITNESS_RANK_PROF_GDUMP)) return (true); prof_thread_active_init = opt_prof_thread_active_init; - if (malloc_mutex_init(&prof_thread_active_init_mtx)) + if (malloc_mutex_init(&prof_thread_active_init_mtx, + "prof_thread_active_init", + WITNESS_RANK_PROF_THREAD_ACTIVE_INIT)) return (true); - tsd = tsd_fetch(); if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, prof_bt_keycomp)) return (true); - if (malloc_mutex_init(&bt2gctx_mtx)) + if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", + WITNESS_RANK_PROF_BT2GCTX)) return (true); tdata_tree_new(&tdatas); - if (malloc_mutex_init(&tdatas_mtx)) + if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", + WITNESS_RANK_PROF_TDATAS)) return (true); next_thr_uid = 0; - if (malloc_mutex_init(&next_thr_uid_mtx)) + if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", + WITNESS_RANK_PROF_NEXT_THR_UID)) return (true); - if (malloc_mutex_init(&prof_dump_seq_mtx)) + if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq", + WITNESS_RANK_PROF_DUMP_SEQ)) return (true); - if (malloc_mutex_init(&prof_dump_mtx)) + if 
(malloc_mutex_init(&prof_dump_mtx, "prof_dump", + WITNESS_RANK_PROF_DUMP)) return (true); if (opt_prof_final && opt_prof_prefix[0] != '\0' && @@ -2148,21 +2243,23 @@ prof_boot2(void) abort(); } - gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS * - sizeof(malloc_mutex_t)); + gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), + PROF_NCTX_LOCKS * sizeof(malloc_mutex_t)); if (gctx_locks == NULL) return (true); for (i = 0; i < PROF_NCTX_LOCKS; i++) { - if (malloc_mutex_init(&gctx_locks[i])) + if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", + WITNESS_RANK_PROF_GCTX)) return (true); } - tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS * - sizeof(malloc_mutex_t)); + tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), + PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t)); if (tdata_locks == NULL) return (true); for (i = 0; i < PROF_NTDATA_LOCKS; i++) { - if (malloc_mutex_init(&tdata_locks[i])) + if (malloc_mutex_init(&tdata_locks[i], "prof_tdata", + WITNESS_RANK_PROF_TDATA)) return (true); } } @@ -2181,56 +2278,77 @@ prof_boot2(void) } void -prof_prefork(void) +prof_prefork0(tsdn_t *tsdn) { if (opt_prof) { unsigned i; - malloc_mutex_prefork(&tdatas_mtx); - malloc_mutex_prefork(&bt2gctx_mtx); - malloc_mutex_prefork(&next_thr_uid_mtx); - malloc_mutex_prefork(&prof_dump_seq_mtx); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_prefork(&gctx_locks[i]); + malloc_mutex_prefork(tsdn, &prof_dump_mtx); + malloc_mutex_prefork(tsdn, &bt2gctx_mtx); + malloc_mutex_prefork(tsdn, &tdatas_mtx); for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_prefork(&tdata_locks[i]); + malloc_mutex_prefork(tsdn, &tdata_locks[i]); + for (i = 0; i < PROF_NCTX_LOCKS; i++) + malloc_mutex_prefork(tsdn, &gctx_locks[i]); } } void -prof_postfork_parent(void) +prof_prefork1(tsdn_t *tsdn) +{ + + if (opt_prof) { + malloc_mutex_prefork(tsdn, &prof_active_mtx); + malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx); + malloc_mutex_prefork(tsdn, &prof_gdump_mtx); + malloc_mutex_prefork(tsdn, &next_thr_uid_mtx); + malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx); + } +} + +void +prof_postfork_parent(tsdn_t *tsdn) { if (opt_prof) { unsigned i; - for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_postfork_parent(&tdata_locks[i]); + malloc_mutex_postfork_parent(tsdn, + &prof_thread_active_init_mtx); + malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_active_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_parent(&gctx_locks[i]); - malloc_mutex_postfork_parent(&prof_dump_seq_mtx); - malloc_mutex_postfork_parent(&next_thr_uid_mtx); - malloc_mutex_postfork_parent(&bt2gctx_mtx); - malloc_mutex_postfork_parent(&tdatas_mtx); + malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]); + for (i = 0; i < PROF_NTDATA_LOCKS; i++) + malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]); + malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); + malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); } } void -prof_postfork_child(void) +prof_postfork_child(tsdn_t *tsdn) { if (opt_prof) { unsigned i; - for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_postfork_child(&tdata_locks[i]); + malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx); + malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_child(tsdn, 
&prof_dump_seq_mtx); + malloc_mutex_postfork_child(tsdn, &prof_active_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_child(&gctx_locks[i]); - malloc_mutex_postfork_child(&prof_dump_seq_mtx); - malloc_mutex_postfork_child(&next_thr_uid_mtx); - malloc_mutex_postfork_child(&bt2gctx_mtx); - malloc_mutex_postfork_child(&tdatas_mtx); + malloc_mutex_postfork_child(tsdn, &gctx_locks[i]); + for (i = 0; i < PROF_NTDATA_LOCKS; i++) + malloc_mutex_postfork_child(tsdn, &tdata_locks[i]); + malloc_mutex_postfork_child(tsdn, &tdatas_mtx); + malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); } } diff --git a/deps/jemalloc/src/quarantine.c b/deps/jemalloc/src/quarantine.c index 6c43dfcaa..18903fb5c 100644 --- a/deps/jemalloc/src/quarantine.c +++ b/deps/jemalloc/src/quarantine.c @@ -13,22 +13,22 @@ /* Function prototypes for non-inline static functions. */ static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine); -static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine); -static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, +static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine); +static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound); /******************************************************************************/ static quarantine_t * -quarantine_init(tsd_t *tsd, size_t lg_maxobjs) +quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs) { quarantine_t *quarantine; + size_t size; - assert(tsd_nominal(tsd)); - - quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs) - + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false, - tcache_get(tsd, true), true, NULL); + size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) * + sizeof(quarantine_obj_t)); + quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size), + false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (quarantine == NULL) return (NULL); quarantine->curbytes = 0; @@ -47,7 +47,7 @@ quarantine_alloc_hook_work(tsd_t *tsd) if (!tsd_nominal(tsd)) return; - quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT); + quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT); /* * Check again whether quarantine has been initialized, because * quarantine_init() may have triggered recursive initialization. 
@@ -55,7 +55,7 @@ quarantine_alloc_hook_work(tsd_t *tsd) if (tsd_quarantine_get(tsd) == NULL) tsd_quarantine_set(tsd, quarantine); else - idalloctm(tsd, quarantine, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); } static quarantine_t * @@ -63,9 +63,9 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine) { quarantine_t *ret; - ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1); + ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1); if (ret == NULL) { - quarantine_drain_one(tsd, quarantine); + quarantine_drain_one(tsd_tsdn(tsd), quarantine); return (quarantine); } @@ -87,18 +87,18 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine) memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b * sizeof(quarantine_obj_t)); } - idalloctm(tsd, quarantine, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); tsd_quarantine_set(tsd, ret); return (ret); } static void -quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine) +quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine) { quarantine_obj_t *obj = &quarantine->objs[quarantine->first]; - assert(obj->usize == isalloc(obj->ptr, config_prof)); - idalloctm(tsd, obj->ptr, NULL, false); + assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof)); + idalloctm(tsdn, obj->ptr, NULL, false, true); quarantine->curbytes -= obj->usize; quarantine->curobjs--; quarantine->first = (quarantine->first + 1) & ((ZU(1) << @@ -106,24 +106,24 @@ quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine) } static void -quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound) +quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound) { while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) - quarantine_drain_one(tsd, quarantine); + quarantine_drain_one(tsdn, quarantine); } void quarantine(tsd_t *tsd, void *ptr) { quarantine_t *quarantine; - size_t usize = isalloc(ptr, config_prof); + size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); cassert(config_fill); assert(opt_quarantine); if ((quarantine = tsd_quarantine_get(tsd)) == NULL) { - idalloctm(tsd, ptr, NULL, false); + idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true); return; } /* @@ -133,7 +133,7 @@ quarantine(tsd_t *tsd, void *ptr) if (quarantine->curbytes + usize > opt_quarantine) { size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine - usize : 0; - quarantine_drain(tsd, quarantine, upper_bound); + quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound); } /* Grow the quarantine ring buffer if it's full. 
*/ if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs)) @@ -158,11 +158,11 @@ quarantine(tsd_t *tsd, void *ptr) && usize <= SMALL_MAXCLASS) arena_quarantine_junk_small(ptr, usize); else - memset(ptr, 0x5a, usize); + memset(ptr, JEMALLOC_FREE_JUNK, usize); } } else { assert(quarantine->curbytes == 0); - idalloctm(tsd, ptr, NULL, false); + idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true); } } @@ -176,8 +176,8 @@ quarantine_cleanup(tsd_t *tsd) quarantine = tsd_quarantine_get(tsd); if (quarantine != NULL) { - quarantine_drain(tsd, quarantine, 0); - idalloctm(tsd, quarantine, tcache_get(tsd, false), true); + quarantine_drain(tsd_tsdn(tsd), quarantine, 0); + idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); tsd_quarantine_set(tsd, NULL); } } diff --git a/deps/jemalloc/src/rtree.c b/deps/jemalloc/src/rtree.c index af0d97e75..f2e2997d5 100644 --- a/deps/jemalloc/src/rtree.c +++ b/deps/jemalloc/src/rtree.c @@ -15,6 +15,8 @@ rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, { unsigned bits_in_leaf, height, i; + assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) / + RTREE_BITS_PER_LEVEL)); assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3)); bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL @@ -94,12 +96,15 @@ rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp) rtree_node_elm_t *node; if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) { + spin_t spinner; + /* * Another thread is already in the process of initializing. * Spin-wait until initialization is complete. */ + spin_init(&spinner); do { - CPU_SPINWAIT; + spin_adaptive(&spinner); node = atomic_read_p((void **)elmp); } while (node == RTREE_NODE_INITIALIZING); } else { @@ -123,5 +128,5 @@ rtree_node_elm_t * rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level) { - return (rtree_node_init(rtree, level, &elm->child)); + return (rtree_node_init(rtree, level+1, &elm->child)); } diff --git a/deps/jemalloc/src/spin.c b/deps/jemalloc/src/spin.c new file mode 100644 index 000000000..5242d95aa --- /dev/null +++ b/deps/jemalloc/src/spin.c @@ -0,0 +1,2 @@ +#define JEMALLOC_SPIN_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/deps/jemalloc/src/stats.c b/deps/jemalloc/src/stats.c index 154c3e74c..1360f3bd0 100644..100755 --- a/deps/jemalloc/src/stats.c +++ b/deps/jemalloc/src/stats.c @@ -3,7 +3,7 @@ #define CTL_GET(n, v, t) do { \ size_t sz = sizeof(t); \ - xmallctl(n, v, &sz, NULL, 0); \ + xmallctl(n, (void *)v, &sz, NULL, 0); \ } while (0) #define CTL_M2_GET(n, i, v, t) do { \ @@ -12,7 +12,7 @@ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0) #define CTL_M2_M4_GET(n, i, j, v, t) do { \ @@ -22,7 +22,7 @@ xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ mib[4] = (j); \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0) /******************************************************************************/ @@ -33,85 +33,106 @@ bool opt_stats_print = false; size_t stats_cactive = 0; /******************************************************************************/ -/* Function prototypes for non-inline static functions. 
*/ - -static void stats_arena_bins_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_hchunks_print( - void (*write_cb)(void *, const char *), void *cbopaque, unsigned i); -static void stats_arena_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i, bool bins, bool large, bool huge); - -/******************************************************************************/ static void stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) + bool json, bool large, bool huge, unsigned i) { size_t page; - bool config_tcache, in_gap; + bool config_tcache, in_gap, in_gap_prev; unsigned nbins, j; CTL_GET("arenas.page", &page, size_t); - CTL_GET("config.tcache", &config_tcache, bool); - if (config_tcache) { + CTL_GET("arenas.nbins", &nbins, unsigned); + if (json) { malloc_cprintf(write_cb, cbopaque, - "bins: size ind allocated nmalloc" - " ndalloc nrequests curregs curruns regs" - " pgs util nfills nflushes newruns" - " reruns\n"); + "\t\t\t\t\"bins\": [\n"); } else { - malloc_cprintf(write_cb, cbopaque, - "bins: size ind allocated nmalloc" - " ndalloc nrequests curregs curruns regs" - " pgs util newruns reruns\n"); + CTL_GET("config.tcache", &config_tcache, bool); + if (config_tcache) { + malloc_cprintf(write_cb, cbopaque, + "bins: size ind allocated nmalloc" + " ndalloc nrequests curregs" + " curruns regs pgs util nfills" + " nflushes newruns reruns\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "bins: size ind allocated nmalloc" + " ndalloc nrequests curregs" + " curruns regs pgs util newruns" + " reruns\n"); + } } - CTL_GET("arenas.nbins", &nbins, unsigned); for (j = 0, in_gap = false; j < nbins; j++) { uint64_t nruns; + size_t reg_size, run_size, curregs; + size_t curruns; + uint32_t nregs; + uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t nreruns; CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns, uint64_t); - if (nruns == 0) - in_gap = true; - else { - size_t reg_size, run_size, curregs, availregs, milli; - size_t curruns; - uint32_t nregs; - uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; - uint64_t reruns; - char util[6]; /* "x.yyy". 
*/ + in_gap_prev = in_gap; + in_gap = (nruns == 0); - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - in_gap = false; - } - CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); - CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); - CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, - size_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, - &nmalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, - &ndalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, - &curregs, size_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, - &nrequests, uint64_t); + if (!json && in_gap_prev && !in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + + CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); + CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); + CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, size_t); + + CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs, + size_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, + &nrequests, uint64_t); + if (config_tcache) { + CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, + &nfills, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, + &nflushes, uint64_t); + } + CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, &nreruns, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, &curruns, + size_t); + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t{\n" + "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n" + "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n" + "\t\t\t\t\t\t\"curregs\": %zu,\n" + "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n", + nmalloc, + ndalloc, + curregs, + nrequests); if (config_tcache) { - CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, - j, &nfills, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", - i, j, &nflushes, uint64_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n" + "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n", + nfills, + nflushes); } - CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, - &reruns, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, - &curruns, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\t\"nreruns\": %"FMTu64",\n" + "\t\t\t\t\t\t\"curruns\": %zu\n" + "\t\t\t\t\t}%s\n", + nreruns, + curruns, + (j + 1 < nbins) ? "," : ""); + } else if (!in_gap) { + size_t availregs, milli; + char util[6]; /* "x.yyy". */ availregs = nregs * curruns; milli = (availregs != 0) ? (1000 * curregs) / availregs @@ -138,7 +159,7 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, reg_size, j, curregs * reg_size, nmalloc, ndalloc, nrequests, curregs, curruns, nregs, run_size / page, util, nfills, nflushes, - nruns, reruns); + nruns, nreruns); } else { malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"FMTu64 @@ -147,28 +168,38 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, " %12"FMTu64"\n", reg_size, j, curregs * reg_size, nmalloc, ndalloc, nrequests, curregs, curruns, nregs, - run_size / page, util, nruns, reruns); + run_size / page, util, nruns, nreruns); } } } - if (in_gap) { + if (json) { malloc_cprintf(write_cb, cbopaque, - " ---\n"); + "\t\t\t\t]%s\n", (large || huge) ? 
"," : ""); + } else { + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } } } static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) + bool json, bool huge, unsigned i) { unsigned nbins, nlruns, j; - bool in_gap; + bool in_gap, in_gap_prev; - malloc_cprintf(write_cb, cbopaque, - "large: size ind allocated nmalloc ndalloc" - " nrequests curruns\n"); CTL_GET("arenas.nbins", &nbins, unsigned); CTL_GET("arenas.nlruns", &nlruns, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"lruns\": [\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "large: size ind allocated nmalloc" + " ndalloc nrequests curruns\n"); + } for (j = 0, in_gap = false; j < nlruns; j++) { uint64_t nmalloc, ndalloc, nrequests; size_t run_size, curruns; @@ -179,17 +210,25 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, uint64_t); CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j, &nrequests, uint64_t); - if (nrequests == 0) - in_gap = true; - else { - CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t); - CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, - &curruns, size_t); - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - in_gap = false; - } + in_gap_prev = in_gap; + in_gap = (nrequests == 0); + + if (!json && in_gap_prev && !in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + + CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t); + CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, &curruns, + size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t{\n" + "\t\t\t\t\t\t\"curruns\": %zu\n" + "\t\t\t\t\t}%s\n", + curruns, + (j + 1 < nlruns) ? "," : ""); + } else if (!in_gap) { malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 " %12"FMTu64" %12zu\n", @@ -197,25 +236,35 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, ndalloc, nrequests, curruns); } } - if (in_gap) { + if (json) { malloc_cprintf(write_cb, cbopaque, - " ---\n"); + "\t\t\t\t]%s\n", huge ? 
"," : ""); + } else { + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } } } static void stats_arena_hchunks_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i) + void *cbopaque, bool json, unsigned i) { unsigned nbins, nlruns, nhchunks, j; - bool in_gap; + bool in_gap, in_gap_prev; - malloc_cprintf(write_cb, cbopaque, - "huge: size ind allocated nmalloc ndalloc" - " nrequests curhchunks\n"); CTL_GET("arenas.nbins", &nbins, unsigned); CTL_GET("arenas.nlruns", &nlruns, unsigned); CTL_GET("arenas.nhchunks", &nhchunks, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"hchunks\": [\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "huge: size ind allocated nmalloc" + " ndalloc nrequests curhchunks\n"); + } for (j = 0, in_gap = false; j < nhchunks; j++) { uint64_t nmalloc, ndalloc, nrequests; size_t hchunk_size, curhchunks; @@ -226,18 +275,25 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *), &ndalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j, &nrequests, uint64_t); - if (nrequests == 0) - in_gap = true; - else { - CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, - size_t); - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, - j, &curhchunks, size_t); - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - in_gap = false; - } + in_gap_prev = in_gap; + in_gap = (nrequests == 0); + + if (!json && in_gap_prev && !in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + + CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, size_t); + CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, j, + &curhchunks, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t{\n" + "\t\t\t\t\t\t\"curhchunks\": %zu\n" + "\t\t\t\t\t}%s\n", + curhchunks, + (j + 1 < nhchunks) ? 
"," : ""); + } else if (!in_gap) { malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 " %12"FMTu64" %12zu\n", @@ -246,20 +302,25 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *), nrequests, curhchunks); } } - if (in_gap) { + if (json) { malloc_cprintf(write_cb, cbopaque, - " ---\n"); + "\t\t\t\t]\n"); + } else { + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } } } static void stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i, bool bins, bool large, bool huge) + bool json, unsigned i, bool bins, bool large, bool huge) { unsigned nthreads; const char *dss; - ssize_t lg_dirty_mult; - size_t page, pactive, pdirty, mapped; + ssize_t lg_dirty_mult, decay_time; + size_t page, pactive, pdirty, mapped, retained; size_t metadata_mapped, metadata_allocated; uint64_t npurge, nmadvise, purged; size_t small_allocated; @@ -272,240 +333,435 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, CTL_GET("arenas.page", &page, size_t); CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); - malloc_cprintf(write_cb, cbopaque, - "assigned threads: %u\n", nthreads); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"nthreads\": %u,\n", nthreads); + } else { + malloc_cprintf(write_cb, cbopaque, + "assigned threads: %u\n", nthreads); + } + CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); - malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n", - dss); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"dss\": \"%s\",\n", dss); + } else { + malloc_cprintf(write_cb, cbopaque, + "dss allocation precedence: %s\n", dss); + } + CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t); - if (lg_dirty_mult >= 0) { + if (json) { malloc_cprintf(write_cb, cbopaque, - "min active:dirty page ratio: %u:1\n", - (1U << lg_dirty_mult)); + "\t\t\t\t\"lg_dirty_mult\": %zd,\n", lg_dirty_mult); } else { + if (opt_purge == purge_mode_ratio) { + if (lg_dirty_mult >= 0) { + malloc_cprintf(write_cb, cbopaque, + "min active:dirty page ratio: %u:1\n", + (1U << lg_dirty_mult)); + } else { + malloc_cprintf(write_cb, cbopaque, + "min active:dirty page ratio: N/A\n"); + } + } + } + + CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t); + if (json) { malloc_cprintf(write_cb, cbopaque, - "min active:dirty page ratio: N/A\n"); + "\t\t\t\t\"decay_time\": %zd,\n", decay_time); + } else { + if (opt_purge == purge_mode_decay) { + if (decay_time >= 0) { + malloc_cprintf(write_cb, cbopaque, + "decay time: %zd\n", decay_time); + } else { + malloc_cprintf(write_cb, cbopaque, + "decay time: N/A\n"); + } + } } + CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t); CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t); CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "dirty pages: %zu:%zu active:dirty, %"FMTu64" sweep%s, %"FMTu64 - " madvise%s, %"FMTu64" purged\n", pactive, pdirty, npurge, npurge == - 1 ? "" : "s", nmadvise, nmadvise == 1 ? 
"" : "s", purged); - - malloc_cprintf(write_cb, cbopaque, - " allocated nmalloc ndalloc" - " nrequests\n"); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"pactive\": %zu,\n", pactive); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"pdirty\": %zu,\n", pdirty); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"npurge\": %"FMTu64",\n", npurge); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"nmadvise\": %"FMTu64",\n", nmadvise); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"purged\": %"FMTu64",\n", purged); + } else { + malloc_cprintf(write_cb, cbopaque, + "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64 + ", purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged); + } + CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated, size_t); CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t); CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t); CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "small: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - small_allocated, small_nmalloc, small_ndalloc, small_nrequests); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"small\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + " allocated nmalloc" + " ndalloc nrequests\n"); + malloc_cprintf(write_cb, cbopaque, + "small: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + small_allocated, small_nmalloc, small_ndalloc, + small_nrequests); + } + CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated, size_t); CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t); CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t); CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "large: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - large_allocated, large_nmalloc, large_ndalloc, large_nrequests); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"large\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "large: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + large_allocated, large_nmalloc, large_ndalloc, + large_nrequests); + } + CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t); CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t); CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t); CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "huge: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - huge_allocated, huge_nmalloc, huge_ndalloc, 
huge_nrequests); - malloc_cprintf(write_cb, cbopaque, - "total: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - small_allocated + large_allocated + huge_allocated, - small_nmalloc + large_nmalloc + huge_nmalloc, - small_ndalloc + large_ndalloc + huge_ndalloc, - small_nrequests + large_nrequests + huge_nrequests); - malloc_cprintf(write_cb, cbopaque, - "active: %12zu\n", pactive * page); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"huge\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu,\n", huge_allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", huge_nmalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", huge_ndalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", huge_nrequests); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "huge: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests); + malloc_cprintf(write_cb, cbopaque, + "total: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + small_allocated + large_allocated + huge_allocated, + small_nmalloc + large_nmalloc + huge_nmalloc, + small_ndalloc + large_ndalloc + huge_ndalloc, + small_nrequests + large_nrequests + huge_nrequests); + } + if (!json) { + malloc_cprintf(write_cb, cbopaque, + "active: %12zu\n", pactive * page); + } + CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, - "mapped: %12zu\n", mapped); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"mapped\": %zu,\n", mapped); + } else { + malloc_cprintf(write_cb, cbopaque, + "mapped: %12zu\n", mapped); + } + + CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"retained\": %zu,\n", retained); + } else { + malloc_cprintf(write_cb, cbopaque, + "retained: %12zu\n", retained); + } + CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped, size_t); CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated, size_t); - malloc_cprintf(write_cb, cbopaque, - "metadata: mapped: %zu, allocated: %zu\n", - metadata_mapped, metadata_allocated); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"metadata\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"mapped\": %zu,\n", metadata_mapped); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu\n", metadata_allocated); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "metadata: mapped: %zu, allocated: %zu\n", + metadata_mapped, metadata_allocated); + } - if (bins) - stats_arena_bins_print(write_cb, cbopaque, i); + if (bins) { + stats_arena_bins_print(write_cb, cbopaque, json, large, huge, + i); + } if (large) - stats_arena_lruns_print(write_cb, cbopaque, i); + stats_arena_lruns_print(write_cb, cbopaque, json, huge, i); if (huge) - stats_arena_hchunks_print(write_cb, cbopaque, i); + stats_arena_hchunks_print(write_cb, cbopaque, json, i); } -void -stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) +static void +stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, + bool json, bool merged, bool unmerged) { - int err; - uint64_t epoch; - size_t u64sz; - bool general = true; - bool merged = true; - bool unmerged = true; - bool bins = true; - bool large = true; - bool huge = 
true; + const char *cpv; + bool bv; + unsigned uv; + uint32_t u32v; + uint64_t u64v; + ssize_t ssv; + size_t sv, bsz, usz, ssz, sssz, cpsz; - /* - * Refresh stats, in case mallctl() was called by the application. - * - * Check for OOM here, since refreshing the ctl cache can trigger - * allocation. In practice, none of the subsequent mallctl()-related - * calls in this function will cause OOM if this one succeeds. - * */ - epoch = 1; - u64sz = sizeof(uint64_t); - err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t)); - if (err != 0) { - if (err == EAGAIN) { - malloc_write("<jemalloc>: Memory allocation failure in " - "mallctl(\"epoch\", ...)\n"); - return; - } - malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", " - "...)\n"); - abort(); - } + bsz = sizeof(bool); + usz = sizeof(unsigned); + ssz = sizeof(size_t); + sssz = sizeof(ssize_t); + cpsz = sizeof(const char *); - if (opts != NULL) { - unsigned i; + CTL_GET("version", &cpv, const char *); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"version\": \"%s\",\n", cpv); + } else + malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); - for (i = 0; opts[i] != '\0'; i++) { - switch (opts[i]) { - case 'g': - general = false; - break; - case 'm': - merged = false; - break; - case 'a': - unmerged = false; - break; - case 'b': - bins = false; - break; - case 'l': - large = false; - break; - case 'h': - huge = false; - break; - default:; - } - } + /* config. */ +#define CONFIG_WRITE_BOOL_JSON(n, c) \ + if (json) { \ + CTL_GET("config."#n, &bv, bool); \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %s%s\n", bv ? "true" : "false", \ + (c)); \ } - malloc_cprintf(write_cb, cbopaque, - "___ Begin jemalloc statistics ___\n"); - if (general) { - const char *cpv; - bool bv; - unsigned uv; - ssize_t ssv; - size_t sv, bsz, ssz, sssz, cpsz; + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"config\": {\n"); + } - bsz = sizeof(bool); - ssz = sizeof(size_t); - sssz = sizeof(ssize_t); - cpsz = sizeof(const char *); + CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",") - CTL_GET("version", &cpv, const char *); - malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); - CTL_GET("config.debug", &bv, bool); + CTL_GET("config.debug", &bv, bool); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"debug\": %s,\n", bv ? "true" : "false"); + } else { malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", bv ? "enabled" : "disabled"); + } + + CONFIG_WRITE_BOOL_JSON(fill, ",") + CONFIG_WRITE_BOOL_JSON(lazy_lock, ",") + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"malloc_conf\": \"%s\",\n", + config_malloc_conf); + } else { + malloc_cprintf(write_cb, cbopaque, + "config.malloc_conf: \"%s\"\n", config_malloc_conf); + } -#define OPT_WRITE_BOOL(n) \ - if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \ + CONFIG_WRITE_BOOL_JSON(munmap, ",") + CONFIG_WRITE_BOOL_JSON(prof, ",") + CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",") + CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",") + CONFIG_WRITE_BOOL_JSON(stats, ",") + CONFIG_WRITE_BOOL_JSON(tcache, ",") + CONFIG_WRITE_BOOL_JSON(tls, ",") + CONFIG_WRITE_BOOL_JSON(utrace, ",") + CONFIG_WRITE_BOOL_JSON(valgrind, ",") + CONFIG_WRITE_BOOL_JSON(xmalloc, "") + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t},\n"); + } +#undef CONFIG_WRITE_BOOL_JSON + + /* opt. */ +#define OPT_WRITE_BOOL(n, c) \ + if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %s%s\n", bv ? 
"true" : \ + "false", (c)); \ + } else { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %s\n", bv ? "true" : "false"); \ - } -#define OPT_WRITE_BOOL_MUTABLE(n, m) { \ - bool bv2; \ - if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \ - je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \ + } \ + } +#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \ + bool bv2; \ + if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \ + je_mallctl(#m, &bv2, (void *)&bsz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \ + "false", (c)); \ + } else { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %s ("#m": %s)\n", bv ? "true" \ : "false", bv2 ? "true" : "false"); \ } \ + } \ } -#define OPT_WRITE_SIZE_T(n) \ - if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \ +#define OPT_WRITE_UNSIGNED(n, c) \ + if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %u%s\n", uv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %u\n", uv); \ + } \ + } +#define OPT_WRITE_SIZE_T(n, c) \ + if (je_mallctl("opt."#n, (void *)&sv, &ssz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %zu%s\n", sv, (c)); \ + } else { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %zu\n", sv); \ - } -#define OPT_WRITE_SSIZE_T(n) \ - if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \ + } \ + } +#define OPT_WRITE_SSIZE_T(n, c) \ + if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ + } else { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %zd\n", ssv); \ - } -#define OPT_WRITE_SSIZE_T_MUTABLE(n, m) { \ - ssize_t ssv2; \ - if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 && \ - je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \ + } \ + } +#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \ + ssize_t ssv2; \ + if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \ + je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ + } else { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %zd ("#m": %zd)\n", \ ssv, ssv2); \ } \ + } \ } -#define OPT_WRITE_CHAR_P(n) \ - if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \ +#define OPT_WRITE_CHAR_P(n, c) \ + if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \ + } else { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": \"%s\"\n", cpv); \ - } + } \ + } + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"opt\": {\n"); + } else { malloc_cprintf(write_cb, cbopaque, "Run-time option settings:\n"); - OPT_WRITE_BOOL(abort) - OPT_WRITE_SIZE_T(lg_chunk) - OPT_WRITE_CHAR_P(dss) - OPT_WRITE_SIZE_T(narenas) - OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, arenas.lg_dirty_mult) - OPT_WRITE_BOOL(stats_print) - OPT_WRITE_CHAR_P(junk) - OPT_WRITE_SIZE_T(quarantine) - OPT_WRITE_BOOL(redzone) - OPT_WRITE_BOOL(zero) - OPT_WRITE_BOOL(utrace) - OPT_WRITE_BOOL(valgrind) - OPT_WRITE_BOOL(xmalloc) - OPT_WRITE_BOOL(tcache) - OPT_WRITE_SSIZE_T(lg_tcache_max) - OPT_WRITE_BOOL(prof) - OPT_WRITE_CHAR_P(prof_prefix) - OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active) - OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, - prof.thread_active_init) - OPT_WRITE_SSIZE_T(lg_prof_sample) 
- OPT_WRITE_BOOL(prof_accum) - OPT_WRITE_SSIZE_T(lg_prof_interval) - OPT_WRITE_BOOL(prof_gdump) - OPT_WRITE_BOOL(prof_final) - OPT_WRITE_BOOL(prof_leak) + } + OPT_WRITE_BOOL(abort, ",") + OPT_WRITE_SIZE_T(lg_chunk, ",") + OPT_WRITE_CHAR_P(dss, ",") + OPT_WRITE_UNSIGNED(narenas, ",") + OPT_WRITE_CHAR_P(purge, ",") + if (json || opt_purge == purge_mode_ratio) { + OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, + arenas.lg_dirty_mult, ",") + } + if (json || opt_purge == purge_mode_decay) { + OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",") + } + OPT_WRITE_CHAR_P(junk, ",") + OPT_WRITE_SIZE_T(quarantine, ",") + OPT_WRITE_BOOL(redzone, ",") + OPT_WRITE_BOOL(zero, ",") + OPT_WRITE_BOOL(utrace, ",") + OPT_WRITE_BOOL(xmalloc, ",") + OPT_WRITE_BOOL(tcache, ",") + OPT_WRITE_SSIZE_T(lg_tcache_max, ",") + OPT_WRITE_BOOL(prof, ",") + OPT_WRITE_CHAR_P(prof_prefix, ",") + OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",") + OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init, + ",") + OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",") + OPT_WRITE_BOOL(prof_accum, ",") + OPT_WRITE_SSIZE_T(lg_prof_interval, ",") + OPT_WRITE_BOOL(prof_gdump, ",") + OPT_WRITE_BOOL(prof_final, ",") + OPT_WRITE_BOOL(prof_leak, ",") + /* + * stats_print is always emitted, so as long as stats_print comes last + * it's safe to unconditionally omit the comma here (rather than having + * to conditionally omit it elsewhere depending on configuration). + */ + OPT_WRITE_BOOL(stats_print, "") + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t},\n"); + } #undef OPT_WRITE_BOOL #undef OPT_WRITE_BOOL_MUTABLE @@ -513,128 +769,386 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque, #undef OPT_WRITE_SSIZE_T #undef OPT_WRITE_CHAR_P - malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus); + /* arenas. */ + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"arenas\": {\n"); + } - CTL_GET("arenas.narenas", &uv, unsigned); + CTL_GET("arenas.narenas", &uv, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"narenas\": %u,\n", uv); + } else malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); - malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n", - sizeof(void *)); + CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"lg_dirty_mult\": %zd,\n", ssv); + } else if (opt_purge == purge_mode_ratio) { + if (ssv >= 0) { + malloc_cprintf(write_cb, cbopaque, + "Min active:dirty page ratio per arena: " + "%u:1\n", (1U << ssv)); + } else { + malloc_cprintf(write_cb, cbopaque, + "Min active:dirty page ratio per arena: " + "N/A\n"); + } + } + CTL_GET("arenas.decay_time", &ssv, ssize_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"decay_time\": %zd,\n", ssv); + } else if (opt_purge == purge_mode_decay) { + malloc_cprintf(write_cb, cbopaque, + "Unused dirty page decay time: %zd%s\n", + ssv, (ssv < 0) ? 
" (no decay)" : ""); + } - CTL_GET("arenas.quantum", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", - sv); + CTL_GET("arenas.quantum", &sv, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"quantum\": %zu,\n", sv); + } else + malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv); - CTL_GET("arenas.page", &sv, size_t); + CTL_GET("arenas.page", &sv, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"page\": %zu,\n", sv); + } else malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); - CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t); - if (ssv >= 0) { + if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) { + if (json) { malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: %u:1\n", - (1U << ssv)); + "\t\t\t\"tcache_max\": %zu,\n", sv); } else { malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: N/A\n"); + "Maximum thread-cached size class: %zu\n", sv); } - if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) { + } + + if (json) { + unsigned nbins, nlruns, nhchunks, i; + + CTL_GET("arenas.nbins", &nbins, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nbins\": %u,\n", nbins); + + CTL_GET("arenas.nhbins", &uv, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nhbins\": %u,\n", uv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"bin\": [\n"); + for (i = 0; i < nbins; i++) { malloc_cprintf(write_cb, cbopaque, - "Maximum thread-cached size class: %zu\n", sv); + "\t\t\t\t{\n"); + + CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"size\": %zu,\n", sv); + + CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v); + + CTL_M2_GET("arenas.bin.0.run_size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"run_size\": %zu\n", sv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : ""); } - if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) { - CTL_GET("prof.lg_sample", &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t],\n"); + + CTL_GET("arenas.nlruns", &nlruns, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nlruns\": %u,\n", nlruns); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"lrun\": [\n"); + for (i = 0; i < nlruns; i++) { malloc_cprintf(write_cb, cbopaque, - "Average profile sample interval: %"FMTu64 - " (2^%zu)\n", (((uint64_t)1U) << sv), sv); + "\t\t\t\t{\n"); - CTL_GET("opt.lg_prof_interval", &ssv, ssize_t); - if (ssv >= 0) { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: %"FMTu64 - " (2^%zd)\n", - (((uint64_t)1U) << ssv), ssv); - } else { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: N/A\n"); - } + CTL_M2_GET("arenas.lrun.0.size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"size\": %zu\n", sv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t}%s\n", (i + 1 < nlruns) ? 
"," : ""); } - CTL_GET("opt.lg_chunk", &sv, size_t); malloc_cprintf(write_cb, cbopaque, - "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv); + "\t\t\t],\n"); + + CTL_GET("arenas.nhchunks", &nhchunks, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nhchunks\": %u,\n", nhchunks); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"hchunk\": [\n"); + for (i = 0; i < nhchunks; i++) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t{\n"); + + CTL_M2_GET("arenas.hchunk.0.size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"size\": %zu\n", sv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t}%s\n", (i + 1 < nhchunks) ? "," : ""); + } + malloc_cprintf(write_cb, cbopaque, + "\t\t\t]\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t},\n"); } - if (config_stats) { - size_t *cactive; - size_t allocated, active, metadata, resident, mapped; + /* prof. */ + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"prof\": {\n"); - CTL_GET("stats.cactive", &cactive, size_t *); - CTL_GET("stats.allocated", &allocated, size_t); - CTL_GET("stats.active", &active, size_t); - CTL_GET("stats.metadata", &metadata, size_t); - CTL_GET("stats.resident", &resident, size_t); - CTL_GET("stats.mapped", &mapped, size_t); + CTL_GET("prof.thread_active_init", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"thread_active_init\": %s,\n", bv ? "true" : + "false"); + + CTL_GET("prof.active", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"active\": %s,\n", bv ? "true" : "false"); + + CTL_GET("prof.gdump", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"gdump\": %s,\n", bv ? "true" : "false"); + + CTL_GET("prof.interval", &u64v, uint64_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"interval\": %"FMTu64",\n", u64v); + + CTL_GET("prof.lg_sample", &ssv, ssize_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"lg_sample\": %zd\n", ssv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t}%s\n", (config_stats || merged || unmerged) ? "," : + ""); + } +} + +static void +stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque, + bool json, bool merged, bool unmerged, bool bins, bool large, bool huge) +{ + size_t *cactive; + size_t allocated, active, metadata, resident, mapped, retained; + + CTL_GET("stats.cactive", &cactive, size_t *); + CTL_GET("stats.allocated", &allocated, size_t); + CTL_GET("stats.active", &active, size_t); + CTL_GET("stats.metadata", &metadata, size_t); + CTL_GET("stats.resident", &resident, size_t); + CTL_GET("stats.mapped", &mapped, size_t); + CTL_GET("stats.retained", &retained, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"stats\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"cactive\": %zu,\n", atomic_read_z(cactive)); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"allocated\": %zu,\n", allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"active\": %zu,\n", active); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"metadata\": %zu,\n", metadata); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"resident\": %zu,\n", resident); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"mapped\": %zu,\n", mapped); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"retained\": %zu\n", retained); + + malloc_cprintf(write_cb, cbopaque, + "\t\t}%s\n", (merged || unmerged) ? 
"," : ""); + } else { malloc_cprintf(write_cb, cbopaque, "Allocated: %zu, active: %zu, metadata: %zu," - " resident: %zu, mapped: %zu\n", - allocated, active, metadata, resident, mapped); + " resident: %zu, mapped: %zu, retained: %zu\n", + allocated, active, metadata, resident, mapped, retained); malloc_cprintf(write_cb, cbopaque, "Current active ceiling: %zu\n", atomic_read_z(cactive)); + } - if (merged) { - unsigned narenas; - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i, ninitialized; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); - for (i = ninitialized = 0; i < narenas; i++) { - if (initialized[i]) - ninitialized++; - } + if (merged || unmerged) { + unsigned narenas; + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"stats.arenas\": {\n"); + } + + CTL_GET("arenas.narenas", &narenas, unsigned); + { + VARIABLE_ARRAY(bool, initialized, narenas); + size_t isz; + unsigned i, j, ninitialized; + + isz = sizeof(bool) * narenas; + xmallctl("arenas.initialized", (void *)initialized, + &isz, NULL, 0); + for (i = ninitialized = 0; i < narenas; i++) { + if (initialized[i]) + ninitialized++; + } - if (ninitialized > 1 || !unmerged) { - /* Print merged arena stats. */ + /* Merged stats. */ + if (merged && (ninitialized > 1 || !unmerged)) { + /* Print merged arena stats. */ + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"merged\": {\n"); + } else { malloc_cprintf(write_cb, cbopaque, "\nMerged arenas stats:\n"); + } + stats_arena_print(write_cb, cbopaque, json, + narenas, bins, large, huge); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t}%s\n", (ninitialized > 1) ? + "," : ""); + } + } + + /* Unmerged stats. */ + for (i = j = 0; i < narenas; i++) { + if (initialized[i]) { + if (json) { + j++; + malloc_cprintf(write_cb, + cbopaque, + "\t\t\t\"%u\": {\n", i); + } else { + malloc_cprintf(write_cb, + cbopaque, "\narenas[%u]:\n", + i); + } stats_arena_print(write_cb, cbopaque, - narenas, bins, large, huge); + json, i, bins, large, huge); + if (json) { + malloc_cprintf(write_cb, + cbopaque, + "\t\t\t}%s\n", (j < + ninitialized) ? "," : ""); + } } } } - if (unmerged) { - unsigned narenas; + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t}\n"); + } + } +} - /* Print stats for each arena. */ +void +stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) +{ + int err; + uint64_t epoch; + size_t u64sz; + bool json = false; + bool general = true; + bool merged = true; + bool unmerged = true; + bool bins = true; + bool large = true; + bool huge = true; - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i; + /* + * Refresh stats, in case mallctl() was called by the application. + * + * Check for OOM here, since refreshing the ctl cache can trigger + * allocation. In practice, none of the subsequent mallctl()-related + * calls in this function will cause OOM if this one succeeds. 
+ * */ + epoch = 1; + u64sz = sizeof(uint64_t); + err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch, + sizeof(uint64_t)); + if (err != 0) { + if (err == EAGAIN) { + malloc_write("<jemalloc>: Memory allocation failure in " + "mallctl(\"epoch\", ...)\n"); + return; + } + malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", " + "...)\n"); + abort(); + } - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); + if (opts != NULL) { + unsigned i; - for (i = 0; i < narenas; i++) { - if (initialized[i]) { - malloc_cprintf(write_cb, - cbopaque, - "\narenas[%u]:\n", i); - stats_arena_print(write_cb, - cbopaque, i, bins, large, - huge); - } - } + for (i = 0; opts[i] != '\0'; i++) { + switch (opts[i]) { + case 'J': + json = true; + break; + case 'g': + general = false; + break; + case 'm': + merged = false; + break; + case 'a': + unmerged = false; + break; + case 'b': + bins = false; + break; + case 'l': + large = false; + break; + case 'h': + huge = false; + break; + default:; } } } - malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n"); + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "{\n" + "\t\"jemalloc\": {\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "___ Begin jemalloc statistics ___\n"); + } + + if (general) + stats_general_print(write_cb, cbopaque, json, merged, unmerged); + if (config_stats) { + stats_print_helper(write_cb, cbopaque, json, merged, unmerged, + bins, large, huge); + } + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t}\n" + "}\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "--- End jemalloc statistics ---\n"); + } } diff --git a/deps/jemalloc/src/tcache.c b/deps/jemalloc/src/tcache.c index fdafd0c62..21540ff46 100644..100755 --- a/deps/jemalloc/src/tcache.c +++ b/deps/jemalloc/src/tcache.c @@ -10,7 +10,7 @@ ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; tcache_bin_info_t *tcache_bin_info; static unsigned stack_nelms; /* Total stack elms per tcache. */ -size_t nhbins; +unsigned nhbins; size_t tcache_maxclass; tcaches_t *tcaches; @@ -23,10 +23,11 @@ static tcaches_t *tcaches_avail; /******************************************************************************/ -size_t tcache_salloc(const void *ptr) +size_t +tcache_salloc(tsdn_t *tsdn, const void *ptr) { - return (arena_salloc(ptr, false)); + return (arena_salloc(tsdn, ptr, false)); } void @@ -67,20 +68,19 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) tcache->next_gc_bin++; if (tcache->next_gc_bin == nhbins) tcache->next_gc_bin = 0; - tcache->ev_cnt = 0; } void * -tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache, - tcache_bin_t *tbin, szind_t binind) +tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + tcache_bin_t *tbin, szind_t binind, bool *tcache_success) { void *ret; - arena_tcache_fill_small(arena, tbin, binind, config_prof ? + arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ? tcache->prof_accumbytes : 0); if (config_prof) tcache->prof_accumbytes = 0; - ret = tcache_alloc_easy(tbin); + ret = tcache_alloc_easy(tbin, tcache_success); return (ret); } @@ -102,17 +102,18 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { /* Lock the arena bin associated with the first object. 
*/ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - tbin->avail[0]); + *(tbin->avail - 1)); arena_t *bin_arena = extent_node_arena_get(&chunk->node); arena_bin_t *bin = &bin_arena->bins[binind]; if (config_prof && bin_arena == arena) { - if (arena_prof_accum(arena, tcache->prof_accumbytes)) - prof_idump(); + if (arena_prof_accum(tsd_tsdn(tsd), arena, + tcache->prof_accumbytes)) + prof_idump(tsd_tsdn(tsd)); tcache->prof_accumbytes = 0; } - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); if (config_stats && bin_arena == arena) { assert(!merged_stats); merged_stats = true; @@ -122,16 +123,16 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, } ndeferred = 0; for (i = 0; i < nflush; i++) { - ptr = tbin->avail[i]; + ptr = *(tbin->avail - 1 - i); assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (extent_node_arena_get(&chunk->node) == bin_arena) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; arena_chunk_map_bits_t *bitselm = - arena_bitselm_get(chunk, pageind); - arena_dalloc_bin_junked_locked(bin_arena, chunk, - ptr, bitselm); + arena_bitselm_get_mutable(chunk, pageind); + arena_dalloc_bin_junked_locked(tsd_tsdn(tsd), + bin_arena, chunk, ptr, bitselm); } else { /* * This object was allocated via a different @@ -139,11 +140,12 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, * locked. Stash the object, so that it can be * handled in a future pass. */ - tbin->avail[ndeferred] = ptr; + *(tbin->avail - 1 - ndeferred) = ptr; ndeferred++; } } - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred); } if (config_stats && !merged_stats) { /* @@ -151,15 +153,15 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, * arena, so the stats didn't get merged. Manually do so now. */ arena_bin_t *bin = &arena->bins[binind]; - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); bin->stats.nflushes++; bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } - memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], - rem * sizeof(void *)); + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * + sizeof(void *)); tbin->ncached = rem; if ((int)tbin->ncached < tbin->low_water) tbin->low_water = tbin->ncached; @@ -182,13 +184,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { /* Lock the arena associated with the first object. 
*/ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - tbin->avail[0]); + *(tbin->avail - 1)); arena_t *locked_arena = extent_node_arena_get(&chunk->node); UNUSED bool idump; if (config_prof) idump = false; - malloc_mutex_lock(&locked_arena->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock); if ((config_prof || config_stats) && locked_arena == arena) { if (config_prof) { idump = arena_prof_accum_locked(arena, @@ -206,13 +208,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, } ndeferred = 0; for (i = 0; i < nflush; i++) { - ptr = tbin->avail[i]; + ptr = *(tbin->avail - 1 - i); assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (extent_node_arena_get(&chunk->node) == locked_arena) { - arena_dalloc_large_junked_locked(locked_arena, - chunk, ptr); + arena_dalloc_large_junked_locked(tsd_tsdn(tsd), + locked_arena, chunk, ptr); } else { /* * This object was allocated via a different @@ -220,62 +222,56 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, * Stash the object, so that it can be handled * in a future pass. */ - tbin->avail[ndeferred] = ptr; + *(tbin->avail - 1 - ndeferred) = ptr; ndeferred++; } } - malloc_mutex_unlock(&locked_arena->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock); if (config_prof && idump) - prof_idump(); + prof_idump(tsd_tsdn(tsd)); + arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush - + ndeferred); } if (config_stats && !merged_stats) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); arena->stats.nrequests_large += tbin->tstats.nrequests; arena->stats.lstats[binind - NBINS].nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); } - memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], - rem * sizeof(void *)); + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * + sizeof(void *)); tbin->ncached = rem; if ((int)tbin->ncached < tbin->low_water) tbin->low_water = tbin->ncached; } -void -tcache_arena_associate(tcache_t *tcache, arena_t *arena) +static void +tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { if (config_stats) { /* Link into list of extant tcaches. */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); ql_elm_new(tcache, link); ql_tail_insert(&arena->tcache_ql, tcache, link); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } } -void -tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena) -{ - - tcache_arena_dissociate(tcache, oldarena); - tcache_arena_associate(tcache, newarena); -} - -void -tcache_arena_dissociate(tcache_t *tcache, arena_t *arena) +static void +tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { if (config_stats) { /* Unlink from list of extant tcaches. 
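Both flush paths now feed arena_decay_ticks(), so tcache flushes also advance the time-based purging state added in this release. A hedged sketch of inspecting the related settings at run time; the "opt.purge" and "opt.decay_time" mallctl names (and the MALLOC_CONF spelling in the comment) are the editor's recollection of the 4.x interface, not something shown in this diff:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/*
 * Assumption: jemalloc 4.x exposes the purge mode as the string mallctl
 * "opt.purge" ("ratio" or "decay") and the decay time as "opt.decay_time";
 * decay-based purging would then be selected at startup with e.g.
 * MALLOC_CONF="purge:decay,decay_time:10".
 */
static void
show_purge_settings(void)
{
	const char *purge;
	ssize_t decay_time;
	size_t sz;

	sz = sizeof(purge);
	if (mallctl("opt.purge", &purge, &sz, NULL, 0) == 0)
		printf("purge mode: %s\n", purge);
	sz = sizeof(decay_time);
	if (mallctl("opt.decay_time", &decay_time, &sz, NULL, 0) == 0)
		printf("decay time: %zd seconds\n", decay_time);
}
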
*/ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_debug) { bool in_ql = false; tcache_t *iter; @@ -288,11 +284,20 @@ tcache_arena_dissociate(tcache_t *tcache, arena_t *arena) assert(in_ql); } ql_remove(&arena->tcache_ql, tcache, link); - tcache_stats_merge(tcache, arena); - malloc_mutex_unlock(&arena->lock); + tcache_stats_merge(tsdn, tcache, arena); + malloc_mutex_unlock(tsdn, &arena->lock); } } +void +tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena, + arena_t *newarena) +{ + + tcache_arena_dissociate(tsdn, tcache, oldarena); + tcache_arena_associate(tsdn, tcache, newarena); +} + tcache_t * tcache_get_hard(tsd_t *tsd) { @@ -306,11 +311,11 @@ tcache_get_hard(tsd_t *tsd) arena = arena_choose(tsd, NULL); if (unlikely(arena == NULL)) return (NULL); - return (tcache_create(tsd, arena)); + return (tcache_create(tsd_tsdn(tsd), arena)); } tcache_t * -tcache_create(tsd_t *tsd, arena_t *arena) +tcache_create(tsdn_t *tsdn, arena_t *arena) { tcache_t *tcache; size_t size, stack_offset; @@ -324,18 +329,26 @@ tcache_create(tsd_t *tsd, arena_t *arena) /* Avoid false cacheline sharing. */ size = sa2u(size, CACHELINE); - tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get()); + tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true, + arena_get(TSDN_NULL, 0, true)); if (tcache == NULL) return (NULL); - tcache_arena_associate(tcache, arena); + tcache_arena_associate(tsdn, tcache, arena); + + ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR); assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); for (i = 0; i < nhbins; i++) { tcache->tbins[i].lg_fill_div = 1; + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + /* + * avail points past the available space. Allocations will + * access the slots toward higher addresses (for the benefit of + * prefetch). + */ tcache->tbins[i].avail = (void **)((uintptr_t)tcache + (uintptr_t)stack_offset); - stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); } return (tcache); @@ -348,7 +361,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache) unsigned i; arena = arena_choose(tsd, NULL); - tcache_arena_dissociate(tcache, arena); + tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena); for (i = 0; i < NBINS; i++) { tcache_bin_t *tbin = &tcache->tbins[i]; @@ -356,9 +369,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache) if (config_stats && tbin->tstats.nrequests != 0) { arena_bin_t *bin = &arena->bins[i]; - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } } @@ -367,19 +380,19 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache) tcache_bin_flush_large(tsd, tbin, i, 0, tcache); if (config_stats && tbin->tstats.nrequests != 0) { - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); arena->stats.nrequests_large += tbin->tstats.nrequests; arena->stats.lstats[i - NBINS].nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); } } if (config_prof && tcache->prof_accumbytes > 0 && - arena_prof_accum(arena, tcache->prof_accumbytes)) - prof_idump(); + arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes)) + prof_idump(tsd_tsdn(tsd)); - idalloctm(tsd, tcache, false, true); + idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true); } void @@ -403,21 +416,22 @@ tcache_enabled_cleanup(tsd_t *tsd) /* Do nothing. 
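tcache_create() above now sets tbin->avail one element past the bin's slot array, and the flush loops index cached objects as *(avail - 1 - i). A standalone sketch of that downward-growing layout, using toy names rather than jemalloc's:

#include <assert.h>
#include <stddef.h>

/*
 * Illustration only: `avail` points just past the slot array and the stack
 * of cached pointers grows toward lower addresses, so the most recently
 * cached object sits at avail[-1].
 */
typedef struct {
	void	**avail;	/* One past the end of the slot array. */
	int	ncached;
} toy_bin_t;

static void
toy_cache(toy_bin_t *tbin, void *ptr, int ncached_max)
{
	assert(tbin->ncached < ncached_max);
	*(tbin->avail - 1 - tbin->ncached) = ptr;
	tbin->ncached++;
}

static void *
toy_alloc(toy_bin_t *tbin)
{
	if (tbin->ncached == 0)
		return (NULL);
	tbin->ncached--;
	return (*(tbin->avail - 1 - tbin->ncached));	/* LIFO reuse. */
}
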
*/ } -/* Caller must own arena->lock. */ void -tcache_stats_merge(tcache_t *tcache, arena_t *arena) +tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { unsigned i; cassert(config_stats); + malloc_mutex_assert_owner(tsdn, &arena->lock); + /* Merge and reset tcache stats. */ for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; tcache_bin_t *tbin = &tcache->tbins[i]; - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); tbin->tstats.nrequests = 0; } @@ -433,11 +447,12 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena) bool tcaches_create(tsd_t *tsd, unsigned *r_ind) { + arena_t *arena; tcache_t *tcache; tcaches_t *elm; if (tcaches == NULL) { - tcaches = base_alloc(sizeof(tcache_t *) * + tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) * (MALLOCX_TCACHE_MAX+1)); if (tcaches == NULL) return (true); @@ -445,7 +460,10 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) return (true); - tcache = tcache_create(tsd, a0get()); + arena = arena_ichoose(tsd, NULL); + if (unlikely(arena == NULL)) + return (true); + tcache = tcache_create(tsd_tsdn(tsd), arena); if (tcache == NULL) return (true); @@ -453,7 +471,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) elm = tcaches_avail; tcaches_avail = tcaches_avail->next; elm->tcache = tcache; - *r_ind = elm - tcaches; + *r_ind = (unsigned)(elm - tcaches); } else { elm = &tcaches[tcaches_past]; elm->tcache = tcache; @@ -491,7 +509,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind) } bool -tcache_boot(void) +tcache_boot(tsdn_t *tsdn) { unsigned i; @@ -499,17 +517,17 @@ tcache_boot(void) * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is * known. */ - if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS) + if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS) tcache_maxclass = SMALL_MAXCLASS; - else if ((1U << opt_lg_tcache_max) > large_maxclass) + else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass) tcache_maxclass = large_maxclass; else - tcache_maxclass = (1U << opt_lg_tcache_max); + tcache_maxclass = (ZU(1) << opt_lg_tcache_max); nhbins = size2index(tcache_maxclass) + 1; /* Initialize tcache_bin_info. */ - tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins * + tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins * sizeof(tcache_bin_info_t)); if (tcache_bin_info == NULL) return (true); diff --git a/deps/jemalloc/src/ticker.c b/deps/jemalloc/src/ticker.c new file mode 100644 index 000000000..db0902404 --- /dev/null +++ b/deps/jemalloc/src/ticker.c @@ -0,0 +1,2 @@ +#define JEMALLOC_TICKER_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/deps/jemalloc/src/tsd.c b/deps/jemalloc/src/tsd.c index 9ffe9afef..ec69a51c3 100644 --- a/deps/jemalloc/src/tsd.c +++ b/deps/jemalloc/src/tsd.c @@ -77,7 +77,7 @@ tsd_cleanup(void *arg) /* Do nothing. 
*/ break; case tsd_state_nominal: -#define O(n, t) \ +#define O(n, t) \ n##_cleanup(tsd); MALLOC_TSD #undef O @@ -106,15 +106,17 @@ MALLOC_TSD } } -bool +tsd_t * malloc_tsd_boot0(void) { + tsd_t *tsd; ncleanups = 0; if (tsd_boot0()) - return (true); - *tsd_arenas_cache_bypassp_get(tsd_fetch()) = true; - return (false); + return (NULL); + tsd = tsd_fetch(); + *tsd_arenas_tdata_bypassp_get(tsd) = true; + return (tsd); } void @@ -122,7 +124,7 @@ malloc_tsd_boot1(void) { tsd_boot1(); - *tsd_arenas_cache_bypassp_get(tsd_fetch()) = false; + *tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false; } #ifdef _WIN32 @@ -148,13 +150,15 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) #ifdef _MSC_VER # ifdef _M_IX86 # pragma comment(linker, "/INCLUDE:__tls_used") +# pragma comment(linker, "/INCLUDE:_tls_callback") # else # pragma comment(linker, "/INCLUDE:_tls_used") +# pragma comment(linker, "/INCLUDE:tls_callback") # endif # pragma section(".CRT$XLY",long,read) #endif JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) -static BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, +BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; #endif @@ -167,10 +171,10 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) tsd_init_block_t *iter; /* Check whether this thread has already inserted into the list. */ - malloc_mutex_lock(&head->lock); + malloc_mutex_lock(TSDN_NULL, &head->lock); ql_foreach(iter, &head->blocks, link) { if (iter->thread == self) { - malloc_mutex_unlock(&head->lock); + malloc_mutex_unlock(TSDN_NULL, &head->lock); return (iter->data); } } @@ -178,7 +182,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) ql_elm_new(block, link); block->thread = self; ql_tail_insert(&head->blocks, block, link); - malloc_mutex_unlock(&head->lock); + malloc_mutex_unlock(TSDN_NULL, &head->lock); return (NULL); } @@ -186,8 +190,8 @@ void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) { - malloc_mutex_lock(&head->lock); + malloc_mutex_lock(TSDN_NULL, &head->lock); ql_remove(&head->blocks, block, link); - malloc_mutex_unlock(&head->lock); + malloc_mutex_unlock(TSDN_NULL, &head->lock); } #endif diff --git a/deps/jemalloc/src/util.c b/deps/jemalloc/src/util.c index 4cb0d6c1e..dd8c23630 100644..100755 --- a/deps/jemalloc/src/util.c +++ b/deps/jemalloc/src/util.c @@ -1,3 +1,7 @@ +/* + * Define simple versions of assertion macros that won't recurse in case + * of assertion failures in malloc_*printf(). + */ #define assert(e) do { \ if (config_debug && !(e)) { \ malloc_write("<jemalloc>: Failed assertion\n"); \ @@ -10,6 +14,7 @@ malloc_write("<jemalloc>: Unreachable code reached\n"); \ abort(); \ } \ + unreachable(); \ } while (0) #define not_implemented() do { \ @@ -44,15 +49,19 @@ static void wrtmessage(void *cbopaque, const char *s) { -#ifdef SYS_write +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write) /* * Use syscall(2) rather than write(2) when possible in order to avoid * the possibility of memory allocation within libc. This is necessary * on FreeBSD; most operating systems do not have this problem though. + * + * syscall() returns long or int, depending on platform, so capture the + * unused result in the widest plausible type to avoid compiler + * warnings. 
*/ - UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s)); + UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s)); #else - UNUSED int result = write(STDERR_FILENO, s, strlen(s)); + UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s)); #endif } @@ -82,7 +91,7 @@ buferror(int err, char *buf, size_t buflen) #ifdef _WIN32 FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, - (LPSTR)buf, buflen, NULL); + (LPSTR)buf, (DWORD)buflen, NULL); return (0); #elif defined(__GLIBC__) && defined(_GNU_SOURCE) char *b = strerror_r(err, buf, buflen); @@ -191,7 +200,7 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) p++; } if (neg) - ret = -ret; + ret = (uintmax_t)(-((intmax_t)ret)); if (p == ns) { /* No conversion performed. */ @@ -306,10 +315,9 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) return (s); } -int +size_t malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { - int ret; size_t i; const char *f; @@ -400,6 +408,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) int prec = -1; int width = -1; unsigned char len = '?'; + char *s; + size_t slen; f++; /* Flags. */ @@ -490,8 +500,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) } /* Conversion specifier. */ switch (*f) { - char *s; - size_t slen; case '%': /* %% */ APPEND_C(*f); @@ -577,20 +585,19 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) str[i] = '\0'; else str[size - 1] = '\0'; - ret = i; #undef APPEND_C #undef APPEND_S #undef APPEND_PADDED_S #undef GET_ARG_NUMERIC - return (ret); + return (i); } JEMALLOC_FORMAT_PRINTF(3, 4) -int +size_t malloc_snprintf(char *str, size_t size, const char *format, ...) { - int ret; + size_t ret; va_list ap; va_start(ap, format); @@ -648,3 +655,12 @@ malloc_printf(const char *format, ...) malloc_vcprintf(NULL, NULL, format, ap); va_end(ap); } + +/* + * Restore normal assertion macros, in order to make it possible to compile all + * C files as a single concatenation. 
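util.c opens by defining stripped-down assert()/not_reached() macros that cannot recurse into malloc_*printf(), and the hunk that follows #undefs them again so the file still behaves when all sources are concatenated into a single translation unit. A generic sketch of the same shadow-and-restore pattern, using the standard <assert.h> (which by design redefines assert() on every inclusion):

#include <stdio.h>
#include <stdlib.h>

/* Shadow assert() with a version that cannot re-enter the code it guards. */
#define	assert(e) do {						\
	if (!(e)) {						\
		fputs("assertion failed\n", stderr);		\
		abort();					\
	}							\
} while (0)

static int
checked_div(int a, int b)
{
	assert(b != 0);		/* Uses the local, non-recursing macro. */
	return (a / b);
}

/* Restore the normal definition for any code that follows. */
#undef assert
#include <assert.h>
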
+ */ +#undef assert +#undef not_reached +#undef not_implemented +#include "jemalloc/internal/assert.h" diff --git a/deps/jemalloc/src/witness.c b/deps/jemalloc/src/witness.c new file mode 100644 index 000000000..23753f246 --- /dev/null +++ b/deps/jemalloc/src/witness.c @@ -0,0 +1,136 @@ +#define JEMALLOC_WITNESS_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +void +witness_init(witness_t *witness, const char *name, witness_rank_t rank, + witness_comp_t *comp) +{ + + witness->name = name; + witness->rank = rank; + witness->comp = comp; +} + +#ifdef JEMALLOC_JET +#undef witness_lock_error +#define witness_lock_error JEMALLOC_N(n_witness_lock_error) +#endif +void +witness_lock_error(const witness_list_t *witnesses, const witness_t *witness) +{ + witness_t *w; + + malloc_printf("<jemalloc>: Lock rank order reversal:"); + ql_foreach(w, witnesses, link) { + malloc_printf(" %s(%u)", w->name, w->rank); + } + malloc_printf(" %s(%u)\n", witness->name, witness->rank); + abort(); +} +#ifdef JEMALLOC_JET +#undef witness_lock_error +#define witness_lock_error JEMALLOC_N(witness_lock_error) +witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error); +#endif + +#ifdef JEMALLOC_JET +#undef witness_owner_error +#define witness_owner_error JEMALLOC_N(n_witness_owner_error) +#endif +void +witness_owner_error(const witness_t *witness) +{ + + malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name, + witness->rank); + abort(); +} +#ifdef JEMALLOC_JET +#undef witness_owner_error +#define witness_owner_error JEMALLOC_N(witness_owner_error) +witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error); +#endif + +#ifdef JEMALLOC_JET +#undef witness_not_owner_error +#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error) +#endif +void +witness_not_owner_error(const witness_t *witness) +{ + + malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name, + witness->rank); + abort(); +} +#ifdef JEMALLOC_JET +#undef witness_not_owner_error +#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error) +witness_not_owner_error_t *witness_not_owner_error = + JEMALLOC_N(n_witness_not_owner_error); +#endif + +#ifdef JEMALLOC_JET +#undef witness_lockless_error +#define witness_lockless_error JEMALLOC_N(n_witness_lockless_error) +#endif +void +witness_lockless_error(const witness_list_t *witnesses) +{ + witness_t *w; + + malloc_printf("<jemalloc>: Should not own any locks:"); + ql_foreach(w, witnesses, link) { + malloc_printf(" %s(%u)", w->name, w->rank); + } + malloc_printf("\n"); + abort(); +} +#ifdef JEMALLOC_JET +#undef witness_lockless_error +#define witness_lockless_error JEMALLOC_N(witness_lockless_error) +witness_lockless_error_t *witness_lockless_error = + JEMALLOC_N(n_witness_lockless_error); +#endif + +void +witnesses_cleanup(tsd_t *tsd) +{ + + witness_assert_lockless(tsd_tsdn(tsd)); + + /* Do nothing. */ +} + +void +witness_fork_cleanup(tsd_t *tsd) +{ + + /* Do nothing. 
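The new witness machinery gives every mutex a rank and aborts with the messages above when locks are taken out of rank order. A toy, single-threaded illustration of that rule; none of these names or types are jemalloc's:

#include <stdio.h>
#include <stdlib.h>

/*
 * Toy model of the invariant witness.c enforces: each lock carries a rank
 * and a thread may only acquire locks in non-decreasing rank order.  The
 * real implementation tracks a per-thread list of held witnesses; a single
 * global suffices for this sketch.
 */
typedef struct {
	const char	*name;
	unsigned	rank;
} toy_witness_t;

static unsigned held_rank;	/* Highest rank currently held (0 = none). */

static void
toy_witness_lock(const toy_witness_t *w)
{
	if (w->rank < held_rank) {
		fprintf(stderr, "lock order reversal: %s(%u) after rank %u\n",
		    w->name, w->rank, held_rank);
		abort();
	}
	held_rank = w->rank;
}

static void
toy_witness_unlock(const toy_witness_t *w)
{
	(void)w;
	held_rank = 0;	/* Simplification: assume everything is released. */
}
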
*/ +} + +void +witness_prefork(tsd_t *tsd) +{ + + tsd_witness_fork_set(tsd, true); +} + +void +witness_postfork_parent(tsd_t *tsd) +{ + + tsd_witness_fork_set(tsd, false); +} + +void +witness_postfork_child(tsd_t *tsd) +{ +#ifndef JEMALLOC_MUTEX_INIT_CB + witness_list_t *witnesses; + + witnesses = tsd_witnessesp_get(tsd); + ql_new(witnesses); +#endif + tsd_witness_fork_set(tsd, false); +} diff --git a/deps/jemalloc/src/zone.c b/deps/jemalloc/src/zone.c index 12e1734a9..0571920e4 100644 --- a/deps/jemalloc/src/zone.c +++ b/deps/jemalloc/src/zone.c @@ -4,7 +4,7 @@ #endif /* - * The malloc_default_purgeable_zone function is only available on >= 10.6. + * The malloc_default_purgeable_zone() function is only available on >= 10.6. * We need to check whether it is present at runtime, thus the weak_import. */ extern malloc_zone_t *malloc_default_purgeable_zone(void) @@ -13,8 +13,9 @@ JEMALLOC_ATTR(weak_import); /******************************************************************************/ /* Data. */ -static malloc_zone_t zone; -static struct malloc_introspection_t zone_introspect; +static malloc_zone_t *default_zone, *purgeable_zone; +static malloc_zone_t jemalloc_zone; +static struct malloc_introspection_t jemalloc_zone_introspect; /******************************************************************************/ /* Function prototypes for non-inline static functions. */ @@ -56,7 +57,7 @@ zone_size(malloc_zone_t *zone, void *ptr) * not work in practice, we must check all pointers to assure that they * reside within a mapped chunk before determining size. */ - return (ivsalloc(ptr, config_prof)); + return (ivsalloc(tsdn_fetch(), ptr, config_prof)); } static void * @@ -87,7 +88,7 @@ static void zone_free(malloc_zone_t *zone, void *ptr) { - if (ivsalloc(ptr, config_prof) != 0) { + if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) { je_free(ptr); return; } @@ -99,7 +100,7 @@ static void * zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) { - if (ivsalloc(ptr, config_prof) != 0) + if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) return (je_realloc(ptr, size)); return (realloc(ptr, size)); @@ -121,9 +122,11 @@ zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) { + size_t alloc_size; - if (ivsalloc(ptr, config_prof) != 0) { - assert(ivsalloc(ptr, config_prof) == size); + alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof); + if (alloc_size != 0) { + assert(alloc_size == size); je_free(ptr); return; } @@ -162,89 +165,103 @@ static void zone_force_unlock(malloc_zone_t *zone) { + /* + * Call jemalloc_postfork_child() rather than + * jemalloc_postfork_parent(), because this function is executed by both + * parent and child. The parent can tolerate having state + * reinitialized, but the child cannot unlock mutexes that were locked + * by the parent. + */ if (isthreaded) - jemalloc_postfork_parent(); + jemalloc_postfork_child(); } -JEMALLOC_ATTR(constructor) -void -register_zone(void) +static void +zone_init(void) { - /* - * If something else replaced the system default zone allocator, don't - * register jemalloc's. 
- */ - malloc_zone_t *default_zone = malloc_default_zone(); - malloc_zone_t *purgeable_zone = NULL; - if (!default_zone->zone_name || - strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { - return; - } - - zone.size = (void *)zone_size; - zone.malloc = (void *)zone_malloc; - zone.calloc = (void *)zone_calloc; - zone.valloc = (void *)zone_valloc; - zone.free = (void *)zone_free; - zone.realloc = (void *)zone_realloc; - zone.destroy = (void *)zone_destroy; - zone.zone_name = "jemalloc_zone"; - zone.batch_malloc = NULL; - zone.batch_free = NULL; - zone.introspect = &zone_introspect; - zone.version = JEMALLOC_ZONE_VERSION; + jemalloc_zone.size = (void *)zone_size; + jemalloc_zone.malloc = (void *)zone_malloc; + jemalloc_zone.calloc = (void *)zone_calloc; + jemalloc_zone.valloc = (void *)zone_valloc; + jemalloc_zone.free = (void *)zone_free; + jemalloc_zone.realloc = (void *)zone_realloc; + jemalloc_zone.destroy = (void *)zone_destroy; + jemalloc_zone.zone_name = "jemalloc_zone"; + jemalloc_zone.batch_malloc = NULL; + jemalloc_zone.batch_free = NULL; + jemalloc_zone.introspect = &jemalloc_zone_introspect; + jemalloc_zone.version = JEMALLOC_ZONE_VERSION; #if (JEMALLOC_ZONE_VERSION >= 5) - zone.memalign = zone_memalign; + jemalloc_zone.memalign = zone_memalign; #endif #if (JEMALLOC_ZONE_VERSION >= 6) - zone.free_definite_size = zone_free_definite_size; + jemalloc_zone.free_definite_size = zone_free_definite_size; #endif #if (JEMALLOC_ZONE_VERSION >= 8) - zone.pressure_relief = NULL; + jemalloc_zone.pressure_relief = NULL; #endif - zone_introspect.enumerator = NULL; - zone_introspect.good_size = (void *)zone_good_size; - zone_introspect.check = NULL; - zone_introspect.print = NULL; - zone_introspect.log = NULL; - zone_introspect.force_lock = (void *)zone_force_lock; - zone_introspect.force_unlock = (void *)zone_force_unlock; - zone_introspect.statistics = NULL; + jemalloc_zone_introspect.enumerator = NULL; + jemalloc_zone_introspect.good_size = (void *)zone_good_size; + jemalloc_zone_introspect.check = NULL; + jemalloc_zone_introspect.print = NULL; + jemalloc_zone_introspect.log = NULL; + jemalloc_zone_introspect.force_lock = (void *)zone_force_lock; + jemalloc_zone_introspect.force_unlock = (void *)zone_force_unlock; + jemalloc_zone_introspect.statistics = NULL; #if (JEMALLOC_ZONE_VERSION >= 6) - zone_introspect.zone_locked = NULL; + jemalloc_zone_introspect.zone_locked = NULL; #endif #if (JEMALLOC_ZONE_VERSION >= 7) - zone_introspect.enable_discharge_checking = NULL; - zone_introspect.disable_discharge_checking = NULL; - zone_introspect.discharge = NULL; -#ifdef __BLOCKS__ - zone_introspect.enumerate_discharged_pointers = NULL; -#else - zone_introspect.enumerate_unavailable_without_blocks = NULL; -#endif + jemalloc_zone_introspect.enable_discharge_checking = NULL; + jemalloc_zone_introspect.disable_discharge_checking = NULL; + jemalloc_zone_introspect.discharge = NULL; +# ifdef __BLOCKS__ + jemalloc_zone_introspect.enumerate_discharged_pointers = NULL; +# else + jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL; +# endif #endif +} + +static malloc_zone_t * +zone_default_get(void) +{ + malloc_zone_t **zones = NULL; + unsigned int num_zones = 0; /* - * The default purgeable zone is created lazily by OSX's libc. It uses - * the default zone when it is created for "small" allocations - * (< 15 KiB), but assumes the default zone is a scalable_zone. 
This - * obviously fails when the default zone is the jemalloc zone, so - * malloc_default_purgeable_zone is called beforehand so that the - * default purgeable zone is created when the default zone is still - * a scalable_zone. As purgeable zones only exist on >= 10.6, we need - * to check for the existence of malloc_default_purgeable_zone() at - * run time. + * On OSX 10.12, malloc_default_zone returns a special zone that is not + * present in the list of registered zones. That zone uses a "lite zone" + * if one is present (apparently enabled when malloc stack logging is + * enabled), or the first registered zone otherwise. In practice this + * means unless malloc stack logging is enabled, the first registered + * zone is the default. So get the list of zones to get the first one, + * instead of relying on malloc_default_zone. */ - if (malloc_default_purgeable_zone != NULL) - purgeable_zone = malloc_default_purgeable_zone(); + if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, + (vm_address_t**)&zones, &num_zones)) { + /* + * Reset the value in case the failure happened after it was + * set. + */ + num_zones = 0; + } - /* Register the custom zone. At this point it won't be the default. */ - malloc_zone_register(&zone); + if (num_zones) + return (zones[0]); + + return (malloc_default_zone()); +} + +/* As written, this function can only promote jemalloc_zone. */ +static void +zone_promote(void) +{ + malloc_zone_t *zone; do { - default_zone = malloc_default_zone(); /* * Unregister and reregister the default zone. On OSX >= 10.6, * unregistering takes the last registered zone and places it @@ -255,6 +272,7 @@ register_zone(void) */ malloc_zone_unregister(default_zone); malloc_zone_register(default_zone); + /* * On OSX 10.6, having the default purgeable zone appear before * the default zone makes some things crash because it thinks it @@ -266,9 +284,47 @@ register_zone(void) * above, i.e. the default zone. Registering it again then puts * it at the end, obviously after the default zone. */ - if (purgeable_zone) { + if (purgeable_zone != NULL) { malloc_zone_unregister(purgeable_zone); malloc_zone_register(purgeable_zone); } - } while (malloc_default_zone() != &zone); + + zone = zone_default_get(); + } while (zone != &jemalloc_zone); +} + +JEMALLOC_ATTR(constructor) +void +zone_register(void) +{ + + /* + * If something else replaced the system default zone allocator, don't + * register jemalloc's. + */ + default_zone = zone_default_get(); + if (!default_zone->zone_name || strcmp(default_zone->zone_name, + "DefaultMallocZone") != 0) + return; + + /* + * The default purgeable zone is created lazily by OSX's libc. It uses + * the default zone when it is created for "small" allocations + * (< 15 KiB), but assumes the default zone is a scalable_zone. This + * obviously fails when the default zone is the jemalloc zone, so + * malloc_default_purgeable_zone() is called beforehand so that the + * default purgeable zone is created when the default zone is still + * a scalable_zone. As purgeable zones only exist on >= 10.6, we need + * to check for the existence of malloc_default_purgeable_zone() at + * run time. + */ + purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL : + malloc_default_purgeable_zone(); + + /* Register the custom zone. At this point it won't be the default. */ + zone_init(); + malloc_zone_register(&jemalloc_zone); + + /* Promote the custom zone to be default. */ + zone_promote(); } |